Merge "Merge android-4.9-o.80 (a9fd318) into msm-4.9"
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 9a9a6d0..3a96610 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -190,6 +190,20 @@
* reg-names: funnel-base-real: actual register space for the
duplicate funnel.
+* Optional properties for CSRs:
+
 * qcom,usb-bam-support: boolean, indicates CSR has the ability to operate on
   the usb bam, including enable, disable and flush.

 * qcom,hwctrl-set-support: boolean, indicates CSR has the ability to operate
   on the "HWCTRL" register.

 * qcom,set-byte-cntr-support: boolean, indicates CSR has the ability to
   operate on the "BYTECNT" register.

 * qcom,timestamp-support: boolean, indicates CSR supports a sys interface to
   read the timestamp value.
+
Example:
1. Sinks
diff --git a/Documentation/devicetree/bindings/arm/msm/clock-controller.txt b/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
index 4cc49a59..83a1601 100644
--- a/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
+++ b/Documentation/devicetree/bindings/arm/msm/clock-controller.txt
@@ -12,10 +12,14 @@
Required properties:
- compatible: Must be one of following,
"qcom,gcc-8953"
+ "qcom,gcc-sdm632"
"qcom,cc-debug-8953"
+ "qcom,cc-debug-sdm632"
"qcom,gcc-mdss-8953"
+ "qcom,gcc-mdss-sdm632"
"qcom,gcc-gfx-8953"
"qcom,gcc-gfx-sdm450"
+ "qcom,gcc-gfx-sdm632"
- reg: Pairs of physical base addresses and region sizes of
memory mapped registers.
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index 105dcac..30961be 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -7,7 +7,7 @@
Required Properties:
- compatible: The bus devices need to be compatible with
"qcom,mdm2-modem", "qcom,ext-mdm9x25", "qcom,ext-mdm9x35", "qcom, ext-mdm9x45",
- "qcom,ext-mdm9x55".
+ "qcom,ext-mdm9x55", "qcom,ext-sdxpoorwills".
Required named gpio properties:
- qcom,mdm2ap-errfatal-gpio: gpio for the external modem to indicate to the apps processor
diff --git a/Documentation/devicetree/bindings/arm/msm/msm-machine-name.txt b/Documentation/devicetree/bindings/arm/msm/msm-machine-name.txt
new file mode 100644
index 0000000..28f6e7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm-machine-name.txt
@@ -0,0 +1,63 @@
+Msm Machine Name
+
+Machine name is used to:
+ 1. show up in the beginning of kernel message.
+ Example:
+ [ 0.000000] Machine: Qualcomm Technologies, Inc. MSM8953 PMI8950 MTP
+ 2. show up as arch description when do dump stack.
+ Example:
+ [ 1.222319] WARNING: CPU: 2 PID: 1 at kernel/lib/debugobjects.c:263 debug_print_object+0xa8/0xb0
+ [ 1.222334] Modules linked in:
+ [ 1.222362] CPU: 2 PID: 1 Comm: swapper/0 Not tainted 4.9.65+ #71
+ [ 1.222376] Hardware name: Qualcomm Technologies, Inc. MSM8953 PMI8950 MTP (DT)
+ [ 1.222392] task: ffffffc0ed1b0080 task.stack: ffffffc0ed1b8000
+ [ 1.222408] PC is at debug_print_object+0xa8/0xb0
+ [ 1.222424] LR is at debug_print_object+0xa8/0xb0
+
+Msm machine name is a string concatenated from:
+ 1. constant string contain msm information: "Qualcomm Technologies, Inc.".
+ 2. string of device tree property "qcom,msm-name".
+ 3. string of device tree property "qcom,pmic-name".
+ 4. string of device tree property "model".
+
+The reason for using the msm machine name is that a single board overlay
+device tree may be applied to multiple soc device trees. The "model" property
+in the soc device tree is overwritten by the board overlay device tree. So the
+final string in the "model" property can only contain board information, and
+the "qcom,msm-name" and "qcom,pmic-name" properties are introduced.
+
+Optional properties:
+- qcom,msm-name: The name string of MSM SoC chip
+- qcom,pmic-name: The name string of MSM Pmic chip
+
+Required properties:
+- model: in soc device tree
+ Contain the soc and pmic information.
+ Will be overwritten by model string in board overlay device tree.
+ It will be used in bootloader for debug purpose.
+- model: in board overlay device tree
+ Contain the board information. It is the final model string that
+ kernel can see.
+
+Note:
+When the device tree properties qcom,msm-name and qcom,pmic-name exist, it will
+use the concatenated msm machine name string for the final machine name.
+When the device tree properties qcom,msm-name and qcom,pmic-name don't exist,
+it will use the model property string for the final machine name.
+
+Example:
+* In soc device tree:
+ / {
+ model = "Qualcomm Technologies, Inc. APQ 8953 + PMI8950 SOC";
+ compatible = "qcom,apq8053";
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+ qcom,pmic-name = "PMI8950";
+ qcom,msm-id = <293 0x0>;
+ qcom,msm-name = "APQ8053";
+ };
+* In board overlay device tree:
+ / {
+ model = "MTP";
+ compatible = "qcom,mtp";
+ };
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 9bc8168..d150116 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -328,6 +328,9 @@
compatible = "qcom,sdm450-cdp"
compatible = "qcom,sdm450-qrd"
compatible = "qcom,sdm632-rumi"
+compatible = "qcom,sdm632-cdp"
+compatible = "qcom,sdm632-mtp"
+compatible = "qcom,sdm632-qrd"
compatible = "qcom,mdm9640-cdp"
compatible = "qcom,mdm9640-mtp"
compatible = "qcom,mdm9640-rumi"
diff --git a/Documentation/devicetree/bindings/fb/adv7533.txt b/Documentation/devicetree/bindings/fb/adv7533.txt
new file mode 100644
index 0000000..b198f37
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/adv7533.txt
@@ -0,0 +1,54 @@
+ADV7533 DSI to HDMI bridge
+
+
+Required properties:
+- compatible: Must be "adv7533"
+- reg: Main I2C slave ID (for I2C host driver)
+- adi,video-mode: Expects a number; possible inputs are 0 to 3, where:
+ 3 = 1080p
+ 2 = 720p
+ 1 = 480p
+ 0 = 1080p pattern
+- adi,main-addr: Main I2C slave ID
+- adi,cec-dsi-addr: CEC DSI I2C slave ID
+
+Optional properties:
+- adi,enable-audio:
+- adi,disable-gpios:
+- adi,irq-gpio: Main IRQ gpio mapping
+- adi,hpd-irq-gpio: HPD IRQ gpio mapping
+- adi,switch-gpio: DSI switch gpio mapping
+- qcom,supply-names: Regulator names that supply 5v to bridge chip
+- qcom,min-voltage-level Minimum voltage level to be supplied to bridge chip
+- qcom,max-voltage-level Maximum voltage level to be supplied to bridge chip
+- qcom,enable-load Load current to bridge chip when enabled
+- qcom,disable-load Load current to bridge chip when disabled
+- qcom,post-on-sleep Sleep time (ms) to indicate the sleep
+ time after the vreg is enabled
+
+Example:
+&soc {
+ i2c@78b8000 {
+ adv7533@39 {
+ compatible = "adv7533";
+ reg = <0x39>;
+ adi,video-mode = <3>; /* 3 = 1080p */
+ adi,main-addr = <0x39>;
+ adi,cec-dsi-addr = <0x3C>;
+ adi,enable-audio;
+ pinctrl-names = "pmx_adv7533_active","pmx_adv7533_suspend";
+ pinctrl-0 = <&adv7533_int_active &adv7533_hpd_int_active &adv7533_switch_active>;
+ pinctrl-1 = <&adv7533_int_suspend &adv7533_hpd_int_suspend &adv7533_switch_suspend>;
+ adi,irq-gpio = <&msm_gpio 31 0x2002>;
+ adi,hpd-irq-gpio = <&msm_gpio 20 0x2003>;
+ adi,switch-gpio = <&msm_gpio 32 0x0>;
+ hpd-5v-en-supply = <&adv_vreg>;
+ qcom,supply-names = "hpd-5v-en";
+ qcom,min-voltage-level = <0>;
+ qcom,max-voltage-level = <0>;
+ qcom,enable-load = <0>;
+ qcom,disable-load = <0>;
+ qcom,post-on-sleep = <10>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/fb/lt8912.txt b/Documentation/devicetree/bindings/fb/lt8912.txt
new file mode 100644
index 0000000..daeb15f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/lt8912.txt
@@ -0,0 +1,20 @@
+LT8912 DSI to HDMI bridge
+
+
+Required properties:
+- compatible: Must be "lontium,lt8912"
+- reg: Main I2C slave ID (for I2C host driver)
+
+Optional properties:
+- qcom,hdmi-reset: Main reset gpio mapping
+
+Example:
+&soc {
+ i2c@78b8000 {
+ lt8912@48 {
+ compatible = "lontium,lt8912";
+ reg = <0x48>;
+ qcom,hdmi-reset = <&tlmm 64 0x0>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 608b426..493a1aa 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -12,6 +12,7 @@
This property specifies the version
for DSI HW that this panel will work with
"qcom,dsi-panel-v2" = DSI V2.0
+ "qcom,msm-dsi-v2" = DSI V2.0
- status: This property applies to DSI V2 panels only.
This property should not be added for panels
that work based on version "V6.0"
@@ -37,8 +38,8 @@
"display_2" = DISPLAY_2
- qcom,mdss-dsi-panel-timings: An array of length 12 that specifies the PHY
timing settings for the panel.
-- qcom,mdss-dsi-panel-timings-8996: An array of length 40 char that specifies the 8996 PHY lane
- timing settings for the panel.
+- qcom,mdss-dsi-panel-timings-phy-v2: An array of length 40 char that specifies the PHY version 2
+ lane timing settings for the panel.
- qcom,mdss-dsi-on-command: A byte stream formed by multiple dcs packets base on
qcom dsi controller protocol.
byte 0: dcs data type
@@ -61,9 +62,39 @@
transmitted
byte 5, 6: 16 bits length in network byte order
byte 7 and beyond: number byte of payload
+- qcom,mdss-dsi-lp-mode-on: This is used to enable display low persistence mode.
+ A byte stream formed by multiple dcs packets base on
+ qcom dsi controller protocol.
+ byte 0: dcs data type
+ byte 1: set to indicate this is an individual packet
+ (no chain)
+ byte 2: virtual channel number
+ byte 3: expect ack from client (dcs read command)
+ byte 4: wait number of specified ms after dcs command
+ transmitted
+ byte 5, 6: 16 bits length in network byte order
+ byte 7 and beyond: number byte of payload
+- qcom,mdss-dsi-lp-mode-off: This is used to disable display low persistence mode.
+ A byte stream formed by multiple dcs packets base on
+ qcom dsi controller protocol.
+ byte 0: dcs data type
+ byte 1: set to indicate this is an individual packet
+ (no chain)
+ byte 2: virtual channel number
+ byte 3: expect ack from client (dcs read command)
+ byte 4: wait number of specified ms after dcs command
+ transmitted
+ byte 5, 6: 16 bits length in network byte order
+ byte 7 and beyond: number byte of payload
- qcom,mdss-dsi-post-panel-on-command: same as "qcom,mdss-dsi-on-command" except commands are
sent after displaying an image.
+- qcom,mdss-dsi-idle-on-command: same as "qcom,mdss-dsi-on-command". Set of DCS command
+ used for idle mode entry.
+
+- qcom,mdss-dsi-idle-off-command: same as "qcom,mdss-dsi-on-command". Set of DCS command
+ used for idle mode exit.
+
Note, if a short DCS packet(i.e packet with Byte 0:dcs data type as 05) mentioned in
qcom,mdss-dsi-on-command/qcom,mdss-dsi-off-command stream fails to transmit,
then 3 options can be tried.
@@ -275,14 +306,10 @@
to the physical width in the framebuffer information.
- qcom,mdss-pan-physical-height-dimension: Specifies panel physical height in mm which corresponds
to the physical height in the framebuffer information.
-- qcom,mdss-dsi-mode-sel-gpio-state: String that specifies the lcd mode for panel
- (such as single-port/dual-port), if qcom,panel-mode-gpio
- binding is defined in dsi controller.
- "dual_port" = Set GPIO to LOW
- "single_port" = Set GPIO to HIGH
+- qcom,mdss-dsi-panel-mode-gpio-state: String that specifies the mode state for panel if it is defined
+ in dsi controller.
"high" = Set GPIO to HIGH
"low" = Set GPIO to LOW
- The default value is "dual_port".
- qcom,mdss-tear-check-disable: Boolean to disable mdp tear check. Tear check is enabled by default to avoid
tearing. Other tear-check properties are ignored if this property is present.
The below tear check configuration properties can be individually tuned if
@@ -330,6 +357,28 @@
2A/2B command.
- qcom,dcs-cmd-by-left: Boolean to indicate that dcs command are sent
through the left DSI controller only in a dual-dsi configuration
+- qcom,mdss-dsi-panel-hdr-enabled: Boolean to indicate HDR support in panel.
+- qcom,mdss-dsi-panel-hdr-color-primaries:
+	Array of 8 unsigned integers denoting chromaticity of panel. These
+	values are specified in nits units. The value range is 0 through 50000.
+	To obtain real chromaticity, these values should be divided by factor of
+	50000. The structure of array is defined in below order
+ value 1: x value of white chromaticity of display panel
+ value 2: y value of white chromaticity of display panel
+ value 3: x value of red chromaticity of display panel
+ value 4: y value of red chromaticity of display panel
+ value 5: x value of green chromaticity of display panel
+ value 6: y value of green chromaticity of display panel
+ value 7: x value of blue chromaticity of display panel
+ value 8: y value of blue chromaticity of display panel
+- qcom,mdss-dsi-panel-peak-brightness: Maximum brightness supported by panel. In absence of maximum value
+	typical value becomes peak brightness. Value is specified in nits units.
+	To obtain real peak brightness, this value should be divided by factor of
+	10000.
+- qcom,mdss-dsi-panel-blackness-level: Blackness level supported by panel. Blackness level is defined as
+	ratio of peak brightness to contrast. Value is specified in nits units.
+	To obtain real blackness level, this value should be divided by factor of
+	10000.
- qcom,mdss-dsi-lp11-init: Boolean used to enable the DSI clocks and data lanes (low power 11)
before issuing hardware reset line.
- qcom,mdss-dsi-init-delay-us: Delay in microseconds(us) before performing any DSI activity in lp11
@@ -424,7 +473,11 @@
fields in the supply entry, refer to the qcom,ctrl-supply-entries
binding above.
- qcom,config-select: Optional property to select default configuration.
-
+- qcom,panel-allow-phy-poweroff: A boolean property indicates that panel allows to turn off the phy power
+ supply during idle screen. A panel should able to handle the dsi lanes
+ in floating state(not LP00 or LP11) to turn on this property. Software
+ turns off PHY pmic power supply, phy ldo and DSI Lane ldo during
+ idle screen (footswitch control off) when this property is enabled.
[[Optional config sub-nodes]] These subnodes provide different configurations for a given same panel.
Default configuration can be chosen by specifying phandle of the
selected subnode in the qcom,config-select.
@@ -471,6 +524,7 @@
to a non-DSI interface.
- qcom,bridge-name: A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name
is required if qcom,dba-panel is defined for the panel.
+- qcom,hdmi-mode: Indicates whether the current panel is in HDMI mode; otherwise, it will be in DVI mode.
- qcom,adjust-timer-wakeup-ms: An integer value to indicate the timer delay(in ms) to accommodate
s/w delay while configuring the event timer wakeup logic.
@@ -493,6 +547,8 @@
Note, if a given optional qcom,* binding is not present, then the driver will configure
the default values specified.
+Note, all the "qcom,supply-*" properties have their definitions in mdss-dsi.txt.
+
Example:
&mdss_mdp {
dsi_sim_vid: qcom,mdss_dsi_sim_video {
@@ -538,7 +594,6 @@
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = < 15>;
- qcom,mdss-brightness-max-level = <255>;
qcom,mdss-dsi-interleave-mode = <0>;
qcom,mdss-dsi-panel-type = "dsi_video_mode";
qcom,mdss-dsi-te-check-enable;
@@ -571,7 +626,7 @@
qcom,mdss-mdp-transfer-time-us = <12500>;
qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
22 27 1e 03 04 00];
- qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 20 06 09 05 03 04 a0
23 20 06 09 05 03 04 a0
23 20 06 09 05 03 04 a0
23 20 06 09 05 03 04 a0
@@ -581,6 +636,9 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command = [22 01 00 00 00 00 00];
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-lp-mode-on = [32 01 00 00 00 00 02 00 00
+ 29 01 00 00 10 00 02 FF 99];
+ qcom,mdss-dsi-lp-mode-off = [22 01 00 00 00 00 00];
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
@@ -592,7 +650,7 @@
qcom,5v-boost-gpio = <&pm8994_gpios 14 0>;
qcom,mdss-pan-physical-width-dimension = <60>;
qcom,mdss-pan-physical-height-dimension = <140>;
- qcom,mdss-dsi-mode-sel-gpio-state = "dsc_mode";
+ qcom,mdss-dsi-panel-mode-gpio-state = "low";
qcom,mdss-tear-check-sync-cfg-height = <0xfff0>;
qcom,mdss-tear-check-sync-init-val = <1280>;
qcom,mdss-tear-check-sync-threshold-start = <4>;
@@ -611,6 +669,7 @@
qcom,suspend-ulps-enabled;
qcom,panel-roi-alignment = <4 4 2 2 20 20>;
qcom,esd-check-enabled;
+ qcom,panel-allow-phy-poweroff;
qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 02 0A 08];
qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
qcom,mdss-dsi-panel-status-check-mode = "reg_read";
@@ -682,6 +741,7 @@
qcom,supply-max-voltage = <2800000>;
qcom,supply-enable-load = <100000>;
qcom,supply-disable-load = <100>;
+ qcom,supply-ulp-load = <100>;
qcom,supply-pre-on-sleep = <0>;
qcom,supply-post-on-sleep = <0>;
qcom,supply-pre-off-sleep = <0>;
@@ -695,6 +755,7 @@
qcom,supply-max-voltage = <1800000>;
qcom,supply-enable-load = <100000>;
qcom,supply-disable-load = <100>;
+ qcom,supply-ulp-load = <100>;
qcom,supply-pre-on-sleep = <0>;
qcom,supply-post-on-sleep = <0>;
qcom,supply-pre-off-sleep = <0>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi.txt b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
new file mode 100644
index 0000000..2f74f7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
@@ -0,0 +1,261 @@
+Qualcomm mdss-dsi
+
+mdss-dsi is the master DSI device which supports multiple DSI host controllers that
+are compatible with MIPI display serial interface specification.
+
+Required properties:
+- compatible: Must be "qcom,mdss-dsi"
+- hw-config: Specifies the DSI host setup configuration
+ "hw-config" = "single_dsi"
+ "hw-config" = "dual_dsi"
+ "hw-config" = "split_dsi"
+- ranges: The standard property which specifies the child address
+ space, parent address space and the length.
+- vdda-supply: Phandle for vreg regulator device node.
+
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing MDSS client.
+- qcom,msm-bus,num-cases:	This is the number of bus scaling use cases
+ defined in the vectors property. This must be
+ set to <2> for MDSS DSI driver where use-case 0
+ is used to remove BW votes from the system. Use
+				case 1 is used to generate bandwidth requests
+ when sending command packets.
+- qcom,msm-bus,num-paths: This represents number of paths in each bus
+ scaling usecase. This value depends on number of
+ AXI master ports dedicated to MDSS for
+ particular chipset.
+- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt.
+ DSI driver should always set average bandwidth
+ (ab) to 0 and always use instantaneous
+ bandwidth(ib) values.
+
+Optional properties:
+- vcca-supply: Phandle for vcca regulator device node.
+- qcom,<type>-supply-entries:	A node that lists the elements of the supply used by
+				a particular "type" of DSI module. The module "types"
+ can be "core", "ctrl", and "phy". Within the same type,
+ there can be more than one instance of this binding,
+ in which case the entry would be appended with the
+ supply entry index.
+ e.g. qcom,ctrl-supply-entry@0
+ -- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+ -- qcom,supply-min-voltage: minimum voltage level (uV)
+ -- qcom,supply-max-voltage: maximum voltage level (uV)
+ -- qcom,supply-enable-load: load drawn (uA) from enabled supply
+ -- qcom,supply-disable-load: load drawn (uA) from disabled supply
+ -- qcom,supply-ulp-load: load drawn (uA) from supply in ultra-low power mode
+ -- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+ -- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+ -- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+ -- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+- pll-src-config:		Specifies the source PLL for the DSI
+ link clocks:
+ "PLL0" - Clocks sourced out of DSI PLL0
+ "PLL1" - Clocks sourced out of DSI PLL1
+ This property is only valid for
+ certain DSI hardware configurations
+ mentioned in the "hw-config" binding above.
+ For example, in split_dsi config, the clocks can
+ only be sourced out of PLL0. For
+ dual_dsi, both PLL would be active.
+ For single DSI, it is possible to
+ select either PLL. If no value is specified,
+ the default value for single DSI is set as PLL0.
+- qcom,mmss-ulp-clamp-ctrl-offset: Specifies the offset for dsi ulps clamp control register.
+- qcom,mmss-phyreset-ctrl-offset: Specifies the offset for dsi phy reset control register.
+- qcom,dsi-clk-ln-recovery: Boolean which enables the clk lane recovery
+
+mdss-dsi-ctrl is a dsi controller device which is treated as a subnode of the mdss-dsi device.
+
+Required properties:
+- compatible: Must be "qcom,mdss-dsi-ctrl"
+- cell-index: Specifies the controller used among the two controllers.
+- reg: Base address and length of the different register
+ regions(s) required for DSI device functionality.
+- reg-names: A list of strings that map in order to the list of regs.
+ "dsi_ctrl" - MDSS DSI controller register region
+ "dsi_phy" - MDSS DSI PHY register region
+ "dsi_phy_regulator" - MDSS DSI PHY REGULATOR region
+ "mmss_misc_phys" - Register region for MMSS DSI clamps
+- vdd-supply: Phandle for vdd regulator device node.
+- vddio-supply: Phandle for vdd-io regulator device node.
+- qcom,mdss-fb-map-prim: pHandle that specifies the framebuffer to which the
+ primary interface is mapped.
+- qcom,mdss-mdp: pHandle that specifies the mdss-mdp device.
+- qcom,platform-regulator-settings: An array of length 7 or 5 that specifies the PHY
+				regulator settings. It uses 5 bytes for 8996 pll.
+- qcom,platform-strength-ctrl: An array of length 2 or 10 that specifies the PHY
+				strengthCtrl settings. It uses 10 bytes for 8996 pll.
+- qcom,platform-lane-config: An array of length 45 or 20 that specifies the PHY
+				lane configuration settings. It uses 20 bytes for 8996 pll.
+- qcom,platform-bist-ctrl: An array of length 6 that specifies the PHY
+ BIST ctrl settings.
+- qcom,dsi-pref-prim-pan: phandle that specifies the primary panel to be used
+ with the controller.
+
+Optional properties:
+- label: A string used to describe the controller used.
+- qcom,mdss-fb-map: pHandle that specifies the framebuffer to which the
+ interface is mapped.
+- qcom,mdss-fb-map-sec: pHandle that specifies the framebuffer to which the
+ secondary interface is mapped.
+- qcom,platform-enable-gpio: Specifies the panel lcd/display enable gpio.
+- qcom,platform-reset-gpio: Specifies the panel reset gpio.
+- qcom,platform-te-gpio: Specifies the gpio used for TE.
+- qcom,platform-bklight-en-gpio: Specifies the gpio used to enable display back-light
+- qcom,platform-mode-gpio: Select video/command mode of panel through gpio when it supports
+ both modes.
+- qcom,platform-intf-mux-gpio: Select dsi/external(hdmi) interface through gpio when it supports
+ either dsi or external interface.
+- pinctrl-names: List of names to assign mdss pin states defined in pinctrl device node
+ Refer to pinctrl-bindings.txt
+- pinctrl-<0..n>: Lists phandles each pointing to the pin configuration node within a pin
+ controller. These pin configurations are installed in the pinctrl
+ device node. Refer to pinctrl-bindings.txt
+- qcom,regulator-ldo-mode: Boolean to enable ldo mode for the dsi phy regulator
+- qcom,null-insertion-enabled: Boolean to enable NULL packet insertion
+ feature for DSI controller.
+- qcom,dsi-irq-line: Boolean specifies if DSI has a different irq line than mdp.
+- qcom,lane-map: Specifies the data lane swap configuration.
+ "lane_map_0123" = <0 1 2 3> (default value)
+ "lane_map_3012" = <3 0 1 2>
+ "lane_map_2301" = <2 3 0 1>
+ "lane_map_1230" = <1 2 3 0>
+ "lane_map_0321" = <0 3 2 1>
+ "lane_map_1032" = <1 0 3 2>
+ "lane_map_2103" = <2 1 0 3>
+ "lane_map_3210" = <3 2 1 0>
+- qcom,pluggable Boolean to enable hotplug feature.
+- qcom,timing-db-mode: Boolean specifies dsi timing mode registers are supported or not.
+- qcom,display-id A string indicates the display ID for the controller.
+ The possible values are:
+ - "primary"
+ - "secondary"
+ - "tertiary"
+- qcom,bridge-index: Instance id of the bridge chip connected to DSI. qcom,bridge-index is
+ required if a bridge chip panel is used.
+
+Example:
+ mdss_dsi: qcom,mdss_dsi@0 {
+ compatible = "qcom,mdss-dsi";
+ hw-config = "single_dsi";
+ pll-src-config = "PLL0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ vdda-supply = <&pm8226_l4>;
+ vcca-supply = <&pm8226_l28>;
+ reg = <0x1a98000 0x1a98000 0x25c
+ 0x1a98500 0x1a98500 0x280
+ 0x1a98780 0x1a98780 0x30
+ 0x193e000 0x193e000 0x30>;
+
+ qcom,dsi-clk-ln-recovery;
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ qcom,supply-ulp-load = <0>;
+ qcom,supply-pre-on-sleep = <0>;
+ qcom,supply-post-on-sleep = <0>;
+ qcom,supply-pre-off-sleep = <0>;
+ qcom,supply-post-off-sleep = <0>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <1800000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ qcom,supply-ulp-load = <100>;
+ qcom,supply-pre-on-sleep = <0>;
+ qcom,supply-post-on-sleep = <20>;
+ qcom,supply-pre-off-sleep = <0>;
+ qcom,supply-post-off-sleep = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda";
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ qcom,supply-ulp-load = <1000>;
+ qcom,supply-pre-on-sleep = <0>;
+ qcom,supply-post-on-sleep = <20>;
+ qcom,supply-pre-off-sleep = <0>;
+ qcom,supply-post-off-sleep = <0>;
+ };
+ };
+
+ mdss_dsi0: mdss_dsi_ctrl0@fd922800 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ label = "MDSS DSI CTRL->0";
+ cell-index = <0>;
+ reg = <0xfd922800 0x1f8>,
+ <0xfd922b00 0x2b0>,
+ <0xfd998780 0x30>,
+ <0xfd828000 0x108>;
+ reg-names = "dsi_ctrl", "dsi_phy",
+ "dsi_phy_regulator", "mmss_misc_phys";
+
+ vdd-supply = <&pm8226_l15>;
+ vddio-supply = <&pm8226_l8>;
+ qcom,mdss-fb-map-prim = <&mdss_fb0>;
+ qcom,mdss-mdp = <&mdss_mdp>;
+
+ qcom,dsi-pref-prim-pan = <&dsi_tosh_720_vid>;
+
+ qcom,platform-strength-ctrl = [ff 06];
+ qcom,platform-bist-ctrl = [00 00 b1 ff 00 00];
+ qcom,platform-regulator-settings = [07 09 03 00 20 00 01];
+ qcom,platform-lane-config = [00 00 00 00 00 00 00 01 97
+ 00 00 00 00 05 00 00 01 97
+ 00 00 00 00 0a 00 00 01 97
+ 00 00 00 00 0f 00 00 01 97
+ 00 c0 00 00 00 00 00 01 bb];
+
+ qcom,mmss-ulp-clamp-ctrl-offset = <0x20>;
+ qcom,mmss-phyreset-ctrl-offset = <0x24>;
+ qcom,regulator-ldo-mode;
+ qcom,null-insertion-enabled;
+ qcom,timing-db-mode;
+
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active>;
+ pinctrl-1 = <&mdss_dsi_suspend>;
+ qcom,platform-reset-gpio = <&msmgpio 25 1>;
+ qcom,platform-te-gpio = <&msmgpio 24 0>;
+ qcom,platform-enable-gpio = <&msmgpio 58 1>;
+ qcom,platform-bklight-en-gpio = <&msmgpio 86 0>;
+ qcom,platform-mode-gpio = <&msmgpio 7 0>;
+ qcom,platform-intf-mux-gpio = <&tlmm 115 0>;
+ qcom,dsi-irq-line;
+ qcom,lane-map = "lane_map_3012";
+ qcom,display-id = "primary";
+ qcom,bridge-index = <00>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/fb/mdss-edp.txt b/Documentation/devicetree/bindings/fb/mdss-edp.txt
new file mode 100644
index 0000000..c474b88
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-edp.txt
@@ -0,0 +1,52 @@
+Qualcomm MDSS EDP
+
+MDSS EDP is an edp driver which supports panels that are compatible with
+VESA EDP display interface specification.
+
+When configuring the optional properties for external backlight, one should also
+configure the gpio that drives the pwm to it.
+
+Required properties
+- compatible : Must be "qcom,mdss-edp".
+- reg : Offset and length of the register set for the
+ device.
+- reg-names : Names to refer to register sets related to this
+ device
+- vdda-supply : Phandle for vdd regulator device node.
+- gpio-panel-en : GPIO for supplying power to panel and backlight
+ driver.
+- gpio-lvl-en : GPIO to enable HPD be received by host.
+- status : A string that has to be set to "okay/ok" to enable
+ the driver. By default this property will be set to
+ "disable". Will be set to "ok/okay" status for
+ specific platforms.
+- qcom,mdss-fb-map: pHandle that specifies the framebuffer to which the
+ interface is mapped.
+- gpio-panel-hpd : gpio pin used for edp hpd
+
+Optional properties
+- qcom,panel-lpg-channel : LPG channel for backlight.
+- qcom,panel-pwm-period : PWM period in microseconds.
+
+
+Optional properties:
+- qcom,mdss-brightness-max-level: Specifies the max brightness level supported.
+ 255 = default value.
+
+Example:
+ mdss_edp: qcom,mdss_edp@fd923400 {
+ compatible = "qcom,mdss-edp";
+ reg = <0xfd923400 0x700>,
+ <0xfd8c2000 0x1000>;
+ reg-names = "edp_base", "mmss_cc_base";
+ vdda-supply = <&pm8941_l12>;
+ gpio-panel-en = <&msmgpio 58 0>;
+ gpio-lvl-en = <&msmgpio 91 0>;
+ qcom,panel-lpg-channel = <7>; /* LPG Channel 8 */
+ qcom,panel-pwm-period = <53>;
+ status = "disable";
+ qcom,mdss-fb-map = <&mdss_fb0>;
+ gpio-panel-hpd = <&msmgpio 102 0>;
+ };
+
+
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
new file mode 100644
index 0000000..e33d358
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -0,0 +1,898 @@
+Qualcomm MDSS MDP
+
+MDSS is Mobile Display SubSystem which implements Linux framebuffer APIs to
+drive user interface to different panel interfaces. MDP driver is the core of
+MDSS which manage all data paths to different panel interfaces.
+
+Required properties
+- compatible : Must be "qcom,mdss_mdp"
+ - "qcom,mdss_mdp3" for mdp3
+- reg : offset and length of the register set for the device.
+- reg-names : names to refer to register sets related to this device
+- interrupts : Interrupt associated with MDSS.
+- interrupt-controller: Mark the device node as an interrupt controller.
+ This is an empty, boolean property.
+- #interrupt-cells: Should be one. The first cell is interrupt number.
+- vdd-supply : Phandle for vdd regulator device node.
+- qcom,max-clk-rate: Specify maximum MDP core clock rate in hz that this
+ device supports.
+- qcom,mdss-pipe-vig-off: Array of offset for MDP source surface pipes of
+ type VIG, the offsets are calculated from
+ register "mdp_phys" defined in reg property.
+ The number of offsets defined here should
+ reflect the amount of VIG pipes that can be
+ active in MDP for this configuration.
+- qcom,mdss-pipe-vig-fetch-id: Array of shared memory pool fetch ids
+ corresponding to the VIG pipe offsets defined in
+ previous property, the amount of fetch ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-vig-xin-id: Array of VBIF clients ids (xins) corresponding
+ to the respective VIG pipes. Number of xin ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-vig-clk-ctrl-off: Array of offsets describing clk control
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the control
+ register. 2nd value represents bit offset within
+ control register and 3rd value represents bit
+ offset within status register. Number of tuples
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-rgb-off: Array of offsets for MDP source surface pipes of
+ type RGB, the offsets are calculated from
+ register "mdp_phys" defined in reg property.
+ The number of offsets defined here should
+ reflect the amount of RGB pipes that can be
+ active in MDP for this configuration.
+- qcom,mdss-pipe-rgb-fetch-id: Array of shared memory pool fetch ids
+ corresponding to the RGB pipe offsets defined in
+ previous property, the amount of fetch ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-rgb-xin-id: Array of VBIF clients ids (xins) corresponding
+ to the respective RGB pipes. Number of xin ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-rgb-clk-ctrl-off: Array of offsets describing clk control
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the control
+ register. 2nd value represents bit offset within
+ control register and 3rd value represents bit
+ offset within status register. Number of tuples
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-dma-off: Array of offsets for MDP source surface pipes of
+ type DMA, the offsets are calculated from
+ register "mdp_phys" defined in reg property.
+ The number of offsets defined here should
+ reflect the amount of DMA pipes that can be
+ active in MDP for this configuration.
+- qcom,mdss-pipe-dma-fetch-id: Array of shared memory pool fetch ids
+ corresponding to the DMA pipe offsets defined in
+ previous property, the amount of fetch ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-dma-xin-id: Array of VBIF clients ids (xins) corresponding
+ to the respective DMA pipes. Number of xin ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-dma-clk-ctrl-off: Array of offsets describing clk control
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the control
+ register. 2nd value represents bit offset within
+ control register and 3rd value represents bit
+ offset within status register. Number of tuples
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-cursor-off: Array of offsets for MDP source surface pipes of
+ type cursor, the offsets are calculated from
+ register "mdp_phys" defined in reg property.
+ The number of offsets defined here should
+ reflect the amount of cursor pipes that can be
+ active in MDP for this configuration. Meant for
+ hardware that has hw cursors support as a
+ source pipe.
+- qcom,mdss-pipe-cursor-xin-id: Array of VBIF clients ids (xins) corresponding
+ to the respective cursor pipes. Number of xin ids
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-cursor-off
+- qcom,mdss-pipe-cursor-clk-ctrl-off: Array of offsets describing clk control
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the control
+ register. 2nd value represents bit offset within
+ control register and 3rd value represents bit
+ offset within status register. Number of tuples
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-cursor-off
+- qcom,mdss-ctl-off: Array of offset addresses for the available ctl
+ hw blocks within MDP, these offsets are
+ calculated from register "mdp_phys" defined in
+ reg property. The number of ctl offsets defined
+ here should reflect the number of control paths
+ that can be configured concurrently on MDP for
+ this configuration.
+- qcom,mdss-wb-off:		Array of offset addresses for the programmable
+ writeback blocks within MDP. The number of
+ offsets defined should match the number of ctl
+ blocks defined in property: qcom,mdss-ctl-off
+- qcom,mdss-mixer-intf-off: Array of offset addresses for the available
+ mixer blocks that can drive data to panel
+ interfaces.
+				These offsets are calculated from register
+ "mdp_phys" defined in reg property.
+ The number of offsets defined should reflect the
+ amount of mixers that can drive data to a panel
+ interface.
+- qcom,mdss-dspp-off: Array of offset addresses for the available dspp
+ blocks. These offsets are calculated from
+				register "mdp_phys" defined in reg property.
+ The number of dspp blocks should match the
+ number of mixers driving data to interface
+ defined in property: qcom,mdss-mixer-intf-off
+- qcom,mdss-pingpong-off: Array of offset addresses for the available
+ pingpong blocks. These offsets are calculated
+				from register "mdp_phys" defined in reg property.
+ The number of pingpong blocks should match the
+ number of mixers driving data to interface
+ defined in property: qcom,mdss-mixer-intf-off
+- qcom,mdss-mixer-wb-off: Array of offset addresses for the available
+				mixer blocks that can drive data to writeback
+ block. These offsets will be calculated from
+ register "mdp_phys" defined in reg property.
+ The number of writeback mixer offsets defined
+ should reflect the number of mixers that can
+ drive data to a writeback block.
+- qcom,mdss-intf-off: Array of offset addresses for the available MDP
+ video interface blocks that can drive data to a
+ panel controller through timing engine.
+ The offsets are calculated from "mdp_phys"
+ defined in reg property. The number of offsets
+				defined should reflect the number of programmable
+ interface blocks available in hardware.
+- qcom,mdss-pref-prim-intf: A string which indicates the configured hardware
+ interface between MDP and the primary panel.
+ Individual panel controller drivers initialize
+ hardware based on this property.
+ Based on the interfaces supported at present,
+ possible values are:
+ - "dsi"
+ - "edp"
+ - "hdmi"
+
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing MDSS client.
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
+ defined in the vectors property. This must be
+ set to <3> for MDSS driver where use-case 0 is
+ used to take off MDSS BW votes from the system.
+ And use-case 1 & 2 are used in ping-pong fashion
+ to generate run-time BW requests.
+- qcom,msm-bus,active-only: A boolean flag indicating if it is active only.
+- qcom,msm-bus,num-paths: This represents the number of paths in each
+ Bus Scaling Usecase. This value depends on
+				how many AXI master ports are
+ dedicated to MDSS for particular chipset. This
+ value represents the RT + NRT AXI master ports.
+- qcom,msm-bus,vectors-KBps: * A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+ * Current values of src & dst are defined at
+ include/linux/msm-bus-board.h
+ src values allowed for MDSS are:
+ 22 = MSM_BUS_MASTER_MDP_PORT0
+ 23 = MSM_BUS_MASTER_MDP_PORT1
+ 25 = MSM_BUS_MASTER_ROTATOR
+ dst values allowed for MDSS are:
+ 512 = MSM_BUS_SLAVE_EBI_CH0
+ ab: Represents aggregated bandwidth.
+ ib: Represents instantaneous bandwidth.
+ * Total number of 4 cell properties will be
+ (number of use-cases * number of paths).
+ * These values will be overridden by the driver
+ based on the run-time requirements. So initial
+ ab and ib values defined here are random and
+				bear no logic except for the use-case 0 where ab
+ and ib values needs to be 0.
+ * Define realtime vector properties followed by
+ non-realtime vector properties.
+
+- qcom,mdss-prefill-outstanding-buffer-bytes: The size of mdp outstanding buffer
+ in bytes. The buffer is filled during prefill
+ time and the buffer size shall be included in
+ prefill bandwidth calculation.
+- qcom,mdss-prefill-y-buffer-bytes: The size of mdp y plane buffer in bytes. The
+ buffer is filled during prefill time when format
+ is YUV and the buffer size shall be included in
+ prefill bandwidth calculation.
+- qcom,mdss-prefill-scaler-buffer-lines-bilinear: The value indicates how many lines
+ of scaler line buffer need to be filled during
+ prefill time. If bilinear scalar is enabled, then this
+ number of lines is used to determine how many bytes
+ of scaler buffer to be included in prefill bandwidth
+ calculation.
+- qcom,mdss-prefill-scaler-buffer-lines-caf: The value indicates how many lines of
+			scaler line buffer need to be filled during
+ prefill time. If CAF mode filter is enabled, then
+ this number of lines is used to determine how many
+ bytes of scaler buffer to be included in prefill
+ bandwidth calculation.
+- qcom,mdss-prefill-post-scaler-buffer: The size of post scaler buffer in bytes.
+ The buffer is used to smooth the output of the
+ scaler. If the buffer is present in h/w, it is
+ filled during prefill time and the number of bytes
+ shall be included in prefill bandwidth calculation.
+- qcom,mdss-prefill-pingpong-buffer-pixels: The size of pingpong buffer in pixels.
+ The buffer is used to keep pixels flowing to the
+ panel interface. If the vertical start position of a
+ layer is in the beginning of the active area, pingpong
+ buffer must be filled during prefill time to generate
+ starting lines. The number of bytes to be filled is
+ determined by the line width, starting position,
+ byte per pixel and scaling ratio, this number shall be
+ included in prefill bandwidth calculation.
+- qcom,mdss-prefill-fbc-lines: The value indicates how many lines are required to fill
+ fbc buffer during prefill time if FBC (Frame Buffer
+ Compressor) is enabled. The number of bytes to be filled
+ is determined by the line width, bytes per pixel and
+ scaling ratio, this number shall be included in prefill bandwidth
+ calculation.
+- qcom,max-mixer-width: Specify maximum MDP mixer width that the device supports.
+ This is a mandatory property, if not specified then
+ mdp probe will fail.
+
+Optional properties:
+- batfet-supply : Phandle for battery FET regulator device node.
+- vdd-cx-supply : Phandle for vdd CX regulator device node.
+- qcom,vbif-settings : Array with key-value pairs of constant VBIF register
+ settings used to setup MDSS QoS for optimum performance.
+ The key used should be offset from "vbif_phys" register
+ defined in reg property.
+- qcom,vbif-nrt-settings : The key used should be offset from "vbif_nrt_phys"
+ register defined in reg property. Refer qcom,vbif-settings
+ for a detailed description of this binding.
+- qcom,mdp-settings : Array with key-value pairs of constant MDP register
+ settings used to setup MDSS QoS for best performance.
+ The key used should be offset from "mdp_phys" register
+ defined in reg property.
+- qcom,mdss-smp-data: Array of shared memory pool data for dynamic SMP. There
+ should be only two values in this property. The first
+ value corresponds to the number of smp blocks and the
+ second is the size of each block present in the mdss
+ hardware. This property is optional for MDP hardware
+				with fixed pixel latency RAM.
+- qcom,mdss-rot-block-size: The size of a memory block (in pixels) to be used
+ by the rotator. If this property is not specified,
+ then a default value of 128 pixels would be used.
+- qcom,mdss-has-bwc: Boolean property to indicate the presence of bandwidth
+ compression feature in the rotator.
+- qcom,mdss-has-non-scalar-rgb: Boolean property to indicate the presence of RGB
+ pipes which have no scaling support.
+- qcom,mdss-has-decimation: Boolean property to indicate the presence of
+ decimation feature in fetch.
+- qcom,mdss-has-fixed-qos-arbiter-enabled: Boolean property to indicate the
+ presence of rt/nrt feature. This feature enables
+ increased performance by prioritizing the real time
+ (rt) traffic over non real time (nrt) traffic to
+ access the memory.
+- qcom,mdss-num-nrt-paths: Integer property represents the number of non-realtime
+ paths in each Bus Scaling Usecase. This value depends on
+				the number of AXI ports dedicated to non-realtime VBIF for
+ particular chipset. This property is mandatory when
+ "qcom,mdss-has-fixed-qos-arbiter-enabled" is enabled.
+ These paths must be defined after rt-paths in
+ "qcom,msm-bus,vectors-KBps" vector request.
+- qcom,mdss-has-source-split: Boolean property to indicate if source split
+ feature is available or not.
+- qcom,mdss-has-rotator-downscale: Boolean property to indicate if rotator
+ downscale feature is available or not.
+- qcom,mdss-rot-downscale-min: This integer value indicates the Minimum
+ downscale factor supported by rotator.
+- qcom,mdss-rot-downscale-max: This integer value indicates the Maximum
+ downscale factor supported by rotator.
+- qcom,mdss-ad-off: Array of offset addresses for the available
+ Assertive Display (AD) blocks. These offsets
+ are calculated from the register "mdp_phys"
+ defined in reg property. The number of AD
+ offsets should be less than or equal to the
+ number of mixers driving interfaces defined in
+ property: qcom,mdss-mixer-intf-off. Assumes
+ that AD blocks are aligned with the mixer
+ offsets as well (i.e. the first mixer offset
+ corresponds to the same pathway as the first
+ AD offset).
+- qcom,mdss-has-wb-ad: Boolean property to indicate assertive display feature
+ support on write back framebuffer.
+- qcom,mdss-no-lut-read: Boolean property to indicate reading of LUT is
+ not supported.
+- qcom,mdss-no-hist-vote:	Boolean property to indicate histogram reads
+ and histogram LUT writes do not need additional
+ bandwidth voting.
+- qcom,mdss-mdp-wfd-mode: A string that specifies what is the mode of
+ writeback wfd block.
+ "intf" = Writeback wfd block is
+ connected to the interface mixer.
+ "shared" = Writeback block shared
+ between wfd and rotator.
+ "dedicated" = Dedicated writeback
+ block for wfd using writeback mixer.
+- qcom,mdss-smp-mb-per-pipe: Maximum number of shared memory pool blocks
+ restricted for a source surface pipe. If this
+ property is not specified, no such restriction
+ would be applied.
+- qcom,mdss-highest-bank-bit: Property to indicate tile format as opposed to usual
+ linear format. The value tells the GPU highest memory
+ bank bit used.
+- qcom,mdss-pipe-rgb-fixed-mmb: Array of indexes describing fixed Memory Macro
+ Blocks (MMBs) for rgb pipes. First value denotes
+ total numbers of MMBs per pipe while values, if
+ any, following first one denotes indexes of MMBs
+ to that RGB pipe.
+- qcom,mdss-pipe-vig-fixed-mmb: Array of indexes describing fixed Memory Macro
+ Blocks (MMBs) for vig pipes. First value denotes
+ total numbers of MMBs per pipe while values, if
+ any, following first one denotes indexes of MMBs
+ to that VIG pipe.
+- qcom,mdss-pipe-sw-reset-off: Property to indicate offset to the register which
+ holds sw_reset bitmap for different MDSS
+ components.
+- qcom,mdss-pipe-vig-sw-reset-map: Array of bit offsets for vig pipes within
+ sw_reset register bitmap. Number of offsets
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-rgb-sw-reset-map: Array of bit offsets for rgb pipes within
+ sw_reset register bitmap. Number of offsets
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-dma-sw-reset-map: Array of bit offsets for dma pipes within
+ sw_reset register bitmap. Number of offsets
+ defined should match the number of offsets
+ defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-default-ot-wr-limit: This integer value indicates maximum number of pending
+ writes that can be allowed on each WR xin.
+ This value can be used to reduce the pending writes
+ limit and can be tuned to match performance
+ requirements depending upon system state.
+ Some platforms require a dynamic ot limiting in
+ some cases. Setting this default ot write limit
+ will enable this dynamic limiting for the write
+ operations in the platforms that require these
+ limits.
+- qcom,mdss-default-ot-rd-limit: This integer value indicates the default number of pending
+ reads that can be allowed on each RD xin.
+ Some platforms require a dynamic ot limiting in
+ some cases. Setting this default ot read limit
+ will enable this dynamic limiting for the read
+ operations in the platforms that require these
+ limits.
+- qcom,mdss-clk-levels: This array indicates the mdp core clock level selection
+ array. Core clock is calculated for each frame and
+ hence depending upon calculated value, clock rate
+ will be rounded up to the next level according to
+ this table. Order of entries need to be ordered in
+ ascending order.
+- qcom,mdss-vbif-qos-rt-setting: This array is used to program vbif qos remapper register
+ priority for real time clients.
+- qcom,mdss-vbif-qos-nrt-setting: This array is used to program vbif qos remapper register
+ priority for non real time clients.
+- qcom,mdss-traffic-shaper-enabled: This boolean property enables traffic shaper functionality
+ for MDSS rotator which spread out rotator bandwidth request
+				so that the rotator does not compete with other real time read
+ clients.
+- qcom,mdss-dram-channels: This represents the number of channels in the
+ Bus memory controller.
+- qcom,regs-dump-mdp: This array represents the registers offsets that
+ will be dumped from the mdp when the debug logging
+ is enabled; each entry in the table is an start and
+ end offset from the MDP address "mdp_phys", the
+ format of each entry is as follows:
+ <start_offset end_offset>
+ Ex:
+ <0x01000 0x01404>
+ Will dump the MDP registers
+ from the address: "mdp_phys + 0x01000"
+ to the address: "mdp_phys + 0x01404"
+- qcom,regs-dump-names-mdp: This array represents the tag that will be used
+ for each of the entries defined within regs-dump.
+ Note that each tag matches with one of the
+ regs-dump entries in the same order as they
+ are defined.
+- qcom,regs-dump-xin-id-mdp: Array of VBIF clients ids (xins) corresponding
+ to mdp block. Xin id property is not valid for mdp
+ internal blocks like ctl, lm, dspp. It should set
+ to 0xff for such blocks.
+
+Fudge Factors: Fudge factors are used to boost demand for
+				resources like bus bandwidth, clk rate etc. to
+ overcome system inefficiencies and avoid any
+ glitches. These fudge factors are expressed in
+ terms of numerator and denominator. First value
+ is numerator followed by denominator. They all
+ are optional but highly recommended.
+ Ex:
+ x = value to be fudged
+ a = numerator, default value is 1
+ b = denominator, default value is 1
+ FUDGE(x, a, b) = ((x * a) / b)
+- qcom,mdss-ib-factor: This fudge factor is applied to calculated ib
+ values in default conditions.
+- qcom,mdss-ib-factor-overlap: This fudge factor is applied to calculated ib
+ values when the overlap bandwidth is the
+ predominant value compared to prefill bandwidth
+ value.
+- qcom,mdss-clk-factor: This fudge factor is applied to calculated mdp
+ clk rate in default conditions.
+
+- qcom,max-bandwidth-low-kbps: This value indicates the max bandwidth in KB
+ that can be supported without underflow.
+ This is a low bandwidth threshold which should
+ be applied in most scenarios to be safe from
+ underflows when unable to satisfy bandwidth
+ requirements.
+- qcom,max-bandwidth-high-kbps: This value indicates the max bandwidth in KB
+ that can be supported without underflow.
+ This is a high bandwidth threshold which can be
+ applied in scenarios where panel interface can
+ be more tolerant to memory latency such as
+ command mode panels.
+- qcom,max-bandwidth-per-pipe-kbps: A two dimensional array indicating the max
+ bandwidth in KB that a single pipe can support
+ without underflow for various usecases. The
+ first parameter indicates the usecase and the
+ second parameter gives the max bw allowed for
+ the usecase. Following are the enum values for
+ modes in different cases:
+ For default case, mode = 1
+ camera usecase, mode = 2
+ hflip usecase, mode = 4
+ vflip usecase, mode = 8
+ First parameter/mode value need to match enum,
+ mdss_mdp_max_bw_mode, present in
+ include/uapi/linux/msm_mdp.h.
+- qcom,max-bw-settings: This two dimension array indicates the max bandwidth
+ in KB that has to be supported when particular
+ scenarios are involved such as camera, flip.
+ The first parameter indicate the
+ scenario/usecase and second parameter indicate
+ the maximum bandwidth for that usecase.
+ Following are the enum values for modes in different
+ cases:
+ For default case, mode = 1
+ camera usecase, mode = 2
+ hflip usecase, mode = 4
+ vflip usecase, mode = 8
+ First parameter/mode value need to match enum,
+ mdss_mdp_max_bw_mode, present in
+ include/uapi/linux/msm_mdp.h.
+
+- qcom,mdss-has-panic-ctrl: Boolean property to indicate if panic/robust signal
+ control feature is available or not.
+- qcom,mdss-en-svs-high: Boolean property to indicate if this target needs to
+ enable the svs high voltage level for CX rail.
+- qcom,mdss-pipe-vig-panic-ctrl-offsets: Array of panic/robust signal offsets
+ corresponding to the respective VIG pipes.
+ Number of signal offsets should match the
+ number of offsets defined in property:
+ qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-rgb-panic-ctrl-offsets: Array of panic/robust signal offsets
+ corresponding to the respective RGB pipes.
+ Number of signal offsets should match the
+ number of offsets defined in property:
+ qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-dma-panic-ctrl-offsets: Array of panic/robust signal offsets
+ corresponding to the respective DMA pipes.
+ Number of signal offsets should match the
+ number of offsets defined in property:
+ qcom,mdss-pipe-dma-off
+- qcom,mdss-per-pipe-panic-luts: Array to configure the panic/robust luts for
+ each rt and nrt clients. This property is
+ for the MDPv1.7 and above, which configures
+ the panic independently on each client.
+ Each element of the array corresponds to:
+ First element - panic for linear formats
+ Second element - panic for tile formats
+ Third element - robust for linear formats
+ Fourth element - robust for tile formats
+- qcom,mdss-has-pingpong-split: Boolean property to indicate if destination
+ split feature is available or not in the target.
+- qcom,mdss-slave-pingpong-off: Offset address for the extra TE block which needs
+ to be programmed when pingpong split feature is enabled.
+ Offset is calculated from the "mdp_phys"
+ register value. Mandatory when qcom,mdss-has-pingpong-split
+ is enabled.
+- qcom,mdss-ppb-ctl-off: Array of offset addresses of ping pong buffer control registers.
+ The offsets are calculated from the "mdp_phys" base address
+ specified. The number of offsets should match the
+ number of ping pong buffers available in the hardware.
+ Mandatory when qcom,mdss-has-pingpong-split is enabled.
+- qcom,mdss-ppb-cfg-off: Array of offset addresses of ping pong buffer config registers.
+ The offsets are calculated from the "mdp_phys" base address
+ specified. The number of offsets should match the
+ number of ping pong buffers available in the hardware.
+ Mandatory when qcom,mdss-has-pingpong-split is enabled.
+- qcom,mdss-cdm-off: Array of offset addresses for the available
+ chroma down modules that can convert RGB data
+ to YUV before sending it to the interface
+ block. These offsets will be calculated from
+ register "mdp_phys" define in reg property. The
+ number of cdm offsets should reflect the number
+ of cdm blocks present in hardware.
+- qcom,mdss-dsc-off: Array of offset addresses for the available
+ display stream compression module block.
+ These offsets will be calculated from
+ register "mdp_phys" define in reg property. The
+ number of dsc offsets should reflect the number
+ of dsc blocks present in hardware.
+- qcom,max-pipe-width: This value specifies the maximum MDP SSPP width
+ the device supports. If not specified, a default value
+ of 2048 will be applied.
+- qcom,mdss-reg-bus: Property to provide Bus scaling for register access for
+ MDP and DSI Blocks.
+
+- qcom,mdss-rot-reg-bus: Property to provide Bus scaling for register access for
+ Rotator Block.
+
+- qcom,mdss-hw-rt: Optional Property to request min vote on the bus.
+ Few Low tier targets expect min vote on the bus during SMMU
+ and TZ operations. use this handle to request the vote needed.
+
+Optional subnodes:
+- mdss_fb: Child nodes representing the frame buffer virtual devices.
+
+Subnode properties:
+- compatible : Must be "qcom,mdss-fb"
+- cell-index : Index representing frame buffer
+- qcom,mdss-mixer-swap: A boolean property that indicates if the mixer muxes
+ need to be swapped based on the target panel.
+ By default the property is not defined.
+- qcom,memblock-reserve: Specifies the memory location and the size reserved
+ for the framebuffer used to display the splash screen.
+ This property is required whenever the continuous splash
+ screen feature is enabled for the corresponding
+ framebuffer device. It should be used for only 32bit
+ kernel.
+- qcom,cont-splash-memory: Specifies the memory block region reserved for
+ continuous splash screen feature. This property should be
+ defined for corresponding framebuffer device if
+ "qcom,memblock-reserve" is not defined when continuous
+ splash screen feature is enabled.
+- linux,contiguous-region: Phandle to the continuous memory region reserved for
+ frame-buffer or continuous splash screen. Size of this
+ region is dependent on the display panel resolution and
+ buffering scheme for frame-buffer node. Currently driver
+ uses double buffering.
+
+ Example: Width = 1920, Height = 1080, BytesPerPixel = 4,
+ Number of frame-buffers reserved = 2.
+ Size = 1920*1080*4*2 = ROUND_1MB(15.8MB) = 16MB.
+- qcom,mdss-fb-splash-logo-enabled: The boolean entry enables the framebuffer
+ driver to display the splash logo image.
+ It is independent of continuous splash
+ screen feature and has no relation with
+ qcom,cont-splash-enabled entry present in
+ panel configuration.
+- qcom,mdss-idle-power-collapse-enabled: Boolean property that enables support
+ for mdss power collapse in idle
+ screen use cases with smart panels.
+- qcom,boot-indication-enabled: Boolean property that enables turning on the blue
+ LED for notifying that the device is in boot
+ process.
+
+- qcom,mdss-pp-offets: A node that lists the offsets of post processing blocks
+ from base module.
+ -- qcom,mdss-mdss-sspp-igc-lut-off: This 32 bit value provides the
+ offset to the IGC lut rams from mdp_phys base.
+ -- qcom,mdss-sspp-vig-pcc-off: This 32 bit value provides the offset
+ to PCC block from the VIG pipe base address.
+ -- qcom,mdss-sspp-rgb-pcc-off: This 32 bit value provides the offset
+ to PCC block from the RGB pipe base address.
+ -- qcom,mdss-sspp-dma-pcc-off: This 32 bit value provides the offset
+ to PCC block from the DMA pipe base address.
+ -- qcom,mdss-dspp-pcc-off: This 32 bit value provides the offset
+ to PCC block from the DSPP pipe base address.
+ -- qcom,mdss-lm-pgc-off: This 32 bit value provides the offset
+ to PGC block from the layer mixer base address.
+ -- qcom,mdss-dspp-gamut-off: This 32 bit value provides the offset
+ to gamut block from DSPP base address.
+ -- qcom,mdss-dspp-pgc-off: This 32 bit value provides the offset to
+ PGC block from the DSPP base address.
+
+- qcom,mdss-scaler-offsets: A node that lists the offsets of scaler blocks
+ from base module.
+ -- qcom,mdss-vig-scaler-off: This 32 bit value provides the
+ offset to vig scaler from vig pipe base.
+ -- qcom,mdss-vig-scaler-lut-off: This 32 bit value provides the
+ offset to vig scaler lut from vig pipe base.
+ -- qcom,mdss-has-dest-scaler: Boolean property to indicate the
+ presence of destination scaler block.
+ -- qcom,mdss-dest-block-off: This 32 bit value provides the
+ offset from mdp base to destination scaler block.
+ -- qcom,mdss-dest-scaler-off: Array containing offsets of
+ destination scalar modules from the scaler block.
+ -- qcom,mdss-dest-scaler-lut-off: Array containing offsets of destination
+ scaler lut tables from scalar block.
+
+- qcom,mdss-has-separate-rotator: Boolean property to indicate support of
+				independent rotator. Independent rotator has
+ separate DMA pipe working in block mode only.
+
+- smmu_mdp_***: Child nodes representing the mdss smmu virtual devices.
+ Mandatory smmu v2 and not required for smmu v1.
+
+Subnode properties:
+- compatible : Compatible name used in smmu v2.
+ smmu_v2 names should be:
+ "qcom,smmu_mdp_unsec" - smmu context bank device for
+ unsecure mdp domain.
+ "qcom,smmu_rot_unsec" - smmu context bank device for
+ unsecure rotation domain.
+ "qcom,smmu_mdp_sec" - smmu context bank device for
+ secure mdp domain.
+ "qcom,smmu_rot_sec" - smmu context bank device for
+ secure rotation domain.
+ "qcom,smmu_kms_unsec" - smmu context bank device for
+ unsecure mdp domain for KMS driver.
+ "qcom,smmu_nrt_unsec" - smmu context bank device for
+ unsecure rotation domain for KMS driver.
+ "qcom,smmu_kms_sec" - smmu context bank device for
+ secure mdp domain for KMS driver.
+ "qcom,smmu_nrt_sec" - smmu context bank device for
+ secure rotation domain for KMS driver.
+ "qcom,smmu_arm_mdp_unsec" - arm smmu context bank device for
+ unsecure mdp domain.
+ "qcom,smmu_arm_mdp_sec" - arm smmu context bank device for
+ secure mdp domain.
+- gdsc-mmagic-mdss-supply: Phandle for mmagic mdss supply regulator device node.
+- reg : offset and length of the register set for the device.
+- reg-names : names to refer to register sets related to this device
+- clocks: List of Phandles for clock device nodes
+ needed by the device.
+- clock-names: List of clock names needed by the device.
+
+Subnode properties:
+Required properties:
+- compatible: Must be "qcom,mdss_wb"
+- qcom,mdss_pan_res: Array containing two elements, width and height which
+ specifies size of writeback buffer.
+- qcom,mdss_pan_bpp: Specifies bits per pixel for writeback buffer.
+- qcom,mdss-fb-map: Specifies the handle for frame buffer.
+
+Example:
+ mdss_mdp: qcom,mdss_mdp@fd900000 {
+ compatible = "qcom,mdss_mdp";
+ reg = <0xfd900000 0x22100>,
+ <0xfd924000 0x1000>,
+ <0xfd925000 0x1000>;
+ reg-names = "mdp_phys", "vbif_phys", "vbif_nrt_phys";
+ interrupts = <0 72 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ vdd-supply = <&gdsc_mdss>;
+ batfet-supply = <&pm8941_chg_batif>;
+ vdd-cx-supply = <&pm8841_s2_corner>;
+
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_mdp";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,mdss-dram-channels = <2>;
+ qcom,mdss-num-nrt-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>;
+
+ /* Fudge factors */
+ qcom,mdss-ab-factor = <2 1>; /* 2 times */
+ qcom,mdss-ib-factor = <3 2>; /* 1.5 times */
+ qcom,mdss-high-ib-factor = <2 1>; /* 2 times */
+ qcom,mdss-clk-factor = <5 4>; /* 1.25 times */
+
+ /* Clock levels */
+ qcom,mdss-clk-levels = <92310000 177780000 200000000>;
+
+ /* VBIF QoS remapper settings*/
+ qcom,mdss-vbif-qos-rt-setting = <2 2 2 2>;
+ qcom,mdss-vbif-qos-nrt-setting = <1 1 1 1>;
+
+ qcom,max-bandwidth-low-kbps = <2300000>;
+ qcom,max-bandwidth-high-kbps = <3000000>;
+ qcom,max-bandwidth-per-pipe-kbps = <4 2100000>,
+ <8 1800000>;
+ qcom,max-bw-settings = <1 2300000>,
+ <2 1700000>,
+ <4 2300000>,
+ <8 2000000>;
+
+ qcom,max-mixer-width = <2048>;
+ qcom,max-pipe-width = <2048>;
+ qcom,max-clk-rate = <320000000>;
+ qcom,vbif-settings = <0x0004 0x00000001>,
+ <0x00D8 0x00000707>;
+ qcom,vbif-nrt-settings = <0x0004 0x00000001>,
+ <0x00D8 0x00000707>;
+ qcom,mdp-settings = <0x02E0 0x000000AA>,
+ <0x02E4 0x00000055>;
+ qcom,mdss-pipe-vig-off = <0x00001200 0x00001600
+ 0x00001A00>;
+ qcom,mdss-pipe-rgb-off = <0x00001E00 0x00002200
+ 0x00002600>;
+ qcom,mdss-pipe-dma-off = <0x00002A00 0x00002E00>;
+ qcom,mdss-pipe-cursor-off = <0x00035000 0x00037000>;
+ qcom,mdss-dsc-off = <0x00081000 0x00081400>;
+ qcom,mdss-pipe-vig-fetch-id = <1 4 7>;
+ qcom,mdss-pipe-rgb-fetch-id = <16 17 18>;
+ qcom,mdss-pipe-dma-fetch-id = <10 13>;
+ qcom,mdss-pipe-rgb-fixed-mmb = <2 0 1>,
+ <2 2 3>,
+ <2 4 5>,
+ <2 6 7>;
+ qcom,mdss-pipe-vig-fixed-mmb = <1 8>,
+ <1 9>,
+ <1 10>,
+ <1 11>;
+ qcom,mdss-smp-data = <22 4096>;
+ qcom,mdss-rot-block-size = <64>;
+ qcom,mdss-rotator-ot-limit = <2>;
+ qcom,mdss-smp-mb-per-pipe = <2>;
+ qcom,mdss-pref-prim-intf = "dsi";
+ qcom,mdss-has-non-scalar-rgb;
+ qcom,mdss-has-bwc;
+ qcom,mdss-has-decimation;
+ qcom,mdss-has-fixed-qos-arbiter-enabled;
+ qcom,mdss-has-source-split;
+ qcom,mdss-wfd-mode = "intf";
+ qcom,mdss-no-lut-read;
+ qcom,mdss-no-hist-vote;
+ qcom,mdss-traffic-shaper-enabled;
+ qcom,mdss-has-rotator-downscale;
+ qcom,mdss-rot-downscale-min = <2>;
+ qcom,mdss-rot-downscale-max = <16>;
+
+ qcom,mdss-has-pingpong-split;
+ qcom,mdss-pipe-vig-xin-id = <0 4 8>;
+ qcom,mdss-pipe-rgb-xin-id = <1 5 9>;
+ qcom,mdss-pipe-dma-xin-id = <2 10>;
+ qcom,mdss-pipe-cursor-xin-id = <7 7>;
+
+ qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>,
+ <0x3B4 0 0>,
+ <0x3BC 0 0>,
+ <0x3C4 0 0>;
+
+ qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>,
+ <0x3B4 4 8>,
+ <0x3BC 4 8>,
+ <0x3C4 4 8>;
+
+ qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>,
+ <0x3B4 8 12>;
+
+ qcom,mdss-per-pipe-panic-luts = <0x000f>,
+ <0xffff>,
+ <0xfffc>,
+ <0xff00>;
+
+ qcom,mdss-has-panic-ctrl;
+ qcom,mdss-pipe-vig-panic-ctrl-offsets = <0 1 2 3>;
+ qcom,mdss-pipe-rgb-panic-ctrl-offsets = <4 5 6 7>;
+ qcom,mdss-pipe-dma-panic-ctrl-offsets = <8 9>;
+
+ qcom,mdss-pipe-sw-reset-off = <0x0128>;
+ qcom,mdss-pipe-vig-sw-reset-map = <5 6 7 8>;
+ qcom,mdss-pipe-rgb-sw-reset-map = <9 10 11 12>;
+ qcom,mdss-pipe-dma-sw-reset-map = <13 14>;
+
+ qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
+ 0x00000900 0x00000A00>;
+ qcom,mdss-mixer-intf-off = <0x00003200 0x00003600
+ 0x00003A00>;
+ qcom,mdss-mixer-wb-off = <0x00003E00 0x00004200>;
+ qcom,mdss-dspp-off = <0x00004600 0x00004A00 0x00004E00>;
+ qcom,mdss-pingpong-off = <0x00012D00 0x00012E00 0x00012F00>;
+ qcom,mdss-wb-off = <0x00011100 0x00013100 0x00015100
+ 0x00017100 0x00019100>;
+ qcom,mdss-intf-off = <0x00021100 0x00021300
+ 0x00021500 0x00021700>;
+ qcom,mdss-cdm-off = <0x0007A200>;
+ qcom,mdss-ppb-ctl-off = <0x0000420>;
+ qcom,mdss-ppb-cfg-off = <0x0000424>;
+ qcom,mdss-slave-pingpong-off = <0x00073000>;
+
+ /* buffer parameters to calculate prefill bandwidth */
+ qcom,mdss-prefill-outstanding-buffer-bytes = <1024>;
+ qcom,mdss-prefill-y-buffer-bytes = <4096>;
+ qcom,mdss-prefill-scaler-buffer-lines-bilinear = <2>;
+ qcom,mdss-prefill-scaler-buffer-lines-caf = <4>;
+ qcom,mdss-prefill-post-scaler-buffer-pixels = <2048>;
+ qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
+ qcom,mdss-prefill-fbc-lines = <2>;
+ qcom,mdss-idle-power-collapse-enabled;
+
+ qcom,regs-dump-xin-id-mdp = <0xff 0xff 0xff 0xff 0x0 0x0>;
+ mdss_fb0: qcom,mdss_fb_primary {
+ cell-index = <0>;
+ compatible = "qcom,mdss-fb";
+ qcom,mdss-mixer-swap;
+ linux,contiguous-region = <&fb_mem>;
+ qcom,mdss-fb-splash-logo-enabled;
+ qcom,cont-splash-memory {
+ linux,contiguous-region = <&cont_splash_mem>;
+ };
+ };
+
+ qcom,mdss-pp-offsets {
+ qcom,mdss-sspp-mdss-igc-lut-off = <0x3000>;
+ qcom,mdss-sspp-vig-pcc-off = <0x1580>;
+ qcom,mdss-sspp-rgb-pcc-off = <0x180>;
+ qcom,mdss-sspp-dma-pcc-off = <0x180>;
+ qcom,mdss-lm-pgc-off = <0x3C0>;
+ qcom,mdss-dspp-gamut-off = <0x1600>;
+ qcom,mdss-dspp-pcc-off = <0x1700>;
+ qcom,mdss-dspp-pgc-off = <0x17C0>;
+ };
+
+ qcom,mdss-scaler-offsets {
+ qcom,mdss-vig-scaler-off = <0xA00>;
+ qcom,mdss-vig-scaler-lut-off = <0xB00>;
+ qcom,mdss-has-dest-scaler;
+ qcom,mdss-dest-block-off = <0x00061000>;
+ qcom,mdss-dest-scaler-off = <0x800 0x1000>;
+ qcom,mdss-dest-scaler-lut-off = <0x900 0x1100>;
+ };
+
+ qcom,mdss-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+
+ qcom,mdss-hw-rt-bus {
+ /* hw-rt Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_hw_rt";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 1000>;
+ };
+
+ smmu_mdp_sec: qcom,smmu_mdp_sec_cb {
+ compatible = "qcom,smmu_mdp_sec";
+ iommus = <&mdp_smmu 1>;
+ reg = <0xd09000 0x000d00>;
+ reg-names = "mmu_cb";
+ gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
+ clocks = <&clock_mmss clk_smmu_mdp_ahb_clk>,
+ <&clock_mmss clk_smmu_mdp_axi_clk>;
+ clock-names = "dummy_clk", "dummy_clk";
+ };
+
+ qcom,mdss_wb_panel {
+ compatible = "qcom,mdss_wb";
+ qcom,mdss_pan_res = <1280 720>;
+ qcom,mdss_pan_bpp = <24>;
+ qcom,mdss-fb-map = <&mdss_fb1>;
+ };
+
+ qcom,mdss-rot-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index 59fa6a0..2c193c2 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -1,22 +1,20 @@
-Qualcomm Technologies MDSS pll for DSI/EDP/HDMI
+Qualcomm MDSS pll for DSI/EDP/HDMI
-mdss-pll is a pll controller device which supports pll devices that
-are compatible with MIPI display serial interface specification,
-HDMI and edp.
+mdss-pll is a pll controller device which supports pll devices that are
+compatible with MIPI display serial interface specification, HDMI and edp.
Required properties:
-- compatible: Compatible name used in the driver
- "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939",
- "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994",
- "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909",
- "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994",
- "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992",
- "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996",
- "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
- "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
- "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
- "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
- "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm".
+- compatible: Compatible name used in the driver. Should be one of:
+ "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939",
+ "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994",
+ "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909",
+ "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994",
+ "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992",
+ "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996",
+ "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
+ "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_dsi_pll_8952",
+ "qcom,mdss_dsi_pll_8937", "qcom,mdss_hdmi_pll_8996_v3_1p8",
+ "qcom,mdss_dsi_pll_8953"
- cell-index: Specifies the controller used
- reg: offset and length of the register set for the device.
- reg-names : names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
new file mode 100644
index 0000000..8c11a43
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. mdss-qpic-panel
+
+mdss-qpic-panel is a panel device which can be driven by qpic.
+
+Required properties:
+- compatible: Must be "qcom,mdss-qpic-panel"
+- qcom,mdss-pan-res: A two dimensional array that specifies the panel
+ resolution.
+- qcom,mdss-pan-bpp: Specifies the panel bits per pixel.
+- qcom,refresh_rate: Panel refresh rate
+
+Optional properties:
+- label: A string used as a descriptive name of the panel
+
+
+Example:
+/ {
+ qcom,mdss_lcdc_ili9341_qvga {
+ compatible = "qcom,mdss-qpic-panel";
+ label = "ili qvga lcdc panel";
+ qcom,mdss-pan-res = <240 320>;
+ qcom,mdss-pan-bpp = <18>;
+ qcom,refresh_rate = <60>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic.txt b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
new file mode 100644
index 0000000..16d5b35
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc. mdss-qpic
+
+mdss-qpic is a qpic controller device which supports dma transmission to MIPI
+and LCDC panel.
+
+Required properties:
+- compatible: must be "qcom,mdss_qpic"
+- reg: offset and length of the register set for the device.
+- reg-names : names to refer to register sets related to this device
+- interrupts: IRQ line
+- vdd-supply: Phandle for vdd regulator device node.
+- avdd-supply: Phandle for avdd regulator device node.
+- qcom,cs-gpio: Phandle for cs gpio device node.
+- qcom,te-gpio: Phandle for te gpio device node.
+- qcom,rst-gpio: Phandle for rst gpio device node.
+- qcom,ad8-gpio: Phandle for ad8 gpio device node.
+- qcom,bl-gpio: Phandle for backlight gpio device node.
+
+Optional properties:
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below Bus Scaling properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num-cases
+ - qcom,msm-bus,num-paths
+ - qcom,msm-bus,vectors-KBps
+
+Example:
+ qcom,msm_qpic@f9ac0000 {
+ compatible = "qcom,mdss_qpic";
+ reg = <0xf9ac0000 0x24000>;
+ reg-names = "qpic_base";
+ interrupts = <0 251 0>;
+
+ qcom,msm-bus,name = "mdss_qpic";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+
+ qcom,msm-bus,vectors-KBps =
+ <91 512 0 0>,
+ <91 512 400000 800000>;
+
+ vdd-supply = <&pm8019_l11>;
+ avdd-supply = <&pm8019_l14>;
+ qcom,cs-gpio = <&msmgpio 21 0>;
+ qcom,te-gpio = <&msmgpio 22 0>;
+ qcom,rst-gpio = <&msmgpio 23 0>;
+ qcom,ad8-gpio = <&msmgpio 20 0>;
+ qcom,bl-gpio = <&msmgpio 84 0>;
+ };
diff --git a/Documentation/devicetree/bindings/fb/mdss-rotator.txt b/Documentation/devicetree/bindings/fb/mdss-rotator.txt
new file mode 100644
index 0000000..5e077ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-rotator.txt
@@ -0,0 +1,78 @@
+QTI MDSS Rotator
+
+MDSS rotator is a rotator driver, which manages the rotator hw
+block inside the Mobile Display Subsystem.
+
+Required properties
+- compatible : Must be "qcom,mdss_rotator".
+- qcom,mdss-wb-count: The number of writeback block
+ in the hardware
+- <name>-supply: Phandle for <name> regulator device node.
+
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing MDSS client.
+- qcom,msm-bus,num-cases: This is the number of Bus Scaling use cases
+ defined in the vectors property. This must be
+ set to <3> for MDSS driver where use-case 0 is
+ used to take off MDSS BW votes from the system.
+ And use-case 1 & 2 are used in ping-pong fashion
+ to generate run-time BW requests.
+- qcom,msm-bus,num-paths: This represents the number of paths in each
+ Bus Scaling Usecase. This value depends on
+ how many number of AXI master ports are
+ dedicated to MDSS for particular chipset.
+- qcom,msm-bus,vectors-KBps: * A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+ * Current values of src & dst are defined at
+ include/linux/msm-bus-board.h
+ src values allowed for MDSS are:
+ 22 = MSM_BUS_MASTER_MDP_PORT0
+ 23 = MSM_BUS_MASTER_MDP_PORT1
+ 25 = MSM_BUS_MASTER_ROTATOR
+ dst values allowed for MDSS are:
+ 512 = MSM_BUS_SLAVE_EBI_CH0
+ ab: Represents aggregated bandwidth.
+ ib: Represents instantaneous bandwidth.
+ * Total number of 4 cell properties will be
+ (number of use-cases * number of paths).
+ * These values will be overridden by the driver
+ based on the run-time requirements. So initial
+ ab and ib values defined here are random and
+ bear no logic except for the use-case 0 where ab
+ and ib values needs to be 0.
+ * Define realtime vector properties followed by
+ non-realtime vector properties.
+
+Optional properties
+- qcom,mdss-has-reg-bus: Boolean property to indicate
+ if rotator needs to vote for register bus. This
+ property is needed starting 8996
+- qcom,mdss-has-ubwc: Boolean property to indicate
+ if the hw supports universal
+ bandwidth compression (ubwc)
+- qcom,mdss-has-downscale: Boolean property to indicate
+ if the hw supports downscale
+
+Example:
+ mdss_rotator: qcom,mdss_rotator {
+ compatible = "qcom,mdss_rotator";
+ qcom,mdss-has-downscale;
+ qcom,mdss-has-ubwc;
+ qcom,mdss-wb-count = <2>;
+
+ qcom,mdss-has-reg-bus;
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rotator";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,mdss-num-nrt-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <25 512 0 0>,
+ <25 512 0 6400000>,
+ <25 512 0 6400000>;
+
+ vdd-supply = <&gdsc_mdss>;
+ gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
+ qcom,supply-names = "vdd", "gdsc-mmagic-mdss";
+ };
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
new file mode 100644
index 0000000..7f95ed4
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -0,0 +1,116 @@
+* Qualcomm HDMI Tx
+
+Required properties:
+- cell-index: hdmi tx controller index
+- compatible: must be "qcom,hdmi-tx"
+- reg: offset and length of the register regions(s) for the device.
+- reg-names: a list of strings that map in order to the list of regs.
+
+- hpd-gdsc-supply: phandle to the mdss gdsc regulator device tree node.
+- hpd-5v-supply: phandle to the 5V regulator device tree node.
+- core-vdda-supply: phandle to the HDMI vdda regulator device tree node.
+- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
+- qcom,supply-names: a list of strings that map in order
+ to the list of supplies.
+- qcom,min-voltage-level: specifies minimum voltage (uV) level
+ of supply(ies) mentioned above.
+- qcom,max-voltage-level: specifies maximum voltage (uV) level
+ of supply(ies) mentioned above.
+- qcom,enable-load: specifies the current (uA) that will be
+ drawn from the enabled supply(ies) mentioned above.
+- qcom,disable-load: specifies the current (uA) that will be
+ drawn from the disabled supply(ies) mentioned above.
+
+- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line.
+- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line.
+- qcom,hdmi-tx-ddc-data: gpio for ddc data line.
+
+Optional properties:
+- hpd-5v-en-supply: phandle to the 5V boost enable regulator device tree node.
+- qcom,hdmi-tx-mux-sel: gpio required to toggle HDMI output between
+ docking station, type A, and liquid device, type D, ports. Required
+ property for liquid devices.
+- qcom,hdmi-tx-ddc-mux-sel: gpio for ddc mux select.
+- qcom,hdmi-tx-mux-en: gpio required to enable mux for HDMI output
+ on liquid devices. Required property for liquid devices.
+- qcom,hdmi-tx-mux-lpm: gpio required for hdmi mux configuration
+ selection on liquid devices. Required property for liquid devices.
+- qcom,conditional-power-on: Enables HPD conditionally on MTP targets.
+ Required property for MTP devices which are reworked to expose HDMI port.
+- qcom,hdmi-tx-hpd: gpio required for HDMI hot-plug detect. Required on
+ platforms where companion chip is not used.
+- pinctrl-names: a list of strings that map to the pinctrl states.
+- pinctrl-0: list of phandles, each pointing at a pin configuration node.
+...
+- pinctrl-n: list of phandles, each pointing at a pin configuration node.
+- qcom,conti-splash-enabled: Enables the hdmi continuous splash screen feature.
+ HDMI interface will remain powered on from LK to kernel with continuous
+ display of bootup logo.
+- qcom,pluggable: boolean to enable hotplug feature.
+- qcom,display-id: A string indicates the display ID for the controller.
+ The possible values are:
+ - "primary"
+ - "secondary"
+ - "tertiary"
+
+[Optional child nodes]: These nodes are for devices which are
+dependent on HDMI Tx controller. If HDMI Tx controller is disabled then
+these devices will be disabled as well. Ex. HDMI Audio Codec device.
+
+- qcom,msm-hdmi-audio-rx: Node for HDMI audio codec.
+Required properties:
+- compatible : "msm-hdmi-audio-codec-rx";
+
+Example:
+ mdss_hdmi_tx: qcom,hdmi_tx@fd922100 {
+ cell-index = <0>;
+ compatible = "qcom,hdmi-tx";
+ reg = <0xfd922100 0x35C>,
+ <0xfd922500 0x7C>,
+ <0xfc4b8000 0x60F0>,
+ <0xfe2a0000 0xFFF>;
+ reg-names = "core_physical", "phy_physical", "qfprom_physical",
+ "hdcp_physical";
+
+ hpd-gdsc-supply = <&gdsc_mdss>;
+ hpd-5v-supply = <&pm8941_mvs2>;
+ hpd-5v-en-supply = <&hdmi_vreg>;
+ core-vdda-supply = <&pm8941_l12>;
+ core-vcc-supply = <&pm8941_s3>;
+ qcom,supply-names = "hpd-gdsc", "hpd-5v", "hpd-5v-en", "core-vdda", "core-vcc";
+ qcom,min-voltage-level = <0 0 0 1800000 1800000>;
+ qcom,max-voltage-level = <0 0 0 1800000 1800000>;
+ qcom,enable-load = <0 0 0 1800000 0>;
+ qcom,disable-load = <0 0 0 0 0>;
+
+ qcom,hdmi-tx-ddc-mux-sel = <&pma8084_gpios 6 0>;
+ qcom,hdmi-tx-cec = <&msmgpio 31 0>;
+ qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
+ qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
+ qcom,hdmi-tx-hpd = <&msmgpio 34 0>;
+
+ qcom,hdmi-tx-mux-lpm = <&msmgpio 27 0>;
+ qcom,hdmi-tx-mux-en = <&msmgpio 83 0>;
+ qcom,hdmi-tx-mux-sel = <&msmgpio 85 0>;
+
+ qcom,conditional-power-on;
+ qcom,pluggable;
+ qcom,display-id = "secondary";
+
+ qcom,msm-hdmi-audio-rx {
+ compatible = "qcom,msm-hdmi-audio-codec-rx";
+ };
+ pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active",
+ "hdmi_cec_active", "hdmi_active",
+ "hdmi_sleep";
+ pinctrl-0 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend>;
+ pinctrl-1 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_suspend>;
+ pinctrl-2 = <&mdss_hdmi_hpd_active &mdss_hdmi_cec_active
+ &mdss_hdmi_ddc_suspend>;
+ pinctrl-3 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_active>;
+ pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend>;
+ };
diff --git a/Documentation/devicetree/bindings/fb/mxsfb.txt b/Documentation/devicetree/bindings/fb/mxsfb.txt
new file mode 100644
index 0000000..96ec517
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mxsfb.txt
@@ -0,0 +1,49 @@
+* Freescale MXS LCD Interface (LCDIF)
+
+Required properties:
+- compatible: Should be "fsl,<chip>-lcdif". Supported chips include
+ imx23 and imx28.
+- reg: Address and length of the register set for lcdif
+- interrupts: Should contain lcdif interrupts
+- display : phandle to display node (see below for details)
+
+* display node
+
+Required properties:
+- bits-per-pixel : <16> for RGB565, <32> for RGB888/666.
+- bus-width : number of data lines. Could be <8>, <16>, <18> or <24>.
+
+Required sub-node:
+- display-timings : Refer to binding doc display-timing.txt for details.
+
+Examples:
+
+lcdif@80030000 {
+ compatible = "fsl,imx28-lcdif";
+ reg = <0x80030000 2000>;
+ interrupts = <38 86>;
+
+ display: display {
+ bits-per-pixel = <32>;
+ bus-width = <24>;
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <33500000>;
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <164>;
+ hback-porch = <89>;
+ hsync-len = <10>;
+ vback-porch = <23>;
+ vfront-porch = <10>;
+ vsync-len = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/fb/sm501fb.txt b/Documentation/devicetree/bindings/fb/sm501fb.txt
new file mode 100644
index 0000000..9d9f009
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/sm501fb.txt
@@ -0,0 +1,34 @@
+* SM SM501
+
+The SM SM501 is an LCD controller; with proper hardware, it can also
+drive DVI monitors.
+
+Required properties:
+- compatible : should be "smi,sm501".
+- reg : contain two entries:
+ - First entry: System Configuration register
+ - Second entry: IO space (Display Controller register)
+- interrupts : SMI interrupt to the cpu should be described here.
+- interrupt-parent : the phandle for the interrupt controller that
+ services interrupts for this device.
+
+Optional properties:
+- mode : select a video mode:
+ <xres>x<yres>[-<bpp>][@<refresh>]
+- edid : verbatim EDID data block describing attached display.
+ Data from the detailed timing descriptor will be used to
+ program the display controller.
+- little-endian: available on big endian systems, to
+ set different foreign endian.
+- big-endian: available on little endian systems, to
+ set different foreign endian.
+
+Example for MPC5200:
+ display@1,0 {
+ compatible = "smi,sm501";
+ reg = <1 0x00000000 0x00800000
+ 1 0x03e00000 0x00200000>;
+ interrupts = <1 1 3>;
+ mode = "640x480-32@60";
+ edid = [edid-data];
+ };
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index cb38d5a..55cd383 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -218,6 +218,26 @@
- qcom,mempool-allocate: Allocate memory from the system memory when the
reserved pool exhausted.
+SOC Hardware revisions:
+- qcom,soc-hw-revisions:
+ Container of sets of SOC hardware revisions specified by
+ qcom,soc-hw-revision.
+Properties:
+- compatible:
+ Must be qcom,soc-hw-revisions.
+
+- qcom,soc-hw-revision:
+ Defines a SOC hardware revision.
+
+Properties:
+- reg:
+ Identifier for the hardware revision - must match the value read
+ from the hardware.
+- qcom,chipid:
+ GPU Chip ID to be used for this hardware revision.
+- qcom,gpu-quirk-*:
+ GPU quirks applicable for this hardware revision.
+
GPU LLC slice info:
- cache-slice-names: List of LLC cache slices for GPU transactions
and pagetable walk.
@@ -293,6 +313,28 @@
coresight-child-list = <&funnel_in0>;
coresight-child-ports = <5>;
+ qcom,soc-hw-revisions {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible="qcom,soc-hw-revisions";
+
+ qcom,soc-hw-revision@0 {
+ reg = <0>;
+
+ qcom,chipid = <0x06010500>;
+ qcom,gpu-quirk-hfi-use-reg;
+ qcom,gpu-quirk-limit-uche-gbif-rw;
+ };
+
+ qcom,soc-hw-revision@1 {
+ reg = <1>;
+
+ qcom,chipid = <0x06010501>;
+ qcom,gpu-quirk-hfi-use-reg;
+ };
+ };
+
/* GPU Mempools */
qcom,gpu-mempools {
#address-cells= <1>;
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index b0c5b57..d9d3470 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -51,6 +51,8 @@
device (see pinctrl binding [0]).
[0]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+- #thermal-sensor-cells : To register ADC sensors with of_thermal. Should be 1.
+ See ./thermal.txt for a description.
Client required property:
- qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
diff --git a/Documentation/devicetree/bindings/input/qpnp-power-on.txt b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
index c2550e6..33d0236 100644
--- a/Documentation/devicetree/bindings/input/qpnp-power-on.txt
+++ b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
@@ -84,7 +84,34 @@
case.
- qcom,kpdpwr-sw-debounce Boolean property to enable the debounce logic
on the KPDPWR_N rising edge.
-
+- qcom,resin-pon-reset Boolean property which indicates that resin
+ needs to be configured during reset in addition
+ to the primary PON device that is configured
+ for system reset through qcom,system-reset
+ property.
+- qcom,resin-warm-reset-type Poweroff type required to be configured on
+ RESIN reset control register when the system
+ goes for warm reset. If this property is not
+ specified, then the default type, warm reset
+ will be configured to RESIN reset control
+ register. This property is effective only if
+ qcom,resin-pon-reset is defined.
+- qcom,resin-hard-reset-type Same description as qcom,resin-warm-reset-type
+ but this applies for the system hard reset case.
+- qcom,resin-shutdown-type Same description as qcom,resin-warm-reset-type
+ but this applies for the system shutdown case.
+- qcom,resin-shutdown-disable Boolean property to disable RESIN POFF
+ trigger during system shutdown case.
+ This property is effective only if
+ qcom,resin-pon-reset is defined.
+- qcom,resin-hard-reset-disable Boolean property to disable RESIN POFF
+ trigger during system hard reset case.
+ This property is effective only if
+ qcom,resin-pon-reset is defined.
+- qcom,ps-hold-shutdown-disable Boolean property to disable PS_HOLD POFF
+ trigger during system shutdown case.
+- qcom,ps-hold-hard-reset-disable Boolean property to disable PS_HOLD
+ POFF trigger during system hard reset case.
All the below properties are in the sub-node section (properties of the child
node).
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
new file mode 100644
index 0000000..c33daab
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
@@ -0,0 +1,66 @@
+Synaptics DSXV26 touch controller
+
+The Synaptics Touch controller is connected to the host processor via I2C.
+The controller generates interrupts when the user touches the panel. The host
+processor is expected to read the touch coordinates over I2C and pass the
+coordinates to the rest of the system.
+
+Required properties:
+
+ - compatible : should be "synaptics,dsx-i2c".
+ - reg : i2c slave address of the device.
+ - interrupt-parent : parent of interrupt.
+ - synaptics,irq-gpio : irq gpio.
+ - synaptics,reset-gpio : reset gpio.
+ - synaptics,power-gpio : power switch gpio.
+ - synaptics,irq-flags : irq flags.
+
+Optional property:
+ - vdd_ana-supply : digital voltage power supply needed to power device.
+ - vcc_i2c-supply : analog voltage power supply needed to power device.
+ - synaptics,pwr-reg-name : power reg name of digital voltage.
+ - synaptics,bus-reg-name : bus reg name of analog voltage.
+ - synaptics,irq-on-state : irq gpio active state.
+ - synaptics,reset-on-state : reset gpio active state.
+ - synaptics,power-on-state : power switch active state.
+ - synaptics,ub-i2c-addr : microbootloader mode I2C slave address.
+ - synaptics,cap-button-codes : virtual key code mappings to be used.
+ - synaptics,vir-button-codes : virtual key code and the response region on panel.
+ - synaptics,x-flip : modify orientation of the x axis.
+ - synaptics,y-flip : modify orientation of the y axis.
+ - synaptics,reset-delay-ms : reset delay for controller (ms), default 100.
+ - synaptics,reset-active-ms : reset active duration for controller (ms), default 100.
+ - synaptics,power-delay-ms : power delay for controller (ms), default 100.
+ - synaptics,max-y-for-2d : maximal y value of the panel.
+ - synaptics,swap-axes : specify whether to swap axes.
+ - synaptics,resume-in-workqueue : specify whether to defer the resume to workqueue.
+ - clock-names : Clock names used for secure touch. They are: "iface_clk", "core_clk"
+ - clocks : Defined if 'clock-names' DT property is defined. These clocks
+ are associated with the underlying I2C bus.
+
+Example:
+ i2c@78b7000 {
+ status = "ok";
+ synaptics@4b {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x4b>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <65 0x2008>;
+ vdd_ana-supply = <&pm8953_l17>;
+ vcc_i2c-supply = <&pm8953_l6>;
+ synaptics,pwr-reg-name = "vdd_ana";
+ synaptics,bus-reg-name = "vcc_i2c";
+ synaptics,irq-gpio = <&tlmm 65 0x2008>;
+ synaptics,irq-on-state = <0>;
+ synaptics,irq-flags = <0x2008>; /* IRQF_ONESHOT | IRQF_TRIGGER_LOW */
+ synaptics,power-delay-ms = <200>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,max-y-for-2d = <1919>; /* remove if no virtual buttons */
+ synaptics,cap-button-codes = <139 172 158>;
+ synaptics,vir-button-codes = <139 180 2000 320 160 172 540 2000 320 160 158 900 2000 320 160>;
+ /* Underlying clocks used by secure touch */
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
index 07667a4..8d6fad0 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
@@ -31,6 +31,7 @@
* "qcom,pdc-sdm845": For sdm845 pin data
* "qcom,pdc-sdm845-v2": For sdm845 v2 pin data
* "qcom,pdc-sdm670": For sdm670 pin data
+ * "qcom,pdc-sdxpoorwills": For sdxpoorwills pin data
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
index b7ce662..1a76d5d 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
@@ -118,6 +118,18 @@
Following properties are specific only to LRA vibrators.
+- qcom,lra-auto-mode
+ Usage: optional
+ Value type: <empty>
+ Definition: If specified, a set of pre-configured settings will be applied
+ based on the pattern duration. For example, for a duration of
+ < 20 ms (short duration), one set of settings will be applied
+ and for a duration of >= 20 ms (long duration), another set of
+ settings will be applied. The parameters configured in the
+ driver when this property is specified is based on the LRA
+ tested internally. Those parameters should be fine-tuned or
+ adjusted based on the LRA used on different hardware platforms.
+
- qcom,lra-auto-res-mode
Usage: optional
Value type: <string>
@@ -200,9 +212,13 @@
- qcom,wave-samples
Usage: optional
Value type: <prop-encoded-array>
- Definition: Wave samples in an array of 8 elements. Each element takes the
+ Definition: Wave samples in an array of 32 elements. Each element takes the
following representation, bit 0: unused, bits[5:1] : amplitude,
bit 6: overdrive, bit 7: sign. Default sample value is 0x3E.
+ Since the hardware supports configuring up to 8 samples, a set
+ of 8 samples will be configured initially and the next set will
+ be configured upon the play interrupt until all the samples are
+ configured and played.
Following properties are applicable only when "qcom,play-mode" is set to
"pwm".
diff --git a/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt b/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt
new file mode 100644
index 0000000..c088d42
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt
@@ -0,0 +1,60 @@
+Qualcomm Technologies, Inc. TRI_LED driver specific bindings
+
+This binding document describes the properties of TRI_LED module in
+Qualcomm Technologies, Inc. PMIC chips.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Must be "qcom,tri-led".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base of the TRI_LED module and length.
+
+Properties for child nodes:
+- pwms:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The PWM device (phandle) used for controlling LED.
+
+- led-sources:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: see Documentation/devicetree/bindings/leds/common.txt;
+ Device current output identifiers are: 0 - LED1_EN,
+ 1 - LED2_EN, 2 - LED3_EN.
+
+- label:
+ Usage: optional
+ Value type: <string>
+ Definition: see Documentation/devicetree/bindings/leds/common.txt;
+
+- linux,default-trigger:
+ Usage: optional
+ Value type: <string>
+ Definition: see Documentation/devicetree/bindings/leds/common.txt;
+
+Example:
+
+ pmi8998_rgb: tri-led@d000{
+ compatible = "qcom,tri-led";
+ reg = <0xd000 0x100>;
+
+ red {
+ label = "red";
+ pwms = <&pmi8998_lpg 4 1000000>;
+ led-sources = <0>;
+ };
+ green {
+ label = "green";
+ pwms = <&pmi8998_lpg 3 1000000>;
+ led-sources = <1>;
+ };
+ blue {
+ label = "blue";
+ pwms = <&pmi8998_lpg 2 1000000>;
+ led-sources = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 001f74f3..6c61ada 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -121,6 +121,10 @@
- qcom,wakeup-on-idle: if configured, the mmcqd thread will call
set_wake_up_idle(), thereby voting for it to be called on idle CPUs.
+ - nvmem-cells: specifies the handle to represent the SoC revision.
+ usually it is defined by qfprom device node.
+ - nvmem-cell-names: specifies the given nvmem cell name as defined in
+ qfprom node.
Example:
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index 6af2bac..dfe5852 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -37,6 +37,7 @@
MSIs, virtual IRQ's (INT#), link state notifications.
- perst-gpio: PERST GPIO specified by PCIe spec.
- wake-gpio: WAKE GPIO specified by PCIe spec.
+ - phy-status-offset: Offset from PCIe PHY base to check if PCIe PHY is up.
- <supply-name>-supply: phandle to the regulator device tree node.
Refer to the schematics for the corresponding voltage regulators.
vreg-1.8-supply: phandle to the analog supply for the PCIe controller.
@@ -274,6 +275,7 @@
qcom,switch-latency = <100>;
qcom,wr-halt-size = <0xa>; /* 1KB */
qcom,slv-addr-space-size = <0x1000000>; /* 16MB */
+ qcom,phy-status-offset = <0x800>;
qcom,cpl-timeout = <0x2>;
iommus = <&anoc0_smmu>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt
new file mode 100644
index 0000000..f697704
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt
@@ -0,0 +1,204 @@
+Qualcomm Technologies, Inc. MSM8937 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSM8937 platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,msm8937-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode.
+ Valid pins are:
+ gpio0-gpio133,
+ sdc1_clk,
+ sdc1_cmd,
+ sdc1_data,
+ sdc1_rclk,
+ sdc2_clk,
+ sdc2_cmd,
+ sdc2_data,
+ qdsd_clk,
+ qdsd_cmd,
+ qdsd_data0,
+ qdsd_data1,
+ qdsd_data2,
+ qdsd_data3,
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+ qdss_tracedata_b, blsp_uart1, gpio, blsp_spi1, adsp_ext, blsp_i2c1, prng_rosc,
+ qdss_cti_trig_out_b0, blsp_spi2, blsp_uart2, blsp_uart3, pbs0, pbs1,
+ pwr_modem_enabled_b, blsp_i2c3, gcc_gp2_clk_b, ldo_update,
+ atest_combodac_to_gpio_native, ldo_en, blsp_i2c2, gcc_gp1_clk_b, pbs2,
+ atest_gpsadc_dtest0_native, blsp_spi3, gcc_gp3_clk_b, blsp_spi4, blsp_uart4,
+ sec_mi2s, pwr_nav_enabled_b, codec_mad, pwr_crypto_enabled_b, blsp_i2c4,
+ blsp_spi5, blsp_uart5, qdss_traceclk_a, atest_bbrx1, m_voc,
+ qdss_cti_trig_in_a0, qdss_cti_trig_in_b0, blsp_i2c6, qdss_traceclk_b,
+ atest_wlan0, atest_wlan1, atest_bbrx0, blsp_i2c5, qdss_tracectl_a,
+ atest_gpsadc_dtest1_native, qdss_tracedata_a, blsp_spi6, blsp_uart6,
+ qdss_tracectl_b, mdp_vsync, pri_mi2s_mclk_a, sec_mi2s_mclk_a, cam_mclk,
+ cci_i2c, pwr_modem_enabled_a, cci_timer0, cci_timer1, cam1_standby,
+ pwr_nav_enabled_a, cam1_rst, pwr_crypto_enabled_a, forced_usb,
+ qdss_cti_trig_out_b1, cam2_rst, webcam_standby, cci_async, webcam_rst,
+ ov_ldo, sd_write, accel_int, gcc_gp1_clk_a, alsp_int, gcc_gp2_clk_a,
+ mag_int, gcc_gp3_clk_a, blsp6_spi, fp_int, qdss_cti_trig_in_b1, uim_batt,
+ cam2_standby, uim1_data, uim1_clk, uim1_reset, uim1_present, uim2_data,
+ uim2_clk, uim2_reset, uim2_present, sensor_rst, mipi_dsi0, smb_int,
+ cam0_ldo, us_euro, atest_char3, dbg_out, bimc_dte0, ts_resout, ts_sample,
+ sec_mi2s_mclk_b, pri_mi2s, sdcard_det, atest_char1, ebi_cdc, audio_reset,
+ atest_char0, audio_ref, cdc_pdm0, pri_mi2s_mclk_b, lpass_slimbus,
+ lpass_slimbus0, lpass_slimbus1, codec_int1, codec_int2, wcss_bt,
+ atest_char2, ebi_ch0, wcss_wlan2, wcss_wlan1, wcss_wlan0, wcss_wlan,
+ wcss_fm, ext_lpass, cri_trng, cri_trng1, cri_trng0, blsp_spi7, blsp_uart7,
+ pri_mi2s_ws, blsp_i2c7, gcc_tlmm, dmic0_clk, dmic0_data, key_volp,
+ qdss_cti_trig_in_a1, us_emitter, wsa_irq, wsa_io, wsa_reset, blsp_spi8,
+ blsp_uart8, blsp_i2c8, gcc_plltest, nav_pps_in_a, pa_indicator, modem_tsync,
+ nav_tsync, nav_pps_in_b, nav_pps, gsm0_tx, atest_char, atest_tsens,
+ bimc_dte1, ssbi_wtr1, fp_gpio, coex_uart, key_snapshot, key_focus, nfc_pwr,
+ blsp8_spi, qdss_cti_trig_out_a0, qdss_cti_trig_out_a1
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,msm8937-pinctrl";
+ reg = <0x1000000 0x300000>;
+ interrupts = <0 208 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ pmx-uartconsole {
+ uart_console_active: uart_console_active {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ uart_console_sleep: uart_console_sleep {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index f50fd88..75996a5 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -104,6 +104,13 @@
this property is not specified, then the default value used
will be 75mA.
+- qcom,fg-cutoff-current
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum Battery current (in mA) used for cutoff SOC
+ estimate. If this property is not specified, then a default
+ value of 500 mA will be applied.
+
- qcom,fg-delta-soc-thr
Usage: optional
Value type: <u32>
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
new file mode 100644
index 0000000..3174ccb
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
@@ -0,0 +1,36 @@
+Qualcomm Technologies, Inc. LPG driver specific bindings
+
+This binding document describes the properties of LPG (Light Pulse Generator)
+device module in Qualcomm Technologies, Inc. PMIC chips.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Must be "qcom,pwm-lpg".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base and length for LPG modules. The length
+ varies based on the number of channels available in
+ the PMIC chips.
+
+- reg-names:
+ Usage: required
+ Value type: <string>
+ Definition: The name of the register defined in the reg property.
+ It must be "lpg-base".
+
+- #pwm-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: See Documentation/devicetree/bindings/pwm/pwm.txt;
+
+Example:
+
+ pmi8998_lpg: lpg@b100 {
+ compatible = "qcom,pwm-lpg";
+ reg = <0xb100 0x600>;
+ reg-names = "lpg-base";
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index fba7204..e64599c 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -14,6 +14,7 @@
- qcom,fastrpc-glink: Flag to use glink instead of smd for IPC
- qcom,rpc-latency-us: FastRPC QoS latency vote
- qcom,adsp-remoteheap-vmid: FastRPC remote heap VMID list
+- qcom,fastrpc-adsp-audio-pdr: Flag to enable ADSP Audio PDR
Optional subnodes:
- qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context
@@ -73,9 +74,11 @@
Required properties:
- compatible : Must be "qcom,msm-adsprpc-mem-region"
- memory-region : CMA region which is owned by this device
+- restrict-access : Blocking vote for hyp_assign_phys function call
Example:
qcom,adsprpc-mem {
compatible = "qcom,msm-adsprpc-mem-region";
memory-region = <&adsp_mem>;
+ restrict-access;
};
diff --git a/Documentation/devicetree/bindings/regulator/cpr-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
index 1c4dfbf..0f5e27a 100644
--- a/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
@@ -709,19 +709,6 @@
The number of quadruples should be equal to the number of values specified in
the qcom,cpr-aging-sensor-id property. This property is required if
the qcom,cpr-aging-sensor-id property has been specified.
-- qcom,cpr-thermal-sensor-id: TSENS hardware sensor-id of the sensor which
- needs to be monitored.
-- qcom,cpr-disable-temp-threshold: The TSENS temperature threshold in degrees Celsius at which CPR
- closed-loop is disabled. CPR closed-loop will stay disabled as long as the
- temperature is below this threshold. This property is required
- only if 'qcom,cpr-thermal-sensor-id' is present.
-- qcom,cpr-enable-temp-threshold: The TSENS temperature threshold in degrees Celsius at which CPR
- closed-loop is enabled. CPR closed-loop will stay enabled above this
- temperature threshold. This property is required only if
- 'qcom,cpr-thermal-sensor-id' is present.
-- qcom,disable-closed-loop-in-pc: Bool property to disable closed-loop CPR during
- power-collapse. This can be enabled only for single core
- designs. The property 'qcom,cpr-cpus' is required to enable this logic.
Example:
apc_vreg_corner: regulator@f9018000 {
status = "okay";
@@ -971,8 +958,4 @@
qcom,cpr-fuse-aging-init-quot-diff =
<101 0 8 0>,
<101 8 8 0>;
-
- qcom,cpr-thermal-sensor-id = <9>;
- qcom,cpr-disable-temp-threshold = <5>;
- qcom,cpr-enable-temp-threshold = <10>;
};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 34c2963..58c9bf8 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -293,6 +293,19 @@
- compatible : "qcom,msm-pcm-hostless"
+* audio-load-mod
+
+Required properties:
+
+ - compatible : "qcom,audio-load-mod"
+
+Optional properties:
+
+ - compatible : "qcom,audio-test-mod"
+ Add this compatible as child device to load-module device.
+ This child device is added after lpass is up to invoke
+ deferred probe devices.
+
* msm-ocmem-audio
Required properties:
@@ -355,6 +368,8 @@
- qcom,mclk-clk-reg: Indicate the register address for mclk.
+ - qcom,lpass-mclk-id: Property to update LPASS MCLK Id.
+
* audio_slimslave
Required properties:
@@ -640,6 +655,13 @@
compatible = "qcom,msm-pcm-hostless";
};
+ audio_load_mod {
+ compatible = "qcom,audio-load-mod";
+ audio_test_mod {
+ compatible = "qcom,audio-test-mod";
+ };
+ };
+
qcom,msm-ocmem-audio {
compatible = "qcom,msm-ocmem-audio";
qcom,msm_bus,name = "audio-ocmem";
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index 28ab2dd..97b71a7 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -61,6 +61,8 @@
- qcom,adc-tm-recalib-check: Add this property to check if recalibration required due to inaccuracy.
- hkadc_ldo-supply : Add this property if VADC needs to perform a Software Vote for the HKADC.
- hkadc_ok-supply : Add this property if the VADC needs to perform a Software vote for the HKADC VREG_OK.
+- #thermal-sensor-cells : To register ADC sensors with of_thermal. Should be 1.
+ See ./thermal.txt for a description.
Client required property:
- qcom,<consumer name>-adc_tm : The phandle to the corresponding adc_tm device.
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 8654a3e..e13a6a3 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -1,110 +1,315 @@
MSM SoC HSUSB controllers
-EHCI
+OTG:
-Required properties:
-- compatible: Should contain "qcom,ehci-host"
-- regs: offset and length of the register set in the memory map
-- usb-phy: phandle for the PHY device
+Required properties :
+- compatible : should be "qcom,hsusb-otg"
+- regs : Array of offset and length of the register sets in the memory map
+- reg-names : indicates various iomem resources passed by name. The possible
+ strings in this field are:
+ "core": USB controller register space. (Required)
+ "tcsr": TCSR register for routing USB Controller signals to
+ either picoPHY0 or picoPHY1. (Optional)
+ "phy_csr": PHY Wrapper CSR register space. Provides register level
+ interface through AHB2PHY for performing PHY related
+ operations like retention and HV interrupts management.
+- interrupts: IRQ line
+- interrupt-names: OTG interrupt name(s) referenced in interrupts above
+ HSUSB OTG expects "core_irq" which is IRQ line from CORE and
+ "async_irq" from HSPHY for asynchronous wakeup events in LPM.
+ optional ones are described in next section.
+- qcom,hsusb-otg-phy-type: PHY type can be one of
+ 1 - Chipidea PHY (obsolete)
+ 2 - Synopsys Pico PHY
+ 3 - Synopsys Femto PHY
+ 4 - QUSB ULPI PHY
+- qcom,hsusb-otg-mode: Operational mode. Can be one of
+ 1 - Peripheral only mode
+ 2 - Host only mode
+ 3 - OTG mode
+ Based on the mode, OTG driver registers platform devices for
+ gadget and host.
+- qcom,hsusb-otg-otg-control: OTG control (VBUS and ID notifications)
+ can be one of
+ 1 - PHY control
+ 2 - PMIC control
+ 3 - User control (via debugfs)
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is "HSUSB_VDDCX" (when voting for VDDCX) or
+ "hsusb_vdd_dig" (when voting for VDDCX Corner voltage),
+ "HSUSB_1p8-supply" and "HSUSB_3p3-supply".
+- qcom,vdd-voltage-level: This property must be a list of three integer
+ values (none, min, max) where each value represents either a voltage
+ in microvolts or a value corresponding to voltage corner. If usb core
+ supports svs, min value will have absolute SVS or SVS corner otherwise
+ min value will have absolute nominal or nominal corner.
+- clocks: a list of phandles to the USB clocks. Usage is as per
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+- clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+ property.
-Example EHCI controller device node:
+ Required clocks:
+ "core_clk": USB core clock that is required for data transfers.
+ "iface_clk": USB core clock that is required for register access.
- ehci: ehci@f9a55000 {
- compatible = "qcom,ehci-host";
- reg = <0xf9a55000 0x400>;
- usb-phy = <&usb_otg>;
+ Optional clocks:
+ "sleep_clk": PHY sleep clock. Required for interrupts.
+ "phy_reset_clk": PHY blocks asynchronous reset clock. Required
+ for the USB block reset. It is a reset only clock.
+ "phy_por_clk": Reset only clock for asserting/de-asserting
+ PHY POR signal. Required for overriding PHY parameters.
+ "phy_csr_clk": Required for accessing PHY CSR registers through
+ AHB2PHY interface.
+ "phy_ref_clk": Required when PHY have referance clock,
+ "xo": XO clock. The source clock that is used as a reference clock
+ to the PHY.
+ "bimc_clk", "snoc_clk", "pcnoc_clk": bus voting clocks. Used to
+ keep buses at a nominal frequency during USB peripheral
+ mode for achieving max throughput.
+- qcom,max-nominal-sysclk-rate: Indicates maximum nominal frequency for which
+ system clock should be voted whenever streaming mode is enabled.
+- resets: reset specifier pair consists of phandle for the reset provider
+ and reset lines used by this controller.
+- reset-names: reset signal name strings sorted in the same order as the resets
+ property.
+
+Optional properties :
+- interrupt-names : Optional interrupt resource entries are:
+ "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
+ "phy_irq" : Interrupt from PHY. Used for ID detection.
+- qcom,hsusb-otg-disable-reset: If present then core is RESET only during
+ init, otherwise core is RESET for every cable disconnect as well
+- qcom,hsusb-otg-pnoc-errata-fix: If present then workaround for PNOC
+ performance issue is applied which requires changing the mem-type
+ attribute via VMIDMT.
+- qcom,hsusb-otg-default-mode: The default USB mode after boot-up.
+ Applicable only when OTG is controlled by user. Can be one of
+ 0 - None. Low power mode
+ 1 - Peripheral
+ 2 - Host
+- qcom,hsusb-otg-phy-init-seq: PHY configuration sequence. val, reg pairs
+ terminate with -1
+- qcom,hsusb-otg-power-budget: VBUS power budget in mA
+ 0 will be treated as 500mA
+- qcom,hsusb-otg-pclk-src-name: The source of pclk
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm-bus.txt" for
+ below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num_cases - There are three valid cases for this: NONE, MAX
+ and MIN bandwidth votes. Minimum two cases must be defined for
+ both NONE and MAX votes. If MIN vote is different from NONE VOTE
+ then specify third case for MIN VOTE. If explicit NOC clock rates
+ are not specified then MAX value should be large enough to get
+ desired BUS frequencies. In case explicit NOC clock rates are
+ specified, peripheral mode bus bandwidth vote should be defined
+ to vote for arbitrated bandwidth so that 60MHz frequency is met.
+
+ - qcom,msm-bus,num_paths
+ - qcom,msm-bus,vectors
+- qcom,hsusb-otg-lpm-on-dev-suspend: If present then USB enter to
+ low power mode upon receiving bus suspend.
+- qcom,hsusb-otg-clk-always-on-workaround: If present then USB core clocks
+ remain active upon receiving bus suspend and USB cable is connected.
+ Used for allowing USB to respond to remote wakeup.
+- qcom,hsusb-otg-delay-lpm: If present then USB core will wait one second
+ after disconnect before entering low power mode.
+- <supply-name>-supply: handle to the regulator device tree node.
+ Optional "supply-name" is "vbus_otg" to supply vbus in host mode.
+- qcom,dp-manual-pullup: If present, vbus is not routed to USB controller/phy
+ and controller driver therefore enables pull-up explicitly before
+ starting controller using usbcmd run/stop bit.
+- qcom,usb2-enable-hsphy2: If present then USB2 controller is connected to 2nd
+ HSPHY.
+- qcom,hsusb-log2-itc: value of 2^(log2_itc-1) will be used as the
+ interrupt threshold (ITC), when log2_itc is between 1 to 7.
+- qcom,hsusb-l1-supported: If present, the device supports l1 (Link power
+ management).
+- qcom,no-selective-suspend: If present selective suspend is disabled on hub ports.
+- qcom,hsusb-otg-mpm-dpsehv-int: If present, indicates mpm interrupt to be
+ configured for detection of dp line transition during VDD minimization.
+- qcom,hsusb-otg-mpm-dmsehv-int: If present, indicates mpm interrupt to be
+ configured for detection of dm line transition during VDD minimization.
+- pinctrl-names : This should be defined if a target uses gpio and pinctrl framework.
+ See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
+ It should specify the names of the configs that pinctrl can install in driver
+ Following are the pinctrl config that can be installed
+ "hsusb_active" : Active configuration of pins, this should specify active
+ config of vddmin gpio (if used) defined in their pin groups.
+ "hsusb_sleep" : Disabled configuration of pins, this should specify sleep
+ config of vddmin gpio (if used) defined in their pin groups.
+- qcom,hsusb-otg-vddmin-gpio = If present, indicates a gpio that will be used
+ to supply voltage to the D+ line during VDD minimization and peripheral
+ bus suspend. If not exists, then VDD minimization will not be allowed
+ during peripheral bus suspend.
+- qcom,ahb-async-bridge-bypass: If present, indicates that enable AHB2AHB By Pass
+ mode with device controller for better throughput. With this mode, USB Core
+ runs using PNOC clock and synchronous to it. Hence it is must to have proper
+ "qcom,msm-bus,vectors" to have high bus frequency. User shouldn't try to
+ enable this feature without proper bus voting. When this feature is enabled,
+ it is required to do HW reset during cable disconnect for host mode functionality
+ working and hence need to disable qcom,hsusb-otg-disable-reset. With this feature
+ enabled, USB HW has to vote for maximum PNOC frequency as USB HW cannot tolerate
+ changes in PNOC frequency which results in USB functionality failure.
+- qcom,disable-retention-with-vdd-min: If present don't allow phy retention but allow
+ vdd min.
+- qcom,usbin-vadc: Corresponding vadc device's phandle to read usbin voltage using VADC.
+ This will be used to get value of usb power supply's VOLTAGE_NOW property.
+- qcom,usbid-gpio: This corresponds to gpio which is used for USB ID detection.
+- qcom,hub-reset-gpio: This corresponds to gpio which is used for HUB reset.
+- qcom,sw-sel-gpio: This corresponds to gpio which is used for switch select routing
+ of D+/D- between the USB HUB and type B USB jack for peripheral mode.
+- qcom,bus-clk-rate: If present, indicates nominal bus frequency to be voted for
+ bimc/snoc/pcnoc clock with usb cable connected. If AHB2AHB bypass is enabled,
+ pcnoc value should be defined to very large number so that PNOC runs at max
+ frequency. If 'qcom,default-mode-svs' is also set then two set of frequencies
+ must be specified for SVS and NOM modes which user can change using sysfs node.
+- qcom,phy-dvdd-always-on: If present PHY DVDD is supplied by a always-on
+ regulator unlike vddcx/vddmx. PHY can keep D+ pull-up and D+/D-
+ pull-down resistors during peripheral and host bus suspend without
+ any re-work.
+- qcom,emulation: Indicates that we are running on emulation platform.
+- qcom,boost-sysclk-with-streaming: If present, enable controller specific
+ streaming feature. Also this flag can bump up usb system clock to max in streaming
+ mode. This flag enables streaming mode for all compositions and is different from
+ streaming-func property defined in android device node. Please refer to Documentation/
+ devicetree/bindings/usb/android-dev.txt for details about "streaming-func" property.
+- qcom,axi-prefetch-enable: If present, AXI64 interface will be used for transferring data
+ to/from DDR by controller.
+- qcom,enable-sdp-typec-current-limit: Indicates whether type-c current for SDP CHARGER to
+ be limited.
+- qcom,enable-phy-id-pullup: If present, PHY can keep D+ pull-up resistor on USB ID line
+ during cable disconnect.
+- qcom,max-svs-sysclk-rate: Indicates system clock frequency voted by driver in
+ non-perf mode. In perf mode driver uses qcom,max-nominal-sysclk-rate.
+- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
+ which is used as a vote by driver to get max performance in perf mode.
+- qcom,default-mode-svs: Indicates USB system clock should run at SVS frequency.
+ User can bump it up using 'perf_mode' sysfs attribute for gadget.
+- qcom,vbus-low-as-hostmode: If present, specifies USB_VBUS to switch to host mode
+ if USB_VBUS is low or device mode if USB_VBUS is high.
+- qcom,usbeth-reset-gpio: If present then an external usb-to-eth is connected to
+ the USB host controller and its RESET_N signal is connected to this
+ usbeth-reset-gpio GPIO. It should be driven LOW to RESET the usb-to-eth.
+- extcon: phandles to external connector devices. First phandle should point to
+ external connector, which provide "USB" cable events, the second should
+ point to external connector device, which provide "USB-HOST" cable events.
+ A single phandle may be specified if a single connector device provides
+ both "USB" and "USB-HOST" events.
+
+Example HSUSB OTG controller device node :
+ usb@f9690000 {
+ compatible = "qcom,hsusb-otg";
+ reg = <0xf9690000 0x400>;
+ reg-names = "core";
+ interrupts = <134>;
+ interrupt-names = "core_irq";
+
+ qcom,hsusb-otg-phy-type = <2>;
+ qcom,hsusb-otg-mode = <1>;
+ qcom,hsusb-otg-otg-control = <1>;
+ qcom,hsusb-otg-disable-reset;
+ qcom,hsusb-otg-pnoc-errata-fix;
+ qcom,hsusb-otg-default-mode = <2>;
+ qcom,hsusb-otg-phy-init-seq = <0x01 0x90 0xffffffff>;
+ qcom,hsusb-otg-power-budget = <500>;
+ qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
+ qcom,hsusb-otg-lpm-on-dev-suspend;
+ qcom,hsusb-otg-clk-always-on-workaround;
+ hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
+ HSUSB_1p8-supply = <&pm8226_l10>;
+ HSUSB_3p3-supply = <&pm8226_l20>;
+ qcom,vdd-voltage-level = <1 5 7>;
+ qcom,dp-manual-pullup;
+ qcom,hsusb-otg-mpm-dpsehv-int = <49>;
+ qcom,hsusb-otg-mpm-dmsehv-int = <58>;
+ qcom,max-nominal-sysclk-rate = <133330000>;
+ qcom,max-svs-sysclk-rate = <100000000>;
+ qcom,pm-qos-latency = <59>;
+
+ qcom,msm-bus,name = "usb2";
+ qcom,msm-bus,num_cases = <2>;
+ qcom,msm-bus,num_paths = <1>;
+ qcom,msm-bus,vectors =
+ <87 512 0 0>,
+ <87 512 60000000 960000000>;
+ pinctrl-names = "hsusb_active","hsusb_sleep";
+ pinctrl-0 = <&vddmin_act>;
+ pinctrl-0 = <&vddmin_sus>;
+ qcom,hsusb-otg-vddmin-gpio = <&pm8019_mpps 6 0>;
+ qcom,disable-retention-with-vdd-min;
+ qcom,usbin-vadc = <&pm8226_vadc>;
+ qcom,usbid-gpio = <&msm_gpio 110 0>;
};
-USB PHY with optional OTG:
+MSM HSUSB EHCI controller
-Required properties:
-- compatible: Should contain:
- "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY
- "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY
+Required properties :
+- compatible : should be "qcom,ehci-host"
+- reg : offset and length of the register set in the memory map
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+ HSUSB EHCI expects "core_irq" and optionally "async_irq".
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is either "hsusb_vdd_dig" or "HSUSB_VDDCX"
+ "HSUSB_1p8-supply" "HSUSB_3p3-supply".
+- qcom,usb2-power-budget: maximum vbus power (in mA) that can be provided.
+- qcom,vdd-voltage-level: This property must be a list of five integer
+ values (no, 0.5vsuspend, 0.75suspend, min, max) where each value represents
+ either a voltage in microvolts or a value corresponding to voltage corner.
+ First value represents value to vote when USB is not at all active, second
+ value represents value to vote when target is not connected to dock during low
+ power mode, third value represents value to vote when target is connected to dock
+ and no peripheral connected over dock during low power mode, fourth value represents
+ minimum value to vote when USB is operational, fifth item represents maximum value
+ to vote for USB is operational.
-- regs: Offset and length of the register set in the memory map
-- interrupts: interrupt-specifier for the OTG interrupt.
+Optional properties :
+- qcom,usb2-enable-hsphy2: If present, select second PHY for USB operation.
+- pinctrl-names : This should be defined if a target uses pinctrl framework.
+ See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
+ It should specify the names of the configs that pinctrl can install in driver
+ Following are the pinctrl configs that can be installed
+ "ehci_active" : Active configuration of pins, this should specify active
+ config defined in pin groups of used gpio's from resume and
+ ext-hub-reset.
+ "ehci_sleep" : Disabled configuration of pins, this should specify sleep
+ config defined in pin groups of used gpio's from resume and
+ ext-hub-reset.
+- qcom,resume-gpio: if present then peripheral connected to usb controller
+ cannot wakeup from XO shutdown using in-band usb bus resume. Use resume
+ gpio to wakeup peripheral.
+- qcom,ext-hub-reset-gpio: If present then an external HUB is connected to
+ the USB host controller and its RESET_N signal is connected to this
+ ext-hub-reset-gpio GPIO. It should be driven LOW to RESET the HUB.
+- qcom,usb2-enable-uicc: If present, usb2 port will be used for uicc card connection.
+- usb-phy: phandle for the PHY device, if described as a separate device tree node
+- qcom,pm-qos-latency: This property represents the maximum tolerable CPU latency in
+ microsecs, which is used as a vote to keep the CPUs in a high enough power state when
+ USB bus is in use (not suspended).
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm-bus.txt" for
+ below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num_cases - Two cases (NONE and MAX) for voting are supported.
+ - qcom,msm-bus,num_paths
+ - qcom,msm-bus,vectors
-- clocks: A list of phandle + clock-specifier pairs for the
- clocks listed in clock-names
-- clock-names: Should contain the following:
- "phy" USB PHY reference clock
- "core" Protocol engine clock
- "iface" Interface bus clock
- "alt_core" Protocol engine clock for targets with asynchronous
- reset methodology. (optional)
-
-- vdccx-supply: phandle to the regulator for the vdd supply for
- digital circuit operation.
-- v1p8-supply: phandle to the regulator for the 1.8V supply
-- v3p3-supply: phandle to the regulator for the 3.3V supply
-
-- resets: A list of phandle + reset-specifier pairs for the
- resets listed in reset-names
-- reset-names: Should contain the following:
- "phy" USB PHY controller reset
- "link" USB LINK controller reset
-
-- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of
- 1 - PHY control
- 2 - PMIC control
-
-Optional properties:
-- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg"
-
-- switch-gpio: A phandle + gpio-specifier pair. Some boards are using Dual
- SPDT USB Switch, witch is cotrolled by GPIO to de/multiplex
- D+/D- USB lines between connectors.
-
-- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device
- Mode Eye Diagram test. Start address at which these values will be
- written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as
- "do not overwrite default value at this address".
- For example: qcom,phy-init-sequence = < -1 0x63 >;
- Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1.
-
-- qcom,phy-num: Select number of pyco-phy to use, can be one of
- 0 - PHY one, default
- 1 - Second PHY
- Some platforms may have configuration to allow USB
- controller work with any of the two HSPHYs present.
-
-- qcom,vdd-levels: This property must be a list of three integer values
- (no, min, max) where each value represents either a voltage
- in microvolts or a value corresponding to voltage corner.
-
-- qcom,manual-pullup: If present, vbus is not routed to USB controller/phy
- and controller driver therefore enables pull-up explicitly
- before starting controller using usbcmd run/stop bit.
-
-- extcon: phandles to external connector devices. First phandle
- should point to external connector, which provide "USB"
- cable events, the second should point to external connector
- device, which provide "USB-HOST" cable events. If one of
- the external connector devices is not required empty <0>
- phandle should be specified.
-
-Example HSUSB OTG controller device node:
-
- usb@f9a55000 {
- compatible = "qcom,usb-otg-snps";
- reg = <0xf9a55000 0x400>;
- interrupts = <0 134 0>;
- dr_mode = "peripheral";
-
- clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>,
- <&gcc GCC_USB_HS_AHB_CLK>;
-
- clock-names = "phy", "core", "iface";
-
- vddcx-supply = <&pm8841_s2_corner>;
- v1p8-supply = <&pm8941_l6>;
- v3p3-supply = <&pm8941_l24>;
-
- resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>;
- reset-names = "phy", "link";
-
- qcom,otg-control = <1>;
- qcom,phy-init-sequence = < -1 0x63 >;
- qcom,vdd-levels = <1 5 7>;
+Example MSM HSUSB EHCI controller device node :
+ ehci: qcom,ehci-host@f9a55000 {
+ compatible = "qcom,ehci-host";
+ reg = <0xf9a55000 0x400>;
+ interrupts = <0 134 0>, <0 140 0>;
+ interrupt-names = "core_irq", "async_irq";
+ /* If pinctrl is used and ext-hub-reset and resume gpio's are present*/
+ pinctrl-names = "ehci_active","ehci_sleep";
+ pinctrl-0 = <&ehci_reset_act &resume_act>;
+ pinctrl-1 = <&ehci_reset_sus &resume_sus>;
+ qcom,resume-gpio = <&msm_gpio 80 0>;
+ qcom,ext-hub-reset-gpio = <&msm_gpio 0 0>;
+ hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+ HSUSB_1p8-supply = <&pm8941_l6>;
+ HSUSB_3p3-supply = <&pm8941_l24>;
+ qcom,usb2-enable-hsphy2;
+ qcom,usb2-power-budget = <500>;
+ qcom,vdd-voltage-level = <1 2 3 5 7>;
+ qcom,usb2-enable-uicc;
};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index f8c8a69..f7f4ced 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -184,6 +184,10 @@
state when attached in host mode and "suspend" state when detached.
- qcom,tune2-efuse-correction: The value to be adjusted from fused value for
improved rise/fall times.
+ - nvmem-cells: specifies the handle to represent the SoC revision.
+ usually it is defined by qfprom device node.
+ - nvmem-cell-names: specifies the given nvmem cell name as defined in
+ qfprom node.
Example:
qusb_phy: qusb@f9b39000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a37e441..e996ba5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -272,6 +272,7 @@
SUNW Sun Microsystems, Inc
swir Sierra Wireless
syna Synaptics Inc.
+synaptics Synaptics Inc.
synology Synology, Inc.
tbs TBS Technologies
tcg Trusted Computing Group
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
new file mode 100644
index 0000000..fbe1bca
--- /dev/null
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -0,0 +1,110 @@
+* Qualcomm Technologies Inc. WCNSS Platform Driver
+
+WCNSS driver is the platform driver. It is used for performing the cold
+boot-up of the wireless device. It is responsible for adjusting
+the necessary I/O rails and enabling appropriate gpios for wireless
+connectivity subsystem.
+
+Required properties:
+- compatible: "wcnss_wlan"
+- reg: physical address and length of the register set for the device.
+- reg-names: "wcnss_mmio", "wcnss_fiq", "pronto_phy_base", "riva_phy_base",
+ "riva_ccu_base", "pronto_a2xb_base", "pronto_ccpu_base",
+ "pronto_saw2_base", "wlan_tx_phy_aborts","wlan_brdg_err_source",
+ "wlan_tx_status", "alarms_txctl", "alarms_tactl",
+ "pronto_mcu_base", "pronto_qfuse".
+- interrupts: Pronto to Apps interrupts for tx done and rx pending.
+- qcom,pronto-vddmx-supply: regulator to supply pronto pll.
+- qcom,pronto-vddcx-supply: voltage corner regulator to supply WLAN/BT/FM
+digital module.
+- qcom,pronto-vddpx-supply: regulator to supply WLAN DAC.
+- qcom,iris-vddxo-supply : regulator to supply RF XO.
+- qcom,iris-vddrfa-supply : regulator to supply RFA digital.
+- qcom,iris-vddpa-supply : regulator to supply RF PA.
+- qcom,iris-vdddig-supply : regulator to supply RF digital(BT/FM).
+- gpios: gpio numbers to configure 5-wire interface of WLAN connectivity
+- qcom,has-48mhz-xo: boolean flag to determine the usage of 24MHz XO from RF
+- qcom,has-pronto-hw: boolean flag to determine the revId of the WLAN subsystem
+- qcom,wcnss-adc_tm: ADC handle for vbatt notification APIs.
+- qcom,wcnss-vadc: VADC handle for battery voltage notification APIs.
+- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
+- pinctrl-names : Names corresponding to the numbered pinctrl states
+- clocks: from common clock binding: handle to xo, rf_clk and wcnss snoc clocks.
+- clock-names: Names of all the clocks that are accessed by the subsystem
+- qcom,vdd-voltage-level: This property represents (nominal, min, max) voltage
+for iris and pronto regulators in milli-volts.
+- qcom,vdd-current: This property represents current value for
+iris and pronto regulators in micro-amps.
+
+Optional properties:
+- qcom,has-autodetect-xo: boolean flag to determine whether Iris XO auto detect
+should be performed during boot up.
+- qcom,snoc-wcnss-clock-freq: indicates the wcnss snoc clock frequency in Hz.
+If wcnss_snoc clock is specified in the list of clocks, this property needs
+to be set to make it functional.
+- qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value,
+using a smaller count for this buffer will reduce the memory usage.
+- qcom,is-pronto-v3: boolean flag to determine the pronto hardware version
+in use. subsequently correct workqueue will be used by DXE engine to push frames
+in TX data path.
+- qcom,is-dual-band-disable: boolean flag to determine the WLAN dual band
+capability.
+- qcom,is-pronto-vadc: boolean flag to determine Battery voltage feature
+support for pronto hardware.
+- qcom,wcnss-pm : <Core rail LDO#, PA rail LDO#, XO settling time,
+RPM power collapse enabled, standalone power collapse enabled>
+Power manager related parameter for LDO configuration.
+ 11 - WCN CORE rail LDO number
+ 21 - WCN PA rail LDO number
+ 1200 - WCN XO settling time (usec)
+ 1 - WCN RPM power collapse enabled
+ 1 - WCN standalone power collapse enabled
+ 6 - GPIO strength value
+- qcom,has-vsys-adc-channel: boolean flag to determine which ADC HW channel need
+to use for VBATT feature.
+- qcom,has-a2xb-split-reg: boolean flag to determine A2xb split timeout limit
+register is available or not.
+
+Example:
+
+qcom,wcnss-wlan@fb000000 {
+ compatible = "qcom,wcnss_wlan";
+ reg = <0xfb000000 0x280000>,
+ <0xf9011008 0x04>;
+reg-names = "wcnss_mmio", "wcnss_fiq";
+ interrupts = <0 145 0 0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ qcom,pronto-vddmx-supply = <&pm8841_s1>;
+ qcom,pronto-vddcx-supply = <&pm8841_s2_corner>;
+ qcom,pronto-vddpx-supply = <&pm8941_s3>;
+ qcom,iris-vddxo-supply = <&pm8941_l6>;
+ qcom,iris-vddrfa-supply = <&pm8941_l11>;
+ qcom,iris-vddpa-supply = <&pm8941_l19>;
+ qcom,iris-vdddig-supply = <&pm8941_l3>;
+
+ gpios = <&msmgpio 36 0>, <&msmgpio 37 0>, <&msmgpio 38 0>,
+ <&msmgpio 39 0>, <&msmgpio 40 0>;
+ qcom,has-48mhz-xo;
+	qcom,is-pronto-v3;
+ qcom,wlan-rx-buff-count = <512>;
+ qcom,has-pronto-hw;
+ qcom,wcnss-adc_tm = <&pm8226_adc_tm>;
+
+ pinctrl-names = "wcnss_default", "wcnss_sleep";
+ pinctrl-0 = <&wcnss_default>;
+ pinctrl-1 = <&wcnss_sleep>;
+ pinctrl-2 = <&wcnss_gpio_default>;
+
+ clocks = <&clock_rpm clk_xo_wlan_clk>,
+ <&clock_rpm clk_rf_clk2>,
+ <&clock_debug clk_gcc_debug_mux>,
+ <&clock_gcc clk_wcnss_m_clk>,
+ <&clock_gcc clk_snoc_wcnss_a_clk>;
+
+ clock-names = "xo", "rf_clk", "measure", "wcnss_debug",
+ "snoc_wcnss";
+
+ qcom,snoc-wcnss-clock-freq = <200000000>;
+ qcom,wcnss-pm = <11 21 1200 1 1 6>;
+};
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 95ccbe6..206c9b0 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -30,6 +30,7 @@
- dirty_writeback_centisecs
- drop_caches
- extfrag_threshold
+- extra_free_kbytes
- hugepages_treat_as_movable
- hugetlb_shm_group
- laptop_mode
@@ -240,6 +241,21 @@
==============================================================
+extra_free_kbytes
+
+This parameter tells the VM to keep extra free memory between the threshold
+where background reclaim (kswapd) kicks in, and the threshold where direct
+reclaim (by allocating processes) kicks in.
+
+This is useful for workloads that require low latency memory allocations
+and have a bounded burstiness in memory allocations, for example a
+realtime application that receives and transmits network traffic
+(causing in-kernel memory allocations) with a maximum total message burst
+size of 200MB may need 200MB of extra free memory to avoid direct reclaim
+related latencies.
+
+==============================================================
+
hugepages_treat_as_movable
This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d8d8b82..41245ce 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARM_PSCI_FW if PM
select BUILDTIME_EXTABLE_SORT if MMU
select CLONE_BACKWARDS
select CPU_PM if (SUSPEND || CPU_IDLE)
diff --git a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
index 2106759..8f7edab 100644
--- a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -97,6 +97,7 @@
qcom,adc-vdd-reference = <1875>;
pinctrl-names = "default";
pinctrl-0 = <&ambient_therm_default>;
+ #thermal-sensor-cells = <1>;
chan@6 {
label = "die_temp";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
new file mode 100644
index 0000000..f8baa04
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
@@ -0,0 +1,1070 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ csr: csr@6001000 {
+ compatible = "qcom,coresight-csr";
+ reg = <0x6001000 0x1000>;
+ reg-names = "csr-base";
+
+ coresight-name = "coresight-csr";
+
+ qcom,blk-size = <1>;
+ };
+
+ tmc_etr: tmc@6048000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b961>;
+
+ reg = <0x6048000 0x1000>,
+ <0x6064000 0x15000>;
+ reg-names = "tmc-base", "bam-base";
+
+ arm,buffer-size = <0x400000>;
+ arm,sg-enable;
+
+ coresight-name = "coresight-tmc-etr";
+ coresight-ctis = <&cti0 &cti8>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ interrupts = <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "byte-cntr-irq";
+
+ port {
+ tmc_etr_in_replicator: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_tmc_etr>;
+ };
+ };
+ };
+
+ replicator_qdss: replicator@6046000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b909>;
+
+ reg = <0x6046000 0x1000>;
+ reg-names = "replicator-base";
+
+ coresight-name = "coresight-replicator";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out_tmc_etr: endpoint {
+ remote-endpoint=
+ <&tmc_etr_in_replicator>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ replicator_in_tmc_etf: endpoint {
+ slave-mode;
+ remote-endpoint=
+ <&tmc_etf_out_replicator>;
+ };
+ };
+ };
+ };
+
+ tmc_etf: tmc@6047000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b961>;
+
+ reg = <0x6047000 0x1000>;
+ reg-names = "tmc-base";
+
+ coresight-name = "coresight-tmc-etf";
+ coresight-ctis = <&cti0 &cti8>;
+ arm,default-sink;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ tmc_etf_out_replicator: endpoint {
+ remote-endpoint =
+ <&replicator_in_tmc_etf>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tmc_etf_in_funnel_merg: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_merg_out_tmc_etf>;
+ };
+ };
+ };
+ };
+
+ funnel_merg: funnel@6045000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6045000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-merg";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_merg_out_tmc_etf: endpoint {
+ remote-endpoint =
+ <&tmc_etf_in_funnel_merg>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_merg_in_funnel_in0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_in0_out_funnel_merg>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_merg_in_funnel_in1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_in1_out_funnel_merg>;
+ };
+ };
+ };
+ };
+
+ funnel_in0: funnel@6041000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6041000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-in0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_in0_out_funnel_merg: endpoint {
+ remote-endpoint =
+ <&funnel_merg_in_funnel_in0>;
+ };
+ };
+
+ port@1 {
+ reg = <6>;
+ funnel_in0_in_funnel_qatb: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_qatb_out_funnel_in0>;
+ };
+ };
+
+ port@2 {
+ reg = <7>;
+ funnel_in0_in_stm: endpoint {
+ slave-mode;
+ remote-endpoint = <&stm_out_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ stm: stm@6002000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b962>;
+
+ reg = <0x6002000 0x1000>,
+ <0x16280000 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+
+ coresight-name = "coresight-stm";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ stm_out_funnel_in0: endpoint {
+ remote-endpoint = <&funnel_in0_in_stm>;
+ };
+ };
+
+ };
+
+ funnel_qatb: funnel@6005000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6005000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-qatb";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_qatb_out_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_in_funnel_qatb>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_qatb_in_tpda: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_out_funnel_qatb>;
+ };
+ };
+ };
+ };
+
+ tpda: tpda@6004000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x6004000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda";
+
+ qcom,tpda-atid = <65>;
+ qcom,bc-elem-size = <10 32>,
+ <13 32>;
+ qcom,tc-elem-size = <13 32>;
+ qcom,dsb-elem-size = <0 32>,
+ <2 32>,
+ <3 32>,
+ <5 32>,
+ <6 32>,
+ <10 32>,
+ <11 32>,
+ <13 32>;
+ qcom,cmb-elem-size = <3 64>,
+ <7 64>,
+ <9 64>,
+ <13 64>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_out_funnel_qatb: endpoint {
+ remote-endpoint =
+ <&funnel_qatb_in_tpda>;
+ };
+
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_in_funnel_ddr_0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_ddr_0_out_tpda>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ tpda_in_tpdm_vsense: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_vsense_out_tpda>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ tpda_in_tpdm_dcc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_dcc_out_tpda>;
+ };
+ };
+
+ port@4 {
+ reg = <5>;
+ tpda_in_tpdm_center: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_center_out_tpda>;
+ };
+ };
+ };
+ };
+
+ funnel_ddr_0: funnel@69e2000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x69e2000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-ddr-0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_ddr_0_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_ddr_0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_ddr_0_in_tpdm_ddr: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_ddr_out_funnel_ddr_0>;
+ };
+ };
+ };
+ };
+
+ tpdm_dcc: tpdm@6870280 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6870280 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-dcc";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port{
+ tpdm_dcc_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_dcc>;
+ };
+ };
+ };
+
+ tpdm_vsense: tpdm@6840000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6840000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-vsense";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port{
+ tpdm_vsense_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_vsense>;
+ };
+ };
+ };
+
+ tpdm_center: tpdm@6c28000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6c28000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-center";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port{
+ tpdm_center_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_center>;
+ };
+ };
+ };
+
+ tpdm_ddr: tpdm@69e0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x69e0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-ddr";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ qcom,msr-fix-req;
+
+ port {
+ tpdm_ddr_out_funnel_ddr_0: endpoint {
+ remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
+ };
+ };
+ };
+
+ funnel_in1: funnel@6042000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6042000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-in1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_in1_out_funnel_merg: endpoint {
+ remote-endpoint =
+ <&funnel_merg_in_funnel_in1>;
+ };
+ };
+
+ port@1 {
+ reg = <2>;
+ funnel_in1_in_funnel_swao: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_swao_out_funnel_in1>;
+ };
+ };
+
+ port@2 {
+ reg = <3>;
+ funnel_in1_in_modem_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&modem_etm0_out_funnel_in1>;
+ };
+ };
+
+ port@3 {
+ reg = <7>;
+ funnel_in1_in_tpda_modem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_modem_out_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ modem_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-modem-etm0";
+ qcom,inst-id = <2>;
+
+ port {
+ modem_etm0_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_modem_etm0>;
+ };
+ };
+ };
+
+ funnel_swao:funnel@6b08000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6b08000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-swao";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_swao_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_funnel_swao>;
+ };
+ };
+
+ port@1 {
+ reg = <7>;
+ funnel_swao_in_tpda_swao: endpoint {
+ slave-mode;
+ remote-endpoint=
+ <&tpda_swao_out_funnel_swao>;
+ };
+ };
+ };
+ };
+
+ tpda_modem: tpda@6832000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x6832000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-modem";
+
+ qcom,tpda-atid = <67>;
+ qcom,dsb-elem-size = <0 32>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_modem_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_tpda_modem>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_modem_in_tpdm_modem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_modem_out_tpda_modem>;
+ };
+ };
+ };
+ };
+
+ tpdm_modem: tpdm@6830000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6830000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-modem";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_modem_out_tpda_modem: endpoint {
+ remote-endpoint = <&tpda_modem_in_tpdm_modem>;
+ };
+ };
+ };
+
+ tpda_swao: tpda@6b01000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x6b01000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-swao";
+
+ qcom,tpda-atid = <71>;
+ qcom,dsb-elem-size = <1 32>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ tpda_swao_out_funnel_swao: endpoint {
+ remote-endpoint =
+ <&funnel_swao_in_tpda_swao>;
+ };
+
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_swao_in_tpdm_swao0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_swao0_out_tpda_swao>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ tpda_swao_in_tpdm_swao1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_swao1_out_tpda_swao>;
+ };
+
+ };
+ };
+ };
+
+ tpdm_swao0: tpdm@6b02000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+
+ reg = <0x6b02000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-swao-0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_swao0_out_tpda_swao: endpoint {
+ remote-endpoint = <&tpda_swao_in_tpdm_swao0>;
+ };
+ };
+ };
+
+ tpdm_swao1: tpdm@6b03000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6b03000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name="coresight-tpdm-swao-1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ qcom,msr-fix-req;
+
+ port {
+ tpdm_swao1_out_tpda_swao: endpoint {
+ remote-endpoint = <&tpda_swao_in_tpdm_swao1>;
+ };
+ };
+ };
+
+ ipcb_tgu: tgu@6b0c000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b999>;
+ reg = <0x6b0c000 0x1000>;
+ reg-names = "tgu-base";
+ tgu-steps = <3>;
+ tgu-conditions = <4>;
+ tgu-regs = <4>;
+ tgu-timer-counters = <8>;
+
+ coresight-name = "coresight-tgu-ipcb";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti0: cti@6010000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6010000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti1: cti@6011000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6011000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti2: cti@6012000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6012000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti2";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti3: cti@6013000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6013000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti3";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti4: cti@6014000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6014000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti4";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti5: cti@6015000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6015000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti5";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti6: cti@6016000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6016000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti6";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti7: cti@6017000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6017000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti7";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti8: cti@6018000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6018000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti8";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti9: cti@6019000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6019000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti9";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti10: cti@601a000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x601a000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti10";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti11: cti@601b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x601b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti11";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti12: cti@601c000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x601c000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti12";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti13: cti@601d000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x601d000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti13";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti14: cti@601e000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x601e000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti14";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti15: cti@601f000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x601f000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti15";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti_cpu0: cti@7003000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x7003000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-cpu0";
+ cpu = <&CPU0>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ };
+
+ cti_modem_cpu0:cti@6837000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6837000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-modem-cpu0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_modem_cpu1:cti@683b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x683b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-modem-cpu1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti0_swao:cti@6b04000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6b04000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti1_swao:cti@6b05000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6b05000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti2_swao:cti@6b06000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6b06000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti2";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti3_swao:cti@6b07000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x6b07000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti3";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti0_ddr0: cti@69e1000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x69e1000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ddr_dl_0_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti0_ddr1: cti@69e4000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x69e4000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ddr_dl_1_cti0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti1_ddr1: cti@69e5000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x69e5000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ddr_dl_1_cti1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti2_ddr1: cti@69e6000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+ reg = <0x69e6000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ddr_dl_1_cti2";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ hwevent: hwevent@0x014066f0 {
+ compatible = "qcom,coresight-hwevent";
+ reg = <0x14066f0 0x4>,
+ <0x14166f0 0x4>,
+ <0x1406038 0x4>,
+ <0x1416038 0x4>;
+ reg-names = "ddr-ch0-cfg", "ddr-ch23-cfg", "ddr-ch0-ctrl",
+ "ddr-ch23-ctrl";
+
+ coresight-name = "coresight-hwevent";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie.dtsi
new file mode 100644
index 0000000..e939bd2
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie.dtsi
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+
+&soc {
+ pcie0: qcom,pcie@1c00000 {
+ compatible = "qcom,pci-msm";
+ cell-index = <0>;
+
+ reg = <0x01c00000 0x2000>,
+ <0x01c02000 0x1000>,
+ <0x40000000 0xf1d>,
+ <0x40000f20 0xa8>,
+ <0x40001000 0x1000>,
+ <0x40100000 0x100000>,
+ <0x40200000 0x100000>,
+ <0x40300000 0x1d00000>,
+ <0x01fce008 0x4>;
+
+ reg-names = "parf", "phy", "dm_core", "elbi", "iatu",
+ "conf", "io", "bars", "tcsr";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x40200000 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x40300000 0x0 0x1d00000>;
+ interrupt-parent = <&pcie0>;
+ interrupts = <0 1 2 3 4 5>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0xffffffff>;
+ interrupt-map = <0 0 0 0 &intc 0 119 0
+ 0 0 0 1 &intc 0 141 0
+ 0 0 0 2 &intc 0 142 0
+ 0 0 0 3 &intc 0 143 0
+ 0 0 0 4 &intc 0 144 0
+ 0 0 0 5 &intc 0 140 0>;
+
+ interrupt-names = "int_msi", "int_a", "int_b", "int_c",
+ "int_d", "int_global_int";
+
+ qcom,phy-sequence = <0x840 0x03 0x0
+ 0x094 0x08 0x0
+ 0x154 0x33 0x0
+ 0x058 0x0f 0x0
+ 0x0a4 0x42 0x0
+ 0x1bc 0x11 0x0
+ 0x0bc 0x82 0x0
+ 0x0d4 0x03 0x0
+ 0x0d0 0x55 0x0
+ 0x0cc 0x55 0x0
+ 0x0b0 0x1a 0x0
+ 0x0ac 0x0a 0x0
+ 0x158 0x01 0x0
+ 0x074 0x06 0x0
+ 0x07c 0x16 0x0
+ 0x084 0x36 0x0
+ 0x1b0 0x1e 0x0
+ 0x1ac 0xb9 0x0
+ 0x050 0x07 0x0
+ 0x29c 0x12 0x0
+ 0x284 0x05 0x0
+ 0x234 0xd9 0x0
+ 0x238 0xcc 0x0
+ 0x51c 0x03 0x0
+ 0x518 0x1c 0x0
+ 0x524 0x14 0x0
+ 0x4ec 0x0e 0x0
+ 0x4f0 0x4a 0x0
+ 0x4f4 0x0f 0x0
+ 0x5b4 0x04 0x0
+ 0x434 0x7f 0x0
+ 0x444 0x70 0x0
+ 0x510 0x17 0x0
+ 0x4d8 0x01 0x0
+ 0x598 0xe0 0x0
+ 0x59c 0xc8 0x0
+ 0x5a0 0xc8 0x0
+ 0x5a4 0x09 0x0
+ 0x5a8 0xb1 0x0
+ 0x584 0x24 0x0
+ 0x588 0xe4 0x0
+ 0x58c 0xec 0x0
+ 0x590 0x39 0x0
+ 0x594 0x36 0x0
+ 0x570 0xef 0x0
+ 0x574 0xef 0x0
+ 0x578 0x2f 0x0
+ 0x57c 0xd3 0x0
+ 0x580 0x40 0x0
+ 0x4fc 0x00 0x0
+ 0x4f8 0xc0 0x0
+ 0x9a4 0x01 0x0
+ 0xc90 0x00 0x0
+ 0xc40 0x01 0x0
+ 0xc48 0x01 0x0
+ 0xca0 0x11 0x0
+ 0x048 0x90 0x0
+ 0xc1c 0xc1 0x0
+ 0x988 0x88 0x0
+ 0x998 0x08 0x0
+ 0x8dc 0x0d 0x0
+ 0x800 0x00 0x0
+ 0x844 0x03 0x0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_clkreq_default
+ &pcie0_perst_default
+ &pcie0_wake_default>;
+
+ perst-gpio = <&tlmm 57 0>;
+ wake-gpio = <&tlmm 53 0>;
+
+ gdsc-vdd-supply = <&gdsc_pcie>;
+ vreg-1.8-supply = <&pmxpoorwills_l1>;
+ vreg-0.9-supply = <&pmxpoorwills_l4>;
+ vreg-cx-supply = <&pmxpoorwills_s5_level>;
+
+ qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
+ qcom,vreg-0.9-voltage-level = <872000 872000 24000>;
+ qcom,vreg-cx-voltage-level = <RPMH_REGULATOR_LEVEL_MAX
+ RPMH_REGULATOR_LEVEL_SVS 0>;
+
+ qcom,l0s-supported;
+ qcom,l1-supported;
+ qcom,l1ss-supported;
+ qcom,aux-clk-sync;
+
+ qcom,ep-latency = <10>;
+
+ qcom,slv-addr-space-size = <0x40000000>;
+
+ qcom,phy-status-offset = <0x814>;
+
+ qcom,cpl-timeout = <0x2>;
+
+ qcom,boot-option = <0x1>;
+
+ linux,pci-domain = <0>;
+
+ qcom,use-19p2mhz-aux-clk;
+
+ qcom,msm-bus,name = "pcie0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <45 512 0 0>,
+ <45 512 500 800>;
+
+ clocks = <&clock_gcc GCC_PCIE_PIPE_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_PCIE_AUX_CLK>,
+ <&clock_gcc GCC_PCIE_CFG_AHB_CLK>,
+ <&clock_gcc GCC_PCIE_MSTR_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_SLV_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&clock_gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_SLEEP_CLK>,
+ <&clock_gcc GCC_PCIE_PHY_REFGEN_CLK>;
+
+ clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+ "pcie_0_ldo", "pcie_0_slv_q2a_axi_clk",
+ "pcie_0_sleep_clk", "pcie_phy_refgen_clk";
+
+ max-clock-frequency-hz = <0>, <0>, <0>, <0>, <0>, <0>,
+ <0>, <0>, <0>, <0>, <100000000>;
+
+ resets = <&clock_gcc GCC_PCIE_BCR>,
+ <&clock_gcc GCC_PCIE_PHY_BCR>;
+
+ reset-names = "pcie_0_core_reset",
+ "pcie_0_phy_reset";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index fa9c4f8..deed94d 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,11 +13,13 @@
&soc {
tlmm: pinctrl@3900000 {
compatible = "qcom,sdxpoorwills-pinctrl";
- reg = <0x3900000 0x300000>;
+ reg = <0x3900000 0x300000>,
+ <0xB204900 0x280>;
interrupts = <0 212 0>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ interrupt-parent = <&pdc>;
#interrupt-cells = <2>;
uart2_console_active: uart2_console_active {
@@ -382,6 +384,44 @@
};
};
+ pcie0 {
+ pcie0_clkreq_default: pcie0_clkreq_default {
+ mux {
+ pins = "gpio56";
+ function = "pcie_clkreq";
+ };
+ config {
+ pins = "gpio56";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie0_perst_default: pcie0_perst_default {
+ mux {
+ pins = "gpio57";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio57";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ pcie0_wake_default: pcie0_wake_default {
+ mux {
+ pins = "gpio53";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio53";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
/* HS UART CONFIGURATION */
blsp1_uart1a: blsp1_uart1a {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
new file mode 100644
index 0000000..eab887c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
@@ -0,0 +1,95 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+
+ qcom,lpm-levels {
+ compatible = "qcom,lpm-levels";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,pm-cluster@0{
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ label = "system";
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+
+ qcom,pm-cluster-level@0 {
+ reg = <0>;
+ label = "cx_active";
+ qcom,psci-mode = <0x0>;
+ qcom,latency-us = <270>;
+ qcom,ss-power = <455>;
+ qcom,energy-overhead = <270621>;
+ qcom,time-overhead = <500>;
+ };
+
+ qcom,pm-cluster-level@1 {
+ reg = <1>;
+ label = "cx_min";
+ qcom,psci-mode = <0x0>;
+ qcom,latency-us = <285>;
+ qcom,ss-power = <442>;
+ qcom,energy-overhead = <306621>;
+ qcom,time-overhead = <540>;
+ qcom,min-child-idx = <2>;
+ qcom,notify-rpm;
+ qcom,is-reset;
+ };
+
+ qcom,pm-cpu@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0>;
+
+ qcom,pm-cpu-level@0{
+ reg = <0>;
+ label = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <1>;
+ qcom,ss-power = <473>;
+ qcom,energy-overhead = <100000>;
+ qcom,time-overhead = <25>;
+ };
+
+ qcom,pm-cpu-level@1 {
+ reg = <1>;
+ label ="standalone_pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <240>;
+ qcom,ss-power = <467>;
+ qcom,energy-overhead = <202781>;
+ qcom,time-overhead = <420>;
+ qcom,use-broadcast-timer;
+ qcom,is-reset;
+ };
+
+ qcom,pm-cpu-level@2 {
+ reg = <2>;
+ label = "system-pc";
+ qcom,psci-cpu-mode = <0x8>;
+ qcom,latency-us = <270>;
+ qcom,ss-power = <455>;
+ qcom,energy-overhead = <270621>;
+ qcom,time-overhead = <500>;
+ qcom,use-broadcast-timer;
+ qcom,is-reset;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
index 053348c..37903b9 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -381,4 +381,24 @@
enable-active-high;
gpio = <&tlmm 92 GPIO_ACTIVE_HIGH>;
};
+
+ vreg_emac_phy: emac_phy_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "emac_phy";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <100>;
+ gpio = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vreg_rgmii_io_pads: rgmii_io_pads_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "rgmii_io_pads";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <100>;
+ gpio = <&tlmm 83 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index 926044a..77e1763 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,37 @@
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
};
+
+ qcom,usbbam@a704000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xa704000 0x17000>;
+ interrupts = <0 132 0>;
+
+ qcom,bam-type = <0>;
+ qcom,usb-bam-fifo-baseaddr = <0x14689000>;
+ qcom,usb-bam-num-pipes = <8>;
+ qcom,ignore-core-reset-ack;
+ qcom,disable-clk-gating;
+ qcom,usb-bam-override-threshold = <0x4001>;
+ qcom,usb-bam-max-mbps-highspeed = <400>;
+ qcom,usb-bam-max-mbps-superspeed = <3600>;
+ qcom,reset-bam-on-connect;
+
+ qcom,pipe0 {
+ label = "ssusb-qdss-in-0";
+ qcom,usb-bam-mem-type = <2>;
+ qcom,dir = <1>;
+ qcom,pipe-num = <0>;
+ qcom,peer-bam = <0>;
+ qcom,peer-bam-physical-address = <0x6064000>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x0>;
+ qcom,data-fifo-size = <0x1800>;
+ qcom,descriptor-fifo-offset = <0x1800>;
+ qcom,descriptor-fifo-size = <0x800>;
+ };
+ };
};
/* USB port for High Speed PHY */
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 40fc3fe..e507c4e 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -17,12 +17,13 @@
#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
/ {
model = "Qualcomm Technologies, Inc. SDX POORWILLS";
compatible = "qcom,sdxpoorwills";
qcom,msm-id = <334 0x0>, <335 0x0>;
- interrupt-parent = <&intc>;
+ interrupt-parent = <&pdc>;
reserved-memory {
#address-cells = <1>;
@@ -67,8 +68,9 @@
#address-cells = <1>;
CPU0: cpu@0 {
- device-type = "cpu";
+ device_type = "cpu";
compatible = "arm,cortex-a7";
+ enable-method = "psci";
reg = <0x0>;
#cooling-cells = <2>;
};
@@ -76,9 +78,15 @@
aliases {
qpic_nand1 = &qnand_1;
+ pci-domain0 = &pcie0;
sdhc1 = &sdhc_1; /* SDC1 eMMC/SD/SDIO slot */
};
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
soc: soc { };
};
@@ -94,6 +102,15 @@
#interrupt-cells = <3>;
reg = <0x17800000 0x1000>,
<0x17802000 0x1000>;
+ interrupt-parent = <&intc>;
+ };
+
+ pdc: interrupt-controller@b210000{
+ compatible = "qcom,pdc-sdxpoorwills";
+ reg = <0xb210000 0x30000>;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
};
timer {
@@ -222,6 +239,13 @@
mbox-names = "apps";
};
+ clock_aop: qcom,aopclk {
+ compatible = "qcom,aop-qmp-clk-v1";
+ #clock-cells = <1>;
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "qdss_clk";
+ };
+
snoc_cnoc_keepalive: qcom,snoc_cnoc_keepalive {
compatible = "qcom,devbw";
governor = "powersave";
@@ -513,7 +537,7 @@
vdd_cx-supply = <&pmxpoorwills_s5_level>;
qcom,proxy-reg-names = "vdd_cx";
- qcom,pas-id = <0>;
+ qcom,pas-id = <4>;
qcom,smem-id = <421>;
qcom,proxy-timeout-ms = <10000>;
qcom,sysmon-id = <0>;
@@ -582,6 +606,8 @@
qcom,mhi-event-ring-id-limits = <9 10>; /* start and end */
qcom,modem-cfg-emb-pipe-flt;
qcom,use-ipa-pm;
+ qcom,arm-smmu;
+ qcom,smmu-fast-map;
qcom,bandwidth-vote-for-ipa;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <5>;
@@ -720,8 +746,76 @@
compatible = "qcom,smp2pgpio-map-ipa-1-in";
gpios = <&smp2pgpio_ipa_1_in 0 0>;
};
+
+ ipa_smmu_ap: ipa_smmu_ap {
+ compatible = "qcom,ipa-smmu-ap-cb";
+ iommus = <&apps_smmu 0x5E0 0x0>;
+ qcom,iova-mapping = <0x20000000 0x40000000>;
+ qcom,additional-mapping =
+ /* modem tables in IMEM */
+ <0x14686000 0x14686000 0x3000>;
+ };
+
+ ipa_smmu_wlan: ipa_smmu_wlan {
+ compatible = "qcom,ipa-smmu-wlan-cb";
+ iommus = <&apps_smmu 0x5E1 0x0>;
+ qcom,additional-mapping =
+ /* ipa-uc ram */
+ <0x1E60000 0x1E60000 0xA000>;
+ };
+
+ ipa_smmu_uc: ipa_smmu_uc {
+ compatible = "qcom,ipa-smmu-uc-cb";
+ iommus = <&apps_smmu 0x5E2 0x0>;
+ qcom,iova-mapping = <0x40000000 0x20000000>;
+ };
};
+ qmp_aop: qcom,qmp-aop@c300000 {
+ compatible = "qcom,qmp-mbox";
+ label = "aop";
+ reg = <0xc300000 0x400>,
+ <0x17811008 0x4>;
+ reg-names = "msgram", "irq-reg-base";
+ qcom,irq-mask = <0x2>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>;
+ priority = <0>;
+ mbox-desc-offset = <0x0>;
+ #mbox-cells = <1>;
+ };
+
+ usb_detect: qcom,gpio-usbdetect {
+ compatible = "qcom,gpio-usbdetect";
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0x0d 0x0 IRQ_TYPE_NONE>;
+ interrupt-names = "vbus_det_irq";
+ status = "disabled";
+ };
+
+ qcom,wdt@17817000{
+ compatible = "qcom,msm-watchdog";
+ reg = <0x17817000 0x1000>;
+ reg-names = "wdt-base";
+ interrupts = <1 3 0>, <1 2 0>;
+ qcom,bark-time = <11000>;
+ qcom,pet-time = <10000>;
+ };
+};
+
+#include "pmxpoorwills.dtsi"
+#include "sdxpoorwills-blsp.dtsi"
+#include "sdxpoorwills-regulator.dtsi"
+#include "sdxpoorwills-smp2p.dtsi"
+#include "sdxpoorwills-usb.dtsi"
+#include "sdxpoorwills-pcie.dtsi"
+#include "sdxpoorwills-bus.dtsi"
+#include "sdxpoorwills-thermal.dtsi"
+#include "sdxpoorwills-audio.dtsi"
+#include "sdxpoorwills-ion.dtsi"
+#include "msm-arm-smmu-sdxpoorwills.dtsi"
+#include "sdxpoorwills-coresight.dtsi"
+
+&soc {
emac_hw: qcom,emac@00020000 {
compatible = "qcom,emac-dwc-eqos";
reg = <0x20000 0x10000>,
@@ -753,32 +847,17 @@
<&clock_gcc GCC_ETH_SLAVE_AHB_CLK>;
clock-names = "eth_axi_clk", "eth_ptp_clk",
"eth_rgmii_clk", "eth_slave_ahb_clk";
+ qcom,phy-intr-redirect = <&tlmm 84 GPIO_ACTIVE_LOW>;
+ qcom,phy-reset = <&tlmm 85 GPIO_ACTIVE_LOW>;
+ vreg_rgmii-supply = <&vreg_rgmii>;
+ vreg_emac_phy-supply = <&vreg_emac_phy>;
+ vreg_rgmii_io_pads-supply = <&vreg_rgmii_io_pads>;
+ gdsc_emac-supply = <&gdsc_emac>;
io-macro-info {
io-macro-bypass-mode = <0>;
io-interface = "rgmii";
};
};
-
- qmp_aop: qcom,qmp-aop@c300000 {
- compatible = "qcom,qmp-mbox";
- label = "aop";
- reg = <0xc300000 0x400>,
- <0x17811008 0x4>;
- reg-names = "msgram", "irq-reg-base";
- qcom,irq-mask = <0x2>;
- interrupts = <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>;
- priority = <0>;
- mbox-desc-offset = <0x0>;
- #mbox-cells = <1>;
- };
-
- usb_detect: qcom,gpio-usbdetect {
- compatible = "qcom,gpio-usbdetect";
- interrupt-parent = <&spmi_bus>;
- interrupts = <0x0 0x0d 0x0 IRQ_TYPE_NONE>;
- interrupt-names = "vbus_det_irq";
- status = "disabled";
- };
};
#include "pmxpoorwills.dtsi"
@@ -786,8 +865,10 @@
#include "sdxpoorwills-regulator.dtsi"
#include "sdxpoorwills-smp2p.dtsi"
#include "sdxpoorwills-usb.dtsi"
+#include "sdxpoorwills-pcie.dtsi"
#include "sdxpoorwills-bus.dtsi"
#include "sdxpoorwills-thermal.dtsi"
#include "sdxpoorwills-audio.dtsi"
#include "sdxpoorwills-ion.dtsi"
#include "msm-arm-smmu-sdxpoorwills.dtsi"
+#include "sdxpoorwills-pm.dtsi"
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index d7b9289..89ed48a 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -231,6 +231,7 @@
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -304,12 +305,16 @@
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_QPNP_PIN=y
CONFIG_GPIO_QPNP_PIN_DEBUG=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMBCHARGER=y
CONFIG_QPNP_TYPEC=y
+CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
CONFIG_THERMAL_QPNP=y
@@ -322,11 +327,15 @@
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_CPR4_APSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_MSM_GFX_LDO=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
@@ -401,12 +410,18 @@
CONFIG_USB_BAM=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
+CONFIG_MSM_SPM=y
+CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
@@ -425,6 +440,7 @@
CONFIG_MSM_PM=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_ANDROID=y
@@ -437,6 +453,7 @@
CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index 4b8dbcb..1e9d7ae 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -239,6 +239,7 @@
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -314,12 +315,16 @@
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_QPNP_PIN=y
CONFIG_GPIO_QPNP_PIN_DEBUG=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMBCHARGER=y
CONFIG_QPNP_TYPEC=y
+CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
CONFIG_THERMAL_QPNP=y
@@ -332,11 +337,15 @@
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_CPR4_APSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_MSM_GFX_LDO=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
@@ -415,14 +424,20 @@
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MSM_SPM=y
+CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
@@ -442,6 +457,7 @@
CONFIG_MSM_PM=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_ANDROID=y
@@ -454,6 +470,7 @@
CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -486,6 +503,7 @@
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 7a85ac6..29e6335 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -25,6 +25,7 @@
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDXPOORWILLS=y
+CONFIG_PCI_MSM=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_CMA=y
@@ -44,9 +45,13 @@
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_GRE=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_IPV6_MROUTE=y
@@ -70,6 +75,8 @@
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -77,6 +84,7 @@
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
@@ -100,6 +108,7 @@
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
@@ -277,6 +286,7 @@
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
@@ -312,6 +322,7 @@
CONFIG_GPIO_USB_DETECT=y
CONFIG_USB_BAM=y
CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
CONFIG_MDM_GCC_SDXPOORWILLS=y
CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -324,6 +335,7 @@
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_MSM_SMEM=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -362,4 +374,13 @@
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index b4b4ba9..865406f 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -27,6 +27,7 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDXPOORWILLS=y
# CONFIG_VDSO is not set
+CONFIG_PCI_MSM=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_CMA=y
@@ -46,9 +47,13 @@
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_GRE=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_IPV6_MROUTE=y
@@ -72,6 +77,8 @@
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -79,6 +86,7 @@
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
@@ -102,6 +110,7 @@
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
@@ -276,6 +285,7 @@
CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
@@ -309,7 +319,9 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
CONFIG_GPIO_USB_DETECT=y
+CONFIG_USB_BAM=y
CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
CONFIG_MDM_GCC_SDXPOORWILLS=y
CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -322,6 +334,7 @@
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_MSM_SMEM=y
@@ -345,7 +358,6 @@
CONFIG_PWM_QPNP=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_ANDROID=y
-CONFIG_STM=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_VFAT_FS=y
@@ -377,6 +389,19 @@
CONFIG_IPC_LOGGING=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_USER=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_CMAC=y
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 062c484..906623e 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -22,6 +22,7 @@
extern unsigned int user_debug;
extern char* (*arch_read_hardware_id)(void);
+const char * __init arch_read_machine_name(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 877f461..09dd8ff 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1174,7 +1174,7 @@
return 0;
}
-subsys_initcall(topology_init);
+postcore_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index f9dfe80..405e34d 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -44,12 +44,14 @@
select HAVE_ARM_ARCH_TIMER
select MSM_CORTEX_A7
select PINCTRL
+ select PCI
select QCOM_SCM if SMP
select MSM_JTAG_MM if CORESIGHT_ETM
select PM_DEVFREQ
select COMMON_CLK
select COMMON_CLK_QCOM
select QCOM_GDSC
+ select GENERIC_CLOCKEVENTS_BROADCAST
config ARCH_MSM8953
bool "Enable support for MSM8953"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index d35cecb..b7640d3 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -154,6 +154,15 @@
This enables support for the MSM8953 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
+config ARCH_MSM8937
+ bool "Enable Support for Qualcomm Technologies Inc. MSM8937"
+ depends on ARCH_QCOM
+ select CPU_FREQ_QCOM
+ select COMMON_CLK_MSM
+ help
+ This enables support for the MSM8937 chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
config ARCH_SDM450
bool "Enable Support for Qualcomm Technologies Inc. SDM450"
depends on ARCH_QCOM
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index f1dbace..9877324 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -10,7 +10,8 @@
sdm845-4k-panel-mtp-overlay.dtbo \
sdm845-4k-panel-cdp-overlay.dtbo \
sdm845-4k-panel-qrd-overlay.dtbo \
- sdm845-v2-qvr-overlay.dtbo \
+ sdm845-v2-qvr-evt-overlay.dtbo \
+ sdm845-v2-qvr-dvt-overlay.dtbo \
sdm845-v2-cdp-overlay.dtbo \
sdm845-v2-mtp-overlay.dtbo \
sdm845-v2-qrd-overlay.dtbo \
@@ -41,7 +42,10 @@
sda845-v2.1-qrd-overlay.dtbo \
sda845-v2.1-4k-panel-cdp-overlay.dtbo \
sda845-v2.1-4k-panel-mtp-overlay.dtbo \
- sda845-v2.1-4k-panel-qrd-overlay.dtbo
+ sda845-v2.1-4k-panel-qrd-overlay.dtbo \
+ sda845-v2.1-cdp-sdxpoorwills-overlay.dtbo \
+ sda845-v2.1-mtp-sdxpoorwills-overlay.dtbo \
+ sda845-v2-mtp-sdxpoorwills-overlay.dtbo
sdm845-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-mtp-overlay.dtbo-base := sdm845.dtb
@@ -49,7 +53,8 @@
sdm845-4k-panel-mtp-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-qrd-overlay.dtbo-base := sdm845.dtb
-sdm845-v2-qvr-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-qvr-evt-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-qvr-dvt-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
@@ -81,6 +86,9 @@
sda845-v2.1-4k-panel-cdp-overlay.dtbo-base := sda845-v2.1.dtb
sda845-v2.1-4k-panel-mtp-overlay.dtbo-base := sda845-v2.1.dtb
sda845-v2.1-4k-panel-qrd-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-cdp-sdxpoorwills-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-mtp-sdxpoorwills-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2-mtp-sdxpoorwills-overlay.dtbo-base := sda845-v2.dtb
else
dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
sdm845-rumi.dtb \
@@ -91,7 +99,8 @@
sdm845-v2-cdp.dtb \
sdm845-qrd.dtb \
sdm845-v2-qrd.dtb \
- sdm845-v2-qvr.dtb \
+ sdm845-v2-qvr-evt.dtb \
+ sdm845-v2-qvr-dvt.dtb \
sdm845-4k-panel-mtp.dtb \
sdm845-4k-panel-cdp.dtb \
sdm845-4k-panel-qrd.dtb \
@@ -124,6 +133,8 @@
sda670-mtp-overlay.dtbo \
sda670-pm660a-cdp-overlay.dtbo \
sda670-pm660a-mtp-overlay.dtbo \
+ sdm670-tasha-codec-cdp-overlay.dtbo \
+ sdm670-pm660a-tasha-codec-cdp-overlay.dtbo \
qcs605-cdp-overlay.dtbo \
qcs605-mtp-overlay.dtbo \
qcs605-360camera-overlay.dtbo \
@@ -149,6 +160,8 @@
sdm670-usbc-external-codec-mtp-overlay.dtbo-base := sdm670.dtb
sdm670-usbc-external-codec-pm660a-cdp-overlay.dtbo-base := sdm670.dtb
sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo-base := sdm670.dtb
+sdm670-tasha-codec-cdp-overlay.dtbo-base := sdm670.dtb
+sdm670-pm660a-tasha-codec-cdp-overlay.dtbo-base := sdm670.dtb
sda670-cdp-overlay.dtbo-base := sda670.dtb
sda670-mtp-overlay.dtbo-base := sda670.dtb
sda670-pm660a-cdp-overlay.dtbo-base := sda670.dtb
@@ -181,6 +194,8 @@
sdm670-usbc-pm660a-mtp.dtb \
sda670-mtp.dtb \
sda670-cdp.dtb \
+ sdm670-tasha-codec-cdp.dtb \
+ sdm670-pm660a-tasha-codec-cdp.dtb \
sda670-pm660a-mtp.dtb \
sda670-pm660a-cdp.dtb \
qcs605-360camera.dtb \
@@ -191,6 +206,68 @@
endif
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+dtbo-$(CONFIG_ARCH_MSM8953) += msm8953-mtp-overlay.dtbo \
+ msm8953-cdp-overlay.dtbo \
+ msm8953-rcm-overlay.dtbo \
+ msm8953-ipc-overlay.dtbo \
+ msm8953-qrd-overlay.dtbo \
+ msm8953-iot-mtp-overlay.dtbo \
+ msm8953-ext-codec-mtp-overlay.dtbo \
+ msm8953-ext-codec-rcm-overlay.dtbo \
+ msm8953-cdp-1200p-overlay.dtbo
+
+dtbo-$(CONFIG_ARCH_SDM450) += msm8953-mtp-overlay.dtbo \
+ msm8953-cdp-overlay.dtbo \
+ msm8953-rcm-overlay.dtbo \
+ msm8953-qrd-overlay.dtbo \
+ msm8953-iot-mtp-overlay.dtbo \
+ sdm450-cdp-s2-overlay.dtbo \
+ sdm450-mtp-s3-overlay.dtbo \
+ sdm450-qrd-sku4-overlay.dtbo
+
+dtbo-$(CONFIG_ARCH_SDM632) += sdm632-rumi-overlay.dtbo \
+ sdm450-cdp-s2-overlay.dtbo \
+ sdm450-mtp-s3-overlay.dtbo \
+ sdm450-qrd-sku4-overlay.dtbo
+
+msm8953-mtp-overlay.dtbo-base := sdm450.dtb \
+ msm8953.dtb \
+ apq8053.dtb \
+ msm8953-pmi8940.dtb \
+ msm8953-pmi8937.dtb \
+ sdm450-pmi8940.dtb \
+ sdm450-pmi8937.dtb
+msm8953-cdp-overlay.dtbo-base := sdm450.dtb \
+ msm8953.dtb \
+ apq8053.dtb \
+ msm8953-pmi8940.dtb \
+ msm8953-pmi8937.dtb
+msm8953-rcm-overlay.dtbo-base := sdm450.dtb \
+ msm8953.dtb \
+ apq8053.dtb
+msm8953-ipc-overlay.dtbo-base := msm8953.dtb \
+ apq8053.dtb
+msm8953-qrd-overlay.dtbo-base := sdm450.dtb \
+ msm8953.dtb
+msm8953-iot-mtp-overlay.dtbo-base := sdm450.dtb \
+ msm8953.dtb \
+ apq8053.dtb
+msm8953-ext-codec-mtp-overlay.dtbo-base := msm8953.dtb \
+ apq8053.dtb \
+ msm8953-pmi8940.dtb \
+ msm8953-pmi8937.dtb
+msm8953-ext-codec-rcm-overlay.dtbo-base := msm8953.dtb \
+ apq8053.dtb
+msm8953-cdp-1200p-overlay.dtbo-base := msm8953.dtb
+sdm450-cdp-s2-overlay.dtbo-base := sdm450-pmi632.dtb \
+ sdm632.dtb \
+ msm8953-pmi632.dtb
+sdm450-mtp-s3-overlay.dtbo-base := sdm450-pmi632.dtb \
+ sdm632.dtb
+sdm450-qrd-sku4-overlay.dtbo-base := sdm450-pmi632.dtb \
+ sdm632.dtb
+sdm632-rumi-overlay.dtbo-base := sdm632.dtb
+
else
dtb-$(CONFIG_ARCH_MSM8953) += msm8953-cdp.dtb \
msm8953-mtp.dtb \
@@ -213,7 +290,8 @@
msm8953-pmi8937-cdp.dtb \
msm8953-pmi8937-mtp.dtb \
msm8953-pmi8940-ext-codec-mtp.dtb \
- msm8953-pmi8937-ext-codec-mtp.dtb
+ msm8953-pmi8937-ext-codec-mtp.dtb \
+ msm8953-pmi632-cdp-s2.dtb
dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
sdm450-cdp.dtb \
@@ -226,7 +304,10 @@
sdm450-pmi632-cdp-s2.dtb \
sdm450-pmi632-mtp-s3.dtb
-dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb
+dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb \
+ sdm632-cdp-s2.dtb \
+ sdm632-mtp-s3.dtb \
+ sdm632-qrd-sku4.dtb
endif
diff --git a/arch/arm64/boot/dts/qcom/apq8053-cdp.dts b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
index 5e89e4f..57401d8 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "apq8053.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 CDP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
index 2c7b228..2d5e761a 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "apq8053.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
index d026734..96e1d53 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "apq8053.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec RCM";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
index 177e105..44b4792 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "apq8053.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IOT MTP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
index be544af..89b7624 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "apq8053.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 MTP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-rcm.dts b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
index cc5bdaa..d70b99f 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "apq8053.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 RCM";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053.dts
index 0a56c79..bf9e2f2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 SOC";
+ compatible = "qcom,apq8053";
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+ qcom,pmic-name = "PMI8950";
};
diff --git a/arch/arm64/boot/dts/qcom/apq8053.dtsi b/arch/arm64/boot/dts/qcom/apq8053.dtsi
index 15a1595..4600dc1 100644
--- a/arch/arm64/boot/dts/qcom/apq8053.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,9 +12,10 @@
*/
#include "msm8953.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. APQ 8953";
+ model = "Qualcomm Technologies, Inc. APQ8053";
compatible = "qcom,apq8053";
qcom,msm-id = <304 0x0>;
+ qcom,msm-name = "APQ8053";
};
&secure_mem {
diff --git a/arch/arm64/boot/dts/qcom/dsi-adv7533-1080p.dtsi b/arch/arm64/boot/dts/qcom/dsi-adv7533-1080p.dtsi
new file mode 100644
index 0000000..7994285
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-adv7533-1080p.dtsi
@@ -0,0 +1,75 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_adv7533_1080p: qcom,mdss_dsi_adv7533_1080p {
+ label = "adv7533 1080p video mode dsi panel";
+ qcom,mdss-dsi-panel-name = "dsi_adv7533_1080p";
+ qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1920>;
+ qcom,mdss-dsi-panel-height = <1080>;
+ qcom,mdss-dsi-h-front-porch = <88>;
+ qcom,mdss-dsi-h-back-porch = <148>;
+ qcom,mdss-dsi-h-pulse-width = <44>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <36>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <5>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 c8 00 02 11 00
+ 05 01 00 00 0a 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00
+ 05 01 00 00 00 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [
+ E6 38 26 00 68 6C 2A 3A 2C 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2B>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <160>;
+ qcom,mdss-pan-physical-height-dimension = <90>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ qcom,mdss-dsi-always-on;
+ qcom,mdss-dsi-panel-timings-phy-v2 = [1d 1a 03 05 01 03 04 a0
+ 1d 1a 03 05 01 03 04 a0
+ 1d 1a 03 05 01 03 04 a0
+ 1d 1a 03 05 01 03 04 a0
+ 1d 1a 03 05 01 03 04 a0];
+ qcom,dba-panel;
+ qcom,bridge-name = "adv7533";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-adv7533-720p.dtsi b/arch/arm64/boot/dts/qcom/dsi-adv7533-720p.dtsi
new file mode 100644
index 0000000..b84488c0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-adv7533-720p.dtsi
@@ -0,0 +1,74 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+dsi_adv7533_720p: qcom,mdss_dsi_adv7533_720p {
+ label = "adv7533 720p video mode dsi panel";
+ qcom,mdss-dsi-panel-name = "dsi_adv7533_720p";
+ qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1280>;
+ qcom,mdss-dsi-panel-height = <720>;
+ qcom,mdss-dsi-h-front-porch = <110>;
+ qcom,mdss-dsi-h-back-porch = <220>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <20>;
+ qcom,mdss-dsi-v-front-porch = <5>;
+ qcom,mdss-dsi-v-pulse-width = <5>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 c8 00 02 11 00
+ 05 01 00 00 0a 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00
+ 05 01 00 00 00 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-panel-timings = [
+ A4 24 18 00 4E 52 1C 28 1C 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x03>;
+ qcom,mdss-dsi-t-clk-pre = <0x20>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <160>;
+ qcom,mdss-pan-physical-height-dimension = <90>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ qcom,mdss-dsi-always-on;
+ qcom,mdss-dsi-panel-timings-phy-v2 = [1c 19 02 03 01 03 04 a0
+ 1c 19 02 03 01 03 04 a0
+ 1c 19 02 03 01 03 04 a0
+ 1c 19 02 03 01 03 04 a0
+ 1c 08 02 03 01 03 04 a0];
+ qcom,dba-panel;
+ qcom,bridge-name = "adv7533";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-adv7533-1080p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-adv7533-1080p-video.dtsi
new file mode 100644
index 0000000..cbf82af
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-adv7533-1080p-video.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_adv7533_1080p: qcom,mdss_dsi_adv7533_1080p {
+ label = "adv7533 720p video mode dsi panel";
+ qcom,mdss-dsi-panel-name = "dsi_adv7533_1080p";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1920>;
+ qcom,mdss-dsi-panel-height = <1080>;
+ qcom,mdss-dsi-h-front-porch = <88>;
+ qcom,mdss-dsi-h-back-porch = <148>;
+ qcom,mdss-dsi-h-pulse-width = <44>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <36>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <5>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 c8 00 02 11 00
+ 05 01 00 00 0a 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00
+ 05 01 00 00 00 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [e6 38 26 00 68 6c 2a 3a
+ 2c 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2B>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <160>;
+ qcom,mdss-pan-physical-height-dimension = <90>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ qcom,mdss-dsi-always-on;
+ qcom,dba-panel;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-adv7533-720p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-adv7533-720p-video.dtsi
new file mode 100644
index 0000000..55ce9f7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-adv7533-720p-video.dtsi
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_adv7533_720p: qcom,mdss_dsi_adv7533_720p {
+ label = "adv7533 720p video mode dsi panel";
+ qcom,mdss-dsi-panel-name = "dsi_adv7533_720p";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1280>;
+ qcom,mdss-dsi-panel-height = <720>;
+ qcom,mdss-dsi-h-front-porch = <110>;
+ qcom,mdss-dsi-h-back-porch = <220>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <20>;
+ qcom,mdss-dsi-v-front-porch = <5>;
+ qcom,mdss-dsi-v-pulse-width = <5>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 c8 00 02 11 00
+ 05 01 00 00 0a 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00
+ 05 01 00 00 00 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-panel-timings = [a4 24 18 00 4e 52 1c 28
+ 1c 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x03>;
+ qcom,mdss-dsi-t-clk-pre = <0x20>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <160>;
+ qcom,mdss-pan-physical-height-dimension = <90>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ qcom,mdss-dsi-always-on;
+ qcom,dba-panel;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-lt8912-1080p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-lt8912-1080p-video.dtsi
new file mode 100644
index 0000000..7297d2a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-lt8912-1080p-video.dtsi
@@ -0,0 +1,69 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_lt8912_1080_vid: qcom,mdss_dsi_lt8912_1080p_video {
+ qcom,mdss-dsi-panel-name = "lt8912 1080p video mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1920>;
+ qcom,mdss-dsi-panel-height = <1080>;
+ qcom,mdss-dsi-h-front-porch = <88>;
+ qcom,mdss-dsi-h-back-porch = <148>;
+ qcom,mdss-dsi-h-pulse-width = <44>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <36>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <5>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 a0 00 02 11 00
+ 05 01 00 00 a0 00 02 29 00];
+ qcom,mdss-dsi-off-command = [
+ 05 01 00 00 78 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-panel-timings-phy-v2 = [
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1a 08 09 05 03 04 a0];
+ qcom,mdss-dsi-panel-timings = [
+ e6 38 26 00 68 6c 2a 3a 2c 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2b>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
+ qcom,mdss-dsi-post-init-delay = <1>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-lt8912-480p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-lt8912-480p-video.dtsi
new file mode 100644
index 0000000..cde7fb4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-lt8912-480p-video.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_lt8912_480_vid: qcom,mdss_dsi_lt8912_480p_video {
+ qcom,mdss-dsi-panel-name = "lt8912 480p video mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <640>;
+ qcom,mdss-dsi-panel-height = <480>;
+ qcom,mdss-dsi-h-front-porch = <16>;
+ qcom,mdss-dsi-h-back-porch = <48>;
+ qcom,mdss-dsi-h-pulse-width = <96>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <32>;
+ qcom,mdss-dsi-v-front-porch = <15>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 a0 00 02 11 00
+ 05 01 00 00 a0 00 02 29 00];
+ qcom,mdss-dsi-off-command = [
+ 05 01 00 00 78 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-panel-timings-phy-v2 = [
+ 1D 1A 03 05 01 03 04 a0
+ 1D 1A 03 05 01 03 04 a0
+ 1D 0A 03 04 01 03 04 a0];
+ qcom,mdss-dsi-panel-timings = [
+ 65 12 0C 00 34 38 10 16 0F 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2B>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
+ qcom,mdss-dsi-post-init-delay = <1>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
index c059443..1b38d06 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,17 +38,13 @@
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
- qcom,mdss-dsi-panel-timings =
- [da 34 24 00 64 68 28 38 2a 03 04 00];
- qcom,mdss-dsi-t-clk-pre = <0x29>;
- qcom,mdss-dsi-t-clk-post = <0x03>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-lp11-init;
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 50>;
qcom,mdss-dsi-display-timings {
timing@0 {
qcom,mdss-dsi-panel-framerate = <60>;
@@ -77,11 +73,11 @@
05 01 00 00 0a 00 02 20 00
15 01 00 00 00 00 02 bb 10
05 01 00 00 78 00 02 11 00
- 05 01 00 00 14 00 02 29 00
+ 05 01 00 00 78 00 02 29 00
];
qcom,mdss-dsi-off-command = [
- 05 01 00 00 14 00 02
- 28 00 05 01 00 00 78 00 02 10 00
+ 05 01 00 00 78 00 02 28 00
+ 05 01 00 00 78 00 02 10 00
];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-r69006-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-r69006-1080p-cmd.dtsi
new file mode 100644
index 0000000..90d42a9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-r69006-1080p-cmd.dtsi
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*---------------------------------------------------------------------------
+ * This file is autogenerated file using gcdb parser. Please do not edit it.
+ * Update input XML file to add a new entry or update variable in this file
+ * VERSION = "1.0"
+ *---------------------------------------------------------------------------
+ */
+
+&mdss_mdp {
+ dsi_r69006_1080p_cmd: qcom,mdss_dsi_r69006_1080p_cmd {
+ qcom,mdss-dsi-panel-name = "r69006 1080p cmd mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <82>;
+ qcom,mdss-dsi-h-pulse-width = <20>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <9>;
+ qcom,mdss-dsi-v-front-porch = <3>;
+ qcom,mdss-dsi-v-pulse-width = <15>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [23 01 00 00 00 00 02 B0 00
+ 29 01 00 00 00 00 06
+ B3 04 10 00 00 00
+ 29 01 00 00 00 00 03
+ B4 0C 00
+ 29 01 00 00 00 00 04
+ B6 3B D3 00
+ 23 01 00 00 00 00
+ 02 C0 00
+ 15 01 00 00 00 00
+ 02 36 98
+ 23 01 00 00 00 00
+ 02 CC 04
+ 29 01 00 00 00 00 20
+ C1 84 00 10 EF 8B F1 FF
+ FF DF 9C C5 9A 73 8D AD
+ 63 FE FF FF CB F8 01 00
+ AA 40 02 C2 01 08 00 01
+ 29 01 00 00 00 00 0A
+ CB 0D FE 1F 2C 00 00 00
+ 00 00
+ 29 01 00 00 00 00 0B
+ C2 01 F7 80 04 63 00 60
+ 00 01 30
+ 29 01 00 00 00 00 07
+ C3 55 01 00 01 00 00
+ 29 01 00 00 00 00 12
+ C4 70 00 00 00 00 00 00
+ 00 00 02 01 00 05 01 00
+ 00 00
+ 29 01 00 00 00 00 0F
+ C6 57 07 4A 07 4A 01 0E
+ 01 02 01 02 09 15 07
+ 29 01 00 00 00 00 1F
+ C7 00 06 0C 16 27 35 3F
+ 4D 33 3C 49 5B 64 66 67
+ 00 06 0C 16 27 35 3F 4D
+ 33 3C 49 5B 64 66 67
+ 29 01 00 00 00 00 14
+ C8 00 00 FE 01 08 E7 00
+ 00 FD 02 03 A8 00 00 FC
+ E7 E9 C9 00
+ 29 01 00 00 00 00 09
+ C9 1F 68 1F 68 4C 4C C4
+ 11
+ 29 01 00 00 00 00 11
+ D0 11 01 91 0B D9 19 19
+ 00 00 00 19 99 00 00 00
+ 00
+ 29 01 00 00 00 00 1D
+ D3 1B 3B BB AD A5 33 33
+ 33 00 80 AD A8 37 33 33
+ 33 33 F7 F2 1F 7D 7C FF
+ 0F 99 00 FF FF
+ 29 01 00 00 00 00 04
+ D4 57 33 03
+ 29 01 00 00 00 00 0C
+ D5 66 00 00 01 32 01 32
+ 00 0b 00 0b
+ 29 01 00 00 00 00 02 BE 04
+ 29 01 00 00 00 00 11
+ CF 40 10 00 00 00 00 32
+ 00 00 00 00 00 00 00 00
+ 00
+ 29 01 00 00 00 00 06
+ DE 00 00 3F FF 10
+ 29 01 00 00 00 00 02 E9 00
+ 29 01 00 00 00 00 02 F2 00
+ 23 01 00 00 00 00 02 D6 01
+ 39 01 00 00 00 00 02 35 00
+ 39 01 00 00 00 00 02 51 FF
+ 39 01 00 00 00 00 02 53 2C
+ 39 01 00 00 00 00 02 55 00
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 14 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0A 00 02 28 00
+ 29 01 00 00 00 00 1d D3 13 3B BB A5 A5 33 33 33
+ 00 80 A4 A8 37 33 33 33 33 F7 F2 1F 7D
+ 7C FF 0F 99 00 FF FF
+ 05 01 00 00 5A 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-te-pin-select = <1>;
+ qcom,mdss-dsi-wr-mem-start = <0x2c>;
+ qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+ qcom,mdss-dsi-te-dcs-command = <1>;
+ qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-te-pin;
+ qcom,mdss-dsi-panel-timings = [6E 3F 36 00 5A 4F 38 41 54
+ 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x1e>;
+ qcom,mdss-dsi-t-clk-pre = <0x30>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 01 0A];
+ qcom,mdss-dsi-panel-status-command-mode = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-status-value = <0x1C>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-rx-eot-ignore;
+ qcom,mdss-dsi-tx-eot-append;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-r69006-1080p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-r69006-1080p-video.dtsi
new file mode 100644
index 0000000..4cbc922
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-r69006-1080p-video.dtsi
@@ -0,0 +1,137 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*---------------------------------------------------------------------------
+ * This file is autogenerated file using gcdb parser. Please do not edit it.
+ * Update input XML file to add a new entry or update variable in this file
+ * VERSION = "1.0"
+ *---------------------------------------------------------------------------
+ */
+
+&mdss_mdp {
+ dsi_r69006_1080p_video: qcom,mdss_dsi_r69006_1080p_video {
+ qcom,mdss-dsi-panel-name = "r69006 1080p video mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <82>;
+ qcom,mdss-dsi-h-pulse-width = <20>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <9>;
+ qcom,mdss-dsi-v-front-porch = <3>;
+ qcom,mdss-dsi-v-pulse-width = <15>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [23 01 00 00 00 00 02 B0 00
+ 29 01 00 00 00 00 06
+ B3 05 10 00 00 00
+ 29 01 00 00 00 00 03 B4 0c 00
+ 29 01 00 00 00 00 04 B6 3b c3 00
+ 23 01 00 00 00 00 02 C0 00
+ 15 01 00 00 00 00 02 36 98
+ 23 01 00 00 00 00 02 CC 04
+ 29 01 00 00 00 00 20
+ C1 84 00 10 EF 8B
+ F1 FF FF DF 9C C5
+ 9A 73 8D AD 63 FE
+ FF FF CB F8 01 00
+ AA 40 00 C2 01 08
+ 00 01
+ 29 01 00 00 00 00 0A
+ CB 0D FE 1F 2C 00
+ 00 00 00 00
+ 29 01 00 00 00 00 0B
+ C2 01 F7 80 04 63
+ 00 60 00 01 30
+ 29 01 00 00 00 00 07
+ C3 55 01 00 01 00
+ 00
+ 29 01 00 00 00 00 12
+ C4 70 00 00 00 00
+ 00 00 00 00 02 01
+ 00 05 01 00 00 00
+ 29 01 00 00 00 00 0F
+ C6 59 07 4a 07 4a
+ 01 0E 01 02 01 02
+ 09 15 07
+ 29 01 00 00 00 00 1F
+ C7 00 30 32 34 42
+ 4E 56 62 44 4A 54
+ 62 6B 73 7F 08 30
+ 32 34 42 4E 56 62
+ 44 4A 54 62 6B 73
+ 7F
+ 29 01 00 00 00 00 14
+ C8 00 00 00 00 00
+ FC 00 00 00 00 00
+ FC 00 00 00 00 00
+ FC 00
+ 29 01 00 00 00 00 09
+ C9 1F 68 1F 68 4C
+ 4C C4 11
+ 29 01 00 00 00 00 11
+ D0 33 01 91 0B D9
+ 19 19 00 00 00 19
+ 99 00 00 00 00
+ 29 01 00 00 00 00 1D
+ D3 1B 3B BB AD A5
+ 33 33 33 00 80 AD
+ A8 6f 6f 33 33 33
+ F7 F2 1F 7D 7C FF
+ 0F 99 00 FF FF
+ 29 01 00 00 00 00 04
+ D4 57 33 03
+ 29 01 00 00 00 00 0C
+ D5 66 00 00 01 27
+ 01 27 00 6D 00 6D
+ 23 01 00 00 00 00 02 D6 81
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+ 05 01 00 00 96 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
+ 22 27 1e 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x20>;
+ qcom,mdss-dsi-t-clk-pre = <0x2c>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 01 0A];
+ qcom,mdss-dsi-panel-status-command-mode = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-status-value = <0x1C>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi
new file mode 100644
index 0000000..83b8ca0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi
@@ -0,0 +1,96 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_truly_1080_cmd: qcom,mdss_dsi_truly_1080p_cmd {
+ qcom,mdss-dsi-panel-name = "truly 1080p cmd mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <96>;
+ qcom,mdss-dsi-h-back-porch = <64>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <16>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-te-pin-select = <1>;
+ qcom,mdss-dsi-te-dcs-command = <1>;
+ qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-te-pin;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings =
+ [e6 38 26 00 68 6e 2a 3c 44 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-on-command = [23 01 00 00 00 00 02 d6 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 51 ff
+ 15 01 00 00 00 00 02 53 2c
+ 15 01 00 00 00 00 02 55 00
+ 05 01 00 00 78 00 02 11 00
+ 23 01 00 00 00 00 02 b0 04
+ 29 01 00 00 00 00 07 b3 04 00 00 00 00 00
+ 29 01 00 00 00 00 03 b6 3a d3
+ 29 01 00 00 00 00 03 c0 00 00
+ 29 01 00 00 00 00 23 c1 84 60 10 eb ff 6f ce ff ff 17 02
+ 58 73 ae b1 20 c6 ff ff 1f f3 ff 5f 10 10 10 10
+ 00 02 01 22 22 00 01
+ 29 01 00 00 00 00 08 c2 31 f7 80 06 08 00 00
+ 29 01 00 00 00 00 17 c4 70 00 00 00 00 04 00 00 00 0c 06
+ 00 00 00 00 00 04 00 00 00 0c 06
+ 29 01 00 00 00 00 29 c6 78 69 00 69 00 69 00 00 00 00 00
+ 69 00 69 00 69 10 19 07 00 78 00 69 00 69 00 69
+ 00 00 00 00 00 69 00 69 00 69 10 19 07
+ 29 01 00 00 00 00 0a cb 31 fc 3f 8c 00 00 00 00 c0
+ 23 01 00 00 00 00 02 cc 0b
+ 29 01 00 00 00 00 0b d0 11 81 bb 1e 1e 4c 19 19 0c 00
+ 29 01 00 00 00 00 1a d3 1b 33 bb bb b3 33 33 33 00 01 00
+ a0 d8 a0 0d 4e 4e 33 3b 22 72 07 3d bf 33
+ 29 01 00 00 00 00 08 d5 06 00 00 01 51 01 32
+ 29 01 00 00 00 00 1f c7 01 0a 11 18 26 33 3e 50 38 42 52
+ 60 67 6e 77 01 0a 11 18 26 33 3e 50 38 42 52 60
+ 67 6e 77
+ 29 01 00 00 14 00 14 c8 01 00 00 00 00 fc 00 00 00 00
+ 00 fc 00 00 00 00 00 fc 00
+ 05 01 00 00 14 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 14 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ qcom,mdss-dsi-post-init-delay = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-truly-1080p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-truly-1080p-video.dtsi
new file mode 100644
index 0000000..b8a85d9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-truly-1080p-video.dtsi
@@ -0,0 +1,91 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_truly_1080_vid: qcom,mdss_dsi_truly_1080p_video {
+ qcom,mdss-dsi-panel-name = "truly 1080p video mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <96>;
+ qcom,mdss-dsi-h-back-porch = <64>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <16>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings =
+ [e6 38 26 00 68 6e 2a 3c 44 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 51 ff
+ 15 01 00 00 00 00 02 53 2c
+ 15 01 00 00 00 00 02 55 00
+ 05 01 00 00 78 00 02 11 00
+ 23 01 00 00 00 00 02 b0 00
+ 29 01 00 00 00 00 07 b3 14 00 00 00 00 00
+ 29 01 00 00 00 00 03 b6 3a d3
+ 29 01 00 00 00 00 03 c0 00 00
+ 29 01 00 00 00 00 23 c1 84 60 10 eb ff 6f ce ff ff 17 02
+ 58 73 ae b1 20 c6 ff ff 1f f3 ff 5f 10 10 10 10
+ 00 02 01 22 22 00 01
+ 29 01 00 00 00 00 08 c2 31 f7 80 06 08 00 00
+ 29 01 00 00 00 00 17 c4 70 00 00 00 00 04 00 00 00 0c 06
+ 00 00 00 00 00 04 00 00 00 0c 06
+ 29 01 00 00 00 00 29 c6 00 69 00 69 00 69 00 00 00 00 00
+ 69 00 69 00 69 10 19 07 00 01 00 69 00 69 00 69
+ 00 00 00 00 00 69 00 69 00 69 10 19 07
+ 29 01 00 00 00 00 0a cb 31 fc 3f 8c 00 00 00 00 c0
+ 23 01 00 00 00 00 02 cc 0b
+ 29 01 00 00 00 00 0b d0 11 81 bb 1e 1e 4c 19 19 0c 00
+ 29 01 00 00 00 00 1a d3 1b 33 bb bb b3 33 33 33 00 01 00
+ a0 d8 a0 0d 4e 4e 33 3b 22 72 07 3d bf 33
+ 29 01 00 00 00 00 08 d5 06 00 00 01 51 01 32
+ 29 01 00 00 00 00 1f c7 01 0a 11 18 26 33 3e 50 38 42 52
+ 60 67 6e 77 01 0a 11 18 26 33 3e 50 38 42 52 60
+ 67 6e 77
+ 29 01 00 00 14 00 14 c8 01 00 00 00 00 fc 00 00 00 00
+ 00 fc 00 00 00 00 00 fc 00
+ 05 01 00 00 14 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 14 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-dsi-post-init-delay = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-truly-wuxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-truly-wuxga-video.dtsi
new file mode 100644
index 0000000..b0d13d0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-truly-wuxga-video.dtsi
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_truly_wuxga_vid: qcom,mdss_dsi_truly_wuxga_video {
+ qcom,mdss-dsi-panel-name = "truly wuxga video mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1920>;
+ qcom,mdss-dsi-panel-height = <1200>;
+ qcom,mdss-dsi-h-front-porch = <96>;
+ qcom,mdss-dsi-h-back-porch = <64>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <16>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [f3 3a 26 00 6c 6e
+ 2c 3e 2f 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-on-command = [32 01 00 00 00 00 02 00 00];
+ qcom,mdss-dsi-off-command = [22 01 00 00 00 00 02 00 00];
+ qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 200>, <0 200>, <1 200>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/external-soc.dtsi b/arch/arm64/boot/dts/qcom/external-soc.dtsi
new file mode 100644
index 0000000..e6609c0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/external-soc.dtsi
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ mdm3: qcom,mdm3 {
+ compatible = "qcom,ext-sdxpoorwills";
+ cell-index = <0>;
+ #address-cells = <0>;
+ interrupt-parent = <&mdm3>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-names =
+ "err_fatal_irq",
+ "status_irq",
+ "mdm2ap_vddmin_irq";
+ /* modem attributes */
+ qcom,ramdump-delays-ms = <2000>;
+ qcom,ramdump-timeout-ms = <120000>;
+ qcom,vddmin-modes = "normal";
+ qcom,vddmin-drive-strength = <8>;
+ qcom,sfr-query;
+ qcom,sysmon-id = <20>;
+ qcom,ssctl-instance-id = <0x10>;
+ qcom,support-shutdown;
+ qcom,pil-force-shutdown;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp-1200p-overlay.dts
index 0a56c79..03ec7b5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "CDP 1200P";
+ qcom,board-id = <1 1>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
index a685380..96e364f 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP 1200P";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp-overlay.dts
index 0a56c79..145a40c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "CDP";
+ qcom,board-id = <1 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
index 1f78902..34c5f8f 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
index 87b8c74..8212cc8 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,3 +74,51 @@
status = "ok";
};
+
+#include "msm8953-mdss-panels.dtsi"
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_truly_1080_vid>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+ qcom,platform-te-gpio = <&tlmm 24 0>;
+ qcom,platform-reset-gpio = <&tlmm 61 0>;
+ qcom,platform-bklight-en-gpio = <&tlmm 59 0>;
+};
+
+&mdss_dsi1 {
+ status = "disabled";
+ qcom,dsi-pref-prim-pan = <&dsi_adv7533_1080p>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+ qcom,pluggable;
+ qcom,platform-te-gpio = <&tlmm 24 0>;
+ qcom,platform-reset-gpio = <&tlmm 61 0>;
+ qcom,platform-bklight-en-gpio = <&tlmm 59 0>;
+};
+
+&dsi_truly_1080_vid {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_truly_1080_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,ulps-enabled;
+ qcom,partial-update-enabled;
+ qcom,panel-roi-alignment = <2 2 4 2 1080 2>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
index 0a56c79..08a343e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Ext Codec MTP";
+ qcom,board-id= <8 1>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
index 3dfd848..b80583e 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm-overlay.dts
index 0a56c79..45fdf06 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Ext Codec RCM";
+ qcom,board-id = <21 1>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
index a81e212..d4224a4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec RCM";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
index 96e8591..5cf6eb2 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
@@ -16,6 +16,7 @@
qcom,pas-id = <13>;
qcom,firmware-name = "a506_zap";
memory-region = <&gpu_mem>;
+ qcom,mas-crypto = <&mas_crypto>;
clocks = <&clock_gcc clk_gcc_crypto_clk>,
<&clock_gcc clk_gcc_crypto_ahb_clk>,
<&clock_gcc clk_gcc_crypto_axi_clk>,
@@ -140,6 +141,9 @@
/* Context aware jump target power level */
qcom,ca-target-pwrlevel = <3>;
+ /* Enable gpu cooling device */
+ #cooling-cells = <2>;
+
/* GPU Mempools */
qcom,gpu-mempools {
#address-cells= <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-iot-mtp-overlay.dts
index 0a56c79..fec135d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "IOT MTP";
+ qcom,board-id = <8 2>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
index 524e7ca..39c76cc 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IOT MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ipc-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ipc-overlay.dts
index 0a56c79..3f957da 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "IPC";
+ qcom,board-id = <12 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
new file mode 100644
index 0000000..4fa5cd1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
@@ -0,0 +1,106 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi-panel-sim-video.dtsi"
+#include "dsi-panel-sim-dualmipi-video.dtsi"
+#include "dsi-panel-sim-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-truly-1080p-video.dtsi"
+#include "dsi-panel-truly-1080p-cmd.dtsi"
+#include "dsi-adv7533-1080p.dtsi"
+#include "dsi-adv7533-720p.dtsi"
+#include "dsi-panel-r69006-1080p-video.dtsi"
+#include "dsi-panel-r69006-1080p-cmd.dtsi"
+#include "dsi-panel-truly-wuxga-video.dtsi"
+#include "dsi-panel-lt8912-480p-video.dtsi"
+#include "dsi-panel-lt8912-1080p-video.dtsi"
+
+&soc {
+ dsi_panel_pwr_supply: dsi_panel_pwr_supply {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <2850000>;
+ qcom,supply-max-voltage = <2850000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <1800000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ };
+};
+
+&dsi_truly_1080_vid {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1a 08 09 05 03 04 a0];
+};
+
+&dsi_truly_1080_cmd {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1a 08 09 05 03 04 a0];
+};
+
+&dsi_r69006_1080p_video {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1b 08 09 05 03 04 a0];
+};
+
+&dsi_r69006_1080p_cmd{
+ qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1b 08 09 05 03 04 a0];
+};
+
+&dsi_adv7533_1080p {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1b 08 09 05 03 04 a0];
+};
+
+&dsi_adv7533_720p {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [1e 1b 04 06 02 03 04 a0
+ 1e 1b 04 06 02 03 04 a0
+ 1e 1b 04 06 02 03 04 a0
+ 1e 1b 04 06 02 03 04 a0
+ 1e 0e 04 05 02 03 04 a0];
+};
+
+&dsi_truly_wuxga_vid {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1c 08 09 05 03 04 a0];
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mdss-pll.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mdss-pll.dtsi
new file mode 100644
index 0000000..a279453
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-mdss-pll.dtsi
@@ -0,0 +1,84 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ mdss_dsi0_pll: qcom,mdss_dsi_pll@994400 {
+ compatible = "qcom,mdss_dsi_pll_8953";
+ label = "MDSS DSI 0 PLL";
+ cell-index = <0>;
+ #clock-cells = <1>;
+
+ reg = <0x01a94400 0x588>,
+ <0x0184d074 0x8>,
+ <0x01a94200 0x98>;
+ reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
+
+ gdsc-supply = <&gdsc_mdss>;
+
+ clocks = <&clock_gcc clk_gcc_mdss_ahb_clk>;
+ clock-names = "iface_clk";
+ clock-rate = <0>;
+
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
+ /* Memory region for passing dynamic refresh pll codes */
+ memory-region = <&dfps_data_mem>;
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+ };
+
+ mdss_dsi1_pll: qcom,mdss_dsi_pll@996400 {
+ compatible = "qcom,mdss_dsi_pll_8953";
+ label = "MDSS DSI 1 PLL";
+ cell-index = <1>;
+ #clock-cells = <1>;
+
+ reg = <0x01a96400 0x588>,
+ <0x0184d074 0x8>,
+ <0x01a96200 0x98>;
+ reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
+
+ gdsc-supply = <&gdsc_mdss>;
+
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
+ clocks = <&clock_gcc clk_gcc_mdss_ahb_clk>;
+ clock-names = "iface_clk";
+ clock-rate = <0>;
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mdss.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mdss.dtsi
new file mode 100644
index 0000000..310da1f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-mdss.dtsi
@@ -0,0 +1,434 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ mdss_mdp: qcom,mdss_mdp@1a00000 {
+ compatible = "qcom,mdss_mdp";
+ reg = <0x01a00000 0x90000>,
+ <0x01ab0000 0x1040>;
+ reg-names = "mdp_phys", "vbif_phys";
+ interrupts = <0 72 0>;
+ vdd-supply = <&gdsc_mdss>;
+
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_mdp";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 6400000>,
+ <22 512 0 6400000>;
+
+ /* Fudge factors */
+ qcom,mdss-ab-factor = <1 1>; /* 1 time */
+ qcom,mdss-ib-factor = <1 1>; /* 1 time */
+ qcom,mdss-clk-factor = <105 100>; /* 1.05 times */
+
+ qcom,max-mixer-width = <2048>;
+ qcom,max-pipe-width = <2048>;
+
+		/* VBIF QoS remapper settings */
+ qcom,mdss-vbif-qos-rt-setting = <1 2 2 2>;
+ qcom,mdss-vbif-qos-nrt-setting = <1 1 1 1>;
+
+ qcom,mdss-has-panic-ctrl;
+ qcom,mdss-per-pipe-panic-luts = <0x000f>,
+ <0xffff>,
+ <0xfffc>,
+ <0xff00>;
+
+ qcom,mdss-mdp-reg-offset = <0x00001000>;
+ qcom,max-bandwidth-low-kbps = <3400000>;
+ qcom,max-bandwidth-high-kbps = <3400000>;
+ qcom,max-bandwidth-per-pipe-kbps = <2300000>;
+ qcom,max-clk-rate = <400000000>;
+ qcom,mdss-default-ot-rd-limit = <32>;
+ qcom,mdss-default-ot-wr-limit = <16>;
+
+ /* Bandwidth limit settings */
+ qcom,max-bw-settings = <1 3400000>, /* Default */
+ <2 3100000>; /* Camera */
+
+ qcom,mdss-pipe-vig-off = <0x00005000>;
+ qcom,mdss-pipe-rgb-off = <0x00015000 0x00017000>;
+ qcom,mdss-pipe-dma-off = <0x00025000>;
+ qcom,mdss-pipe-cursor-off = <0x00035000>;
+
+ qcom,mdss-pipe-vig-xin-id = <0>;
+ qcom,mdss-pipe-rgb-xin-id = <1 5>;
+ qcom,mdss-pipe-dma-xin-id = <2>;
+ qcom,mdss-pipe-cursor-xin-id = <7>;
+
+ /* Offsets relative to "mdp_phys + mdp-reg-offset" address */
+		qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x2ac 0 0>;
+		qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x2ac 4 8>,
+ <0x2b4 4 8>;
+ qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x2ac 8 12>;
+ qcom,mdss-pipe-cursor-clk-ctrl-offsets = <0x3a8 16 15>;
+
+
+ qcom,mdss-ctl-off = <0x00002000 0x00002200 0x00002400>;
+ qcom,mdss-mixer-intf-off = <0x00045000 0x00046000>;
+ qcom,mdss-dspp-off = <0x00055000>;
+ qcom,mdss-wb-off = <0x00065000 0x00066000>;
+ qcom,mdss-intf-off = <0x0006b000 0x0006b800 0x0006c000>;
+ qcom,mdss-pingpong-off = <0x00071000 0x00071800>;
+ qcom,mdss-slave-pingpong-off = <0x00073000>;
+ qcom,mdss-cdm-off = <0x0007a200>;
+ qcom,mdss-wfd-mode = "intf";
+ qcom,mdss-highest-bank-bit = <0x1>;
+ qcom,mdss-has-decimation;
+ qcom,mdss-has-non-scalar-rgb;
+ qcom,mdss-has-rotator-downscale;
+ qcom,mdss-rot-downscale-min = <2>;
+ qcom,mdss-rot-downscale-max = <16>;
+ qcom,mdss-idle-power-collapse-enabled;
+ qcom,mdss-rot-block-size = <64>;
+ qcom,mdss-ppb-off = <0x00000330>;
+ qcom,mdss-has-pingpong-split;
+
+ clocks = <&clock_gcc clk_gcc_mdss_ahb_clk>,
+ <&clock_gcc clk_gcc_mdss_axi_clk>,
+ <&clock_gcc clk_mdp_clk_src>,
+ <&clock_gcc_mdss clk_mdss_mdp_vote_clk>,
+ <&clock_gcc clk_gcc_mdss_vsync_clk>;
+ clock-names = "iface_clk", "bus_clk", "core_clk_src",
+ "core_clk", "vsync_clk";
+
+ qcom,mdp-settings = <0x0506c 0x00000000>,
+ <0x1506c 0x00000000>,
+ <0x1706c 0x00000000>,
+ <0x2506c 0x00000000>;
+
+ qcom,vbif-settings = <0x0d0 0x00000010>;
+
+ qcom,regs-dump-mdp = <0x01000 0x01454>,
+ <0x02000 0x02064>,
+ <0x02200 0x02264>,
+ <0x02400 0x02464>,
+ <0x05000 0x05150>,
+ <0x05200 0x05230>,
+ <0x15000 0x15150>,
+ <0x17000 0x17150>,
+ <0x25000 0x25150>,
+ <0x35000 0x35150>,
+ <0x45000 0x452bc>,
+ <0x46000 0x462bc>,
+ <0x55000 0x5522c>,
+ <0x65000 0x652c0>,
+ <0x66000 0x662c0>,
+ <0x6b800 0x6ba68>,
+ <0x6c000 0x6c268>,
+ <0x71000 0x710d4>,
+ <0x71800 0x718d4>;
+
+ qcom,regs-dump-names-mdp = "MDP",
+ "CTL_0", "CTL_1", "CTL_2",
+ "VIG0_SSPP", "VIG0",
+ "RGB0_SSPP", "RGB1_SSPP",
+ "DMA0_SSPP",
+ "CURSOR0_SSPP",
+ "LAYER_0", "LAYER_1",
+ "DSPP_0",
+ "WB_0", "WB_2",
+ "INTF_1", "INTF_2",
+ "PP_0", "PP_1";
+
+ /* buffer parameters to calculate prefill bandwidth */
+ qcom,mdss-prefill-outstanding-buffer-bytes = <0>;
+ qcom,mdss-prefill-y-buffer-bytes = <0>;
+ qcom,mdss-prefill-scaler-buffer-lines-bilinear = <2>;
+ qcom,mdss-prefill-scaler-buffer-lines-caf = <4>;
+ qcom,mdss-prefill-post-scaler-buffer-pixels = <2048>;
+ qcom,mdss-prefill-pingpong-buffer-pixels = <4096>;
+
+ qcom,mdss-pp-offsets {
+ qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
+ qcom,mdss-sspp-vig-pcc-off = <0x1780>;
+ qcom,mdss-sspp-rgb-pcc-off = <0x380>;
+ qcom,mdss-sspp-dma-pcc-off = <0x380>;
+ qcom,mdss-lm-pgc-off = <0x3c0>;
+ qcom,mdss-dspp-pcc-off = <0x1700>;
+ qcom,mdss-dspp-pgc-off = <0x17c0>;
+ };
+
+ qcom,mdss-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+
+ qcom,mdss-hw-rt-bus {
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_hw_rt";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 1000>;
+ };
+
+ smmu_mdp_unsec: qcom,smmu_mdp_unsec_cb {
+ compatible = "qcom,smmu_mdp_unsec";
+ iommus = <&apps_iommu 0xC00 0>; /* For NS ctx bank */
+ };
+ smmu_mdp_sec: qcom,smmu_mdp_sec_cb {
+ compatible = "qcom,smmu_mdp_sec";
+ iommus = <&apps_iommu 0xC01 0>; /* For SEC Ctx Bank */
+ };
+
+ mdss_fb0: qcom,mdss_fb_primary {
+ cell-index = <0>;
+ compatible = "qcom,mdss-fb";
+ qcom,cont-splash-memory {
+ linux,contiguous-region = <&cont_splash_mem>;
+ };
+ };
+
+ mdss_fb1: qcom,mdss_fb_wfd {
+ cell-index = <1>;
+ compatible = "qcom,mdss-fb";
+ };
+
+ mdss_fb2: qcom,mdss_fb_secondary {
+ cell-index = <2>;
+ compatible = "qcom,mdss-fb";
+ };
+ };
+
+ mdss_dsi: qcom,mdss_dsi@0 {
+ compatible = "qcom,mdss-dsi";
+ hw-config = "single_dsi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ gdsc-supply = <&gdsc_mdss>;
+ vdda-supply = <&pm8953_s3>;
+ vcca-supply = <&pm8953_l3>;
+
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_dsi";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 1000>;
+
+ ranges = <0x1a94000 0x1a94000 0x400
+ 0x1a94400 0x1a94400 0x588
+ 0x193e000 0x193e000 0x30
+ 0x1a96000 0x1a96000 0x400
+ 0x1a96400 0x1a96400 0x588
+ 0x193e000 0x193e000 0x30>;
+
+ clocks = <&clock_gcc_mdss clk_mdss_mdp_vote_clk>,
+ <&clock_gcc clk_gcc_mdss_ahb_clk>,
+ <&clock_gcc clk_gcc_mdss_axi_clk>,
+ <&clock_gcc_mdss clk_ext_byte0_clk_src>,
+ <&clock_gcc_mdss clk_ext_byte1_clk_src>,
+ <&clock_gcc_mdss clk_ext_pclk0_clk_src>,
+ <&clock_gcc_mdss clk_ext_pclk1_clk_src>;
+ clock-names = "mdp_core_clk", "iface_clk", "bus_clk",
+ "ext_byte0_clk", "ext_byte1_clk", "ext_pixel0_clk",
+ "ext_pixel1_clk";
+
+ qcom,mmss-ulp-clamp-ctrl-offset = <0x20>;
+ qcom,mmss-phyreset-ctrl-offset = <0x24>;
+
+ qcom,mdss-fb-map-prim = <&mdss_fb0>;
+ qcom,mdss-fb-map-sec = <&mdss_fb2>;
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda";
+ qcom,supply-min-voltage = <1225000>;
+ qcom,supply-max-voltage = <1225000>;
+ qcom,supply-enable-load = <18160>;
+ qcom,supply-disable-load = <1>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vcca";
+ qcom,supply-min-voltage = <925000>;
+ qcom,supply-max-voltage = <925000>;
+ qcom,supply-enable-load = <17000>;
+ qcom,supply-disable-load = <32>;
+ };
+ };
+
+ mdss_dsi0: qcom,mdss_dsi_ctrl0@1a94000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ label = "MDSS DSI CTRL->0";
+ cell-index = <0>;
+ reg = <0x1a94000 0x400>,
+			<0x1a94400 0x588>,
+ <0x193e000 0x30>;
+ reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
+
+ qcom,timing-db-mode;
+ qcom,mdss-mdp = <&mdss_mdp>;
+ vdd-supply = <&pm8953_l17>;
+ vddio-supply = <&pm8953_l6>;
+
+ clocks = <&clock_gcc_mdss clk_gcc_mdss_byte0_clk>,
+ <&clock_gcc_mdss clk_gcc_mdss_pclk0_clk>,
+ <&clock_gcc clk_gcc_mdss_esc0_clk>,
+ <&clock_gcc_mdss clk_byte0_clk_src>,
+ <&clock_gcc_mdss clk_pclk0_clk_src>,
+ <&mdss_dsi0_pll clk_dsi0pll_byte_clk_mux>,
+ <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_mux>,
+ <&mdss_dsi0_pll clk_dsi0pll_byte_clk_src>,
+ <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_src>,
+ <&mdss_dsi0_pll
+ clk_dsi0pll_shadow_byte_clk_src>,
+ <&mdss_dsi0_pll
+ clk_dsi0pll_shadow_pixel_clk_src>;
+ clock-names = "byte_clk", "pixel_clk", "core_clk",
+ "byte_clk_rcg", "pixel_clk_rcg",
+ "pll_byte_clk_mux", "pll_pixel_clk_mux",
+ "pll_byte_clk_src", "pll_pixel_clk_src",
+ "pll_shadow_byte_clk_src",
+ "pll_shadow_pixel_clk_src";
+
+ qcom,platform-strength-ctrl = [ff 06
+ ff 06
+ ff 06
+ ff 06
+ ff 00];
+ qcom,platform-regulator-settings = [1d
+ 1d 1d 1d 1d];
+ qcom,platform-lane-config = [00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 8f];
+ };
+
+ mdss_dsi1: qcom,mdss_dsi_ctrl1@1a96000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ label = "MDSS DSI CTRL->1";
+ cell-index = <1>;
+ reg = <0x1a96000 0x400>,
+ <0x1a96400 0x588>,
+ <0x193e000 0x30>;
+ reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
+
+ qcom,mdss-mdp = <&mdss_mdp>;
+ vdd-supply = <&pm8953_l17>;
+ vddio-supply = <&pm8953_l6>;
+
+ clocks = <&clock_gcc_mdss clk_gcc_mdss_byte1_clk>,
+ <&clock_gcc_mdss clk_gcc_mdss_pclk1_clk>,
+ <&clock_gcc clk_gcc_mdss_esc1_clk>,
+ <&clock_gcc_mdss clk_byte1_clk_src>,
+ <&clock_gcc_mdss clk_pclk1_clk_src>,
+ <&mdss_dsi1_pll clk_dsi1pll_byte_clk_mux>,
+ <&mdss_dsi1_pll clk_dsi1pll_pixel_clk_mux>,
+ <&mdss_dsi1_pll clk_dsi1pll_byte_clk_src>,
+ <&mdss_dsi1_pll clk_dsi1pll_pixel_clk_src>,
+ <&mdss_dsi1_pll
+ clk_dsi1pll_shadow_byte_clk_src>,
+ <&mdss_dsi1_pll
+ clk_dsi1pll_shadow_pixel_clk_src>;
+ clock-names = "byte_clk", "pixel_clk", "core_clk",
+ "byte_clk_rcg", "pixel_clk_rcg",
+ "pll_byte_clk_mux", "pll_pixel_clk_mux",
+ "pll_byte_clk_src", "pll_pixel_clk_src",
+ "pll_shadow_byte_clk_src",
+ "pll_shadow_pixel_clk_src";
+
+ qcom,timing-db-mode;
+ qcom,platform-strength-ctrl = [ff 06
+ ff 06
+ ff 06
+ ff 06
+ ff 00];
+ qcom,platform-regulator-settings = [1d
+ 1d 1d 1d 1d];
+ qcom,platform-lane-config = [00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 8f];
+ };
+ };
+
+ qcom,mdss_wb_panel {
+ compatible = "qcom,mdss_wb";
+ qcom,mdss_pan_res = <640 640>;
+ qcom,mdss_pan_bpp = <24>;
+ qcom,mdss-fb-map = <&mdss_fb1>;
+ };
+
+ mdss_rotator: qcom,mdss_rotator {
+ compatible = "qcom,mdss_rotator";
+ qcom,mdss-wb-count = <1>;
+ qcom,mdss-has-downscale;
+ qcom,mdss-has-ubwc;
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rotator";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 6400000>,
+ <22 512 0 6400000>;
+
+ rot-vdd-supply = <&gdsc_mdss>;
+ qcom,supply-names = "rot-vdd";
+ qcom,mdss-has-reg-bus;
+ clocks = <&clock_gcc clk_gcc_mdss_ahb_clk>,
+ <&clock_gcc_mdss clk_mdss_rotator_vote_clk>;
+ clock-names = "iface_clk", "rot_core_clk";
+
+ qcom,mdss-rot-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
index 0a56c79..49956df 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "MTP";
+ qcom,board-id = <8 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
index 87b8c74..8212cc8 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,3 +74,51 @@
status = "ok";
};
+
+#include "msm8953-mdss-panels.dtsi"
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_truly_1080_vid>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+ qcom,platform-te-gpio = <&tlmm 24 0>;
+ qcom,platform-reset-gpio = <&tlmm 61 0>;
+ qcom,platform-bklight-en-gpio = <&tlmm 59 0>;
+};
+
+&mdss_dsi1 {
+ status = "disabled";
+ qcom,dsi-pref-prim-pan = <&dsi_adv7533_1080p>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+ qcom,pluggable;
+ qcom,platform-te-gpio = <&tlmm 24 0>;
+ qcom,platform-reset-gpio = <&tlmm 61 0>;
+ qcom,platform-bklight-en-gpio = <&tlmm 59 0>;
+};
+
+&dsi_truly_1080_vid {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_truly_1080_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,ulps-enabled;
+ qcom,partial-update-enabled;
+ qcom,panel-roi-alignment = <2 2 4 2 1080 2>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi632-cdp-s2.dts
new file mode 100644
index 0000000..78ff97f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi632-cdp-s2.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "sdm450-pmi632-cdp-s2.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. msm8953 + PMI632 CDP S2";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id = <1 2>;
+ qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi632.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi632.dts
index 0a56c79..2ffb0ab 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi632.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "sdm450-pmi632.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. msm8953 + PMI632 SOC";
+ compatible = "qcom,msm8953";
+ qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+ qcom,pmic-name = "PMI632";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
index a751d5d..ad3d3ed 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8937.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8937.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 CDP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
index 13aba62..5abf198 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8937.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8937.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
index 9d6be47..ee1c2a0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8937.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8937.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8937.dts
index 0a56c79..a9f64a4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 SOC";
+ compatible = "qcom,msm8953";
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+ qcom,pmic-name = "PMI8937";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dtsi
new file mode 100644
index 0000000..a208e1a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dtsi
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-type = <1>;
+ qcom,flash-source = <&pmi8937_flash0 &pmi8937_flash1>;
+ qcom,torch-source = <&pmi8937_torch0 &pmi8937_torch1>;
+ qcom,switch-source = <&pmi8937_switch>;
+ };
+};
+
+&usb3 {
+ vbus_dwc3-supply = <&smbcharger_charger_otg>;
+ extcon = <&pmi8937_charger>;
+};
+
+&pmi8937_charger {
+ qcom,external-typec;
+ qcom,typec-psy-name = "typec";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
index d2bb465..51622e0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8940.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8940.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 CDP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
index dbbb6b8..92c67fa 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8940.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8940.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
index 0fb793b..cb379b9 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8940.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8940.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8940.dts
index 0a56c79..e9c80a0d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 SOC";
+ compatible = "qcom,msm8953";
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+ qcom,pmic-name = "PMI8940";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dtsi
new file mode 100644
index 0000000..28fc0d7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dtsi
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-type = <1>;
+ qcom,flash-source = <&pmi8940_flash0 &pmi8940_flash1>;
+ qcom,torch-source = <&pmi8940_torch0 &pmi8940_torch1>;
+ qcom,switch-source = <&pmi8940_switch>;
+ };
+};
+
+&usb3 {
+ vbus_dwc3-supply = <&smbcharger_charger_otg>;
+ extcon = <&pmi8940_charger>;
+};
+
+&labibb {
+ status = "ok";
+ qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&ibb_regulator {
+ qcom,qpnp-ibb-discharge-resistor = <32>;
+};
+
+&pmi8940_charger {
+ qcom,external-typec;
+ qcom,typec-psy-name = "typec";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
index 016baf2..139ef1e 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,4 +29,40 @@
&usb3 {
vbus_dwc3-supply = <&smbcharger_charger_otg>;
+ extcon = <&pmi8950_charger>;
+};
+
+&pmi8950_charger {
+ qcom,external-typec;
+ qcom,typec-psy-name = "typec";
+};
+
+&mdss_dsi0 {
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+};
+
+&mdss_dsi1 {
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+};
+
+&dsi_panel_pwr_supply {
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ qcom,panel-supply-entry@3 {
+ reg = <3>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ qcom,supply-post-on-sleep = <10>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
index 0a56c79..7f5fc4e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-qrd-sku3.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "QRD SKU3";
+ qcom,board-id = <0xb 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
index 87b8c74..b29e447 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,45 @@
* GNU General Public License for more details.
*/
+
+&soc {
+ i2c@78b7000 { /* BLSP1 QUP3 */
+ status = "okay";
+ synaptics@4b {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x4b>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <65 0x2008>;
+ vdd_ana-supply = <&vdd_vreg>;
+ vcc_i2c-supply = <&pm8953_l6>;
+ synaptics,pwr-reg-name = "vdd_ana";
+ synaptics,bus-reg-name = "vcc_i2c";
+ synaptics,irq-gpio = <&tlmm 65 0x2008>;
+ synaptics,irq-on-state = <0>;
+ synaptics,irq-flags = <0x2008>;
+ synaptics,power-delay-ms = <200>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,max-y-for-2d = <1919>;
+ synaptics,cap-button-codes = <139 158 172>;
+ synaptics,vir-button-codes = <139 180 2000 320 160
+ 158 540 2000 320 160
+ 172 900 2000 320 160>;
+ synaptics,resume-in-workqueue;
+ /* Underlying clocks used by secure touch */
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+ };
+ };
+
+ vdd_vreg: vdd_vreg {
+ compatible = "regulator-fixed";
+ status = "ok";
+ regulator-name = "vdd_vreg";
+ };
+
+};
+
&blsp1_uart0 {
status = "ok";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-rcm-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-rcm-overlay.dts
index 0a56c79..dbb7f57 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-rcm-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "RCM";
+ qcom,board-id = <21 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-rcm.dts b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
index a3117ed..625a4d6 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 RCM";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi b/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
index 208ef41..d5a6f52 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
@@ -265,6 +265,14 @@
type = "passive";
};
};
+ cooling-maps {
+ gpu_cdev0 {
+ trip = <&gpu_trip0>;
+ cooling-device =
+ <&msm_gpu THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ };
+ };
};
deca-cpu-max-step {
@@ -589,6 +597,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&mdm_core_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&mdm_core_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -619,6 +631,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&qdsp_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&qdsp_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -649,6 +665,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&camera_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -679,6 +699,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu4_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu4_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -709,6 +733,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu5_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu5_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -739,6 +767,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu6_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu6_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -769,6 +801,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu7_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu7_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -799,6 +835,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&apc1_l2_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&apc1_l2_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -829,6 +869,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu0_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -859,6 +903,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu1_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -889,6 +937,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu2_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -919,6 +971,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&cpu3_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -949,6 +1005,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&apc0_l2_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&apc0_l2_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -979,6 +1039,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&gpu0_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&gpu0_trip>;
cooling-device = <&cx_cdev 0 0>;
@@ -1009,6 +1073,10 @@
cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
(THERMAL_MAX_LIMIT - 4)>;
};
+ gpu_vdd_cdev {
+ trip = <&gpu1_trip>;
+ cooling-device = <&msm_gpu 2 2>;
+ };
cx_vdd_cdev {
trip = <&gpu1_trip>;
cooling-device = <&cx_cdev 0 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953.dts
index 0a56c79..ddf2218 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 SOC";
+ compatible = "qcom,msm8953";
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+ qcom,pmic-name = "PMI8950";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index d05c461..38e4804 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -19,9 +19,10 @@
#include <dt-bindings/clock/msm-clocks-8953.h>
/ {
- model = "Qualcomm Technologies, Inc. MSM 8953";
+ model = "Qualcomm Technologies, Inc. MSM8953";
compatible = "qcom,msm8953";
qcom,msm-id = <293 0x0>;
+ qcom,msm-name = "MSM8953";
interrupt-parent = <&intc>;
chosen {
@@ -112,8 +113,9 @@
};
dfps_data_mem: dfps_data_mem@90000000 {
- reg = <0 0x90000000 0 0x1000>;
- label = "dfps_data_mem";
+ reg = <0 0x90000000 0 0x1000>;
+ label = "dfps_data_mem";
+ status = "disabled";
};
cont_splash_mem: splash_region@0x90001000 {
@@ -163,6 +165,8 @@
#include "msm8953-ion.dtsi"
#include "msm-arm-smmu-8953.dtsi"
#include "msm8953-gpu.dtsi"
+#include "msm8953-mdss.dtsi"
+#include "msm8953-mdss-pll.dtsi"
&soc {
#address-cells = <1>;
@@ -650,6 +654,19 @@
status = "disabled";
};
+ clock_gcc_mdss: qcom,gcc-mdss@1800000 {
+ compatible = "qcom,gcc-mdss-8953";
+ reg = <0x1800000 0x80000>;
+ reg-names = "cc_base";
+ clock-names = "pclk0_src", "pclk1_src",
+ "byte0_src", "byte1_src";
+ clocks = <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_mux>,
+ <&mdss_dsi1_pll clk_dsi1pll_pixel_clk_mux>,
+ <&mdss_dsi0_pll clk_dsi0pll_byte_clk_mux>,
+ <&mdss_dsi1_pll clk_dsi1pll_byte_clk_mux>;
+ #clock-cells = <1>;
+ };
+
clock_gcc: qcom,gcc@1800000 {
compatible = "qcom,gcc-8953";
reg = <0x1800000 0x80000>,
@@ -674,6 +691,8 @@
reg = <0x1800000 0x80000>;
reg-names = "cc_base";
vdd_gfx-supply = <&gfx_vreg_corner>;
+ clocks = <&clock_gcc clk_xo_clk_src>;
+ clock-names = "xo";
qcom,gfxfreq-corner =
< 0 0 >,
< 133330000 1 >, /* Min SVS */
@@ -1278,6 +1297,22 @@
status = "disabled";
};
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
+ qcom,msm_fastrpc {
+ compatible = "qcom,msm-fastrpc-legacy-compute";
+ qcom,msm_fastrpc_compute_cb {
+ compatible = "qcom,msm-fastrpc-legacy-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_iommu 0x2408 0x7>;
+ sids = <0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf>;
+ };
+ };
+
+
ipa_hw: qcom,ipa@07900000 {
compatible = "qcom,ipa";
reg = <0x07900000 0x4effc>, <0x07904000 0x26934>;
@@ -1726,6 +1761,7 @@
vdd-supply = <&gdsc_venus>;
qcom,proxy-reg-names = "vdd";
+ qcom,mas-crypto = <&mas_crypto>;
clocks = <&clock_gcc clk_gcc_venus0_vcodec0_clk>,
<&clock_gcc clk_gcc_venus0_ahb_clk>,
@@ -1756,6 +1792,89 @@
qcom,firmware-name = "venus";
memory-region = <&venus_mem>;
};
+
+ qcom,wcnss-wlan@0a000000 {
+ compatible = "qcom,wcnss_wlan";
+ reg = <0x0a000000 0x280000>,
+ <0x0b011008 0x04>,
+ <0x0a21b000 0x3000>,
+ <0x03204000 0x00000100>,
+ <0x03200800 0x00000200>,
+ <0x0a100400 0x00000200>,
+ <0x0a205050 0x00000200>,
+ <0x0a219000 0x00000020>,
+ <0x0a080488 0x00000008>,
+ <0x0a080fb0 0x00000008>,
+ <0x0a08040c 0x00000008>,
+ <0x0a0120a8 0x00000008>,
+ <0x0a012448 0x00000008>,
+ <0x0a080c00 0x00000001>;
+
+ reg-names = "wcnss_mmio", "wcnss_fiq",
+ "pronto_phy_base", "riva_phy_base",
+ "riva_ccu_base", "pronto_a2xb_base",
+ "pronto_ccpu_base", "pronto_saw2_base",
+ "wlan_tx_phy_aborts","wlan_brdg_err_source",
+ "wlan_tx_status", "alarms_txctl",
+ "alarms_tactl", "pronto_mcu_base";
+
+ interrupts = <0 145 0 0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ qcom,pronto-vddmx-supply = <&pm8953_s7_level_ao>;
+ qcom,pronto-vddcx-supply = <&pm8953_s2_level>;
+ qcom,pronto-vddpx-supply = <&pm8953_l5>;
+ qcom,iris-vddxo-supply = <&pm8953_l7>;
+ qcom,iris-vddrfa-supply = <&pm8953_l19>;
+ qcom,iris-vddpa-supply = <&pm8953_l9>;
+ qcom,iris-vdddig-supply = <&pm8953_l5>;
+
+ qcom,iris-vddxo-voltage-level = <1800000 0 1800000>;
+ qcom,iris-vddrfa-voltage-level = <1300000 0 1300000>;
+ qcom,iris-vddpa-voltage-level = <3300000 0 3300000>;
+ qcom,iris-vdddig-voltage-level = <1800000 0 1800000>;
+
+ qcom,vddmx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_TURBO
+ RPM_SMD_REGULATOR_LEVEL_NONE
+ RPM_SMD_REGULATOR_LEVEL_TURBO>;
+ qcom,vddcx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_NOM
+ RPM_SMD_REGULATOR_LEVEL_NONE
+ RPM_SMD_REGULATOR_LEVEL_TURBO>;
+ qcom,vddpx-voltage-level = <1800000 0 1800000>;
+
+ qcom,iris-vddxo-current = <10000>;
+ qcom,iris-vddrfa-current = <100000>;
+ qcom,iris-vddpa-current = <515000>;
+ qcom,iris-vdddig-current = <10000>;
+
+ qcom,pronto-vddmx-current = <0>;
+ qcom,pronto-vddcx-current = <0>;
+ qcom,pronto-vddpx-current = <0>;
+
+ pinctrl-names = "wcnss_default", "wcnss_sleep",
+ "wcnss_gpio_default";
+ pinctrl-0 = <&wcnss_default>;
+ pinctrl-1 = <&wcnss_sleep>;
+ pinctrl-2 = <&wcnss_gpio_default>;
+
+ gpios = <&tlmm 76 0>, <&tlmm 77 0>, <&tlmm 78 0>,
+ <&tlmm 79 0>, <&tlmm 80 0>;
+
+ clocks = <&clock_gcc clk_xo_wlan_clk>,
+ <&clock_gcc clk_rf_clk2>,
+ <&clock_debug clk_gcc_debug_mux>,
+ <&clock_gcc clk_wcnss_m_clk>;
+
+ clock-names = "xo", "rf_clk", "measure", "wcnss_debug";
+
+ qcom,has-autodetect-xo;
+ qcom,is-pronto-v3;
+ qcom,has-pronto-hw;
+ qcom,has-vsys-adc-channel;
+ qcom,has-a2xb-split-reg;
+ qcom,wcnss-adc_tm = <&pm8953_adc_tm>;
+ };
+
};
#include "pm8953-rpm-regulator.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index 502b2fe..903f170a 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -218,7 +218,6 @@
qcom,scale-function = <2>;
qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
- qcom,vadc-thermal-node;
};
chan@4f {
@@ -230,7 +229,6 @@
qcom,scale-function = <2>;
qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
- qcom,vadc-thermal-node;
};
chan@1d {
@@ -376,7 +374,7 @@
#clock-cells = <1>;
qcom,cxo-freq = <19200000>;
qcom,clkdiv-id = <1>;
- qcom,clkdiv-init-freq = <19200000>;
+ qcom,clkdiv-init-freq = <9600000>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index c991c9a..e5963ef 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -32,6 +32,162 @@
qcom,secondary-pon-reset;
};
+ pmi632_vadc: vadc@3100 {
+ compatible = "qcom,qpnp-vadc-hc";
+ reg = <0x3100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eoc-int-en-set";
+ qcom,adc-vdd-reference = <1875>;
+ qcom,adc-full-scale-code = <0x70e4>;
+
+ chan@0 {
+ label = "ref_gnd";
+ reg = <0>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@1 {
+ label = "ref_1250v";
+ reg = <1>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@84 {
+ label = "vbat_sns";
+ reg = <0x84>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@6 {
+ label = "die_temp";
+ reg = <6>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <19>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@7 {
+ label = "usb_in_i";
+ reg = <7>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <21>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@8 {
+ label = "usb_in_v";
+ reg = <8>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <8>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@9 {
+ label = "chg_temp";
+ reg = <9>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <18>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@4a {
+ label = "bat_therm";
+ reg = <0x4a>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <17>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@4b {
+ label = "bat_id";
+ reg = <0x4b>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <8>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+
+ chan@1e {
+ label = "mid_chg";
+ reg = <0x1e>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <3>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ };
+
pmi632_tz: qcom,temp-alarm@2400 {
compatible = "qcom,qpnp-temp-alarm";
reg = <0x2400 0x100>;
@@ -69,9 +225,8 @@
pmi632_vib: qcom,vibrator@5700 {
compatible = "qcom,qpnp-vibrator-ldo";
reg = <0x5700 0x100>;
- qcom,vib-ldo-volt-uv = <1504000>;
+ qcom,vib-ldo-volt-uv = <3000000>;
qcom,vib-overdrive-volt-uv = <3544000>;
- status = "disabled";
};
pmi632_pwm_1: pwm@b300 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 97be32de..bab5774 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -11,6 +11,8 @@
*/
#include <dt-bindings/msm/power-on.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
&spmi_bus {
qcom,pmi8950@2 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 6cf9a82..fcde397 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,30 @@
qcom,msm-id = <347 0x0>;
};
+&pil_modem_mem {
+ reg = <0 0x8b000000 0 0x3c00000>;
+};
+
+&pil_video_mem {
+ reg = <0 0x8ec00000 0 0x500000>;
+};
+
+&wlan_msa_mem {
+ reg = <0 0x8f100000 0 0x100000>;
+};
+
+&pil_cdsp_mem {
+ reg = <0 0x8f200000 0 0x800000>;
+};
+
+&pil_mba_mem {
+ reg = <0 0x8fa00000 0 0x200000>;
+};
+
+&pil_adsp_mem {
+ reg = <0 0x8fc00000 0 0x1e00000>;
+};
+
&soc {
qcom,rmnet-ipa {
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi b/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
new file mode 100644
index 0000000..944ca3b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
@@ -0,0 +1,271 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdm3 {
+ pinctrl-names = "default", "mdm_active", "mdm_suspend";
+ pinctrl-0 = <&ap2mdm_pon_reset_default>;
+ pinctrl-1 = <&ap2mdm_active &mdm2ap_active>;
+ pinctrl-2 = <&ap2mdm_sleep &mdm2ap_sleep>;
+ interrupt-map = <0 &tlmm 24 0x3
+ 1 &tlmm 21 0x3>;
+ qcom,mdm2ap-errfatal-gpio = <&tlmm 24 0x00>;
+ qcom,ap2mdm-errfatal-gpio = <&tlmm 23 0x00>;
+ qcom,mdm2ap-status-gpio = <&tlmm 22 0x00>;
+ qcom,ap2mdm-status-gpio = <&tlmm 21 0x00>;
+ qcom,ap2mdm-soft-reset-gpio = <&pm8998_gpios 10 0>;
+ qcom,mdm-link-info = "0304_00.01.00";
+ status = "ok";
+};
+
+&pm8998_gpios {
+ ap2mdm_pon_reset {
+ ap2mdm_pon_reset_default: ap2mdm_pon_reset_default {
+ /* MDM PON conrol*/
+ pins = "gpio10";
+ function = "normal";
+ output-low;
+ power-source = <0>;
+ };
+ };
+};
+
+&pil_modem {
+ status = "disabled";
+};
+
+&pcie0_wake_default {
+ config {
+ /delete-property/ bias-pull-down;
+ };
+};
+
+&led_flash_rear {
+ status = "disabled";
+};
+
+&led_flash_front {
+ status = "disabled";
+};
+
+&ois_rear {
+ status = "disabled";
+};
+
+&eeprom_rear {
+ status = "disabled";
+};
+
+&eeprom_rear_aux {
+ status = "disabled";
+};
+
+&eeprom_front {
+ status = "disabled";
+};
+
+&soc {
+ qcom,cam-req-mgr {
+ status = "disabled";
+ };
+
+ cam_csiphy0: qcom,csiphy@ac65000 {
+ status = "disabled";
+ };
+
+ cam_csiphy1: qcom,csiphy@ac66000 {
+ status = "disabled";
+ };
+
+ cam_csiphy2: qcom,csiphy@ac67000 {
+ status = "disabled";
+ };
+
+ cam_cci: qcom,cci@ac4a000 {
+ status = "disabled";
+
+ i2c_freq_100Khz: qcom,i2c_standard_mode {
+ status = "disabled";
+ };
+
+ i2c_freq_400Khz: qcom,i2c_fast_mode {
+ status = "disabled";
+ };
+
+ i2c_freq_custom: qcom,i2c_custom_mode {
+ status = "disabled";
+ };
+
+ i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+ status = "disabled";
+ };
+ };
+
+ qcom,cam_smmu {
+ status = "disabled";
+
+ msm_cam_smmu_ife {
+ ife_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ status = "disabled";
+ };
+ };
+ };
+
+ msm_cam_smmu_jpeg {
+ jpeg_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ status = "disabled";
+ };
+ };
+ };
+
+ msm_cam_smmu_icp {
+ icp_iova_mem_map: iova-mem-map {
+ iova-mem-region-firmware {
+ status = "disabled";
+ };
+
+ iova-mem-region-shared {
+ status = "disabled";
+ };
+
+ iova-mem-region-io {
+ status = "disabled";
+ };
+ };
+ };
+
+ msm_cam_smmu_cpas_cdm {
+ cpas_cdm_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ status = "disabled";
+ };
+ };
+ };
+
+ msm_cam_smmu_fd {
+ fd_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ status = "disabled";
+ };
+ };
+ };
+ };
+
+ qcom,cam-cpas@ac40000 {
+ status = "disabled";
+ };
+
+ qcom,cam-cdm-intf {
+ status = "disabled";
+ };
+
+ qcom,cpas-cdm0@ac48000 {
+ status = "disabled";
+ };
+
+ qcom,cam-isp {
+ status = "disabled";
+ };
+
+ cam_csid0: qcom,csid0@acb3000 {
+ status = "disabled";
+ };
+
+ cam_vfe0: qcom,vfe0@acaf000 {
+ status = "disabled";
+ };
+
+ cam_csid1: qcom,csid1@acba000 {
+ status = "disabled";
+ };
+
+ cam_vfe1: qcom,vfe1@acb6000 {
+ status = "disabled";
+ };
+
+ cam_csid_lite: qcom,csid-lite@acc8000 {
+ status = "disabled";
+ };
+
+ cam_vfe_lite: qcom,vfe-lite@acc4000 {
+ status = "disabled";
+ };
+
+ qcom,cam-icp {
+ status = "disabled";
+ };
+
+ cam_a5: qcom,a5@ac00000 {
+ status = "disabled";
+ };
+
+ cam_ipe0: qcom,ipe0 {
+ status = "disabled";
+ };
+
+ cam_ipe1: qcom,ipe1 {
+ status = "disabled";
+ };
+
+ cam_bps: qcom,bps {
+ status = "disabled";
+ };
+
+ clock_camcc: qcom,camcc@ad00000 {
+ status = "disabled";
+ };
+
+ qcom,cam-jpeg {
+ status = "disabled";
+ };
+
+ cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+ status = "disabled";
+ };
+
+ cam_jpeg_dma: qcom,jpegdma@0xac52000 {
+ status = "disabled";
+ };
+
+ qcom,cam-fd {
+ status = "disabled";
+ };
+
+ cam_fd: qcom,fd@ac5a000 {
+ status = "disabled";
+ };
+
+ qcom,cam-sensor@0 {
+ status = "disabled";
+ };
+
+ qcom,cam-sensor@1 {
+ status = "disabled";
+ };
+
+ qcom,cam-sensor@2 {
+ status = "disabled";
+ };
+
+ qcom,cam-sensor@3 {
+ status = "disabled";
+ };
+
+ cam_csiphy3: qcom,csiphy@ac68000 {
+ status = "disabled";
+ };
+};
+
+&wil6210 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
index 6357886..bae7ee1 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,14 +13,8 @@
/dts-v1/;
/plugin/;
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm845-sde-display.dtsi"
#include "sda845-v2-hdk.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-hdk-audio-overlay.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-mtp-sdxpoorwills-overlay.dts
similarity index 66%
rename from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
rename to arch/arm64/boot/dts/qcom/sda845-v2-mtp-sdxpoorwills-overlay.dts
index e1ec364..5377813 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-mtp-sdxpoorwills-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+
/dts-v1/;
/plugin/;
@@ -20,13 +21,14 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+#include "external-soc.dtsi"
+#include "sda845-sdxpoorwills.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDA845 v2 + SDXPOORWILLS MTP";
+ compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+ qcom,msm-id = <341 0x20000>;
+ qcom,board-id = <8 5>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-sdxpoorwills-overlay.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-sdxpoorwills-overlay.dts
index e1ec364..10e4a32 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-sdxpoorwills-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+
/dts-v1/;
/plugin/;
@@ -20,13 +21,14 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+#include "external-soc.dtsi"
+#include "sda845-sdxpoorwills.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDA845 v2.1 + SDXPOORWILLS CDP";
+ compatible = "qcom,sda845-cdp", "qcom,sda845", "qcom,cdp";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <1 2>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-sdxpoorwills-overlay.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-sdxpoorwills-overlay.dts
index e1ec364..09fa20f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-sdxpoorwills-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+
/dts-v1/;
/plugin/;
@@ -20,13 +21,14 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+#include "external-soc.dtsi"
+#include "sda845-sdxpoorwills.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDA845 v2.1 + SDXPOORWILLS MTP";
+ compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <8 5>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts
index 0a56c79..e12ad51 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-cdp-s2-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450-pmi632-cdp-s2.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "CDP S2";
+ compatible = "qcom,cdp";
+ qcom,board-id = <1 2>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-cdp.dts b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
index 3e06872..c55622a 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "sdm450.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
index 7fac030..9c8cd38 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "sdm450.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 IOT MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts
index 0a56c79..ae522a5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-mtp-s3-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "MTP S3";
+ compatible = "qcom,mtp";
+ qcom,board-id = <8 3>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
index 2524b80..040b4ba 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "sdm450.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
index 0a56c79..4f6e7f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "sdm450-pmi632.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI632 SOC";
+ compatible = "qcom,sdm450";
+ qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+ qcom,pmic-name = "PMI632";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
index 88a4ce7..413612d 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
@@ -21,3 +21,8 @@
&pm8953_typec {
status = "disabled";
};
+
+&pmi632_pon {
+ qcom,ps-hold-hard-reset-disable;
+ qcom,ps-hold-shutdown-disable;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
index 6a6a09e..4964a5f 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "sdm450.dtsi"
+#include "pmi8937.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8937.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI8937 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8937.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi8937.dts
index 0a56c79..700e950 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8937.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8937 SOC";
+ compatible = "qcom,sdm450";
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+ qcom,pmic-name = "PMI8937";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
index 3c4e802..9bed8d3 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "sdm450.dtsi"
+#include "pmi8940.dtsi"
#include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8940.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI8940 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8940.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi8940.dts
index 0a56c79..f50d177 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8940.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8940 SOC";
+ compatible = "qcom,sdm450";
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+ qcom,pmic-name = "PMI8940";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts
index 0a56c79..558c3c6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450-qrd-sku4.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "QRD SKU4";
+ compatible = "qcom,qrd";
+ qcom,board-id = <0xb 1>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-rcm.dts b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
index 4ab131a..1b7831b 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "sdm450.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 RCM";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450.dts
index 0a56c79..b829b81 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 SOC";
+ compatible = "qcom,sdm450";
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+ qcom,pmic-name = "PMI8950";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm450.dtsi b/arch/arm64/boot/dts/qcom/sdm450.dtsi
index 2f3e8c4..3e24714 100644
--- a/arch/arm64/boot/dts/qcom/sdm450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450.dtsi
@@ -17,6 +17,7 @@
model = "Qualcomm Technologies, Inc. SDM450";
compatible = "qcom,sdm450";
qcom,msm-id = <338 0x0>;
+ qcom,msm-name = "SDM450";
};
&CPU4 {
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
new file mode 100644
index 0000000..903b432
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm632.dtsi"
+#include "sdm450-pmi632-cdp-s2.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM632 + PMI632 + PMI8004 CDP S2";
+ compatible = "qcom,sdm632-cdp", "qcom,sdm632", "qcom,cdp";
+ qcom,board-id = <1 2>;
+ qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
new file mode 100644
index 0000000..6339c3c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM632 + PMI632 + PMI8004 MTP S3";
+ compatible = "qcom,sdm632-mtp", "qcom,sdm632", "qcom,mtp";
+ qcom,board-id = <8 3>;
+ qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm632-qrd-sku4.dts b/arch/arm64/boot/dts/qcom/sdm632-qrd-sku4.dts
new file mode 100644
index 0000000..9f33721
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-qrd-sku4.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm632.dtsi"
+#include "sdm450-qrd-sku4.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM632 + PMI632 + PMI8004 QRD SKU4";
+ compatible = "qcom,sdm632-qrd", "qcom,sdm632", "qcom,qrd";
+ qcom,board-id = <0xb 1>;
+ qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm632-rumi-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm632-rumi-overlay.dts
index 0a56c79..4d8ce5c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-rumi-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm632-rumi.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "RUMI";
+ compatible = "qcom,rumi";
+ qcom,board-id = <15 0>;
+ qcom,pmic-id = <0 0 0 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm632.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm632.dts
index 0a56c79..dab409c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm632.dtsi"
+#include "sdm450-pmi632.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM632 + PMI632 SOC";
+ compatible = "qcom,sdm450";
+ qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+ qcom,pmic-name = "PMI632";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm632.dtsi b/arch/arm64/boot/dts/qcom/sdm632.dtsi
index 3ebd50e..5100f28 100644
--- a/arch/arm64/boot/dts/qcom/sdm632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632.dtsi
@@ -18,5 +18,17 @@
model = "Qualcomm Technologies, Inc. SDM632";
compatible = "qcom,sdm632";
qcom,msm-id = <349 0x0>;
+ qcom,msm-name = "SDM632";
};
+&clock_gcc {
+ compatible = "qcom,gcc-sdm632";
+};
+
+&clock_debug {
+ compatible = "qcom,cc-debug-sdm632";
+};
+
+&clock_gcc_gfx {
+ compatible = "qcom,gcc-gfx-sdm632";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index 5dd5c0d..2b3cb39 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
*/
#include "sdm670-wcd.dtsi"
#include "sdm670-wsa881x.dtsi"
+#include "sdm670-lpi.dtsi"
#include <dt-bindings/clock/qcom,audio-ext-clk.h>
&tavil_snd {
@@ -59,6 +60,49 @@
"SpkrLeft", "SpkrRight";
};
+&tasha_snd {
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>;
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "hifi amp", "LINEOUT1",
+ "hifi amp", "LINEOUT2",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "AMIC3", "MIC BIAS2",
+ "MIC BIAS2", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2",
+ "MIC BIAS2", "ANCLeft Headset Mic",
+ "AMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Handset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,msm-mbhc-gnd-swh = <0>;
+ qcom,msm-mclk-freq = <9600000>;
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+ <&wsa881x_213>, <&wsa881x_214>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+};
+
&int_codec {
qcom,audio-routing =
"RX_BIAS", "INT_MCLK0",
@@ -222,6 +266,31 @@
pinctrl-0 = <&wcd_intr_default>;
};
+ clock_audio_native: audio_ext_clk_native {
+ status = "disabled";
+ compatible = "qcom,audio-ref-clk";
+ #clock-cells = <1>;
+ qcom,lpass-mclk-id = <0x116>;
+ qcom,codec-mclk-clk-freq = <11289600>;
+ qcom,audio-ref-clk-gpio = <&lpi_tlmm 19 0>;
+ pinctrl-names = "sleep", "active";
+ pinctrl-0 = <&lpi_mclk0_sleep>;
+ pinctrl-1 = <&lpi_mclk0_active>;
+ };
+
+ clock_audio: audio_ext_clk {
+ status = "disabled";
+ compatible = "qcom,audio-ref-clk";
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&tasha_mclk_default>;
+ pinctrl-1 = <&tasha_mclk_default>;
+ qcom,audio-ref-clk-gpio = <&pm660_gpios 3 0>;
+ clock-names = "osr_clk";
+ clocks = <&pm660_div_clk>;
+ qcom,node_has_rpm_clock;
+ #clock-cells = <1>;
+ };
+
clock_audio_lnbb: audio_ext_clk_lnbb {
status = "disabled";
compatible = "qcom,audio-ref-clk";
@@ -254,6 +323,40 @@
};
&slim_aud {
+ wcd9335: tasha_codec {
+ status = "disabled";
+ compatible = "qcom,tasha-slim-pgd";
+ elemental-addr = [00 01 a0 01 17 02];
+
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29
+ 30>;
+
+ qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+ clock-names = "wcd_clk", "wcd_native_clk";
+ clocks = <&clock_audio AUDIO_PMI_CLK>,
+ <&clock_audio_native AUDIO_LPASS_MCLK>;
+
+ cdc-vdd-mic-bias-supply = <&pm660l_bob>;
+ qcom,cdc-vdd-mic-bias-voltage = <3312000 3312000>;
+ qcom,cdc-vdd-mic-bias-current = <30400>;
+
+ qcom,cdc-static-supplies = "cdc-vdd-mic-bias";
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+ qcom,cdc-slim-ifd = "tasha-slim-ifd";
+ qcom,cdc-slim-ifd-elemental-addr = [00 00 a0 01 17 02];
+ qcom,cdc-dmic-sample-rate = <4800000>;
+ qcom,cdc-mad-dmic-rate = <600000>;
+ };
+
wcd934x_cdc: tavil_codec {
status = "disabled";
compatible = "qcom,tavil-slim-pgd";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index bda44cc..faaf644 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,10 +12,6 @@
*/
#include "msm-audio-lpass.dtsi"
-#include "sdm670-wcd.dtsi"
-#include "sdm670-wsa881x.dtsi"
-#include "sdm670-lpi.dtsi"
-#include <dt-bindings/clock/qcom,audio-ext-clk.h>
&msm_audio_ion {
iommus = <&apps_smmu 0x1801 0x0>;
@@ -23,6 +19,13 @@
};
&soc {
+ audio_load_mod {
+ compatible = "qcom,audio-load-mod";
+ audio_test_mod {
+ compatible = "qcom,audio-test-mod";
+ };
+ };
+
qcom,avtimer@62cf700c {
compatible = "qcom,avtimer";
reg = <0x62cf700c 0x4>,
@@ -99,6 +102,72 @@
"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
};
+ tasha_snd: sound-tasha {
+ status = "disabled";
+ compatible = "qcom,sdm670-asoc-snd-tasha";
+ qcom,model = "sdm670-tasha-snd-card";
+ qcom,wcn-btfm;
+ qcom,mi2s-audio-intf;
+ qcom,auxpcm-audio-intf;
+ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+ <&pcm_noirq>, <&cpe3>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-pcm-dsp.2", "msm-voip-dsp",
+ "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-lsm-client",
+ "msm-pcm-routing", "msm-cpe-lsm",
+ "msm-compr-dsp", "msm-pcm-dsp-noirq",
+ "msm-cpe-lsm.3";
+ asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ <&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
+ <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+ <&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+ <&dai_quin_auxpcm>,
+ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+ <&sb_4_rx>, <&sb_4_tx>, <&sb_5_rx>, <&sb_5_tx>,
+ <&sb_6_rx>, <&sb_7_rx>, <&sb_7_tx>,
+ <&sb_8_rx>, <&sb_8_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
+ <&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
+ asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+ "msm-dai-q6-mi2s.4",
+ "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+ "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+ "msm-dai-q6-auxpcm.5",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.16394", "msm-dai-q6-dev.16395",
+ "msm-dai-q6-dev.16396",
+ "msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399",
+ "msm-dai-q6-dev.16400", "msm-dai-q6-dev.16401",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913",
+ "msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
+ };
+
int_codec: sound {
status = "okay";
compatible = "qcom,sdm670-asoc-snd";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
index 4f5a9b1..aa7cc97 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -669,35 +669,6 @@
qcom,bcms = <&bcm_cn0>;
};
- mas_qhm_tic: mas-qhm-tic {
- cell-id = <MSM_BUS_MASTER_TIC>;
- label = "mas-qhm-tic";
- qcom,buswidth = <4>;
- qcom,agg-ports = <1>;
- qcom,connections = <&slv_qhs_tlmm_south
- &slv_qhs_camera_cfg &slv_qhs_sdc4
- &slv_qhs_sdc2 &slv_qhs_mnoc_cfg
- &slv_qhs_ufs_mem_cfg &slv_qhs_glm
- &slv_qhs_pdm &slv_qhs_a2_noc_cfg
- &slv_qhs_qdss_cfg &slv_qhs_display_cfg
- &slv_qhs_tcsr &slv_qhs_dcc_cfg
- &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
- &slv_qhs_snoc_cfg &slv_qhs_phy_refgen_south
- &slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
- &slv_qhs_tsif &slv_qhs_compute_dsp_cfg
- &slv_qhs_aop &slv_qhs_qupv3_north
- &slv_srvc_cnoc &slv_qhs_usb3_0
- &slv_qhs_ipa &slv_qhs_cpr_cx
- &slv_qhs_a1_noc_cfg &slv_qhs_aoss
- &slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
- &slv_qhs_emmc_cfg &slv_qhs_qupv3_south
- &slv_qhs_spdm &slv_qhs_crypto0_cfg
- &slv_qhs_pimem_cfg &slv_qhs_tlmm_north
- &slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
- qcom,bus-dev = <&fab_config_noc>;
- qcom,bcms = <&bcm_cn0>;
- };
-
mas_qnm_snoc: mas-qnm-snoc {
cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
label = "mas-qnm-snoc";
@@ -727,36 +698,6 @@
qcom,bcms = <&bcm_cn0>;
};
- mas_xm_qdss_dap: mas-xm-qdss-dap {
- cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
- label = "mas-xm-qdss-dap";
- qcom,buswidth = <8>;
- qcom,agg-ports = <1>;
- qcom,connections = <&slv_qhs_tlmm_south
- &slv_qhs_camera_cfg
- &slv_qhs_sdc4
- &slv_qhs_sdc2 &slv_qhs_mnoc_cfg
- &slv_qhs_ufs_mem_cfg &slv_qhs_glm
- &slv_qhs_pdm &slv_qhs_a2_noc_cfg
- &slv_qhs_qdss_cfg &slv_qhs_display_cfg
- &slv_qhs_tcsr &slv_qhs_dcc_cfg
- &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
- &slv_qhs_snoc_cfg &slv_qhs_phy_refgen_south
- &slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
- &slv_qhs_tsif &slv_qhs_compute_dsp_cfg
- &slv_qhs_aop &slv_qhs_qupv3_north
- &slv_srvc_cnoc &slv_qhs_usb3_0
- &slv_qhs_ipa &slv_qhs_cpr_cx
- &slv_qhs_a1_noc_cfg &slv_qhs_aoss
- &slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
- &slv_qhs_qupv3_south &slv_qhs_spdm
- &slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg
- &slv_qhs_tlmm_north &slv_qhs_clk_ctl
- &slv_qhs_imem_cfg>;
- qcom,bus-dev = <&fab_config_noc>;
- qcom,bcms = <&bcm_cn0>;
- };
-
mas_qhm_cnoc: mas-qhm-cnoc {
cell-id = <MSM_BUS_MASTER_CNOC_DC_NOC>;
label = "mas-qhm-cnoc";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 1f40e20..96c4640 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -229,6 +229,7 @@
qcom,cam_smmu {
compatible = "qcom,msm-cam-smmu";
status = "ok";
+ non-fatal-fault-disabled;
msm_cam_smmu_lrme {
compatible = "qcom,msm-cam-smmu-cb";
@@ -891,7 +892,7 @@
<0 0 200000000 0 0 0 0 600000000>;
clock-cntl-level = "svs", "turbo";
fw_name = "CAMERA_ICP.elf";
- ubwc-cfg = <0x77 0x1DF>;
+ ubwc-cfg = <0x73 0x1CF>;
status = "ok";
};
@@ -912,12 +913,12 @@
<&clock_camcc CAM_CC_IPE_0_CLK>,
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
- clock-rates = <0 0 0 0 240000000>,
+ clock-rates =
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 538000000>,
<0 0 0 0 600000000>;
- clock-cntl-level = "lowsvs", "svs",
+ clock-cntl-level = "svs",
"svs_l1", "nominal", "turbo";
status = "ok";
};
@@ -939,12 +940,12 @@
<&clock_camcc CAM_CC_IPE_1_CLK>,
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
- clock-rates = <0 0 0 0 240000000>,
+ clock-rates =
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 538000000>,
<0 0 0 0 600000000>;
- clock-cntl-level = "lowsvs", "svs",
+ clock-cntl-level = "svs",
"svs_l1", "nominal", "turbo";
status = "ok";
};
@@ -966,12 +967,12 @@
<&clock_camcc CAM_CC_BPS_CLK>,
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
- clock-rates = <0 0 0 0 200000000>,
+ clock-rates =
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 600000000>,
<0 0 0 0 600000000>;
- clock-cntl-level = "lowsvs", "svs",
+ clock-cntl-level = "svs",
"svs_l1", "nominal", "turbo";
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 108eda5..6dc5c2c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -169,6 +169,15 @@
};
port@1 {
+ reg = <6>;
+ funnel_swao_in_sensor_etm0: endpoint {
+ slave-mode;
+ remote-endpoint=
+ <&sensor_etm0_out_funnel_swao>;
+ };
+ };
+
+ port@2 {
reg = <7>;
funnel_swao_in_tpda_swao: endpoint {
slave-mode;
@@ -2059,6 +2068,20 @@
};
};
+ sensor_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-sensor-etm0";
+ qcom,inst-id = <8>;
+
+ port {
+ sensor_etm0_out_funnel_swao: endpoint {
+ remote-endpoint =
+ <&funnel_swao_in_sensor_etm0>;
+ };
+ };
+ };
+
audio_etm0 {
compatible = "qcom,coresight-remote-etm";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index f287b21..75a2762 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
compatible = "qcom,pil-tz-generic";
qcom,pas-id = <13>;
qcom,firmware-name = "a615_zap";
+ memory-region = <&pil_gpu_mem>;
};
msm_bus: qcom,kgsl-busmon{
@@ -46,9 +47,12 @@
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
status = "ok";
- reg = <0x5000000 0x40000
- 0x780000 0x6300>;
- reg-names = "kgsl_3d0_reg_memory", "qfprom_memory";
+ reg = <0x5000000 0x40000>,
+ <0x5061000 0x800>,
+ <0x780000 0x6300>;
+ reg-names = "kgsl_3d0_reg_memory",
+ "kgsl_3d0_cx_dbgc_memory",
+ "qfprom_memory";
interrupts = <0 300 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
index c76fbce..948c51d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,34 @@
gpio-controller;
#gpio-cells = <2>;
+ lpi_mclk0_active: lpi_mclk0_active {
+ mux {
+ pins = "gpio19";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <8>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ lpi_mclk0_sleep: lpi_mclk0_sleep {
+ mux {
+ pins = "gpio19";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <2>;
+ bias-disable;
+ bias-pull-down;
+ };
+ };
+
cdc_pdm_clk_active: cdc_pdm_clk_active {
mux {
pins = "gpio18";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index a85060e..5684e19 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1505,6 +1505,67 @@
};
};
+ /* Tasha WSA speaker reset pins */
+ tasha_spkr_1_sd_n {
+ tasha_spkr_1_sd_n_sleep: tasha_spkr_1_sd_n_sleep {
+ mux {
+ pins = "gpio66";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ tasha_spkr_1_sd_n_active: tasha_spkr_1_sd_n_active {
+ mux {
+ pins = "gpio66";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <16>; /* 16 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ tasha_spkr_2_sd_n {
+ tasha_spkr_2_sd_n_sleep: tasha_spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio65";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ tasha_spkr_2_sd_n_active: tasha_spkr_2_sd_n_active {
+ mux {
+ pins = "gpio65";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <16>; /* 16 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
wcd_buck_vsel {
wcd_buck_vsel_default: wcd_buck_vsel_default{
mux {
@@ -1995,6 +2056,19 @@
};
};
+&pm660_gpios {
+ tasha_mclk {
+ tasha_mclk_default: tasha_mclk_default{
+ pins = "gpio3";
+ function = "func1";
+ qcom,drive-strength = <2>;
+ power-source = <0>;
+ bias-disable;
+ output-low;
+ };
+ };
+};
+
&pm660l_gpios {
camera_rear_dvdd_en {
camera_rear_dvdd_en_default: camera_rear_dvdd_en_default {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp-overlay.dts
new file mode 100644
index 0000000..b7cb820
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp-overlay.dts
@@ -0,0 +1,74 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-cdp.dtsi"
+#include "pm660a.dtsi"
+#include "sdm670-tasha-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A + Tasha Codec CDP";
+ compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+ qcom,msm-id = <336 0x0>;
+ qcom,board-id = <1 5>;
+ qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+ <0x0001001b 0x0002001a 0x0 0x0>,
+ <0x0001001b 0x0202001a 0x0 0x0>;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_panel_pwr_supply_labibb_amoled {
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@3 {
+ reg = <3>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4000000>;
+ qcom,supply-max-voltage = <6300000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@4 {
+ reg = <4>;
+ qcom,supply-name = "oledb";
+ qcom,supply-min-voltage = <5000000>;
+ qcom,supply-max-voltage = <8100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+ qcom,dsi-display-active;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ oledb-supply = <&pm660a_oledb>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp.dts
new file mode 100644
index 0000000..1922b38
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp.dts
@@ -0,0 +1,68 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "pm660a.dtsi"
+#include "sdm670-tasha-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A Tasha Codec CDP";
+ compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+ qcom,board-id = <1 5>;
+ qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+ <0x0001001b 0x0002001a 0x0 0x0>,
+ <0x0001001b 0x0202001a 0x0 0x0>;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_panel_pwr_supply_labibb_amoled {
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@3 {
+ reg = <3>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4000000>;
+ qcom,supply-max-voltage = <6300000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@4 {
+ reg = <4>;
+ qcom,supply-name = "oledb";
+ qcom,supply-min-voltage = <5000000>;
+ qcom,supply-max-voltage = <8100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+ qcom,dsi-display-active;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ oledb-supply = <&pm660a_oledb>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index 5d3975c..ab49970 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -175,8 +175,8 @@
io-channel-names = "rradc_batt_id",
"rradc_die_temp";
qcom,rradc-base = <0x4500>;
- qcom,fg-esr-timer-awake = <96 96>;
- qcom,fg-esr-timer-asleep = <256 256>;
+ qcom,fg-esr-timer-awake = <64 96>;
+ qcom,fg-esr-timer-asleep = <224 256>;
qcom,fg-esr-timer-charging = <0 96>;
qcom,cycle-counter-en;
qcom,hold-soc-while-full;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index d2a6640..9a7e742 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -307,6 +307,12 @@
qcom,dsi-display-active;
};
+&dsi_panel_pwr_supply {
+ qcom,panel-supply-entry@2 {
+ qcom,supply-post-off-sleep = <5>;
+ };
+};
+
&pm660l_wled {
status = "okay";
qcom,led-strings-list = [00 01];
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 62db873..6b24593 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -241,7 +241,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LPM
RPMH_REGULATOR_MODE_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm660_l6: regulator-pm660-l6 {
regulator-name = "pm660_l6";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -298,7 +298,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LPM
RPMH_REGULATOR_MODE_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm660_l9: regulator-pm660-l9 {
regulator-name = "pm660_l9";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -472,7 +472,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LPM
RPMH_REGULATOR_MODE_HPM>;
- qcom,mode-threshold-currents = <0 1>;
+ qcom,mode-threshold-currents = <0 10000>;
pm660_l19: regulator-pm660-l19 {
regulator-name = "pm660_l19";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index ce88d14..007f937 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -826,8 +826,15 @@
};
&dsi_dual_nt36850_truly_cmd {
- qcom,mdss-dsi-t-clk-post = <0x0E>;
+ qcom,mdss-dsi-t-clk-post = <0x28>;
qcom,mdss-dsi-t-clk-pre = <0x30>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
diff --git a/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-audio-overlay.dtsi
new file mode 100644
index 0000000..80d3879
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-audio-overlay.dtsi
@@ -0,0 +1,76 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-audio-overlay.dtsi"
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&msm_sdw_codec {
+	status = "disabled";
+};
+
+&cdc_pdm_gpios {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&cdc_dmic_gpios {
+	status = "disabled";
+};
+
+&cdc_sdw_gpios {
+	status = "disabled";
+};
+
+&wsa_spkr_en1 {
+	status = "disabled";
+};
+
+&wsa_spkr_en2 {
+	status = "disabled";
+};
+
+&qupv3_se8_spi {
+	status = "disabled";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&slim_aud {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&clock_audio_native {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp-overlay.dts
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp-overlay.dts
index e1ec364..af8244a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,14 +19,15 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "sdm670-tasha-codec.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L Tasha Codec CDP";
+ compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+ qcom,msm-id = <336 0x0>;
+ qcom,board-id = <1 5>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0102001a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp.dts
new file mode 100644
index 0000000..55d2fc2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp.dts
@@ -0,0 +1,27 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "sdm670-tasha-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L Tasha Codec CDP";
+ compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+ qcom,board-id = <1 5>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0102001a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-tasha-codec.dtsi b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec.dtsi
new file mode 100644
index 0000000..1fc0fd5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec.dtsi
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-tasha-codec-audio-overlay.dtsi"
+
+&int_codec {
+ status = "disabled";
+};
+
+&pm660_div_clk {
+ status = "okay";
+};
+
+&tasha_snd {
+ status = "okay";
+};
+
+&slim_aud {
+ status = "okay";
+};
+
+&dai_slim {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
index 8cbc84f..a0fa9cf 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -471,7 +471,7 @@
};
gpu_vdd_cdev {
trip = <&aoss0_trip>;
- cooling-device = <&msm_gpu 4 4>;
+ cooling-device = <&msm_gpu 0 0>;
};
cx_vdd_cdev {
trip = <&aoss0_trip>;
@@ -496,606 +496,6 @@
};
};
- cpu0-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 1>;
- tracks-low;
- trips {
- cpu0_trip: cpu0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu1-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 2>;
- tracks-low;
- trips {
- cpu1_trip: cpu1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu2-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 3>;
- tracks-low;
- trips {
- cpu2_trip: cpu2-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu3-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 4>;
- tracks-low;
- trips {
- cpu3_trip: cpu3-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpuss-0-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 5>;
- tracks-low;
- trips {
- l3_0_trip: l3-0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpuss-1-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 6>;
- tracks-low;
- trips {
- l3_1_trip: l3-1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu4-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 7>;
- tracks-low;
- trips {
- cpu4_trip: cpu4-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpu4_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu5-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 8>;
- tracks-low;
- trips {
- cpu5_trip: cpu5-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpu5_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu0-gold-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 9>;
- tracks-low;
- trips {
- cpug0_trip: cpug0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- cpu1-gold-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 10>;
- tracks-low;
- trips {
- cpug1_trip: cpug1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- gpu0-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 11>;
- tracks-low;
- trips {
- gpu0_trip_l: gpu0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- gpu1-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 12>;
- tracks-low;
- trips {
- gpu1_trip_l: gpu1-trip_l {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
aoss1-lowf {
polling-delay-passive = <0>;
polling-delay = <0>;
@@ -1121,7 +521,7 @@
};
gpu_vdd_cdev {
trip = <&aoss1_trip>;
- cooling-device = <&msm_gpu 4 4>;
+ cooling-device = <&msm_gpu 0 0>;
};
cx_vdd_cdev {
trip = <&aoss1_trip>;
@@ -1146,356 +546,6 @@
};
};
- mdm-dsp-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 1>;
- tracks-low;
- trips {
- dsp_trip: dsp-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- ddr-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 2>;
- tracks-low;
- trips {
- ddr_trip: ddr-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- wlan-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 3>;
- tracks-low;
- trips {
- wlan_trip: wlan-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- compute-hvx-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 4>;
- tracks-low;
- trips {
- hvx_trip: hvx-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- camera-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 5>;
- tracks-low;
- trips {
- camera_trip: camera-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- mmss-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 6>;
- tracks-low;
- trips {
- mmss_trip: mmss-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
- mdm-core-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 7>;
- tracks-low;
- trips {
- mdm_trip: mdm-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&CPU0 2 2>;
- };
- cpu6_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
- (THERMAL_MAX_LIMIT-8)>;
- };
- gpu_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&msm_gpu 4 4>;
- };
- cx_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&cx_cdev 0 0>;
- };
- mx_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&mx_cdev 0 0>;
- };
- modem_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&modem_vdd 0 0>;
- };
- adsp_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&adsp_vdd 0 0>;
- };
- cdsp_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&cdsp_vdd 0 0>;
- };
- };
- };
-
lmh-dcvs-01 {
polling-delay-passive = <0>;
polling-delay = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
index 2ce829d..3df6d09 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,8 +27,6 @@
};
&usb0 {
- /delete-property/ iommus;
- /delete-property/ qcom,smmu-s1-bypass;
qcom,pm-qos-latency = <601>; /* CPU-CLUSTER-WFI-LVL latency +1 */
extcon = <0>, <0>, <&eud>, <0>, <0>;
};
@@ -37,6 +35,28 @@
vdd-supply = <&pm660l_l1>;
vdda18-supply = <&pm660_l10>;
vdda33-supply = <&pm660l_l7>;
+ qcom,qusb-phy-init-seq =
+ /* <value reg_offset> */
+ <0x23 0x210 /* PWR_CTRL1 */
+ 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+ 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+ 0x80 0x2c /* PLL_CMODE */
+ 0x0a 0x184 /* PLL_LOCK_DELAY */
+ 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
+ 0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+ 0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x21 0x214 /* PWR_CTRL2 */
+ 0x08 0x220 /* IMP_CTRL1 */
+ 0x58 0x224 /* IMP_CTRL2 */
+ 0x45 0x240 /* TUNE1 */
+ 0x29 0x244 /* TUNE2 */
+ 0xca 0x248 /* TUNE3 */
+ 0x04 0x24c /* TUNE4 */
+ 0x03 0x250 /* TUNE5 */
+ 0x00 0x23c /* CHG_CTRL2 */
+ 0x22 0x210>; /* PWR_CTRL1 */
+ nvmem-cells = <&minor_rev>;
+ nvmem-cell-names = "minor_rev";
};
&usb_qmp_dp_phy {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
index f8d2a04..d7120d0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -164,4 +164,20 @@
pinctrl-1 = <&hph_en1_wcd_sleep>;
};
};
+
+ tasha_codec {
+ wsa_spkr_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&tasha_spkr_1_sd_n_active>;
+ pinctrl-1 = <&tasha_spkr_1_sd_n_sleep>;
+ };
+
+ wsa_spkr_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&tasha_spkr_2_sd_n_active>;
+ pinctrl-1 = <&tasha_spkr_2_sd_n_sleep>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
index c35850d..5dfe244 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,4 +42,36 @@
};
};
};
+
+ tasha_codec {
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_211: wsa881x@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+ };
+
+ wsa881x_212: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+ };
+
+ wsa881x_213: wsa881x@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+ };
+
+ wsa881x_214: wsa881x@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index dc03f88..45a7dfc 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -529,6 +529,30 @@
reg = <0 0x93e00000 0 0x1e00000>;
};
+ pil_ipa_fw_mem: ips_fw_region@0x95c00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95c00000 0 0x10000>;
+ };
+
+ pil_ipa_gsi_mem: ipa_gsi_region@0x95c10000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95c10000 0 0x5000>;
+ };
+
+ pil_gpu_mem: gpu_region@0x95c15000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95c15000 0 0x2000>;
+ };
+
+ qseecom_mem: qseecom_region@0x9e400000 {
+ compatible = "shared-dma-pool";
+ no-map;
+ reg = <0 0x9e400000 0 0x1400000>;
+ };
+
adsp_mem: adsp_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -537,14 +561,6 @@
size = <0 0xc00000>;
};
- qseecom_mem: qseecom_region {
- compatible = "shared-dma-pool";
- alloc-ranges = <0 0x00000000 0 0xffffffff>;
- no-map;
- alignment = <0 0x400000>;
- size = <0 0x1400000>;
- };
-
qseecom_ta_mem: qseecom_ta_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -1082,7 +1098,7 @@
vdd_pwrcl_mx_ao-supply = <&pm660l_s1_level_ao>;
qcom,mx-turbo-freq = <1440000000 1708000000 3300000001>;
- l3-devs = <&l3_cpu0 &l3_cpu6>;
+ l3-devs = <&l3_cpu0 &l3_cpu6 &l3_cdsp>;
clock-names = "xo_ao";
clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
@@ -1781,6 +1797,29 @@
cell-index = <0>;
};
+ ufs_ice: ufsice@1d90000 {
+ compatible = "qcom,ice";
+ reg = <0x1d90000 0x8000>;
+ qcom,enable-ice-clk;
+ clock-names = "ufs_core_clk", "bus_clk",
+ "iface_clk", "ice_core_clk";
+ clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+ <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+ <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+ qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+ vdd-hba-supply = <&ufs_phy_gdsc>;
+ qcom,msm-bus,name = "ufs_ice_noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 650 0 0>, /* No vote */
+ <1 650 1000 0>; /* Max. bandwidth */
+ qcom,bus-vector-names = "MIN",
+ "MAX";
+ qcom,instance-type = "ufs";
+ };
+
ufsphy_mem: ufsphy_mem@1d87000 {
reg = <0x1d87000 0xe00>; /* PHY regs */
reg-names = "phy_mem";
@@ -1804,6 +1843,7 @@
interrupts = <0 265 0>;
phys = <&ufsphy_mem>;
phy-names = "ufsphy";
+ ufs-qcom-crypto = <&ufs_ice>;
lanes-per-direction = <1>;
dev-ref-clk-freq = <0>; /* 19.2 MHz */
@@ -1956,7 +1996,7 @@
qcom,arm-smmu;
qcom,bandwidth-vote-for-ipa;
qcom,msm-bus,name = "ipa";
- qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-cases = <5>;
qcom,msm-bus,num-paths = <4>;
qcom,msm-bus,vectors-KBps =
/* No vote */
@@ -1964,22 +2004,28 @@
<90 585 0 0>,
<1 676 0 0>,
<143 777 0 0>,
+ /* SVS2 */
+ <90 512 80000 600000>,
+ <90 585 80000 350000>,
+ <1 676 40000 40000>, /*gcc_config_noc_clk_src */
+ <143 777 0 75>, /* IB defined for IPA2X_clk in MHz*/
/* SVS */
<90 512 80000 640000>,
<90 585 80000 640000>,
<1 676 80000 80000>,
- <143 777 0 150>, /* IB defined for IPA clk in MHz*/
+ <143 777 0 150>, /* IB defined for IPA2X_clk in MHz*/
/* NOMINAL */
<90 512 206000 960000>,
<90 585 206000 960000>,
<1 676 206000 160000>,
- <143 777 0 300>, /* IB defined for IPA clk in MHz*/
+ <143 777 0 300>, /* IB defined for IPA2X_clk in MHz*/
/* TURBO */
<90 512 206000 3600000>,
<90 585 206000 3600000>,
<1 676 206000 300000>,
<143 777 0 355>; /* IB defined for IPA clk in MHz*/
- qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
+ qcom,bus-vector-names =
+ "MIN", "SVS2", "SVS", "NOMINAL", "TURBO";
/* IPA RAM mmap */
qcom,ipa-ram-mmap = <
@@ -2093,6 +2139,8 @@
compatible = "qcom,pil-tz-generic";
qcom,pas-id = <0xf>;
qcom,firmware-name = "ipa_fws";
+ qcom,pil-force-shutdown;
+ memory-region = <&pil_ipa_fw_mem>;
};
pil_modem: qcom,mss@4080000 {
@@ -2135,6 +2183,7 @@
qcom,firmware-name = "modem";
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
+ qcom,minidump-id = <3>;
qcom,ssctl-instance-id = <0x12>;
qcom,override-acc;
qcom,signal-aop;
@@ -2318,6 +2367,8 @@
qcom,ddr-config = <0xC3040873>;
qcom,nonremovable;
+ nvmem-cells = <&minor_rev>;
+ nvmem-cell-names = "minor_rev";
status = "disabled";
};
@@ -2391,11 +2442,13 @@
qcom,msm-adsprpc-mem {
compatible = "qcom,msm-adsprpc-mem-region";
memory-region = <&adsp_mem>;
+ restrict-access;
};
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-compute";
qcom,adsp-remoteheap-vmid = <22 37>;
+ qcom,fastrpc-adsp-audio-pdr;
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
@@ -2761,6 +2814,13 @@
< 2457600 MHZ_TO_MBPS(1804, 4) >;
};
+ l3_cdsp: qcom,l3-cdsp {
+ compatible = "devfreq-simple-dev";
+ clock-names = "devfreq_clk";
+ clocks = <&clock_cpucc L3_MISC_VOTE_CLK>;
+ governor = "powersave";
+ };
+
cpu_pmu: cpu-pmu {
compatible = "arm,armv8-pmuv3";
qcom,irq-is-percpu;
@@ -2776,6 +2836,20 @@
compatible = "syscon";
reg = <0x5091008 0x4>;
};
+
+ qfprom: qfprom@0x780000 {
+ compatible = "qcom,qfprom";
+ reg = <0x00780000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ minor_rev: minor_rev@0x78014c {
+ reg = <0x14c 0x4>;
+ bits = <0x1c 0x2>;
+ };
+ };
+
};
#include "pm660.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index a3a48af..d708a12 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -91,16 +91,26 @@
pinctrl-0 = <&camera_dvdd_en_default>;
vin-supply = <&pm8998_s3>;
};
+
+ camera_vana_ldo: gpio-regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "camera_vana_ldo";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vana>;
+ vin-supply = <&pmi8998_bob>;
+ };
};
&cam_cci {
qcom,cam-res-mgr {
compatible = "qcom,cam-res-mgr";
status = "ok";
- shared-gpios = <8>;
- pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
- pinctrl-0 = <&cam_res_mgr_active>;
- pinctrl-1 = <&cam_res_mgr_suspend>;
};
actuator_rear: qcom,actuator@0 {
@@ -339,13 +349,13 @@
eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
- cam_vana-supply = <&pmi8998_bob>;
+ cam_vana-supply = <&camera_vana_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
rgltr-cntrl-support;
- rgltr-min-voltage = <1050000 0 3312000 0>;
- rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-min-voltage = <1050000 0 2850000 0>;
+ rgltr-max-voltage = <1050000 0 2850000 0>;
rgltr-load-current = <105000 0 80000 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -354,15 +364,12 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 15 0>,
- <&tlmm 9 0>,
- <&tlmm 8 0>;
+ <&tlmm 9 0>;
gpio-reset = <1>;
- gpio-vana = <2>;
- gpio-req-tbl-num = <0 1 2>;
- gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
gpio-req-tbl-label = "CAMIF_MCLK1",
- "CAM_RESET1",
- "CAM_VANA1";
+ "CAM_RESET1";
sensor-mode = <0>;
cci-master = <1>;
status = "ok";
@@ -384,14 +391,14 @@
actuator-src = <&actuator_front>;
led-flash-src = <&led_flash_front>;
cam_vio-supply = <&pm8998_lvs1>;
- cam_vana-supply = <&pmi8998_bob>;
+ cam_vana-supply = <&camera_vana_ldo>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
rgltr-cntrl-support;
- rgltr-min-voltage = <0 3312000 1050000 0>;
- rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-min-voltage = <0 2850000 1050000 0>;
+ rgltr-max-voltage = <0 2850000 1050000 0>;
rgltr-load-current = <0 80000 105000 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -400,15 +407,12 @@
pinctrl-1 = <&cam_sensor_mclk1_suspend
&cam_sensor_front_suspend>;
gpios = <&tlmm 14 0>,
- <&tlmm 28 0>,
- <&tlmm 8 0>;
+ <&tlmm 28 0>;
gpio-reset = <1>;
- gpio-vana = <2>;
- gpio-req-tbl-num = <0 1 2>;
- gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
gpio-req-tbl-label = "CAMIF_MCLK2",
- "CAM_RESET2",
- "CAM_VANA1";
+ "CAM_RESET2";
sensor-mode = <0>;
cci-master = <1>;
status = "ok";
@@ -428,14 +432,14 @@
sensor-position-yaw = <0>;
led-flash-src = <&led_flash_iris>;
cam_vio-supply = <&pm8998_lvs1>;
- cam_vana-supply = <&pmi8998_bob>;
+ cam_vana-supply = <&camera_vana_ldo>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
rgltr-cntrl-support;
- rgltr-min-voltage = <0 3312000 1050000 0>;
- rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-min-voltage = <0 2850000 1050000 0>;
+ rgltr-max-voltage = <0 2850000 1050000 0>;
rgltr-load-current = <0 80000 105000 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -444,15 +448,12 @@
pinctrl-1 = <&cam_sensor_mclk3_suspend
&cam_sensor_iris_suspend>;
gpios = <&tlmm 16 0>,
- <&tlmm 9 0>,
- <&tlmm 8 0>;
+ <&tlmm 9 0>;
gpio-reset = <1>;
- gpio-vana = <2>;
- gpio-req-tbl-num = <0 1 2>;
- gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
gpio-req-tbl-label = "CAMIF_MCLK3",
- "CAM_RESET3",
- "CAM_VANA1";
+ "CAM_RESET3";
sensor-mode = <0>;
cci-master = <1>;
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 35a7774..ec1e9c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -863,7 +863,7 @@
<0 0 200000000 0 0 0 0 600000000>;
clock-cntl-level = "svs", "turbo";
fw_name = "CAMERA_ICP.elf";
- ubwc-cfg = <0x7F 0x1FF>;
+ ubwc-cfg = <0x7B 0x1EF>;
status = "ok";
};
@@ -885,12 +885,11 @@
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
clock-rates =
- <0 0 0 0 240000000>,
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 538000000>,
<0 0 0 0 600000000>;
- clock-cntl-level = "lowsvs", "svs",
+ clock-cntl-level = "svs",
"svs_l1", "nominal", "turbo";
status = "ok";
};
@@ -912,12 +911,12 @@
<&clock_camcc CAM_CC_IPE_1_CLK>,
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
- clock-rates = <0 0 0 0 240000000>,
+ clock-rates =
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 538000000>,
<0 0 0 0 600000000>;
- clock-cntl-level = "lowsvs", "svs",
+ clock-cntl-level = "svs",
"svs_l1", "nominal", "turbo";
status = "ok";
};
@@ -939,12 +938,12 @@
<&clock_camcc CAM_CC_BPS_CLK>,
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
- clock-rates = <0 0 0 0 200000000>,
+ clock-rates =
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 600000000>,
<0 0 0 0 600000000>;
- clock-cntl-level = "lowsvs", "svs",
+ clock-cntl-level = "svs",
"svs_l1", "nominal", "turbo";
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 5a88dc2..2c38f51 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -320,7 +320,6 @@
qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
qcom,nq-esepwr = <&tlmm 116 0x00>;
interrupt-parent = <&tlmm>;
- qcom,clk-src = "BBCLK3";
interrupts = <63 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active", "nfc_suspend";
@@ -328,8 +327,6 @@
&nfc_enable_active
&nfc_clk_default>;
pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
- clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
- clock-names = "ref_clk";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index a7cf880..a094f65 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,23 @@
reg-names = "csr-base";
coresight-name = "coresight-csr";
+ qcom,usb-bam-support;
+ qcom,hwctrl-set-support;
+ qcom,set-byte-cntr-support;
+
+ qcom,blk-size = <1>;
+ };
+
+ swao_csr: csr@6b0e000 {
+ compatible = "qcom,coresight-csr";
+ reg = <0x6b0e000 0x1000>;
+ reg-names = "csr-base";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ coresight-name = "coresight-swao-csr";
+ qcom,timestamp-support;
qcom,blk-size = <1>;
};
@@ -113,6 +130,7 @@
reg-names = "tmc-base";
coresight-name = "coresight-tmc-etf-swao";
+ coresight-csr = <&csr>;
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -166,6 +184,15 @@
};
port@1 {
+ reg = <6>;
+ funnel_swao_in_sensor_etm0: endpoint {
+ slave-mode;
+ remote-endpoint=
+ <&sensor_etm0_out_funnel_swao>;
+ };
+ };
+
+ port@2 {
reg = <7>;
funnel_swao_in_tpda_swao: endpoint {
slave-mode;
@@ -277,6 +304,7 @@
coresight-name = "coresight-tmc-etr";
coresight-ctis = <&cti0 &cti8>;
+ coresight-csr = <&csr>;
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -301,6 +329,7 @@
coresight-name = "coresight-tmc-etf";
coresight-ctis = <&cti0 &cti8>;
+ coresight-csr = <&csr>;
arm,default-sink;
clocks = <&clock_aop QDSS_CLK>;
@@ -405,6 +434,7 @@
"ddr-ch23-ctrl";
coresight-name = "coresight-hwevent";
+ coresight-csr = <&csr>;
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -2049,6 +2079,20 @@
};
};
+ sensor_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-sensor-etm0";
+ qcom,inst-id = <8>;
+
+ port {
+ sensor_etm0_out_funnel_swao: endpoint {
+ remote-endpoint =
+ <&funnel_swao_in_sensor_etm0>;
+ };
+ };
+ };
+
modem_etm0 {
compatible = "qcom,coresight-remote-etm";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 33bcaa6..000f5d3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -16,6 +16,7 @@
compatible = "qcom,pil-tz-generic";
qcom,pas-id = <13>;
qcom,firmware-name = "a630_zap";
+ memory-region = <&pil_gpu_mem>;
};
msm_bus: qcom,kgsl-busmon{
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index c16e1d8..1c7269a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -328,6 +328,13 @@
/delete-property/ pinctrl-0;
};
+ gpio-regulator@4 {
+ /delete-property/ gpio;
+ /delete-property/ vin-supply;
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+ };
+
/delete-node/ qcom,spmi-debug@6b22000;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index fc4b674..349c4c0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -319,7 +319,6 @@
qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
qcom,nq-esepwr = <&tlmm 116 0x00>;
interrupt-parent = <&tlmm>;
- qcom,clk-src = "BBCLK3";
interrupts = <63 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active", "nfc_suspend";
@@ -327,8 +326,6 @@
&nfc_enable_active
&nfc_clk_default>;
pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
- clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
- clock-names = "ref_clk";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
index daf5687..af7feb5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -202,6 +202,8 @@
qcom,ep-latency = <10>;
+ qcom,phy-status-offset = <0x974>;
+
qcom,boot-option = <0x1>;
linux,pci-domain = <0>;
@@ -535,6 +537,8 @@
qcom,slv-addr-space-size = <0x20000000>;
+ qcom,phy-status-offset = <0x1aac>;
+
qcom,boot-option = <0x1>;
linux,pci-domain = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 191e76d..78be790 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -749,6 +749,123 @@
};
};
+ /* add pingrp for touchscreen */
+ pmx_ts_int_active {
+ ts_int_active: ts_int_active {
+ mux {
+ pins = "gpio122";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio122";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ pmx_ts_int_suspend {
+ ts_int_suspend1: ts_int_suspend1 {
+ mux {
+ pins = "gpio122";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio122";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ pmx_ts_reset_active {
+ ts_reset_active: ts_reset_active {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ pmx_ts_reset_suspend {
+ ts_reset_suspend1: ts_reset_suspend1 {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ pmx_ts_release {
+ ts_release: ts_release {
+ mux {
+ pins = "gpio122", "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio122", "gpio99";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
+ ts_mux {
+ ts_active: ts_active {
+ mux {
+ pins = "gpio99", "gpio122";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99", "gpio122";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ ts_reset_suspend: ts_reset_suspend {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ ts_int_suspend: ts_int_suspend {
+ mux {
+ pins = "gpio122";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio122";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
sec_aux_pcm {
sec_aux_pcm_sleep: sec_aux_pcm_sleep {
mux {
@@ -3099,6 +3216,20 @@
};
};
+ cam_sensor_rear_vana: cam_sensor_rear_vana {
+ /* AVDD LDO */
+ mux {
+ pins = "gpio8";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
cam_res_mgr_active: cam_res_mgr_active {
/* AVDD_LDO*/
mux {
@@ -3200,6 +3331,77 @@
bias-pull-down; /* pull down */
};
};
+
+ ap2mdm {
+ ap2mdm_active: ap2mdm_active {
+ mux {
+ /* ap2mdm-status
+ * ap2mdm-errfatal
+ * ap2mdm-vddmin
+ */
+ pins = "gpio21", "gpio23";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21", "gpio23";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+ ap2mdm_sleep: ap2mdm_sleep {
+ mux {
+ /* ap2mdm-status
+ * ap2mdm-errfatal
+ * ap2mdm-vddmin
+ */
+ pins = "gpio21", "gpio23";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21", "gpio23";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ };
+ };
+
+ mdm2ap {
+ mdm2ap_active: mdm2ap_active {
+ mux {
+ /* mdm2ap-status
+ * mdm2ap-errfatal
+ * mdm2ap-vddmin
+ */
+ pins = "gpio22", "gpio20";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio22", "gpio20";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+ mdm2ap_sleep: mdm2ap_sleep {
+ mux {
+ /* mdm2ap-status
+ * mdm2ap-errfatal
+ * mdm2ap-vddmin
+ */
+ pins = "gpio22", "gpio20";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio22", "gpio20";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 3ee0138..bd8ae70 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -82,7 +82,6 @@
qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
qcom,nq-esepwr = <&tlmm 116 0x00>;
interrupt-parent = <&tlmm>;
- qcom,clk-src = "BBCLK3";
interrupts = <63 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active", "nfc_suspend";
@@ -90,8 +89,6 @@
&nfc_enable_active
&nfc_clk_default>;
pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
- clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
- clock-names = "ref_clk";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 1825cd0..a5c6ab5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,12 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
#include "sdm845-pmic-overlay.dtsi"
#include "sdm845-pinctrl-overlay.dtsi"
#include "smb1355.dtsi"
@@ -200,8 +206,8 @@
qcom,vdd-io-current-level = <200 22000>;
pinctrl-names = "active", "sleep";
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &storage_cd>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &storage_cd>;
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
cd-gpios = <&tlmm 126 GPIO_ACTIVE_HIGH>;
@@ -211,3 +217,30 @@
&wil6210 {
status = "ok";
};
+
+&qupv3_se5_i2c {
+ status = "ok";
+ synaptics_dsx@20 {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x20>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <122 0x2008>;
+ vdd-supply = <&pm8998_l14>;
+ avdd-supply = <&pm8998_l28>;
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend",
+ "pmx_ts_release";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ pinctrl-2 = <&ts_release>;
+ synaptics,pwr-reg-name = "avdd";
+ synaptics,bus-reg-name = "vdd";
+ synaptics,ub-i2c-addr = <0x2c>;
+ synaptics,irq-gpio = <&tlmm 122 0x2008>;
+ synaptics,reset-gpio = <&tlmm 99 0x0>;
+ synaptics,irq-on-state = <0>;
+ synaptics,power-delay-ms = <200>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,reset-on-state = <0>;
+ synaptics,reset-active-ms = <20>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 9d7c519..ec8665b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -559,7 +559,7 @@
regulator-min-microvolt = <2704000>;
regulator-max-microvolt = <2960000>;
qcom,init-voltage = <2704000>;
- qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_HPM>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index d2ee9eb..05d77d3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -156,6 +156,7 @@
qcom,cam_smmu {
compatible = "qcom,msm-cam-smmu";
status = "ok";
+ non-fatal-fault-disabled;
msm_cam_smmu_lrme {
compatible = "qcom,msm-cam-smmu-cb";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt-overlay.dts
similarity index 65%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt-overlay.dts
index 0a56c79..d26c975 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +10,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-dvt.dtsi"
+#include "sdm845-qvr-audio-overlay.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 DVT QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <0x02000B 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dts
similarity index 71%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dts
index 0a56c79..9110954 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,11 +14,10 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-dvt.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 DVT QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ qcom,board-id = <0x02000B 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dtsi
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dtsi
index 0a56c79..c629c53 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,16 +9,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
#include "sdm845-qvr.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-camera-sensor-qvr.dtsi"
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt-overlay.dts
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt-overlay.dts
index 0a56c79..5172098 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +10,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
+/plugin/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-evt.dtsi"
+#include "sdm845-qvr-audio-overlay.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 EVT QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
+ qcom,msm-id = <321 0x20000>;
qcom,board-id = <0x01000B 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dts
similarity index 76%
rename from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
rename to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dts
index 0a56c79..19b12e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,11 +14,10 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-evt.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 EVT QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
qcom,board-id = <0x01000B 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dtsi
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dtsi
index 0a56c79..c629c53 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,16 +9,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
#include "sdm845-qvr.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-camera-sensor-qvr.dtsi"
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 947d28b..1551952 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -51,6 +51,7 @@
compatible = "qcom,memshare-peripheral";
qcom,peripheral-size = <0x500000>;
qcom,client-id = <1>;
+ qcom,allocate-boot-time;
label = "modem";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 213dfdb..7832165 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2648,6 +2648,7 @@
qcom,ipa-wdi2;
qcom,use-64-bit-dma-mask;
qcom,arm-smmu;
+ qcom,smmu-fast-map;
qcom,bandwidth-vote-for-ipa;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <5>;
@@ -2770,7 +2771,6 @@
ipa_smmu_ap: ipa_smmu_ap {
compatible = "qcom,ipa-smmu-ap-cb";
- qcom,smmu-s1-bypass;
iommus = <&apps_smmu 0x720 0x0>;
qcom,iova-mapping = <0x20000000 0x40000000>;
qcom,additional-mapping =
@@ -2780,7 +2780,6 @@
ipa_smmu_wlan: ipa_smmu_wlan {
compatible = "qcom,ipa-smmu-wlan-cb";
- qcom,smmu-s1-bypass;
iommus = <&apps_smmu 0x721 0x0>;
qcom,additional-mapping =
/* ipa-uc ram */
@@ -2789,7 +2788,6 @@
ipa_smmu_uc: ipa_smmu_uc {
compatible = "qcom,ipa-smmu-uc-cb";
- qcom,smmu-s1-bypass;
iommus = <&apps_smmu 0x722 0x0>;
qcom,iova-mapping = <0x40000000 0x20000000>;
};
@@ -3089,7 +3087,6 @@
vdd-3.3-ch0-supply = <&pm8998_l25>;
qcom,vdd-0.8-cx-mx-config = <800000 800000>;
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
- qcom,smmu-s1-bypass;
};
qmi-tmd-devices {
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 9b242a0..880018c 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -52,6 +52,7 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8937=y
CONFIG_ARCH_SDM450=y
CONFIG_ARCH_SDM632=y
CONFIG_SCHED_MC=y
@@ -231,6 +232,7 @@
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -294,6 +296,7 @@
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
+CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
@@ -334,6 +337,7 @@
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_CPR=y
CONFIG_REGULATOR_CPR4_APSS=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
@@ -494,6 +498,7 @@
CONFIG_MSM_PM=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_SPDM_SCM=y
CONFIG_DEVFREQ_SPDM=y
CONFIG_PWM=y
@@ -505,6 +510,7 @@
CONFIG_MSM_TZ_LOG=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index f6e49cd..bc0265f 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -56,6 +56,7 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8937=y
CONFIG_ARCH_SDM450=y
CONFIG_ARCH_SDM632=y
CONFIG_SCHED_MC=y
@@ -240,6 +241,7 @@
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -304,6 +306,7 @@
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
+CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
@@ -344,6 +347,7 @@
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_CPR=y
CONFIG_REGULATOR_CPR4_APSS=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
@@ -513,6 +517,10 @@
CONFIG_QCOM_DCC=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_SPDM_SCM=y
CONFIG_DEVFREQ_SPDM=y
CONFIG_PWM=y
@@ -528,6 +536,7 @@
CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -560,6 +569,7 @@
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 1904209..8aa1e7d 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -564,6 +564,8 @@
CONFIG_ARM_GIC_V3_ACL=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 670627d..667377f 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -582,6 +582,8 @@
CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT2_FS=y
@@ -593,6 +595,7 @@
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -618,7 +621,6 @@
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_DEBUG_KMEMLEAK=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 012b342..11c95ea 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -290,6 +290,11 @@
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
@@ -451,6 +456,12 @@
CONFIG_LEDS_TRIGGERS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_GPI_DMA=y
CONFIG_UIO=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7e5d05d..6aa09e5 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -294,6 +294,11 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
@@ -459,6 +464,13 @@
CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_DEBUG=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_GPI_DMA=y
CONFIG_QCOM_GPI_DMA_DEBUG=y
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index b64410c..96c11e7 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -46,6 +46,8 @@
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern char* (*arch_read_hardware_id)(void);
+const char * __init arch_read_machine_name(void);
+
#define show_unhandled_signals_ratelimited() \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index a58fb92..f58539f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -65,6 +65,7 @@
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
+#include <asm/system_misc.h>
phys_addr_t __fdt_pointer __initdata;
@@ -186,6 +187,11 @@
pr_warn("Large number of MPIDR hash buckets detected\n");
}
+const char * __init __weak arch_read_machine_name(void)
+{
+ return of_flat_dt_get_machine_name();
+}
+
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
void *dt_virt = fixmap_remap_fdt(dt_phys);
@@ -201,7 +207,7 @@
cpu_relax();
}
- machine_name = of_flat_dt_get_machine_name();
+ machine_name = arch_read_machine_name();
if (machine_name) {
dump_stack_set_arch_desc("%s (DT)", machine_name);
pr_info("Machine: %s\n", machine_name);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2705e51..2b8950e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -473,7 +473,7 @@
unsigned int esr,
struct pt_regs *regs)
{
-#define SCM_TLB_CONFLICT_CMD 0x1B
+#define SCM_TLB_CONFLICT_CMD 0x1F
struct scm_desc desc = {
.args[0] = addr,
.arginfo = SCM_ARGS(1),
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b08ccbb..8ba0af7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@
}
wb_congested = wb_congested_get_create(&q->backing_dev_info,
- blkcg->css.id, GFP_NOWAIT);
+ blkcg->css.id,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!wb_congested) {
ret = -ENOMEM;
goto err_put_css;
@@ -193,7 +194,7 @@
/* allocate */
if (!new_blkg) {
- new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+ new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_congested;
@@ -1022,7 +1023,7 @@
}
spin_lock_init(&blkcg->lock);
- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1240,7 +1241,7 @@
if (blkg->pd[pol->plid])
continue;
- pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4ac4910..6a90155 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3868,7 +3868,8 @@
goto out;
}
- cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
cfqd->queue->node);
if (!cfqq) {
cfqq = &cfqd->oom_cfqq;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1ea2053..b0d0181 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -607,7 +607,7 @@
config MSM_ADSPRPC
tristate "QTI ADSP RPC driver"
- depends on MSM_GLINK
+ depends on MSM_GLINK || MSM_SMD
help
Provides a communication mechanism that allows for clients to
make remote method invocations across processor boundary to
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 177fb3d..de9f280 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -28,6 +28,8 @@
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
@@ -58,6 +60,9 @@
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024
+#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
+#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
+
#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
@@ -110,6 +115,9 @@
static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
+static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *nb,
+ unsigned long code,
+ void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;
@@ -224,6 +232,16 @@
int used;
};
+struct fastrpc_static_pd {
+ char *spdname;
+ struct notifier_block pdrnb;
+ struct notifier_block get_service_nb;
+ void *pdrhandle;
+ int pdrcount;
+ int prevpdrcount;
+ int ispdup;
+};
+
struct fastrpc_glink_info {
int link_state;
int port_state;
@@ -238,6 +256,7 @@
void *chan;
struct device *dev;
struct fastrpc_session_ctx session[NUM_SESSIONS];
+ struct fastrpc_static_pd spd[NUM_SESSIONS];
struct completion work;
struct completion workport;
struct notifier_block nb;
@@ -334,6 +353,7 @@
int cid;
int ssrcount;
int pd;
+ char *spdname;
int file_close;
struct fastrpc_apps *apps;
struct hlist_head perf;
@@ -342,6 +362,7 @@
struct pm_qos_request pm_qos_req;
int qos_request;
struct mutex map_mutex;
+ struct mutex fl_map_mutex;
};
static struct fastrpc_apps gfa;
@@ -352,6 +373,14 @@
.subsys = "adsp",
.link.link_info.edge = "lpass",
.link.link_info.transport = "smem",
+ .spd = {
+ {
+ .spdname =
+ AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+ .pdrnb.notifier_call =
+ fastrpc_audio_pdr_notifier_cb,
+ }
+ },
},
{
.name = "mdsprpc-smd",
@@ -491,9 +520,7 @@
} else {
struct fastrpc_file *fl = map->fl;
- spin_lock(&fl->hlock);
hlist_add_head(&map->hn, &fl->maps);
- spin_unlock(&fl->hlock);
}
}
@@ -522,7 +549,6 @@
}
spin_unlock(&me->hlock);
} else {
- spin_lock(&fl->hlock);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
if (va >= map->va &&
va + len <= map->va + map->len &&
@@ -533,7 +559,6 @@
break;
}
}
- spin_unlock(&fl->hlock);
}
if (match) {
*ppmap = match;
@@ -581,7 +606,6 @@
*ppmap = match;
return 0;
}
- spin_lock(&fl->hlock);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
if (map->raddr == va &&
map->raddr + map->len == va + len &&
@@ -591,7 +615,6 @@
break;
}
}
- spin_unlock(&fl->hlock);
if (match) {
*ppmap = match;
return 0;
@@ -619,11 +642,9 @@
if (map->refs > 0)
return;
} else {
- spin_lock(&fl->hlock);
map->refs--;
if (!map->refs)
hlist_del_init(&map->hn);
- spin_unlock(&fl->hlock);
if (map->refs > 0 && !flags)
return;
}
@@ -786,13 +807,24 @@
goto bail;
}
map->phys = sg_dma_address(map->table->sgl);
+
if (sess->smmu.cb) {
map->phys += ((uint64_t)sess->smmu.cb << 32);
map->size = sg_dma_len(map->table->sgl);
} else {
map->size = buf_page_size(len);
}
+
vmid = fl->apps->channel[fl->cid].vmid;
+ if (!sess->smmu.enabled && !vmid) {
+ VERIFY(err, map->phys >= me->range.addr &&
+ map->phys + map->size <=
+ me->range.addr + me->range.size);
+ if (err) {
+ pr_err("adsprpc: mmap fail out of range\n");
+ goto bail;
+ }
+ }
if (vmid) {
int srcVM[1] = {VMID_HLOS};
int destVM[2] = {VMID_HLOS, vmid};
@@ -1084,8 +1116,11 @@
spin_lock(&ctx->fl->hlock);
hlist_del_init(&ctx->hn);
spin_unlock(&ctx->fl->hlock);
+ mutex_lock(&ctx->fl->fl_map_mutex);
for (i = 0; i < nbufs; ++i)
fastrpc_mmap_free(ctx->maps[i], 0);
+
+ mutex_unlock(&ctx->fl->fl_map_mutex);
fastrpc_buf_free(ctx->buf, 1);
ctx->magic = 0;
kfree(ctx);
@@ -1127,6 +1162,21 @@
spin_unlock(&me->hlock);
}
+
+static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
+{
+ struct fastrpc_file *fl;
+ struct hlist_node *n;
+
+ spin_lock(&me->hlock);
+ hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+ if (fl->spdname && !strcmp(spdname, fl->spdname))
+ fastrpc_notify_users(fl);
+ }
+ spin_unlock(&me->hlock);
+
+}
+
static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
INIT_HLIST_HEAD(&me->interrupted);
@@ -1215,21 +1265,27 @@
uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
size_t len = lpra[i].buf.len;
+ mutex_lock(&ctx->fl->fl_map_mutex);
if (ctx->fds[i] && (ctx->fds[i] != -1))
fastrpc_mmap_create(ctx->fl, ctx->fds[i],
ctx->attrs[i], buf, len,
mflags, &ctx->maps[i]);
+ mutex_unlock(&ctx->fl->fl_map_mutex);
ipage += 1;
}
PERF_END);
handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
+ mutex_lock(&ctx->fl->fl_map_mutex);
for (i = bufs; i < bufs + handles; i++) {
VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
- if (err)
+ if (err) {
+ mutex_unlock(&ctx->fl->fl_map_mutex);
goto bail;
+ }
ipage += 1;
}
+ mutex_unlock(&ctx->fl->fl_map_mutex);
metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
(sizeof(uint32_t) * M_CRCLIST);
@@ -1336,7 +1392,7 @@
/* copy non ion buffers */
PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
rlen = copylen - metalen;
- for (oix = 0; oix < inbufs + outbufs; ++oix) {
+ for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
size_t mlen;
@@ -1387,7 +1443,7 @@
if (map && (map->attr & FASTRPC_ATTR_COHERENT))
continue;
- if (rpra[i].buf.len && ctx->overps[oix]->mstart)
+ if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart)
dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
}
@@ -1438,10 +1494,13 @@
if (err)
goto bail;
} else {
+ mutex_lock(&ctx->fl->fl_map_mutex);
fastrpc_mmap_free(ctx->maps[i], 0);
+ mutex_unlock(&ctx->fl->fl_map_mutex);
ctx->maps[i] = NULL;
}
}
+ mutex_lock(&ctx->fl->fl_map_mutex);
if (inbufs + outbufs + handles) {
for (i = 0; i < M_FDLIST; i++) {
if (!fdlist[i])
@@ -1451,6 +1510,7 @@
fastrpc_mmap_free(mmap, 0);
}
}
+ mutex_unlock(&ctx->fl->fl_map_mutex);
if (ctx->crc && crclist && rpra)
K_COPY_TO_USER(err, kernel, ctx->crc,
crclist, M_CRCLIST*sizeof(uint32_t));
@@ -1700,7 +1760,28 @@
return err;
}
+static int fastrpc_get_adsp_session(char *name, int *session)
+{
+ struct fastrpc_apps *me = &gfa;
+ int err = 0, i;
+
+ for (i = 0; i < NUM_SESSIONS; i++) {
+ if (!me->channel[0].spd[i].spdname)
+ continue;
+ if (!strcmp(name, me->channel[0].spd[i].spdname))
+ break;
+ }
+ VERIFY(err, i < NUM_SESSIONS);
+ if (err)
+ goto bail;
+ *session = i;
+bail:
+ return err;
+}
+
+static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
static int fastrpc_channel_open(struct fastrpc_file *fl);
+static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
static int fastrpc_init_process(struct fastrpc_file *fl,
struct fastrpc_ioctl_init_attrs *uproc)
{
@@ -1755,8 +1836,10 @@
if (err)
goto bail;
if (init->filelen) {
+ mutex_lock(&fl->fl_map_mutex);
VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
init->file, init->filelen, mflags, &file));
+ mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
}
@@ -1765,8 +1848,10 @@
init->memlen));
if (err)
goto bail;
+ mutex_lock(&fl->fl_map_mutex);
VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
init->mem, init->memlen, mflags, &mem));
+ mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
inbuf.pageslen = 1;
@@ -1836,11 +1921,19 @@
inbuf.pgid = current->tgid;
inbuf.namelen = init->filelen;
inbuf.pageslen = 0;
+
+ if (!strcmp(proc_name, "audiopd")) {
+ fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
+ VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
+ }
+
if (!me->staticpd_flags) {
inbuf.pageslen = 1;
+ mutex_lock(&fl->fl_map_mutex);
VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
&mem));
+ mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
phys = mem->phys;
@@ -1897,10 +1990,15 @@
me->channel[fl->cid].rhvm.vmid,
me->channel[fl->cid].rhvm.vmcount,
hlosvm, hlosvmperm, 1);
+ mutex_lock(&fl->fl_map_mutex);
fastrpc_mmap_free(mem, 0);
+ mutex_unlock(&fl->fl_map_mutex);
}
- if (file)
+ if (file) {
+ mutex_lock(&fl->fl_map_mutex);
fastrpc_mmap_free(file, 0);
+ mutex_unlock(&fl->fl_map_mutex);
+ }
return err;
}
@@ -2140,6 +2238,33 @@
return err;
}
+static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
+{
+ struct fastrpc_apps *me = &gfa;
+ int session = 0, err = 0;
+
+ VERIFY(err, !fastrpc_get_adsp_session(
+ AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+ if (err)
+ goto bail;
+ if (me->channel[fl->cid].spd[session].pdrcount !=
+ me->channel[fl->cid].spd[session].prevpdrcount) {
+ if (fastrpc_mmap_remove_ssr(fl))
+ pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
+ me->channel[fl->cid].spd[session].prevpdrcount =
+ me->channel[fl->cid].spd[session].pdrcount;
+ }
+ if (!me->channel[fl->cid].spd[session].ispdup) {
+ VERIFY(err, 0);
+ if (err) {
+ err = -ENOTCONN;
+ goto bail;
+ }
+ }
+bail:
+ return err;
+}
+
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
size_t len, struct fastrpc_mmap **ppmap);
@@ -2152,16 +2277,23 @@
struct fastrpc_mmap *map = NULL;
mutex_lock(&fl->map_mutex);
+ mutex_lock(&fl->fl_map_mutex);
VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
+ mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
if (err)
goto bail;
+ mutex_lock(&fl->fl_map_mutex);
fastrpc_mmap_free(map, 0);
+ mutex_unlock(&fl->fl_map_mutex);
bail:
- if (err && map)
+ if (err && map) {
+ mutex_lock(&fl->fl_map_mutex);
fastrpc_mmap_add(map);
+ mutex_unlock(&fl->fl_map_mutex);
+ }
mutex_unlock(&fl->map_mutex);
return err;
}
@@ -2174,16 +2306,18 @@
VERIFY(err, (fl && ud));
if (err)
goto bail;
-
+ mutex_lock(&fl->fl_map_mutex);
if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
pr_err("mapping not found to unamp %x va %llx %x\n",
ud->fd, (unsigned long long)ud->va,
(unsigned int)ud->len);
err = -1;
+ mutex_unlock(&fl->fl_map_mutex);
goto bail;
}
if (map)
- fastrpc_mmap_free(map, 0);
+ fastrpc_mmap_free(map, 0);
+ mutex_unlock(&fl->fl_map_mutex);
bail:
return err;
}
@@ -2197,14 +2331,17 @@
int err = 0;
mutex_lock(&fl->map_mutex);
+ mutex_lock(&fl->fl_map_mutex);
if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
- ud->size, ud->flags, 1, &map)){
+ ud->size, ud->flags, 1, &map)) {
+ mutex_unlock(&fl->fl_map_mutex);
mutex_unlock(&fl->map_mutex);
return 0;
}
VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
(uintptr_t)ud->vaddrin, ud->size,
ud->flags, &map));
+ mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
@@ -2212,8 +2349,11 @@
goto bail;
ud->vaddrout = map->raddr;
bail:
- if (err && map)
+ if (err && map) {
+ mutex_lock(&fl->fl_map_mutex);
fastrpc_mmap_free(map, 0);
+ mutex_unlock(&fl->fl_map_mutex);
+ }
mutex_unlock(&fl->map_mutex);
return err;
}
@@ -2378,9 +2518,11 @@
spin_unlock(&fl->hlock);
fastrpc_context_list_dtor(fl);
fastrpc_buf_list_free(fl);
+ mutex_lock(&fl->fl_map_mutex);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
fastrpc_mmap_free(map, 1);
}
+ mutex_unlock(&fl->fl_map_mutex);
if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
kref_put_mutex(&fl->apps->channel[cid].kref,
fastrpc_channel_close, &fl->apps->smd_mutex);
@@ -2403,6 +2545,7 @@
} while (fperf);
mutex_unlock(&fl->perf_mutex);
mutex_destroy(&fl->perf_mutex);
+ mutex_destroy(&fl->fl_map_mutex);
kfree(fl);
return 0;
}
@@ -2742,6 +2885,7 @@
fl->qos_request = 0;
filp->private_data = fl;
mutex_init(&fl->map_mutex);
+ mutex_init(&fl->fl_map_mutex);
spin_lock(&me->hlock);
hlist_add_head(&fl->hn, &me->drivers);
spin_unlock(&me->hlock);
@@ -3041,6 +3185,64 @@
return NOTIFY_DONE;
}
+static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *pdrnb,
+ unsigned long code,
+ void *data)
+{
+ struct fastrpc_apps *me = &gfa;
+ struct fastrpc_static_pd *spd;
+ struct notif_data *notifdata = data;
+
+ spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
+ if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
+ mutex_lock(&me->smd_mutex);
+ spd->pdrcount++;
+ spd->ispdup = 0;
+ pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
+ MAJOR(me->dev_no), spd->spdname);
+ mutex_unlock(&me->smd_mutex);
+ if (!strcmp(spd->spdname,
+ AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
+ me->staticpd_flags = 0;
+ fastrpc_notify_pdr_drivers(me, spd->spdname);
+ } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
+ if (me->channel[0].remoteheap_ramdump_dev &&
+ notifdata->enable_ramdump) {
+ me->channel[0].ramdumpenabled = 1;
+ }
+ } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
+ spd->ispdup = 1;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int fastrpc_get_service_location_notify(struct notifier_block *nb,
+ unsigned long opcode, void *data)
+{
+ struct fastrpc_static_pd *spd;
+ struct pd_qmi_client_data *pdr = data;
+ int curr_state = 0;
+
+ spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
+ if (opcode == LOCATOR_DOWN) {
+ pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
+ return NOTIFY_DONE;
+ }
+
+ if (pdr->total_domains == 1) {
+ spd->pdrhandle = service_notif_register_notifier(
+ pdr->domain_list[0].name,
+ pdr->domain_list[0].instance_id,
+ &spd->pdrnb, &curr_state);
+ if (IS_ERR(spd->pdrhandle))
+ pr_err("ADSPRPC: Unable to register notifier\n");
+ } else
+ pr_err("ADSPRPC: Service returned invalid domains\n");
+
+ return NOTIFY_DONE;
+}
+
static const struct file_operations fops = {
.open = fastrpc_device_open,
.release = fastrpc_device_release,
@@ -3170,6 +3372,7 @@
struct platform_device *ion_pdev;
struct cma *cma;
uint32_t val;
+ int ret = 0;
if (of_device_is_compatible(dev->of_node,
@@ -3207,7 +3410,8 @@
break;
}
}
- if (range.addr) {
+ if (range.addr && !of_property_read_bool(dev->of_node,
+ "restrict-access")) {
int srcVM[1] = {VMID_HLOS};
int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
VMID_ADSP_Q6};
@@ -3221,10 +3425,31 @@
srcVM, 1, destVM, destVMperm, 4));
if (err)
goto bail;
+ me->range.addr = range.addr;
+ me->range.size = range.size;
}
return 0;
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,fastrpc-adsp-audio-pdr")) {
+ int session;
+ VERIFY(err, !fastrpc_get_adsp_session(
+ AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+ if (err)
+ goto spdbail;
+ me->channel[0].spd[session].get_service_nb.notifier_call =
+ fastrpc_get_service_location_notify;
+ ret = get_service_location(
+ AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+ AUDIO_PDR_ADSP_SERVICE_NAME,
+ &me->channel[0].spd[session].get_service_nb);
+ if (ret)
+ pr_err("ADSPRPC: Get service location failed: %d\n",
+ ret);
+ }
+spdbail:
+ err = 0;
VERIFY(err, !of_platform_populate(pdev->dev.of_node,
fastrpc_match_table,
NULL, &pdev->dev));
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index f510c14..223bc03 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -536,8 +536,7 @@
}
static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i;
int write_len = 0;
@@ -545,23 +544,30 @@
struct diag_msg_ssid_query_t rsp;
struct diag_ssid_range_t ssid_range;
struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
- if (!diag_apps_responds())
+ if (!diag_apps_responds()) {
+ mutex_unlock(&driver->md_session_lock);
return 0;
+ }
mutex_lock(&driver->msg_mask_lock);
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
@@ -583,12 +589,12 @@
write_len += sizeof(ssid_range);
}
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&driver->md_session_lock);
return write_len;
}
static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i = 0;
int write_len = 0;
@@ -641,8 +647,7 @@
}
static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i;
int write_len = 0;
@@ -651,6 +656,10 @@
struct diag_build_mask_req_t *req = NULL;
struct diag_msg_build_mask_t rsp;
struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -658,15 +667,19 @@
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
- if (!diag_apps_responds())
+ if (!diag_apps_responds()) {
+ mutex_unlock(&driver->md_session_lock);
return 0;
+ }
mutex_lock(&driver->msg_mask_lock);
req = (struct diag_build_mask_req_t *)src_buf;
@@ -681,6 +694,7 @@
pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
__func__, mask->ptr);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -700,12 +714,12 @@
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&driver->md_session_lock);
return write_len;
}
static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
uint32_t mask_size = 0, offset = 0;
uint32_t *temp = NULL;
@@ -716,6 +730,10 @@
struct diag_msg_build_mask_t rsp;
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask_next = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -723,11 +741,13 @@
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
@@ -740,6 +760,7 @@
__func__, mask->ptr);
mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -782,6 +803,7 @@
mutex_unlock(&mask->lock);
mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
return -ENOMEM;
}
mask->ptr = temp;
@@ -802,6 +824,7 @@
}
mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -842,8 +865,7 @@
}
static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i, write_len = 0, peripheral;
int header_len = sizeof(struct diag_msg_config_rsp_t);
@@ -851,6 +873,10 @@
struct diag_msg_config_rsp_t *req = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -858,11 +884,13 @@
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
@@ -877,6 +905,7 @@
__func__, mask->ptr);
mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
@@ -889,7 +918,7 @@
}
mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
+ mutex_unlock(&driver->md_session_lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -923,8 +952,7 @@
}
static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int write_len = 0;
uint32_t mask_size;
@@ -959,26 +987,30 @@
}
static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i, write_len = 0, mask_len = 0, peripheral;
int header_len = sizeof(struct diag_event_mask_config_t);
struct diag_event_mask_config_t rsp;
struct diag_event_mask_config_t *req;
struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &event_mask : info->event_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
req = (struct diag_event_mask_config_t *)src_buf;
@@ -986,6 +1018,7 @@
if (mask_len <= 0 || mask_len > event_mask.mask_len) {
pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
mask_len);
+ mutex_unlock(&driver->md_session_lock);
return -EIO;
}
@@ -993,6 +1026,7 @@
memcpy(mask_info->ptr, src_buf + header_len, mask_len);
mask_info->status = DIAG_CTRL_MASK_VALID;
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(EVENT_MASKS_TYPE);
@@ -1027,25 +1061,29 @@
}
static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int write_len = 0, i, peripheral;
uint8_t toggle = 0;
struct diag_event_report_t header;
struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &event_mask : info->event_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
!mask_info) {
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
@@ -1059,6 +1097,7 @@
memset(mask_info->ptr, 0, mask_info->mask_len);
}
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(EVENT_MASKS_TYPE);
@@ -1088,8 +1127,7 @@
}
static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i;
int status = LOG_STATUS_INVALID;
@@ -1102,6 +1140,10 @@
struct diag_log_config_req_t *req;
struct diag_log_config_rsp_t rsp;
struct diag_mask_info *mask_info = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1109,16 +1151,20 @@
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
- if (!diag_apps_responds())
+ if (!diag_apps_responds()) {
+ mutex_unlock(&driver->md_session_lock);
return 0;
+ }
req = (struct diag_log_config_req_t *)src_buf;
read_len += req_header_len;
@@ -1138,6 +1184,7 @@
if (!log_item->ptr) {
pr_err("diag: Invalid input in %s, mask: %pK\n",
__func__, log_item);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
@@ -1179,28 +1226,27 @@
rsp.status = status;
memcpy(dest_buf, &rsp, rsp_header_len);
+ mutex_unlock(&driver->md_session_lock);
return write_len;
}
static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
int i;
int write_len = 0;
struct diag_log_config_rsp_t rsp;
- struct diag_mask_info *mask_info = NULL;
struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr;
+ if (!mask)
+ return -EINVAL;
+
if (!diag_apps_responds())
return 0;
- mask_info = (!info) ? &log_mask : info->log_mask;
- if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
- !mask_info) {
- pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
- __func__, src_buf, src_len, dest_buf, dest_len,
- mask_info);
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ __func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@@ -1223,7 +1269,7 @@
static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ int pid)
{
int i, peripheral, write_len = 0;
int status = LOG_STATUS_SUCCESS;
@@ -1236,6 +1282,10 @@
struct diag_log_mask_t *mask = NULL;
struct diag_mask_info *mask_info = NULL;
unsigned char *temp_buf = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1243,11 +1293,13 @@
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
@@ -1257,6 +1309,7 @@
if (!mask->ptr) {
pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
__func__, mask->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (req->equip_id >= MAX_EQUIP_ID) {
@@ -1319,6 +1372,7 @@
break;
}
mutex_unlock(&mask_info->lock);
+ mutex_unlock(&driver->md_session_lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(LOG_MASKS_TYPE);
@@ -1365,13 +1419,16 @@
}
static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info)
+ unsigned char *dest_buf, int dest_len, int pid)
{
struct diag_mask_info *mask_info = NULL;
struct diag_log_mask_t *mask = NULL;
struct diag_log_config_rsp_t header;
int write_len = 0, i, peripheral;
+ struct diag_md_session_t *info = NULL;
+
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1379,17 +1436,20 @@
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
__func__, src_buf, src_len, dest_buf, dest_len,
mask_info);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (!mask_info->ptr) {
pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
__func__, mask_info->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
mask = (struct diag_log_mask_t *)mask_info->ptr;
if (!mask->ptr) {
pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
__func__, mask->ptr);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
@@ -1398,6 +1458,7 @@
mutex_unlock(&mask->lock);
}
mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+ mutex_unlock(&driver->md_session_lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(LOG_MASKS_TYPE);
@@ -2144,13 +2205,11 @@
}
}
-int diag_process_apps_masks(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+int diag_process_apps_masks(unsigned char *buf, int len, int pid)
{
int size = 0, sub_cmd = 0;
int (*hdlr)(unsigned char *src_buf, int src_len,
- unsigned char *dest_buf, int dest_len,
- struct diag_md_session_t *info) = NULL;
+ unsigned char *dest_buf, int dest_len, int pid) = NULL;
if (!buf || len <= 0)
return -EINVAL;
@@ -2200,7 +2259,7 @@
if (hdlr)
size = hdlr(buf, len, driver->apps_rsp_buf,
- DIAG_MAX_RSP_SIZE, info);
+ DIAG_MAX_RSP_SIZE, pid);
return (size > 0) ? size : 0;
}
diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h
index 1a52f94..6edeee9 100644
--- a/drivers/char/diag/diag_masks.h
+++ b/drivers/char/diag/diag_masks.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -167,8 +167,7 @@
void diag_log_mask_free(struct diag_mask_info *mask_info);
void diag_msg_mask_free(struct diag_mask_info *mask_info);
void diag_event_mask_free(struct diag_mask_info *mask_info);
-int diag_process_apps_masks(unsigned char *buf, int len,
- struct diag_md_session_t *info);
+int diag_process_apps_masks(unsigned char *buf, int len, int pid);
void diag_send_updates_peripheral(uint8_t peripheral);
extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 9cecb03..ce0c7bb 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -129,11 +129,10 @@
int diag_md_write(int id, unsigned char *buf, int len, int ctx)
{
- int i;
+ int i, peripheral, pid = 0;
uint8_t found = 0;
unsigned long flags;
struct diag_md_info *ch = NULL;
- int peripheral;
struct diag_md_session_t *session_info = NULL;
if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
@@ -146,10 +145,14 @@
if (peripheral < 0)
return -EINVAL;
- session_info =
- diag_md_session_get_peripheral(peripheral);
- if (!session_info)
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_peripheral(peripheral);
+ if (!session_info) {
+ mutex_unlock(&driver->md_session_lock);
return -EIO;
+ }
+ pid = session_info->pid;
+ mutex_unlock(&driver->md_session_lock);
ch = &diag_md[id];
if (!ch)
@@ -192,8 +195,7 @@
found = 0;
for (i = 0; i < driver->num_clients && !found; i++) {
- if ((driver->client_map[i].pid !=
- session_info->pid) ||
+ if ((driver->client_map[i].pid != pid) ||
(driver->client_map[i].pid == 0))
continue;
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 1cf7f52..060f03f 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -221,7 +221,7 @@
if (!atomic_read(&ch->connected) &&
driver->usb_connected && diag_mask_param())
- diag_clear_masks(NULL);
+ diag_clear_masks(0);
if (ch && ch->ops && ch->ops->close)
ch->ops->close(ch->ctxt, DIAG_USB_MODE);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 9de40b0..3cecebf 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -714,7 +714,7 @@
void diag_cmd_remove_reg_by_proc(int proc);
int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
int diag_mask_param(void);
-void diag_clear_masks(struct diag_md_session_t *info);
+void diag_clear_masks(int pid);
uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask);
int diag_query_pd(char *process_name);
int diag_search_peripheral_by_pd(uint8_t pd_val);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 0158549..694b483 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -167,7 +167,7 @@
void *diag_ipc_log;
#endif
-static void diag_md_session_close(struct diag_md_session_t *session_info);
+static void diag_md_session_close(int pid);
/*
* Returns the next delayed rsp id. If wrapping is enabled,
@@ -243,12 +243,13 @@
timer_in_progress = 0;
mutex_lock(&apps_data_mutex);
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
-
+ mutex_unlock(&driver->md_session_lock);
if (!hdlc_disabled)
diag_drain_apps_data(&hdlc_data);
else
@@ -422,7 +423,7 @@
{
return diag_mask_clear_param;
}
-void diag_clear_masks(struct diag_md_session_t *info)
+void diag_clear_masks(int pid)
{
int ret;
char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
@@ -431,14 +432,14 @@
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: %s: masks clear request upon %s\n", __func__,
- ((info) ? "ODL exit" : "USB Disconnection"));
+ ((pid) ? "ODL exit" : "USB Disconnection"));
ret = diag_process_apps_masks(cmd_disable_log_mask,
- sizeof(cmd_disable_log_mask), info);
+ sizeof(cmd_disable_log_mask), pid);
ret = diag_process_apps_masks(cmd_disable_msg_mask,
- sizeof(cmd_disable_msg_mask), info);
+ sizeof(cmd_disable_msg_mask), pid);
ret = diag_process_apps_masks(cmd_disable_event_mask,
- sizeof(cmd_disable_event_mask), info);
+ sizeof(cmd_disable_event_mask), pid);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag:%s: masks cleared successfully\n", __func__);
}
@@ -451,21 +452,23 @@
struct diag_md_session_t *session_info = NULL;
struct diag_logging_mode_param_t params;
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(pid);
- if (!session_info)
+ if (!session_info) {
+ mutex_unlock(&driver->md_session_lock);
return;
+ }
+ session_mask = session_info->peripheral_mask;
+ mutex_unlock(&driver->md_session_lock);
if (diag_mask_clear_param)
- diag_clear_masks(session_info);
+ diag_clear_masks(pid);
mutex_lock(&driver->diag_maskclear_mutex);
driver->mask_clear = 1;
mutex_unlock(&driver->diag_maskclear_mutex);
mutex_lock(&driver->diagchar_mutex);
- session_mask = session_info->peripheral_mask;
- diag_md_session_close(session_info);
-
p_mask =
diag_translate_kernel_to_user_mask(session_mask);
@@ -489,7 +492,9 @@
}
}
}
-
+ mutex_lock(&driver->md_session_lock);
+ diag_md_session_close(pid);
+ mutex_unlock(&driver->md_session_lock);
diag_switch_logging(¶ms);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -1024,11 +1029,13 @@
if (driver->hdlc_encode_buf_len != 0)
return -EAGAIN;
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
+ mutex_unlock(&driver->md_session_lock);
if (hdlc_disabled) {
if (len < 4) {
pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
@@ -1386,15 +1393,16 @@
return err;
}
-static void diag_md_session_close(struct diag_md_session_t *session_info)
+static void diag_md_session_close(int pid)
{
int i;
uint8_t found = 0;
+ struct diag_md_session_t *session_info = NULL;
+ session_info = diag_md_session_get_pid(pid);
if (!session_info)
return;
- mutex_lock(&driver->md_session_lock);
for (i = 0; i < NUM_MD_SESSIONS; i++) {
if (driver->md_session_map[i] != session_info)
continue;
@@ -1420,13 +1428,14 @@
driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
kfree(session_info);
session_info = NULL;
- mutex_unlock(&driver->md_session_lock);
DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
}
struct diag_md_session_t *diag_md_session_get_pid(int pid)
{
int i;
+ if (pid <= 0)
+ return NULL;
for (i = 0; i < NUM_MD_SESSIONS; i++) {
if (driver->md_session_map[i] &&
driver->md_session_map[i]->pid == pid)
@@ -1442,10 +1451,12 @@
return driver->md_session_map[peripheral];
}
-static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
+static int diag_md_peripheral_switch(int pid,
int peripheral_mask, int req_mode) {
int i, bit = 0;
+ struct diag_md_session_t *session_info = NULL;
+ session_info = diag_md_session_get_pid(pid);
if (!session_info)
return -EINVAL;
if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
@@ -1455,25 +1466,20 @@
* check that md_session_map for i == session_info,
* if not then race condition occurred and bail
*/
- mutex_lock(&driver->md_session_lock);
for (i = 0; i < NUM_MD_SESSIONS; i++) {
bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
if (!bit)
continue;
if (req_mode == DIAG_USB_MODE) {
- if (driver->md_session_map[i] != session_info) {
- mutex_unlock(&driver->md_session_lock);
+ if (driver->md_session_map[i] != session_info)
return -EINVAL;
- }
driver->md_session_map[i] = NULL;
driver->md_session_mask &= ~bit;
session_info->peripheral_mask &= ~bit;
} else {
- if (driver->md_session_map[i] != NULL) {
- mutex_unlock(&driver->md_session_lock);
+ if (driver->md_session_map[i] != NULL)
return -EINVAL;
- }
driver->md_session_map[i] = session_info;
driver->md_session_mask |= bit;
session_info->peripheral_mask |= bit;
@@ -1482,7 +1488,6 @@
}
driver->md_session_mode = DIAG_MD_PERIPHERAL;
- mutex_unlock(&driver->md_session_lock);
DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
peripheral_mask, req_mode);
}
@@ -1491,7 +1496,7 @@
const struct diag_logging_mode_param_t *param,
uint8_t *change_mode)
{
- int i, bit = 0, err = 0;
+ int i, bit = 0, err = 0, peripheral_mask = 0;
int change_mask = 0;
struct diag_md_session_t *session_info = NULL;
@@ -1515,12 +1520,13 @@
if (req_mode == DIAG_USB_MODE) {
if (curr_mode == DIAG_USB_MODE)
return 0;
+ mutex_lock(&driver->md_session_lock);
if (driver->md_session_mode == DIAG_MD_NONE
&& driver->md_session_mask == 0 && driver->logging_mask) {
*change_mode = 1;
+ mutex_unlock(&driver->md_session_lock);
return 0;
}
-
/*
* curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
* Check if requested peripherals are already in usb mode
@@ -1532,8 +1538,10 @@
if (bit & driver->logging_mask)
change_mask |= bit;
}
- if (!change_mask)
+ if (!change_mask) {
+ mutex_unlock(&driver->md_session_lock);
return 0;
+ }
/*
* Change is needed. Check if this md_session has set all the
@@ -1542,29 +1550,29 @@
* If this session owns all the requested peripherals, then
* call function to switch the modes/masks for the md_session
*/
- mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
-
if (!session_info) {
*change_mode = 1;
+ mutex_unlock(&driver->md_session_lock);
return 0;
}
- if ((change_mask & session_info->peripheral_mask)
+ peripheral_mask = session_info->peripheral_mask;
+ if ((change_mask & peripheral_mask)
!= change_mask) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"Another MD Session owns a requested peripheral\n");
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
*change_mode = 1;
/* If all peripherals are being set to USB Mode, call close */
- if (~change_mask & session_info->peripheral_mask) {
- err = diag_md_peripheral_switch(session_info,
+ if (~change_mask & peripheral_mask) {
+ err = diag_md_peripheral_switch(current->tgid,
change_mask, DIAG_USB_MODE);
} else
- diag_md_session_close(session_info);
-
+ diag_md_session_close(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
return err;
} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
@@ -1573,21 +1581,23 @@
* been set. Check that requested peripherals already set are
* owned by this md session
*/
- change_mask = driver->md_session_mask & param->peripheral_mask;
mutex_lock(&driver->md_session_lock);
+ change_mask = driver->md_session_mask & param->peripheral_mask;
session_info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
if (session_info) {
if ((session_info->peripheral_mask & change_mask)
!= change_mask) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"Another MD Session owns a requested peripheral\n");
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
- err = diag_md_peripheral_switch(session_info,
+ err = diag_md_peripheral_switch(current->tgid,
change_mask, DIAG_USB_MODE);
+ mutex_unlock(&driver->md_session_lock);
} else {
+ mutex_unlock(&driver->md_session_lock);
if (change_mask) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"Another MD Session owns a requested peripheral\n");
@@ -2047,19 +2057,17 @@
{
uint8_t hdlc_support;
struct diag_md_session_t *session_info = NULL;
- mutex_lock(&driver->md_session_lock);
- session_info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
if (copy_from_user(&hdlc_support, (void __user *)ioarg,
sizeof(uint8_t)))
return -EFAULT;
mutex_lock(&driver->hdlc_disable_mutex);
- if (session_info) {
- mutex_lock(&driver->md_session_lock);
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (session_info)
session_info->hdlc_disabled = hdlc_support;
- mutex_unlock(&driver->md_session_lock);
- } else
+ else
driver->hdlc_disabled = hdlc_support;
+ mutex_unlock(&driver->md_session_lock);
mutex_unlock(&driver->hdlc_disable_mutex);
diag_update_md_clients(HDLC_SUPPORT_TYPE);
@@ -2885,7 +2893,6 @@
int remote_proc = 0;
const int mempool = POOL_TYPE_COPY;
unsigned char *user_space_data = NULL;
- struct diag_md_session_t *info = NULL;
if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
@@ -2936,13 +2943,11 @@
} else {
wait_event_interruptible(driver->wait_q,
(driver->in_busy_pktdata == 0));
- mutex_lock(&driver->md_session_lock);
- info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
- ret = diag_process_apps_pkt(user_space_data, len, info);
+ ret = diag_process_apps_pkt(user_space_data, len,
+ current->tgid);
if (ret == 1)
diag_send_error_rsp((void *)(user_space_data), len,
- info);
+ current->tgid);
}
fail:
diagmem_free(driver, user_space_data, mempool);
@@ -3008,24 +3013,25 @@
if (!remote_proc) {
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
if (!session_info) {
pr_err("diag:In %s request came from invalid md session pid:%d",
__func__, current->tgid);
+ mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
+ mutex_unlock(&driver->md_session_lock);
if (!hdlc_disabled)
diag_process_hdlc_pkt((void *)
(driver->user_space_data_buf),
- len, session_info);
+ len, current->tgid);
else
diag_process_non_hdlc_pkt((char *)
(driver->user_space_data_buf),
- len, session_info);
+ len, current->tgid);
return 0;
}
@@ -3102,11 +3108,13 @@
mutex_lock(&apps_data_mutex);
mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
+ mutex_unlock(&driver->md_session_lock);
if (hdlc_disabled)
ret = diag_process_apps_data_non_hdlc(user_space_data, len,
pkt_type);
@@ -3177,9 +3185,9 @@
ret += sizeof(int);
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
exit_stat = diag_md_copy_to_user(buf, &ret, count,
session_info);
+ mutex_unlock(&driver->md_session_lock);
goto exit;
} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
/* In case, the thread wakes up and the logging mode is not
@@ -3199,14 +3207,16 @@
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
- mutex_unlock(&driver->md_session_lock);
if (session_info) {
COPY_USER_SPACE_OR_ERR(buf+4,
session_info->hdlc_disabled,
sizeof(uint8_t));
- if (ret == -EFAULT)
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
goto exit;
+ }
}
+ mutex_unlock(&driver->md_session_lock);
goto exit;
}
@@ -3226,12 +3236,16 @@
if (driver->data_ready[index] & MSG_MASKS_TYPE) {
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(APPS_DATA);
COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
- if (ret == -EFAULT)
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
goto exit;
+ }
write_len = diag_copy_to_user_msg_mask(buf + ret, count,
session_info);
+ mutex_unlock(&driver->md_session_lock);
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= MSG_MASKS_TYPE;
@@ -3242,25 +3256,32 @@
if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(APPS_DATA);
COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
- if (ret == -EFAULT)
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
goto exit;
-
+ }
if (session_info && session_info->event_mask &&
session_info->event_mask->ptr) {
COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
*(session_info->event_mask->ptr),
session_info->event_mask->mask_len);
- if (ret == -EFAULT)
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
goto exit;
+ }
} else {
COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
*(event_mask.ptr),
event_mask.mask_len);
- if (ret == -EFAULT)
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
goto exit;
+ }
}
+ mutex_unlock(&driver->md_session_lock);
driver->data_ready[index] ^= EVENT_MASKS_TYPE;
atomic_dec(&driver->data_ready_notif[index]);
goto exit;
@@ -3269,13 +3290,17 @@
if (driver->data_ready[index] & LOG_MASKS_TYPE) {
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(APPS_DATA);
COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
- if (ret == -EFAULT)
+ if (ret == -EFAULT) {
+ mutex_unlock(&driver->md_session_lock);
goto exit;
+ }
write_len = diag_copy_to_user_log_mask(buf + ret, count,
session_info);
+ mutex_unlock(&driver->md_session_lock);
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= LOG_MASKS_TYPE;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 83f44ce..33048e1 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -242,7 +242,7 @@
}
static void pack_rsp_and_send(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+ int pid)
{
int err;
int retry_count = 0, i, rsp_ctxt;
@@ -250,6 +250,7 @@
unsigned long flags;
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
struct diag_pkt_frame_t header;
+ struct diag_md_session_t *session_info = NULL, *info = NULL;
if (!rsp_ptr || !buf)
return;
@@ -260,6 +261,11 @@
return;
}
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(pid);
+ info = (session_info) ? session_info :
+ diag_md_session_get_peripheral(APPS_DATA);
+
/*
* Explicitly check for the Peripheral Modem here
* is necessary till a way to identify a peripheral
@@ -279,6 +285,7 @@
}
} else
rsp_ctxt = driver->rsp_buf_ctxt;
+ mutex_unlock(&driver->md_session_lock);
/*
* Keep trying till we get the buffer back. It should probably
@@ -302,8 +309,11 @@
* draining responses when we are in Memory Device Mode.
*/
if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
- driver->logging_mode == DIAG_MULTI_MODE)
+ driver->logging_mode == DIAG_MULTI_MODE) {
+ mutex_lock(&driver->md_session_lock);
chk_logging_wakeup();
+ mutex_unlock(&driver->md_session_lock);
+ }
}
if (driver->rsp_buf_busy) {
pr_err("diag: unable to get hold of response buffer\n");
@@ -332,13 +342,14 @@
}
static void encode_rsp_and_send(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+ int pid)
{
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
int err, i, rsp_ctxt, retry_count = 0;
unsigned long flags;
+ struct diag_md_session_t *session_info = NULL, *info = NULL;
if (!rsp_ptr || !buf)
return;
@@ -349,6 +360,11 @@
return;
}
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(pid);
+ info = (session_info) ? session_info :
+ diag_md_session_get_peripheral(APPS_DATA);
+
/*
* Explicitly check for the Peripheral Modem here
* is necessary till a way to identify a peripheral
@@ -368,7 +384,7 @@
}
} else
rsp_ctxt = driver->rsp_buf_ctxt;
-
+ mutex_unlock(&driver->md_session_lock);
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -391,8 +407,11 @@
* draining responses when we are in Memory Device Mode.
*/
if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
- driver->logging_mode == DIAG_MULTI_MODE)
+ driver->logging_mode == DIAG_MULTI_MODE) {
+ mutex_lock(&driver->md_session_lock);
chk_logging_wakeup();
+ mutex_unlock(&driver->md_session_lock);
+ }
}
if (driver->rsp_buf_busy) {
@@ -424,22 +443,23 @@
}
static void diag_send_rsp(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+ int pid)
{
- struct diag_md_session_t *session_info = NULL;
+ struct diag_md_session_t *session_info = NULL, *info = NULL;
uint8_t hdlc_disabled;
-
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
session_info = (info) ? info :
diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
-
+ mutex_unlock(&driver->md_session_lock);
if (hdlc_disabled)
- pack_rsp_and_send(buf, len, session_info);
+ pack_rsp_and_send(buf, len, pid);
else
- encode_rsp_and_send(buf, len, session_info);
+ encode_rsp_and_send(buf, len, pid);
}
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
@@ -506,13 +526,14 @@
int i, j;
mutex_lock(&driver->diagchar_mutex);
+ mutex_lock(&driver->md_session_lock);
for (i = 0; i < NUM_MD_SESSIONS; i++) {
if (driver->md_session_map[i] != NULL)
for (j = 0; j < driver->num_clients; j++) {
if (driver->client_map[j].pid != 0 &&
driver->client_map[j].pid ==
driver->md_session_map[i]->pid) {
- if (!(driver->data_ready[i] & type)) {
+ if (!(driver->data_ready[j] & type)) {
driver->data_ready[j] |= type;
atomic_inc(
&driver->data_ready_notif[j]);
@@ -521,6 +542,7 @@
}
}
}
+ mutex_unlock(&driver->md_session_lock);
wake_up_interruptible(&driver->wait_q);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -594,14 +616,15 @@
* Check if command is valid. If the command is asking for
* status, then the processor mask field is to be ignored.
*/
- if ((version != 2) || (cmd > STATUS_STM) ||
- ((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+ if ((version != 2) || (cmd > STM_AUTO_QUERY) ||
+ ((cmd != STATUS_STM && cmd != STM_AUTO_QUERY) &&
+ ((mask == 0) || (0 != (mask >> 4))))) {
/* Command is invalid. Send bad param message response */
dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
for (i = 0; i < STM_CMD_NUM_BYTES; i++)
dest_buf[i+1] = *(buf + i);
return STM_CMD_NUM_BYTES+1;
- } else if (cmd != STATUS_STM) {
+ } else if (cmd != STATUS_STM && cmd != STM_AUTO_QUERY) {
if (mask & DIAG_STM_MODEM)
diag_process_stm_mask(cmd, DIAG_STM_MODEM,
PERIPHERAL_MODEM);
@@ -981,7 +1004,7 @@
}
void diag_send_error_rsp(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+ int pid)
{
/* -1 to accommodate the first byte 0x13 */
if (len > (DIAG_MAX_RSP_SIZE - 1)) {
@@ -991,13 +1014,12 @@
*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
- diag_send_rsp(driver->apps_rsp_buf, len + 1, info);
+ diag_send_rsp(driver->apps_rsp_buf, len + 1, pid);
}
-int diag_process_apps_pkt(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
{
- int i;
+ int i, p_mask = 0;
int mask_ret;
int write_len = 0;
unsigned char *temp = NULL;
@@ -1006,14 +1028,15 @@
struct diag_cmd_reg_t *reg_item = NULL;
struct diagfwd_info *fwd_info = NULL;
uint32_t pd_mask = 0;
+ struct diag_md_session_t *info = NULL;
if (!buf)
return -EIO;
/* Check if the command is a supported mask command */
- mask_ret = diag_process_apps_masks(buf, len, info);
+ mask_ret = diag_process_apps_masks(buf, len, pid);
if (mask_ret > 0) {
- diag_send_rsp(driver->apps_rsp_buf, mask_ret, info);
+ diag_send_rsp(driver->apps_rsp_buf, mask_ret, pid);
return 0;
}
@@ -1035,7 +1058,7 @@
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
@@ -1044,18 +1067,22 @@
if (temp_entry) {
reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
entry);
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
if (info) {
+ p_mask = info->peripheral_mask;
+ mutex_unlock(&driver->md_session_lock);
MD_PERIPHERAL_PD_MASK(TYPE_CMD, reg_item->proc,
pd_mask);
if ((MD_PERIPHERAL_MASK(reg_item->proc) &
- info->peripheral_mask) ||
- (pd_mask & info->peripheral_mask))
+ p_mask) || (pd_mask & p_mask))
write_len = diag_send_data(reg_item, buf, len);
} else {
+ mutex_unlock(&driver->md_session_lock);
if (MD_PERIPHERAL_MASK(reg_item->proc) &
driver->logging_mask) {
mutex_unlock(&driver->cmd_reg_mutex);
- diag_send_error_rsp(buf, len, info);
+ diag_send_error_rsp(buf, len, pid);
return write_len;
}
else
@@ -1073,13 +1100,13 @@
for (i = 0; i < 4; i++)
*(driver->apps_rsp_buf+i) = *(buf+i);
*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
- diag_send_rsp(driver->apps_rsp_buf, 8, info);
+ diag_send_rsp(driver->apps_rsp_buf, 8, pid);
return 0;
} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
if (len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, len, info);
+ diag_send_rsp(driver->apps_rsp_buf, len, pid);
return 0;
}
return len;
@@ -1092,7 +1119,7 @@
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
/* Check for time sync switch command */
@@ -1103,7 +1130,7 @@
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
/* Check for diag id command */
@@ -1114,14 +1141,14 @@
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
/* Check for download command */
else if ((chk_apps_master()) && (*buf == 0x3A)) {
/* send response back */
driver->apps_rsp_buf[0] = *buf;
- diag_send_rsp(driver->apps_rsp_buf, 1, info);
+ diag_send_rsp(driver->apps_rsp_buf, 1, pid);
msleep(5000);
/* call download API */
msm_set_restart_mode(RESTART_DLOAD);
@@ -1141,7 +1168,7 @@
for (i = 0; i < 13; i++)
driver->apps_rsp_buf[i+3] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 16, info);
+ diag_send_rsp(driver->apps_rsp_buf, 16, pid);
return 0;
}
}
@@ -1150,7 +1177,7 @@
(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_enabled;
- diag_send_rsp(driver->apps_rsp_buf, 5, info);
+ diag_send_rsp(driver->apps_rsp_buf, 5, pid);
return 0;
}
/* Wrap the Delayed Rsp ID */
@@ -1159,7 +1186,7 @@
wrap_enabled = true;
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_count;
- diag_send_rsp(driver->apps_rsp_buf, 6, info);
+ diag_send_rsp(driver->apps_rsp_buf, 6, pid);
return 0;
}
/* Mobile ID Rsp */
@@ -1170,7 +1197,7 @@
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
}
@@ -1190,7 +1217,7 @@
for (i = 0; i < 55; i++)
driver->apps_rsp_buf[i] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 55, info);
+ diag_send_rsp(driver->apps_rsp_buf, 55, pid);
return 0;
}
/* respond to 0x7c command */
@@ -1203,14 +1230,14 @@
chk_config_get_id();
*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
- diag_send_rsp(driver->apps_rsp_buf, 14, info);
+ diag_send_rsp(driver->apps_rsp_buf, 14, pid);
return 0;
}
}
write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
@@ -1222,7 +1249,7 @@
* before disabling HDLC encoding on Apps processor.
*/
mutex_lock(&driver->hdlc_disable_mutex);
- diag_send_rsp(driver->apps_rsp_buf, write_len, info);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
/*
* Set the value of hdlc_disabled after sending the response to
* the tools. This is required since the tools is expecting a
@@ -1230,10 +1257,13 @@
*/
pr_debug("diag: In %s, disabling HDLC encoding\n",
__func__);
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
if (info)
info->hdlc_disabled = 1;
else
driver->hdlc_disabled = 1;
+ mutex_unlock(&driver->md_session_lock);
diag_update_md_clients(HDLC_SUPPORT_TYPE);
mutex_unlock(&driver->hdlc_disable_mutex);
return 0;
@@ -1242,13 +1272,12 @@
/* We have now come to the end of the function. */
if (chk_apps_only())
- diag_send_error_rsp(buf, len, info);
+ diag_send_error_rsp(buf, len, pid);
return 0;
}
-void diag_process_hdlc_pkt(void *data, unsigned int len,
- struct diag_md_session_t *info)
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid)
{
int err = 0;
int ret = 0;
@@ -1308,7 +1337,7 @@
}
err = diag_process_apps_pkt(driver->hdlc_buf,
- driver->hdlc_buf_len, info);
+ driver->hdlc_buf_len, pid);
if (err < 0)
goto fail;
} else {
@@ -1325,7 +1354,7 @@
* recovery algorithm. Send an error response if the
* packet is not in expected format.
*/
- diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info);
+ diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, pid);
driver->hdlc_buf_len = 0;
end:
mutex_unlock(&driver->diag_hdlc_mutex);
@@ -1422,9 +1451,11 @@
static uint8_t hdlc_reset;
-static void hdlc_reset_timer_start(struct diag_md_session_t *info)
+static void hdlc_reset_timer_start(int pid)
{
+ struct diag_md_session_t *info = NULL;
mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
if (!hdlc_timer_in_progress) {
hdlc_timer_in_progress = 1;
if (info)
@@ -1466,15 +1497,16 @@
}
static void diag_hdlc_start_recovery(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+ int pid)
{
int i;
static uint32_t bad_byte_counter;
unsigned char *start_ptr = NULL;
struct diag_pkt_frame_t *actual_pkt = NULL;
+ struct diag_md_session_t *info = NULL;
hdlc_reset = 1;
- hdlc_reset_timer_start(info);
+ hdlc_reset_timer_start(pid);
actual_pkt = (struct diag_pkt_frame_t *)buf;
for (i = 0; i < len; i++) {
@@ -1494,10 +1526,13 @@
pr_err("diag: In %s, re-enabling HDLC encoding\n",
__func__);
mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&driver->md_session_lock);
+ info = diag_md_session_get_pid(pid);
if (info)
info->hdlc_disabled = 0;
else
driver->hdlc_disabled = 0;
+ mutex_unlock(&driver->md_session_lock);
mutex_unlock(&driver->hdlc_disable_mutex);
diag_update_md_clients(HDLC_SUPPORT_TYPE);
@@ -1510,12 +1545,11 @@
mutex_lock(&driver->hdlc_recovery_mutex);
driver->incoming_pkt.processing = 0;
mutex_unlock(&driver->hdlc_recovery_mutex);
- diag_process_non_hdlc_pkt(start_ptr, len - i, info);
+ diag_process_non_hdlc_pkt(start_ptr, len - i, pid);
}
}
-void diag_process_non_hdlc_pkt(unsigned char *buf, int len,
- struct diag_md_session_t *info)
+void diag_process_non_hdlc_pkt(unsigned char *buf, int len, int pid)
{
int err = 0;
uint16_t pkt_len = 0;
@@ -1571,11 +1605,11 @@
if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
CONTROL_CHAR) {
mutex_unlock(&driver->hdlc_recovery_mutex);
- diag_hdlc_start_recovery(buf, len, info);
+ diag_hdlc_start_recovery(buf, len, pid);
mutex_lock(&driver->hdlc_recovery_mutex);
}
err = diag_process_apps_pkt(data_ptr,
- actual_pkt->length, info);
+ actual_pkt->length, pid);
if (err) {
pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
__func__, err);
@@ -1597,8 +1631,8 @@
pkt_len = actual_pkt->length;
if (actual_pkt->start != CONTROL_CHAR) {
- diag_hdlc_start_recovery(buf, len, info);
- diag_send_error_rsp(buf, len, info);
+ diag_hdlc_start_recovery(buf, len, pid);
+ diag_send_error_rsp(buf, len, pid);
goto end;
}
mutex_lock(&driver->hdlc_recovery_mutex);
@@ -1606,7 +1640,7 @@
pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
__func__, pkt_len);
mutex_unlock(&driver->hdlc_recovery_mutex);
- diag_hdlc_start_recovery(buf, len, info);
+ diag_hdlc_start_recovery(buf, len, pid);
break;
}
if ((pkt_len + header_len) > (len - read_bytes)) {
@@ -1623,13 +1657,13 @@
if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
CONTROL_CHAR) {
mutex_unlock(&driver->hdlc_recovery_mutex);
- diag_hdlc_start_recovery(buf, len, info);
+ diag_hdlc_start_recovery(buf, len, pid);
mutex_lock(&driver->hdlc_recovery_mutex);
}
else
hdlc_reset = 0;
err = diag_process_apps_pkt(data_ptr,
- actual_pkt->length, info);
+ actual_pkt->length, pid);
if (err) {
mutex_unlock(&driver->hdlc_recovery_mutex);
break;
@@ -1648,9 +1682,9 @@
return -EINVAL;
if (!driver->hdlc_disabled)
- diag_process_hdlc_pkt(buf, len, NULL);
+ diag_process_hdlc_pkt(buf, len, 0);
else
- diag_process_non_hdlc_pkt(buf, len, NULL);
+ diag_process_non_hdlc_pkt(buf, len, 0);
diag_mux_queue_read(ctxt);
return 0;
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 0e0bf2d..687aeb7 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -30,10 +30,8 @@
int diagfwd_init(void);
void diagfwd_exit(void);
-void diag_process_hdlc_pkt(void *data, unsigned int len,
- struct diag_md_session_t *info);
-void diag_process_non_hdlc_pkt(unsigned char *data, int len,
- struct diag_md_session_t *info);
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid);
+void diag_process_non_hdlc_pkt(unsigned char *data, int len, int pid);
int chk_config_get_id(void);
int chk_apps_only(void);
int chk_apps_master(void);
@@ -45,10 +43,8 @@
int diag_check_common_cmd(struct diag_pkt_header_t *header);
void diag_update_userspace_clients(unsigned int type);
void diag_update_sleeping_process(int process_id, int data_type);
-int diag_process_apps_pkt(unsigned char *buf, int len,
- struct diag_md_session_t *info);
-void diag_send_error_rsp(unsigned char *buf, int len,
- struct diag_md_session_t *info);
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid);
+void diag_send_error_rsp(unsigned char *buf, int len, int pid);
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
void diag_md_hdlc_reset_timer_func(unsigned long pid);
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 848ad87..4a0ee11 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -77,7 +77,7 @@
#define DISABLE_STM 0
#define ENABLE_STM 1
#define STATUS_STM 2
-
+#define STM_AUTO_QUERY 3
#define UPDATE_PERIPHERAL_STM_STATE 1
#define CLEAR_PERIPHERAL_STM_STATE 2
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index f27f358..f8c3fde 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -552,6 +552,8 @@
struct mhi_result *result = NULL;
struct diag_mhi_ch_t *ch = NULL;
void *buf = NULL;
+ struct diag_mhi_info *mhi_info = NULL;
+ unsigned long flags;
if (!cb_info)
return;
@@ -603,13 +605,6 @@
queue_work(diag_mhi[index].mhi_wq,
&(diag_mhi[index].open_work));
break;
- case MHI_CB_MHI_DISABLED:
- DIAG_LOG(DIAG_DEBUG_BRIDGE,
- "received mhi disabled notifiation port: %d ch: %d\n",
- index, ch->type);
- atomic_set(&(ch->opened), 0);
- __mhi_close(&diag_mhi[index], CHANNELS_CLOSED);
- break;
case MHI_CB_XFER:
/*
* If the channel is a read channel, this is a read
@@ -636,6 +631,24 @@
result->bytes_xferd,
diag_mhi[index].id);
break;
+ case MHI_CB_MHI_DISABLED:
+ case MHI_CB_SYS_ERROR:
+ case MHI_CB_MHI_SHUTDOWN:
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "received mhi link down cb: %d port: %d ch: %d\n",
+ cb_info->cb_reason, index, ch->type);
+ mhi_info = &diag_mhi[index];
+ if (!mhi_info->enabled)
+ return;
+ spin_lock_irqsave(&mhi_info->lock, flags);
+ mhi_info->enabled = 0;
+ spin_unlock_irqrestore(&mhi_info->lock, flags);
+ atomic_set(&(mhi_info->read_ch.opened), 0);
+ atomic_set(&(mhi_info->write_ch.opened), 0);
+ flush_workqueue(mhi_info->mhi_wq);
+ mhi_buf_tbl_clear(mhi_info);
+ diag_remote_dev_close(mhi_info->dev_id);
+ break;
default:
pr_err("diag: In %s, invalid cb reason 0x%x\n", __func__,
cb_info->cb_reason);
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index c7bd2205..c4e6107 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -343,14 +343,13 @@
diag_ws_release();
return;
}
-
- session_info =
- diag_md_session_get_peripheral(peripheral);
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_peripheral(peripheral);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
-
+ mutex_unlock(&driver->md_session_lock);
if (hdlc_disabled) {
/* The data is raw and and on APPS side HDLC is disabled */
if (!buf) {
@@ -633,12 +632,13 @@
mutex_lock(&driver->hdlc_disable_mutex);
mutex_lock(&fwd_info->data_mutex);
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
-
+ mutex_unlock(&driver->md_session_lock);
if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
temp_buf = fwd_info->buf_1;
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index af8bf00..f3c587d 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -513,8 +513,10 @@
info->hdl->sk->sk_user_data = NULL;
info->hdl->sk->sk_data_ready = NULL;
write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+ mutex_lock(&info->socket_info_mutex);
sock_release(info->hdl);
info->hdl = NULL;
+ mutex_unlock(&info->socket_info_mutex);
wake_up_interruptible(&info->read_wait_q);
}
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
@@ -820,6 +822,8 @@
break;
}
+ if (info->port_type == PORT_TYPE_CLIENT)
+ mutex_init(&info->socket_info_mutex);
info->svc_id = DIAG_SVC_ID;
info->ins_id = ins_base + ins_offset;
info->inited = 1;
@@ -1031,6 +1035,8 @@
diagfwd_deregister(info->peripheral, info->type, (void *)info);
info->fwd_ctxt = NULL;
info->hdl = NULL;
+ if (info->port_type == PORT_TYPE_CLIENT)
+ mutex_destroy(&info->socket_info_mutex);
if (info->wq)
destroy_workqueue(info->wq);
@@ -1119,13 +1125,28 @@
read_msg.msg_name = &src_addr;
read_msg.msg_namelen = sizeof(src_addr);
+ if (info->port_type != PORT_TYPE_SERVER) {
+ mutex_lock(&info->socket_info_mutex);
+ if (!info->hdl) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s closing read thread\n",
+ info->name);
+ mutex_unlock(&info->socket_info_mutex);
+ goto fail;
+ }
+ }
pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
MSG_PEEK);
- if (pkt_len <= 0)
+ if (pkt_len <= 0) {
+ if (info->port_type != PORT_TYPE_SERVER)
+ mutex_unlock(&info->socket_info_mutex);
break;
+ }
if (pkt_len > bytes_remaining) {
buf_full = 1;
+ if (info->port_type != PORT_TYPE_SERVER)
+ mutex_unlock(&info->socket_info_mutex);
break;
}
@@ -1135,6 +1156,8 @@
read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
pkt_len, 0);
+ if (info->port_type != PORT_TYPE_SERVER)
+ mutex_unlock(&info->socket_info_mutex);
if (read_len <= 0)
goto fail;
@@ -1211,7 +1234,16 @@
write_msg.msg_name = &info->remote_addr;
write_msg.msg_namelen = sizeof(info->remote_addr);
write_msg.msg_flags |= MSG_DONTWAIT;
+ if (info->port_type != PORT_TYPE_SERVER) {
+ mutex_lock(&info->socket_info_mutex);
+ if (!info->hdl) {
+ mutex_unlock(&info->socket_info_mutex);
+ return -ENODEV;
+ }
+ }
write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+ if (info->port_type != PORT_TYPE_SERVER)
+ mutex_unlock(&info->socket_info_mutex);
if (write_len < 0) {
err = write_len;
/*
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
index a9487b1..c42be06 100644
--- a/drivers/char/diag/diagfwd_socket.h
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -65,6 +65,7 @@
struct work_struct read_work;
struct diagfwd_info *fwd_ctxt;
wait_queue_head_t read_wait_q;
+ struct mutex socket_info_mutex;
};
union cntl_port_msg {
diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig
index 16f8c32..d881a5d 100644
--- a/drivers/clk/msm/Kconfig
+++ b/drivers/clk/msm/Kconfig
@@ -16,3 +16,5 @@
Generate clock data structures from definitions found in
device tree.
+source "drivers/clk/msm/mdss/Kconfig"
+
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
index 4176553..d264e79 100644
--- a/drivers/clk/msm/Makefile
+++ b/drivers/clk/msm/Makefile
@@ -17,3 +17,6 @@
obj-$(CONFIG_ARCH_MSM8953) += clock-cpu-8953.o
obj-$(CONFIG_ARCH_MSM8953) += clock-rcgwr.o
endif
+
+obj-y += mdss/
+
diff --git a/drivers/clk/msm/clock-gcc-8953.c b/drivers/clk/msm/clock-gcc-8953.c
index e25da83..fd80b56 100644
--- a/drivers/clk/msm/clock-gcc-8953.c
+++ b/drivers/clk/msm/clock-gcc-8953.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3822,6 +3822,7 @@
static const struct of_device_id msm_clock_gcc_match_table[] = {
{ .compatible = "qcom,gcc-8953" },
+ { .compatible = "qcom,gcc-sdm632" },
{},
};
@@ -3871,6 +3872,7 @@
static const struct of_device_id msm_clock_debug_match_table[] = {
{ .compatible = "qcom,cc-debug-8953" },
+ { .compatible = "qcom,cc-debug-sdm632" },
{}
};
@@ -3983,6 +3985,7 @@
static const struct of_device_id msm_clock_mdss_match_table[] = {
{ .compatible = "qcom,gcc-mdss-8953" },
+ { .compatible = "qcom,gcc-mdss-sdm632" },
{}
};
@@ -4072,8 +4075,17 @@
struct resource *res;
int ret;
u32 regval;
+ struct clk *xo_clk;
bool compat_bin = false;
+ /* Require the GCC-RPM-XO clock to be registered first */
+ xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(xo_clk)) {
+ if (PTR_ERR(xo_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get xo clock\n");
+ return PTR_ERR(xo_clk);
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
if (!res) {
dev_err(&pdev->dev, "Register base not defined\n");
@@ -4122,6 +4134,7 @@
static const struct of_device_id msm_clock_gfx_match_table[] = {
{ .compatible = "qcom,gcc-gfx-8953" },
{ .compatible = "qcom,gcc-gfx-sdm450" },
+ { .compatible = "qcom,gcc-gfx-sdm632" },
{}
};
diff --git a/drivers/clk/msm/mdss/Kconfig b/drivers/clk/msm/mdss/Kconfig
new file mode 100644
index 0000000..229780e
--- /dev/null
+++ b/drivers/clk/msm/mdss/Kconfig
@@ -0,0 +1,6 @@
+config MSM_MDSS_PLL
+ bool "MDSS pll programming"
+ ---help---
+ It provides support for DSI, eDP and HDMI interface pll programming on MDSS
+ hardware. It also handles the pll specific resources and turn them on/off when
+ mdss pll client tries to enable/disable pll clocks.
diff --git a/drivers/clk/msm/mdss/Makefile b/drivers/clk/msm/mdss/Makefile
new file mode 100644
index 0000000..6285714
--- /dev/null
+++ b/drivers/clk/msm/mdss/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-28lpm.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-28lpm.c b/drivers/clk/msm/mdss/mdss-dsi-pll-28lpm.c
new file mode 100644
index 0000000..17ff52e
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-28lpm.c
@@ -0,0 +1,522 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+
+#define VCO_DELAY_USEC 1000
+
+static struct clk_div_ops fixed_2div_ops;
+static struct clk_ops byte_mux_clk_ops;
+static struct clk_ops pixel_clk_src_ops;
+static struct clk_ops byte_clk_src_ops;
+static struct clk_ops analog_postdiv_clk_ops;
+static struct lpfr_cfg lpfr_lut_struct[] = {
+ {479500000, 8},
+ {480000000, 11},
+ {575500000, 8},
+ {576000000, 12},
+ {610500000, 8},
+ {659500000, 9},
+ {671500000, 10},
+ {672000000, 14},
+ {708500000, 10},
+ {750000000, 11},
+};
+
+static int vco_set_rate_lpm(struct clk *c, unsigned long rate)
+{
+ int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+ rc = mdss_pll_resource_enable(dsi_pll_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll resources\n");
+ return rc;
+ }
+
+ /*
+ * DSI PLL software reset. Add HW recommended delays after toggling
+ * the software reset bit off and back on.
+ */
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
+ udelay(1000);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
+ udelay(1000);
+
+ rc = vco_set_rate(vco, rate);
+
+ mdss_pll_resource_enable(dsi_pll_res, false);
+ return rc;
+}
+
+static void dsi_pll_sw_reset_8916(struct mdss_pll_resources *dsi_pll_res)
+{
+ /*
+ * DSI PLL software reset. Add HW recommended delays after toggling
+ * the software reset bit off and back on.
+ */
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
+ ndelay(500);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
+}
+
+static void dsi_pll_toggle_lock_detect_8916(
+ struct mdss_pll_resources *dsi_pll_res)
+{
+ /* DSI PLL toggle lock detect setting */
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x04);
+ ndelay(500);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05);
+ udelay(512);
+}
+
+static int dsi_pll_check_lock_status_8916(
+ struct mdss_pll_resources *dsi_pll_res)
+{
+ int rc = 0;
+
+ rc = dsi_pll_lock_status(dsi_pll_res);
+ if (rc)
+ pr_debug("PLL Locked\n");
+ else
+ pr_err("PLL failed to lock\n");
+
+ return rc;
+}
+
+
+static int gf_2_dsi_pll_enable_seq_8916(struct mdss_pll_resources *dsi_pll_res)
+{
+ int pll_locked = 0;
+
+ dsi_pll_sw_reset_8916(dsi_pll_res);
+
+ /*
+ * GF PART 2 PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x04);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+ udelay(3);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+ udelay(500);
+
+ dsi_pll_toggle_lock_detect_8916(dsi_pll_res);
+
+ pll_locked = dsi_pll_check_lock_status_8916(dsi_pll_res);
+ return pll_locked ? 0 : -EINVAL;
+}
+
+static int gf_1_dsi_pll_enable_seq_8916(struct mdss_pll_resources *dsi_pll_res)
+{
+ int pll_locked = 0;
+
+ dsi_pll_sw_reset_8916(dsi_pll_res);
+ /*
+ * GF PART 1 PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x14);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+ udelay(3);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+ udelay(500);
+
+ dsi_pll_toggle_lock_detect_8916(dsi_pll_res);
+
+ pll_locked = dsi_pll_check_lock_status_8916(dsi_pll_res);
+ return pll_locked ? 0 : -EINVAL;
+}
+
+static int tsmc_dsi_pll_enable_seq_8916(struct mdss_pll_resources *dsi_pll_res)
+{
+ int pll_locked = 0;
+
+ dsi_pll_sw_reset_8916(dsi_pll_res);
+ /*
+ * TSMC PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x34);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+ MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+ DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+ udelay(500);
+
+ dsi_pll_toggle_lock_detect_8916(dsi_pll_res);
+
+ pll_locked = dsi_pll_check_lock_status_8916(dsi_pll_res);
+ return pll_locked ? 0 : -EINVAL;
+}
+
+/* Op structures */
+
+static const struct clk_ops clk_ops_dsi_vco = {
+ .set_rate = vco_set_rate_lpm,
+ .round_rate = vco_round_rate,
+ .handoff = vco_handoff,
+ .prepare = vco_prepare,
+ .unprepare = vco_unprepare,
+};
+
+
+static struct clk_div_ops fixed_4div_ops = {
+ .set_div = fixed_4div_set_div,
+ .get_div = fixed_4div_get_div,
+};
+
+static struct clk_div_ops analog_postdiv_ops = {
+ .set_div = analog_set_div,
+ .get_div = analog_get_div,
+};
+
+static struct clk_div_ops digital_postdiv_ops = {
+ .set_div = digital_set_div,
+ .get_div = digital_get_div,
+};
+
+static struct clk_mux_ops byte_mux_ops = {
+ .set_mux_sel = set_byte_mux_sel,
+ .get_mux_sel = get_byte_mux_sel,
+};
+
+/* DSI PLL0 clock structures */
+static struct dsi_pll_vco_clk dsi_pll0_vco_clk = {
+ .ref_clk_rate = 19200000,
+ .min_rate = 350000000,
+ .max_rate = 750000000,
+ .pll_en_seq_cnt = 9,
+ .pll_enable_seqs[0] = tsmc_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[1] = tsmc_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[2] = tsmc_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[3] = gf_1_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[4] = gf_1_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[5] = gf_1_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[6] = gf_2_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[7] = gf_2_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[8] = gf_2_dsi_pll_enable_seq_8916,
+ .lpfr_lut_size = 10,
+ .lpfr_lut = lpfr_lut_struct,
+ .c = {
+ .dbg_name = "dsi_pll0_vco_clk",
+ .ops = &clk_ops_dsi_vco,
+ CLK_INIT(dsi_pll0_vco_clk.c),
+ },
+};
+
+static struct div_clk dsi_pll0_analog_postdiv_clk = {
+ .data = {
+ .max_div = 255,
+ .min_div = 1,
+ },
+ .ops = &analog_postdiv_ops,
+ .c = {
+ .parent = &dsi_pll0_vco_clk.c,
+ .dbg_name = "dsi_pll0_analog_postdiv_clk",
+ .ops = &analog_postdiv_clk_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi_pll0_analog_postdiv_clk.c),
+ },
+};
+
+static struct div_clk dsi_pll0_indirect_path_div2_clk = {
+ .ops = &fixed_2div_ops,
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi_pll0_analog_postdiv_clk.c,
+ .dbg_name = "dsi_pll0_indirect_path_div2_clk",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi_pll0_indirect_path_div2_clk.c),
+ },
+};
+
+static struct div_clk dsi_pll0_pixel_clk_src = {
+ .data = {
+ .max_div = 255,
+ .min_div = 1,
+ },
+ .ops = &digital_postdiv_ops,
+ .c = {
+ .parent = &dsi_pll0_vco_clk.c,
+ .dbg_name = "dsi_pll0_pixel_clk_src",
+ .ops = &pixel_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi_pll0_pixel_clk_src.c),
+ },
+};
+
+static struct mux_clk dsi_pll0_byte_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]){
+ {&dsi_pll0_vco_clk.c, 0},
+ {&dsi_pll0_indirect_path_div2_clk.c, 1},
+ },
+ .ops = &byte_mux_ops,
+ .c = {
+ .parent = &dsi_pll0_vco_clk.c,
+ .dbg_name = "dsi_pll0_byte_mux",
+ .ops = &byte_mux_clk_ops,
+ CLK_INIT(dsi_pll0_byte_mux.c),
+ },
+};
+
+static struct div_clk dsi_pll0_byte_clk_src = {
+ .ops = &fixed_4div_ops,
+ .data = {
+ .min_div = 4,
+ .max_div = 4,
+ },
+ .c = {
+ .parent = &dsi_pll0_byte_mux.c,
+ .dbg_name = "dsi_pll0_byte_clk_src",
+ .ops = &byte_clk_src_ops,
+ CLK_INIT(dsi_pll0_byte_clk_src.c),
+ },
+};
+
+/* DSI PLL1 clock structures */
+static struct dsi_pll_vco_clk dsi_pll1_vco_clk = {
+ .ref_clk_rate = 19200000,
+ .min_rate = 350000000,
+ .max_rate = 750000000,
+ .pll_en_seq_cnt = 9,
+ .pll_enable_seqs[0] = tsmc_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[1] = tsmc_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[2] = tsmc_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[3] = gf_1_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[4] = gf_1_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[5] = gf_1_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[6] = gf_2_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[7] = gf_2_dsi_pll_enable_seq_8916,
+ .pll_enable_seqs[8] = gf_2_dsi_pll_enable_seq_8916,
+ .lpfr_lut_size = 10,
+ .lpfr_lut = lpfr_lut_struct,
+ .c = {
+ .dbg_name = "dsi_pll1_vco_clk",
+ .ops = &clk_ops_dsi_vco,
+ CLK_INIT(dsi_pll1_vco_clk.c),
+ },
+};
+
+static struct div_clk dsi_pll1_analog_postdiv_clk = {
+ .data = {
+ .max_div = 255,
+ .min_div = 1,
+ },
+ .ops = &analog_postdiv_ops,
+ .c = {
+ .parent = &dsi_pll1_vco_clk.c,
+ .dbg_name = "dsi_pll1_analog_postdiv_clk",
+ .ops = &analog_postdiv_clk_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi_pll1_analog_postdiv_clk.c),
+ },
+};
+
+static struct div_clk dsi_pll1_indirect_path_div2_clk = {
+ .ops = &fixed_2div_ops,
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi_pll1_analog_postdiv_clk.c,
+ .dbg_name = "dsi_pll1_indirect_path_div2_clk",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi_pll1_indirect_path_div2_clk.c),
+ },
+};
+
+static struct div_clk dsi_pll1_pixel_clk_src = {
+ .data = {
+ .max_div = 255,
+ .min_div = 1,
+ },
+ .ops = &digital_postdiv_ops,
+ .c = {
+ .parent = &dsi_pll1_vco_clk.c,
+ .dbg_name = "dsi_pll1_pixel_clk_src",
+ .ops = &pixel_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi_pll1_pixel_clk_src.c),
+ },
+};
+
+static struct mux_clk dsi_pll1_byte_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]){
+ {&dsi_pll1_vco_clk.c, 0},
+ {&dsi_pll1_indirect_path_div2_clk.c, 1},
+ },
+ .ops = &byte_mux_ops,
+ .c = {
+ .parent = &dsi_pll1_vco_clk.c,
+ .dbg_name = "dsi_pll1_byte_mux",
+ .ops = &byte_mux_clk_ops,
+ CLK_INIT(dsi_pll1_byte_mux.c),
+ },
+};
+
+static struct div_clk dsi_pll1_byte_clk_src = {
+ .ops = &fixed_4div_ops,
+ .data = {
+ .min_div = 4,
+ .max_div = 4,
+ },
+ .c = {
+ .parent = &dsi_pll1_byte_mux.c,
+ .dbg_name = "dsi_pll1_byte_clk_src",
+ .ops = &byte_clk_src_ops,
+ CLK_INIT(dsi_pll1_byte_clk_src.c),
+ },
+};
+
+static struct clk_lookup dsi_pll0_cc[] = {
+ CLK_LIST(dsi_pll0_pixel_clk_src),
+ CLK_LIST(dsi_pll0_byte_clk_src),
+};
+
+static struct clk_lookup dsi_pll1_cc[] = {
+ CLK_LIST(dsi_pll1_pixel_clk_src),
+ CLK_LIST(dsi_pll1_byte_clk_src),
+};
+
+int dsi_pll_clock_register_lpm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc;
+ int const ssc_freq_min = 30000; /* min. recommended freq. value */
+ int const ssc_freq_max = 33000; /* max. recommended freq. value */
+ int const ssc_ppm_max = 5000; /* max. recommended ppm */
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ if (!pll_res || !pll_res->pll_base) {
+ pr_err("Invalid PLL resources\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Set client data to mux, div and vco clocks */
+ if (!pll_res->index) {
+ dsi_pll0_byte_clk_src.priv = pll_res;
+ dsi_pll0_pixel_clk_src.priv = pll_res;
+ dsi_pll0_byte_mux.priv = pll_res;
+ dsi_pll0_indirect_path_div2_clk.priv = pll_res;
+ dsi_pll0_analog_postdiv_clk.priv = pll_res;
+ dsi_pll0_vco_clk.priv = pll_res;
+ } else {
+ dsi_pll1_byte_clk_src.priv = pll_res;
+ dsi_pll1_pixel_clk_src.priv = pll_res;
+ dsi_pll1_byte_mux.priv = pll_res;
+ dsi_pll1_indirect_path_div2_clk.priv = pll_res;
+ dsi_pll1_analog_postdiv_clk.priv = pll_res;
+ dsi_pll1_vco_clk.priv = pll_res;
+ }
+
+ pll_res->vco_delay = VCO_DELAY_USEC;
+
+ /* Set clock source operations */
+ pixel_clk_src_ops = clk_ops_slave_div;
+ pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+ analog_postdiv_clk_ops = clk_ops_div;
+ analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare;
+
+ byte_clk_src_ops = clk_ops_div;
+ byte_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+ byte_mux_clk_ops = clk_ops_gen_mux;
+ byte_mux_clk_ops.prepare = dsi_pll_mux_prepare;
+
+ if (pll_res->ssc_en) {
+ if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
+ (pll_res->ssc_freq > ssc_freq_max)) {
+ pll_res->ssc_freq = ssc_freq_min;
+ pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
+ pll_res->ssc_freq);
+ }
+
+ if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
+ pll_res->ssc_ppm = ssc_ppm_max;
+ pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
+ pll_res->ssc_ppm);
+ }
+ }
+
+ if ((pll_res->target_id == MDSS_PLL_TARGET_8952) ||
+ (pll_res->target_id == MDSS_PLL_TARGET_8937) ||
+ (pll_res->target_id == MDSS_PLL_TARGET_8909)) {
+ if (!pll_res->index)
+ rc = of_msm_clock_register(pdev->dev.of_node,
+ dsi_pll0_cc, ARRAY_SIZE(dsi_pll0_cc));
+ else
+ rc = of_msm_clock_register(pdev->dev.of_node,
+ dsi_pll1_cc, ARRAY_SIZE(dsi_pll1_cc));
+ if (rc) {
+ pr_err("Clock register failed\n");
+ rc = -EPROBE_DEFER;
+ }
+ } else {
+ pr_err("Invalid target ID\n");
+ rc = -EINVAL;
+ }
+
+ if (!rc)
+ pr_info("Registered DSI PLL:%d clocks successfully\n",
+ pll_res->index);
+
+ return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
new file mode 100644
index 0000000..c5d12e5
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
@@ -0,0 +1,1239 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define DSI_PLL_POLL_MAX_READS 15
+#define DSI_PLL_POLL_TIMEOUT_US 1000
+#define MSM8996_DSI_PLL_REVISION_2 2
+
+#define DSI_PHY_SPARE_VAL 0x6a
+#define DSI_PLL_DEFAULT_POSTDIV 1
+
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
+static void pll_db_commit_8996(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb);
+
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+ return 0;
+}
+
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
+{
+ return 0;
+}
+
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+ return 0;
+}
+
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
+{
+ return 0;
+}
+
+int post_n1_div_set_div(struct div_clk *clk, int div)
+{
+ struct mdss_pll_resources *pll = clk->priv;
+ struct dsi_pll_db *pdb;
+ struct dsi_pll_output *pout;
+ int rc;
+ u32 n1div = 0;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll resources\n");
+ return rc;
+ }
+
+ pdb = (struct dsi_pll_db *)pll->priv;
+ pout = &pdb->out;
+
+ /*
+ * vco rate = bit_clk * postdiv * n1div
+ * vco range from 1300 to 2600 Mhz
+ * postdiv = 1
+ * n1div = 1 to 15
+ * n1div = roundup(1300Mhz / bit_clk)
+ * support bit_clk above 86.67Mhz
+ */
+
+ /* this is for vco/bit clock */
+ pout->pll_postdiv = DSI_PLL_DEFAULT_POSTDIV;
+ pout->pll_n1div = div;
+
+ n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+ n1div &= ~0xf;
+ n1div |= (div & 0xf);
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
+	/* ensure n1 divider is programmed */
+ wmb();
+ pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
+ pll->index, div, pout->pll_postdiv, pout->pll_n1div);
+
+ mdss_pll_resource_enable(pll, false);
+
+ return 0;
+}
+
+int post_n1_div_get_div(struct div_clk *clk)
+{
+ u32 div;
+ int rc;
+ struct mdss_pll_resources *pll = clk->priv;
+
+ if (is_gdsc_disabled(pll))
+ return 0;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll resources\n");
+ return rc;
+ }
+
+ /*
+ * postdiv = 1/2/4/8
+ * n1div = 1 - 15
+	 * for the time being, assume postdiv = 1
+ */
+
+ div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+ div &= 0xF;
+ pr_debug("n1 div = %d\n", div);
+
+ mdss_pll_resource_enable(pll, false);
+
+ return div;
+}
+
+int n2_div_set_div(struct div_clk *clk, int div)
+{
+ int rc;
+ u32 n2div;
+ struct mdss_pll_resources *pll = clk->priv;
+ struct dsi_pll_db *pdb;
+ struct dsi_pll_output *pout;
+ struct mdss_pll_resources *slave;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll resources\n");
+ return rc;
+ }
+
+ pdb = (struct dsi_pll_db *)pll->priv;
+ pout = &pdb->out;
+
+ /* this is for pixel_clock */
+ n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+ n2div &= ~0xf0; /* bits 4 to 7 */
+ n2div |= (div << 4);
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+ /* commit slave if split display is enabled */
+ slave = pll->slave;
+ if (slave)
+ MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+ pout->pll_n2div = div;
+
+ /* set dsiclk_sel=1 so that n2div *= 2 */
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
+ pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
+
+ mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+int shadow_n2_div_set_div(struct div_clk *clk, int div)
+{
+ struct mdss_pll_resources *pll = clk->priv;
+ struct dsi_pll_db *pdb;
+ struct dsi_pll_output *pout;
+ u32 data;
+
+ pdb = pll->priv;
+ pout = &pdb->out;
+
+ pout->pll_n2div = div;
+
+ data = (pout->pll_n1div | (pout->pll_n2div << 4));
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+ DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
+ data, 1);
+ return 0;
+}
+
+int n2_div_get_div(struct div_clk *clk)
+{
+ int rc;
+ u32 n2div;
+ struct mdss_pll_resources *pll = clk->priv;
+
+ if (is_gdsc_disabled(pll))
+ return 0;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll=%d resources\n",
+ pll->index);
+ return rc;
+ }
+
+ n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+ n2div >>= 4;
+ n2div &= 0x0f;
+
+ mdss_pll_resource_enable(pll, false);
+
+ pr_debug("ndx=%d div=%d\n", pll->index, n2div);
+
+ return n2div;
+}
+
+static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(5)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+			pll->index, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x PLL not ready\n",
+			pll->index, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static void dsi_pll_start_8996(void __iomem *pll_base)
+{
+	pr_debug("start PLL at base=%pK\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
+	wmb(); /* make sure register committed */
+}
+
+static void dsi_pll_stop_8996(void __iomem *pll_base)
+{
+	pr_debug("stop PLL at base=%pK\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+	wmb(); /* make sure register committed */
+}
+
+static inline bool pll_use_precal(struct mdss_pll_resources *pll)
+{
+ bool ret = true;
+ u32 spare = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_CMN_GLBL_DIGTOP_SPARE2);
+
+ if (!pll->cache_pll_trim_codes[0] || /* kvco code */
+ !pll->cache_pll_trim_codes[1] || /* vco tune */
+ !pll->cache_pll_trim_codes_rate ||
+ (pll->cache_pll_trim_codes_rate != pll->vco_current_rate) ||
+ (spare != DSI_PHY_SPARE_VAL)) /* phy reset */
+ ret = false;
+
+ pr_debug("ndx:%d kvco:%d vco_tune:%d spare:0x%x rate:%llu old:%llu ret:%d\n",
+ pll->index, pll->cache_pll_trim_codes[0],
+ pll->cache_pll_trim_codes[1], spare,
+ pll->cache_pll_trim_codes_rate,
+ pll->vco_current_rate, ret);
+
+ return ret;
+}
+
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
+{
+ int rc = 0;
+ struct dsi_pll_db *pdb;
+ struct mdss_pll_resources *slave;
+
+ if (!pll) {
+ pr_err("Invalid PLL resources\n");
+ return -EINVAL;
+ }
+
+ pdb = (struct dsi_pll_db *)pll->priv;
+ if (!pdb) {
+ pr_err("No priv found\n");
+ return -EINVAL;
+ }
+
+ dsi_pll_start_8996(pll->pll_base);
+
+ /*
+ * both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
+ * enabled at mdss_dsi_8996_phy_config()
+ */
+
+ if (!pll_is_pll_locked_8996(pll)) {
+ pr_err("DSI PLL ndx=%d lock failed, retry full sequence!\n",
+ pll->index);
+ slave = pll->slave;
+
+ /* commit slave if split display is enabled */
+ if (slave)
+ pll_db_commit_8996(slave, pdb);
+
+ /* commit master itself */
+ pll_db_commit_8996(pll, pdb);
+
+ dsi_pll_start_8996(pll->pll_base);
+ if (!pll_is_pll_locked_8996(pll)) {
+ pr_err("DSI PLL ndx=%d lock failed!!!\n",
+ pll->index);
+ rc = -EINVAL;
+ goto init_lock_err;
+ }
+ }
+
+ if (!pll_use_precal(pll)) {
+ /* cache vco settings */
+ pll->cache_pll_trim_codes[0] = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_CORE_KVCO_CODE_STATUS);
+ pll->cache_pll_trim_codes[1] = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_CORE_VCO_TUNE_STATUS);
+ pll->cache_pll_trim_codes_rate = pll->vco_current_rate;
+
+ /* write spare */
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_GLBL_DIGTOP_SPARE2,
+ DSI_PHY_SPARE_VAL);
+ }
+
+ pr_debug("DSI PLL ndx:%d Locked! kvco=0x%x vco_tune=0x%x rate=%llu\n",
+ pll->index, pll->cache_pll_trim_codes[0],
+ pll->cache_pll_trim_codes[1],
+ pll->cache_pll_trim_codes_rate);
+
+init_lock_err:
+ return rc;
+}
+
+static int dsi_pll_enable(struct clk *c)
+{
+ int i, rc = 0;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ /* Try all enable sequences until one succeeds */
+ for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+ rc = vco->pll_enable_seqs[i](pll);
+ pr_debug("DSI PLL %s after sequence #%d\n",
+ rc ? "unlocked" : "locked", i + 1);
+ if (!rc)
+ break;
+ }
+
+ if (rc)
+ pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
+ else
+ pll->pll_on = true;
+
+ return rc;
+}
+
+static void dsi_pll_disable(struct clk *c)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+ struct mdss_pll_resources *slave;
+
+ if (!pll->pll_on &&
+ mdss_pll_resource_enable(pll, true)) {
+ pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+ return;
+ }
+
+ pll->handoff_resources = false;
+ slave = pll->slave;
+
+ dsi_pll_stop_8996(pll->pll_base);
+
+ mdss_pll_resource_enable(pll, false);
+
+ pll->pll_on = false;
+
+ pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
+}
+
+static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ pdb->in.fref = 19200000; /* 19.2 Mhz*/
+ pdb->in.fdata = 0; /* bit clock rate */
+ pdb->in.dsiclk_sel = 1; /* 1, reg: 0x0014 */
+ pdb->in.ssc_en = pll->ssc_en; /* 1, reg: 0x0494, bit 0 */
+ pdb->in.ldo_en = 0; /* 0, reg: 0x004c, bit 0 */
+
+ /* fixed input */
+ pdb->in.refclk_dbler_en = 0; /* 0, reg: 0x04c0, bit 1 */
+ pdb->in.vco_measure_time = 5; /* 5, unknown */
+ pdb->in.kvco_measure_time = 5; /* 5, unknown */
+ pdb->in.bandgap_timer = 4; /* 4, reg: 0x0430, bit 3 - 5 */
+ pdb->in.pll_wakeup_timer = 5; /* 5, reg: 0x043c, bit 0 - 2 */
+ pdb->in.plllock_cnt = 1; /* 1, reg: 0x0488, bit 1 - 2 */
+ pdb->in.plllock_rng = 0; /* 0, reg: 0x0488, bit 3 - 4 */
+ pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
+ pdb->in.ssc_adj_period = 37; /* 37, reg: 0x498, bit 0 - 9 */
+ pdb->in.ssc_spread = pll->ssc_ppm / 1000;
+ pdb->in.ssc_freq = pll->ssc_freq;
+
+ pdb->in.pll_ie_trim = 4; /* 4, reg: 0x0400 */
+ pdb->in.pll_ip_trim = 4; /* 4, reg: 0x0404 */
+ pdb->in.pll_cpcset_cur = 1; /* 1, reg: 0x04f0, bit 0 - 2 */
+ pdb->in.pll_cpmset_cur = 1; /* 1, reg: 0x04f0, bit 3 - 5 */
+ pdb->in.pll_icpmset = 4; /* 4, reg: 0x04fc, bit 3 - 5 */
+ pdb->in.pll_icpcset = 4; /* 4, reg: 0x04fc, bit 0 - 2 */
+ pdb->in.pll_icpmset_p = 0; /* 0, reg: 0x04f4, bit 0 - 2 */
+ pdb->in.pll_icpmset_m = 0; /* 0, reg: 0x04f4, bit 3 - 5 */
+ pdb->in.pll_icpcset_p = 0; /* 0, reg: 0x04f8, bit 0 - 2 */
+ pdb->in.pll_icpcset_m = 0; /* 0, reg: 0x04f8, bit 3 - 5 */
+ pdb->in.pll_lpf_res1 = 3; /* 3, reg: 0x0504, bit 0 - 3 */
+ pdb->in.pll_lpf_cap1 = 11; /* 11, reg: 0x0500, bit 0 - 3 */
+ pdb->in.pll_lpf_cap2 = 1; /* 1, reg: 0x0500, bit 4 - 7 */
+ pdb->in.pll_iptat_trim = 7;
+ pdb->in.pll_c3ctrl = 2; /* 2 */
+ pdb->in.pll_r3ctrl = 1; /* 1 */
+}
+
+static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ u32 period, ssc_period;
+ u32 ref, rem;
+ s64 step_size;
+
+ pr_debug("%s: vco=%lld ref=%lld\n", __func__,
+ pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+ ssc_period = pdb->in.ssc_freq / 500;
+ period = (unsigned long)pll->vco_ref_clk_rate / 1000;
+ ssc_period = CEIL(period, ssc_period);
+ ssc_period -= 1;
+ pdb->out.ssc_period = ssc_period;
+
+ pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
+ pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
+
+ step_size = (u32)pll->vco_current_rate;
+ ref = pll->vco_ref_clk_rate;
+ ref /= 1000;
+ step_size = div_s64(step_size, ref);
+ step_size <<= 20;
+ step_size = div_s64(step_size, 1000);
+ step_size *= pdb->in.ssc_spread;
+ step_size = div_s64(step_size, 1000);
+ step_size *= (pdb->in.ssc_adj_period + 1);
+
+ rem = 0;
+ step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
+ if (rem)
+ step_size++;
+
+ pr_debug("%s: step_size=%lld\n", __func__, step_size);
+
+ step_size &= 0x0ffff; /* take lower 16 bits */
+
+ pdb->out.ssc_step_size = step_size;
+}
+
+static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ struct dsi_pll_input *pin = &pdb->in;
+ struct dsi_pll_output *pout = &pdb->out;
+ s64 multiplier = BIT(20);
+ s64 dec_start_multiple, dec_start, pll_comp_val;
+ s32 duration, div_frac_start;
+ s64 vco_clk_rate = pll->vco_current_rate;
+ s64 fref = pll->vco_ref_clk_rate;
+
+ pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
+ vco_clk_rate, fref);
+
+ dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
+ div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+ dec_start = div_s64(dec_start_multiple, multiplier);
+
+ pout->dec_start = (u32)dec_start;
+ pout->div_frac_start = div_frac_start;
+
+ if (pin->plllock_cnt == 0)
+ duration = 1024;
+ else if (pin->plllock_cnt == 1)
+ duration = 256;
+ else if (pin->plllock_cnt == 2)
+ duration = 128;
+ else
+ duration = 32;
+
+ pll_comp_val = duration * dec_start_multiple;
+ pll_comp_val = div_s64(pll_comp_val, multiplier);
+ do_div(pll_comp_val, 10);
+
+ pout->plllock_cmp = (u32)pll_comp_val;
+
+ pout->pll_txclk_en = 1;
+ if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
+ pout->cmn_ldo_cntrl = 0x3c;
+ else
+ pout->cmn_ldo_cntrl = 0x1c;
+}
+
+static u32 pll_8996_kvco_slop(u32 vrate)
+{
+ u32 slop = 0;
+
+ if (vrate > 1300000000UL && vrate <= 1800000000UL)
+ slop = 600;
+ else if (vrate > 1800000000UL && vrate < 2300000000UL)
+ slop = 400;
+ else if (vrate > 2300000000UL && vrate < 2600000000UL)
+ slop = 280;
+
+ return slop;
+}
+
+static inline u32 pll_8996_calc_kvco_code(s64 vco_clk_rate)
+{
+ u32 kvco_code;
+
+ if ((vco_clk_rate >= 2300000000ULL) &&
+ (vco_clk_rate <= 2600000000ULL))
+ kvco_code = 0x2f;
+ else if ((vco_clk_rate >= 1800000000ULL) &&
+ (vco_clk_rate < 2300000000ULL))
+ kvco_code = 0x2c;
+ else
+ kvco_code = 0x28;
+
+ pr_debug("rate: %llu kvco_code: 0x%x\n",
+ vco_clk_rate, kvco_code);
+ return kvco_code;
+}
+
+static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
+ s64 vco_clk_rate, s64 fref)
+{
+ struct dsi_pll_input *pin = &pdb->in;
+ struct dsi_pll_output *pout = &pdb->out;
+ s64 data;
+ u32 cnt;
+
+ data = fref * pin->vco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 2;
+ pout->pll_vco_div_ref = data;
+
+ data = (unsigned long)vco_clk_rate / 1000000; /* unit is Mhz */
+ data *= pin->vco_measure_time;
+ do_div(data, 10);
+ pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
+
+ data = fref * pin->kvco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 1;
+ pout->pll_kvco_div_ref = data;
+
+ cnt = pll_8996_kvco_slop(vco_clk_rate);
+ cnt *= 2;
+ do_div(cnt, 100);
+ cnt *= pin->kvco_measure_time;
+ pout->pll_kvco_count = cnt;
+
+ pout->pll_misc1 = 16;
+ pout->pll_resetsm_cntrl = 48;
+ pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+ pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+ pout->pll_kvco_code = pll_8996_calc_kvco_code(vco_clk_rate);
+}
+
+static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ void __iomem *pll_base = pll->pll_base;
+ struct dsi_pll_input *pin = &pdb->in;
+ struct dsi_pll_output *pout = &pdb->out;
+ char data;
+
+ data = pin->ssc_adj_period;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
+ data = (pin->ssc_adj_period >> 8);
+ data &= 0x03;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
+
+ data = pout->ssc_period;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
+ data = (pout->ssc_period >> 8);
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
+
+ data = pout->ssc_step_size;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
+ data = (pout->ssc_step_size >> 8);
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
+
+ data = (pin->ssc_center & 0x01);
+ data <<= 1;
+ data |= 0x01; /* enable */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
+
+ wmb(); /* make sure register committed */
+}
+
+static int pll_precal_commit_8996(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ void __iomem *pll_base = pll->pll_base;
+ struct dsi_pll_output *pout = &pdb->out;
+ char data;
+
+ /*
+ * if pre-calibrated values cannot be used, return
+ * error, so we use full sequence.
+ */
+ if (!pll_use_precal(pll)) {
+ pr_debug("cannot use precal sequence ndx:%d\n", pll->index);
+ return -EINVAL;
+ }
+
+ data = pout->cmn_ldo_cntrl;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
+
+ /* stop pll */
+ data = 0;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, data);
+
+ data = 0x7f;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+ data = 0x20;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, data);
+
+ data = 0x38;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
+
+ data = BIT(7);
+ data |= pll->cache_pll_trim_codes[1]; /* vco tune */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_VCO_TUNE, data);
+
+ data = BIT(5);
+ data |= pll->cache_pll_trim_codes[0]; /* kvco code */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_CODE, data);
+
+ data = 0xff; /* data, clk, pll normal operation */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+ data = 0x0;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, data);
+ wmb(); /* make sure register committed */
+
+ return 0;
+}
+
+static void pll_db_commit_common(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ void __iomem *pll_base = pll->pll_base;
+ struct dsi_pll_input *pin = &pdb->in;
+ struct dsi_pll_output *pout = &pdb->out;
+ char data;
+
+	/* configure the non frequency dependent pll registers */
+ data = 0;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
+
+ /* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
+
+ data = pout->pll_txclk_en;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
+
+ data = pout->pll_resetsm_cntrl;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
+ data = pout->pll_resetsm_cntrl2;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
+ data = pout->pll_resetsm_cntrl5;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
+
+ data = pout->pll_vco_div_ref;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
+ data = (pout->pll_vco_div_ref >> 8);
+ data &= 0x03;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
+
+ data = pout->pll_kvco_div_ref;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
+ data = (pout->pll_kvco_div_ref >> 8);
+ data &= 0x03;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
+
+ data = pout->pll_misc1;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
+
+ data = pin->pll_ie_trim;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
+
+ data = pin->pll_ip_trim;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
+
+ data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
+
+ data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
+
+	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpmset_m);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
+
+ data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
+
+ data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
+
+ data = pin->pll_iptat_trim;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
+
+ data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_db_commit_8996(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ void __iomem *pll_base = pll->pll_base;
+ struct dsi_pll_input *pin = &pdb->in;
+ struct dsi_pll_output *pout = &pdb->out;
+ char data;
+
+ data = pout->cmn_ldo_cntrl;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
+
+ pll_db_commit_common(pll, pdb);
+
+ /* de assert pll start and apply pll sw reset */
+ /* stop pll */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+
+ /* pll sw reset */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
+ wmb(); /* make sure register committed */
+ udelay(10);
+
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
+ wmb(); /* make sure register committed */
+
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_VCO_TUNE, 0);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_CODE, 0);
+ wmb(); /* make sure register committed */
+
+ data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1 */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
+
+ data = 0xff; /* data, clk, pll normal operation */
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+ /* configure the frequency dependent pll registers */
+ data = pout->dec_start;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
+
+ data = pout->div_frac_start;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
+ data = (pout->div_frac_start >> 8);
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
+ data = (pout->div_frac_start >> 16);
+ data &= 0x0f;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
+
+ data = pout->plllock_cmp;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
+ data = (pout->plllock_cmp >> 8);
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
+ data = (pout->plllock_cmp >> 16);
+ data &= 0x03;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
+
+ data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
+
+ data = pout->pll_vco_count;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
+ data = (pout->pll_vco_count >> 8);
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
+
+ data = pout->pll_kvco_count;
+ data &= 0x0ff;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
+ data = (pout->pll_kvco_count >> 8);
+ data &= 0x03;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
+
+ data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
+
+ data = pout->pll_kvco_code;
+ MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_CODE, data);
+ pr_debug("kvco_code:0x%x\n", data);
+
+ data = (pout->pll_n1div | (pout->pll_n2div << 4));
+ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
+
+ if (pll->ssc_en)
+ pll_db_commit_ssc(pll, pdb);
+
+ pr_debug("pll:%d\n", pll->index);
+ wmb(); /* make sure register committed */
+}
+
+/*
+ * pll_source_finding:
+ * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
+ * at mdss_dsi_8996_phy_config()
+ */
+static int pll_source_finding(struct mdss_pll_resources *pll)
+{
+ u32 clk_buf_en;
+ u32 glbl_test_ctrl;
+
+ glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_CMN_GLBL_TEST_CTRL);
+ clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_CLKBUFLR_EN);
+
+ glbl_test_ctrl &= BIT(2);
+ glbl_test_ctrl >>= 2;
+
+ pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
+ __func__, pll->index, clk_buf_en, glbl_test_ctrl);
+
+ clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
+
+ if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+ (clk_buf_en == PLL_OUTPUT_BOTH))
+ return PLL_MASTER;
+
+ if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
+ (clk_buf_en == PLL_OUTPUT_NONE))
+ return PLL_SLAVE;
+
+ if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+ (clk_buf_en == PLL_OUTPUT_RIGHT))
+ return PLL_STANDALONE;
+
+ pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
+ __func__, clk_buf_en, glbl_test_ctrl);
+
+ return PLL_UNKNOWN;
+}
+
+static void pll_source_setup(struct mdss_pll_resources *pll)
+{
+ int status;
+ struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
+ struct mdss_pll_resources *other;
+
+ if (pdb->source_setup_done)
+ return;
+
+ pdb->source_setup_done++;
+
+ status = pll_source_finding(pll);
+
+ if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
+ return;
+
+ other = pdb->next->pll;
+ if (!other)
+ return;
+
+ pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
+ status, pll->index, other->index);
+
+ if (status == PLL_MASTER)
+ pll->slave = other;
+ else
+ other->slave = pll;
+}
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+ int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+ struct mdss_pll_resources *slave;
+ struct dsi_pll_db *pdb;
+
+ pdb = (struct dsi_pll_db *)pll->priv;
+ if (!pdb) {
+		pr_err("No priv data found\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+ return rc;
+ }
+
+ pll_source_setup(pll);
+
+	pr_debug("%s: ndx=%d base=%pK rate=%lu slave=%pK\n", __func__,
+			pll->index, pll->pll_base, rate, pll->slave);
+
+ pll->vco_current_rate = rate;
+ pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+ mdss_dsi_pll_8996_input_init(pll, pdb);
+ /*
+ * tx_band = pll_postdiv
+ * 0: divided by 1 <== for now
+ * 1: divided by 2
+ * 2: divided by 4
+ * 3: divided by 8
+ */
+ pdb->out.pll_postdiv = DSI_PLL_DEFAULT_POSTDIV;
+
+ pll_8996_dec_frac_calc(pll, pdb);
+
+ if (pll->ssc_en)
+ pll_8996_ssc_calc(pll, pdb);
+
+ pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+ pll->vco_ref_clk_rate);
+
+ /* precal sequence, only for the master */
+ if (pll_precal_commit_8996(pll, pdb)) {
+ pr_debug("retry full sequence\n");
+ slave = pll->slave;
+
+ /* commit slave if split display is enabled */
+ if (slave)
+ pll_db_commit_8996(slave, pdb);
+
+ /* commit master itself */
+ pll_db_commit_8996(pll, pdb);
+ }
+
+ mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb, int *pll_trim_codes)
+{
+ struct dsi_pll_output *pout = &pdb->out;
+
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+ DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
+ 0xFF, 0x0);
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+ DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
+ pout->dec_start, (pout->div_frac_start & 0x0FF));
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+ DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
+ ((pout->div_frac_start >> 8) & 0x0FF),
+ ((pout->div_frac_start >> 16) & 0x0F));
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+ DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
+ (pout->plllock_cmp & 0x0FF),
+ ((pout->plllock_cmp >> 8) & 0x0FF));
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+ DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
+ ((pout->plllock_cmp >> 16) & 0x03),
+ (pll_trim_codes[1] | BIT(7))); /* VCO tune*/
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+ DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
+ (pll_trim_codes[0] | BIT(5)), 0x38);
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+ DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
+ (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+ DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+ 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+ DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+ 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+ DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+ 0x01, 0x01);
+ MDSS_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
+ MDSS_PLL_REG_W(pll->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
+
+ pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
+ pll_trim_codes[0], pll_trim_codes[1]);
+
+ /*
+ * Ensure all the dynamic refresh registers are written before
+ * dynamic refresh to change the fps is triggered
+ */
+ wmb();
+}
+
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+ int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+ struct dsi_pll_db *pdb;
+ int pll_trim_codes[2] = {0, 0};
+
+ if (!pll) {
+ pr_err("PLL data not found\n");
+ return -EINVAL;
+ }
+
+ pdb = pll->priv;
+ if (!pdb) {
+ pr_err("No priv data found\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+ return rc;
+ }
+
+	pr_debug("%s: ndx=%d base=%pK rate=%lu\n", __func__,
+			pll->index, pll->pll_base, rate);
+
+ pll->vco_current_rate = rate;
+ pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+ mdss_dsi_pll_8996_input_init(pll, pdb);
+
+ pll_8996_dec_frac_calc(pll, pdb);
+
+ pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+ pll->vco_ref_clk_rate);
+
+ shadow_pll_dynamic_refresh_8996(pll, pdb, pll_trim_codes);
+
+ rc = mdss_pll_resource_enable(pll, false);
+ if (rc) {
+		pr_err("Failed to disable mdss dsi pll=%d\n", pll->index);
+ return rc;
+ }
+
+ return rc;
+}
+
+static unsigned long pll_vco_get_rate_8996(struct clk *c)
+{
+ u64 vco_rate, multiplier = BIT(20);
+ s32 div_frac_start;
+ u32 dec_start;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ u64 ref_clk = vco->ref_clk_rate;
+ int rc;
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (is_gdsc_disabled(pll))
+ return 0;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+ return rc;
+ }
+
+ dec_start = MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DEC_START);
+ dec_start &= 0x0ff;
+ pr_debug("dec_start = 0x%x\n", dec_start);
+
+ div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
+ div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
+ div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
+ DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
+ pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+ vco_rate = ref_clk * dec_start;
+ vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+ pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+ mdss_pll_resource_enable(pll, false);
+
+ return (unsigned long)vco_rate;
+}
+
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
+{
+ unsigned long rrate = rate;
+ u32 div;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+ div = vco->min_rate / rate;
+ if (div > 15) {
+ /* rate < 86.67 Mhz */
+		pr_err("rate=%lu NOT supported\n", rate);
+ return -EINVAL;
+ }
+
+ if (rate < vco->min_rate)
+ rrate = vco->min_rate;
+ if (rate > vco->max_rate)
+ rrate = vco->max_rate;
+
+ return rrate;
+}
+
+enum handoff pll_vco_handoff_8996(struct clk *c)
+{
+ int rc;
+ enum handoff ret = HANDOFF_DISABLED_CLK;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (is_gdsc_disabled(pll))
+ return HANDOFF_DISABLED_CLK;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+ return ret;
+ }
+
+ if (pll_is_pll_locked_8996(pll)) {
+ pll->handoff_resources = true;
+ pll->pll_on = true;
+ c->rate = pll_vco_get_rate_8996(c);
+ ret = HANDOFF_ENABLED_CLK;
+ } else {
+ mdss_pll_resource_enable(pll, false);
+ }
+
+ return ret;
+}
+
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
+{
+ return HANDOFF_DISABLED_CLK;
+}
+
+int pll_vco_prepare_8996(struct clk *c)
+{
+ int rc = 0;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (!pll) {
+ pr_err("Dsi pll resources are not available\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
+ pll->index);
+ return rc;
+ }
+
+ if ((pll->vco_cached_rate != 0)
+ && (pll->vco_cached_rate == c->rate)) {
+ rc = c->ops->set_rate(c, pll->vco_cached_rate);
+ if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				pll->index, rc);
+ mdss_pll_resource_enable(pll, false);
+ goto error;
+ }
+ }
+
+ rc = dsi_pll_enable(c);
+
+ if (rc) {
+ mdss_pll_resource_enable(pll, false);
+ pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
+ }
+
+error:
+ return rc;
+}
+
+void pll_vco_unprepare_8996(struct clk *c)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (!pll) {
+ pr_err("Dsi pll resources are not available\n");
+ return;
+ }
+
+ pll->vco_cached_rate = c->rate;
+ dsi_pll_disable(c);
+}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
new file mode 100644
index 0000000..6423342
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
@@ -0,0 +1,572 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/workqueue.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define VCO_DELAY_USEC 1
+
+static struct dsi_pll_db pll_db[DSI_PLL_NUM];
+
+static struct clk_ops n2_clk_src_ops;
+static struct clk_ops shadow_n2_clk_src_ops;
+static struct clk_ops byte_clk_src_ops;
+static struct clk_ops post_n1_div_clk_src_ops;
+static struct clk_ops shadow_post_n1_div_clk_src_ops;
+
+static struct clk_ops clk_ops_gen_mux_dsi;
+
+/* Op structures */
+static const struct clk_ops clk_ops_dsi_vco = {
+ .set_rate = pll_vco_set_rate_8996,
+ .round_rate = pll_vco_round_rate_8996,
+ .handoff = pll_vco_handoff_8996,
+ .prepare = pll_vco_prepare_8996,
+ .unprepare = pll_vco_unprepare_8996,
+};
+
+static struct clk_div_ops post_n1_div_ops = {
+ .set_div = post_n1_div_set_div,
+ .get_div = post_n1_div_get_div,
+};
+
+static struct clk_div_ops n2_div_ops = { /* hr_oclk3 */
+ .set_div = n2_div_set_div,
+ .get_div = n2_div_get_div,
+};
+
+static struct clk_mux_ops mdss_byte_mux_ops = {
+ .set_mux_sel = set_mdss_byte_mux_sel_8996,
+ .get_mux_sel = get_mdss_byte_mux_sel_8996,
+};
+
+static struct clk_mux_ops mdss_pixel_mux_ops = {
+ .set_mux_sel = set_mdss_pixel_mux_sel_8996,
+ .get_mux_sel = get_mdss_pixel_mux_sel_8996,
+};
+
+/* Shadow ops for dynamic refresh */
+static const struct clk_ops clk_ops_shadow_dsi_vco = {
+ .set_rate = shadow_pll_vco_set_rate_8996,
+ .round_rate = pll_vco_round_rate_8996,
+ .handoff = shadow_pll_vco_handoff_8996,
+};
+
+static struct clk_div_ops shadow_post_n1_div_ops = {
+ .set_div = post_n1_div_set_div,
+};
+
+static struct clk_div_ops shadow_n2_div_ops = {
+ .set_div = shadow_n2_div_set_div,
+};
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1300000000UL,
+ .max_rate = 2600000000UL,
+ .pll_en_seq_cnt = 1,
+ .pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+ .c = {
+ .dbg_name = "dsi0pll_vco_clk_8996",
+ .ops = &clk_ops_dsi_vco,
+ CLK_INIT(dsi0pll_vco_clk.c),
+ },
+};
+
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+ .ref_clk_rate = 19200000u,
+ .min_rate = 1300000000u,
+ .max_rate = 2600000000u,
+ .c = {
+ .dbg_name = "dsi0pll_shadow_vco_clk",
+ .ops = &clk_ops_shadow_dsi_vco,
+ CLK_INIT(dsi0pll_shadow_vco_clk.c),
+ },
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1300000000UL,
+ .max_rate = 2600000000UL,
+ .pll_en_seq_cnt = 1,
+ .pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+ .c = {
+ .dbg_name = "dsi1pll_vco_clk_8996",
+ .ops = &clk_ops_dsi_vco,
+ CLK_INIT(dsi1pll_vco_clk.c),
+ },
+};
+
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+ .ref_clk_rate = 19200000u,
+ .min_rate = 1300000000u,
+ .max_rate = 2600000000u,
+ .pll_en_seq_cnt = 1,
+ .pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+ .c = {
+ .dbg_name = "dsi1pll_shadow_vco_clk",
+ .ops = &clk_ops_shadow_dsi_vco,
+ CLK_INIT(dsi1pll_shadow_vco_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &post_n1_div_ops,
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_post_n1_div_clk",
+ .ops = &post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_post_n1_div_ops,
+ .c = {
+ .parent = &dsi0pll_shadow_vco_clk.c,
+ .dbg_name = "dsi0pll_shadow_post_n1_div_clk",
+ .ops = &shadow_post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &post_n1_div_ops,
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_post_n1_div_clk",
+ .ops = &post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_post_n1_div_ops,
+ .c = {
+ .parent = &dsi1pll_shadow_vco_clk.c,
+ .dbg_name = "dsi1pll_shadow_post_n1_div_clk",
+ .ops = &shadow_post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &n2_div_ops,
+ .c = {
+ .parent = &dsi0pll_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_n2_div_clk",
+ .ops = &n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_n2_div_ops,
+ .c = {
+ .parent = &dsi0pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_shadow_n2_div_clk",
+ .ops = &shadow_n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &n2_div_ops,
+ .c = {
+ .parent = &dsi1pll_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_n2_div_clk",
+ .ops = &n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_n2_div_ops,
+ .c = {
+ .parent = &dsi1pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_shadow_n2_div_clk",
+ .ops = &shadow_n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi0pll_n2_div_clk.c,
+ .dbg_name = "dsi0pll_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pixel_clk_src.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi0pll_shadow_n2_div_clk.c,
+ .dbg_name = "dsi0pll_shadow_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi1pll_n2_div_clk.c,
+ .dbg_name = "dsi1pll_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pixel_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi1pll_shadow_n2_div_clk.c,
+ .dbg_name = "dsi1pll_shadow_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
+ },
+};
+
+static struct mux_clk dsi0pll_pixel_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi0pll_pixel_clk_src.c, 0},
+ {&dsi0pll_shadow_pixel_clk_src.c, 1},
+ },
+ .ops = &mdss_pixel_mux_ops,
+ .c = {
+ .parent = &dsi0pll_pixel_clk_src.c,
+ .dbg_name = "dsi0pll_pixel_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pixel_clk_mux.c),
+ }
+};
+
+static struct mux_clk dsi1pll_pixel_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi1pll_pixel_clk_src.c, 0},
+ {&dsi1pll_shadow_pixel_clk_src.c, 1},
+ },
+ .ops = &mdss_pixel_mux_ops,
+ .c = {
+ .parent = &dsi1pll_pixel_clk_src.c,
+ .dbg_name = "dsi1pll_pixel_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pixel_clk_mux.c),
+ }
+};
+
+static struct div_clk dsi0pll_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi0pll_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi0pll_byte_clk_src.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi0pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_shadow_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi1pll_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi1pll_byte_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi1pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_shadow_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
+ },
+};
+
+static struct mux_clk dsi0pll_byte_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi0pll_byte_clk_src.c, 0},
+ {&dsi0pll_shadow_byte_clk_src.c, 1},
+ },
+ .ops = &mdss_byte_mux_ops,
+ .c = {
+ .parent = &dsi0pll_byte_clk_src.c,
+ .dbg_name = "dsi0pll_byte_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_byte_clk_mux.c),
+ }
+};
+static struct mux_clk dsi1pll_byte_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi1pll_byte_clk_src.c, 0},
+ {&dsi1pll_shadow_byte_clk_src.c, 1},
+ },
+ .ops = &mdss_byte_mux_ops,
+ .c = {
+ .parent = &dsi1pll_byte_clk_src.c,
+ .dbg_name = "dsi1pll_byte_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_byte_clk_mux.c),
+ }
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996[] = {
+ CLK_LIST(dsi0pll_byte_clk_mux),
+ CLK_LIST(dsi0pll_byte_clk_src),
+ CLK_LIST(dsi0pll_pixel_clk_mux),
+ CLK_LIST(dsi0pll_pixel_clk_src),
+ CLK_LIST(dsi0pll_n2_div_clk),
+ CLK_LIST(dsi0pll_post_n1_div_clk),
+ CLK_LIST(dsi0pll_vco_clk),
+ CLK_LIST(dsi0pll_shadow_byte_clk_src),
+ CLK_LIST(dsi0pll_shadow_pixel_clk_src),
+ CLK_LIST(dsi0pll_shadow_n2_div_clk),
+ CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
+ CLK_LIST(dsi0pll_shadow_vco_clk),
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
+ CLK_LIST(dsi1pll_byte_clk_mux),
+ CLK_LIST(dsi1pll_byte_clk_src),
+ CLK_LIST(dsi1pll_pixel_clk_mux),
+ CLK_LIST(dsi1pll_pixel_clk_src),
+ CLK_LIST(dsi1pll_n2_div_clk),
+ CLK_LIST(dsi1pll_post_n1_div_clk),
+ CLK_LIST(dsi1pll_vco_clk),
+ CLK_LIST(dsi1pll_shadow_byte_clk_src),
+ CLK_LIST(dsi1pll_shadow_pixel_clk_src),
+ CLK_LIST(dsi1pll_shadow_n2_div_clk),
+ CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
+ CLK_LIST(dsi1pll_shadow_vco_clk),
+};
+
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = 0, ndx;
+ int const ssc_freq_default = 31500; /* default h/w recommended value */
+ int const ssc_ppm_default = 5000; /* default h/w recommended value */
+ struct dsi_pll_db *pdb;
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ if (!pll_res || !pll_res->pll_base) {
+ pr_err("Invalid PLL resources\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (pll_res->index >= DSI_PLL_NUM) {
+ pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
+ return -EINVAL;
+ }
+
+ ndx = pll_res->index;
+ pdb = &pll_db[ndx];
+ pll_res->priv = pdb;
+ pdb->pll = pll_res;
+ ndx++;
+ ndx %= DSI_PLL_NUM;
+ pdb->next = &pll_db[ndx];
+
+ /* Set clock source operations */
+
+ /* hr_oclk3, pixel_clock */
+ n2_clk_src_ops = clk_ops_slave_div;
+ n2_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+ shadow_n2_clk_src_ops = clk_ops_slave_div;
+
+ /* hr_ockl2, byte, vco pll */
+ post_n1_div_clk_src_ops = clk_ops_div;
+ post_n1_div_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+ shadow_post_n1_div_clk_src_ops = clk_ops_div;
+
+ byte_clk_src_ops = clk_ops_div;
+ byte_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+ clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+ clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+ clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+ if (pll_res->ssc_en) {
+ if (!pll_res->ssc_freq)
+ pll_res->ssc_freq = ssc_freq_default;
+ if (!pll_res->ssc_ppm)
+ pll_res->ssc_ppm = ssc_ppm_default;
+ }
+
+ /* Set client data to mux, div and vco clocks. */
+ if (pll_res->index == DSI_PLL_1) {
+ dsi1pll_byte_clk_src.priv = pll_res;
+ dsi1pll_pixel_clk_src.priv = pll_res;
+ dsi1pll_post_n1_div_clk.priv = pll_res;
+ dsi1pll_n2_div_clk.priv = pll_res;
+ dsi1pll_vco_clk.priv = pll_res;
+
+ dsi1pll_shadow_byte_clk_src.priv = pll_res;
+ dsi1pll_shadow_pixel_clk_src.priv = pll_res;
+ dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
+ dsi1pll_shadow_n2_div_clk.priv = pll_res;
+ dsi1pll_shadow_vco_clk.priv = pll_res;
+
+ pll_res->vco_delay = VCO_DELAY_USEC;
+ if ((pll_res->target_id == MDSS_PLL_TARGET_8996) ||
+ (pll_res->target_id == MDSS_PLL_TARGET_8953)) {
+ rc = of_msm_clock_register(pdev->dev.of_node,
+ mdss_dsi_pllcc_8996_1,
+ ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
+ }
+ } else {
+ dsi0pll_byte_clk_src.priv = pll_res;
+ dsi0pll_pixel_clk_src.priv = pll_res;
+ dsi0pll_post_n1_div_clk.priv = pll_res;
+ dsi0pll_n2_div_clk.priv = pll_res;
+ dsi0pll_vco_clk.priv = pll_res;
+
+ dsi0pll_shadow_byte_clk_src.priv = pll_res;
+ dsi0pll_shadow_pixel_clk_src.priv = pll_res;
+ dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
+ dsi0pll_shadow_n2_div_clk.priv = pll_res;
+ dsi0pll_shadow_vco_clk.priv = pll_res;
+
+ pll_res->vco_delay = VCO_DELAY_USEC;
+ if ((pll_res->target_id == MDSS_PLL_TARGET_8996) ||
+ (pll_res->target_id == MDSS_PLL_TARGET_8953)) {
+ rc = of_msm_clock_register(pdev->dev.of_node,
+ mdss_dsi_pllcc_8996,
+ ARRAY_SIZE(mdss_dsi_pllcc_8996));
+ }
+ }
+
+ if (!rc) {
+ pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+ pll_res->index);
+ }
+
+ return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h
new file mode 100644
index 0000000..57700e8
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h
@@ -0,0 +1,224 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDSS_DSI_PLL_8996_H
+#define MDSS_DSI_PLL_8996_H
+
+#define DSIPHY_CMN_CLK_CFG0 0x0010
+#define DSIPHY_CMN_CLK_CFG1 0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
+
+#define DSIPHY_CMN_PLL_CNTRL 0x0048
+#define DSIPHY_CMN_CTRL_0 0x001c
+#define DSIPHY_CMN_CTRL_1 0x0020
+
+#define DSIPHY_CMN_LDO_CNTRL 0x004c
+#define DSIPHY_CMN_GLBL_DIGTOP_SPARE2 0x005c
+
+#define DSIPHY_PLL_IE_TRIM 0x0400
+#define DSIPHY_PLL_IP_TRIM 0x0404
+
+#define DSIPHY_PLL_IPTAT_TRIM 0x0410
+
+#define DSIPHY_PLL_CLKBUFLR_EN 0x041c
+
+#define DSIPHY_PLL_SYSCLK_EN_RESET 0x0428
+#define DSIPHY_PLL_RESETSM_CNTRL 0x042c
+#define DSIPHY_PLL_RESETSM_CNTRL2 0x0430
+#define DSIPHY_PLL_RESETSM_CNTRL3 0x0434
+#define DSIPHY_PLL_RESETSM_CNTRL4 0x0438
+#define DSIPHY_PLL_RESETSM_CNTRL5 0x043c
+#define DSIPHY_PLL_KVCO_DIV_REF1 0x0440
+#define DSIPHY_PLL_KVCO_DIV_REF2 0x0444
+#define DSIPHY_PLL_KVCO_COUNT1 0x0448
+#define DSIPHY_PLL_KVCO_COUNT2 0x044c
+#define DSIPHY_PLL_VREF_CFG1 0x045c
+
+#define DSIPHY_PLL_KVCO_CODE 0x0458
+#define DSIPHY_PLL_CORE_VCO_TUNE_STATUS 0x4D0
+#define DSIPHY_PLL_CORE_KVCO_CODE_STATUS 0x4D4
+
+#define DSIPHY_PLL_VCO_DIV_REF1 0x046c
+#define DSIPHY_PLL_VCO_DIV_REF2 0x0470
+#define DSIPHY_PLL_VCO_COUNT1 0x0474
+#define DSIPHY_PLL_VCO_COUNT2 0x0478
+#define DSIPHY_PLL_PLLLOCK_CMP1 0x047c
+#define DSIPHY_PLL_PLLLOCK_CMP2 0x0480
+#define DSIPHY_PLL_PLLLOCK_CMP3 0x0484
+#define DSIPHY_PLL_PLLLOCK_CMP_EN 0x0488
+#define DSIPHY_PLL_PLL_VCO_TUNE 0x048C
+#define DSIPHY_PLL_DEC_START 0x0490
+#define DSIPHY_PLL_SSC_EN_CENTER 0x0494
+#define DSIPHY_PLL_SSC_ADJ_PER1 0x0498
+#define DSIPHY_PLL_SSC_ADJ_PER2 0x049c
+#define DSIPHY_PLL_SSC_PER1 0x04a0
+#define DSIPHY_PLL_SSC_PER2 0x04a4
+#define DSIPHY_PLL_SSC_STEP_SIZE1 0x04a8
+#define DSIPHY_PLL_SSC_STEP_SIZE2 0x04ac
+#define DSIPHY_PLL_DIV_FRAC_START1 0x04b4
+#define DSIPHY_PLL_DIV_FRAC_START2 0x04b8
+#define DSIPHY_PLL_DIV_FRAC_START3 0x04bc
+#define DSIPHY_PLL_TXCLK_EN 0x04c0
+#define DSIPHY_PLL_PLL_CRCTRL 0x04c4
+
+#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
+
+#define DSIPHY_PLL_PLL_MISC1 0x04e8
+
+#define DSIPHY_PLL_CP_SET_CUR 0x04f0
+#define DSIPHY_PLL_PLL_ICPMSET 0x04f4
+#define DSIPHY_PLL_PLL_ICPCSET 0x04f8
+#define DSIPHY_PLL_PLL_ICP_SET 0x04fc
+#define DSIPHY_PLL_PLL_LPF1 0x0500
+#define DSIPHY_PLL_PLL_LPF2_POSTDIV 0x0504
+#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 0x050
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 0x060
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 0x064
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 0x068
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 0x06C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 0x070
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 0x074
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 0x078
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 0x07C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 0x080
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 0x084
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 0x088
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR 0x094
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 0x098
+
+/*
+ * Input parameters for the 8996 DSI PLL rate calculation. The inline
+ * comments record the typical value and the PHY register (offset/bits)
+ * each field is eventually programmed into.
+ */
+struct dsi_pll_input {
+	u32 fref;	/* 19.2 MHz, reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* 1, reg: 0x0014 */
+	u32 n2div;	/* 1, reg: 0x0010, bit 4-7 */
+	u32 ssc_en;	/* 1, reg: 0x0494, bit 0 */
+	u32 ldo_en;	/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  */
+	u32 refclk_dbler_en;	/* 0, reg: 0x04c0, bit 1 */
+	u32 vco_measure_time;	/* 5, unknown */
+	u32 kvco_measure_time;	/* 5, unknown */
+	u32 bandgap_timer;	/* 4, reg: 0x0430, bit 3 - 5 */
+	u32 pll_wakeup_timer;	/* 5, reg: 0x043c, bit 0 - 2 */
+	u32 plllock_cnt;	/* 1, reg: 0x0488, bit 1 - 2 */
+	u32 plllock_rng;	/* 1, reg: 0x0488, bit 3 - 4 */
+	u32 ssc_center;		/* 0, reg: 0x0494, bit 1 */
+	u32 ssc_adj_period;	/* 37, reg: 0x498, bit 0 - 9 */
+	u32 ssc_spread;		/* 0.005  */
+	u32 ssc_freq;		/* unknown */
+	u32 pll_ie_trim;	/* 4, reg: 0x0400 */
+	u32 pll_ip_trim;	/* 4, reg: 0x0404 */
+	u32 pll_iptat_trim;	/* reg: 0x0410 */
+	u32 pll_cpcset_cur;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	u32 pll_cpmset_cur;	/* 1, reg: 0x04f0, bit 3 - 5 */
+
+	u32 pll_icpmset;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	u32 pll_icpcset;	/* 4, reg: 0x04fc, bit 0 - 2 */
+
+	u32 pll_icpmset_p;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	u32 pll_icpmset_m;	/* 0, reg: 0x04f4, bit 3 - 5 */
+
+	u32 pll_icpcset_p;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	u32 pll_icpcset_m;	/* 0, reg: 0x04f8, bit 3 - 5 */
+
+	u32 pll_lpf_res1;	/* 3, reg: 0x0504, bit 0 - 3 */
+	u32 pll_lpf_cap1;	/* 11, reg: 0x0500, bit 0 - 3 */
+	u32 pll_lpf_cap2;	/* 1, reg: 0x0500, bit 4 - 7 */
+	u32 pll_c3ctrl;		/* 2, reg: 0x04c4 */
+	u32 pll_r3ctrl;		/* 1, reg: 0x04c4 */
+};
+
+/*
+ * Computed register values produced by the 8996 DSI PLL rate
+ * calculation; each field maps to the PHY register offset noted in its
+ * inline comment.
+ */
+struct dsi_pll_output {
+	u32 pll_txclk_en;	/* reg: 0x04c0 */
+	u32 dec_start;		/* reg: 0x0490 */
+	u32 div_frac_start;	/* reg: 0x04b4, 0x4b8, 0x04bc */
+	u32 ssc_period;		/* reg: 0x04a0, 0x04a4 */
+	u32 ssc_step_size;	/* reg: 0x04a8, 0x04ac */
+	u32 plllock_cmp;	/* reg: 0x047c, 0x0480, 0x0484 */
+	u32 pll_vco_div_ref;	/* reg: 0x046c, 0x0470 */
+	u32 pll_vco_count;	/* reg: 0x0474, 0x0478 */
+	u32 pll_kvco_div_ref;	/* reg: 0x0440, 0x0444 */
+	u32 pll_kvco_count;	/* reg: 0x0448, 0x044c */
+	u32 pll_misc1;		/* reg: 0x04e8 */
+	u32 pll_lpf2_postdiv;	/* reg: 0x0504 */
+	u32 pll_resetsm_cntrl;	/* reg: 0x042c */
+	u32 pll_resetsm_cntrl2;	/* reg: 0x0430 */
+	u32 pll_resetsm_cntrl5;	/* reg: 0x043c */
+	u32 pll_kvco_code;	/* reg: 0x0458 */
+
+	u32 cmn_clk_cfg0;	/* reg: 0x0010 */
+	u32 cmn_clk_cfg1;	/* reg: 0x0014 */
+	u32 cmn_ldo_cntrl;	/* reg: 0x004c */
+
+	u32 pll_postdiv;	/* vco */
+	u32 pll_n1div;		/* vco */
+	u32 pll_n2div;		/* hr_oclk3, pixel_clock */
+	u32 fcvo;
+};
+
+/* DSI PLL instance indices (two PLLs on this target). */
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_NUM
+};
+
+/* Per-instance bookkeeping for the 8996 DSI PLL driver. */
+struct dsi_pll_db {
+	struct dsi_pll_db *next;	/* link to the other PLL instance(s) */
+	struct mdss_pll_resources *pll;	/* owning PLL resource handle */
+	struct dsi_pll_input in;	/* requested/derived configuration */
+	struct dsi_pll_output out;	/* computed register settings */
+	int source_setup_done;		/* non-zero once source config applied */
+};
+
+/* PLL clock-buffer output routing selection. */
+enum {
+	PLL_OUTPUT_NONE,
+	PLL_OUTPUT_RIGHT,
+	PLL_OUTPUT_LEFT,
+	PLL_OUTPUT_BOTH
+};
+
+/* Source selection for the PLL reference path. */
+enum {
+	PLL_SOURCE_FROM_LEFT,
+	PLL_SOURCE_FROM_RIGHT
+};
+
+/* Role of a PLL instance in a multi-PLL (e.g. split) configuration. */
+enum {
+	PLL_UNKNOWN,
+	PLL_STANDALONE,
+	PLL_SLAVE,
+	PLL_MASTER
+};
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
+enum handoff pll_vco_handoff_8996(struct clk *c);
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
+int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
+int shadow_post_n1_div_get_div(struct div_clk *clk);
+int shadow_n2_div_set_div(struct div_clk *clk, int div);
+int shadow_n2_div_get_div(struct div_clk *clk);
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+int pll_vco_prepare_8996(struct clk *c);
+void pll_vco_unprepare_8996(struct clk *c);
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
+int post_n1_div_set_div(struct div_clk *clk, int div);
+int post_n1_div_get_div(struct div_clk *clk);
+int n2_div_set_div(struct div_clk *clk, int div);
+int n2_div_get_div(struct div_clk *clk);
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
+
+#endif /* MDSS_DSI_PLL_8996_H */
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-util.c
new file mode 100644
index 0000000..3bc7564
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-util.c
@@ -0,0 +1,654 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+
+#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG (0x0)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG (0x0004)
+#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG (0x0008)
+#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG (0x000C)
+#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG (0x0010)
+#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG (0x0014)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG (0x0024)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG (0x0028)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG (0x002C)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG (0x0030)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG (0x0034)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0 (0x0038)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1 (0x003C)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2 (0x0040)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3 (0x0044)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4 (0x0048)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0 (0x004C)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1 (0x0050)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2 (0x0054)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3 (0x0058)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0 (0x006C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2 (0x0074)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3 (0x0078)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4 (0x007C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5 (0x0080)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6 (0x0084)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7 (0x0088)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8 (0x008C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9 (0x0090)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10 (0x0094)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11 (0x0098)
+#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG (0x009C)
+#define DSI_PHY_PLL_UNIPHY_PLL_STATUS (0x00C0)
+
+#define DSI_PLL_POLL_DELAY_US 50
+#define DSI_PLL_POLL_TIMEOUT_US 500
+
+/*
+ * Select the byte clock mux source (direct vs indirect path) by
+ * writing bit 1 of the UNIPHY PLL VREG_CFG register.
+ * NOTE(review): unlike get_byte_mux_sel(), this writes the register
+ * without taking the PLL resource vote first -- presumably callers
+ * guarantee the GDSC/clocks are on; confirm against callers.
+ */
+int set_byte_mux_sel(struct mux_clk *clk, int sel)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	pr_debug("byte mux set to %s mode\n", sel ? "indirect" : "direct");
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, (sel << 1));
+
+	return 0;
+}
+
+/*
+ * Read back the byte clock mux selection from VREG_CFG bit 1.
+ * Returns 1 for indirect mode, 0 for direct mode (also 0 when the GDSC
+ * is off and the register is inaccessible), or a negative errno when
+ * the PLL resources cannot be enabled.
+ */
+int get_byte_mux_sel(struct mux_clk *clk)
+{
+	int mux_mode, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	mux_mode = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG) & BIT(1);
+
+	/* Fix: terminate the debug message with a newline. */
+	pr_debug("byte mux mode = %s\n", mux_mode ? "indirect" : "direct");
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return !!mux_mode;
+}
+
+/*
+ * Prepare hook for PLL divider clocks: re-program the hardware divider
+ * with the cached software value so register state matches what the
+ * clock framework believes (e.g. after power collapse).
+ */
+int dsi_pll_div_prepare(struct clk *c)
+{
+	struct div_clk *div = to_div_clk(c);
+	/* Restore the divider's value */
+	return div->ops->set_div(div, div->data.div);
+}
+
+/*
+ * Prepare hook for PLL mux clocks: look up the select value matching
+ * the clock's current parent and re-program the mux hardware with it
+ * (hardware state may have been lost across power collapse).
+ *
+ * Returns 0 on success, -EINVAL if the current parent is not in the
+ * mux's parent table, or the resource-enable error code.
+ */
+int dsi_pll_mux_prepare(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int i, rc, sel = 0;
+	struct mdss_pll_resources *dsi_pll_res = mux->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Style fix: brace the multi-line loop body (kernel style). */
+	for (i = 0; i < mux->num_parents; i++) {
+		if (mux->parents[i].src == c->parent) {
+			sel = mux->parents[i].sel;
+			break;
+		}
+	}
+
+	if (i == mux->num_parents) {
+		pr_err("Failed to select the parent clock\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Restore the mux source select value */
+	rc = mux->ops->set_mux_sel(mux, sel);
+
+error:
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+/*
+ * Program the POSTDIV2 post-divider; hardware stores (div - 1).
+ * PLL resources are voted on for the duration of the register write.
+ */
+int fixed_4div_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, (div - 1));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+/*
+ * Read the POSTDIV2 post-divider. Returns the logical divider value
+ * (register + 1), 0 when the GDSC is off, or a negative errno when the
+ * PLL resources cannot be enabled.
+ */
+int fixed_4div_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return div + 1;
+}
+
+/*
+ * Program the POSTDIV3 (digital) post-divider; hardware stores
+ * (div - 1). PLL resources are voted on around the register write.
+ */
+int digital_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, (div - 1));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+/*
+ * Read the POSTDIV3 (digital) post-divider. Returns the logical
+ * divider value (register + 1), 0 when the GDSC is off, or a negative
+ * errno when the PLL resources cannot be enabled.
+ */
+int digital_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return div + 1;
+}
+
+/*
+ * Program the POSTDIV1 (analog) post-divider; hardware stores
+ * (div - 1). PLL resources are voted on around the register write.
+ */
+int analog_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, div - 1);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+/*
+ * Read the POSTDIV1 (analog) post-divider. Returns the logical divider
+ * value (register + 1), 0 when the GDSC is off, or a negative errno
+ * when the PLL resources cannot be enabled.
+ *
+ * Consistency fix: pass the cached dsi_pll_res to
+ * mdss_pll_resource_enable() like every sibling accessor, instead of
+ * dereferencing clk->priv a second time.
+ */
+int analog_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG) + 1;
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return div;
+}
+
+/*
+ * Poll bit 0 of the UNIPHY PLL STATUS register for lock, in atomic
+ * context, up to DSI_PLL_POLL_TIMEOUT_US (polling every
+ * DSI_PLL_POLL_DELAY_US). Returns 1 when locked, 0 on timeout.
+ */
+int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res)
+{
+	u32 status;
+	int pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
+			DSI_PHY_PLL_UNIPHY_PLL_STATUS),
+			status,
+			((status & BIT(0)) == 1),
+			DSI_PLL_POLL_DELAY_US,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("DSI PLL status=%x failed to Lock\n", status);
+		pll_locked = 0;
+	} else {
+		pll_locked = 1;
+	}
+
+	return pll_locked;
+}
+
+/*
+ * Derive the 28nm UNIPHY PLL configuration for the requested VCO rate:
+ * loop filter resistance (from the per-VCO LUT), integer vs
+ * fractional-N mode, SDM feedback divider and fractional seed, and the
+ * CAL_CFG10/11 calibration bytes. Results are written into @vco_calc.
+ * Returns 0 on success, -EINVAL when the rate exceeds the LUT range.
+ */
+static int pll_28nm_vco_rate_calc(struct dsi_pll_vco_clk *vco,
+		struct mdss_dsi_vco_calc *vco_calc, unsigned long vco_clk_rate)
+{
+	s32 rem;
+	s64 frac_n_mode, ref_doubler_en_b;
+	s64 ref_clk_to_pll, div_fb, frac_n_value;
+	int i;
+
+	/* Configure the Loop filter resistance */
+	for (i = 0; i < vco->lpfr_lut_size; i++)
+		if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
+			break;
+	if (i == vco->lpfr_lut_size) {
+		pr_err("unable to get loop filter resistance. vco=%ld\n",
+			vco_clk_rate);
+		return -EINVAL;
+	}
+	vco_calc->lpfr_lut_res = vco->lpfr_lut[i].r;
+
+	/*
+	 * If the rate is not an exact multiple of the reference clock,
+	 * use fractional-N mode; otherwise integer mode with the
+	 * reference doubler enabled.
+	 */
+	div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
+	if (rem) {
+		vco_calc->refclk_cfg = 0x1;
+		frac_n_mode = 1;
+		ref_doubler_en_b = 0;
+	} else {
+		vco_calc->refclk_cfg = 0x0;
+		frac_n_mode = 0;
+		ref_doubler_en_b = 1;
+	}
+
+	pr_debug("refclk_cfg = %lld\n", vco_calc->refclk_cfg);
+
+	ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (vco_calc->refclk_cfg))
+			  + (ref_doubler_en_b * vco->ref_clk_rate));
+
+	/* Feedback divider: integer part and 16-bit fractional seed. */
+	div_fb = div_s64_rem(vco_clk_rate, ref_clk_to_pll, &rem);
+	frac_n_value = div_s64(((s64)rem * (1 << 16)), ref_clk_to_pll);
+	vco_calc->gen_vco_clk = vco_clk_rate;
+
+	pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll);
+	pr_debug("div_fb = %lld\n", div_fb);
+	pr_debug("frac_n_value = %lld\n", frac_n_value);
+
+	pr_debug("Generated VCO Clock: %lld\n", vco_calc->gen_vco_clk);
+	rem = 0;
+	if (frac_n_mode) {
+		vco_calc->sdm_cfg0 = 0;
+		vco_calc->sdm_cfg1 = (div_fb & 0x3f) - 1;
+		vco_calc->sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
+		vco_calc->sdm_cfg2 = rem;
+	} else {
+		vco_calc->sdm_cfg0 = (0x1 << 5);
+		vco_calc->sdm_cfg0 |= (div_fb & 0x3f) - 1;
+		vco_calc->sdm_cfg1 = 0;
+		vco_calc->sdm_cfg2 = 0;
+		vco_calc->sdm_cfg3 = 0;
+	}
+
+	pr_debug("sdm_cfg0=%lld\n", vco_calc->sdm_cfg0);
+	pr_debug("sdm_cfg1=%lld\n", vco_calc->sdm_cfg1);
+	pr_debug("sdm_cfg2=%lld\n", vco_calc->sdm_cfg2);
+	pr_debug("sdm_cfg3=%lld\n", vco_calc->sdm_cfg3);
+
+	/* Calibration bytes: VCO rate split as (rate / 256 MHz, MHz remainder). */
+	vco_calc->cal_cfg11 = div_s64_rem(vco_calc->gen_vco_clk,
+		256 * 1000000, &rem);
+	vco_calc->cal_cfg10 = rem / 1000000;
+	pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n",
+		vco_calc->cal_cfg10, vco_calc->cal_cfg11);
+
+	return 0;
+}
+
+/*
+ * Compute the spread-spectrum clocking (SSC) register parameters for
+ * the VCO rate previously stored in @vco_calc->gen_vco_clk. No-op when
+ * SSC is disabled in the PLL resources.
+ */
+static void pll_28nm_ssc_param_calc(struct dsi_pll_vco_clk *vco,
+		struct mdss_dsi_vco_calc *vco_calc)
+{
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+	s64 ppm_freq, incr, spread_freq, div_rf, frac_n_value;
+	s32 rem;
+
+	if (!dsi_pll_res->ssc_en) {
+		pr_debug("DSI PLL SSC not enabled\n");
+		return;
+	}
+
+	vco_calc->ssc.kdiv = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
+		1000000) - 1;
+	vco_calc->ssc.triang_steps = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
+		dsi_pll_res->ssc_freq * (vco_calc->ssc.kdiv + 1));
+	/* Frequency offset implied by the configured ppm spread. */
+	ppm_freq = div_s64(vco_calc->gen_vco_clk * dsi_pll_res->ssc_ppm,
+		1000000);
+	incr = div64_s64(ppm_freq * 65536, vco->ref_clk_rate * 2 *
+		vco_calc->ssc.triang_steps);
+
+	vco_calc->ssc.triang_inc_7_0 = incr & 0xff;
+	vco_calc->ssc.triang_inc_9_8 = (incr >> 8) & 0x3;
+
+	/* Down-spread by the full offset, or half of it for center-spread. */
+	if (!dsi_pll_res->ssc_center)
+		spread_freq = vco_calc->gen_vco_clk - ppm_freq;
+	else
+		spread_freq = vco_calc->gen_vco_clk - (ppm_freq / 2);
+
+	div_rf = div_s64(spread_freq, 2 * vco->ref_clk_rate);
+	vco_calc->ssc.dc_offset = (div_rf - 1);
+
+	div_s64_rem(spread_freq, 2 * vco->ref_clk_rate, &rem);
+	frac_n_value = div_s64((s64)rem * 65536, 2 * vco->ref_clk_rate);
+
+	vco_calc->ssc.freq_seed_7_0 = frac_n_value & 0xff;
+	vco_calc->ssc.freq_seed_15_8 = (frac_n_value >> 8) & 0xff;
+}
+
+/*
+ * Program the 28nm UNIPHY PLL registers with the calculated VCO
+ * configuration, using the SSC register set instead of the plain SDM
+ * registers when @ssc_en is set.
+ * NOTE(review): the write order and the post-SDM delay appear to
+ * follow a required hardware programming sequence -- do not reorder.
+ */
+static void pll_28nm_vco_config(void __iomem *pll_base,
+		struct mdss_dsi_vco_calc *vco_calc,
+		u32 vco_delay_us, bool ssc_en)
+{
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG,
+		vco_calc->lpfr_lut_res);
+
+	/* Loop filter capacitance values : c1 and c2 */
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);
+
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
+
+	if (!ssc_en) {
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
+			(u32)(vco_calc->sdm_cfg1 & 0xff));
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
+			(u32)(vco_calc->sdm_cfg2 & 0xff));
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
+			(u32)(vco_calc->sdm_cfg3 & 0xff));
+	} else {
+		/* SSC mode: SDM_CFG1-3 carry the SSC dc offset / seed. */
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
+			(u32)vco_calc->ssc.dc_offset);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
+			(u32)vco_calc->ssc.freq_seed_7_0);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
+			(u32)vco_calc->ssc.freq_seed_15_8);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0,
+			(u32)vco_calc->ssc.kdiv);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1,
+			(u32)vco_calc->ssc.triang_inc_7_0);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2,
+			(u32)vco_calc->ssc.triang_inc_9_8);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3,
+			(u32)vco_calc->ssc.triang_steps);
+	}
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
+
+	/* Add hardware recommended delay for correct PLL configuration */
+	if (vco_delay_us)
+		udelay(vco_delay_us);
+
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG,
+		(u32)vco_calc->refclk_cfg);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0,
+		(u32)vco_calc->sdm_cfg0);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10,
+		(u32)(vco_calc->cal_cfg10 & 0xff));
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11,
+		(u32)(vco_calc->cal_cfg11 & 0xff));
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);
+}
+
+/*
+ * Set the 28nm VCO to @rate: run the rate calculation, derive SSC
+ * parameters (no-op when SSC is off), then program the register
+ * sequence. Returns the rate-calculation error, otherwise 0.
+ * NOTE(review): no PLL resource vote is taken here -- presumably the
+ * caller (prepare path) guarantees register access; confirm.
+ */
+int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
+{
+	struct mdss_dsi_vco_calc vco_calc = {0};
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+	int rc = 0;
+
+	rc = pll_28nm_vco_rate_calc(vco, &vco_calc, rate);
+	if (rc) {
+		pr_err("vco rate calculation failed\n");
+		return rc;
+	}
+
+	pll_28nm_ssc_param_calc(vco, &vco_calc);
+	pll_28nm_vco_config(dsi_pll_res->pll_base, &vco_calc,
+		dsi_pll_res->vco_delay, dsi_pll_res->ssc_en);
+
+	return 0;
+}
+
+/*
+ * Reconstruct the current VCO rate from hardware state: the reference
+ * clock (optionally doubled per REFCLK_CFG bit 0), then either the
+ * integer bypass divider (SDM_CFG0 bit 6 set) or the SDM dc-offset
+ * plus 16-bit fractional seed. Returns 0 when the GDSC is off.
+ * NOTE(review): on resource-enable failure this returns the negative
+ * errno cast to unsigned long; confirm callers tolerate that.
+ */
+unsigned long vco_get_rate(struct clk *c)
+{
+	u32 sdm0, doubler, sdm_byp_div;
+	u64 vco_rate;
+	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Check to see if the ref clk doubler is enabled */
+	doubler = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				 DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0);
+	ref_clk += (doubler * vco->ref_clk_rate);
+
+	/* see if it is integer mode or sdm mode */
+	sdm0 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0);
+	if (sdm0 & BIT(6)) {
+		/* integer mode */
+		sdm_byp_div = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1;
+		vco_rate = ref_clk * sdm_byp_div;
+	} else {
+		/* sdm mode */
+		sdm_dc_off = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF;
+		pr_debug("sdm_dc_off = %d\n", sdm_dc_off);
+		sdm2 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF;
+		sdm3 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF;
+		sdm_freq_seed = (sdm3 << 8) | sdm2;
+		pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed);
+
+		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+		pr_debug("vco rate = %lld", vco_rate);
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return (unsigned long)vco_rate;
+}
+
+/*
+ * Enable the DSI PLL by trying each registered enable sequence in
+ * order until one reports lock. On total failure the PLL resource
+ * vote taken here is dropped again and the error is returned.
+ *
+ * Fix: only mark the PLL as on when a sequence actually succeeded;
+ * previously pll_on was set to true even after every sequence failed
+ * and the resources had already been released.
+ */
+static int dsi_pll_enable(struct clk *c)
+{
+	int i, rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](dsi_pll_res);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc) {
+		mdss_pll_resource_enable(dsi_pll_res, false);
+		pr_err("DSI PLL failed to lock\n");
+		return rc;
+	}
+
+	dsi_pll_res->pll_on = true;
+
+	return rc;
+}
+
+/*
+ * Gate the DSI PLL by clearing GLB_CFG and drop the resource votes.
+ * If the PLL is not marked on (e.g. nothing enabled it yet), a
+ * resource vote is taken first so the register write is safe; any
+ * handoff vote held since boot is also released here.
+ */
+static void dsi_pll_disable(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res->pll_on &&
+		mdss_pll_resource_enable(dsi_pll_res, true)) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return;
+	}
+
+	dsi_pll_res->handoff_resources = false;
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	dsi_pll_res->pll_on = false;
+
+	pr_debug("DSI PLL Disabled\n");
+}
+
+/*
+ * Round a requested rate by clamping it to the VCO's supported
+ * [min_rate, max_rate] range; any in-range rate is returned unchanged.
+ */
+long vco_round_rate(struct clk *c, unsigned long rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	unsigned long clamped = rate;
+
+	if (rate < vco->min_rate)
+		clamped = vco->min_rate;
+	else if (rate > vco->max_rate)
+		clamped = vco->max_rate;
+
+	return clamped;
+}
+
+/*
+ * Handoff hook: detect whether the bootloader left the PLL locked.
+ * If locked, keep the resource vote (handoff_resources), mark the PLL
+ * on and report its current rate to the clock framework; otherwise
+ * drop the vote and report the clock disabled.
+ */
+enum handoff vco_handoff(struct clk *c)
+{
+	int rc;
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return HANDOFF_DISABLED_CLK;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return ret;
+	}
+
+	if (dsi_pll_lock_status(dsi_pll_res)) {
+		dsi_pll_res->handoff_resources = true;
+		dsi_pll_res->pll_on = true;
+		c->rate = vco_get_rate(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		mdss_pll_resource_enable(dsi_pll_res, false);
+	}
+
+	return ret;
+}
+
+/*
+ * Prepare hook for the VCO clock: re-apply the cached rate (hardware
+ * state is lost across power collapse) when it matches the rate the
+ * framework last set, then run the PLL enable sequences.
+ * NOTE(review): the vco_cached_rate != 0 test is redundant unless
+ * c->rate can itself be 0 -- confirm.
+ */
+int vco_prepare(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	if ((dsi_pll_res->vco_cached_rate != 0)
+	    && (dsi_pll_res->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, dsi_pll_res->vco_cached_rate);
+		if (rc) {
+			pr_err("vco_set_rate failed. rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+error:
+	return rc;
+}
+
+/*
+ * Unprepare hook: remember the current rate so vco_prepare() can
+ * restore it after power collapse, then disable the PLL.
+ */
+void vco_unprepare(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	dsi_pll_res->vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
+}
+
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll.h b/drivers/clk/msm/mdss/mdss-dsi-pll.h
new file mode 100644
index 0000000..4a9bb64
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_DSI_PLL_H
+#define __MDSS_DSI_PLL_H
+
+#define MAX_DSI_PLL_EN_SEQS 10
+
+#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG (0x0020)
+#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2 (0x0064)
+#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG (0x0068)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1 (0x0070)
+
+/* Register offsets for 20nm PHY PLL */
+#define MMSS_DSI_PHY_PLL_PLL_CNTRL (0x0014)
+#define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN (0x002C)
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN (0x009C)
+
+/* Maps a VCO rate ceiling to its loop filter resistance setting. */
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 r;
+};
+
+/* DSI PLL VCO clock: rate limits and driver hooks around an embedded
+ * clock-framework struct clk.
+ */
+struct dsi_pll_vco_clk {
+	unsigned long	ref_clk_rate;	/* PLL reference clock rate, Hz */
+	unsigned long	min_rate;	/* minimum supported VCO rate */
+	unsigned long	max_rate;	/* maximum supported VCO rate */
+	u32		pll_en_seq_cnt;	/* valid entries in pll_enable_seqs */
+	struct lpfr_cfg *lpfr_lut;	/* loop filter resistance LUT */
+	u32		lpfr_lut_size;	/* entries in lpfr_lut */
+	void		*priv;		/* struct mdss_pll_resources * */
+
+	struct clk	c;		/* embedded framework clock */
+
+	/* Candidate enable sequences, tried in order until one locks. */
+	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
+			(struct mdss_pll_resources *dsi_pll_Res);
+};
+
+/* Computed spread-spectrum clocking register fields (SSC_CFG0-3 and
+ * the SDM dc offset / frequency seed used in SSC mode).
+ */
+struct ssc_params {
+	s32 kdiv;
+	s64 triang_inc_7_0;
+	s64 triang_inc_9_8;
+	s64 triang_steps;
+	s64 dc_offset;
+	s64 freq_seed_7_0;
+	s64 freq_seed_15_8;
+};
+
+/* Intermediate results of the 28nm VCO rate calculation, later written
+ * into the SDM/CAL/REFCLK/LPFR registers by the config step.
+ */
+struct mdss_dsi_vco_calc {
+	s64 sdm_cfg0;
+	s64 sdm_cfg1;
+	s64 sdm_cfg2;
+	s64 sdm_cfg3;
+	s64 cal_cfg10;
+	s64 cal_cfg11;
+	s64 refclk_cfg;
+	s64 gen_vco_clk;
+	u32 lpfr_lut_res;
+	struct ssc_params ssc;
+};
+
+/* Convert an embedded struct clk back to its containing VCO clock. */
+static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct dsi_pll_vco_clk, c);
+}
+
+int dsi_pll_clock_register_hpm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_20nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_lpm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int set_byte_mux_sel(struct mux_clk *clk, int sel);
+int get_byte_mux_sel(struct mux_clk *clk);
+int dsi_pll_div_prepare(struct clk *c);
+int dsi_pll_mux_prepare(struct clk *c);
+int fixed_4div_set_div(struct div_clk *clk, int div);
+int fixed_4div_get_div(struct div_clk *clk);
+int digital_set_div(struct div_clk *clk, int div);
+int digital_get_div(struct div_clk *clk);
+int analog_set_div(struct div_clk *clk, int div);
+int analog_get_div(struct div_clk *clk);
+int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
+int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
+unsigned long vco_get_rate(struct clk *c);
+long vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff vco_handoff(struct clk *c);
+int vco_prepare(struct clk *c);
+void vco_unprepare(struct clk *c);
+
+/* APIs for 20nm PHY PLL */
+int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
+int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
+ unsigned long rate);
+long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff pll_20nm_vco_handoff(struct clk *c);
+int pll_20nm_vco_prepare(struct clk *c);
+void pll_20nm_vco_unprepare(struct clk *c);
+int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
+
+int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
+int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
+int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
+int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
+int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
+int fixed_hr_oclk2_get_div(struct div_clk *clk);
+int hr_oclk3_set_div(struct div_clk *clk, int div);
+int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
+int hr_oclk3_get_div(struct div_clk *clk);
+int ndiv_set_div(struct div_clk *clk, int div);
+int shadow_ndiv_set_div(struct div_clk *clk, int div);
+int ndiv_get_div(struct div_clk *clk);
+void __dsi_pll_disable(void __iomem *pll_base);
+
+int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel(struct mux_clk *clk);
+int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel(struct mux_clk *clk);
+
+#endif
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
new file mode 100644
index 0000000..0f2d61e
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
@@ -0,0 +1,2689 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/* CONSTANTS */
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO 10
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000
+#define HDMI_CLKS_PLL_DIVSEL 0
+#define HDMI_CORECLK_DIV 5
+#define HDMI_REF_CLOCK 19200000
+#define HDMI_64B_ERR_VAL 0xFFFFFFFFFFFFFFFFULL
+#define HDMI_VERSION_8996_V1 1
+#define HDMI_VERSION_8996_V2 2
+#define HDMI_VERSION_8996_V3 3
+#define HDMI_VERSION_8996_V3_1_8 4
+
+#define HDMI_VCO_MAX_FREQ 12000000000UL
+#define HDMI_VCO_MIN_FREQ 8000000000UL
+#define HDMI_2400MHZ_BIT_CLK_HZ 2400000000UL
+#define HDMI_2250MHZ_BIT_CLK_HZ 2250000000UL
+#define HDMI_2000MHZ_BIT_CLK_HZ 2000000000UL
+#define HDMI_1700MHZ_BIT_CLK_HZ 1700000000UL
+#define HDMI_1200MHZ_BIT_CLK_HZ 1200000000UL
+#define HDMI_1334MHZ_BIT_CLK_HZ 1334000000UL
+#define HDMI_1000MHZ_BIT_CLK_HZ 1000000000UL
+#define HDMI_850MHZ_BIT_CLK_HZ 850000000
+#define HDMI_667MHZ_BIT_CLK_HZ 667000000
+#define HDMI_600MHZ_BIT_CLK_HZ 600000000
+#define HDMI_500MHZ_BIT_CLK_HZ 500000000
+#define HDMI_450MHZ_BIT_CLK_HZ 450000000
+#define HDMI_334MHZ_BIT_CLK_HZ 334000000
+#define HDMI_300MHZ_BIT_CLK_HZ 300000000
+#define HDMI_282MHZ_BIT_CLK_HZ 282000000
+#define HDMI_250MHZ_BIT_CLK_HZ 250000000
+#define HDMI_KHZ_TO_HZ 1000
+
+/* PLL REGISTERS */
+#define QSERDES_COM_ATB_SEL1 (0x000)
+#define QSERDES_COM_ATB_SEL2 (0x004)
+#define QSERDES_COM_FREQ_UPDATE (0x008)
+#define QSERDES_COM_BG_TIMER (0x00C)
+#define QSERDES_COM_SSC_EN_CENTER (0x010)
+#define QSERDES_COM_SSC_ADJ_PER1 (0x014)
+#define QSERDES_COM_SSC_ADJ_PER2 (0x018)
+#define QSERDES_COM_SSC_PER1 (0x01C)
+#define QSERDES_COM_SSC_PER2 (0x020)
+#define QSERDES_COM_SSC_STEP_SIZE1 (0x024)
+#define QSERDES_COM_SSC_STEP_SIZE2 (0x028)
+#define QSERDES_COM_POST_DIV (0x02C)
+#define QSERDES_COM_POST_DIV_MUX (0x030)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x034)
+#define QSERDES_COM_CLK_ENABLE1 (0x038)
+#define QSERDES_COM_SYS_CLK_CTRL (0x03C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE (0x040)
+#define QSERDES_COM_PLL_EN (0x044)
+#define QSERDES_COM_PLL_IVCO (0x048)
+#define QSERDES_COM_LOCK_CMP1_MODE0 (0x04C)
+#define QSERDES_COM_LOCK_CMP2_MODE0 (0x050)
+#define QSERDES_COM_LOCK_CMP3_MODE0 (0x054)
+#define QSERDES_COM_LOCK_CMP1_MODE1 (0x058)
+#define QSERDES_COM_LOCK_CMP2_MODE1 (0x05C)
+#define QSERDES_COM_LOCK_CMP3_MODE1 (0x060)
+#define QSERDES_COM_LOCK_CMP1_MODE2 (0x064)
+#define QSERDES_COM_CMN_RSVD0 (0x064)
+#define QSERDES_COM_LOCK_CMP2_MODE2 (0x068)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL (0x068)
+#define QSERDES_COM_LOCK_CMP3_MODE2 (0x06C)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS (0x06C)
+#define QSERDES_COM_BG_TRIM (0x070)
+#define QSERDES_COM_CLK_EP_DIV (0x074)
+#define QSERDES_COM_CP_CTRL_MODE0 (0x078)
+#define QSERDES_COM_CP_CTRL_MODE1 (0x07C)
+#define QSERDES_COM_CP_CTRL_MODE2 (0x080)
+#define QSERDES_COM_CMN_RSVD1 (0x080)
+#define QSERDES_COM_PLL_RCTRL_MODE0 (0x084)
+#define QSERDES_COM_PLL_RCTRL_MODE1 (0x088)
+#define QSERDES_COM_PLL_RCTRL_MODE2 (0x08C)
+#define QSERDES_COM_CMN_RSVD2 (0x08C)
+#define QSERDES_COM_PLL_CCTRL_MODE0 (0x090)
+#define QSERDES_COM_PLL_CCTRL_MODE1 (0x094)
+#define QSERDES_COM_PLL_CCTRL_MODE2 (0x098)
+#define QSERDES_COM_CMN_RSVD3 (0x098)
+#define QSERDES_COM_PLL_CNTRL (0x09C)
+#define QSERDES_COM_PHASE_SEL_CTRL (0x0A0)
+#define QSERDES_COM_PHASE_SEL_DC (0x0A4)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL (0x0A8)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM (0x0A8)
+#define QSERDES_COM_SYSCLK_EN_SEL (0x0AC)
+#define QSERDES_COM_CML_SYSCLK_SEL (0x0B0)
+#define QSERDES_COM_RESETSM_CNTRL (0x0B4)
+#define QSERDES_COM_RESETSM_CNTRL2 (0x0B8)
+#define QSERDES_COM_RESTRIM_CTRL (0x0BC)
+#define QSERDES_COM_RESTRIM_CTRL2 (0x0C0)
+#define QSERDES_COM_RESCODE_DIV_NUM (0x0C4)
+#define QSERDES_COM_LOCK_CMP_EN (0x0C8)
+#define QSERDES_COM_LOCK_CMP_CFG (0x0CC)
+#define QSERDES_COM_DEC_START_MODE0 (0x0D0)
+#define QSERDES_COM_DEC_START_MODE1 (0x0D4)
+#define QSERDES_COM_DEC_START_MODE2 (0x0D8)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL (0x0D8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 (0x0DC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 (0x0E0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 (0x0E4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 (0x0E8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 (0x0EC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 (0x0F0)
+#define QSERDES_COM_DIV_FRAC_START1_MODE2 (0x0F4)
+#define QSERDES_COM_VCO_TUNE_MINVAL1 (0x0F4)
+#define QSERDES_COM_DIV_FRAC_START2_MODE2 (0x0F8)
+#define QSERDES_COM_VCO_TUNE_MINVAL2 (0x0F8)
+#define QSERDES_COM_DIV_FRAC_START3_MODE2 (0x0FC)
+#define QSERDES_COM_CMN_RSVD4 (0x0FC)
+#define QSERDES_COM_INTEGLOOP_INITVAL (0x100)
+#define QSERDES_COM_INTEGLOOP_EN (0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 (0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 (0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 (0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 (0x114)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2 (0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 (0x118)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2 (0x11C)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 (0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2 (0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL (0x124)
+#define QSERDES_COM_VCO_TUNE_MAP (0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0 (0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0 (0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1 (0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1 (0x138)
+#define QSERDES_COM_VCO_TUNE1_MODE2 (0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL1 (0x13C)
+#define QSERDES_COM_VCO_TUNE2_MODE2 (0x140)
+#define QSERDES_COM_VCO_TUNE_INITVAL2 (0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1 (0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2 (0x148)
+#define QSERDES_COM_SAR (0x14C)
+#define QSERDES_COM_SAR_CLK (0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS (0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS (0x158)
+#define QSERDES_COM_CMN_STATUS (0x15C)
+#define QSERDES_COM_RESET_SM_STATUS (0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS (0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS (0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS (0x16C)
+#define QSERDES_COM_BG_CTRL (0x170)
+#define QSERDES_COM_CLK_SELECT (0x174)
+#define QSERDES_COM_HSCLK_SEL (0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS (0x17C)
+#define QSERDES_COM_PLL_ANALOG (0x180)
+#define QSERDES_COM_CORECLK_DIV (0x184)
+#define QSERDES_COM_SW_RESET (0x188)
+#define QSERDES_COM_CORE_CLK_EN (0x18C)
+#define QSERDES_COM_C_READY_STATUS (0x190)
+#define QSERDES_COM_CMN_CONFIG (0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE (0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL (0x19C)
+#define QSERDES_COM_DEBUG_BUS0 (0x1A0)
+#define QSERDES_COM_DEBUG_BUS1 (0x1A4)
+#define QSERDES_COM_DEBUG_BUS2 (0x1A8)
+#define QSERDES_COM_DEBUG_BUS3 (0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL (0x1B0)
+#define QSERDES_COM_CMN_MISC1 (0x1B4)
+#define QSERDES_COM_CMN_MISC2 (0x1B8)
+#define QSERDES_COM_CORECLK_DIV_MODE1 (0x1BC)
+#define QSERDES_COM_CORECLK_DIV_MODE2 (0x1C0)
+#define QSERDES_COM_CMN_RSVD5 (0x1C0)
+
+/* Tx Channel base addresses */
+#define HDMI_TX_L0_BASE_OFFSET (0x400)
+#define HDMI_TX_L1_BASE_OFFSET (0x600)
+#define HDMI_TX_L2_BASE_OFFSET (0x800)
+#define HDMI_TX_L3_BASE_OFFSET (0xA00)
+
+/* Tx Channel PHY registers */
+#define QSERDES_TX_L0_BIST_MODE_LANENO (0x000)
+#define QSERDES_TX_L0_BIST_INVERT (0x004)
+#define QSERDES_TX_L0_CLKBUF_ENABLE (0x008)
+#define QSERDES_TX_L0_CMN_CONTROL_ONE (0x00C)
+#define QSERDES_TX_L0_CMN_CONTROL_TWO (0x010)
+#define QSERDES_TX_L0_CMN_CONTROL_THREE (0x014)
+#define QSERDES_TX_L0_TX_EMP_POST1_LVL (0x018)
+#define QSERDES_TX_L0_TX_POST2_EMPH (0x01C)
+#define QSERDES_TX_L0_TX_BOOST_LVL_UP_DN (0x020)
+#define QSERDES_TX_L0_HP_PD_ENABLES (0x024)
+#define QSERDES_TX_L0_TX_IDLE_LVL_LARGE_AMP (0x028)
+#define QSERDES_TX_L0_TX_DRV_LVL (0x02C)
+#define QSERDES_TX_L0_TX_DRV_LVL_OFFSET (0x030)
+#define QSERDES_TX_L0_RESET_TSYNC_EN (0x034)
+#define QSERDES_TX_L0_PRE_STALL_LDO_BOOST_EN (0x038)
+#define QSERDES_TX_L0_TX_BAND (0x03C)
+#define QSERDES_TX_L0_SLEW_CNTL (0x040)
+#define QSERDES_TX_L0_INTERFACE_SELECT (0x044)
+#define QSERDES_TX_L0_LPB_EN (0x048)
+#define QSERDES_TX_L0_RES_CODE_LANE_TX (0x04C)
+#define QSERDES_TX_L0_RES_CODE_LANE_RX (0x050)
+#define QSERDES_TX_L0_RES_CODE_LANE_OFFSET (0x054)
+#define QSERDES_TX_L0_PERL_LENGTH1 (0x058)
+#define QSERDES_TX_L0_PERL_LENGTH2 (0x05C)
+#define QSERDES_TX_L0_SERDES_BYP_EN_OUT (0x060)
+#define QSERDES_TX_L0_DEBUG_BUS_SEL (0x064)
+#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN (0x068)
+#define QSERDES_TX_L0_TX_POL_INV (0x06C)
+#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN (0x070)
+#define QSERDES_TX_L0_BIST_PATTERN1 (0x074)
+#define QSERDES_TX_L0_BIST_PATTERN2 (0x078)
+#define QSERDES_TX_L0_BIST_PATTERN3 (0x07C)
+#define QSERDES_TX_L0_BIST_PATTERN4 (0x080)
+#define QSERDES_TX_L0_BIST_PATTERN5 (0x084)
+#define QSERDES_TX_L0_BIST_PATTERN6 (0x088)
+#define QSERDES_TX_L0_BIST_PATTERN7 (0x08C)
+#define QSERDES_TX_L0_BIST_PATTERN8 (0x090)
+#define QSERDES_TX_L0_LANE_MODE (0x094)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE (0x098)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION (0x09C)
+#define QSERDES_TX_L0_ATB_SEL1 (0x0A0)
+#define QSERDES_TX_L0_ATB_SEL2 (0x0A4)
+#define QSERDES_TX_L0_RCV_DETECT_LVL (0x0A8)
+#define QSERDES_TX_L0_RCV_DETECT_LVL_2 (0x0AC)
+#define QSERDES_TX_L0_PRBS_SEED1 (0x0B0)
+#define QSERDES_TX_L0_PRBS_SEED2 (0x0B4)
+#define QSERDES_TX_L0_PRBS_SEED3 (0x0B8)
+#define QSERDES_TX_L0_PRBS_SEED4 (0x0BC)
+#define QSERDES_TX_L0_RESET_GEN (0x0C0)
+#define QSERDES_TX_L0_RESET_GEN_MUXES (0x0C4)
+#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN (0x0C8)
+#define QSERDES_TX_L0_TX_INTERFACE_MODE (0x0CC)
+#define QSERDES_TX_L0_PWM_CTRL (0x0D0)
+#define QSERDES_TX_L0_PWM_ENCODED_OR_DATA (0x0D4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND2 (0x0D8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND2 (0x0DC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND2 (0x0E0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND2 (0x0E4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND0_1 (0x0E8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND0_1 (0x0EC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND0_1 (0x0F0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND0_1 (0x0F4)
+#define QSERDES_TX_L0_VMODE_CTRL1 (0x0F8)
+#define QSERDES_TX_L0_VMODE_CTRL2 (0x0FC)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL (0x100)
+#define QSERDES_TX_L0_BIST_STATUS (0x104)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT1 (0x108)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT2 (0x10C)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV (0x110)
+
+/* HDMI PHY REGISTERS */
+#define HDMI_PHY_BASE_OFFSET (0xC00)
+
+#define HDMI_PHY_CFG (0x00)
+#define HDMI_PHY_PD_CTL (0x04)
+#define HDMI_PHY_MODE (0x08)
+#define HDMI_PHY_MISR_CLEAR (0x0C)
+#define HDMI_PHY_TX0_TX1_BIST_CFG0 (0x10)
+#define HDMI_PHY_TX0_TX1_BIST_CFG1 (0x14)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0 (0x18)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1 (0x1C)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN0 (0x20)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN1 (0x24)
+#define HDMI_PHY_TX2_TX3_BIST_CFG0 (0x28)
+#define HDMI_PHY_TX2_TX3_BIST_CFG1 (0x2C)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0 (0x30)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1 (0x34)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN0 (0x38)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN1 (0x3C)
+#define HDMI_PHY_DEBUG_BUS_SEL (0x40)
+#define HDMI_PHY_TXCAL_CFG0 (0x44)
+#define HDMI_PHY_TXCAL_CFG1 (0x48)
+#define HDMI_PHY_TX0_TX1_LANE_CTL (0x4C)
+#define HDMI_PHY_TX2_TX3_LANE_CTL (0x50)
+#define HDMI_PHY_LANE_BIST_CONFIG (0x54)
+#define HDMI_PHY_CLOCK (0x58)
+#define HDMI_PHY_MISC1 (0x5C)
+#define HDMI_PHY_MISC2 (0x60)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS0 (0x64)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS1 (0x68)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS2 (0x6C)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS0 (0x70)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS1 (0x74)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS2 (0x78)
+#define HDMI_PHY_PRE_MISR_STATUS0 (0x7C)
+#define HDMI_PHY_PRE_MISR_STATUS1 (0x80)
+#define HDMI_PHY_PRE_MISR_STATUS2 (0x84)
+#define HDMI_PHY_PRE_MISR_STATUS3 (0x88)
+#define HDMI_PHY_POST_MISR_STATUS0 (0x8C)
+#define HDMI_PHY_POST_MISR_STATUS1 (0x90)
+#define HDMI_PHY_POST_MISR_STATUS2 (0x94)
+#define HDMI_PHY_POST_MISR_STATUS3 (0x98)
+#define HDMI_PHY_STATUS (0x9C)
+#define HDMI_PHY_MISC3_STATUS (0xA0)
+#define HDMI_PHY_MISC4_STATUS (0xA4)
+#define HDMI_PHY_DEBUG_BUS0 (0xA8)
+#define HDMI_PHY_DEBUG_BUS1 (0xAC)
+#define HDMI_PHY_DEBUG_BUS2 (0xB0)
+#define HDMI_PHY_DEBUG_BUS3 (0xB4)
+#define HDMI_PHY_PHY_REVISION_ID0 (0xB8)
+#define HDMI_PHY_PHY_REVISION_ID1 (0xBC)
+#define HDMI_PHY_PHY_REVISION_ID2 (0xC0)
+#define HDMI_PHY_PHY_REVISION_ID3 (0xC4)
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 1500
+
+/*
+ * Pixel-clock identifiers handled by this PLL driver; the numeric
+ * suffix of each name is the pixel clock in kHz.
+ */
+enum hdmi_pll_freqs {
+	HDMI_PCLK_25200_KHZ,
+	HDMI_PCLK_27027_KHZ,
+	HDMI_PCLK_27000_KHZ,
+	HDMI_PCLK_74250_KHZ,
+	HDMI_PCLK_148500_KHZ,
+	HDMI_PCLK_154000_KHZ,
+	HDMI_PCLK_268500_KHZ,
+	HDMI_PCLK_297000_KHZ,
+	HDMI_PCLK_594000_KHZ,
+	HDMI_PCLK_MAX
+};
+
+/*
+ * Computed register values for one PLL/PHY configuration.  Filled in by
+ * the hdmi_8996_v*_calculate() routines and later programmed into the
+ * QSERDES common block, the four TX lanes and the HDMI PHY wrapper.
+ * Field names mirror the corresponding register names.
+ */
+struct hdmi_8996_phy_pll_reg_cfg {
+	u32 tx_l0_lane_mode;
+	u32 tx_l2_lane_mode;
+	u32 tx_l0_tx_band;
+	u32 tx_l1_tx_band;
+	u32 tx_l2_tx_band;
+	u32 tx_l3_tx_band;
+	u32 com_svs_mode_clk_sel;
+	u32 com_hsclk_sel;
+	u32 com_pll_cctrl_mode0;
+	u32 com_pll_rctrl_mode0;
+	u32 com_cp_ctrl_mode0;
+	u32 com_dec_start_mode0;
+	u32 com_div_frac_start1_mode0;
+	u32 com_div_frac_start2_mode0;
+	u32 com_div_frac_start3_mode0;
+	u32 com_integloop_gain0_mode0;
+	u32 com_integloop_gain1_mode0;
+	u32 com_lock_cmp_en;
+	u32 com_lock_cmp1_mode0;
+	u32 com_lock_cmp2_mode0;
+	u32 com_lock_cmp3_mode0;
+	u32 com_core_clk_en;
+	u32 com_coreclk_div;
+	u32 com_restrim_ctrl;
+	u32 com_vco_tune_ctrl;
+
+	/* per-lane drive strength / emphasis / voltage-mode settings */
+	u32 tx_l0_tx_drv_lvl;
+	u32 tx_l0_tx_emp_post1_lvl;
+	u32 tx_l1_tx_drv_lvl;
+	u32 tx_l1_tx_emp_post1_lvl;
+	u32 tx_l2_tx_drv_lvl;
+	u32 tx_l2_tx_emp_post1_lvl;
+	u32 tx_l3_tx_drv_lvl;
+	u32 tx_l3_tx_emp_post1_lvl;
+	u32 tx_l0_vmode_ctrl1;
+	u32 tx_l0_vmode_ctrl2;
+	u32 tx_l1_vmode_ctrl1;
+	u32 tx_l1_vmode_ctrl2;
+	u32 tx_l2_vmode_ctrl1;
+	u32 tx_l2_vmode_ctrl2;
+	u32 tx_l3_vmode_ctrl1;
+	u32 tx_l3_vmode_ctrl2;
+	u32 tx_l0_res_code_lane_tx;
+	u32 tx_l1_res_code_lane_tx;
+	u32 tx_l2_res_code_lane_tx;
+	u32 tx_l3_res_code_lane_tx;
+
+	u32 phy_mode;
+};
+
+/*
+ * Post-divider search result for v3 hardware, produced by
+ * hdmi_8996_v3_get_post_div().
+ */
+struct hdmi_8996_v3_post_divider {
+	u64 vco_freq;
+	u64 hsclk_divsel;
+	u64 vco_ratio;
+	u64 tx_band_sel;
+	u64 half_rate_mode;
+};
+
+/* Map a generic struct clk back to its enclosing hdmi_pll_vco_clk. */
+static inline struct hdmi_pll_vco_clk *to_hdmi_8996_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
+/*
+ * v1 hardware: select the PLL post-divider for bit clocks below
+ * 2.4 GHz.  The divider alternates between 3 and 4 across the lower
+ * bands (non-monotonic by design of the v1 frequency plan).  Returns
+ * HDMI_64B_ERR_VAL for bit clocks under 300 MHz.
+ */
+static inline u64 hdmi_8996_v1_get_post_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_1700MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_1200MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_850MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_600MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_450MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_300MHZ_BIT_CLK_HZ)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v2 hardware: post-divider selection for bit clocks below @vco_range,
+ * same alternating 3/4 pattern as v1 but with thresholds scaled from
+ * the runtime VCO range instead of being hard-coded.  hdmi_8ghz holds
+ * vco_range * 4; each block compares against (vco_range * 4) / 6, / 12,
+ * / 24 and against vco_range >> 1 .. >> 3.
+ */
+static inline u64 hdmi_8996_v2_get_post_div_lt_2g(u64 bclk, u64 vco_range)
+{
+	u64 hdmi_8ghz = vco_range;
+	u64 tmp_calc;
+
+	hdmi_8ghz <<= 2;
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 6U);
+
+	if (bclk >= vco_range)
+		return 2;
+	else if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 1)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 12U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 2)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 24U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 3)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v2 hardware: for bit clocks at/above the VCO range the post divider
+ * is simply hsclk_sel + 1 (valid selections 0..3).
+ *
+ * NOTE(review): "hsclk >= 0" is always true for an unsigned type; the
+ * effective guard is only "hsclk <= 3".
+ */
+static inline u64 hdmi_8996_v2_get_post_div_gt_2g(u64 hsclk)
+{
+	if (hsclk >= 0 && hsclk <= 3)
+		return hsclk + 1;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * Core-clock divider for bit clocks below 2 GHz; each divider value
+ * covers two adjacent frequency bands.  HDMI_64B_ERR_VAL below 250 MHz.
+ */
+static inline u64 hdmi_8996_get_coreclk_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_1334MHZ_BIT_CLK_HZ)
+		return 1;
+	else if (bclk >= HDMI_1000MHZ_BIT_CLK_HZ)
+		return 1;
+	else if (bclk >= HDMI_667MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_500MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_334MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_250MHZ_BIT_CLK_HZ)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * Overall core-clock division: CLKS_PLL_DIVSEL doubles (0) or
+ * quadruples (1) the base core-clock divider; other selector values
+ * are invalid.
+ */
+static inline u64 hdmi_8996_get_coreclk_div_ratio(u64 clks_pll_divsel,
+						  u64 coreclk_div)
+{
+	if (clks_pll_divsel == 0)
+		return coreclk_div*2;
+	else if (clks_pll_divsel == 1)
+		return coreclk_div*4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v1 hardware: TX band select — each step down halves the frequency
+ * threshold (2.4 GHz, 1.2 GHz, 600 MHz, 300 MHz); the band value is
+ * later used as a power-of-two divider (1 << band).
+ */
+static inline u64 hdmi_8996_v1_get_tx_band(u64 bclk)
+{
+	if (bclk >= 2400000000UL)
+		return 0;
+	if (bclk >= 1200000000UL)
+		return 1;
+	if (bclk >= 600000000UL)
+		return 2;
+	if (bclk >= 300000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v2 hardware: same TX-band ladder as v1 but anchored to the runtime
+ * @vco_range instead of fixed thresholds.
+ */
+static inline u64 hdmi_8996_v2_get_tx_band(u64 bclk, u64 vco_range)
+{
+	if (bclk >= vco_range)
+		return 0;
+	else if (bclk >= vco_range >> 1)
+		return 1;
+	else if (bclk >= vco_range >> 2)
+		return 2;
+	else if (bclk >= vco_range >> 3)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v1 hardware: HSCLK_SEL value from the data rate (9.6 GHz, 4.8 GHz,
+ * 3.2 GHz, 2.4 GHz bands); HDMI_64B_ERR_VAL below 2.4 GHz.
+ */
+static inline u64 hdmi_8996_v1_get_hsclk(u64 fdata)
+{
+	if (fdata >= 9600000000UL)
+		return 0;
+	else if (fdata >= 4800000000UL)
+		return 1;
+	else if (fdata >= 3200000000UL)
+		return 2;
+	else if (fdata >= 2400000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v2 hardware: HSCLK_SEL with thresholds derived from @vco_range —
+ * 4x, 2x, (4/3)x and 1x the VCO range.
+ */
+static inline u64 hdmi_8996_v2_get_hsclk(u64 fdata, u64 vco_range)
+{
+	u64 tmp_calc = vco_range;
+
+	tmp_calc <<= 2;
+	do_div(tmp_calc, 3U);
+	if (fdata >= (vco_range << 2))
+		return 0;
+	else if (fdata >= (vco_range << 1))
+		return 1;
+	else if (fdata >= tmp_calc)
+		return 2;
+	else if (fdata >= vco_range)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+
+}
+
+/*
+ * v2 hardware: VCO frequency = bclk * post_div * (1 << tx_band).  The
+ * post divider comes from the >=2G path (via HSCLK_SEL) or the <2G
+ * lookup depending on which side of @vco_range the bit clock falls.
+ */
+static inline u64 hdmi_8996_v2_get_vco_freq(u64 bclk, u64 vco_range)
+{
+	u64 tx_band_div_ratio = 1U << hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	u64 pll_post_div_ratio;
+
+	if (bclk >= vco_range) {
+		u64 hsclk = hdmi_8996_v2_get_hsclk(bclk, vco_range);
+
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	} else {
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_lt_2g(bclk,
+								     vco_range);
+	}
+
+	return bclk * (pll_post_div_ratio * tx_band_div_ratio);
+}
+
+/*
+ * v2 hardware: data rate (fdata).  At/above @vco_range the bit clock
+ * is used directly; below, fdata = vco_freq / post_div, propagating
+ * HDMI_64B_ERR_VAL when no post divider fits.
+ */
+static inline u64 hdmi_8996_v2_get_fdata(u64 bclk, u64 vco_range)
+{
+	if (bclk >= vco_range)
+		return bclk;
+
+	{
+		u64 tmp_calc = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+		u64 pll_post_div_ratio_lt_2g = hdmi_8996_v2_get_post_div_lt_2g(
+						bclk, vco_range);
+		if (pll_post_div_ratio_lt_2g == HDMI_64B_ERR_VAL)
+			return HDMI_64B_ERR_VAL;
+
+		do_div(tmp_calc, pll_post_div_ratio_lt_2g);
+		return tmp_calc;
+	}
+}
+
+/*
+ * PLL loop-filter helpers: charge-pump (CP_CTRL), loop resistor
+ * (RCTRL), loop capacitor (CCTRL) and integral-loop gain values.  Each
+ * has one setting for fractional-N operation (frac_start != 0) or SSC,
+ * and another for integer-N operation.  Values are hard-coded for the
+ * fixed 19.2 MHz reference clock.
+ */
+static inline u64 hdmi_8996_get_cpctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) ||
+	    (gen_ssc == true))
+		/*
+		 * This should be ROUND(11/(19.2/20))).
+		 * Since ref clock does not change, hardcoding to 11
+		 */
+		return 0xB;
+
+	return 0x23;
+}
+
+static inline u64 hdmi_8996_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || (gen_ssc == true))
+		return 0x16;
+
+	return 0x10;
+}
+
+static inline u64 hdmi_8996_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || (gen_ssc == true))
+		return 0x28;
+
+	return 0x1;
+}
+
+static inline u64 hdmi_8996_get_integloop_gain(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || (gen_ssc == true))
+		return 0x80;
+
+	return 0xC4;
+}
+
+/*
+ * v3 hardware: integral-loop gain additionally scales with the digital
+ * clock divider (doubled or quadrupled), clamped to the 11-bit register
+ * maximum 0x7FE.
+ */
+static inline u64 hdmi_8996_v3_get_integloop_gain(u64 frac_start, u64 bclk,
+							bool gen_ssc)
+{
+	u64 digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+	u64 base = ((frac_start != 0) || (gen_ssc == true)) ? 0x40 : 0xC4;
+
+	base <<= digclk_divsel;
+
+	return (base <= 2046 ? base : 0x7FE);
+}
+
+/*
+ * VCO tune code: (13000 - vco_MHz - 256) / 5, where vco_MHz is
+ * fdata * div scaled down to MHz.  Constants are hardware calibration
+ * values for this PHY — presumably from the IP vendor; not derivable
+ * from this file.
+ */
+static inline u64 hdmi_8996_get_vco_tune(u64 fdata, u64 div)
+{
+	u64 vco_tune;
+
+	vco_tune = fdata * div;
+	do_div(vco_tune, 1000000);
+	vco_tune = 13000 - vco_tune - 256;
+	do_div(vco_tune, 5);
+
+	return vco_tune;
+}
+
+/*
+ * Lock-comparator count: cnt * core_clk / 19.2 MHz, rounded to the
+ * nearest integer, minus one (register holds N-1).
+ */
+static inline u64 hdmi_8996_get_pll_cmp(u64 pll_cmp_cnt, u64 core_clk)
+{
+	u64 pll_cmp;
+	u64 rem;
+
+	pll_cmp = pll_cmp_cnt * core_clk;
+	rem = do_div(pll_cmp, HDMI_REF_CLOCK);
+	if (rem > (HDMI_REF_CLOCK >> 1))
+		pll_cmp++;
+	pll_cmp -= 1;
+
+	return pll_cmp;
+}
+
+/*
+ * v3 variant: same rounded N-1 computation but against fdata with an
+ * extra /10 factor folded into the divisor.
+ */
+static inline u64 hdmi_8996_v3_get_pll_cmp(u64 pll_cmp_cnt, u64 fdata)
+{
+	u64 dividend = pll_cmp_cnt * fdata;
+	u64 divisor = HDMI_REF_CLOCK * 10;
+	u64 rem;
+
+	rem = do_div(dividend, divisor);
+	if (rem > (divisor >> 1))
+		dividend++;
+
+	return dividend - 1;
+}
+
+/*
+ * v3 hardware: exhaustively search the 15 VCO ratios x 4 TX bands
+ * (60 candidates) for the smallest VCO frequency that is still >=
+ * HDMI_VCO_MIN_FREQ.  If nothing fits at full rate, retry once with
+ * the bit clock halved (half_rate_mode).  On success fills @pd and
+ * maps the chosen ratio to its HSCLK_DIVSEL register encoding;
+ * returns -EINVAL when even half-rate finds no candidate.
+ *
+ * NOTE(review): pd->half_rate_mode is never written — only the local
+ * variable is updated; callers reading that field would see stale
+ * data.  Also, pd->hsclk_divsel is left unset if vco_ratio is not in
+ * the switch (cannot happen with the current ratio[] table).
+ */
+static int hdmi_8996_v3_get_post_div(struct hdmi_8996_v3_post_divider *pd,
+						u64 bclk)
+{
+	u32 ratio[] = {2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35};
+	u32 tx_band_sel[] = {0, 1, 2, 3};
+	u64 vco_freq[60];
+	u64 vco, vco_optimal, half_rate_mode = 0;
+	int vco_optimal_index, vco_freq_index;
+	int i, j, k, x;
+
+	for (i = 0; i <= 1; i++) {
+		vco_optimal = HDMI_VCO_MAX_FREQ;
+		vco_optimal_index = -1;
+		vco_freq_index = 0;
+		for (j = 0; j < 15; j++) {
+			for (k = 0; k < 4; k++) {
+				u64 ratio_mult = ratio[j] << tx_band_sel[k];
+
+				vco = bclk >> half_rate_mode;
+				vco *= ratio_mult;
+				vco_freq[vco_freq_index++] = vco;
+			}
+		}
+
+		/* pick the lowest candidate that is still above the VCO floor */
+		for (x = 0; x < 60; x++) {
+			u64 vco_tmp = vco_freq[x];
+
+			if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+					(vco_tmp <= vco_optimal)) {
+				vco_optimal = vco_tmp;
+				vco_optimal_index = x;
+			}
+		}
+
+		if (vco_optimal_index == -1) {
+			if (!half_rate_mode)
+				half_rate_mode++;
+			else
+				return -EINVAL;
+		} else {
+			/* index layout: 4 band entries per ratio */
+			pd->vco_freq = vco_optimal;
+			pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+			pd->vco_ratio = ratio[vco_optimal_index / 4];
+			break;
+		}
+	}
+
+	/* translate the divide ratio into the HSCLK_SEL register encoding */
+	switch (pd->vco_ratio) {
+	case 2:
+		pd->hsclk_divsel = 0;
+		break;
+	case 3:
+		pd->hsclk_divsel = 4;
+		break;
+	case 4:
+		pd->hsclk_divsel = 8;
+		break;
+	case 5:
+		pd->hsclk_divsel = 12;
+		break;
+	case 6:
+		pd->hsclk_divsel = 1;
+		break;
+	case 9:
+		pd->hsclk_divsel = 5;
+		break;
+	case 10:
+		pd->hsclk_divsel = 2;
+		break;
+	case 12:
+		pd->hsclk_divsel = 9;
+		break;
+	case 14:
+		pd->hsclk_divsel = 3;
+		break;
+	case 15:
+		pd->hsclk_divsel = 13;
+		break;
+	case 20:
+		pd->hsclk_divsel = 10;
+		break;
+	case 21:
+		pd->hsclk_divsel = 7;
+		break;
+	case 25:
+		pd->hsclk_divsel = 14;
+		break;
+	case 28:
+		pd->hsclk_divsel = 11;
+		break;
+	case 35:
+		pd->hsclk_divsel = 15;
+		break;
+	};
+
+	return 0;
+}
+
+/*
+ * Compute all v1 PHY/PLL register settings for @pix_clk (Hz) and fill
+ * @cfg.  Derives the bit clock (10x pixel clock), picks the TX band,
+ * post dividers and HSCLK selection, computes the fractional-N decimal
+ * and 20-bit fraction start values against the 19.2 MHz reference, the
+ * loop-filter settings and the lock-comparator count, then splits the
+ * multi-byte values into their per-register fields.  Drive levels /
+ * voltage modes are chosen by frequency band.  Returns 0 on success,
+ * -EINVAL when the clock falls outside every supported band.
+ */
+static int hdmi_8996_v1_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div_gt_2g;
+	u64 post_div_lt_2g;
+	u64 coreclk_div1_lt_2g;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	/* above 3.4 GHz the TMDS clock runs at a quarter rate */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = bclk/4;
+	else
+		tmds_clk = bclk;
+
+	post_div_lt_2g = hdmi_8996_v1_get_post_div_lt_2g(bclk);
+	if (post_div_lt_2g == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	/* NOTE(review): computed but never used below, and not error-checked */
+	coreclk_div1_lt_2g = hdmi_8996_get_coreclk_div_lt_2g(bclk);
+
+	core_clk_div_ratio = hdmi_8996_get_coreclk_div_ratio(
+				HDMI_CLKS_PLL_DIVSEL, HDMI_CORECLK_DIV);
+
+	tx_band = hdmi_8996_v1_get_tx_band(bclk);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ) {
+		fdata = bclk;
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		vco_freq = bclk * (post_div_gt_2g * tx_band_div_ratio);
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_gt_2g);
+	} else {
+		/* below 2.4 GHz: derive fdata from the VCO via the <2G divider */
+		vco_freq = bclk * (post_div_lt_2g * tx_band_div_ratio);
+		fdata = vco_freq;
+		do_div(fdata, post_div_lt_2g);
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_lt_2g);
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+	}
+
+	/* Decimal and fraction values */
+	dec_start = fdata * post_div_gt_2g;
+	do_div(dec_start, pll_divisor);
+	/* 20-bit fractional part, rounded to the nearest integer */
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div_gt_2g))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	/* NOTE(review): vco_tune is only logged below; it is not stored in @cfg */
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div_gt_2g);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+	cfg->tx_l0_res_code_lane_tx = 0x33;
+	cfg->tx_l1_res_code_lane_tx = 0x33;
+	cfg->tx_l2_res_code_lane_tx = 0x33;
+	cfg->tx_l3_res_code_lane_tx = 0x33;
+	cfg->com_restrim_ctrl = 0x0;
+	cfg->com_vco_tune_ctrl = 0x1C;
+
+	cfg->com_svs_mode_clk_sel =
+			(bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2);
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+
+	/* lane drive / emphasis / vmode settings are banded by bit clock */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n", cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+						cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+						cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+						cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+						cfg->tx_l3_res_code_lane_tx);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+/*
+ * hdmi_8996_v2_calculate() - derive 8996 v2 HDMI PHY/PLL settings
+ * @pix_clk: pixel clock rate in Hz
+ * @cfg: output; filled with the computed register field values
+ *
+ * Computes the bit clock (pix_clk * HDMI_BIT_CLK_TO_PIX_CLK_RATIO), picks
+ * the VCO range, derives divider/charge-pump/loop-gain/lock-comparator
+ * values via the v2 helper functions, and converts them into the register
+ * fields of @cfg. The caller is expected to zero-initialize @cfg; fields
+ * not assigned here (e.g. com_lock_cmp_en) stay zero.
+ *
+ * Return: 0 on success, -EINVAL if any derived parameter is invalid.
+ */
+static int hdmi_8996_v2_calculate(u32 pix_clk,
+		struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div;
+	u64 core_clk_div;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 vco_range;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	/* Above the high-frequency threshold, TMDS runs at 1/4 pixel clock. */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	vco_range = bclk < HDMI_282MHZ_BIT_CLK_HZ ? HDMI_2000MHZ_BIT_CLK_HZ :
+				HDMI_2250MHZ_BIT_CLK_HZ;
+
+	/* Helpers return HDMI_64B_ERR_VAL on invalid input; bail out early. */
+	fdata = hdmi_8996_v2_get_fdata(bclk, vco_range);
+	if (fdata == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	hsclk = hdmi_8996_v2_get_hsclk(fdata, vco_range);
+	if (hsclk == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	if (bclk >= vco_range)
+		post_div = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	else
+		post_div = hdmi_8996_v2_get_post_div_lt_2g(bclk, vco_range);
+
+	if (post_div == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	core_clk_div = 5;
+	core_clk_div_ratio = core_clk_div * 2;
+
+	tx_band = hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	vco_freq = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+	clk_divtx = vco_freq;
+	do_div(clk_divtx, post_div);
+
+	/* Decimal and fraction values */
+	dec_start = fdata * post_div;
+	do_div(dec_start, pll_divisor);
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	/* NOTE(review): rounding uses >= here but > in the v3 path — confirm intended. */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	/* Lane drive/emphasis and vmode settings are banded by bit clock. */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x39;
+		cfg->tx_l1_res_code_lane_tx = 0x39;
+		cfg->tx_l2_res_code_lane_tx = 0x39;
+		cfg->tx_l3_res_code_lane_tx = 0x39;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+		cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_vco_tune_ctrl = 0x%x\n",
+		cfg->com_vco_tune_ctrl);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	/* NOTE(review): com_lock_cmp_en is never assigned in this function;
+	 * its value here is whatever the caller initialized @cfg with.
+	 */
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+		cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+		cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+		cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+		cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+		cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+		cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+		cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+		cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+		cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+		cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+		cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+		cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+		cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+		cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+		cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+		cfg->tx_l3_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n", cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+/*
+ * hdmi_8996_v3_calculate() - derive 8996 v3 (and v3 1.8V) HDMI PHY/PLL settings
+ * @pix_clk: pixel clock rate in Hz
+ * @cfg: output; filled with the computed register field values
+ *
+ * Unlike the v2 path, the post-divider chain (VCO ratio/frequency, HSCLK
+ * divider select, TX band select) comes from hdmi_8996_v3_get_post_div().
+ * The results are converted into the register fields of @cfg.
+ *
+ * Return: 0 on success, -EINVAL if the post-divider lookup fails or yields
+ * a non-positive VCO ratio/frequency.
+ */
+static int hdmi_8996_v3_calculate(u32 pix_clk,
+		struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	struct hdmi_8996_v3_post_divider pd;
+	u64 fdata, tmds_clk;
+	u64 bclk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, HSCLK, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	/* Above the high-frequency threshold, TMDS runs at 1/4 pixel clock. */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) || pd.vco_ratio <= 0 ||
+			pd.vco_freq <= 0)
+		goto fail;
+
+	vco_freq = pd.vco_freq;
+	fdata = pd.vco_freq;
+	do_div(fdata, pd.vco_ratio);
+
+	hsclk = pd.hsclk_divsel;
+	dec_start = vco_freq;
+	do_div(dec_start, pll_divisor);
+
+	/* Fractional part of vco_freq / pll_divisor, in 1/2^20 units. */
+	frac_start = vco_freq * (1 << 20);
+	rem = do_div(frac_start, pll_divisor);
+	frac_start -= dec_start * (1 << 20);
+	/* NOTE(review): rounding uses > here but >= in the v2 path — confirm intended. */
+	if (rem > (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_v3_get_integloop_gain(frac_start, bclk,
+			false);
+	pll_cmp = hdmi_8996_v3_get_pll_cmp(1024, fdata);
+	tx_band = pd.tx_band_sel;
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x20 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_lock_cmp_en = 0x04;
+	cfg->com_core_clk_en = 0x2C;
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	cfg->tx_l0_lane_mode = 0x43;
+	cfg->tx_l2_lane_mode = 0x43;
+
+	/* Lane drive/emphasis and vmode settings are banded by bit clock. */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+	}
+
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+		cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+		cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+		cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+		cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+		cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+		cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+		cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+		cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+		cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+		cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+		cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+		cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+		cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+		cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	rc = 0;
+fail:
+	return rc;
+}
+
+/*
+ * Dispatch PLL parameter calculation to the PHY-revision specific helper.
+ * V1 is the fallback for any revision not explicitly listed.
+ */
+static int hdmi_8996_calculate(u32 pix_clk,
+		struct hdmi_8996_phy_pll_reg_cfg *cfg, u32 ver)
+{
+	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
+		return hdmi_8996_v3_calculate(pix_clk, cfg);
+
+	if (ver == HDMI_VERSION_8996_V2)
+		return hdmi_8996_v2_calculate(pix_clk, cfg);
+
+	return hdmi_8996_v1_calculate(pix_clk, cfg);
+}
+
+/*
+ * hdmi_8996_phy_pll_set_clk_rate() - program the HDMI PHY/PLL for a TMDS rate
+ * @c: vco clock handle (container of struct hdmi_pll_vco_clk)
+ * @tmds_clk: target clock rate passed to the parameter calculation
+ * @ver: HDMI PHY hardware revision (HDMI_VERSION_8996_V*)
+ *
+ * Calculates the revision-specific PLL parameters, powers the PHY down,
+ * then performs the documented power-up/programming sequence: serdes
+ * common block, per-lane TX blocks (L0..L3), and finally the PHY mode
+ * registers. The register write order follows the hardware power-up
+ * sequence and must not be reordered.
+ *
+ * Return: 0 on success, or the error from hdmi_8996_calculate().
+ */
+static int hdmi_8996_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+
+	rc = hdmi_8996_calculate(tmds_clk, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		return rc;
+	}
+
+	/* Initially shut down PHY */
+	DEV_DBG("%s: Disabling PHY\n", __func__);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	/* Power up sequence */
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+	case HDMI_VERSION_8996_V3:
+	case HDMI_VERSION_8996_V3_1_8:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x04);
+		break;
+	default:
+		break;
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX0_TX1_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX2_TX3_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_LANE_MODE, cfg.tx_l0_lane_mode);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_LANE_MODE, cfg.tx_l2_lane_mode);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l0_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l1_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l2_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l3_tx_band);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0E);
+	if (ver == HDMI_VERSION_8996_V1)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+
+	/* Bypass VCO calibration */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+		       cfg.com_svs_mode_clk_sel);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_TRIM, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IVCO, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_CTRL,
+		       cfg.com_vco_tune_ctrl);
+
+	/*
+	 * NOTE(review): for V1 this repeats the SVS_MODE_CLK_SEL write done
+	 * just above; kept as-is since the write order mirrors the hardware
+	 * bring-up sequence.
+	 */
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+			       cfg.com_svs_mode_clk_sel);
+		break;
+	default:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+		break;
+	}
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_SELECT, 0x30);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_HSCLK_SEL,
+		       cfg.com_hsclk_sel);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_EN,
+		       cfg.com_lock_cmp_en);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CCTRL_MODE0,
+		       cfg.com_pll_cctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RCTRL_MODE0,
+		       cfg.com_pll_rctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CP_CTRL_MODE0,
+		       cfg.com_cp_ctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+		       cfg.com_integloop_gain0_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+		       cfg.com_integloop_gain1_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0,
+		       cfg.com_lock_cmp1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0,
+		       cfg.com_lock_cmp2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0,
+		       cfg.com_lock_cmp3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_EN,
+		       cfg.com_core_clk_en);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORECLK_DIV,
+		       cfg.com_coreclk_div);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+	/*
+	 * TX lanes setup (TX 0/1/2/3). The V3 1.8V part overrides the
+	 * computed drive levels with fixed values.
+	 */
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l0_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l0_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l1_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l1_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l2_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l2_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000020);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l3_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l3_tx_emp_post1_lvl);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l0_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l0_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l1_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l1_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l2_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l2_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l3_vmode_ctrl1);
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_VMODE_CTRL2,
+			       0x0000000D);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_VMODE_CTRL2,
+			       cfg.tx_l3_vmode_ctrl2);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+
+	/* Pre-V3 parts take the computed resistor trim/lane codes. */
+	if (ver < HDMI_VERSION_8996_V3) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l0_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l1_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l2_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l3_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESTRIM_CTRL,
+			       cfg.com_restrim_ctrl);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG1, 0x05);
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, cfg.phy_mode);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x03);
+
+	if (ver == HDMI_VERSION_8996_V2) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
+	}
+	/*
+	 * Ensure that vco configuration gets flushed to hardware before
+	 * enabling the PLL
+	 */
+	wmb();
+	return 0;
+}
+
+/*
+ * Poll the HDMI PHY status register until bit 0 reports ready.
+ * Returns 1 when the PHY is ready, 0 on poll timeout, or a negative
+ * error code if the PLL resources cannot be enabled.
+ */
+static int hdmi_8996_phy_ready_status(struct mdss_pll_resources *io)
+{
+	int rc;
+	int ready = 0;
+	u32 attempt;
+	u32 status;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PHY Ready\n", __func__);
+
+	for (attempt = 0; attempt < HDMI_PLL_POLL_MAX_READS; attempt++) {
+		status = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
+		if ((status & BIT(0)) == 1) {
+			ready = 1;
+			DEV_DBG("%s: PHY READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+	}
+
+	if (!ready)
+		DEV_DBG("%s: PHY READY TIMEOUT\n", __func__);
+
+	mdss_pll_resource_enable(io, false);
+
+	return ready;
+}
+
+/*
+ * Poll QSERDES_COM_C_READY_STATUS until bit 0 reports the PLL has locked.
+ * Returns 1 when locked, 0 on poll timeout, or a negative error code if
+ * the PLL resources cannot be enabled.
+ */
+static int hdmi_8996_pll_lock_status(struct mdss_pll_resources *io)
+{
+	int rc;
+	int locked = 0;
+	u32 attempt;
+	u32 status;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PLL lock\n", __func__);
+
+	for (attempt = 0; attempt < HDMI_PLL_POLL_MAX_READS; attempt++) {
+		status = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if ((status & BIT(0)) == 1) {
+			locked = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+	}
+
+	if (!locked)
+		DEV_DBG("%s: C READY TIMEOUT\n", __func__);
+
+	mdss_pll_resource_enable(io, false);
+
+	return locked;
+}
+
+/*
+ * hdmi_8996_v1_perform_sw_calibration() - software VCO tune-code search
+ * for 8996 v1 PHYs.
+ *
+ * Binary-searches the 10-bit VCO tune code (written via VCO_TUNE1/2_MODE0)
+ * until the lock-comparator counter read back over the debug bus matches
+ * the expected value programmed in LOCK_CMP1..3_MODE0.  The debug bus and
+ * comparator override bits in CMN_MISC1/LOCK_CMP_CFG are enabled for the
+ * duration of the search and partially restored at the end.
+ *
+ * Return: 0 on success, -ENOTSUPP if the code search does not converge.
+ */
+static int hdmi_8996_v1_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	u32 max_code = 0x190;
+	u32 min_code = 0x0;
+	u32 max_cnt = 0;
+	u32 min_cnt = 0;
+	u32 expected_counter_value = 0;
+	u32 step = 0;
+	u32 dbus_all = 0;
+	u32 dbus_sel = 0;
+	u32 vco_code = 0;
+	u32 val = 0;
+
+	/* initial midpoint guess for the tune code */
+	vco_code = 0xC8;
+
+	DEV_DBG("%s: Starting SW calibration with vco_code = %d\n", __func__,
+		 vco_code);
+
+	expected_counter_value =
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0) << 16) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0) << 8) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0));
+
+	DEV_DBG("%s: expected_counter_value = %d\n", __func__,
+		 expected_counter_value);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x4);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val |= BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	udelay(60);
+
+	while (1) {
+		/* program the 10-bit candidate code (8 LSBs + 2 MSBs) */
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+			       vco_code & 0xFF);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+			       (vco_code >> 8) & 0x3);
+
+		udelay(20);
+
+		/* pulse LOCK_CMP_CFG BIT(1) low->high to restart the count */
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val &= ~BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val |= BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		dbus_all =
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS3) << 24) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS2) << 16) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS1) << 8) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS0));
+
+		dbus_sel = (dbus_all >> 9) & 0x3FFFF;
+		DEV_DBG("%s: loop[%d], dbus_all = 0x%x, dbus_sel = 0x%x\n",
+			__func__, step, dbus_all, dbus_sel);
+		if (dbus_sel == 0)
+			DEV_ERR("%s: CHECK HDMI REF CLK\n", __func__);
+
+		/* narrow the [min_code, max_code] window around the target */
+		if (dbus_sel == expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+		} else if (dbus_sel == 0) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel > expected_counter_value) {
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel < expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		}
+
+		step++;
+
+		/* give up on degenerate codes or after 0x3FF iterations */
+		if ((vco_code == 0) || (vco_code == 0x3FF) || (step > 0x3FF)) {
+			DEV_ERR("%s: VCO tune code search failed\n", __func__);
+			rc = -ENOTSUPP;
+			break;
+		}
+		if ((max_code - min_code) <= 1) {
+			if ((max_code - min_code) == 1) {
+				/* pick whichever endpoint is closer */
+				if (abs((int)(max_cnt - expected_counter_value))
+				    < abs((int)(min_cnt - expected_counter_value
+					))) {
+					vco_code = max_code;
+				} else {
+					vco_code = min_code;
+				}
+			}
+			break;
+		}
+		DEV_DBG("%s: loop[%d], new vco_code = %d\n", __func__, step,
+			 vco_code);
+	}
+
+	DEV_DBG("%s: CALIB done. vco_code = %d\n", __func__, vco_code);
+	/* commit the final code and undo the comparator override */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+		       vco_code & 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+		       (vco_code >> 8) & 0x3);
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val &= ~BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val &= ~BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	return rc;
+}
+
+/*
+ * hdmi_8996_v2_perform_sw_calibration() - calibration fallback for v2 PHYs.
+ *
+ * Waits (twice the normal poll budget) for C_READY; if the PLL failed to
+ * lock, or locked but the PLLCAL code/integral-loop readings indicate the
+ * automatic calibration railed (code1 == 0xFF, code2[1:0] == 1, integral
+ * loop > 0xC0), re-runs the PHY start-up sequence via HDMI_PHY_CFG.
+ *
+ * Return: always 0; the recovery attempt itself is best-effort.
+ */
+static int hdmi_8996_v2_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	u32 vco_code1, vco_code2, integral_loop, ready_poll;
+	u32 read_count = 0;
+
+	while (read_count < (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if ((ready_poll & BIT(0)) == 1) {
+			ready_poll = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = 0;
+		DEV_DBG("%s: C READY TIMEOUT, TRYING SW CALIBRATION\n",
+								__func__);
+	}
+
+	vco_code1 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE1_STATUS);
+	vco_code2 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE2_STATUS);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x5);
+	integral_loop = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DEBUG_BUS0);
+
+	if (((ready_poll & 0x1) == 0) || (((ready_poll & 1) == 1) &&
+			(vco_code1 == 0xFF) && ((vco_code2 & 0x3) == 0x1) &&
+			(integral_loop > 0xC0))) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x04);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x17);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x11);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	}
+	return rc;
+}
+
+/*
+ * hdmi_8996_perform_sw_calibration() - dispatch SW calibration by PHY rev.
+ * v3 and later PHYs need no software calibration, so they fall through
+ * and return 0.
+ */
+static int hdmi_8996_perform_sw_calibration(struct clk *c, u32 ver)
+{
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		return hdmi_8996_v1_perform_sw_calibration(c);
+	case HDMI_VERSION_8996_V2:
+		return hdmi_8996_v2_perform_sw_calibration(c);
+	}
+	return 0;
+}
+
+/*
+ * hdmi_8996_vco_enable() - power up the HDMI PHY/PLL and wait for lock.
+ *
+ * Starts the PHY via HDMI_PHY_CFG, runs the version-specific software
+ * calibration, verifies PLL lock and PHY readiness, disables SSC and
+ * restarts the retiming buffer.
+ *
+ * Return: 0 on success, negative errno on failure.
+ *
+ * Fix: hdmi_8996_pll_lock_status()/hdmi_8996_phy_ready_status() return a
+ * boolean (1 = ok, 0 = timeout) or a negative errno.  The previous code
+ * did "if (!rc) return rc;", which returned 0 (success) to the clock
+ * framework when the PLL never locked or the PHY never became ready, and
+ * let a negative resource error pass as "locked".  Both paths now report
+ * failure explicitly.
+ */
+static int hdmi_8996_vco_enable(struct clk *c, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x1);
+	udelay(100);
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	udelay(100);
+
+	rc = hdmi_8996_perform_sw_calibration(c, ver);
+	if (rc) {
+		DEV_ERR("%s: software calibration failed\n", __func__);
+		return rc;
+	}
+
+	rc = hdmi_8996_pll_lock_status(io);
+	if (rc < 0)
+		return rc;
+	if (!rc) {
+		DEV_ERR("%s: PLL not locked\n", __func__);
+		return -EBUSY;
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+
+	/* Disable SSC */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+	rc = hdmi_8996_phy_ready_status(io);
+	if (rc < 0)
+		return rc;
+	if (!rc) {
+		DEV_ERR("%s: PHY not READY\n", __func__);
+		return -EBUSY;
+	}
+
+	/* Restart the retiming buffer */
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x18);
+	udelay(1);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+
+	io->pll_on = true;
+	return 0;
+}
+
+/* Per-revision .enable trampolines bound into the clk_ops tables below. */
+static int hdmi_8996_v1_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+/*
+ * hdmi_8996_vco_get_lock_range() - compute the ppm window within which the
+ * PLL can track a new pixel clock without a full reconfiguration.
+ *
+ * Derives the window from the post-divider configuration for @pixel_clk.
+ *
+ * Return: the ppm range, or -EINVAL if the post divider cannot be derived.
+ * NOTE(review): pixel_clk is unsigned long but is printed with %ld —
+ * should arguably be %lu; confirm before relying on the debug output.
+ */
+static int hdmi_8996_vco_get_lock_range(struct clk *c, unsigned long pixel_clk)
+{
+	u32 rng = 64, cmp_cnt = 1024;
+	u32 coreclk_div = 5, clks_pll_divsel = 2;
+	u32 vco_freq, vco_ratio, ppm_range;
+	u64 bclk;
+	struct hdmi_8996_v3_post_divider pd;
+
+	bclk = ((u64)pixel_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	DEV_DBG("%s: rate=%ld\n", __func__, pixel_clk);
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) ||
+		pd.vco_ratio <= 0 || pd.vco_freq <= 0) {
+		DEV_ERR("%s: couldn't get post div\n", __func__);
+		return -EINVAL;
+	}
+
+	/* convert vco_freq from Hz to MHz for the ppm arithmetic below */
+	do_div(pd.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
+
+	vco_freq  = (u32) pd.vco_freq;
+	vco_ratio = (u32) pd.vco_ratio;
+
+	DEV_DBG("%s: freq %d, ratio %d\n", __func__,
+		vco_freq, vco_ratio);
+
+	ppm_range = (rng * HDMI_REF_CLOCK) / cmp_cnt;
+	ppm_range /= vco_freq / vco_ratio;
+	ppm_range *= coreclk_div * clks_pll_divsel;
+
+	DEV_DBG("%s: ppm range: %d\n", __func__, ppm_range);
+
+	return ppm_range;
+}
+
+/*
+ * hdmi_8996_vco_rate_atomic_update() - retune the running PLL to @rate
+ * without a full power cycle.
+ *
+ * Writes only the decimal/fractional divider start values computed by
+ * hdmi_8996_calculate() and pulses FREQ_UPDATE to latch them.  Only valid
+ * when the caller has verified @rate is within the PLL's lock range.
+ *
+ * Return: 0 on success, negative errno if the PLL calculation fails.
+ */
+static int hdmi_8996_vco_rate_atomic_update(struct clk *c,
+	unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *pll;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+	int rc = 0;
+
+	rc = hdmi_8996_calculate(rate, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		goto end;
+	}
+
+	pll = io->pll_base;
+
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	/* pulse FREQ_UPDATE to latch the new divider values */
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x01);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x00);
+
+	DEV_DBG("%s: updated to rate %ld\n", __func__, rate);
+end:
+	return rc;
+}
+
+/*
+ * hdmi_8996_vco_set_rate() - program the VCO to @rate.
+ *
+ * If the PLL is already locked and the new rate lies inside the computed
+ * lock range, performs an atomic divider update; otherwise does a full
+ * reconfiguration and (when the PLL was already on) a re-enable.
+ *
+ * NOTE(review): "rate - vco->rate" is an unsigned subtraction passed to
+ * abs(); if rate < vco->rate the difference wraps, so the atomic-update
+ * path may be skipped (or taken) incorrectly for downward rate changes —
+ * confirm against the original intent before relying on it.
+ * NOTE(review): a failure from hdmi_8996_phy_pll_set_clk_rate() is logged
+ * but the function still returns 0.
+ */
+static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	unsigned int set_power_dwn = 0;
+	bool atomic_update = false;
+	int rc, pll_lock_range;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	DEV_DBG("%s: rate %ld\n", __func__, rate);
+
+	/* only consider an atomic update when PLL is locked and PHY ready */
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0) &&
+		MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+		pll_lock_range = hdmi_8996_vco_get_lock_range(c, vco->rate);
+
+		if (pll_lock_range > 0 && vco->rate) {
+			u32 range_limit;
+
+			range_limit  = vco->rate *
+				(pll_lock_range / HDMI_KHZ_TO_HZ);
+			range_limit /= HDMI_KHZ_TO_HZ;
+
+			DEV_DBG("%s: range limit %d\n", __func__, range_limit);
+
+			if (abs(rate - vco->rate) < range_limit)
+				atomic_update = true;
+		}
+	}
+
+	if (io->pll_on && !atomic_update)
+		set_power_dwn = 1;
+
+	if (atomic_update) {
+		hdmi_8996_vco_rate_atomic_update(c, rate, ver);
+	} else {
+		rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
+		if (rc)
+			DEV_ERR("%s: Failed to set clk rate\n", __func__);
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	/* full reconfig while running requires a re-enable of the PLL */
+	if (set_power_dwn)
+		hdmi_8996_vco_enable(c, ver);
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+	return 0;
+}
+
+/* Per-revision .set_rate trampolines for the clk_ops tables. */
+static int hdmi_8996_v1_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3_1_8);
+}
+
+/*
+ * hdmi_get_hsclk_sel_divisor() - map an HSCLK_SEL field value to the
+ * corresponding high-speed clock divisor.
+ *
+ * Used by hdmi_8996_vco_get_rate() to back-compute the VCO rate from the
+ * programmed register state.  Unknown selector values log an error and
+ * fall back to a divisor of 1.
+ */
+static unsigned long hdmi_get_hsclk_sel_divisor(unsigned long hsclk_sel)
+{
+	unsigned long divisor;
+
+	switch (hsclk_sel) {
+	case 0:
+		divisor = 2;
+		break;
+	case 1:
+		divisor = 6;
+		break;
+	case 2:
+		divisor = 10;
+		break;
+	case 3:
+		divisor = 14;
+		break;
+	case 4:
+		divisor = 3;
+		break;
+	case 5:
+		divisor = 9;
+		break;
+	case 6:
+	case 13:
+		divisor = 15;
+		break;
+	case 7:
+		divisor = 21;
+		break;
+	case 8:
+		divisor = 4;
+		break;
+	case 9:
+		divisor = 12;
+		break;
+	case 10:
+		divisor = 20;
+		break;
+	case 11:
+		divisor = 28;
+		break;
+	case 12:
+		divisor = 5;
+		break;
+	case 14:
+		divisor = 25;
+		break;
+	case 15:
+		divisor = 35;
+		break;
+	default:
+		divisor = 1;
+		DEV_ERR("%s: invalid hsclk_sel value = %lu",
+				__func__, hsclk_sel);
+		break;
+	}
+
+	return divisor;
+}
+
+/*
+ * hdmi_8996_vco_get_rate() - reconstruct the current VCO output rate from
+ * the programmed divider registers.
+ *
+ * Return: the rate in Hz, or 0 if the PLL resources cannot be enabled.
+ * NOTE(review): HSCLK_SEL is masked with 0x15 (0b10101) rather than 0xF
+ * or 0x1F — this matches the vendor source but looks like a typo; verify
+ * against the PHY register spec before changing.
+ */
+static unsigned long hdmi_8996_vco_get_rate(struct clk *c)
+{
+	unsigned long freq = 0, hsclk_sel = 0, tx_band = 0, dec_start = 0,
+		      div_frac_start = 0, vco_clock_freq = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return freq;
+	}
+
+	dec_start = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEC_START_MODE0);
+
+	/* 20-bit fractional start value spread across three registers */
+	div_frac_start =
+		MDSS_PLL_REG_R(io->pll_base,
+			       QSERDES_COM_DIV_FRAC_START1_MODE0) |
+		MDSS_PLL_REG_R(io->pll_base,
+			       QSERDES_COM_DIV_FRAC_START2_MODE0) << 8 |
+		MDSS_PLL_REG_R(io->pll_base,
+			       QSERDES_COM_DIV_FRAC_START3_MODE0) << 16;
+
+	vco_clock_freq = (dec_start + (div_frac_start / (1 << 20)))
+		* 4 * (HDMI_REF_CLOCK);
+
+	hsclk_sel = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_HSCLK_SEL) & 0x15;
+	hsclk_sel = hdmi_get_hsclk_sel_divisor(hsclk_sel);
+	tx_band = MDSS_PLL_REG_R(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_TX_BAND) & 0x3;
+
+	freq = vco_clock_freq / (10 * hsclk_sel * (1 << tx_band));
+
+	mdss_pll_resource_enable(io, false);
+
+	DEV_DBG("%s: freq = %lu\n", __func__, freq);
+
+	return freq;
+}
+
+/* .round_rate: the HDMI VCO accepts any requested rate unchanged. */
+static long hdmi_8996_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+
+	DEV_DBG("rrate=%ld\n", rrate);
+
+	return rrate;
+}
+
+/*
+ * hdmi_8996_vco_prepare() - clk .prepare hook.
+ *
+ * Replays a cached rate if set_rate was never applied (rate_set false),
+ * then takes a PLL resource reference that .unprepare releases.
+ */
+static int hdmi_8996_vco_prepare(struct clk *c, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int ret = 0;
+
+	DEV_DBG("rate=%ld\n", vco->rate);
+
+	if (!vco->rate_set && vco->rate)
+		ret = hdmi_8996_vco_set_rate(c, vco->rate, ver);
+
+	if (!ret) {
+		ret = mdss_pll_resource_enable(io, true);
+		if (ret)
+			DEV_ERR("pll resource can't be enabled\n");
+	}
+
+	return ret;
+}
+
+/* Per-revision .prepare trampolines for the clk_ops tables. */
+static int hdmi_8996_v1_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+/*
+ * hdmi_8996_vco_unprepare() - clk .unprepare hook.
+ *
+ * Drops the resource reference taken in .prepare and clears the cached
+ * rate/handoff state.  If the PLL was never switched on, a temporary
+ * enable is taken first so the following disable balances correctly.
+ */
+static void hdmi_8996_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	vco->rate_set = false;
+
+	if (!io) {
+		DEV_ERR("Invalid input parameter\n");
+		return;
+	}
+
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return;
+	}
+
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
+/*
+ * hdmi_8996_vco_handoff() - clk .handoff hook.
+ *
+ * If the bootloader left the PLL locked and the PHY ready, adopt the
+ * running clock: keep the resource reference (handoff_resources) and
+ * read back the current rate.  Otherwise release the probe-time enable
+ * and report the clock as disabled.
+ */
+static enum handoff hdmi_8996_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (is_gdsc_disabled(io))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0)) {
+		if (MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+			io->pll_on = true;
+			c->rate = hdmi_8996_vco_get_rate(c);
+			vco->rate = c->rate;
+			ret = HANDOFF_ENABLED_CLK;
+		} else {
+			io->handoff_resources = false;
+			mdss_pll_resource_enable(io, false);
+			DEV_DBG("%s: PHY not ready\n", __func__);
+		}
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		DEV_DBG("%s: PLL not locked\n", __func__);
+	}
+
+	DEV_DBG("done, ret=%d\n", ret);
+	return ret;
+}
+
+/*
+ * clk_ops tables, one per PHY revision.  Only .enable, .set_rate and
+ * .prepare differ (via the per-revision trampolines); get/round/unprepare
+ * and handoff are shared.
+ */
+static const struct clk_ops hdmi_8996_v1_vco_clk_ops = {
+	.enable = hdmi_8996_v1_vco_enable,
+	.set_rate = hdmi_8996_v1_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v1_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static const struct clk_ops hdmi_8996_v2_vco_clk_ops = {
+	.enable = hdmi_8996_v2_vco_enable,
+	.set_rate = hdmi_8996_v2_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v2_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static const struct clk_ops hdmi_8996_v3_vco_clk_ops = {
+	.enable = hdmi_8996_v3_vco_enable,
+	.set_rate = hdmi_8996_v3_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static const struct clk_ops hdmi_8996_v3_1p8_vco_clk_ops = {
+	.enable = hdmi_8996_v3_1p8_vco_enable,
+	.set_rate = hdmi_8996_v3_1p8_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_1p8_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+
+/*
+ * Single VCO clock instance; the ops pointer is patched to the correct
+ * revision table in hdmi_8996_pll_clock_register() before registration.
+ */
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.c = {
+		.dbg_name = "hdmi_8996_vco_clk",
+		.ops = &hdmi_8996_v1_vco_clk_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
+/* lookup table handed to of_msm_clock_register() */
+static struct clk_lookup hdmipllcc_8996[] = {
+	CLK_LIST(hdmi_vco_clk),
+};
+
+/*
+ * hdmi_8996_pll_clock_register() - register the HDMI VCO clock for the
+ * given PHY revision.
+ *
+ * Attaches @pll_res as the VCO private data, selects the clk_ops variant
+ * matching @ver and registers the clock with the MSM clock framework.
+ *
+ * Return: 0 on success; -EPROBE_DEFER when resources are missing or
+ * registration fails, so the caller retries the probe later.
+ *
+ * Fix: dropped the stray ';' that followed the switch's closing brace
+ * (an empty statement) and the dead "rc = -ENOTSUPP" initializer that was
+ * unconditionally overwritten.
+ */
+int hdmi_8996_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res, u32 ver)
+{
+	int rc;
+
+	if (!pll_res || !pll_res->phy_base || !pll_res->pll_base) {
+		DEV_ERR("%s: Invalid input parameters\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	/* Set client data for vco, mux and div clocks */
+	hdmi_vco_clk.priv = pll_res;
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v2_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3_1_8:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_1p8_vco_clk_ops;
+		break;
+	default:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v1_vco_clk_ops;
+		break;
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8996,
+				   ARRAY_SIZE(hdmipllcc_8996));
+	if (rc) {
+		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		DEV_DBG("%s SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
+
+/* Public per-revision registration entry points (declared in the header). */
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V1);
+}
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V2);
+}
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3);
+}
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3_1_8);
+}
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll.h b/drivers/clk/msm/mdss/mdss-hdmi-pll.h
new file mode 100644
index 0000000..1f21d79
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_PLL_H
+#define __MDSS_HDMI_PLL_H
+
+/* one rate-threshold -> register-value entry for legacy PLL setting tables */
+struct hdmi_pll_cfg {
+	unsigned long vco_rate;
+	u32 reg;
+};
+
+/* per-instance HDMI VCO clock state, embedded around a struct clk */
+struct hdmi_pll_vco_clk {
+	unsigned long	rate;	/* current vco rate */
+	unsigned long	min_rate;	/* min vco rate */
+	unsigned long	max_rate;	/* max vco rate */
+	bool		rate_set;	/* true once set_rate has been applied */
+	struct hdmi_pll_cfg *ip_seti;	/* charge-pump/loop tuning tables */
+	struct hdmi_pll_cfg *cp_seti;
+	struct hdmi_pll_cfg *ip_setp;
+	struct hdmi_pll_cfg *cp_setp;
+	struct hdmi_pll_cfg *crctrl;
+	void		*priv;	/* struct mdss_pll_resources of this PLL */
+
+	struct clk	c;	/* embedded clock; container_of to recover */
+};
+
+int hdmi_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+#endif
diff --git a/drivers/clk/msm/mdss/mdss-pll-util.c b/drivers/clk/msm/mdss/mdss-pll-util.c
new file mode 100644
index 0000000..7f7da9b
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-pll-util.c
@@ -0,0 +1,364 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/vmalloc.h>
+#include <linux/memblock.h>
+
+#include "mdss-pll.h"
+
+/*
+ * mdss_pll_util_resource_init() - configure regulators and acquire clock
+ * handles for a PLL resource.
+ *
+ * On clock-get failure the regulator configuration is rolled back.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_pll_util_resource_init(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct mdss_module_power *mp = &pll_res->mp;
+
+	rc = msm_mdss_config_vreg(&pdev->dev,
+				mp->vreg_config, mp->num_vreg, 1);
+	if (rc) {
+		pr_err("Vreg config failed rc=%d\n", rc);
+		goto vreg_err;
+	}
+
+	rc = msm_mdss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+	if (rc) {
+		pr_err("Clock get failed rc=%d\n", rc);
+		goto clk_err;
+	}
+
+	return rc;
+
+clk_err:
+	msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	return rc;
+}
+
+/**
+ * mdss_pll_get_mp_by_reg_name() -- Find power module by regulator name
+ *@pll_res: Pointer to the PLL resource
+ *@name: Regulator name as specified in the pll dtsi
+ *
+ * This is a helper function to retrieve the regulator information
+ * for each pll resource.
+ *
+ * Return: the matching &struct mdss_vreg, or NULL when @pll_res is
+ * invalid or @name is not present in the supply table.
+ *
+ * Fix: on a failed lookup the old loop left @regulator pointing one past
+ * the end of the vreg_config array and returned that invalid non-NULL
+ * pointer; it now returns NULL instead.
+ */
+struct mdss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res
+		, char *name)
+{
+	struct mdss_vreg *regulator;
+	int i;
+
+	if ((pll_res == NULL) || (pll_res->mp.vreg_config == NULL)) {
+		pr_err("%s Invalid PLL resource\n", __func__);
+		return NULL;
+	}
+
+	regulator = pll_res->mp.vreg_config;
+
+	for (i = 0; i < pll_res->mp.num_vreg; i++) {
+		if (!strcmp(name, regulator->vreg_name)) {
+			pr_debug("Found regulator match for %s\n", name);
+			return regulator;
+		}
+		regulator++;
+	}
+
+	pr_err("%s: no regulator named %s\n", __func__, name);
+	return NULL;
+}
+
+/*
+ * mdss_pll_util_resource_deinit() - release clock handles and deconfigure
+ * regulators; inverse of mdss_pll_util_resource_init().
+ */
+void mdss_pll_util_resource_deinit(struct platform_device *pdev,
+					 struct mdss_pll_resources *pll_res)
+{
+	struct mdss_module_power *mp = &pll_res->mp;
+
+	msm_mdss_put_clk(mp->clk_config, mp->num_clk);
+
+	msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+}
+
+/*
+ * mdss_pll_util_resource_release() - free the parsed clock/regulator
+ * tables; inverse of mdss_pll_util_resource_parse().
+ *
+ * Fix: the freed clk_config/vreg_config pointers were left dangling while
+ * the counts were zeroed; they are now cleared so any later helper that
+ * consults the pointers cannot touch freed memory.
+ */
+void mdss_pll_util_resource_release(struct platform_device *pdev,
+					 struct mdss_pll_resources *pll_res)
+{
+	struct mdss_module_power *mp = &pll_res->mp;
+
+	devm_kfree(&pdev->dev, mp->clk_config);
+	devm_kfree(&pdev->dev, mp->vreg_config);
+	mp->clk_config = NULL;
+	mp->vreg_config = NULL;
+	mp->num_vreg = 0;
+	mp->num_clk = 0;
+}
+
+/*
+ * mdss_pll_util_resource_enable() - switch the PLL's regulators and
+ * clocks on or off.
+ *
+ * Enable order: vregs -> clock rates -> clocks; on a clock failure the
+ * vregs are rolled back.  Disable order is clocks first, then vregs;
+ * disable-path return values are intentionally ignored (best effort).
+ *
+ * Return: 0 on success, negative errno on an enable-path failure.
+ */
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+								bool enable)
+{
+	int rc = 0;
+	struct mdss_module_power *mp = &pll_res->mp;
+
+	if (enable) {
+		rc = msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg,
+					enable);
+		if (rc) {
+			pr_err("Failed to enable vregs rc=%d\n", rc);
+			goto vreg_err;
+		}
+
+		rc = msm_mdss_clk_set_rate(mp->clk_config, mp->num_clk);
+		if (rc) {
+			pr_err("Failed to set clock rate rc=%d\n", rc);
+			goto clk_err;
+		}
+
+		rc = msm_mdss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto clk_err;
+		}
+	} else {
+		msm_mdss_enable_clk(mp->clk_config, mp->num_clk, enable);
+
+		msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+	}
+
+	return rc;
+
+clk_err:
+	msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	return rc;
+}
+
+/*
+ * mdss_pll_util_parse_dt_supply() - build mp->vreg_config from the
+ * "qcom,platform-supply-entries" device-tree child nodes.
+ *
+ * Name, min/max voltage and enable/disable loads are mandatory per entry;
+ * ulp load and the four pre/post on/off sleep values are optional and
+ * default to the enable load / 0 respectively.  A missing supply-entries
+ * node or an empty one is not an error (num_vreg stays 0).
+ *
+ * Return: 0 on success, negative errno on allocation or mandatory-
+ * property failure (the partially built table is freed).
+ */
+static int mdss_pll_util_parse_dt_supply(struct platform_device *pdev,
+						struct mdss_pll_resources *pll_res)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_root_node = NULL;
+	struct device_node *supply_node = NULL;
+	struct mdss_module_power *mp = &pll_res->mp;
+
+	of_node = pdev->dev.of_node;
+
+	mp->num_vreg = 0;
+	supply_root_node = of_get_child_by_name(of_node,
+						"qcom,platform-supply-entries");
+	if (!supply_root_node) {
+		pr_err("no supply entry present\n");
+		return rc;
+	}
+
+	/* first pass: count the entries to size the allocation */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+
+	if (mp->num_vreg == 0) {
+		pr_debug("no vreg\n");
+		return rc;
+	}
+	pr_debug("vreg found. count=%d\n", mp->num_vreg);
+
+	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct mdss_vreg) *
+						mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	/* second pass: fill one mdss_vreg per child node */
+	for_each_child_of_node(supply_root_node, supply_node) {
+
+		const char *st = NULL;
+
+		rc = of_property_read_string(supply_node,
+						"qcom,supply-name", &st);
+		if (rc) {
+			pr_err(":error reading name. rc=%d\n", rc);
+			goto error;
+		}
+
+		strlcpy(mp->vreg_config[i].vreg_name, st,
+					sizeof(mp->vreg_config[i].vreg_name));
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err(": error reading min volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err(": error reading max volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err(": error reading enable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err(": error reading disable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+		/* optional: ulp load falls back to the enable load */
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-ulp-load", &tmp);
+		if (rc)
+			pr_warn(": error reading ulp load. rc=%d\n", rc);
+
+		mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp :
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]);
+
+		/* optional: the four sleep values default to 0 */
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, ulp=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+					mp->vreg_config[i].vreg_name,
+					mp->vreg_config[i].min_voltage,
+					mp->vreg_config[i].max_voltage,
+					mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+					mp->vreg_config[i].load[DSS_REG_MODE_DISABLE],
+					mp->vreg_config[i].load[DSS_REG_MODE_ULP],
+					mp->vreg_config[i].pre_on_sleep,
+					mp->vreg_config[i].post_on_sleep,
+					mp->vreg_config[i].pre_off_sleep,
+					mp->vreg_config[i].post_off_sleep);
+		++i;
+
+		/* clear rc left over from the optional reads above */
+		rc = 0;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(&pdev->dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+		mp->num_vreg = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_pll_util_parse_dt_clock() - build mp->clk_config from the
+ * "clock-names"/"clock-rate" device-tree properties.
+ *
+ * A clock with rate 0 is treated as an AHB (bus) clock, any other rate
+ * as a pixel-path clock.  Missing "clock-names" is tolerated (the driver
+ * continues with zero clocks), matching the original best-effort return.
+ *
+ * Return: 0 on success or when no clocks are defined, -ENOMEM on
+ * allocation failure.
+ *
+ * Fix: of_property_count_strings() returns a negative errno when the
+ * property is absent; that value was previously left in mp->num_clk, so
+ * later loops iterated over a negative count.  It is now normalized to 0.
+ * rc is also an int instead of u32 so -ENOMEM is not stored unsigned.
+ */
+static int mdss_pll_util_parse_dt_clock(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	u32 i = 0;
+	int rc = 0;
+	struct mdss_module_power *mp = &pll_res->mp;
+	const char *clock_name;
+	u32 clock_rate;
+
+	mp->num_clk = of_property_count_strings(pdev->dev.of_node,
+							"clock-names");
+	if (mp->num_clk <= 0) {
+		pr_err("clocks are not defined\n");
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	mp->clk_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct mdss_clk) * mp->num_clk, GFP_KERNEL);
+	if (!mp->clk_config) {
+		rc = -ENOMEM;
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < mp->num_clk; i++) {
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+							i, &clock_name);
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+				sizeof(mp->clk_config[i].clk_name));
+
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+							i, &clock_rate);
+		mp->clk_config[i].rate = clock_rate;
+
+		if (!clock_rate)
+			mp->clk_config[i].type = DSS_CLK_AHB;
+		else
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+clk_err:
+	return rc;
+}
+
+/*
+ * mdss_pll_util_resource_parse() - parse all PLL power resources from DT.
+ *
+ * Parses the supply entries first, then the clocks; a clock-parse failure
+ * frees the already-built regulator table so the caller sees a clean
+ * state.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct mdss_module_power *mp = &pll_res->mp;
+
+	rc = mdss_pll_util_parse_dt_supply(pdev, pll_res);
+	if (rc) {
+		pr_err("vreg parsing failed rc=%d\n", rc);
+		goto end;
+	}
+
+	rc = mdss_pll_util_parse_dt_clock(pdev, pll_res);
+	if (rc) {
+		pr_err("clock name parsing failed rc=%d", rc);
+		goto clk_err;
+	}
+
+	return rc;
+
+clk_err:
+	devm_kfree(&pdev->dev, mp->vreg_config);
+	mp->num_vreg = 0;
+end:
+	return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-pll.c b/drivers/clk/msm/mdss/mdss-pll.c
new file mode 100644
index 0000000..49f3d7b
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-pll.c
@@ -0,0 +1,439 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/*
+ * Reference-counted enable/disable of the PLL's supporting resources.
+ * The underlying supplies/clocks are only toggled on the 0->1 and 1->0
+ * refcount transitions; during continuous-splash handoff the resources
+ * are left untouched.
+ */
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
+{
+	int rc = 0;
+	int changed = 0;
+
+	if (!pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Don't turn off resources during handoff or add more than
+	 * 1 refcount.
+	 */
+	if (pll_res->handoff_resources &&
+		(!enable || (enable & pll_res->resource_enable))) {
+		/* bitwise & on two bools acts as logical AND here */
+		pr_debug("Do not turn on/off pll resources during handoff case\n");
+		return rc;
+	}
+
+	if (enable) {
+		if (pll_res->resource_ref_cnt == 0)
+			changed++;
+		pll_res->resource_ref_cnt++;
+	} else {
+		if (pll_res->resource_ref_cnt) {
+			pll_res->resource_ref_cnt--;
+			if (pll_res->resource_ref_cnt == 0)
+				changed++;
+		} else {
+			pr_err("PLL Resources already OFF\n");
+		}
+	}
+
+	/* only touch hardware on a 0<->1 refcount transition */
+	if (changed) {
+		rc = mdss_pll_util_resource_enable(pll_res, enable);
+		if (rc)
+			pr_err("Resource update failed rc=%d\n", rc);
+		else
+			pll_res->resource_enable = enable;
+	}
+
+	return rc;
+}
+
+/* Thin NULL-checking wrapper around mdss_pll_util_resource_init(). */
+static int mdss_pll_resource_init(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	return mdss_pll_util_resource_init(pdev, pll_res);
+}
+
+/* Thin NULL-checking wrapper around mdss_pll_util_resource_deinit(). */
+static void mdss_pll_resource_deinit(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return;
+	}
+
+	mdss_pll_util_resource_deinit(pdev, pll_res);
+}
+
+/* Thin NULL-checking wrapper around mdss_pll_util_resource_release(). */
+static void mdss_pll_resource_release(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return;
+	}
+
+	mdss_pll_util_resource_release(pdev, pll_res);
+}
+
+/*
+ * Parse DT resources and map the "compatible" string to a PLL interface
+ * type, target id and (where relevant) silicon revision.  On failure the
+ * resources acquired by mdss_pll_util_resource_parse() are released again
+ * and an error is returned.
+ */
+static int mdss_pll_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	const char *compatible_stream;
+
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_util_resource_parse(pdev, pll_res);
+	if (rc) {
+		pr_err("Failed to parse the resources rc=%d\n", rc);
+		goto end;
+	}
+
+	compatible_stream = of_get_property(pdev->dev.of_node,
+				"compatible", NULL);
+	if (!compatible_stream) {
+		pr_err("Failed to parse the compatible stream\n");
+		/* previously fell through with rc == 0; report the failure */
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8952")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_LPM;
+		pll_res->target_id = MDSS_PLL_TARGET_8952;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8937")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_LPM;
+		pll_res->target_id = MDSS_PLL_TARGET_8937;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8909")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_LPM;
+		pll_res->target_id = MDSS_PLL_TARGET_8909;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+		pll_res->target_id = MDSS_PLL_TARGET_8996;
+		pll_res->revision = 1;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+		pll_res->target_id = MDSS_PLL_TARGET_8996;
+		pll_res->revision = 2;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8953")) {
+		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+		pll_res->target_id = MDSS_PLL_TARGET_8953;
+		pll_res->revision = 2;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
+	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
+	} else if (!strcmp(compatible_stream,
+				"qcom,mdss_hdmi_pll_8996_v3_1p8")) {
+		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
+	} else {
+		pr_err("Unsupported compatible stream: %s\n",
+			compatible_stream);
+		/*
+		 * Unknown compatible: fail here instead of returning 0 and
+		 * letting clock registration run on released resources.
+		 */
+		rc = -EINVAL;
+		goto err;
+	}
+
+	return rc;
+
+err:
+	mdss_pll_resource_release(pdev, pll_res);
+end:
+	return rc;
+}
+
+/*
+ * Dispatch clock registration to the implementation matching the PLL
+ * interface type selected by mdss_pll_resource_parse().  Returns -EINVAL
+ * for an unknown/unsupported interface type.
+ */
+static int mdss_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc;
+
+	if (!pdev || !pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	switch (pll_res->pll_interface_type) {
+	case MDSS_DSI_PLL_LPM:
+		rc = dsi_pll_clock_register_lpm(pdev, pll_res);
+		break;
+	case MDSS_DSI_PLL_8996:
+		rc = dsi_pll_clock_register_8996(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996:
+		rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996_V2:
+		rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996_V3:
+		rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_HDMI_PLL_8996_V3_1_8:
+		rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
+		break;
+	case MDSS_UNKNOWN_PLL:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc) {
+		pr_err("Pll ndx=%d clock register failed rc=%d\n",
+			pll_res->index, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * Probe: map the pll/phy/dynamic-refresh/gdsc register spaces described
+ * in DT, parse resources (supplies, clocks, compatible string) and
+ * register the PLL clocks.  pll_base and gdsc_base are mandatory;
+ * phy_base and dynamic_pll_base are optional.  The error labels unwind
+ * in exact reverse order of acquisition.
+ */
+static int mdss_pll_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	const char *label;
+	struct resource *pll_base_reg;
+	struct resource *phy_base_reg;
+	struct resource *dynamic_pll_base_reg;
+	struct resource *gdsc_base_reg;
+	struct mdss_pll_resources *pll_res;
+
+	if (!pdev->dev.of_node) {
+		pr_err("MDSS pll driver only supports device tree probe\n");
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	label = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!label)
+		pr_info("%d: MDSS pll label not specified\n", __LINE__);
+	else
+		pr_info("MDSS pll label = %s\n", label);
+
+	pll_res = devm_kzalloc(&pdev->dev, sizeof(struct mdss_pll_resources),
+								GFP_KERNEL);
+	if (!pll_res) {
+		rc = -ENOMEM;
+		goto error;
+	}
+	platform_set_drvdata(pdev, pll_res);
+
+	/* cell-index is non-fatal: default to PLL index 0 */
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+			&pll_res->index);
+	if (rc) {
+		pr_err("Unable to get the cell-index rc=%d\n", rc);
+		pll_res->index = 0;
+	}
+
+	pll_res->ssc_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-pll-ssc-en");
+
+	if (pll_res->ssc_en) {
+		pr_info("%s: label=%s PLL SSC enabled\n", __func__, label);
+
+		/*
+		 * SSC frequency/ppm reads are best-effort; rc is
+		 * overwritten by the later parse calls either way.
+		 */
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ssc-frequency-hz", &pll_res->ssc_freq);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ssc-ppm", &pll_res->ssc_ppm);
+
+		pll_res->ssc_center = false;
+
+		label = of_get_property(pdev->dev.of_node,
+			"qcom,dsi-pll-ssc-mode", NULL);
+
+		if (label && !strcmp(label, "center-spread"))
+			pll_res->ssc_center = true;
+	}
+
+	/* pll_base is mandatory */
+	pll_base_reg = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, "pll_base");
+	if (!pll_base_reg) {
+		pr_err("Unable to get the pll base resources\n");
+		rc = -ENOMEM;
+		goto io_error;
+	}
+
+	pll_res->pll_base = ioremap(pll_base_reg->start,
+						resource_size(pll_base_reg));
+	if (!pll_res->pll_base) {
+		pr_err("Unable to remap pll base resources\n");
+		rc = -ENOMEM;
+		goto io_error;
+	}
+
+	pr_debug("%s: ndx=%d base=%p\n", __func__,
+			pll_res->index, pll_res->pll_base);
+
+	rc = mdss_pll_resource_parse(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll resource parsing from dt failed rc=%d\n", rc);
+		goto res_parse_error;
+	}
+
+	/* phy_base is optional */
+	phy_base_reg = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, "phy_base");
+	if (phy_base_reg) {
+		pll_res->phy_base = ioremap(phy_base_reg->start,
+						resource_size(phy_base_reg));
+		if (!pll_res->phy_base) {
+			pr_err("Unable to remap pll phy base resources\n");
+			rc = -ENOMEM;
+			goto phy_io_error;
+		}
+	}
+
+	/* dynamic_pll_base is optional (dynamic refresh support) */
+	dynamic_pll_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "dynamic_pll_base");
+	if (dynamic_pll_base_reg) {
+		pll_res->dyn_pll_base = ioremap(dynamic_pll_base_reg->start,
+				resource_size(dynamic_pll_base_reg));
+		if (!pll_res->dyn_pll_base) {
+			pr_err("Unable to remap dynamic pll base resources\n");
+			rc = -ENOMEM;
+			goto dyn_pll_io_error;
+		}
+	}
+
+	/* gdsc_base is mandatory (used by is_gdsc_disabled()) */
+	gdsc_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "gdsc_base");
+	if (!gdsc_base_reg) {
+		pr_err("Unable to get the gdsc base resource\n");
+		rc = -ENOMEM;
+		goto gdsc_io_error;
+	}
+	pll_res->gdsc_base = ioremap(gdsc_base_reg->start,
+			resource_size(gdsc_base_reg));
+	if (!pll_res->gdsc_base) {
+		pr_err("Unable to remap gdsc base resources\n");
+		rc = -ENOMEM;
+		goto gdsc_io_error;
+	}
+
+	rc = mdss_pll_resource_init(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll ndx=%d resource init failed rc=%d\n",
+				pll_res->index, rc);
+		goto res_init_error;
+	}
+
+	rc = mdss_pll_clock_register(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll ndx=%d clock register failed rc=%d\n",
+			pll_res->index, rc);
+		goto clock_register_error;
+	}
+
+	return rc;
+
+	/* unwind in reverse order of acquisition */
+clock_register_error:
+	mdss_pll_resource_deinit(pdev, pll_res);
+res_init_error:
+	if (pll_res->gdsc_base)
+		iounmap(pll_res->gdsc_base);
+gdsc_io_error:
+	if (pll_res->dyn_pll_base)
+		iounmap(pll_res->dyn_pll_base);
+dyn_pll_io_error:
+	if (pll_res->phy_base)
+		iounmap(pll_res->phy_base);
+phy_io_error:
+	mdss_pll_resource_release(pdev, pll_res);
+res_parse_error:
+	iounmap(pll_res->pll_base);
+io_error:
+	devm_kfree(&pdev->dev, pll_res);
+error:
+	return rc;
+}
+
+/*
+ * Remove: undo everything mdss_pll_probe() set up.  Unmaps all register
+ * spaces mapped at probe time, including the optional dynamic-refresh
+ * space (dyn_pll_base), which the original code leaked.
+ */
+static int mdss_pll_remove(struct platform_device *pdev)
+{
+	struct mdss_pll_resources *pll_res;
+
+	pll_res = platform_get_drvdata(pdev);
+	if (!pll_res) {
+		pr_err("Invalid PLL resource data");
+		return 0;
+	}
+
+	mdss_pll_resource_deinit(pdev, pll_res);
+	if (pll_res->phy_base)
+		iounmap(pll_res->phy_base);
+	/* probe maps dyn_pll_base when present; unmap it here too */
+	if (pll_res->dyn_pll_base)
+		iounmap(pll_res->dyn_pll_base);
+	if (pll_res->gdsc_base)
+		iounmap(pll_res->gdsc_base);
+	mdss_pll_resource_release(pdev, pll_res);
+	iounmap(pll_res->pll_base);
+	devm_kfree(&pdev->dev, pll_res);
+	return 0;
+}
+
+static const struct of_device_id mdss_pll_dt_match[] = {
+	{.compatible = "qcom,mdss_dsi_pll_8996"},
+	{.compatible = "qcom,mdss_dsi_pll_8996_v2"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
+	{.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
+	{.compatible = "qcom,mdss_dsi_pll_8952"},
+	{.compatible = "qcom,mdss_dsi_pll_8937"},
+	{.compatible = "qcom,mdss_dsi_pll_8909"},
+	{.compatible = "qcom,mdss_dsi_pll_8953"},
+	{}
+};
+
+/*
+ * Fix: the original referenced the nonexistent symbol
+ * "mdss_clock_dt_match", which breaks the build when this driver is
+ * compiled as a module (MODULE_DEVICE_TABLE emits an alias to the
+ * named table).
+ */
+MODULE_DEVICE_TABLE(of, mdss_pll_dt_match);
+
+/* Platform driver glue; matched against the of_device_id table above. */
+static struct platform_driver mdss_pll_driver = {
+	.probe = mdss_pll_probe,
+	.remove = mdss_pll_remove,
+	.driver = {
+		.name = "mdss_pll",
+		.of_match_table = mdss_pll_dt_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall level so the PLL clocks exist before
+ * the display drivers that consume them probe.
+ */
+static int __init mdss_pll_driver_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&mdss_pll_driver);
+	if (rc)
+		pr_err("mdss_register_pll_driver() failed!\n");
+
+	return rc;
+}
+subsys_initcall(mdss_pll_driver_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit mdss_pll_driver_deinit(void)
+{
+	platform_driver_unregister(&mdss_pll_driver);
+}
+module_exit(mdss_pll_driver_deinit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("mdss pll driver");
diff --git a/drivers/clk/msm/mdss/mdss-pll.h b/drivers/clk/msm/mdss/mdss-pll.h
new file mode 100644
index 0000000..1fa5cff
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-pll.h
@@ -0,0 +1,197 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_PLL_H
+#define __MDSS_PLL_H
+
+#include <linux/mdss_io_util.h>
+#include <linux/io.h>
+
+#define MDSS_PLL_REG_W(base, offset, data) \
+ writel_relaxed((data), (base) + (offset))
+#define MDSS_PLL_REG_R(base, offset) readl_relaxed((base) + (offset))
+
+#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
+ (((data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
+ ((data0) << 8) | (((addr0) / 4) & 0xFF))
+
+#define MDSS_DYN_PLL_REG_W(base, offset, addr0, addr1, data0, data1) \
+ writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+ (base) + (offset))
+
+/* PLL interface types; selects the clock-register implementation. */
+enum {
+	MDSS_DSI_PLL_LPM,
+	MDSS_DSI_PLL_8996,
+	MDSS_HDMI_PLL_8996,
+	MDSS_HDMI_PLL_8996_V2,
+	MDSS_HDMI_PLL_8996_V3,
+	MDSS_HDMI_PLL_8996_V3_1_8,
+	MDSS_UNKNOWN_PLL,
+};
+
+/* SoC target ids, derived from the DT compatible string. */
+enum {
+	MDSS_PLL_TARGET_8996,
+	MDSS_PLL_TARGET_8952,
+	MDSS_PLL_TARGET_8937,
+	MDSS_PLL_TARGET_8953,
+	MDSS_PLL_TARGET_8909,
+};
+
+struct mdss_pll_resources {
+
+	/* Pll specific resources like GPIO, power supply, clocks, etc*/
+	struct mdss_module_power mp;
+
+	/*
+	 * dsi/edp/hmdi plls' base register, phy, gdsc and dynamic refresh
+	 * register mapping
+	 */
+	void __iomem	*pll_base;
+	void __iomem	*phy_base;
+	void __iomem	*gdsc_base;
+	void __iomem	*dyn_pll_base;
+
+	/* VCO state: lock status flag plus current/locking/reference rates */
+	bool	is_init_locked;
+	s64	vco_current_rate;
+	s64	vco_locking_rate;
+	s64	vco_ref_clk_rate;
+
+	/*
+	 * Certain pll's needs to update the same vco rate after resume in
+	 * suspend/resume scenario. Cached the vco rate for such plls.
+	 */
+	unsigned long	vco_cached_rate;
+
+	/* dsi/edp/hmdi pll interface type */
+	u32	pll_interface_type;
+
+	/*
+	 * Target ID. Used in pll_register API for valid target check before
+	 * registering the PLL clocks.
+	 */
+	u32	target_id;
+
+	/* HW recommended delay during configuration of vco clock rate */
+	u32	vco_delay;
+
+	/* Ref-count of the PLL resources */
+	u32	resource_ref_cnt;
+
+	/*
+	 * Keep track to resource status to avoid updating same status for the
+	 * pll from different paths
+	 */
+	bool	resource_enable;
+
+	/*
+	 * Certain plls' do not allow vco rate update if it is on. Keep track of
+	 * status for them to turn on/off after set rate success.
+	 */
+	bool	pll_on;
+
+	/*
+	 * handoff_status is true of pll is already enabled by bootloader with
+	 * continuous splash enable case. Clock API will call the handoff API
+	 * to enable the status. It is disabled if continuous splash
+	 * feature is disabled.
+	 */
+	bool	handoff_resources;
+
+	/*
+	 * caching the pll trim codes in the case of dynamic refresh
+	 * or cmd mode idle screen.
+	 */
+	int	cache_pll_trim_codes[2];
+
+	/*
+	 * caching the pll trim codes rate
+	 */
+	s64	cache_pll_trim_codes_rate;
+
+	/*
+	 * for maintaining the status of saving trim codes
+	 */
+	bool	reg_upd;
+
+	/*
+	 * Notifier callback for MDSS gdsc regulator events
+	 */
+	struct notifier_block gdsc_cb;
+
+	/*
+	 * Worker function to call PLL off event
+	 */
+	struct work_struct pll_off;
+
+	/*
+	 * PLL index if multiple index are available. Eg. in case of
+	 * DSI we have 2 plls.
+	 */
+	uint32_t index;
+
+	/* spread-spectrum clocking configuration (parsed from DT) */
+	bool ssc_en;	/* share pll with master */
+	bool ssc_center;	/* default is down spread */
+	u32 ssc_freq;
+	u32 ssc_ppm;
+
+	/* paired slave PLL, e.g. for dual-DSI split configurations */
+	struct mdss_pll_resources *slave;
+
+	/*
+	 * target pll revision information
+	 */
+	int revision;
+
+	/* implementation-private data for the specific PLL backend */
+	void *priv;
+
+	/*
+	 * dynamic refresh pll codes stored in this structure
+	 */
+	struct dfps_info *dfps;
+
+};
+
+/*
+ * Intermediate divider/decimal-start/lock-compare values computed during
+ * VCO rate programming; written to the corresponding PLL registers.
+ */
+struct mdss_pll_vco_calc {
+	s32 div_frac_start1;
+	s32 div_frac_start2;
+	s32 div_frac_start3;
+	s64 dec_start1;
+	s64 dec_start2;
+	s64 pll_plllock_cmp1;
+	s64 pll_plllock_cmp2;
+	s64 pll_plllock_cmp3;
+};
+
+/*
+ * Return true when the MDSS GDSC power domain is off (or when gdsc_base
+ * was never mapped, in which case it warns and conservatively reports
+ * disabled).  Reads two GDSC registers; the exact bit semantics are
+ * hardware-defined -- see the SoC GDSC register documentation.
+ */
+static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
+{
+	if (!pll_res->gdsc_base) {
+		WARN(1, "gdsc_base register is not defined\n");
+		return true;
+	}
+
+	return ((readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)) &&
+		(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
+}
+
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);
+int mdss_pll_util_resource_init(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+void mdss_pll_util_resource_deinit(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+void mdss_pll_util_resource_release(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+ bool enable);
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+struct mdss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res
+ , char *name);
+#endif
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
index 696d7fb..c6e8faa 100644
--- a/drivers/clk/qcom/gcc-sdxpoorwills.c
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1316,33 +1316,6 @@
},
};
-static struct clk_branch gcc_mss_cfg_ahb_clk = {
- .halt_reg = 0x40000,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0x40000,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x40000,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_cfg_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_gate2 gcc_mss_gpll0_div_clk_src = {
- .udelay = 500,
- .clkr = {
- .enable_reg = 0x6d004,
- .enable_mask = BIT(17),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_gpll0_div_clk_src",
- .ops = &clk_gate2_ops,
- },
- },
-};
-
static struct clk_branch gcc_pcie_0_clkref_clk = {
.halt_reg = 0x88004,
.halt_check = BRANCH_HALT,
@@ -1794,8 +1767,6 @@
[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
- [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
- [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
[GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
[GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
[GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 3f9fcd9..7290205 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
#include "mdss-dsi-pll.h"
#include "mdss-pll.h"
#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+#define CREATE_TRACE_POINTS
+#include "mdss_pll_trace.h"
#define VCO_DELAY_USEC 1
@@ -890,8 +892,13 @@
MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
pll->cached_outdiv);
}
-
+ MDSS_PLL_ATRACE_BEGIN("pll_lock");
+ trace_mdss_pll_lock_start((u64)pll->vco_cached_rate,
+ pll->vco_current_rate,
+ pll->cached_cfg0, pll->cached_cfg1,
+ pll->cached_outdiv, pll->resource_ref_cnt);
rc = dsi_pll_enable(vco);
+ MDSS_PLL_ATRACE_END("pll_lock");
if (rc) {
mdss_pll_resource_enable(pll, false);
pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
new file mode 100644
index 0000000..cd4fda6
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss_pll_trace.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MDSS_PLL_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdss_pll
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mdss_pll_trace
+
+
+/* Tracepoint emitted just before a PLL lock attempt (see dsi_pll_enable
+ * call site); records the cached/current VCO state and refcount.
+ */
+TRACE_EVENT(mdss_pll_lock_start,
+	TP_PROTO(
+			u64 vco_cached_rate,
+			s64 vco_current_rate,
+			u32 cached_cfg0,
+			u32 cached_cfg1,
+			u32 cached_outdiv,
+			u32 resource_ref_cnt),
+	TP_ARGS(
+			vco_cached_rate,
+			vco_current_rate,
+			cached_cfg0,
+			cached_cfg1,
+			cached_outdiv,
+			resource_ref_cnt),
+	TP_STRUCT__entry(
+			__field(u64, vco_cached_rate)
+			__field(s64, vco_current_rate)
+			__field(u32, cached_cfg0)
+			__field(u32, cached_cfg1)
+			__field(u32, cached_outdiv)
+			__field(u32, resource_ref_cnt)
+
+	),
+	TP_fast_assign(
+			__entry->vco_cached_rate = vco_cached_rate;
+			__entry->vco_current_rate = vco_current_rate;
+			__entry->cached_cfg0 = cached_cfg0;
+			__entry->cached_cfg1 = cached_cfg1;
+			__entry->cached_outdiv = cached_outdiv;
+			__entry->resource_ref_cnt = resource_ref_cnt;
+	),
+	 TP_printk(
+		"vco_cached_rate=%llu vco_current_rate=%lld cached_cfg0=%d cached_cfg1=%d cached_outdiv=%d resource_ref_cnt=%d",
+			__entry->vco_cached_rate,
+			__entry->vco_current_rate,
+			__entry->cached_cfg0,
+			__entry->cached_cfg1,
+			__entry->cached_outdiv,
+			__entry->resource_ref_cnt)
+);
+
+/* systrace-style begin/end marker ("B"/"E"), driven by the
+ * MDSS_PLL_ATRACE_BEGIN/END macros below.
+ */
+TRACE_EVENT(pll_tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+
+/* systrace-style counter event.
+ * NOTE(review): the pid argument is ignored -- TP_fast_assign stores
+ * current->tgid instead (callers pass current->tgid anyway, so behavior
+ * matches; confirm before relying on the pid parameter).
+ */
+TRACE_EVENT(mdss_pll_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+/* Convenience wrappers emitting the begin/end/counter tracepoints for
+ * the current thread group.
+ */
+#define MDSS_PLL_ATRACE_END(name) trace_pll_tracing_mark_write(current->tgid,\
+		name, 0)
+#define MDSS_PLL_ATRACE_BEGIN(name) trace_pll_tracing_mark_write(current->tgid,\
+		name, 1)
+#define MDSS_PLL_ATRACE_FUNC() MDSS_PLL_ATRACE_BEGIN(__func__)
+#define MDSS_PLL_ATRACE_INT(name, value) \
+		trace_mdss_pll_trace_counter(current->tgid, name, value)
+
+
+#endif /* _MDSS_PLL_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4a4ee0f..5aa9914 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -443,13 +443,12 @@
{
u32 cntkctl = arch_timer_get_cntkctl();
- /* Disable user access to the timers */
+ /* Disable user access to the timers and the physical counter */
/* Also disable virtual event stream */
cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
| ARCH_TIMER_USR_VT_ACCESS_EN
- | ARCH_TIMER_VIRT_EVT_EN);
-
- cntkctl |= ARCH_TIMER_USR_PCT_ACCESS_EN;
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
/* Enable user access to the virtual counter */
if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS))
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 90fac32..8a5ad70 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -27,10 +27,13 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpu_cooling.h>
#include <trace/events/power.h>
static DEFINE_MUTEX(l2bw_lock);
+static struct thermal_cooling_device *cdev[NR_CPUS];
static struct clk *cpu_clk[NR_CPUS];
static struct clk *l2_clk;
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
@@ -308,6 +311,52 @@
NULL,
};
+/*
+ * cpufreq ->ready callback: register a cpufreq cooling device for each
+ * CPU in the policy, unless the CPU is managed by the LMH DCVS hardware
+ * (indicated by a "qcom,lmh-dcvs" phandle), in which case thermal is
+ * handled elsewhere.  Registration failure is logged but non-fatal.
+ */
+static void msm_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	struct device_node *np, *lmh_node;
+	unsigned int cpu = 0;
+
+	/* already registered for this policy owner */
+	if (cdev[policy->cpu])
+		return;
+
+	np = of_cpu_device_node_get(policy->cpu);
+	if (WARN_ON(!np))
+		return;
+
+	/*
+	 * For now, just loading the cooling device;
+	 * thermal DT code takes care of matching them.
+	 */
+	if (of_find_property(np, "#cooling-cells", NULL)) {
+		lmh_node = of_parse_phandle(np, "qcom,lmh-dcvs", 0);
+		if (lmh_node) {
+			/* LMH DCVS present: skip software cooling device */
+			of_node_put(lmh_node);
+			goto ready_exit;
+		}
+
+		for_each_cpu(cpu, policy->related_cpus) {
+			cpumask_t cpu_mask = CPU_MASK_NONE;
+
+			of_node_put(np);
+			np = of_cpu_device_node_get(cpu);
+			if (WARN_ON(!np))
+				return;
+
+			/* one single-CPU cooling device per related CPU */
+			cpumask_set_cpu(cpu, &cpu_mask);
+			cdev[cpu] = of_cpufreq_cooling_register(np, &cpu_mask);
+			if (IS_ERR(cdev[cpu])) {
+				pr_err(
+				"running cpufreq for CPU%d without cooling dev: %ld\n",
+				cpu, PTR_ERR(cdev[cpu]));
+				cdev[cpu] = NULL;
+			}
+		}
+	}
+
+ready_exit:
+	of_node_put(np);
+}
+
static struct cpufreq_driver msm_cpufreq_driver = {
/* lps calculations are handled here. */
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
@@ -318,6 +367,7 @@
.get = msm_cpufreq_get_freq,
.name = "msm",
.attr = msm_freq_attr,
+ .ready = msm_cpufreq_ready,
};
static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 19fe223..6c66b7f 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1251,7 +1251,6 @@
state_id |= (level->psci_id & cluster->psci_mode_mask)
<< cluster->psci_mode_shift;
- (*aff_lvl)++;
/*
* We may have updated the broadcast timers, update
@@ -1259,6 +1258,8 @@
*/
if (level->notify_rpm)
system_sleep_update_wakeup();
+ if (level->psci_id)
+ (*aff_lvl)++;
}
unlock_and_return:
spin_unlock(&cluster->sync_lock);
@@ -1731,6 +1732,18 @@
{
int rc;
+#ifdef CONFIG_ARM
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ rc = arm_cpuidle_init(smp_processor_id());
+ if (rc) {
+ pr_err("CPU%d ARM CPUidle init failed (%d)\n", cpu, rc);
+ return rc;
+ }
+ }
+#endif
+
rc = platform_driver_register(&lpm_driver);
if (rc) {
pr_info("Error registering %s\n", lpm_driver.driver.name);
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index 3c65f69..4613150 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -38,7 +38,7 @@
allow logging of different esoc driver traces.
config ESOC_MDM_4x
- bool "Add support for external mdm9x25/mdm9x35/mdm9x55"
+ bool "Add support for external modem"
help
In some Qualcomm Technologies, Inc. boards, an external modem such as
mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
@@ -49,7 +49,7 @@
tristate "Command engine for 4x series external modems"
help
Provides a command engine to control the behavior of an external modem
- such as mdm9x25/mdm9x35/mdm9x55/QSC. Allows the primary soc to put the
+ such as mdm9x25/mdm9x35/mdm9x55/sdxpoorwills/QSC. Allows the primary soc to put the
external modem in a specific mode. Also listens for events on the
external modem.
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 677e21d..bbec9d3 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -794,6 +794,28 @@
mdm->gpio_state_running = NULL;
return retval;
}
+
+/* Free every valid GPIO claimed for the AP<->MDM IPC lines. */
+static void mdm_release_ipc_gpio(struct mdm_ctrl *mdm)
+{
+	int i;
+
+	if (!mdm)
+		return;
+
+	for (i = 0; i < NUM_GPIOS; ++i)
+		if (gpio_is_valid(MDM_GPIO(mdm, i)))
+			gpio_free(MDM_GPIO(mdm, i));
+}
+
+/* Release the errfatal and status IRQs requested during IPC setup. */
+static void mdm_free_irq(struct mdm_ctrl *mdm)
+{
+	if (!mdm)
+		return;
+
+	free_irq(mdm->errfatal_irq, mdm);
+	free_irq(mdm->status_irq, mdm);
+}
+
static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
const struct mdm_ops *ops,
struct platform_device *pdev)
@@ -1028,6 +1050,108 @@
return 0;
}
+/*
+ * Hardware setup for the external sdxpoorwills modem: parse DT GPIOs,
+ * power-on configuration and pinctrl, configure IPC interrupts and
+ * register the esoc link.  Error paths tear down in reverse order.
+ */
+static int sdxpoorwills_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+
+	/*
+	 * devm_kzalloc() returns NULL (never ERR_PTR) on failure; the
+	 * original IS_ERR_OR_NULL + PTR_ERR check returned 0 -- i.e.
+	 * bogus success -- when allocation failed.
+	 */
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+
+	esoc->pdev = pdev;
+
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to parse DT gpios\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_pon_dt_init(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_pinctrl_init(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to init pinctrl\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_pon_setup(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to setup PON\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to configure the ipc\n");
+		goto err_release_ipc;
+	}
+
+	esoc->name = SDXPOORWILLS_LABEL;
+	esoc->link_name = SDXPOORWILLS_PCIE;
+
+	/* link info is optional */
+	ret = of_property_read_string(node, "qcom,mdm-link-info",
+					&esoc->link_info);
+	if (ret)
+		dev_info(mdm->dev, "esoc link info missing\n");
+
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		goto err_free_irq;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+
+	return 0;
+
+err_free_irq:
+	mdm_free_irq(mdm);
+err_release_ipc:
+	mdm_release_ipc_gpio(mdm);
+err_destroy_wrkq:
+	destroy_workqueue(mdm->mdm_queue);
+	return ret;
+}
+
static struct esoc_clink_ops mdm_cops = {
.cmd_exe = mdm_cmd_exe,
.get_status = mdm_get_status,
@@ -1053,6 +1177,12 @@
.pon_ops = &mdm9x55_pon_ops,
};
+static struct mdm_ops sdxpoorwills_ops = {
+ .clink_ops = &mdm_cops,
+ .config_hw = sdxpoorwills_setup_hw,
+ .pon_ops = &sdxpoorwills_pon_ops,
+};
+
static const struct of_device_id mdm_dt_match[] = {
{ .compatible = "qcom,ext-mdm9x25",
.data = &mdm9x25_ops, },
@@ -1060,6 +1190,8 @@
.data = &mdm9x35_ops, },
{ .compatible = "qcom,ext-mdm9x55",
.data = &mdm9x55_ops, },
+ { .compatible = "qcom,ext-sdxpoorwills",
+ .data = &sdxpoorwills_ops, },
{},
};
MODULE_DEVICE_TABLE(of, mdm_dt_match);
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 77ae84b..4291bbc 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -309,6 +309,10 @@
.name = "MDM9x55",
.data = NULL,
},
+ {
+ .name = "SDXPOORWILLS",
+ .data = NULL,
+ },
};
static struct esoc_drv esoc_ssr_drv = {
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 0e85776..9624275 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,24 @@
return 0;
}
+/* This function can be called from atomic context. */
+static int sdxpoorwills_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+ int soft_reset_direction_assert = mdm->soft_reset_inverted;
+
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction_assert);
+ /*
+ * Allow PS hold assert to be detected
+ */
+ if (!atomic)
+ usleep_range(80000, 180000);
+ else
+ mdelay(100);
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ !soft_reset_direction_assert);
+ return 0;
+}
static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
{
@@ -99,6 +117,7 @@
{
struct device *dev = mdm->dev;
int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+
/* Assert the soft reset line whether mdm2ap_status went low or not */
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction);
@@ -135,6 +154,27 @@
return 0;
}
+static int sdxpoorwills_power_down(struct mdm_ctrl *mdm)
+{
+ struct device *dev = mdm->dev;
+ int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+
+ /* Assert the soft reset line whether mdm2ap_status went low or not */
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction);
+ dev_info(dev, "Doing a hard reset\n");
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ soft_reset_direction);
+ /*
+ * Currently, there is a debounce timer on the charm PMIC. It is
+ * necessary to hold the PMIC RESET low for 325ms
+ * for the reset to fully take place. Sleep here to ensure the
+ * reset has occurred before the function exits.
+ */
+ mdelay(325);
+ return 0;
+}
+
static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
{
if (!gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)))
@@ -158,6 +198,16 @@
!mdm->soft_reset_inverted);
}
+static void sdxpoorwills_cold_reset(struct mdm_ctrl *mdm)
+{
+ dev_info(mdm->dev, "Triggering mdm cold reset");
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ !!mdm->soft_reset_inverted);
+ mdelay(600);
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+ !mdm->soft_reset_inverted);
+}
+
static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
@@ -215,3 +265,12 @@
.dt_init = mdm4x_pon_dt_init,
.setup = mdm4x_pon_setup,
};
+
+struct mdm_pon_ops sdxpoorwills_pon_ops = {
+ .pon = mdm4x_do_first_power_on,
+ .soft_reset = sdxpoorwills_toggle_soft_reset,
+ .poff_force = sdxpoorwills_power_down,
+ .cold_reset = sdxpoorwills_cold_reset,
+ .dt_init = mdm4x_pon_dt_init,
+ .setup = mdm4x_pon_setup,
+};
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
index 621d913..baf4e0b 100644
--- a/drivers/esoc/esoc-mdm.h
+++ b/drivers/esoc/esoc-mdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
#define MDM9x35_HSIC "HSIC"
#define MDM9x55_LABEL "MDM9x55"
#define MDM9x55_PCIE "PCIe"
+#define SDXPOORWILLS_LABEL "SDXPOORWILLS"
+#define SDXPOORWILLS_PCIE "PCIe"
#define MDM2AP_STATUS_TIMEOUT_MS 120000L
#define MDM_MODEM_TIMEOUT 3000
#define DEF_RAMDUMP_TIMEOUT 120000
@@ -150,4 +152,5 @@
extern struct mdm_pon_ops mdm9x25_pon_ops;
extern struct mdm_pon_ops mdm9x35_pon_ops;
extern struct mdm_pon_ops mdm9x55_pon_ops;
+extern struct mdm_pon_ops sdxpoorwills_pon_ops;
#endif
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 2b6b112..3d50bae 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -249,8 +249,9 @@
}
#ifdef CONFIG_CPU_IDLE
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+static __maybe_unused DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+#ifdef CONFIG_DT_IDLE_STATES
static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
{
int i, ret, count = 0;
@@ -303,6 +304,10 @@
kfree(psci_states);
return ret;
}
+#else
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{ return 0; }
+#endif
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 79f2ec9..630b5fb 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,24 +29,54 @@
struct dp_aux dp_aux;
struct dp_catalog_aux *catalog;
struct dp_aux_cfg *cfg;
-
struct mutex mutex;
struct completion comp;
+ struct drm_dp_aux drm_aux;
- u32 aux_error_num;
- u32 retry_cnt;
bool cmd_busy;
bool native;
bool read;
bool no_send_addr;
bool no_send_stop;
+
u32 offset;
u32 segment;
+ u32 aux_error_num;
+ u32 retry_cnt;
+
atomic_t aborted;
- struct drm_dp_aux drm_aux;
+ u8 *dpcd;
+ u8 *edid;
};
+#ifdef CONFIG_DYNAMIC_DEBUG
+static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
+ struct drm_dp_aux_msg *msg)
+{
+ DEFINE_DYNAMIC_DEBUG_METADATA(ddm, "dp aux tracker");
+
+ if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT)) {
+ u8 buf[SZ_64];
+ struct dp_aux_private *aux = container_of(drm_aux,
+ struct dp_aux_private, drm_aux);
+
+ snprintf(buf, SZ_64, "[drm-dp] %5s %5s %5xh(%2zu): ",
+ aux->native ? "NATIVE" : "I2C",
+ aux->read ? "READ" : "WRITE",
+ msg->address, msg->size);
+
+ print_hex_dump(KERN_DEBUG, buf, DUMP_PREFIX_NONE,
+ 8, 1, msg->buffer, msg->size, false);
+ }
+}
+#else
+static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
+ struct drm_dp_aux_msg *msg)
+{
+}
+#endif
+
static char *dp_aux_get_error(u32 aux_error)
{
switch (aux_error) {
@@ -320,6 +350,7 @@
*
* @aux: DP AUX private structure
* @input_msg: input message from DRM upstream APIs
+ * @send_seg: send the seg to sink
*
* return: void
*
@@ -327,7 +358,7 @@
* sinks that do not handle the i2c middle-of-transaction flag correctly.
*/
static void dp_aux_transfer_helper(struct dp_aux_private *aux,
- struct drm_dp_aux_msg *input_msg)
+ struct drm_dp_aux_msg *input_msg, bool send_seg)
{
struct drm_dp_aux_msg helper_msg;
u32 const message_size = 0x10;
@@ -346,7 +377,7 @@
* duplicate AUX transactions related to this while reading the
* first 16 bytes of each block.
*/
- if (!(aux->offset % edid_block_length))
+ if (!(aux->offset % edid_block_length) || !send_seg)
goto end;
aux->read = false;
@@ -388,6 +419,110 @@
aux->segment = 0x0; /* reset segment at end of block */
}
+static int dp_aux_transfer_ready(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg, bool send_seg)
+{
+ int ret = 0;
+ int const aux_cmd_native_max = 16;
+ int const aux_cmd_i2c_max = 128;
+
+ if (atomic_read(&aux->aborted)) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+
+ /* Ignore address only message */
+ if ((msg->size == 0) || (msg->buffer == NULL)) {
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ goto error;
+ }
+
+ /* msg sanity check */
+ if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+ (msg->size > aux_cmd_i2c_max)) {
+ pr_err("%s: invalid msg: size(%zu), request(%x)\n",
+ __func__, msg->size, msg->request);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ dp_aux_update_offset_and_segment(aux, msg);
+
+ dp_aux_transfer_helper(aux, msg, send_seg);
+
+ aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+ if (aux->read) {
+ aux->no_send_addr = true;
+ aux->no_send_stop = false;
+ } else {
+ aux->no_send_addr = true;
+ aux->no_send_stop = true;
+ }
+
+ aux->cmd_busy = true;
+error:
+ return ret;
+}
+
+static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 timeout;
+ ssize_t ret;
+ struct dp_aux_private *aux = container_of(drm_aux,
+ struct dp_aux_private, drm_aux);
+
+ ret = dp_aux_transfer_ready(aux, msg, false);
+ if (ret)
+ goto end;
+
+ aux->aux_error_num = DP_AUX_ERR_NONE;
+
+ if (aux->native) {
+ if (aux->read && ((msg->address + msg->size) < SZ_1K)) {
+ aux->dp_aux.reg = msg->address;
+
+ reinit_completion(&aux->comp);
+ timeout = wait_for_completion_timeout(&aux->comp, HZ);
+ if (!timeout)
+ pr_err("aux timeout for 0x%x\n", msg->address);
+
+ aux->dp_aux.reg = 0xFFFF;
+
+ memcpy(msg->buffer, aux->dpcd + msg->address,
+ msg->size);
+ aux->aux_error_num = DP_AUX_ERR_NONE;
+ } else {
+ memset(msg->buffer, 0, msg->size);
+ }
+ } else {
+ if (aux->read && msg->address == 0x50) {
+ memcpy(msg->buffer,
+ aux->edid + aux->offset - 16,
+ msg->size);
+ }
+ }
+
+ if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+ dp_aux_hex_dump(drm_aux, msg);
+
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ } else {
+ /* Reply defer to retry */
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ }
+
+ ret = msg->size;
+end:
+ return ret;
+}
+
/*
* This function does the real job to process an AUX transaction.
* It will call aux_reset() function to reset the AUX channel,
@@ -397,52 +532,21 @@
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
- int const aux_cmd_native_max = 16;
- int const aux_cmd_i2c_max = 128;
int const retry_count = 5;
struct dp_aux_private *aux = container_of(drm_aux,
struct dp_aux_private, drm_aux);
mutex_lock(&aux->mutex);
- if (atomic_read(&aux->aborted)) {
- ret = -ETIMEDOUT;
+ ret = dp_aux_transfer_ready(aux, msg, true);
+ if (ret)
goto unlock_exit;
- }
- aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
-
- /* Ignore address only message */
- if ((msg->size == 0) || (msg->buffer == NULL)) {
- msg->reply = aux->native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ if (!aux->cmd_busy) {
ret = msg->size;
goto unlock_exit;
}
- /* msg sanity check */
- if ((aux->native && (msg->size > aux_cmd_native_max)) ||
- (msg->size > aux_cmd_i2c_max)) {
- pr_err("%s: invalid msg: size(%zu), request(%x)\n",
- __func__, msg->size, msg->request);
- ret = -EINVAL;
- goto unlock_exit;
- }
-
- dp_aux_update_offset_and_segment(aux, msg);
- dp_aux_transfer_helper(aux, msg);
-
- aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
- aux->cmd_busy = true;
-
- if (aux->read) {
- aux->no_send_addr = true;
- aux->no_send_stop = false;
- } else {
- aux->no_send_addr = true;
- aux->no_send_stop = true;
- }
-
ret = dp_aux_cmd_fifo_tx(aux, msg);
if ((ret < 0) && aux->native && !atomic_read(&aux->aborted)) {
aux->retry_cnt++;
@@ -459,6 +563,8 @@
if (aux->read)
dp_aux_cmd_fifo_rx(aux, msg);
+ dp_aux_hex_dump(drm_aux, msg);
+
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
} else {
@@ -558,6 +664,41 @@
drm_dp_aux_unregister(&aux->drm_aux);
}
+static void dp_aux_dpcd_updated(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ complete(&aux->comp);
+}
+
+static void dp_aux_set_sim_mode(struct dp_aux *dp_aux, bool en,
+ u8 *edid, u8 *dpcd)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->edid = edid;
+ aux->dpcd = dpcd;
+
+ if (en)
+ aux->drm_aux.transfer = dp_aux_transfer_debug;
+ else
+ aux->drm_aux.transfer = dp_aux_transfer;
+}
+
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
struct dp_aux_cfg *aux_cfg)
{
@@ -586,6 +727,7 @@
aux->cfg = aux_cfg;
dp_aux = &aux->dp_aux;
aux->retry_cnt = 0;
+ aux->dp_aux.reg = 0xFFFF;
dp_aux->isr = dp_aux_isr;
dp_aux->init = dp_aux_init;
@@ -594,6 +736,8 @@
dp_aux->drm_aux_deregister = dp_aux_deregister;
dp_aux->reconfig = dp_aux_reconfig;
dp_aux->abort = dp_aux_abort_transaction;
+ dp_aux->dpcd_updated = dp_aux_dpcd_updated;
+ dp_aux->set_sim_mode = dp_aux_set_sim_mode;
return dp_aux;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index e8cb1cc..bf52d57 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,19 @@
#include "dp_catalog.h"
#include "drm_dp_helper.h"
+#define DP_STATE_NOTIFICATION_SENT BIT(0)
+#define DP_STATE_TRAIN_1_STARTED BIT(1)
+#define DP_STATE_TRAIN_1_SUCCEEDED BIT(2)
+#define DP_STATE_TRAIN_1_FAILED BIT(3)
+#define DP_STATE_TRAIN_2_STARTED BIT(4)
+#define DP_STATE_TRAIN_2_SUCCEEDED BIT(5)
+#define DP_STATE_TRAIN_2_FAILED BIT(6)
+#define DP_STATE_CTRL_POWERED_ON BIT(7)
+#define DP_STATE_CTRL_POWERED_OFF BIT(8)
+#define DP_STATE_LINK_MAINTENANCE_STARTED BIT(9)
+#define DP_STATE_LINK_MAINTENANCE_COMPLETED BIT(10)
+#define DP_STATE_LINK_MAINTENANCE_FAILED BIT(11)
+
enum dp_aux_error {
DP_AUX_ERR_NONE = 0,
DP_AUX_ERR_ADDR = -1,
@@ -29,6 +42,9 @@
};
struct dp_aux {
+ u32 reg;
+ u32 state;
+
struct drm_dp_aux *drm_aux;
int (*drm_aux_register)(struct dp_aux *aux);
void (*drm_aux_deregister)(struct dp_aux *aux);
@@ -37,6 +53,8 @@
void (*deinit)(struct dp_aux *aux);
void (*reconfig)(struct dp_aux *aux);
void (*abort)(struct dp_aux *aux);
+ void (*dpcd_updated)(struct dp_aux *aux);
+ void (*set_sim_mode)(struct dp_aux *aux, bool en, u8 *edid, u8 *dpcd);
};
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index cfb4436..56a41b5 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,15 +23,12 @@
#define DP_GET_MSB(x) (x >> 8)
#define DP_GET_LSB(x) (x & 0xff)
-#define dp_read(offset) readl_relaxed((offset))
-#define dp_write(offset, data) writel_relaxed((data), (offset))
-
-#define dp_catalog_get_priv(x) { \
+#define dp_catalog_get_priv(x) ({ \
struct dp_catalog *dp_catalog; \
dp_catalog = container_of(x, struct dp_catalog, x); \
- catalog = container_of(dp_catalog, struct dp_catalog_private, \
+ container_of(dp_catalog, struct dp_catalog_private, \
dp_catalog); \
-}
+})
#define DP_INTERRUPT_STATUS1 \
(DP_INTR_AUX_I2C_DONE| \
@@ -48,6 +45,14 @@
#define DP_INTR_MASK2 (DP_INTERRUPT_STATUS2 << 2)
+#define dp_catalog_fill_io(x) { \
+ catalog->io.x = parser->get_io(parser, #x); \
+}
+
+#define dp_catalog_fill_io_buf(x) { \
+ parser->get_io_buf(parser, #x); \
+}
+
static u8 const vm_pre_emphasis[4][4] = {
{0x00, 0x0B, 0x12, 0xFF}, /* pe0, 0 db */
{0x00, 0x0A, 0x12, 0xFF}, /* pe1, 3.5 db */
@@ -63,30 +68,77 @@
{0xFF, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */
};
+struct dp_catalog_io {
+ struct dp_io_data *dp_ahb;
+ struct dp_io_data *dp_aux;
+ struct dp_io_data *dp_link;
+ struct dp_io_data *dp_p0;
+ struct dp_io_data *dp_phy;
+ struct dp_io_data *dp_ln_tx0;
+ struct dp_io_data *dp_ln_tx1;
+ struct dp_io_data *dp_mmss_cc;
+ struct dp_io_data *dp_pll;
+ struct dp_io_data *usb3_dp_com;
+ struct dp_io_data *hdcp_physical;
+};
+
/* audio related catalog functions */
struct dp_catalog_private {
struct device *dev;
- struct dp_io *io;
+ struct dp_catalog_io io;
+ struct dp_parser *parser;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct dp_catalog dp_catalog;
+
+ char exe_mode[SZ_4];
};
+static u32 dp_read(struct dp_catalog_private *catalog,
+ struct dp_io_data *io_data, u32 offset)
+{
+ u32 data = 0;
+
+ if (!strcmp(catalog->exe_mode, "hw") ||
+ !strcmp(catalog->exe_mode, "all")) {
+ data = readl_relaxed(io_data->io.base + offset);
+ } else if (!strcmp(catalog->exe_mode, "sw")) {
+ if (io_data->buf)
+ memcpy(&data, io_data->buf + offset, sizeof(offset));
+ }
+
+ return data;
+}
+
+static void dp_write(struct dp_catalog_private *catalog,
+ struct dp_io_data *io_data, u32 offset, u32 data)
+{
+ if (!strcmp(catalog->exe_mode, "hw") ||
+ !strcmp(catalog->exe_mode, "all"))
+ writel_relaxed(data, io_data->io.base + offset);
+
+ if (!strcmp(catalog->exe_mode, "sw") ||
+ !strcmp(catalog->exe_mode, "all")) {
+ if (io_data->buf)
+ memcpy(io_data->buf + offset, &data, sizeof(data));
+ }
+}
+
/* aux related catalog functions */
static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
goto end;
}
- dp_catalog_get_priv(aux);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_aux;
- return dp_read(base + DP_AUX_DATA);
+ return dp_read(catalog, io_data, DP_AUX_DATA);
end:
return 0;
}
@@ -95,7 +147,7 @@
{
int rc = 0;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
@@ -103,10 +155,10 @@
goto end;
}
- dp_catalog_get_priv(aux);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_aux;
- dp_write(base + DP_AUX_DATA, aux->data);
+ dp_write(catalog, io_data, DP_AUX_DATA, aux->data);
end:
return rc;
}
@@ -115,7 +167,7 @@
{
int rc = 0;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
@@ -123,10 +175,10 @@
goto end;
}
- dp_catalog_get_priv(aux);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_aux;
- dp_write(base + DP_AUX_TRANS_CTRL, aux->data);
+ dp_write(catalog, io_data, DP_AUX_TRANS_CTRL, aux->data);
end:
return rc;
}
@@ -136,7 +188,7 @@
int rc = 0;
u32 data = 0;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
@@ -144,15 +196,15 @@
goto end;
}
- dp_catalog_get_priv(aux);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_aux;
if (read) {
- data = dp_read(base + DP_AUX_TRANS_CTRL);
+ data = dp_read(catalog, io_data, DP_AUX_TRANS_CTRL);
data &= ~BIT(9);
- dp_write(base + DP_AUX_TRANS_CTRL, data);
+ dp_write(catalog, io_data, DP_AUX_TRANS_CTRL, data);
} else {
- dp_write(base + DP_AUX_TRANS_CTRL, 0);
+ dp_write(catalog, io_data, DP_AUX_TRANS_CTRL, 0);
}
end:
return rc;
@@ -161,7 +213,7 @@
static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux)
{
struct dp_catalog_private *catalog;
- void __iomem *phy_base;
+ struct dp_io_data *io_data;
u32 data = 0;
if (!aux) {
@@ -169,17 +221,16 @@
return;
}
- dp_catalog_get_priv(aux);
- phy_base = catalog->io->phy_io.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_phy;
- data = dp_read(phy_base + DP_PHY_AUX_INTERRUPT_STATUS);
- pr_debug("PHY_AUX_INTERRUPT_STATUS=0x%08x\n", data);
+ data = dp_read(catalog, io_data, DP_PHY_AUX_INTERRUPT_STATUS);
- dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
wmb(); /* make sure 0x1f is written before next write */
- dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
wmb(); /* make sure 0x9f is written before next write */
- dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+ dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_CLEAR, 0);
wmb(); /* make sure register is cleared */
}
@@ -187,24 +238,25 @@
{
u32 aux_ctrl;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(aux);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_aux;
- aux_ctrl = dp_read(base + DP_AUX_CTRL);
+ aux_ctrl = dp_read(catalog, io_data, DP_AUX_CTRL);
aux_ctrl |= BIT(1);
- dp_write(base + DP_AUX_CTRL, aux_ctrl);
+ dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl);
usleep_range(1000, 1010); /* h/w recommended delay */
aux_ctrl &= ~BIT(1);
- dp_write(base + DP_AUX_CTRL, aux_ctrl);
+
+ dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl);
wmb(); /* make sure AUX reset is done here */
}
@@ -212,27 +264,27 @@
{
u32 aux_ctrl;
struct dp_catalog_private *catalog;
- void __iomem *base;
-
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(aux);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_aux;
- aux_ctrl = dp_read(base + DP_AUX_CTRL);
+ aux_ctrl = dp_read(catalog, io_data, DP_AUX_CTRL);
if (enable) {
aux_ctrl |= BIT(0);
- dp_write(base + DP_AUX_CTRL, aux_ctrl);
+ dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl);
wmb(); /* make sure AUX module is enabled */
- dp_write(base + DP_TIMEOUT_COUNT, 0xffff);
- dp_write(base + DP_AUX_LIMITS, 0xffff);
+
+ dp_write(catalog, io_data, DP_TIMEOUT_COUNT, 0xffff);
+ dp_write(catalog, io_data, DP_AUX_LIMITS, 0xffff);
} else {
aux_ctrl &= ~BIT(0);
- dp_write(base + DP_AUX_CTRL, aux_ctrl);
+ dp_write(catalog, io_data, DP_AUX_CTRL, aux_ctrl);
}
}
@@ -241,13 +293,16 @@
{
struct dp_catalog_private *catalog;
u32 new_index = 0, current_index = 0;
+ struct dp_io_data *io_data;
if (!aux || !cfg || (type >= PHY_AUX_CFG_MAX)) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(aux);
+ catalog = dp_catalog_get_priv(aux);
+
+ io_data = catalog->io.dp_phy;
current_index = cfg[type].current_index;
new_index = (current_index + 1) % cfg[type].cfg_cnt;
@@ -255,8 +310,7 @@
dp_phy_aux_config_type_to_string(type),
cfg[type].lut[current_index], cfg[type].lut[new_index]);
- dp_write(catalog->io->phy_io.base + cfg[type].offset,
- cfg[type].lut[new_index]);
+ dp_write(catalog, io_data, cfg[type].offset, cfg[type].lut[new_index]);
cfg[type].current_index = new_index;
}
@@ -264,6 +318,7 @@
struct dp_aux_cfg *cfg)
{
struct dp_catalog_private *catalog;
+ struct dp_io_data *io_data;
int i = 0;
if (!aux || !cfg) {
@@ -271,25 +326,32 @@
return;
}
- dp_catalog_get_priv(aux);
+ catalog = dp_catalog_get_priv(aux);
- dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x65);
+ io_data = catalog->io.dp_phy;
+ dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x65);
wmb(); /* make sure PD programming happened */
/* Turn on BIAS current for PHY/PLL */
- dp_write(catalog->io->dp_pll_io.base +
- QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1b);
+ io_data = catalog->io.dp_pll;
+ dp_write(catalog, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1b);
+
+ io_data = catalog->io.dp_phy;
+ dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x02);
+ wmb(); /* make sure PD programming happened */
+ dp_write(catalog, io_data, DP_PHY_PD_CTL, 0x7d);
+
+ /* Turn on BIAS current for PHY/PLL */
+ io_data = catalog->io.dp_pll;
+ dp_write(catalog, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
/* DP AUX CFG register programming */
- for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
- pr_debug("%s: offset=0x%08x, value=0x%08x\n",
- dp_phy_aux_config_type_to_string(i),
- cfg[i].offset, cfg[i].lut[cfg[i].current_index]);
- dp_write(catalog->io->phy_io.base + cfg[i].offset,
- cfg[i].lut[cfg[i].current_index]);
- }
+ io_data = catalog->io.dp_phy;
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++)
+ dp_write(catalog, io_data, cfg[i].offset,
+ cfg[i].lut[cfg[i].current_index]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
+ dp_write(catalog, io_data, DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
wmb(); /* make sure AUX configuration is done before enabling it */
}
@@ -297,46 +359,46 @@
{
u32 ack;
struct dp_catalog_private *catalog;
- void __iomem *ahb_base;
+ struct dp_io_data *io_data;
if (!aux) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(aux);
- ahb_base = catalog->io->dp_ahb.base;
+ catalog = dp_catalog_get_priv(aux);
+ io_data = catalog->io.dp_ahb;
- aux->isr = dp_read(ahb_base + DP_INTR_STATUS);
+ aux->isr = dp_read(catalog, io_data, DP_INTR_STATUS);
aux->isr &= ~DP_INTR_MASK1;
ack = aux->isr & DP_INTERRUPT_STATUS1;
ack <<= 1;
ack |= DP_INTR_MASK1;
- dp_write(ahb_base + DP_INTR_STATUS, ack);
+ dp_write(catalog, io_data, DP_INTR_STATUS, ack);
}
/* controller related catalog functions */
static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return -EINVAL;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_ahb.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_ahb;
- return dp_read(base + DP_HDCP_STATUS);
+ return dp_read(catalog, io_data, DP_HDCP_STATUS);
}
static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
struct drm_msm_ext_hdr_metadata *hdr;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 header, parity, data;
u8 buf[SZ_128], off = 0;
@@ -345,16 +407,16 @@
return;
}
- dp_catalog_get_priv(panel);
+ catalog = dp_catalog_get_priv(panel);
hdr = &panel->hdr_data.hdr_meta;
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
/* HEADER BYTE 1 */
header = panel->hdr_data.vscext_header_byte1;
parity = dp_header_get_parity(header);
data = ((header << HEADER_BYTE_1_BIT)
| (parity << PARITY_BYTE_1_BIT));
- dp_write(base + MMSS_DP_VSCEXT_0, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_0, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -363,22 +425,22 @@
parity = dp_header_get_parity(header);
data = ((header << HEADER_BYTE_2_BIT)
| (parity << PARITY_BYTE_2_BIT));
- dp_write(base + MMSS_DP_VSCEXT_1, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_1, data);
/* HEADER BYTE 3 */
header = panel->hdr_data.vscext_header_byte3;
parity = dp_header_get_parity(header);
data = ((header << HEADER_BYTE_3_BIT)
| (parity << PARITY_BYTE_3_BIT));
- data |= dp_read(base + MMSS_DP_VSCEXT_1);
- dp_write(base + MMSS_DP_VSCEXT_1, data);
+ data |= dp_read(catalog, io_data, MMSS_DP_VSCEXT_1);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_1, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
data = panel->hdr_data.version;
data |= panel->hdr_data.length << 8;
data |= hdr->eotf << 16;
- dp_write(base + MMSS_DP_VSCEXT_2, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_2, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -386,7 +448,7 @@
(DP_GET_MSB(hdr->display_primaries_x[0]) << 8) |
(DP_GET_LSB(hdr->display_primaries_y[0]) << 16) |
(DP_GET_MSB(hdr->display_primaries_y[0]) << 24));
- dp_write(base + MMSS_DP_VSCEXT_3, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_3, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -394,7 +456,7 @@
(DP_GET_MSB(hdr->display_primaries_x[1]) << 8) |
(DP_GET_LSB(hdr->display_primaries_y[1]) << 16) |
(DP_GET_MSB(hdr->display_primaries_y[1]) << 24));
- dp_write(base + MMSS_DP_VSCEXT_4, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_4, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -402,7 +464,7 @@
(DP_GET_MSB(hdr->display_primaries_x[2]) << 8) |
(DP_GET_LSB(hdr->display_primaries_y[2]) << 16) |
(DP_GET_MSB(hdr->display_primaries_y[2]) << 24));
- dp_write(base + MMSS_DP_VSCEXT_5, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_5, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -410,7 +472,7 @@
(DP_GET_MSB(hdr->white_point_x) << 8) |
(DP_GET_LSB(hdr->white_point_y) << 16) |
(DP_GET_MSB(hdr->white_point_y) << 24));
- dp_write(base + MMSS_DP_VSCEXT_6, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_6, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -418,7 +480,7 @@
(DP_GET_MSB(hdr->max_luminance) << 8) |
(DP_GET_LSB(hdr->min_luminance) << 16) |
(DP_GET_MSB(hdr->min_luminance) << 24));
- dp_write(base + MMSS_DP_VSCEXT_7, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_7, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -426,12 +488,12 @@
(DP_GET_MSB(hdr->max_content_light_level) << 8) |
(DP_GET_LSB(hdr->max_average_light_level) << 16) |
(DP_GET_MSB(hdr->max_average_light_level) << 24));
- dp_write(base + MMSS_DP_VSCEXT_8, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_8, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
data = 0;
- dp_write(base + MMSS_DP_VSCEXT_9, data);
+ dp_write(catalog, io_data, MMSS_DP_VSCEXT_9, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -442,7 +504,7 @@
static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 header, parity, data;
u8 bpc, off = 0;
u8 buf[SZ_128];
@@ -452,15 +514,15 @@
return;
}
- dp_catalog_get_priv(panel);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(panel);
+ io_data = catalog->io.dp_link;
/* HEADER BYTE 1 */
header = panel->hdr_data.vsc_header_byte1;
parity = dp_header_get_parity(header);
data = ((header << HEADER_BYTE_1_BIT)
| (parity << PARITY_BYTE_1_BIT));
- dp_write(base + MMSS_DP_GENERIC0_0, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_0, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -469,32 +531,32 @@
parity = dp_header_get_parity(header);
data = ((header << HEADER_BYTE_2_BIT)
| (parity << PARITY_BYTE_2_BIT));
- dp_write(base + MMSS_DP_GENERIC0_1, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_1, data);
/* HEADER BYTE 3 */
header = panel->hdr_data.vsc_header_byte3;
parity = dp_header_get_parity(header);
data = ((header << HEADER_BYTE_3_BIT)
| (parity << PARITY_BYTE_3_BIT));
- data |= dp_read(base + MMSS_DP_GENERIC0_1);
- dp_write(base + MMSS_DP_GENERIC0_1, data);
+ data |= dp_read(catalog, io_data, MMSS_DP_GENERIC0_1);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_1, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
data = 0;
- dp_write(base + MMSS_DP_GENERIC0_2, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_2, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
- dp_write(base + MMSS_DP_GENERIC0_3, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_3, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
- dp_write(base + MMSS_DP_GENERIC0_4, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_4, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
- dp_write(base + MMSS_DP_GENERIC0_5, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_5, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -517,20 +579,20 @@
((panel->hdr_data.dynamic_range & 0x1) << 15) |
((panel->hdr_data.content_type & 0x7) << 16);
- dp_write(base + MMSS_DP_GENERIC0_6, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_6, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
data = 0;
- dp_write(base + MMSS_DP_GENERIC0_7, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_7, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
- dp_write(base + MMSS_DP_GENERIC0_8, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_8, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
- dp_write(base + MMSS_DP_GENERIC0_9, data);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC0_9, data);
memcpy(buf + off, &data, sizeof(data));
off += sizeof(data);
@@ -541,7 +603,7 @@
static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 cfg, cfg2, misc;
if (!panel) {
@@ -549,21 +611,21 @@
return;
}
- dp_catalog_get_priv(panel);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(panel);
+ io_data = catalog->io.dp_link;
- cfg = dp_read(base + MMSS_DP_SDP_CFG);
- cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
- misc = dp_read(base + DP_MISC1_MISC0);
+ cfg = dp_read(catalog, io_data, MMSS_DP_SDP_CFG);
+ cfg2 = dp_read(catalog, io_data, MMSS_DP_SDP_CFG2);
+ misc = dp_read(catalog, io_data, DP_MISC1_MISC0);
if (en) {
/* VSCEXT_SDP_EN, GEN0_SDP_EN */
cfg |= BIT(16) | BIT(17);
- dp_write(base + MMSS_DP_SDP_CFG, cfg);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG, cfg);
/* EXTN_SDPSIZE GENERIC0_SDPSIZE */
cfg2 |= BIT(15) | BIT(16);
- dp_write(base + MMSS_DP_SDP_CFG2, cfg2);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG2, cfg2);
dp_catalog_panel_setup_vsc_sdp(panel);
dp_catalog_panel_setup_infoframe_sdp(panel);
@@ -578,11 +640,11 @@
} else {
/* VSCEXT_SDP_EN, GEN0_SDP_EN */
cfg &= ~BIT(16) & ~BIT(17);
- dp_write(base + MMSS_DP_SDP_CFG, cfg);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG, cfg);
/* EXTN_SDPSIZE GENERIC0_SDPSIZE */
cfg2 &= ~BIT(15) & ~BIT(16);
- dp_write(base + MMSS_DP_SDP_CFG2, cfg2);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG2, cfg2);
/* switch back to MSA */
misc &= ~BIT(14);
@@ -590,78 +652,78 @@
pr_debug("Disabled\n");
}
- dp_write(base + DP_MISC1_MISC0, misc);
+ dp_write(catalog, io_data, DP_MISC1_MISC0, misc);
- dp_write(base + MMSS_DP_SDP_CFG3, 0x01);
- dp_write(base + MMSS_DP_SDP_CFG3, 0x00);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG3, 0x01);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG3, 0x00);
}
static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
- dp_write(base + DP_VALID_BOUNDARY, ctrl->valid_boundary);
- dp_write(base + DP_TU, ctrl->dp_tu);
- dp_write(base + DP_VALID_BOUNDARY_2, ctrl->valid_boundary2);
+ dp_write(catalog, io_data, DP_VALID_BOUNDARY, ctrl->valid_boundary);
+ dp_write(catalog, io_data, DP_TU, ctrl->dp_tu);
+ dp_write(catalog, io_data, DP_VALID_BOUNDARY_2, ctrl->valid_boundary2);
}
static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
- dp_write(base + DP_STATE_CTRL, state);
+ dp_write(catalog, io_data, DP_STATE_CTRL, state);
}
static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
{
struct dp_catalog_private *catalog;
- void __iomem *link_base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- link_base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
pr_debug("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
- dp_write(link_base + DP_CONFIGURATION_CTRL, cfg);
+ dp_write(catalog, io_data, DP_CONFIGURATION_CTRL, cfg);
}
static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
- dp_write(base + DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4);
+ dp_write(catalog, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4);
}
static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
@@ -669,29 +731,29 @@
{
u32 mainlink_ctrl;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
if (enable) {
- dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+ dp_write(catalog, io_data, DP_MAINLINK_CTRL, 0x02000000);
wmb(); /* make sure mainlink is turned off before reset */
- dp_write(base + DP_MAINLINK_CTRL, 0x02000002);
+ dp_write(catalog, io_data, DP_MAINLINK_CTRL, 0x02000002);
wmb(); /* make sure mainlink entered reset */
- dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+ dp_write(catalog, io_data, DP_MAINLINK_CTRL, 0x02000000);
wmb(); /* make sure mainlink reset done */
- dp_write(base + DP_MAINLINK_CTRL, 0x02000001);
+ dp_write(catalog, io_data, DP_MAINLINK_CTRL, 0x02000001);
wmb(); /* make sure mainlink turned on */
} else {
- mainlink_ctrl = dp_read(base + DP_MAINLINK_CTRL);
+ mainlink_ctrl = dp_read(catalog, io_data, DP_MAINLINK_CTRL);
mainlink_ctrl &= ~BIT(0);
- dp_write(base + DP_MAINLINK_CTRL, mainlink_ctrl);
+ dp_write(catalog, io_data, DP_MAINLINK_CTRL, mainlink_ctrl);
}
}
@@ -700,21 +762,23 @@
{
u32 misc_val = cc;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
+ misc_val = dp_read(catalog, io_data, DP_MISC1_MISC0);
+ misc_val |= cc;
misc_val |= (tb << 5);
misc_val |= BIT(0); /* Configure clock to synchronous mode */
pr_debug("misc settings = 0x%x\n", misc_val);
- dp_write(base + DP_MISC1_MISC0, misc_val);
+ dp_write(catalog, io_data, DP_MISC1_MISC0, misc_val);
}
static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl,
@@ -728,14 +792,14 @@
u32 const link_rate_hbr2 = 540000;
u32 const link_rate_hbr3 = 810000;
struct dp_catalog_private *catalog;
- void __iomem *base_cc, *base_ctrl;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
+ catalog = dp_catalog_get_priv(ctrl);
if (fixed_nvid) {
pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
nvid = nvid_fixed;
@@ -756,10 +820,10 @@
*/
mvid = (u32) mvid_calc;
} else {
- base_cc = catalog->io->dp_cc_io.base;
+ io_data = catalog->io.dp_mmss_cc;
- pixel_m = dp_read(base_cc + MMSS_DP_PIXEL_M);
- pixel_n = dp_read(base_cc + MMSS_DP_PIXEL_N);
+ pixel_m = dp_read(catalog, io_data, MMSS_DP_PIXEL_M);
+ pixel_n = dp_read(catalog, io_data, MMSS_DP_PIXEL_N);
pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
mvid = (pixel_m & 0xFFFF) * 5;
@@ -774,10 +838,10 @@
nvid *= 3;
}
- base_ctrl = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
- dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
- dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
+ dp_write(catalog, io_data, DP_SOFTWARE_MVID, mvid);
+ dp_write(catalog, io_data, DP_SOFTWARE_NVID, nvid);
}
static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
@@ -786,26 +850,26 @@
int bit, cnt = 10;
u32 data;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
bit = 1;
bit <<= (pattern - 1);
pr_debug("hw: bit=%d train=%d\n", bit, pattern);
- dp_write(base + DP_STATE_CTRL, bit);
+ dp_write(catalog, io_data, DP_STATE_CTRL, bit);
bit = 8;
bit <<= (pattern - 1);
while (cnt--) {
- data = dp_read(base + DP_MAINLINK_READY);
+ data = dp_read(catalog, io_data, DP_MAINLINK_READY);
if (data & bit)
break;
}
@@ -817,35 +881,35 @@
static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
+ catalog = dp_catalog_get_priv(ctrl);
- base = catalog->io->usb3_dp_com.base;
+ io_data = catalog->io.usb3_dp_com;
- dp_write(base + USB3_DP_COM_RESET_OVRD_CTRL, 0x0a);
- dp_write(base + USB3_DP_COM_PHY_MODE_CTRL, 0x02);
- dp_write(base + USB3_DP_COM_SW_RESET, 0x01);
+ dp_write(catalog, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x0a);
+ dp_write(catalog, io_data, USB3_DP_COM_PHY_MODE_CTRL, 0x02);
+ dp_write(catalog, io_data, USB3_DP_COM_SW_RESET, 0x01);
/* make sure usb3 com phy software reset is done */
wmb();
if (!flip) /* CC1 */
- dp_write(base + USB3_DP_COM_TYPEC_CTRL, 0x02);
+ dp_write(catalog, io_data, USB3_DP_COM_TYPEC_CTRL, 0x02);
else /* CC2 */
- dp_write(base + USB3_DP_COM_TYPEC_CTRL, 0x03);
+ dp_write(catalog, io_data, USB3_DP_COM_TYPEC_CTRL, 0x03);
- dp_write(base + USB3_DP_COM_SWI_CTRL, 0x00);
- dp_write(base + USB3_DP_COM_SW_RESET, 0x00);
+ dp_write(catalog, io_data, USB3_DP_COM_SWI_CTRL, 0x00);
+ dp_write(catalog, io_data, USB3_DP_COM_SW_RESET, 0x00);
/* make sure the software reset is done */
wmb();
- dp_write(base + USB3_DP_COM_POWER_DOWN_CTRL, 0x01);
- dp_write(base + USB3_DP_COM_RESET_OVRD_CTRL, 0x00);
+ dp_write(catalog, io_data, USB3_DP_COM_POWER_DOWN_CTRL, 0x01);
+ dp_write(catalog, io_data, USB3_DP_COM_RESET_OVRD_CTRL, 0x00);
/* make sure phy is brought out of reset */
wmb();
}
@@ -854,50 +918,53 @@
bool enable)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!panel) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(panel);
- base = catalog->io->dp_p0.base;
+ catalog = dp_catalog_get_priv(panel);
+ io_data = catalog->io.dp_p0;
if (!enable) {
- dp_write(base + MMSS_DP_TPG_MAIN_CONTROL, 0x0);
- dp_write(base + MMSS_DP_BIST_ENABLE, 0x0);
- dp_write(base + MMSS_DP_TIMING_ENGINE_EN, 0x0);
+ dp_write(catalog, io_data, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ dp_write(catalog, io_data, MMSS_DP_BIST_ENABLE, 0x0);
+ dp_write(catalog, io_data, MMSS_DP_TIMING_ENGINE_EN, 0x0);
wmb(); /* ensure Timing generator is turned off */
return;
}
- dp_write(base + MMSS_DP_INTF_CONFIG, 0x0);
- dp_write(base + MMSS_DP_INTF_HSYNC_CTL, panel->hsync_ctl);
- dp_write(base + MMSS_DP_INTF_VSYNC_PERIOD_F0, panel->vsync_period *
- panel->hsync_period);
- dp_write(base + MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, panel->v_sync_width *
- panel->hsync_period);
- dp_write(base + MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
- dp_write(base + MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
- dp_write(base + MMSS_DP_INTF_DISPLAY_HCTL, panel->display_hctl);
- dp_write(base + MMSS_DP_INTF_ACTIVE_HCTL, 0);
- dp_write(base + MMSS_INTF_DISPLAY_V_START_F0, panel->display_v_start);
- dp_write(base + MMSS_DP_INTF_DISPLAY_V_END_F0, panel->display_v_end);
- dp_write(base + MMSS_INTF_DISPLAY_V_START_F1, 0);
- dp_write(base + MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
- dp_write(base + MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
- dp_write(base + MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
- dp_write(base + MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
- dp_write(base + MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
- dp_write(base + MMSS_DP_INTF_POLARITY_CTL, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_CONFIG, 0x0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_HSYNC_CTL, panel->hsync_ctl);
+ dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F0,
+ panel->vsync_period * panel->hsync_period);
+ dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0,
+ panel->v_sync_width * panel->hsync_period);
+ dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_DISPLAY_HCTL,
+ panel->display_hctl);
+ dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ dp_write(catalog, io_data, MMSS_INTF_DISPLAY_V_START_F0,
+ panel->display_v_start);
+ dp_write(catalog, io_data, MMSS_DP_INTF_DISPLAY_V_END_F0,
+ panel->display_v_end);
+ dp_write(catalog, io_data, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ dp_write(catalog, io_data, MMSS_DP_INTF_POLARITY_CTL, 0);
wmb(); /* ensure TPG registers are programmed */
- dp_write(base + MMSS_DP_TPG_MAIN_CONTROL, 0x100);
- dp_write(base + MMSS_DP_TPG_VIDEO_CONFIG, 0x5);
+ dp_write(catalog, io_data, MMSS_DP_TPG_MAIN_CONTROL, 0x100);
+ dp_write(catalog, io_data, MMSS_DP_TPG_VIDEO_CONFIG, 0x5);
wmb(); /* ensure TPG config is programmed */
- dp_write(base + MMSS_DP_BIST_ENABLE, 0x1);
- dp_write(base + MMSS_DP_TIMING_ENGINE_EN, 0x1);
+ dp_write(catalog, io_data, MMSS_DP_BIST_ENABLE, 0x1);
+ dp_write(catalog, io_data, MMSS_DP_TIMING_ENGINE_EN, 0x1);
wmb(); /* ensure Timing generator is turned on */
}
@@ -905,24 +972,24 @@
{
u32 sw_reset;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_ahb.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_ahb;
- sw_reset = dp_read(base + DP_SW_RESET);
+ sw_reset = dp_read(catalog, io_data, DP_SW_RESET);
sw_reset |= BIT(0);
- dp_write(base + DP_SW_RESET, sw_reset);
+ dp_write(catalog, io_data, DP_SW_RESET, sw_reset);
usleep_range(1000, 1010); /* h/w recommended delay */
sw_reset &= ~BIT(0);
- dp_write(base + DP_SW_RESET, sw_reset);
+ dp_write(catalog, io_data, DP_SW_RESET, sw_reset);
}
static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl)
@@ -930,19 +997,19 @@
u32 data;
int cnt = 10;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
goto end;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_link;
while (--cnt) {
/* DP_MAINLINK_READY */
- data = dp_read(base + DP_MAINLINK_READY);
+ data = dp_read(catalog, io_data, DP_MAINLINK_READY);
if (data & BIT(0))
return true;
@@ -957,52 +1024,51 @@
bool enable)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_ahb.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_ahb;
if (enable) {
- dp_write(base + DP_INTR_STATUS, DP_INTR_MASK1);
- dp_write(base + DP_INTR_STATUS2, DP_INTR_MASK2);
+ dp_write(catalog, io_data, DP_INTR_STATUS, DP_INTR_MASK1);
+ dp_write(catalog, io_data, DP_INTR_STATUS2, DP_INTR_MASK2);
} else {
- dp_write(base + DP_INTR_STATUS, 0x00);
- dp_write(base + DP_INTR_STATUS2, 0x00);
+ dp_write(catalog, io_data, DP_INTR_STATUS, 0x00);
+ dp_write(catalog, io_data, DP_INTR_STATUS2, 0x00);
}
}
static void dp_catalog_ctrl_hpd_config(struct dp_catalog_ctrl *ctrl, bool en)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_aux.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_aux;
if (en) {
- u32 reftimer = dp_read(base + DP_DP_HPD_REFTIMER);
+ u32 reftimer = dp_read(catalog, io_data, DP_DP_HPD_REFTIMER);
- dp_write(base + DP_DP_HPD_INT_ACK, 0xF);
- dp_write(base + DP_DP_HPD_INT_MASK, 0xF);
-
+ dp_write(catalog, io_data, DP_DP_HPD_INT_ACK, 0xF);
+ dp_write(catalog, io_data, DP_DP_HPD_INT_MASK, 0xF);
/* Enabling REFTIMER */
reftimer |= BIT(16);
- dp_write(base + DP_DP_HPD_REFTIMER, 0xF);
+ dp_write(catalog, io_data, DP_DP_HPD_REFTIMER, 0xF);
/* Enable HPD */
- dp_write(base + DP_DP_HPD_CTRL, 0x1);
+ dp_write(catalog, io_data, DP_DP_HPD_CTRL, 0x1);
} else {
/*Disable HPD */
- dp_write(base + DP_DP_HPD_CTRL, 0x0);
+ dp_write(catalog, io_data, DP_DP_HPD_CTRL, 0x0);
}
}
@@ -1010,40 +1076,40 @@
{
u32 ack = 0;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_ahb.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_ahb;
- ctrl->isr = dp_read(base + DP_INTR_STATUS2);
+ ctrl->isr = dp_read(catalog, io_data, DP_INTR_STATUS2);
ctrl->isr &= ~DP_INTR_MASK2;
ack = ctrl->isr & DP_INTERRUPT_STATUS2;
ack <<= 1;
ack |= DP_INTR_MASK2;
- dp_write(base + DP_INTR_STATUS2, ack);
+ dp_write(catalog, io_data, DP_INTR_STATUS2, ack);
}
static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_ahb.base;
+ catalog = dp_catalog_get_priv(ctrl);
+ io_data = catalog->io.dp_ahb;
- dp_write(base + DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
+ dp_write(catalog, io_data, DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
usleep_range(1000, 1010); /* h/w recommended delay */
- dp_write(base + DP_PHY_CTRL, 0x0);
+ dp_write(catalog, io_data, DP_PHY_CTRL, 0x0);
wmb(); /* make sure PHY reset done */
}
@@ -1052,6 +1118,7 @@
{
u32 info = 0x0;
struct dp_catalog_private *catalog;
+ struct dp_io_data *io_data;
u8 orientation = BIT(!!flipped);
if (!ctrl) {
@@ -1059,20 +1126,22 @@
return;
}
- dp_catalog_get_priv(ctrl);
+ catalog = dp_catalog_get_priv(ctrl);
+
+ io_data = catalog->io.dp_phy;
info |= (ln_cnt & 0x0F);
info |= ((orientation & 0x0F) << 4);
pr_debug("Shared Info = 0x%x\n", info);
- dp_write(catalog->io->phy_io.base + DP_PHY_SPARE0, info);
+ dp_write(catalog, io_data, DP_PHY_SPARE0, info);
}
static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl,
u8 v_level, u8 p_level)
{
struct dp_catalog_private *catalog;
- void __iomem *base0, *base1;
+ struct dp_io_data *io_data;
u8 value0, value1;
if (!ctrl) {
@@ -1080,9 +1149,7 @@
return;
}
- dp_catalog_get_priv(ctrl);
- base0 = catalog->io->ln_tx0_io.base;
- base1 = catalog->io->ln_tx1_io.base;
+ catalog = dp_catalog_get_priv(ctrl);
pr_debug("hw: v=%d p=%d\n", v_level, p_level);
@@ -1090,10 +1157,14 @@
value1 = vm_pre_emphasis[v_level][p_level];
/* program default setting first */
- dp_write(base0 + TXn_TX_DRV_LVL, 0x2A);
- dp_write(base1 + TXn_TX_DRV_LVL, 0x2A);
- dp_write(base0 + TXn_TX_EMP_POST1_LVL, 0x20);
- dp_write(base1 + TXn_TX_EMP_POST1_LVL, 0x20);
+
+ io_data = catalog->io.dp_ln_tx0;
+ dp_write(catalog, io_data, TXn_TX_DRV_LVL, 0x2A);
+ dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
+
+ io_data = catalog->io.dp_ln_tx1;
+ dp_write(catalog, io_data, TXn_TX_DRV_LVL, 0x2A);
+ dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
/* Enable MUX to use Cursor values from these registers */
value0 |= BIT(5);
@@ -1101,10 +1172,13 @@
/* Configure host and panel only if both values are allowed */
if (value0 != 0xFF && value1 != 0xFF) {
- dp_write(base0 + TXn_TX_DRV_LVL, value0);
- dp_write(base1 + TXn_TX_DRV_LVL, value0);
- dp_write(base0 + TXn_TX_EMP_POST1_LVL, value1);
- dp_write(base1 + TXn_TX_EMP_POST1_LVL, value1);
+ io_data = catalog->io.dp_ln_tx0;
+ dp_write(catalog, io_data, TXn_TX_DRV_LVL, value0);
+ dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, value1);
+
+ io_data = catalog->io.dp_ln_tx1;
+ dp_write(catalog, io_data, TXn_TX_DRV_LVL, value0);
+ dp_write(catalog, io_data, TXn_TX_EMP_POST1_LVL, value1);
pr_debug("hw: vx_value=0x%x px_value=0x%x\n",
value0, value1);
@@ -1119,54 +1193,61 @@
{
struct dp_catalog_private *catalog;
u32 value = 0x0;
- void __iomem *base = NULL;
+ struct dp_io_data *io_data = NULL;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
+ catalog = dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
- dp_write(base + DP_STATE_CTRL, 0x0);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x0);
switch (pattern) {
case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING:
- dp_write(base + DP_STATE_CTRL, 0x1);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x1);
break;
case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
value &= ~(1 << 16);
- dp_write(base + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
+ dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
value |= 0xFC;
- dp_write(base + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
- dp_write(base + DP_MAINLINK_LEVELS, 0x2);
- dp_write(base + DP_STATE_CTRL, 0x10);
+ dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ dp_write(catalog, io_data, DP_MAINLINK_LEVELS, 0x2);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x10);
break;
case DP_TEST_PHY_PATTERN_PRBS7:
- dp_write(base + DP_STATE_CTRL, 0x20);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x20);
break;
case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
- dp_write(base + DP_STATE_CTRL, 0x40);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x40);
/* 00111110000011111000001111100000 */
- dp_write(base + DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0);
+ dp_write(catalog, io_data, DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ 0x3E0F83E0);
/* 00001111100000111110000011111000 */
- dp_write(base + DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8);
+ dp_write(catalog, io_data, DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ 0x0F83E0F8);
/* 1111100000111110 */
- dp_write(base + DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E);
+ dp_write(catalog, io_data, DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ 0x0000F83E);
break;
case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
value = BIT(16);
- dp_write(base + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
+ dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
value |= 0xFC;
- dp_write(base + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
- dp_write(base + DP_MAINLINK_LEVELS, 0x2);
- dp_write(base + DP_STATE_CTRL, 0x10);
+ dp_write(catalog, io_data, DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ dp_write(catalog, io_data, DP_MAINLINK_LEVELS, 0x2);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x10);
break;
case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
- dp_write(base + DP_MAINLINK_CTRL, 0x11);
- dp_write(base + DP_STATE_CTRL, 0x8);
+ dp_write(catalog, io_data, DP_MAINLINK_CTRL, 0x11);
+ dp_write(catalog, io_data, DP_STATE_CTRL, 0x8);
break;
default:
pr_debug("No valid test pattern requested: 0x%x\n", pattern);
@@ -1180,38 +1261,101 @@
static u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog_ctrl *ctrl)
{
struct dp_catalog_private *catalog;
- void __iomem *base = NULL;
+ struct dp_io_data *io_data = NULL;
if (!ctrl) {
pr_err("invalid input\n");
return 0;
}
- dp_catalog_get_priv(ctrl);
+ catalog = dp_catalog_get_priv(ctrl);
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
- return dp_read(base + DP_MAINLINK_READY);
+ return dp_read(catalog, io_data, DP_MAINLINK_READY);
+}
+
+static int dp_catalog_reg_dump(struct dp_catalog *dp_catalog,
+ char *name, u8 **out_buf, u32 *out_buf_len)
+{
+ int ret = 0;
+ u8 *buf;
+ u32 len;
+ struct dp_io_data *io_data;
+ struct dp_catalog_private *catalog;
+ struct dp_parser *parser;
+
+ if (!dp_catalog) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ catalog = container_of(dp_catalog, struct dp_catalog_private,
+ dp_catalog);
+
+ parser = catalog->parser;
+ parser->get_io_buf(parser, name);
+ io_data = parser->get_io(parser, name);
+ if (!io_data) {
+ pr_err("IO %s not found\n", name);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ buf = io_data->buf;
+ len = io_data->io.len;
+
+ if (!buf || !len) {
+ pr_err("no buffer available\n");
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ if (!strcmp(catalog->exe_mode, "hw") ||
+ !strcmp(catalog->exe_mode, "all")) {
+ u32 i, data;
+ u32 const rowsize = 4;
+ void __iomem *addr = io_data->io.base;
+
+ memset(buf, 0, len);
+
+ for (i = 0; i < len / rowsize; i++) {
+ data = readl_relaxed(addr);
+ memcpy(buf + (rowsize * i), &data, sizeof(u32));
+
+ addr += rowsize;
+ }
+ }
+
+ *out_buf = buf;
+ *out_buf_len = len;
+end:
+ if (ret)
+ parser->clear_io_buf(parser);
+
+ return ret;
}
/* panel related catalog functions */
static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
if (!panel) {
pr_err("invalid input\n");
goto end;
}
- dp_catalog_get_priv(panel);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(panel);
+ io_data = catalog->io.dp_link;
- dp_write(base + DP_TOTAL_HOR_VER, panel->total);
- dp_write(base + DP_START_HOR_VER_FROM_SYNC, panel->sync_start);
- dp_write(base + DP_HSYNC_VSYNC_WIDTH_POLARITY, panel->width_blanking);
- dp_write(base + DP_ACTIVE_HOR_VER, panel->dp_active);
+ dp_write(catalog, io_data, DP_TOTAL_HOR_VER, panel->total);
+ dp_write(catalog, io_data, DP_START_HOR_VER_FROM_SYNC,
+ panel->sync_start);
+ dp_write(catalog, io_data, DP_HSYNC_VSYNC_WIDTH_POLARITY,
+ panel->width_blanking);
+ dp_write(catalog, io_data, DP_ACTIVE_HOR_VER, panel->dp_active);
end:
return 0;
}
@@ -1250,7 +1394,7 @@
if (!audio)
return;
- dp_catalog_get_priv(audio);
+ catalog = dp_catalog_get_priv(audio);
catalog->audio_map = sdp_map;
}
@@ -1258,17 +1402,17 @@
static void dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 sdp_cfg = 0;
u32 sdp_cfg2 = 0;
if (!audio)
return;
- dp_catalog_get_priv(audio);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(audio);
+ io_data = catalog->io.dp_link;
- sdp_cfg = dp_read(base + MMSS_DP_SDP_CFG);
+ sdp_cfg = dp_read(catalog, io_data, MMSS_DP_SDP_CFG);
/* AUDIO_TIMESTAMP_SDP_EN */
sdp_cfg |= BIT(1);
@@ -1282,44 +1426,44 @@
sdp_cfg |= BIT(20);
pr_debug("sdp_cfg = 0x%x\n", sdp_cfg);
- dp_write(base + MMSS_DP_SDP_CFG, sdp_cfg);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG, sdp_cfg);
- sdp_cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
+ sdp_cfg2 = dp_read(catalog, io_data, MMSS_DP_SDP_CFG2);
/* IFRM_REGSRC -> Do not use reg values */
sdp_cfg2 &= ~BIT(0);
/* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
sdp_cfg2 &= ~BIT(1);
pr_debug("sdp_cfg2 = 0x%x\n", sdp_cfg2);
- dp_write(base + MMSS_DP_SDP_CFG2, sdp_cfg2);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio)
{
struct dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
- void __iomem *base;
+ struct dp_io_data *io_data;
enum dp_catalog_audio_sdp_type sdp;
enum dp_catalog_audio_header_type header;
if (!audio)
return;
- dp_catalog_get_priv(audio);
+ catalog = dp_catalog_get_priv(audio);
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
sdp_map = catalog->audio_map;
sdp = audio->sdp_type;
header = audio->sdp_header;
- audio->data = dp_read(base + sdp_map[sdp][header]);
+ audio->data = dp_read(catalog, io_data, sdp_map[sdp][header]);
}
static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio)
{
struct dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
- void __iomem *base;
+ struct dp_io_data *io_data;
enum dp_catalog_audio_sdp_type sdp;
enum dp_catalog_audio_header_type header;
u32 data;
@@ -1327,69 +1471,69 @@
if (!audio)
return;
- dp_catalog_get_priv(audio);
+ catalog = dp_catalog_get_priv(audio);
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
sdp_map = catalog->audio_map;
sdp = audio->sdp_type;
header = audio->sdp_header;
data = audio->data;
- dp_write(base + sdp_map[sdp][header], data);
+ dp_write(catalog, io_data, sdp_map[sdp][header], data);
}
static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 acr_ctrl, select;
- dp_catalog_get_priv(audio);
+ catalog = dp_catalog_get_priv(audio);
select = audio->data;
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
pr_debug("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
- dp_write(base + MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+ dp_write(catalog, io_data, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
static void dp_catalog_audio_safe_to_exit_level(struct dp_catalog_audio *audio)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 mainlink_levels, safe_to_exit_level;
- dp_catalog_get_priv(audio);
+ catalog = dp_catalog_get_priv(audio);
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
safe_to_exit_level = audio->data;
- mainlink_levels = dp_read(base + DP_MAINLINK_LEVELS);
+ mainlink_levels = dp_read(catalog, io_data, DP_MAINLINK_LEVELS);
mainlink_levels &= 0xFE0;
mainlink_levels |= safe_to_exit_level;
pr_debug("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
mainlink_levels, safe_to_exit_level);
- dp_write(base + DP_MAINLINK_LEVELS, mainlink_levels);
+ dp_write(catalog, io_data, DP_MAINLINK_LEVELS, mainlink_levels);
}
static void dp_catalog_audio_enable(struct dp_catalog_audio *audio)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
bool enable;
u32 audio_ctrl;
- dp_catalog_get_priv(audio);
+ catalog = dp_catalog_get_priv(audio);
- base = catalog->io->dp_link.base;
+ io_data = catalog->io.dp_link;
enable = !!audio->data;
- audio_ctrl = dp_read(base + MMSS_DP_AUDIO_CFG);
+ audio_ctrl = dp_read(catalog, io_data, MMSS_DP_AUDIO_CFG);
if (enable)
audio_ctrl |= BIT(0);
@@ -1397,7 +1541,7 @@
audio_ctrl &= ~BIT(0);
pr_debug("dp_audio_cfg = 0x%x\n", audio_ctrl);
- dp_write(base + MMSS_DP_AUDIO_CFG, audio_ctrl);
+ dp_write(catalog, io_data, MMSS_DP_AUDIO_CFG, audio_ctrl);
/* make sure audio engine is disabled */
wmb();
@@ -1406,18 +1550,18 @@
static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 value, new_value;
u8 parity_byte;
if (!panel)
return;
- dp_catalog_get_priv(panel);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(panel);
+ io_data = catalog->io.dp_link;
/* Config header and parity byte 1 */
- value = dp_read(base + MMSS_DP_GENERIC1_0);
+ value = dp_read(catalog, io_data, MMSS_DP_GENERIC1_0);
new_value = 0x83;
parity_byte = dp_header_get_parity(new_value);
@@ -1425,10 +1569,10 @@
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_write(base + MMSS_DP_GENERIC1_0, value);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_0, value);
/* Config header and parity byte 2 */
- value = dp_read(base + MMSS_DP_GENERIC1_1);
+ value = dp_read(catalog, io_data, MMSS_DP_GENERIC1_1);
new_value = 0x1b;
parity_byte = dp_header_get_parity(new_value);
@@ -1436,10 +1580,10 @@
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
- dp_write(base + MMSS_DP_GENERIC1_1, value);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_1, value);
/* Config header and parity byte 3 */
- value = dp_read(base + MMSS_DP_GENERIC1_1);
+ value = dp_read(catalog, io_data, MMSS_DP_GENERIC1_1);
new_value = (0x0 | (0x12 << 2));
parity_byte = dp_header_get_parity(new_value);
@@ -1447,13 +1591,13 @@
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
new_value, parity_byte);
- dp_write(base + MMSS_DP_GENERIC1_1, value);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_1, value);
}
static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ struct dp_io_data *io_data;
u32 spd_cfg = 0, spd_cfg2 = 0;
u8 *vendor = NULL, *product = NULL;
/*
@@ -1479,56 +1623,110 @@
if (!panel)
return;
- dp_catalog_get_priv(panel);
- base = catalog->io->dp_link.base;
+ catalog = dp_catalog_get_priv(panel);
+ io_data = catalog->io.dp_link;
dp_catalog_config_spd_header(panel);
vendor = panel->spd_vendor_name;
product = panel->spd_product_description;
- dp_write(base + MMSS_DP_GENERIC1_2, ((vendor[0] & 0x7f) |
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_2, ((vendor[0] & 0x7f) |
((vendor[1] & 0x7f) << 8) |
((vendor[2] & 0x7f) << 16) |
((vendor[3] & 0x7f) << 24)));
- dp_write(base + MMSS_DP_GENERIC1_3, ((vendor[4] & 0x7f) |
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_3, ((vendor[4] & 0x7f) |
((vendor[5] & 0x7f) << 8) |
((vendor[6] & 0x7f) << 16) |
((vendor[7] & 0x7f) << 24)));
- dp_write(base + MMSS_DP_GENERIC1_4, ((product[0] & 0x7f) |
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_4, ((product[0] & 0x7f) |
((product[1] & 0x7f) << 8) |
((product[2] & 0x7f) << 16) |
((product[3] & 0x7f) << 24)));
- dp_write(base + MMSS_DP_GENERIC1_5, ((product[4] & 0x7f) |
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_5, ((product[4] & 0x7f) |
((product[5] & 0x7f) << 8) |
((product[6] & 0x7f) << 16) |
((product[7] & 0x7f) << 24)));
- dp_write(base + MMSS_DP_GENERIC1_6, ((product[8] & 0x7f) |
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_6, ((product[8] & 0x7f) |
((product[9] & 0x7f) << 8) |
((product[10] & 0x7f) << 16) |
((product[11] & 0x7f) << 24)));
- dp_write(base + MMSS_DP_GENERIC1_7, ((product[12] & 0x7f) |
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_7, ((product[12] & 0x7f) |
((product[13] & 0x7f) << 8) |
((product[14] & 0x7f) << 16) |
((product[15] & 0x7f) << 24)));
- dp_write(base + MMSS_DP_GENERIC1_8, device_type);
- dp_write(base + MMSS_DP_GENERIC1_9, 0x00);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_8, device_type);
+ dp_write(catalog, io_data, MMSS_DP_GENERIC1_9, 0x00);
- spd_cfg = dp_read(base + MMSS_DP_SDP_CFG);
+ spd_cfg = dp_read(catalog, io_data, MMSS_DP_SDP_CFG);
/* GENERIC1_SDP for SPD Infoframe */
spd_cfg |= BIT(18);
- dp_write(base + MMSS_DP_SDP_CFG, spd_cfg);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG, spd_cfg);
- spd_cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
+ spd_cfg2 = dp_read(catalog, io_data, MMSS_DP_SDP_CFG2);
/* 28 data bytes for SPD Infoframe with GENERIC1 set */
spd_cfg2 |= BIT(17);
- dp_write(base + MMSS_DP_SDP_CFG2, spd_cfg2);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG2, spd_cfg2);
- dp_write(base + MMSS_DP_SDP_CFG3, 0x1);
- dp_write(base + MMSS_DP_SDP_CFG3, 0x0);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG3, 0x1);
+ dp_write(catalog, io_data, MMSS_DP_SDP_CFG3, 0x0);
}
-struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+static void dp_catalog_get_io_buf(struct dp_catalog_private *catalog)
+{
+ struct dp_parser *parser = catalog->parser;
+
+ dp_catalog_fill_io_buf(dp_ahb);
+ dp_catalog_fill_io_buf(dp_aux);
+ dp_catalog_fill_io_buf(dp_link);
+ dp_catalog_fill_io_buf(dp_p0);
+ dp_catalog_fill_io_buf(dp_phy);
+ dp_catalog_fill_io_buf(dp_ln_tx0);
+ dp_catalog_fill_io_buf(dp_ln_tx1);
+ dp_catalog_fill_io_buf(dp_pll);
+ dp_catalog_fill_io_buf(usb3_dp_com);
+ dp_catalog_fill_io_buf(dp_mmss_cc);
+ dp_catalog_fill_io_buf(hdcp_physical);
+}
+
+static void dp_catalog_get_io(struct dp_catalog_private *catalog)
+{
+ struct dp_parser *parser = catalog->parser;
+
+ dp_catalog_fill_io(dp_ahb);
+ dp_catalog_fill_io(dp_aux);
+ dp_catalog_fill_io(dp_link);
+ dp_catalog_fill_io(dp_p0);
+ dp_catalog_fill_io(dp_phy);
+ dp_catalog_fill_io(dp_ln_tx0);
+ dp_catalog_fill_io(dp_ln_tx1);
+ dp_catalog_fill_io(dp_pll);
+ dp_catalog_fill_io(usb3_dp_com);
+ dp_catalog_fill_io(dp_mmss_cc);
+ dp_catalog_fill_io(hdcp_physical);
+}
+
+static void dp_catalog_set_exe_mode(struct dp_catalog *dp_catalog, char *mode)
+{
+ struct dp_catalog_private *catalog;
+
+ if (!dp_catalog) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ catalog = container_of(dp_catalog, struct dp_catalog_private,
+ dp_catalog);
+
+ strlcpy(catalog->exe_mode, mode, sizeof(catalog->exe_mode));
+
+ if (!strcmp(catalog->exe_mode, "hw"))
+ catalog->parser->clear_io_buf(catalog->parser);
+ else
+ dp_catalog_get_io_buf(catalog);
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser)
{
int rc = 0;
struct dp_catalog *dp_catalog;
@@ -1583,7 +1781,7 @@
.config_spd = dp_catalog_panel_config_spd,
};
- if (!io) {
+ if (!dev || !parser) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
@@ -1596,7 +1794,11 @@
}
catalog->dev = dev;
- catalog->io = io;
+ catalog->parser = parser;
+
+ dp_catalog_get_io(catalog);
+
+ strlcpy(catalog->exe_mode, "hw", sizeof(catalog->exe_mode));
dp_catalog = &catalog->dp_catalog;
@@ -1605,6 +1807,9 @@
dp_catalog->audio = audio;
dp_catalog->panel = panel;
+ dp_catalog->set_exe_mode = dp_catalog_set_exe_mode;
+ dp_catalog->get_reg_dump = dp_catalog_reg_dump;
+
return dp_catalog;
error:
return ERR_PTR(rc);
@@ -1620,5 +1825,6 @@
catalog = container_of(dp_catalog, struct dp_catalog_private,
dp_catalog);
+ catalog->parser->clear_io_buf(catalog->parser);
devm_kfree(catalog->dev, catalog);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index d03be6a..743468d 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -181,6 +181,10 @@
struct dp_catalog_ctrl ctrl;
struct dp_catalog_audio audio;
struct dp_catalog_panel panel;
+
+ void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode);
+ int (*get_reg_dump)(struct dp_catalog *dp_catalog,
+ char *mode, u8 **out_buf, u32 *out_buf_len);
};
static inline u8 dp_ecc_get_g0_value(u8 data)
@@ -248,7 +252,7 @@
return parity_byte;
}
-struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser);
void dp_catalog_put(struct dp_catalog *catalog);
#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 006f723..2e2887e 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,8 @@
struct completion video_comp;
bool orientation;
+ bool power_on;
+
atomic_t aborted;
u32 pixel_rate;
@@ -128,6 +130,11 @@
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ if (!ctrl->power_on || atomic_read(&ctrl->aborted)) {
+ pr_err("CTRL off, return\n");
+ return;
+ }
+
reinit_completion(&ctrl->idle_comp);
dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
@@ -813,6 +820,10 @@
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 5;
+ ctrl->aux->state &= ~DP_STATE_TRAIN_1_FAILED;
+ ctrl->aux->state &= ~DP_STATE_TRAIN_1_SUCCEEDED;
+ ctrl->aux->state |= DP_STATE_TRAIN_1_STARTED;
+
dp_ctrl_state_ctrl(ctrl, 0);
/* Make sure to clear the current pattern before starting a new one */
wmb();
@@ -822,18 +833,18 @@
DP_LINK_SCRAMBLING_DISABLE); /* train_1 */
if (ret <= 0) {
ret = -EINVAL;
- return ret;
+ goto end;
}
ret = dp_ctrl_update_vx_px(ctrl);
if (ret <= 0) {
ret = -EINVAL;
- return ret;
+ goto end;
}
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
- while (1) {
+ while (!atomic_read(&ctrl->aborted)) {
drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
ret = dp_ctrl_read_link_status(ctrl, link_status);
@@ -872,6 +883,13 @@
break;
}
}
+end:
+ ctrl->aux->state &= ~DP_STATE_TRAIN_1_STARTED;
+
+ if (ret)
+ ctrl->aux->state |= DP_STATE_TRAIN_1_FAILED;
+ else
+ ctrl->aux->state |= DP_STATE_TRAIN_1_SUCCEEDED;
return ret;
}
@@ -915,6 +933,10 @@
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
+ ctrl->aux->state &= ~DP_STATE_TRAIN_2_FAILED;
+ ctrl->aux->state &= ~DP_STATE_TRAIN_2_SUCCEEDED;
+ ctrl->aux->state |= DP_STATE_TRAIN_2_STARTED;
+
dp_ctrl_state_ctrl(ctrl, 0);
/* Make sure to clear the current pattern before starting a new one */
wmb();
@@ -927,14 +949,14 @@
ret = dp_ctrl_update_vx_px(ctrl);
if (ret <= 0) {
ret = -EINVAL;
- return ret;
+ goto end;
}
ctrl->catalog->set_pattern(ctrl->catalog, pattern);
ret = dp_ctrl_train_pattern_set(ctrl,
pattern | DP_RECOVERED_CLOCK_OUT_EN);
if (ret <= 0) {
ret = -EINVAL;
- return ret;
+ goto end;
}
do {
@@ -960,8 +982,14 @@
ret = -EINVAL;
break;
}
- } while (1);
+ } while (!atomic_read(&ctrl->aborted));
+end:
+ ctrl->aux->state &= ~DP_STATE_TRAIN_2_STARTED;
+ if (ret)
+ ctrl->aux->state |= DP_STATE_TRAIN_2_FAILED;
+ else
+ ctrl->aux->state |= DP_STATE_TRAIN_2_SUCCEEDED;
return ret;
}
@@ -1102,8 +1130,7 @@
return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
}
-static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl,
- bool flip, bool multi_func)
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
{
struct dp_ctrl_private *ctrl;
struct dp_catalog_ctrl *catalog;
@@ -1118,7 +1145,7 @@
ctrl->orientation = flip;
catalog = ctrl->catalog;
- if (!multi_func) {
+ if (reset) {
catalog->usb_reset(ctrl->catalog, flip);
catalog->phy_reset(ctrl->catalog);
}
@@ -1180,6 +1207,15 @@
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ if (!ctrl->power_on || atomic_read(&ctrl->aborted)) {
+ pr_err("CTRL off, return\n");
+ return -EINVAL;
+ }
+
+ ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_COMPLETED;
+ ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_FAILED;
+ ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_STARTED;
+
ctrl->dp_ctrl.push_idle(&ctrl->dp_ctrl);
ctrl->dp_ctrl.reset(&ctrl->dp_ctrl);
@@ -1219,6 +1255,13 @@
ret = dp_ctrl_setup_main_link(ctrl, true);
} while (ret == -EAGAIN);
+ ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_STARTED;
+
+ if (ret)
+ ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_FAILED;
+ else
+ ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_COMPLETED;
+
return ret;
}
@@ -1341,7 +1384,6 @@
atomic_set(&ctrl->aborted, 0);
rate = ctrl->panel->link_info.rate;
- ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
ctrl->catalog->hpd_config(ctrl->catalog, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
@@ -1396,6 +1438,7 @@
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
dp_ctrl_send_phy_test_pattern(ctrl);
+ ctrl->power_on = true;
pr_debug("End-\n");
end:
@@ -1419,6 +1462,7 @@
dp_ctrl_disable_mainlink_clocks(ctrl);
+ ctrl->power_on = false;
pr_debug("DP off done\n");
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 229c779..31d8f07 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
#include "dp_catalog.h"
struct dp_ctrl {
- int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool multi_func);
+ int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void (*deinit)(struct dp_ctrl *dp_ctrl);
int (*on)(struct dp_ctrl *dp_ctrl);
void (*off)(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 0b3d903..78bea02 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
#include <linux/debugfs.h>
-#include "dp_parser.h"
#include "dp_power.h"
#include "dp_catalog.h"
#include "dp_aux.h"
@@ -36,15 +35,56 @@
u8 *dpcd;
u32 dpcd_size;
+ int vdo;
+
+ char exe_mode[SZ_32];
+ char reg_dump[SZ_32];
+
struct dp_usbpd *usbpd;
struct dp_link *link;
struct dp_panel *panel;
+ struct dp_aux *aux;
+ struct dp_catalog *catalog;
struct drm_connector **connector;
struct device *dev;
-
+ struct work_struct sim_work;
struct dp_debug dp_debug;
};
+static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
+{
+ int rc = 0;
+
+ if (!debug->edid) {
+ debug->edid = devm_kzalloc(debug->dev, SZ_256, GFP_KERNEL);
+ if (!debug->edid) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ debug->edid_size = SZ_256;
+ }
+end:
+ return rc;
+}
+
+static int dp_debug_get_dpcd_buf(struct dp_debug_private *debug)
+{
+ int rc = 0;
+
+ if (!debug->dpcd) {
+ debug->dpcd = devm_kzalloc(debug->dev, SZ_1K, GFP_KERNEL);
+ if (!debug->dpcd) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ debug->dpcd_size = SZ_1K;
+ }
+end:
+ return rc;
+}
+
static ssize_t dp_debug_write_edid(struct file *file,
const char __user *user_buff, size_t count, loff_t *ppos)
{
@@ -75,7 +115,8 @@
edid_size = size / char_to_nib;
buf_t = buf;
- memset(debug->edid, 0, debug->edid_size);
+ if (dp_debug_get_edid_buf(debug))
+ goto bail;
if (edid_size != debug->edid_size) {
pr_debug("clearing debug edid\n");
@@ -100,13 +141,13 @@
buf_t += char_to_nib;
}
- print_hex_dump(KERN_DEBUG, "DEBUG EDID: ", DUMP_PREFIX_NONE,
- 16, 1, debug->edid, debug->edid_size, false);
-
edid = debug->edid;
bail:
kfree(buf);
- debug->panel->set_edid(debug->panel, edid);
+
+ if (!debug->dp_debug.sim_mode)
+ debug->panel->set_edid(debug->panel, edid);
+
return rc;
}
@@ -119,8 +160,8 @@
size_t dpcd_size = 0;
size_t size = 0, dpcd_buf_index = 0;
ssize_t rc = count;
-
- pr_debug("count=%zu\n", count);
+ char offset_ch[5];
+ u32 offset;
if (!debug)
return -ENODEV;
@@ -128,7 +169,7 @@
if (*ppos)
goto bail;
- size = min_t(size_t, count, SZ_32);
+ size = min_t(size_t, count, SZ_2K);
buf = kzalloc(size, GFP_KERNEL);
if (!buf) {
@@ -139,16 +180,30 @@
if (copy_from_user(buf, user_buff, size))
goto bail;
- dpcd_size = size / char_to_nib;
- buf_t = buf;
+ memcpy(offset_ch, buf, 4);
+ offset_ch[4] = '\0';
- memset(debug->dpcd, 0, debug->dpcd_size);
-
- if (dpcd_size != debug->dpcd_size) {
- pr_debug("clearing debug dpcd\n");
+ if (kstrtoint(offset_ch, 16, &offset)) {
+ pr_err("offset kstrtoint error\n");
goto bail;
}
+ if (dp_debug_get_dpcd_buf(debug))
+ goto bail;
+
+ if (offset == 0xFFFF) {
+ pr_err("clearing dpcd\n");
+ memset(debug->dpcd, 0, debug->dpcd_size);
+ goto bail;
+ }
+
+ size -= 4;
+
+ dpcd_size = size / char_to_nib;
+ buf_t = buf + 4;
+
+ dpcd_buf_index = offset;
+
while (dpcd_size--) {
char t[3];
int d;
@@ -167,16 +222,39 @@
buf_t += char_to_nib;
}
- print_hex_dump(KERN_DEBUG, "DEBUG DPCD: ", DUMP_PREFIX_NONE,
- 8, 1, debug->dpcd, debug->dpcd_size, false);
-
dpcd = debug->dpcd;
bail:
kfree(buf);
- debug->panel->set_dpcd(debug->panel, dpcd);
+ if (debug->dp_debug.sim_mode)
+ debug->aux->dpcd_updated(debug->aux);
+ else
+ debug->panel->set_dpcd(debug->panel, dpcd);
+
return rc;
}
+static ssize_t dp_debug_read_dpcd(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char buf[SZ_8];
+ u32 len = 0;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ len += snprintf(buf, SZ_8, "0x%x\n", debug->aux->reg);
+
+ if (copy_to_user(user_buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
static ssize_t dp_debug_write_hpd(struct file *file,
const char __user *user_buff, size_t count, loff_t *ppos)
{
@@ -323,6 +401,36 @@
return len;
}
+static ssize_t dp_debug_write_exe_mode(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char *buf;
+ size_t len = 0;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ len = min_t(size_t, count, SZ_32 - 1);
+ buf = memdup_user(user_buff, len);
+ buf[len] = '\0';
+
+ if (sscanf(buf, "%3s", debug->exe_mode) != 1)
+ goto end;
+
+ if (strcmp(debug->exe_mode, "hw") &&
+ strcmp(debug->exe_mode, "sw") &&
+ strcmp(debug->exe_mode, "all"))
+ goto end;
+
+ debug->catalog->set_exe_mode(debug->catalog, debug->exe_mode);
+end:
+ return len;
+}
+
static ssize_t dp_debug_read_connected(struct file *file,
char __user *user_buff, size_t count, loff_t *ppos)
{
@@ -421,7 +529,6 @@
struct dp_debug_private *debug = file->private_data;
char *buf;
u32 len = 0, rc = 0;
- u64 lclk = 0;
u32 max_size = SZ_4K;
if (!debug)
@@ -434,124 +541,60 @@
if (!buf)
return -ENOMEM;
- rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
+ rc = snprintf(buf + len, max_size, "\tstate=0x%x\n", debug->aux->state);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
- rc = snprintf(buf + len, max_size,
- "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
- debug->panel->max_pclk_khz);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdrm_dp_link\n\t\trate = %u\n",
+ rc = snprintf(buf + len, max_size, "\tlink_rate=%u\n",
debug->panel->link_info.rate);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
- rc = snprintf(buf + len, max_size,
- "\t\tnum_lanes = %u\n",
+ rc = snprintf(buf + len, max_size, "\tnum_lanes=%u\n",
debug->panel->link_info.num_lanes);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
- rc = snprintf(buf + len, max_size,
- "\t\tcapabilities = %lu\n",
- debug->panel->link_info.capabilities);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+ rc = snprintf(buf + len, max_size, "\tresolution=%dx%d@%dHz\n",
debug->panel->pinfo.h_active,
- debug->panel->pinfo.v_active);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tback_porch = %dx%d\n",
- debug->panel->pinfo.h_back_porch,
- debug->panel->pinfo.v_back_porch);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tfront_porch = %dx%d\n",
- debug->panel->pinfo.h_front_porch,
- debug->panel->pinfo.v_front_porch);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tsync_width = %dx%d\n",
- debug->panel->pinfo.h_sync_width,
- debug->panel->pinfo.v_sync_width);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tactive_low = %dx%d\n",
- debug->panel->pinfo.h_active_low,
- debug->panel->pinfo.v_active_low);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\th_skew = %d\n",
- debug->panel->pinfo.h_skew);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\trefresh rate = %d\n",
+ debug->panel->pinfo.v_active,
debug->panel->pinfo.refresh_rate);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
- rc = snprintf(buf + len, max_size,
- "\t\tpixel clock khz = %d\n",
+ rc = snprintf(buf + len, max_size, "\tpclock=%dKHz\n",
debug->panel->pinfo.pixel_clk_khz);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
- rc = snprintf(buf + len, max_size,
- "\t\tbpp = %d\n",
+ rc = snprintf(buf + len, max_size, "\tbpp=%d\n",
debug->panel->pinfo.bpp);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
/* Link Information */
- rc = snprintf(buf + len, max_size,
- "\tdp_link:\n\t\ttest_requested = %d\n",
- debug->link->sink_request);
+ rc = snprintf(buf + len, max_size, "\ttest_req=%s\n",
+ dp_link_get_test_name(debug->link->sink_request));
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
- "\t\tlane_count = %d\n", debug->link->link_params.lane_count);
+ "\tlane_count=%d\n", debug->link->link_params.lane_count);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
- "\t\tbw_code = %d\n", debug->link->link_params.bw_code);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- lclk = drm_dp_bw_code_to_link_rate(
- debug->link->link_params.bw_code) * 1000;
- rc = snprintf(buf + len, max_size,
- "\t\tlclk = %lld\n", lclk);
+ "\tbw_code=%d\n", debug->link->link_params.bw_code);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
- "\t\tv_level = %d\n", debug->link->phy_params.v_level);
+ "\tv_level=%d\n", debug->link->phy_params.v_level);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
rc = snprintf(buf + len, max_size,
- "\t\tp_level = %d\n", debug->link->phy_params.p_level);
+ "\tp_level=%d\n", debug->link->phy_params.p_level);
if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
goto error;
@@ -665,6 +708,8 @@
pr_err("invalid input\n");
len = -EINVAL;
}
+
+ debug->panel->setup_hdr(debug->panel, &c_state->hdr_meta);
end:
return len;
}
@@ -814,6 +859,155 @@
return rc;
}
+static ssize_t dp_debug_write_sim(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char buf[SZ_8];
+ size_t len = 0;
+ int sim;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ /* Leave room for termination char */
+ len = min_t(size_t, count, SZ_8 - 1);
+ if (copy_from_user(buf, user_buff, len))
+ goto end;
+
+ buf[len] = '\0';
+
+ if (kstrtoint(buf, 10, &sim) != 0)
+ goto end;
+
+ if (sim) {
+ if (dp_debug_get_edid_buf(debug))
+ goto end;
+
+ if (dp_debug_get_dpcd_buf(debug))
+ goto error;
+ } else {
+ if (debug->edid) {
+ devm_kfree(debug->dev, debug->edid);
+ debug->edid = NULL;
+ }
+
+ if (debug->dpcd) {
+ devm_kfree(debug->dev, debug->dpcd);
+ debug->dpcd = NULL;
+ }
+ }
+
+ debug->dp_debug.sim_mode = !!sim;
+
+ debug->aux->set_sim_mode(debug->aux, debug->dp_debug.sim_mode,
+ debug->edid, debug->dpcd);
+end:
+ return len;
+error:
+ devm_kfree(debug->dev, debug->edid);
+ return len;
+}
+
+static ssize_t dp_debug_write_attention(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char buf[SZ_8];
+ size_t len = 0;
+ int vdo;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ /* Leave room for termination char */
+ len = min_t(size_t, count, SZ_8 - 1);
+ if (copy_from_user(buf, user_buff, len))
+ goto end;
+
+ buf[len] = '\0';
+
+ if (kstrtoint(buf, 10, &vdo) != 0)
+ goto end;
+
+ debug->vdo = vdo;
+
+ schedule_work(&debug->sim_work);
+end:
+ return len;
+}
+
+static ssize_t dp_debug_write_dump(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char buf[SZ_32];
+ size_t len = 0;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ /* Leave room for termination char */
+ len = min_t(size_t, count, SZ_32 - 1);
+ if (copy_from_user(buf, user_buff, len))
+ goto end;
+
+ buf[len] = '\0';
+
+ if (sscanf(buf, "%31s", debug->reg_dump) != 1)
+ goto end;
+
+ /* qfprom register dump not supported */
+ if (!strcmp(debug->reg_dump, "qfprom_physical"))
+ strlcpy(debug->reg_dump, "clear", sizeof(debug->reg_dump));
+end:
+ return len;
+}
+
+static ssize_t dp_debug_read_dump(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ int rc = 0;
+ struct dp_debug_private *debug = file->private_data;
+ u8 *buf = NULL;
+ u32 len = 0;
+ char prefix[SZ_32];
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ if (!debug->usbpd->hpd_high || !strlen(debug->reg_dump))
+ goto end;
+
+ rc = debug->catalog->get_reg_dump(debug->catalog,
+ debug->reg_dump, &buf, &len);
+ if (rc)
+ goto end;
+
+ snprintf(prefix, sizeof(prefix), "%s: ", debug->reg_dump);
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
+ 16, 4, buf, len, false);
+
+ if (copy_to_user(user_buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+end:
+ return len;
+}
+
static const struct file_operations dp_debug_fops = {
.open = simple_open,
.read = dp_debug_read_info,
@@ -838,6 +1032,7 @@
static const struct file_operations dpcd_fops = {
.open = simple_open,
.write = dp_debug_write_dpcd,
+ .read = dp_debug_read_dpcd,
};
static const struct file_operations connected_fops = {
@@ -850,6 +1045,10 @@
.read = dp_debug_bw_code_read,
.write = dp_debug_bw_code_write,
};
+static const struct file_operations exe_mode_fops = {
+ .open = simple_open,
+ .write = dp_debug_write_exe_mode,
+};
static const struct file_operations tpg_fops = {
.open = simple_open,
@@ -863,6 +1062,22 @@
.read = dp_debug_read_hdr,
};
+static const struct file_operations sim_fops = {
+ .open = simple_open,
+ .write = dp_debug_write_sim,
+};
+
+static const struct file_operations attention_fops = {
+ .open = simple_open,
+ .write = dp_debug_write_attention,
+};
+
+static const struct file_operations dump_fops = {
+ .open = simple_open,
+ .write = dp_debug_write_dump,
+ .read = dp_debug_read_dump,
+};
+
static int dp_debug_init(struct dp_debug *dp_debug)
{
int rc = 0;
@@ -925,7 +1140,14 @@
rc = PTR_ERR(file);
pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
DEBUG_NAME, rc);
- goto error_remove_dir;
+ }
+
+ file = debugfs_create_file("exe_mode", 0644, dir,
+ debug, &exe_mode_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs register failed, rc=%d\n",
+ DEBUG_NAME, rc);
}
file = debugfs_create_file("edid", 0644, dir,
@@ -965,6 +1187,36 @@
goto error_remove_dir;
}
+ file = debugfs_create_file("sim", 0644, dir,
+ debug, &sim_fops);
+
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs sim failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ goto error_remove_dir;
+ }
+
+ file = debugfs_create_file("attention", 0644, dir,
+ debug, &attention_fops);
+
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs attention failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ goto error_remove_dir;
+ }
+
+ file = debugfs_create_file("dump", 0644, dir,
+ debug, &dump_fops);
+
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs dump failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ goto error_remove_dir;
+ }
+
return 0;
error_remove_dir:
@@ -975,15 +1227,24 @@
return rc;
}
+static void dp_debug_sim_work(struct work_struct *work)
+{
+ struct dp_debug_private *debug =
+ container_of(work, typeof(*debug), sim_work);
+
+ debug->usbpd->simulate_attention(debug->usbpd, debug->vdo);
+}
+
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector)
+ struct dp_aux *aux, struct drm_connector **connector,
+ struct dp_catalog *catalog)
{
int rc = 0;
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
- if (!dev || !panel || !usbpd || !link) {
+ if (!dev || !panel || !usbpd || !link || !catalog) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
@@ -995,30 +1256,16 @@
goto error;
}
- debug->edid = devm_kzalloc(dev, SZ_256, GFP_KERNEL);
- if (!debug->edid) {
- rc = -ENOMEM;
- kfree(debug);
- goto error;
- }
-
- debug->edid_size = SZ_256;
-
- debug->dpcd = devm_kzalloc(dev, SZ_16, GFP_KERNEL);
- if (!debug->dpcd) {
- rc = -ENOMEM;
- kfree(debug);
- goto error;
- }
-
- debug->dpcd_size = SZ_16;
+ INIT_WORK(&debug->sim_work, dp_debug_sim_work);
debug->dp_debug.debug_en = false;
debug->usbpd = usbpd;
debug->link = link;
debug->panel = panel;
+ debug->aux = aux;
debug->dev = dev;
debug->connector = connector;
+ debug->catalog = catalog;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
@@ -1061,7 +1308,11 @@
dp_debug_deinit(dp_debug);
- devm_kfree(debug->dev, debug->edid);
- devm_kfree(debug->dev, debug->dpcd);
+ if (debug->edid)
+ devm_kfree(debug->dev, debug->edid);
+
+ if (debug->dpcd)
+ devm_kfree(debug->dev, debug->dpcd);
+
devm_kfree(debug->dev, debug);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 3b2d23e..2643f70 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
#include "dp_panel.h"
#include "dp_link.h"
#include "dp_usbpd.h"
+#include "dp_aux.h"
/**
* struct dp_debug
@@ -29,6 +30,7 @@
*/
struct dp_debug {
bool debug_en;
+ bool sim_mode;
bool psm_enabled;
int aspect_ratio;
int vdisplay;
@@ -45,6 +47,7 @@
* @usbpd: instance of usbpd module
* @link: instance of link module
* @connector: double pointer to display connector
+ * @catalog: instance of catalog module
* return: pointer to allocated debug module data
*
* This function sets up the debug module and provides a way
@@ -52,7 +55,8 @@
*/
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector);
+ struct dp_aux *aux, struct drm_connector **connector,
+ struct dp_catalog *catalog);
/**
* dp_debug_put()
*
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index f2c0a0e..b1d03d6 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,8 @@
#include <linux/of_irq.h>
#include <linux/hdcp_qseecom.h>
+#include "sde_connector.h"
+
#include "msm_drv.h"
#include "dp_usbpd.h"
#include "dp_parser.h"
@@ -87,11 +89,12 @@
struct workqueue_struct *wq;
struct delayed_work hdcp_cb_work;
- struct work_struct connect_work;
+ struct delayed_work connect_work;
struct work_struct attention_work;
struct mutex hdcp_mutex;
struct mutex session_lock;
int hdcp_status;
+ unsigned long audio_status;
};
static const struct of_device_id dp_dt_match[] = {
@@ -99,6 +102,11 @@
{}
};
+static bool dp_display_framework_ready(struct dp_display_private *dp)
+{
+ return dp->dp_display.post_open ? false : true;
+}
+
static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
{
return dp->hdcp.feature_enabled &&
@@ -266,6 +274,7 @@
static int dp_display_initialize_hdcp(struct dp_display_private *dp)
{
struct sde_hdcp_init_data hdcp_init_data;
+ struct dp_parser *parser;
int rc = 0;
if (!dp) {
@@ -273,6 +282,8 @@
return -EINVAL;
}
+ parser = dp->parser;
+
mutex_init(&dp->hdcp_mutex);
hdcp_init_data.client_id = HDCP_CLIENT_DP;
@@ -282,13 +293,14 @@
hdcp_init_data.mutex = &dp->hdcp_mutex;
hdcp_init_data.sec_access = true;
hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
- hdcp_init_data.core_io = &dp->parser->io.ctrl_io;
- hdcp_init_data.dp_ahb = &dp->parser->io.dp_ahb;
- hdcp_init_data.dp_aux = &dp->parser->io.dp_aux;
- hdcp_init_data.dp_link = &dp->parser->io.dp_link;
- hdcp_init_data.dp_p0 = &dp->parser->io.dp_p0;
- hdcp_init_data.qfprom_io = &dp->parser->io.qfprom_io;
- hdcp_init_data.hdcp_io = &dp->parser->io.hdcp_io;
+ hdcp_init_data.dp_ahb = &parser->get_io(parser, "dp_ahb")->io;
+ hdcp_init_data.dp_aux = &parser->get_io(parser, "dp_aux")->io;
+ hdcp_init_data.dp_link = &parser->get_io(parser, "dp_link")->io;
+ hdcp_init_data.dp_p0 = &parser->get_io(parser, "dp_p0")->io;
+ hdcp_init_data.qfprom_io = &parser->get_io(parser,
+ "qfprom_physical")->io;
+ hdcp_init_data.hdcp_io = &parser->get_io(parser,
+ "hdcp_physical")->io;
hdcp_init_data.revision = &dp->panel->link_info.revision;
dp->hdcp.hdcp1 = sde_hdcp_1x_init(&hdcp_init_data);
@@ -448,35 +460,39 @@
}
/* if cable is already connected, send notification */
- if (dp_display->is_connected)
- dp_display_send_hpd_event(dp);
+ if (dp->usbpd->hpd_high)
+ queue_delayed_work(dp->wq, &dp->connect_work, HZ * 10);
else
dp_display->post_open = NULL;
-
}
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
+ u32 timeout_sec;
+ int ret = 0;
+
dp->dp_display.is_connected = hpd;
- /* in case, framework is not yet up, don't notify hpd */
- if (dp->dp_display.post_open)
- return 0;
+ if (dp_display_framework_ready(dp))
+ timeout_sec = 5;
+ else
+ timeout_sec = 10;
+
+ dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
reinit_completion(&dp->notification_comp);
dp_display_send_hpd_event(dp);
- if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 5)) {
+ if (!wait_for_completion_timeout(&dp->notification_comp,
+ HZ * timeout_sec)) {
pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
- /* cancel any pending request */
- dp->ctrl->abort(dp->ctrl);
- dp->aux->abort(dp->aux);
-
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ dp->aux->state &= ~DP_STATE_NOTIFICATION_SENT;
+
+ return ret;
}
static int dp_display_process_hpd_high(struct dp_display_private *dp)
@@ -497,26 +513,21 @@
rc = dp->panel->read_sink_caps(dp->panel,
dp->dp_display.connector, dp->usbpd->multi_func);
if (rc) {
- if (rc == -ETIMEDOUT) {
- pr_err("Sink cap read failed, skip notification\n");
+ /*
+ * ETIMEDOUT --> cable may have been removed
+ * ENOTCONN --> no downstream device connected
+ */
+ if (rc == -ETIMEDOUT || rc == -ENOTCONN)
goto end;
- } else {
+ else
goto notify;
- }
- }
-
- dp->link->process_request(dp->link);
-
- if (dp_display_is_sink_count_zero(dp)) {
- pr_debug("no downstream devices connected\n");
- rc = -EINVAL;
- goto end;
}
edid = dp->panel->edid_ctrl->edid;
dp->audio_supported = drm_detect_monitor_audio(edid);
+ dp->link->process_request(dp->link);
dp->panel->handle_sink_request(dp->panel);
dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz;
@@ -530,6 +541,7 @@
static void dp_display_host_init(struct dp_display_private *dp)
{
bool flip = false;
+ bool reset;
if (dp->core_initialized) {
pr_debug("DP core already initialized\n");
@@ -539,8 +551,10 @@
if (dp->usbpd->orientation == ORIENTATION_CC2)
flip = true;
+ reset = dp->debug->sim_mode ? false : !dp->usbpd->multi_func;
+
dp->power->init(dp->power, flip);
- dp->ctrl->init(dp->ctrl, flip, dp->usbpd->multi_func);
+ dp->ctrl->init(dp->ctrl, flip, reset);
enable_irq(dp->irq);
dp->core_initialized = true;
}
@@ -556,6 +570,7 @@
dp->power->deinit(dp->power);
disable_irq(dp->irq);
dp->core_initialized = false;
+ dp->aux->state = 0;
}
static int dp_display_process_hpd_low(struct dp_display_private *dp)
@@ -573,9 +588,9 @@
if (dp->audio_supported)
dp->audio->off(dp->audio);
- rc = dp_display_send_hpd_notification(dp, false);
+ dp->audio_status = -ENODEV;
- dp->aux->deinit(dp->aux);
+ rc = dp_display_send_hpd_notification(dp, false);
dp->panel->video_test = false;
@@ -602,8 +617,9 @@
dp_display_host_init(dp);
- if (dp->usbpd->hpd_high)
- queue_work(dp->wq, &dp->connect_work);
+ /* check for hpd high and framework ready */
+ if (dp->usbpd->hpd_high && dp_display_framework_ready(dp))
+ queue_delayed_work(dp->wq, &dp->connect_work, 0);
end:
return rc;
}
@@ -620,6 +636,8 @@
dp->ctrl->push_idle(dp->ctrl);
dp->ctrl->off(dp->ctrl);
+ dp->panel->deinit(dp->panel);
+ dp->aux->deinit(dp->aux);
dp->power_on = false;
}
@@ -659,6 +677,13 @@
goto end;
}
+ /*
+ * In case cable/dongle is disconnected during adb shell stop,
+ * reset psm_enabled flag to false since it is no more needed
+ */
+ if (dp->dp_display.post_open)
+ dp->debug->psm_enabled = false;
+
if (dp->debug->psm_enabled)
dp->link->psm_config(dp->link, &dp->panel->link_info, true);
@@ -667,6 +692,7 @@
dp->aux->abort(dp->aux);
/* wait for idle state */
+ cancel_delayed_work(&dp->connect_work);
flush_workqueue(dp->wq);
dp_display_handle_disconnect(dp);
@@ -678,13 +704,13 @@
{
mutex_lock(&dp->audio->ops_lock);
- if (dp->audio_supported)
+ if (dp->audio_supported && !IS_ERR_VALUE(dp->audio_status))
dp->audio->off(dp->audio);
dp->ctrl->link_maintenance(dp->ctrl);
- if (dp->audio_supported)
- dp->audio->on(dp->audio);
+ if (dp->audio_supported && !IS_ERR_VALUE(dp->audio_status))
+ dp->audio_status = dp->audio->on(dp->audio);
mutex_unlock(&dp->audio->ops_lock);
}
@@ -707,7 +733,7 @@
return;
}
- queue_work(dp->wq, &dp->connect_work);
+ queue_delayed_work(dp->wq, &dp->connect_work, 0);
return;
}
@@ -753,17 +779,19 @@
return -ENODEV;
}
- if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high) {
+ if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high &&
+ dp->power_on) {
dp->link->process_request(dp->link);
queue_work(dp->wq, &dp->attention_work);
} else if (dp->usbpd->hpd_high) {
- queue_work(dp->wq, &dp->connect_work);
+ queue_delayed_work(dp->wq, &dp->connect_work, 0);
} else {
/* cancel any pending request */
dp->ctrl->abort(dp->ctrl);
dp->aux->abort(dp->aux);
/* wait for idle state */
+ cancel_delayed_work(&dp->connect_work);
flush_workqueue(dp->wq);
dp_display_handle_disconnect(dp);
@@ -774,7 +802,8 @@
static void dp_display_connect_work(struct work_struct *work)
{
- struct dp_display_private *dp = container_of(work,
+ struct delayed_work *dw = to_delayed_work(work);
+ struct dp_display_private *dp = container_of(dw,
struct dp_display_private, connect_work);
if (dp->dp_display.is_connected) {
@@ -828,7 +857,7 @@
goto error_catalog;
}
- dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+ dp->catalog = dp_catalog_get(dev, dp->parser);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
pr_err("failed to initialize catalog, rc = %d\n", rc);
@@ -920,7 +949,8 @@
}
dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
- dp->link, &dp->dp_display.connector);
+ dp->link, dp->aux, &dp->dp_display.connector,
+ dp->catalog);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
pr_err("failed to initialize debug, rc = %d\n", rc);
@@ -1070,7 +1100,7 @@
if (dp->audio_supported) {
dp->audio->bw_code = dp->link->link_params.bw_code;
dp->audio->lane_count = dp->link->link_params.lane_count;
- dp->audio->on(dp->audio);
+ dp->audio_status = dp->audio->on(dp->audio);
}
dp_display_update_hdcp_info(dp);
@@ -1081,9 +1111,12 @@
dp->hdcp_status = HDCP_STATE_AUTHENTICATING;
queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ / 2);
}
+
+ dp->panel->setup_hdr(dp->panel, NULL);
end:
/* clear framework event notifier */
dp_display->post_open = NULL;
+ dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
complete_all(&dp->notification_comp);
mutex_unlock(&dp->session_lock);
@@ -1116,6 +1149,14 @@
dp->hdcp.ops->off(dp->hdcp.data);
}
+ if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) {
+ if (dp->audio_supported)
+ dp->audio->off(dp->audio);
+
+ dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+ dp->debug->psm_enabled = true;
+ }
+
dp->ctrl->push_idle(dp->ctrl);
end:
mutex_unlock(&dp->session_lock);
@@ -1125,6 +1166,8 @@
static int dp_display_disable(struct dp_display *dp_display)
{
struct dp_display_private *dp;
+ struct drm_connector *connector;
+ struct sde_connector_state *c_state;
if (!dp_display) {
pr_err("invalid input\n");
@@ -1132,6 +1175,8 @@
}
dp = container_of(dp_display, struct dp_display_private, dp_display);
+ connector = dp->dp_display.connector;
+ c_state = to_sde_connector_state(connector->state);
mutex_lock(&dp->session_lock);
@@ -1142,9 +1187,27 @@
dp->ctrl->off(dp->ctrl);
dp->panel->deinit(dp->panel);
+ dp->aux->deinit(dp->aux);
+ connector->hdr_eotf = 0;
+ connector->hdr_metadata_type_one = 0;
+ connector->hdr_max_luminance = 0;
+ connector->hdr_avg_luminance = 0;
+ connector->hdr_min_luminance = 0;
+
+ memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
+
+ /*
+ * In case of framework reboot, the DP off sequence is executed without
+ * any notification from driver. Initialize post_open callback to notify
+ * DP connection once framework restarts.
+ */
+ if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) {
+ dp_display->post_open = dp_display_post_open;
+ dp->dp_display.is_connected = false;
+ }
dp->power_on = false;
-
+ dp->aux->state = DP_STATE_CTRL_POWERED_OFF;
end:
complete_all(&dp->notification_comp);
mutex_unlock(&dp->session_lock);
@@ -1252,8 +1315,7 @@
return ret;
}
-
-static int dp_display_pre_kickoff(struct dp_display *dp_display,
+static int dp_display_config_hdr(struct dp_display *dp_display,
struct drm_msm_ext_hdr_metadata *hdr)
{
int rc = 0;
@@ -1266,8 +1328,7 @@
dp = container_of(dp_display, struct dp_display_private, dp_display);
- if (hdr->hdr_supported && dp->panel->hdr_supported(dp->panel))
- rc = dp->panel->setup_hdr(dp->panel, hdr);
+ rc = dp->panel->setup_hdr(dp->panel, hdr);
return rc;
}
@@ -1281,7 +1342,7 @@
}
INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
- INIT_WORK(&dp->connect_work, dp_display_connect_work);
+ INIT_DELAYED_WORK(&dp->connect_work, dp_display_connect_work);
INIT_WORK(&dp->attention_work, dp_display_attention_work);
return 0;
@@ -1308,6 +1369,7 @@
dp->pdev = pdev;
dp->name = "drm_dp";
+ dp->audio_status = -ENODEV;
rc = dp_display_create_workqueue(dp);
if (rc) {
@@ -1332,7 +1394,7 @@
g_dp_display->get_debug = dp_get_debug;
g_dp_display->post_open = dp_display_post_open;
g_dp_display->post_init = dp_display_post_init;
- g_dp_display->pre_kickoff = dp_display_pre_kickoff;
+ g_dp_display->config_hdr = dp_display_config_hdr;
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index c55e6c8..266de5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,7 +43,7 @@
int (*request_irq)(struct dp_display *dp_display);
struct dp_debug *(*get_debug)(struct dp_display *dp_display);
void (*post_open)(struct dp_display *dp_display);
- int (*pre_kickoff)(struct dp_display *dp_display,
+ int (*config_hdr)(struct dp_display *dp_display,
struct drm_msm_ext_hdr_metadata *hdr_meta);
void (*post_init)(struct dp_display *dp_display);
};
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 7746b8e..b834230 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -276,18 +276,17 @@
.mode_set = dp_bridge_mode_set,
};
-int dp_connector_pre_kickoff(struct drm_connector *connector,
- void *display,
- struct msm_display_kickoff_params *params)
+int dp_connector_config_hdr(void *display,
+ struct sde_connector_state *c_state)
{
struct dp_display *dp = display;
- if (!connector || !display || !params) {
+ if (!display || !c_state) {
pr_err("invalid params\n");
return -EINVAL;
}
- return dp->pre_kickoff(dp, params->hdr_meta);
+ return dp->config_hdr(dp, &c_state->hdr_meta);
}
int dp_connector_post_init(struct drm_connector *connector, void *display)
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 89b0a7e..3ca10c2 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,15 +32,13 @@
};
/**
- * dp_connector_pre_kickoff - callback to perform pre kickoff initialization
- * @connector: Pointer to drm connector structure
+ * dp_connector_config_hdr - callback to configure HDR
* @display: Pointer to private display handle
- * @params: Pointer to kickoff parameters
+ * @c_state: connect state data
* Returns: Zero on success
*/
-int dp_connector_pre_kickoff(struct drm_connector *connector,
- void *display,
- struct msm_display_kickoff_params *params);
+int dp_connector_config_hdr(void *display,
+ struct sde_connector_state *c_state);
/**
* dp_connector_post_init - callback to perform additional initialization steps
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 3ca247c..05629dd 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -724,24 +724,6 @@
return ret;
}
-static char *dp_link_get_test_name(u32 test_requested)
-{
- switch (test_requested) {
- case DP_TEST_LINK_TRAINING:
- return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING);
- case DP_TEST_LINK_VIDEO_PATTERN:
- return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN);
- case DP_TEST_LINK_EDID_READ:
- return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ);
- case DP_TEST_LINK_PHY_TEST_PATTERN:
- return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN);
- case DP_TEST_LINK_AUDIO_PATTERN:
- return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN);
- default:
- return "unknown";
- }
-}
-
/**
* dp_link_is_video_audio_test_requested() - checks for audio/video link request
* @link: link requested by the sink
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 6f79b6a..46d30a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,24 @@
u32 bw_code;
};
+static inline char *dp_link_get_test_name(u32 test_requested)
+{
+ switch (test_requested) {
+ case DP_TEST_LINK_TRAINING:
+ return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING);
+ case DP_TEST_LINK_VIDEO_PATTERN:
+ return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN);
+ case DP_TEST_LINK_EDID_READ:
+ return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ);
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN);
+ case DP_TEST_LINK_AUDIO_PATTERN:
+ return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN);
+ default:
+ return "unknown";
+ }
+}
+
struct dp_link {
u32 sink_request;
u32 test_response;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 0401760..7132699 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -136,9 +136,6 @@
goto end;
}
-
- print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DPCD: ",
- DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false);
}
rlen = drm_dp_dpcd_read(panel->aux->drm_aux,
@@ -275,46 +272,44 @@
static int dp_panel_read_edid(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
+ int ret = 0;
struct dp_panel_private *panel;
if (!dp_panel) {
pr_err("invalid input\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto end;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->custom_edid) {
pr_debug("skip edid read in debug mode\n");
- return 0;
+ goto end;
}
sde_get_edid(connector, &panel->aux->drm_aux->ddc,
(void **)&dp_panel->edid_ctrl);
if (!dp_panel->edid_ctrl->edid) {
pr_err("EDID read failed\n");
- } else {
- u8 *buf = (u8 *)dp_panel->edid_ctrl->edid;
- u32 size = buf[0x7E] ? 256 : 128;
-
- print_hex_dump(KERN_DEBUG, "[drm-dp] SINK EDID: ",
- DUMP_PREFIX_NONE, 16, 1, buf, size, false);
-
- return 0;
+ ret = -EINVAL;
+ goto end;
}
-
- return -EINVAL;
+end:
+ return ret;
}
static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector, bool multi_func)
{
- int rc = 0;
+ int rc = 0, rlen, count, downstream_ports;
+ const int count_len = 1;
struct dp_panel_private *panel;
if (!dp_panel || !connector) {
pr_err("invalid input\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto end;
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
@@ -327,19 +322,35 @@
dp_panel->max_bw_code)) {
if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) {
pr_err("DPCD read failed, return early\n");
- return rc;
+ goto end;
}
pr_err("panel dpcd read failed/incorrect, set default params\n");
dp_panel_set_default_link_params(dp_panel);
}
+ downstream_ports = dp_panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT;
+
+ if (downstream_ports) {
+ rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT,
+ &count, count_len);
+ if (rlen == count_len) {
+ count = DP_GET_SINK_COUNT(count);
+ if (!count) {
+ pr_err("no downstream ports connected\n");
+ rc = -ENOTCONN;
+ goto end;
+ }
+ }
+ }
+
rc = dp_panel_read_edid(dp_panel, connector);
if (rc) {
pr_err("panel edid read failed, set failsafe mode\n");
return rc;
}
-
- return 0;
+end:
+ return rc;
}
static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
@@ -648,6 +659,7 @@
{
int rc = 0;
struct dp_panel_private *panel;
+ struct dp_catalog_hdr_data *hdr;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -655,11 +667,13 @@
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ hdr = &panel->catalog->hdr_data;
if (!panel->custom_edid)
sde_free_edid((void **)&dp_panel->edid_ctrl);
memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
+ memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
panel->panel_on = false;
return rc;
@@ -706,30 +720,6 @@
(panel->minor >= 4 || panel->vscext_supported);
}
-static bool dp_panel_is_validate_hdr_state(struct dp_panel_private *panel,
- struct drm_msm_ext_hdr_metadata *hdr_meta)
-{
- struct drm_msm_ext_hdr_metadata *panel_hdr_meta =
- &panel->catalog->hdr_data.hdr_meta;
-
- if (!hdr_meta)
- goto end;
-
- /* bail out if HDR not active */
- if (hdr_meta->hdr_state == HDR_DISABLED &&
- panel->hdr_state == HDR_DISABLED)
- goto end;
-
- /* bail out if same meta data is received */
- if (hdr_meta->hdr_state == HDR_ENABLED &&
- panel_hdr_meta->eotf == hdr_meta->eotf)
- goto end;
-
- return true;
-end:
- return false;
-}
-
static int dp_panel_setup_hdr(struct dp_panel *dp_panel,
struct drm_msm_ext_hdr_metadata *hdr_meta)
{
@@ -744,14 +734,18 @@
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ hdr = &panel->catalog->hdr_data;
- if (!dp_panel_is_validate_hdr_state(panel, hdr_meta))
- goto end;
+ /* use cached meta data in case meta data not provided */
+ if (!hdr_meta) {
+ if (hdr->hdr_meta.hdr_state)
+ goto cached;
+ else
+ goto end;
+ }
panel->hdr_state = hdr_meta->hdr_state;
- hdr = &panel->catalog->hdr_data;
-
hdr->ext_header_byte0 = 0x00;
hdr->ext_header_byte1 = 0x04;
hdr->ext_header_byte2 = 0x1F;
@@ -786,8 +780,9 @@
memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
else
memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
-
- panel->catalog->config_hdr(panel->catalog, panel->hdr_state);
+cached:
+ if (panel->panel_on)
+ panel->catalog->config_hdr(panel->catalog, panel->hdr_state);
end:
return rc;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index c112cdc..adcc762 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,101 +20,45 @@
static void dp_parser_unmap_io_resources(struct dp_parser *parser)
{
+ int i = 0;
struct dp_io *io = &parser->io;
- msm_dss_iounmap(&io->dp_ahb);
- msm_dss_iounmap(&io->dp_aux);
- msm_dss_iounmap(&io->dp_link);
- msm_dss_iounmap(&io->dp_p0);
- msm_dss_iounmap(&io->phy_io);
- msm_dss_iounmap(&io->ln_tx0_io);
- msm_dss_iounmap(&io->ln_tx0_io);
- msm_dss_iounmap(&io->dp_pll_io);
- msm_dss_iounmap(&io->dp_cc_io);
- msm_dss_iounmap(&io->usb3_dp_com);
- msm_dss_iounmap(&io->qfprom_io);
- msm_dss_iounmap(&io->hdcp_io);
+ for (i = 0; i < io->len; i++)
+ msm_dss_iounmap(&io->data[i].io);
}
-static int dp_parser_ctrl_res(struct dp_parser *parser)
+static int dp_parser_reg(struct dp_parser *parser)
{
- int rc = 0;
- u32 index;
+ int rc = 0, i = 0;
+ u32 reg_count;
struct platform_device *pdev = parser->pdev;
- struct device_node *of_node = parser->pdev->dev.of_node;
struct dp_io *io = &parser->io;
+ struct device *dev = &pdev->dev;
- rc = of_property_read_u32(of_node, "cell-index", &index);
- if (rc) {
- pr_err("cell-index not specified, rc=%d\n", rc);
- goto err;
+ reg_count = of_property_count_strings(dev->of_node, "reg-names");
+ if (reg_count <= 0) {
+ pr_err("no reg defined\n");
+ return -EINVAL;
}
- rc = msm_dss_ioremap_byname(pdev, &io->dp_ahb, "dp_ahb");
- if (rc) {
- pr_err("unable to remap dp io resources\n");
- goto err;
+ io->len = reg_count;
+ io->data = devm_kzalloc(dev, sizeof(struct dp_io_data) * reg_count,
+ GFP_KERNEL);
+ if (!io->data)
+ return -ENOMEM;
+
+ for (i = 0; i < reg_count; i++) {
+ of_property_read_string_index(dev->of_node,
+ "reg-names", i, &io->data[i].name);
+ rc = msm_dss_ioremap_byname(pdev, &io->data[i].io,
+ io->data[i].name);
+ if (rc) {
+ pr_err("unable to remap %s resources\n",
+ io->data[i].name);
+ goto err;
+ }
}
- rc = msm_dss_ioremap_byname(pdev, &io->dp_aux, "dp_aux");
- if (rc) {
- pr_err("unable to remap dp io resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->dp_link, "dp_link");
- if (rc) {
- pr_err("unable to remap dp io resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->dp_p0, "dp_p0");
- if (rc) {
- pr_err("unable to remap dp io resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->phy_io, "dp_phy");
- if (rc) {
- pr_err("unable to remap dp PHY resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->ln_tx0_io, "dp_ln_tx0");
- if (rc) {
- pr_err("unable to remap dp TX0 resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->ln_tx1_io, "dp_ln_tx1");
- if (rc) {
- pr_err("unable to remap dp TX1 resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->dp_pll_io, "dp_pll");
- if (rc) {
- pr_err("unable to remap DP PLL resources\n");
- goto err;
- }
-
- rc = msm_dss_ioremap_byname(pdev, &io->usb3_dp_com, "usb3_dp_com");
- if (rc) {
- pr_err("unable to remap USB3 DP com resources\n");
- goto err;
- }
-
- if (msm_dss_ioremap_byname(pdev, &io->dp_cc_io, "dp_mmss_cc")) {
- pr_err("unable to remap dp MMSS_CC resources\n");
- goto err;
- }
-
- if (msm_dss_ioremap_byname(pdev, &io->qfprom_io, "qfprom_physical"))
- pr_warn("unable to remap dp qfprom resources\n");
-
- if (msm_dss_ioremap_byname(pdev, &io->hdcp_io, "hdcp_physical"))
- pr_warn("unable to remap dp hdcp resources\n");
-
return 0;
err:
dp_parser_unmap_io_resources(parser);
@@ -618,7 +562,7 @@
goto err;
}
- rc = dp_parser_ctrl_res(parser);
+ rc = dp_parser_reg(parser);
if (rc)
goto err;
@@ -647,6 +591,74 @@
return rc;
}
+static struct dp_io_data *dp_parser_get_io(struct dp_parser *dp_parser,
+ char *name)
+{
+ int i = 0;
+ struct dp_io *io;
+
+ if (!dp_parser) {
+ pr_err("invalid input\n");
+ goto err;
+ }
+
+ io = &dp_parser->io;
+
+ for (i = 0; i < io->len; i++) {
+ struct dp_io_data *data = &io->data[i];
+
+ if (!strcmp(data->name, name))
+ return data;
+ }
+err:
+ return NULL;
+}
+
+static void dp_parser_get_io_buf(struct dp_parser *dp_parser, char *name)
+{
+ int i = 0;
+ struct dp_io *io;
+
+ if (!dp_parser) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = &dp_parser->io;
+
+ for (i = 0; i < io->len; i++) {
+ struct dp_io_data *data = &io->data[i];
+
+ if (!strcmp(data->name, name)) {
+ if (!data->buf)
+ data->buf = devm_kzalloc(&dp_parser->pdev->dev,
+ data->io.len, GFP_KERNEL);
+ }
+ }
+}
+
+static void dp_parser_clear_io_buf(struct dp_parser *dp_parser)
+{
+ int i = 0;
+ struct dp_io *io;
+
+ if (!dp_parser) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = &dp_parser->io;
+
+ for (i = 0; i < io->len; i++) {
+ struct dp_io_data *data = &io->data[i];
+
+ if (data->buf)
+ devm_kfree(&dp_parser->pdev->dev, data->buf);
+
+ data->buf = NULL;
+ }
+}
+
struct dp_parser *dp_parser_get(struct platform_device *pdev)
{
struct dp_parser *parser;
@@ -656,6 +668,9 @@
return ERR_PTR(-ENOMEM);
parser->parse = dp_parser_parse;
+ parser->get_io = dp_parser_get_io;
+ parser->get_io_buf = dp_parser_get_io_buf;
+ parser->clear_io_buf = dp_parser_clear_io_buf;
parser->pdev = pdev;
return parser;
@@ -679,5 +694,7 @@
dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]);
}
+ dp_parser_clear_io_buf(parser);
+ devm_kfree(&parser->pdev->dev, parser->io.data);
devm_kfree(&parser->pdev->dev, parser);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 72da381..6e78db2 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,35 +56,25 @@
};
/**
- * struct dp_ctrl_resource - controller's IO related data
- *
- * @dp_ahb: controller's ahb mapped memory address
- * @dp_aux: controller's aux mapped memory address
- * @dp_link: controller's link mapped memory address
- * @dp_p0: controller's p0 mapped memory address
- * @phy_io: phy's mapped memory address
- * @ln_tx0_io: USB-DP lane TX0's mapped memory address
- * @ln_tx1_io: USB-DP lane TX1's mapped memory address
- * @dp_cc_io: DP cc's mapped memory address
- * @qfprom_io: qfprom's mapped memory address
- * @dp_pll_io: DP PLL mapped memory address
- * @usb3_dp_com: USB3 DP PHY combo mapped memory address
- * @hdcp_io: hdcp's mapped memory address
+ * struct dp_io_data - data structure to store DP IO related info
+ * @name: name of the IO
+ * @buf: buffer corresponding to IO for debugging
+ * @io: io data which give len and mapped address
+ */
+struct dp_io_data {
+ const char *name;
+ u8 *buf;
+ struct dss_io_data io;
+};
+
+/**
+ * struct dp_io - data struct to store array of DP IO info
+ * @len: total number of IOs
+ * @data: pointer to an array of DP IO data structures.
*/
struct dp_io {
- struct dss_io_data ctrl_io;
- struct dss_io_data dp_ahb;
- struct dss_io_data dp_aux;
- struct dss_io_data dp_link;
- struct dss_io_data dp_p0;
- struct dss_io_data phy_io;
- struct dss_io_data ln_tx0_io;
- struct dss_io_data ln_tx1_io;
- struct dss_io_data dp_cc_io;
- struct dss_io_data qfprom_io;
- struct dss_io_data dp_pll_io;
- struct dss_io_data usb3_dp_com;
- struct dss_io_data hdcp_io;
+ u32 len;
+ struct dp_io_data *data;
};
/**
@@ -171,6 +161,9 @@
* @ctrl_resouce: controller's register address realated data
* @disp_data: controller's display related data
* @parse: function to be called by client to parse device tree.
+ * @get_io: function to be called by client to get io data.
+ * @get_io_buf: function to be called by client to get io buffers.
+ * @clear_io_buf: function to be called by client to clear io buffers.
*/
struct dp_parser {
struct platform_device *pdev;
@@ -184,6 +177,9 @@
u32 max_pclk_khz;
int (*parse)(struct dp_parser *parser);
+ struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
+ void (*get_io_buf)(struct dp_parser *parser, char *name);
+ void (*clear_io_buf)(struct dp_parser *parser);
};
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 2bd3bd4..42eb9b0 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -426,6 +426,28 @@
return rc;
}
+static int dp_usbpd_simulate_attention(struct dp_usbpd *dp_usbpd, int vdo)
+{
+ int rc = 0;
+ struct dp_usbpd_private *pd;
+
+ if (!dp_usbpd) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
+
+ pd->vdo = vdo;
+ dp_usbpd_get_status(pd);
+
+ if (pd->dp_cb && pd->dp_cb->attention)
+ pd->dp_cb->attention(pd->dev);
+error:
+ return rc;
+}
+
struct dp_usbpd *dp_usbpd_get(struct device *dev, struct dp_usbpd_cb *cb)
{
int rc = 0;
@@ -475,6 +497,7 @@
dp_usbpd = &usbpd->dp_usbpd;
dp_usbpd->simulate_connect = dp_usbpd_simulate_connect;
+ dp_usbpd->simulate_attention = dp_usbpd_simulate_attention;
return dp_usbpd;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
index e70ad7d..0a7efd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -50,6 +50,7 @@
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
* @simulate_connect: simulate disconnect or connect for debug mode
+ * @simulate_attention: simulate attention messages for debug mode
*/
struct dp_usbpd {
enum dp_usbpd_port port;
@@ -65,6 +66,7 @@
bool debug_en;
int (*simulate_connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+ int (*simulate_attention)(struct dp_usbpd *dp_usbpd, int vdo);
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index bfbcf54..6fb7105 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2087,11 +2087,14 @@
}
mutex_lock(&dsi_ctrl->ctrl_lock);
- if (!dsi_rect_is_equal(&dsi_ctrl->roi, roi)) {
+ if ((!dsi_rect_is_equal(&dsi_ctrl->roi, roi)) ||
+ dsi_ctrl->modeupdated) {
*changed = true;
memcpy(&dsi_ctrl->roi, roi, sizeof(dsi_ctrl->roi));
+ dsi_ctrl->modeupdated = false;
} else
*changed = false;
+
mutex_unlock(&dsi_ctrl->ctrl_lock);
return rc;
}
@@ -2647,6 +2650,7 @@
ctrl->mode_bounds.w = ctrl->host_config.video_timing.h_active;
ctrl->mode_bounds.h = ctrl->host_config.video_timing.v_active;
memcpy(&ctrl->roi, &ctrl->mode_bounds, sizeof(ctrl->mode_bounds));
+ ctrl->modeupdated = true;
ctrl->roi.x = 0;
error:
mutex_unlock(&ctrl->ctrl_lock);
@@ -2673,9 +2677,6 @@
return -EINVAL;
}
- mutex_lock(&dsi_ctrl->ctrl_lock);
- mutex_unlock(&dsi_ctrl->ctrl_lock);
-
return rc;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index ca58896..537bdc3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -211,6 +211,7 @@
* dsi controller and run only dsi controller.
* @null_insertion_enabled: A boolean property to allow dsi controller to
* insert null packet.
+ * @modeupdated: Boolean to send new roi if mode is updated.
*/
struct dsi_ctrl {
struct platform_device *pdev;
@@ -258,6 +259,7 @@
bool phy_isolation_enabled;
bool null_insertion_enabled;
+ bool modeupdated;
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 0ffece3..eaeeb52 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2960,11 +2960,7 @@
return -EINVAL;
}
- mutex_lock(&panel->panel_lock);
-
memcpy(phy_props, &panel->phy_props, sizeof(*phy_props));
-
- mutex_unlock(&panel->panel_lock);
return rc;
}
@@ -2978,11 +2974,7 @@
return -EINVAL;
}
- mutex_lock(&panel->panel_lock);
-
memcpy(dfps_caps, &panel->dfps_caps, sizeof(*dfps_caps));
-
- mutex_unlock(&panel->panel_lock);
return rc;
}
@@ -3329,7 +3321,7 @@
set->cmds[0].msg.rx_len = 0;
set->cmds[0].msg.rx_buf = 0;
set->cmds[0].last_command = 0;
- set->cmds[0].post_wait_ms = 1;
+ set->cmds[0].post_wait_ms = 0;
set->cmds[1].msg.channel = 0;
set->cmds[1].msg.type = MIPI_DSI_DCS_LONG_WRITE;
@@ -3340,7 +3332,7 @@
set->cmds[1].msg.rx_len = 0;
set->cmds[1].msg.rx_buf = 0;
set->cmds[1].last_command = 1;
- set->cmds[1].post_wait_ms = 1;
+ set->cmds[1].post_wait_ms = 0;
goto exit;
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 42aea7e..3e084d5 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -613,6 +613,7 @@
if (!node)
return;
+ spin_lock_irqsave(&node->state_lock, flags);
if (node->state == IRQ_DISABLED) {
ret = sde_core_irq_enable(kms, &irq_idx, 1);
if (ret)
@@ -620,6 +621,7 @@
else
node->state = IRQ_ENABLED;
}
+ spin_unlock_irqrestore(&node->state_lock, flags);
}
static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
@@ -1623,6 +1625,7 @@
struct sde_crtc *crtc;
int i;
int irq_idx, ret;
+ unsigned long flags;
struct sde_cp_node prop_node;
struct sde_crtc_irq_info *node = NULL;
@@ -1673,6 +1676,7 @@
if (!en) {
if (node) {
+ spin_lock_irqsave(&node->state_lock, flags);
if (node->state == IRQ_ENABLED) {
ret = sde_core_irq_disable(kms, &irq_idx, 1);
if (ret)
@@ -1683,6 +1687,7 @@
} else {
node->state = IRQ_NOINIT;
}
+ spin_unlock_irqrestore(&node->state_lock, flags);
} else {
DRM_ERROR("failed to get node from crtc event list\n");
}
@@ -1701,6 +1706,7 @@
if (node) {
/* device resume or resume from IPC cases */
+ spin_lock_irqsave(&node->state_lock, flags);
if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
ret = sde_core_irq_enable(kms, &irq_idx, 1);
if (ret) {
@@ -1712,6 +1718,7 @@
node->state = IRQ_ENABLED;
}
}
+ spin_unlock_irqrestore(&node->state_lock, flags);
} else {
/* request from userspace to register the event
* in this case, node has not been added into the event list
@@ -1807,14 +1814,17 @@
return;
}
+ spin_lock_irqsave(&node->state_lock, flags);
if (node->state == IRQ_ENABLED) {
if (sde_core_irq_disable_nolock(kms, irq_idx)) {
DRM_ERROR("failed to disable irq %d, ret %d\n",
irq_idx, ret);
+ spin_unlock_irqrestore(&node->state_lock, flags);
return;
}
node->state = IRQ_DISABLED;
}
+ spin_unlock_irqrestore(&node->state_lock, flags);
/* lock histogram buffer */
for (i = 0; i < crtc->num_mixers; i++) {
@@ -1886,6 +1896,7 @@
struct sde_crtc *crtc;
struct sde_crtc_irq_info *node = NULL;
int i, irq_idx, ret = 0;
+ unsigned long flags;
if (!crtc_drm || !hist_irq) {
DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, hist_irq);
@@ -1928,6 +1939,7 @@
if (!en) {
if (node) {
/* device suspend case or suspend to IPC cases */
+ spin_lock_irqsave(&node->state_lock, flags);
if (node->state == IRQ_ENABLED) {
ret = sde_core_irq_disable(kms, &irq_idx, 1);
if (ret)
@@ -1938,6 +1950,7 @@
} else {
node->state = IRQ_NOINIT;
}
+ spin_unlock_irqrestore(&node->state_lock, flags);
} else {
DRM_ERROR("failed to get node from crtc event list\n");
}
@@ -1957,6 +1970,7 @@
if (node) {
/* device resume or resume from IPC cases */
+ spin_lock_irqsave(&node->state_lock, flags);
if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
ret = sde_core_irq_enable(kms, &irq_idx, 1);
if (ret) {
@@ -1968,6 +1982,7 @@
node->state = IRQ_ENABLED;
}
}
+ spin_unlock_irqrestore(&node->state_lock, flags);
} else {
/* request from userspace to register the event
* in this case, node has not been added into the event list
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index d5c4386..655390b 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -471,8 +471,11 @@
}
c_conn->last_panel_power_mode = mode;
- if (mode != SDE_MODE_DPMS_ON)
+ if (mode != SDE_MODE_DPMS_ON) {
+ mutex_unlock(&c_conn->lock);
sde_connector_schedule_status_work(connector, false);
+ mutex_lock(&c_conn->lock);
+ }
return rc;
}
@@ -939,34 +942,38 @@
struct sde_connector_state *c_state,
void *usr_ptr)
{
+ int rc = 0;
struct drm_connector *connector;
struct drm_msm_ext_hdr_metadata *hdr_meta;
int i;
if (!c_conn || !c_state) {
SDE_ERROR_CONN(c_conn, "invalid args\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto end;
}
connector = &c_conn->base;
if (!connector->hdr_supported) {
SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
- return -ENOTSUPP;
+ rc = -ENOTSUPP;
+ goto end;
}
memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
if (!usr_ptr) {
SDE_DEBUG_CONN(c_conn, "hdr metadata cleared\n");
- return 0;
+ goto end;
}
if (copy_from_user(&c_state->hdr_meta,
(void __user *)usr_ptr,
sizeof(*hdr_meta))) {
SDE_ERROR_CONN(c_conn, "failed to copy hdr metadata\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto end;
}
hdr_meta = &c_state->hdr_meta;
@@ -989,7 +996,10 @@
hdr_meta->display_primaries_y[i]);
}
- return 0;
+ if (c_conn->ops.config_hdr)
+ rc = c_conn->ops.config_hdr(c_conn->display, c_state);
+end:
+ return rc;
}
static int sde_connector_atomic_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 7cf09b7..9c37869 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -241,6 +241,16 @@
*/
int (*cmd_transfer)(void *display, const char *cmd_buf,
u32 cmd_buf_len);
+
+ /**
+ * config_hdr - configure HDR
+ * @display: Pointer to private display handle
+ * @c_state: Pointer to connector state
+ * Returns: Zero on success, negative error code for failures
+ */
+ int (*config_hdr)(void *display,
+ struct sde_connector_state *c_state);
+
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index a6f22c9..442104b 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -513,6 +513,7 @@
struct msm_drm_private *priv;
int i;
int rc;
+ unsigned long irq_flags;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
@@ -543,6 +544,7 @@
sde_disable_all_irqs(sde_kms);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
kfree(sde_kms->irq_obj.irq_cb_tbl);
kfree(sde_kms->irq_obj.enable_counts);
kfree(sde_kms->irq_obj.irq_counts);
@@ -550,6 +552,7 @@
sde_kms->irq_obj.enable_counts = NULL;
sde_kms->irq_obj.irq_counts = NULL;
sde_kms->irq_obj.total_irqs = 0;
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
}
static void sde_core_irq_mask(struct irq_data *irqd)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index b3eb101..1ee75c4 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -2511,9 +2511,6 @@
SDE_EVTLOG_FUNC_CASE3);
}
- if (fevent->event & SDE_ENCODER_FRAME_EVENT_DONE)
- sde_core_perf_crtc_update(crtc, 0, false);
-
if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
| SDE_ENCODER_FRAME_EVENT_ERROR))
frame_done = true;
@@ -2597,6 +2594,8 @@
SDE_EVT32_VERBOSE(DRMID(crtc));
smmu_state = &sde_crtc->smmu_state;
+ sde_core_perf_crtc_update(crtc, 0, false);
+
/* complete secure transitions if any */
if (smmu_state->transition_type == POST_COMMIT)
sde_crtc_secure_ctrl(crtc, true);
@@ -3276,7 +3275,8 @@
* smmu state is attached,
*/
if ((smmu_state->state != DETACHED) &&
- (smmu_state->state != DETACH_ALL_REQ))
+ (smmu_state->state != DETACH_ALL_REQ) &&
+ sde_crtc->enabled)
sde_cp_crtc_apply_properties(crtc);
/*
@@ -4487,28 +4487,41 @@
}
static int _sde_crtc_excl_rect_overlap_check(struct plane_state pstates[],
- int cnt, int curr_cnt, struct sde_rect *excl_rect, int z_pos)
+ int cnt, int curr_cnt, struct sde_rect *excl_rect)
{
struct sde_rect dst_rect, intersect;
int i, rc = -EINVAL;
const struct drm_plane_state *pstate;
- /* start checking from next plane */
- for (i = curr_cnt; i < cnt; i++) {
+ for (i = 0; i < cnt; i++) {
+ if (i == curr_cnt)
+ continue;
+
pstate = pstates[i].drm_pstate;
POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y,
pstate->crtc_w, pstate->crtc_h, false);
sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect);
+ /* complete intersection of excl_rect is required */
if (intersect.w == excl_rect->w && intersect.h == excl_rect->h
- /* next plane may be on same z-order */
- && z_pos != pstates[i].stage) {
+ /* intersecting rect should be in another z_order */
+ && pstates[curr_cnt].stage != pstates[i].stage) {
rc = 0;
goto end;
}
}
- SDE_ERROR("excl rect does not find top overlapping rect\n");
+ SDE_ERROR(
+ "no overlapping rect for [%d] z_pos:%d, excl_rect:{%d,%d,%d,%d}\n",
+ i, pstates[curr_cnt].stage,
+ excl_rect->x, excl_rect->y, excl_rect->w, excl_rect->h);
+ for (i = 0; i < cnt; i++) {
+ pstate = pstates[i].drm_pstate;
+ SDE_ERROR("[%d] p:%d, z_pos:%d, src:{%d,%d,%d,%d}\n",
+ i, pstate->plane->base.id, pstates[i].stage,
+ pstate->crtc_x, pstate->crtc_y,
+ pstate->crtc_w, pstate->crtc_h);
+ }
end:
return rc;
}
@@ -4550,9 +4563,9 @@
pstate = pstates[i].drm_pstate;
sde_pstate = to_sde_plane_state(pstate);
if (sde_pstate->excl_rect.w && sde_pstate->excl_rect.h) {
- /* check overlap on all top z-order */
+ /* check overlap on any other z-order */
rc = _sde_crtc_excl_rect_overlap_check(pstates, cnt,
- i + 1, &sde_pstate->excl_rect, pstates[i].stage);
+ i, &sde_pstate->excl_rect);
if (rc)
goto end;
}
@@ -6120,6 +6133,7 @@
INIT_LIST_HEAD(&node->list);
node->func = custom_events[i].func;
node->event = event;
+ spin_lock_init(&node->state_lock);
break;
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 33d2b8fa..78f15ec 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -445,7 +445,8 @@
* @event: event type of the interrupt
* @func: function pointer to enable/disable the interrupt
* @list: list of user customized event in crtc
- * @ref_count: reference count for the interrupt
+ * @state: state of the interrupt
+ * @state_lock: spin lock for interrupt state
*/
struct sde_crtc_irq_info {
struct sde_irq_callback irq;
@@ -454,6 +455,7 @@
struct sde_irq_callback *irq);
struct list_head list;
enum sde_crtc_irq_state state;
+ spinlock_t state_lock;
};
#define to_sde_crtc_state(x) \
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 3b5e3f5..92ab669 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -3739,6 +3739,7 @@
SDE_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
+ params->is_primary = sde_enc->disp_info.is_primary;
if (phys) {
if (phys->ops.prepare_for_kickoff) {
rc = phys->ops.prepare_for_kickoff(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 8038eb6..2c84e20 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -54,11 +54,13 @@
/**
* sde_encoder_kickoff_params - info encoder requires at kickoff
* @inline_rotate_prefill: number of lines to prefill for inline rotation
+ * @is_primary: set to true if the display is primary display
* @affected_displays: bitmask, bit set means the ROI of the commit lies within
* the bounds of the physical display at the bit index
*/
struct sde_encoder_kickoff_params {
u32 inline_rotate_prefill;
+ u32 is_primary;
unsigned long affected_displays;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 4a15e6f..a3f09b6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -245,9 +245,10 @@
* HW layer requires VSYNC counter of first pixel of tgt VFP line.
* @phys_enc: Pointer to physical encoder
* @rot_fetch_lines: number of line to prefill, or 0 to disable
+ * @is_primary: set true if the display is primary display
*/
static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
- u32 rot_fetch_lines)
+ u32 rot_fetch_lines, u32 is_primary)
{
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
@@ -264,7 +265,8 @@
!phys_enc->hw_ctl->ops.get_bitmask_intf ||
!phys_enc->hw_ctl->ops.update_pending_flush ||
!vid_enc->hw_intf->ops.setup_rot_start ||
- !phys_enc->sde_kms)
+ !phys_enc->sde_kms ||
+ !is_primary)
return;
timing = &vid_enc->timing_params;
@@ -403,7 +405,7 @@
to_sde_encoder_phys_vid(phys_enc);
struct sde_hw_ctl *hw_ctl;
unsigned long lock_flags;
- u32 flush_register = 0;
+ u32 flush_register = ~0;
u32 reset_status = 0;
int new_cnt = -1, old_cnt = -1;
u32 event = 0;
@@ -417,42 +419,45 @@
SDE_ATRACE_BEGIN("vblank_irq");
- /* signal only for master, where there is a pending kickoff */
- if (sde_encoder_phys_vid_is_master(phys_enc)
- && atomic_add_unless(
- &phys_enc->pending_retire_fence_cnt, -1, 0)) {
- event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
- | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-
- if (phys_enc->parent_ops.handle_frame_done)
- phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
- phys_enc, event);
- }
-
- if (phys_enc->parent_ops.handle_vblank_virt)
- phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
- phys_enc);
-
- old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
-
/*
* only decrement the pending flush count if we've actually flushed
* hardware. due to sw irq latency, vblank may have already happened
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
- new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
- -1, 0);
+ if (flush_register)
+ goto not_flushed;
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* signal only for master, where there is a pending kickoff */
+ if (sde_encoder_phys_vid_is_master(phys_enc)) {
+ if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
+ -1, 0))
+ event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE |
+ SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
+ }
+
+not_flushed:
if (hw_ctl && hw_ctl->ops.get_reset)
reset_status = hw_ctl->ops.get_reset(hw_ctl);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ if (event && phys_enc->parent_ops.handle_frame_done)
+ phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ if (phys_enc->parent_ops.handle_vblank_virt)
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
old_cnt, new_cnt, reset_status ? SDE_EVTLOG_ERROR : 0,
flush_register, event);
@@ -873,7 +878,8 @@
vid_enc->error_count = 0;
}
- programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
+ programmable_rot_fetch_config(phys_enc,
+ params->inline_rotate_prefill, params->is_primary);
return rc;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index 994bf3d..593e972 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -786,6 +786,7 @@
blk_offset += 4;
val = (ad_cfg->cfg_param_027 & (BIT(16) - 1));
val |= ((ad_cfg->cfg_param_028 & (BIT(16) - 1)) << 16);
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
blk_offset += 4;
val = (ad_cfg->cfg_param_029 & (BIT(16) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 545ed65..cdc6a9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1681,6 +1681,8 @@
if (sde_cfg->has_wb_ubwc)
set_bit(SDE_WB_UBWC, &wb->features);
+ set_bit(SDE_WB_XY_ROI_OFFSET, &wb->features);
+
for (j = 0; j < sde_cfg->mdp_count; j++) {
sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
PROP_BITVALUE_ACCESS(prop_value,
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 2b0aa37..5d3835c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -949,7 +949,7 @@
.get_mode_info = dp_connector_get_mode_info,
.post_open = dp_connector_post_open,
.check_status = NULL,
- .pre_kickoff = dp_connector_pre_kickoff,
+ .config_hdr = dp_connector_config_hdr,
.cmd_transfer = NULL,
};
struct msm_display_info info;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index ed1079d..4c281666 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2379,6 +2379,29 @@
(u64) &rstate->rot_hw->base);
rstate->out_fbo = NULL;
}
+
+ /*
+ * For video mode, reject any downscale factor greater than or
+ * equal to 1.1x
+ *
+ * Check the downscale factor first to avoid querying the
+ * interface mode unnecessarily.
+ */
+ if ((rstate->out_src_h >> 16) * 10 >= state->crtc_h * 11 &&
+ sde_crtc_get_intf_mode(state->crtc) ==
+ INTF_MODE_VIDEO) {
+ SDE_DEBUG_PLANE(psde,
+ "inline %d with invalid scale, %dx%d, %dx%d\n",
+ rstate->sequence_id,
+ rstate->out_src_w, rstate->out_src_h,
+ state->crtc_w, state->crtc_h);
+ SDE_EVT32(DRMID(plane), rstate->sequence_id,
+ rstate->out_src_w >> 16,
+ rstate->out_src_h >> 16,
+ state->crtc_w, state->crtc_h,
+ SDE_EVTLOG_ERROR);
+ return -EINVAL;
+ }
} else {
SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
@@ -3257,7 +3280,8 @@
if (!fb || !old_fb) {
SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
- } else if (fb->pixel_format != old_fb->pixel_format) {
+ } else if ((fb->pixel_format != old_fb->pixel_format) ||
+ pstate->const_alpha_en != old_pstate->const_alpha_en) {
SDE_DEBUG_PLANE(psde, "format change\n");
pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
} else {
@@ -3569,7 +3593,8 @@
pstate->const_alpha_en = fmt->alpha_enable &&
(SDE_DRM_BLEND_OP_OPAQUE !=
- sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
+ sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP)) &&
+ (pstate->stage != SDE_STAGE_0);
modeset_update:
if (!ret)
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index f9092e2..93304e16 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1129,17 +1129,29 @@
return ret;
}
-/**
- * poll_intr_status - Gets HW interrupt status based on
- * given lookup IRQ index.
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @msec: Maximum delay allowed to check intr status
- * return: return zero on success.
- */
-static u32 _sde_rm_poll_intr_status_for_cont_splash
- (struct sde_hw_intr *intr,
- int irq_idx, u32 const msec)
+static void _sde_rm_clear_irq_status(struct sde_hw_intr *hw_intr,
+ int irq_idx_pp_done, int irq_idx_autorefresh)
+{
+ u32 intr_value = 0;
+
+ if ((irq_idx_pp_done >= 0) && (hw_intr->ops.get_intr_status_nomask)) {
+ intr_value = hw_intr->ops.get_intr_status_nomask(hw_intr,
+ irq_idx_pp_done, false);
+ hw_intr->ops.clear_intr_status_force_mask(hw_intr,
+ irq_idx_pp_done, intr_value);
+ }
+
+ if ((irq_idx_autorefresh >= 0) &&
+ (hw_intr->ops.get_intr_status_nomask)) {
+ intr_value = hw_intr->ops.get_intr_status_nomask(hw_intr,
+ irq_idx_autorefresh, false);
+ hw_intr->ops.clear_intr_status_force_mask(hw_intr,
+ irq_idx_autorefresh, intr_value);
+ }
+}
+
+static u32 _sde_rm_poll_intr_status_for_cont_splash(struct sde_hw_intr *intr,
+ int irq_idx_pp_done, int irq_idx_autorefresh, u32 const msec)
{
int i;
u32 status = 0;
@@ -1153,19 +1165,112 @@
for (i = 0; i < loop; i++) {
status = intr->ops.get_intr_status_nomask
- (intr, irq_idx, false);
+ (intr, irq_idx_pp_done, false);
- if (status & BIT(irq_idx)) {
- SDE_DEBUG(" Poll success. i=%d, status=0x%x\n",
+ if (status & BIT(irq_idx_pp_done)) {
+ SDE_DEBUG("pp_done received i=%d, status=0x%x\n",
i, status);
- return 0;
+ SDE_EVT32(status, i, irq_idx_pp_done);
+
+ if (status & BIT(irq_idx_autorefresh))
+ _sde_rm_clear_irq_status(intr,
+ irq_idx_pp_done, irq_idx_autorefresh);
+ else
+ return 0;
}
usleep_range(delay_us, delay_us + 10);
}
+
+ SDE_EVT32(status, irq_idx_pp_done, SDE_EVTLOG_ERROR);
SDE_ERROR("polling timed out. status = 0x%x\n", status);
return -ETIMEDOUT;
}
+static int _sde_rm_autorefresh_disable(struct sde_hw_pingpong *pp,
+ struct sde_hw_intr *hw_intr)
+{
+ u32 const timeout_ms = 35; /* Max two vsyncs delay */
+ int rc = 0, i, loop = 3;
+ struct sde_hw_pp_vsync_info info;
+ int irq_idx_pp_done = -1, irq_idx_autorefresh = -1;
+ struct sde_hw_autorefresh cfg = {0};
+
+ if (!pp->ops.get_autorefresh || !pp->ops.setup_autorefresh ||
+ !pp->ops.connect_external_te || !pp->ops.get_vsync_info) {
+ SDE_ERROR("autorefresh update api not supported\n");
+ return 0;
+ }
+
+ /* read default autorefresh configuration */
+ pp->ops.get_autorefresh(pp, &cfg);
+ if (!cfg.enable) {
+ SDE_DEBUG("autorefresh already disabled\n");
+ SDE_EVT32(pp->idx - PINGPONG_0, SDE_EVTLOG_FUNC_CASE1);
+ return 0;
+ }
+
+ /* disable external TE first */
+ pp->ops.connect_external_te(pp, false);
+
+ /* get all IRQ indexes */
+ if (hw_intr->ops.irq_idx_lookup) {
+ irq_idx_pp_done = hw_intr->ops.irq_idx_lookup(
+ SDE_IRQ_TYPE_PING_PONG_COMP, pp->idx);
+ irq_idx_autorefresh = hw_intr->ops.irq_idx_lookup(
+ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, pp->idx);
+ SDE_DEBUG("pp_done itr_idx = %d autorefresh irq_idx:%d\n",
+ irq_idx_pp_done, irq_idx_autorefresh);
+ }
+
+ /* disable autorefresh */
+ cfg.enable = false;
+ pp->ops.setup_autorefresh(pp, &cfg);
+
+ SDE_EVT32(pp->idx - PINGPONG_0, irq_idx_pp_done, irq_idx_autorefresh);
+ _sde_rm_clear_irq_status(hw_intr, irq_idx_pp_done, irq_idx_autorefresh);
+
+ /*
+ * Check the line count again if
+ * the line count is equal to the active
+	 * height to make sure there are no
+	 * additional frame updates
+ */
+ for (i = 0; i < loop; i++) {
+ info.wr_ptr_line_count = 0;
+ info.rd_ptr_init_val = 0;
+ pp->ops.get_vsync_info(pp, &info);
+
+ SDE_EVT32(pp->idx - PINGPONG_0, info.wr_ptr_line_count,
+ info.rd_ptr_init_val, SDE_EVTLOG_FUNC_CASE1);
+
+ /* wait for read ptr intr */
+ rc = _sde_rm_poll_intr_status_for_cont_splash(hw_intr,
+ irq_idx_pp_done, irq_idx_autorefresh, timeout_ms);
+
+ info.wr_ptr_line_count = 0;
+ info.rd_ptr_init_val = 0;
+ pp->ops.get_vsync_info(pp, &info);
+ SDE_DEBUG("i=%d, line count=%d\n", i, info.wr_ptr_line_count);
+
+ SDE_EVT32(pp->idx - PINGPONG_0, info.wr_ptr_line_count,
+ info.rd_ptr_init_val, SDE_EVTLOG_FUNC_CASE2);
+
+ /* log line count and return */
+ if (!rc)
+ break;
+ /*
+		 * Wait for a few milliseconds for the line count
+ * to increase if any frame transfer is
+ * pending.
+ */
+ usleep_range(3000, 4000);
+ }
+
+ pp->ops.connect_external_te(pp, true);
+
+ return rc;
+}
+
/**
* sde_rm_get_pp_dsc_for_cont_splash - retrieve the current dsc enabled blocks
* and disable autorefresh if enabled.
@@ -1181,9 +1286,7 @@
{
int index = 0;
int value, dsc_cnt = 0;
- struct sde_hw_autorefresh cfg;
struct sde_rm_hw_iter iter_pp;
- int irq_idx_pp_done = -1;
if (!rm || !sde_kms || !dsc_ids) {
SDE_ERROR("invalid input parameters\n");
@@ -1195,11 +1298,7 @@
while (_sde_rm_get_hw_locked(rm, &iter_pp)) {
struct sde_hw_pingpong *pp =
to_sde_hw_pingpong(iter_pp.blk->hw);
- u32 intr_value = 0;
- u32 const timeout_ms = 35; /* Max two vsyncs delay */
- int rc = 0, i, loop = 2;
struct sde_hw_intr *hw_intr = NULL;
- struct sde_hw_pp_vsync_info info;
if (!pp->ops.get_dsc_status) {
SDE_ERROR("get_dsc_status ops not initialized\n");
@@ -1219,70 +1318,7 @@
}
index++;
- if (!pp->ops.get_autorefresh) {
- SDE_ERROR("get_autorefresh api not supported\n");
- return 0;
- }
- memset(&cfg, 0, sizeof(cfg));
- if (!pp->ops.get_autorefresh(pp, &cfg)
- && (cfg.enable)
- && (pp->ops.setup_autorefresh)) {
- if (hw_intr->ops.irq_idx_lookup) {
- irq_idx_pp_done = hw_intr->ops.irq_idx_lookup
- (SDE_IRQ_TYPE_PING_PONG_COMP,
- pp->idx);
- SDE_DEBUG(" itr_idx = %d\n", irq_idx_pp_done);
- }
-
- if ((irq_idx_pp_done >= 0) &&
- (hw_intr->ops.get_intr_status_nomask)) {
- intr_value = hw_intr->ops.get_intr_status_nomask
- (hw_intr, irq_idx_pp_done, false);
- hw_intr->ops.clear_intr_status_force_mask
- (hw_intr, irq_idx_pp_done, intr_value);
- }
- cfg.enable = false;
- SDE_DEBUG("Disabling autorefresh\n");
- pp->ops.setup_autorefresh(pp, &cfg);
-
- /*
- * Check the line count again if
- * the line count is equal to the active
- * height to make sure their is no
- * additional frame updates
- */
- for (i = 0; i < loop; i++) {
- info.wr_ptr_line_count = 0;
- info.rd_ptr_init_val = 0;
- if (pp->ops.get_vsync_info)
- pp->ops.get_vsync_info(pp, &info);
- /*
- * For cmd-mode using external-TE logic,
- * the rd_ptr_init_val is equal to
- * active-height. Use this init_val to
- * compare that with lane count. Need
- * to implement a different check
- * if external-TE is not used.
- */
- if (info.wr_ptr_line_count
- < info.rd_ptr_init_val) {
- /* wait for read ptr intr */
- rc =
- _sde_rm_poll_intr_status_for_cont_splash
- (hw_intr, irq_idx_pp_done, timeout_ms);
- if (!rc)
- break;
- }
- SDE_DEBUG("i=%d, line count=%d\n",
- i, info.wr_ptr_line_count);
- /*
- * Wait for few milli seconds for line count
- * to increase if any frame transfer is
- * pending.
- */
- usleep_range(3000, 4000);
- }
- }
+ _sde_rm_autorefresh_disable(pp, hw_intr);
}
return dsc_cnt;
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 26c1c39..da37baf 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -335,8 +335,8 @@
.num_protected_regs = 0x20,
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a630_gmu.bin",
- .gpmu_major = 0x0,
- .gpmu_minor = 0x005,
+ .gpmu_major = 0x1,
+ .gpmu_minor = 0x003,
.gpmu_tsens = 0x000C000D,
.max_power = 5448,
},
@@ -357,7 +357,7 @@
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a630_gmu.bin",
.gpmu_major = 0x1,
- .gpmu_minor = 0x001,
+ .gpmu_minor = 0x003,
.gpmu_tsens = 0x000C000D,
.max_power = 5448,
},
@@ -368,7 +368,7 @@
.minor = 5,
.patchid = ANY_ID,
.features = ADRENO_64BIT | ADRENO_RPMH |
- ADRENO_GPMU | ADRENO_CONTENT_PROTECTION,
+ ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a615_zap",
.gpudev = &adreno_a6xx_gpudev,
@@ -377,6 +377,6 @@
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a630_gmu.bin",
.gpmu_major = 0x1,
- .gpmu_minor = 0x001,
+ .gpmu_minor = 0x003,
},
};
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7d11007..942621e 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -19,6 +19,7 @@
#include <linux/input.h>
#include <linux/io.h>
#include <soc/qcom/scm.h>
+#include <linux/nvmem-consumer.h>
#include <linux/msm-bus-board.h>
#include <linux/msm-bus.h>
@@ -755,6 +756,107 @@
return NULL;
}
+static struct {
+ unsigned int quirk;
+ const char *prop;
+} adreno_quirks[] = {
+ { ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
+ { ADRENO_QUIRK_IOMMU_SYNC, "qcom,gpu-quirk-iommu-sync" },
+ { ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
+ { ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
+ { ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
+ "qcom,gpu-quirk-dp2clockgating-disable" },
+ { ADRENO_QUIRK_DISABLE_LMLOADKILL,
+ "qcom,gpu-quirk-lmloadkill-disable" },
+ { ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
+ { ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
+ { ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW,
+ "qcom,gpu-quirk-limit-uche-gbif-rw" },
+};
+
+#if defined(CONFIG_NVMEM) && defined(CONFIG_QCOM_QFPROM)
+static struct device_node *
+adreno_get_soc_hw_revision_node(struct platform_device *pdev)
+{
+ struct device_node *node, *child;
+ struct nvmem_cell *cell;
+ ssize_t len;
+ u32 *buf, hw_rev, rev;
+
+ node = of_find_node_by_name(pdev->dev.of_node, "qcom,soc-hw-revisions");
+ if (node == NULL)
+ goto err;
+
+ /* read the soc hw revision and select revision node */
+ cell = nvmem_cell_get(&pdev->dev, "minor_rev");
+ if (IS_ERR_OR_NULL(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return (void *)cell;
+
+ KGSL_CORE_ERR("Unable to get nvmem cell: ret=%ld\n",
+ PTR_ERR(cell));
+ goto err;
+ }
+
+ buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR_OR_NULL(buf)) {
+ KGSL_CORE_ERR("Unable to read nvmem cell: ret=%ld\n",
+ PTR_ERR(buf));
+ goto err;
+ }
+
+ hw_rev = *buf;
+ kfree(buf);
+
+ for_each_child_of_node(node, child) {
+ if (of_property_read_u32(child, "reg", &rev))
+ continue;
+
+ if (rev == hw_rev)
+ return child;
+ }
+
+err:
+ /* fall back to parent node */
+ return pdev->dev.of_node;
+}
+#else
+static struct device_node *
+adreno_get_soc_hw_revision_node(struct platform_device *pdev)
+{
+ return pdev->dev.of_node;
+}
+#endif
+
+
+static int adreno_update_soc_hw_revision_quirks(
+ struct adreno_device *adreno_dev, struct platform_device *pdev)
+{
+ struct device_node *node;
+ int i;
+
+ node = adreno_get_soc_hw_revision_node(pdev);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ /* get chip id, fall back to parent if revision node does not have it */
+ if (of_property_read_u32(node, "qcom,chipid", &adreno_dev->chipid))
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,chipid", &adreno_dev->chipid))
+ KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
+ "No GPU chip ID was specified\n");
+
+ /* update quirk */
+ for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
+ if (of_property_read_bool(node, adreno_quirks[i].prop))
+ adreno_dev->quirks |= adreno_quirks[i].quirk;
+ }
+
+ return 0;
+}
+
static void
adreno_identify_gpu(struct adreno_device *adreno_dev)
{
@@ -762,11 +864,6 @@
struct adreno_gpudev *gpudev;
int i;
- if (kgsl_property_read_u32(KGSL_DEVICE(adreno_dev), "qcom,chipid",
- &adreno_dev->chipid))
- KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
- "No GPU chip ID was specified\n");
-
adreno_dev->gpucore = _get_gpu_core(adreno_dev->chipid);
if (adreno_dev->gpucore == NULL)
@@ -932,31 +1029,12 @@
return of_id ? (struct adreno_device *) of_id->data : NULL;
}
-static struct {
- unsigned int quirk;
- const char *prop;
-} adreno_quirks[] = {
- { ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
- { ADRENO_QUIRK_IOMMU_SYNC, "qcom,gpu-quirk-iommu-sync" },
- { ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
- { ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
- { ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
- "qcom,gpu-quirk-dp2clockgating-disable" },
- { ADRENO_QUIRK_DISABLE_LMLOADKILL,
- "qcom,gpu-quirk-lmloadkill-disable" },
- { ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
- { ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
- { ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW,
- "qcom,gpu-quirk-limit-uche-gbif-rw" },
-};
-
static int adreno_of_get_power(struct adreno_device *adreno_dev,
struct platform_device *pdev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct device_node *node = pdev->dev.of_node;
struct resource *res;
- int i;
unsigned int timeout;
if (of_property_read_string(node, "label", &pdev->name)) {
@@ -967,12 +1045,6 @@
if (adreno_of_read_property(node, "qcom,id", &pdev->id))
return -EINVAL;
- /* Set up quirks and other boolean options */
- for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
- if (of_property_read_bool(node, adreno_quirks[i].prop))
- adreno_dev->quirks |= adreno_quirks[i].quirk;
- }
-
/* Get starting physical address of device registers */
res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
device->iomemname);
@@ -1134,6 +1206,12 @@
return -ENODEV;
}
+ status = adreno_update_soc_hw_revision_quirks(adreno_dev, pdev);
+ if (status) {
+ device->pdev = NULL;
+ return status;
+ }
+
/* Get the chip ID from the DT and set up target specific parameters */
adreno_identify_gpu(adreno_dev);
@@ -1517,7 +1595,7 @@
adreno_writereg64(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
- KGSL_IOMMU_SECURE_BASE);
+ KGSL_IOMMU_SECURE_BASE(&device->mmu));
adreno_writereg(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
KGSL_IOMMU_SECURE_SIZE);
@@ -1848,11 +1926,6 @@
error_mmu_off:
kgsl_mmu_stop(&device->mmu);
- if (gpudev->oob_clear &&
- ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
- gpudev->oob_clear(adreno_dev,
- OOB_BOOT_SLUMBER_CLEAR_MASK);
- }
error_pwr_off:
/* set the state back to original state */
@@ -2106,7 +2179,7 @@
* anything to mmap().
*/
shadowprop.gpuaddr =
- (unsigned int) device->memstore.gpuaddr;
+ (unsigned long)device->memstore.gpuaddr;
shadowprop.size = device->memstore.size;
/* GSL needs this to be set, even if it
* appears to be meaningless
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index baf366e..a615dca 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2424,8 +2424,8 @@
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
A5XX_CP_RB_CNTL_DEFAULT);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
- rb->buffer_desc.gpuaddr);
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+ ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
ret = a5xx_microcode_load(adreno_dev);
if (ret)
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 6275c19..c734123 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -63,6 +63,9 @@
{ adreno_is_a615, a615_gbif },
};
+
+static unsigned long a6xx_oob_state_bitmask;
+
struct kgsl_hwcg_reg {
unsigned int off;
unsigned int val;
@@ -324,7 +327,6 @@
static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
{ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
{ A6XX_CP_CHICKEN_DBG, 0x0 },
- { A6XX_CP_ADDR_MODE_CNTL, 0x0 },
{ A6XX_CP_DBG_ECO_CNTL, 0x0 },
{ A6XX_CP_PROTECT_CNTL, 0x0 },
{ A6XX_CP_PROTECT_REG, 0x0 },
@@ -362,7 +364,7 @@
{ A6XX_CP_AHB_CNTL, 0x0 },
};
-static struct reg_list_pair a615_ifpc_pwrup_reglist[] = {
+static struct reg_list_pair a615_pwrup_reglist[] = {
{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
};
@@ -602,7 +604,6 @@
uint32_t i;
struct cpu_gpu_lock *lock;
struct reg_list_pair *r;
- uint16_t a615_list_size = 0;
/* Set up the register values */
for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
@@ -615,19 +616,6 @@
kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
}
- if (adreno_is_a615(adreno_dev)) {
- for (i = 0; i < ARRAY_SIZE(a615_ifpc_pwrup_reglist); i++) {
- r = &a615_ifpc_pwrup_reglist[i];
- kgsl_regread(KGSL_DEVICE(adreno_dev),
- r->offset, &r->val);
- }
-
- a615_list_size = sizeof(a615_ifpc_pwrup_reglist);
-
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
- a615_ifpc_pwrup_reglist, a615_list_size);
- }
-
lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
lock->flag_ucode = 0;
lock->flag_kmd = 0;
@@ -646,16 +634,29 @@
* of the static IFPC-only register list.
*/
lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
- sizeof(a6xx_pwrup_reglist) + a615_list_size) >> 2;
- lock->list_offset = (sizeof(a6xx_ifpc_pwrup_reglist) +
- a615_list_size) >> 2;
+ sizeof(a6xx_pwrup_reglist)) >> 2;
+ lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
- + a615_list_size,
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
- + sizeof(a6xx_ifpc_pwrup_reglist) + a615_list_size,
- a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist));
+ + sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist,
+ sizeof(a6xx_pwrup_reglist));
+
+ if (adreno_is_a615(adreno_dev)) {
+ for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) {
+ r = &a615_pwrup_reglist[i];
+ kgsl_regread(KGSL_DEVICE(adreno_dev),
+ r->offset, &r->val);
+ }
+
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
+ + sizeof(a6xx_ifpc_pwrup_reglist)
+ + sizeof(a6xx_pwrup_reglist), a615_pwrup_reglist,
+ sizeof(a615_pwrup_reglist));
+
+ lock->list_length += sizeof(a615_pwrup_reglist);
+ }
}
/*
@@ -1083,8 +1084,8 @@
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
A6XX_CP_RB_CNTL_DEFAULT);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
- rb->buffer_desc.gpuaddr);
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+ ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
ret = a6xx_microcode_load(adreno_dev);
if (ret)
@@ -1451,7 +1452,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
int ret = 0;
- if (!kgsl_gmu_isenabled(device))
+ if (!kgsl_gmu_isenabled(device) || !clear_mask)
return 0;
kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);
@@ -1467,6 +1468,8 @@
kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
+ set_bit((fls(clear_mask) - 1), &a6xx_oob_state_bitmask);
+
trace_kgsl_gmu_oob_set(set_mask);
return ret;
}
@@ -1481,10 +1484,15 @@
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (!kgsl_gmu_isenabled(device))
+ if (!kgsl_gmu_isenabled(device) || !clear_mask)
return;
- kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, clear_mask);
+ if (test_and_clear_bit(fls(clear_mask) - 1,
+ &a6xx_oob_state_bitmask))
+ kgsl_gmu_regwrite(device,
+ A6XX_GMU_HOST2GMU_INTR_SET,
+ clear_mask);
+
trace_kgsl_gmu_oob_clear(clear_mask);
}
@@ -2023,17 +2031,17 @@
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = &device->gmu;
- unsigned int status, status2;
+ unsigned int status2;
+ uint64_t ts1;
+ ts1 = read_AO_counter(device);
if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
kgsl_gmu_regread(device,
- A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &status);
- kgsl_gmu_regread(device,
A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
dev_err(&gmu->pdev->dev,
- "GMU not idling: status=0x%x, status2=0x%x\n",
- status, status2);
+ "GMU not idling: status2=0x%x %llx %llx\n",
+ status2, ts1, read_AO_counter(device));
return -ETIMEDOUT;
}
@@ -2308,6 +2316,9 @@
ret = a6xx_gmu_suspend(device);
break;
case GMU_FW_STOP:
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+ a6xx_oob_clear(adreno_dev,
+ OOB_BOOT_SLUMBER_CLEAR_MASK);
ret = a6xx_rpmh_power_off_gpu(device);
break;
case GMU_DCVS_NOHFI:
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 5572cd7..afd1be5 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -376,6 +376,7 @@
A6XX_DBGBUS_CX = 0x17,
A6XX_DBGBUS_GMU_GX = 0x18,
A6XX_DBGBUS_TPFCHE = 0x19,
+ A6XX_DBGBUS_GBIF_GX = 0x1a,
A6XX_DBGBUS_GPC = 0x1d,
A6XX_DBGBUS_LARC = 0x1e,
A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
@@ -1161,11 +1162,14 @@
}
header->id = block->block_id;
+ if ((block->block_id == A6XX_DBGBUS_VBIF) &&
+ adreno_has_gbif(adreno_dev))
+ header->id = A6XX_DBGBUS_GBIF_GX;
header->count = dwords * 2;
block_id = block->block_id;
/* GMU_GX data is read using the GMU_CX block id on A630 */
- if (adreno_is_a630(adreno_dev) &&
+ if ((adreno_is_a630(adreno_dev) || adreno_is_a615(adreno_dev)) &&
(block_id == A6XX_DBGBUS_GMU_GX))
block_id = A6XX_DBGBUS_GMU_CX;
@@ -1428,18 +1432,18 @@
KGSL_SNAPSHOT_SECTION_DEBUGBUS,
snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
- /*
- * Get debugbus for GBIF CX part if GPU has GBIF block
- * GBIF uses exactly same ID as of VBIF so use
- * it as it is.
- */
- if (adreno_has_gbif(adreno_dev))
- kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUGBUS,
- snapshot,
- a6xx_snapshot_cx_dbgc_debugbus_block,
- (void *) &a6xx_vbif_debugbus_blocks);
}
+ /*
+ * Get debugbus for GBIF CX part if GPU has GBIF block
+ * GBIF uses exactly same ID as of VBIF so use
+ * it as it is.
+ */
+ if (adreno_has_gbif(adreno_dev))
+ kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+ snapshot,
+ a6xx_snapshot_cx_dbgc_debugbus_block,
+ (void *) &a6xx_vbif_debugbus_blocks);
}
}
@@ -1474,6 +1478,8 @@
a6xx_gmu_gx_registers,
ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
}
+
+ a6xx_snapshot_debugbus(device, snapshot);
}
/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
@@ -1631,8 +1637,6 @@
a6xx_snapshot_dbgahb_regs(device, snapshot);
}
- a6xx_snapshot_debugbus(device, snapshot);
-
}
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index f57fbb6..5039a06 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2998,7 +2998,7 @@
long ret = 0;
bool full_flush = false;
uint64_t size = 0;
- int i, count = 0;
+ int i;
void __user *ptr;
if (param->count == 0 || param->count > 128)
@@ -3010,8 +3010,8 @@
entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
if (entries == NULL) {
- ret = -ENOMEM;
- goto out;
+ kfree(objs);
+ return -ENOMEM;
}
ptr = to_user_ptr(param->objs);
@@ -3028,8 +3028,6 @@
if (entries[i] == NULL)
continue;
- count++;
-
if (!(objs[i].op & KGSL_GPUMEM_CACHE_RANGE))
size += entries[i]->memdesc.size;
else if (objs[i].offset < entries[i]->memdesc.size)
@@ -3038,25 +3036,23 @@
full_flush = check_full_flush(size, objs[i].op);
if (full_flush) {
trace_kgsl_mem_sync_full_cache(i, size);
- break;
+ goto out;
}
ptr += sizeof(*objs);
}
- if (!full_flush) {
- for (i = 0; !ret && i < param->count; i++)
- if (entries[i])
- ret = _kgsl_gpumem_sync_cache(entries[i],
- objs[i].offset, objs[i].length,
- objs[i].op);
- }
+ for (i = 0; !ret && i < param->count; i++)
+ if (entries[i])
+ ret = _kgsl_gpumem_sync_cache(entries[i],
+ objs[i].offset, objs[i].length,
+ objs[i].op);
+out:
for (i = 0; i < param->count; i++)
if (entries[i])
kgsl_mem_entry_put(entries[i]);
-out:
kfree(entries);
kfree(objs);
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 66fc011..10446f7 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1307,10 +1307,13 @@
do {
if (!regulator_is_enabled(gmu->cx_gdsc))
return 0;
- cond_resched();
+ usleep_range(10, 100);
} while (!(time_after(jiffies, t)));
+ if (!regulator_is_enabled(gmu->cx_gdsc))
+ return 0;
+
dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout");
return -ETIMEDOUT;
}
@@ -1635,6 +1638,8 @@
unsigned int fence_mask)
{
unsigned int status, i;
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ unsigned int reg_offset = gpudev->reg_offsets->offsets[offset];
adreno_writereg(adreno_dev, offset, val);
@@ -1659,6 +1664,6 @@
}
dev_err(adreno_dev->dev.dev,
- "GMU fenced register write timed out: reg %x\n", offset);
+ "GMU fenced register write timed out: reg 0x%x\n", reg_offset);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 0338c5fd..60c56a06 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,12 +35,14 @@
#include "kgsl_pwrctrl.h"
#define CP_APERTURE_REG 0
+#define CP_SMMU_APERTURE_ID 0x1B
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
-#define ADDR_IN_GLOBAL(_a) \
- (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
- ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
+#define ADDR_IN_GLOBAL(_mmu, _a) \
+ (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
+ ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
+ KGSL_IOMMU_GLOBAL_MEM_SIZE)))
/*
* Flag to set SMMU memory attributes required to
@@ -163,14 +165,19 @@
}
void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
- struct kgsl_memdesc *entry)
+ struct kgsl_memdesc *memdesc)
{
- if (!kgsl_mmu_is_secured(&device->mmu))
+ if (!kgsl_mmu_is_secured(&device->mmu) || memdesc == NULL)
return;
- if (entry != NULL && entry->pagetable->name == KGSL_MMU_SECURE_PT)
- kgsl_mmu_unmap(entry->pagetable, entry);
+ /* Check if an empty memdesc got passed in */
+ if ((memdesc->gpuaddr == 0) || (memdesc->size == 0))
+ return;
+ if (memdesc->pagetable) {
+ if (memdesc->pagetable->name == KGSL_MMU_SECURE_PT)
+ kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+ }
}
int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
@@ -184,7 +191,8 @@
if (entry != NULL) {
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
entry->pagetable = pagetable;
- entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+ entry->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu) +
+ secure_global_size;
ret = kgsl_mmu_map(pagetable, entry);
if (ret == 0)
@@ -223,7 +231,8 @@
KGSL_IOMMU_GLOBAL_MEM_SIZE))
return;
- memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
+ memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
+
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
global_pt_alloc += memdesc->size;
@@ -641,7 +650,7 @@
/* Set the maximum possible size as an initial value */
nextentry->gpuaddr = (uint64_t) -1;
- if (ADDR_IN_GLOBAL(faultaddr)) {
+ if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
_get_global_entries(faultaddr, preventry, nextentry);
} else if (context) {
private = context->proc_priv;
@@ -1030,13 +1039,13 @@
struct kgsl_iommu_pt *pt)
{
if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
- pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE;
- pt->va_end = KGSL_IOMMU_SECURE_END;
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+ pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
+ pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+ pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
- pt->compat_va_end = KGSL_IOMMU_SVM_END32;
+ pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
pt->va_start = KGSL_IOMMU_VA_BASE64;
pt->va_end = KGSL_IOMMU_VA_END64;
}
@@ -1045,7 +1054,7 @@
pagetable->name != KGSL_MMU_SECURE_PT) {
if ((BITS_PER_LONG == 32) || is_compat_task()) {
pt->svm_start = KGSL_IOMMU_SVM_BASE32;
- pt->svm_end = KGSL_IOMMU_SVM_END32;
+ pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
} else {
pt->svm_start = KGSL_IOMMU_SVM_BASE64;
pt->svm_end = KGSL_IOMMU_SVM_END64;
@@ -1059,19 +1068,19 @@
{
if (mmu->secured) {
if (pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
- pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE;
- pt->va_end = KGSL_IOMMU_SECURE_END;
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+ pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
+ pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+ pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
- pt->va_end = KGSL_IOMMU_SECURE_BASE;
+ pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
- pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
+ pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
@@ -1166,7 +1175,7 @@
desc.args[3] = 0xFFFFFFFF;
desc.arginfo = SCM_ARGS(4);
- return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 0x1B), &desc);
+ return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, CP_SMMU_APERTURE_ID), &desc);
}
static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
@@ -1209,7 +1218,8 @@
goto done;
}
- if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE)) {
+ if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) &&
+ scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) {
ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
if (ret) {
pr_err("SMMU aperture programming call failed with error %d\n",
@@ -2381,7 +2391,8 @@
struct rb_node *node;
/* Make sure the requested address doesn't fall in the global range */
- if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size))
+ if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
+ ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
return -ENOMEM;
spin_lock(&pagetable->lock);
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index acf8ae4..65460f7 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,12 +24,17 @@
* are mapped into all pagetables.
*/
#define KGSL_IOMMU_GLOBAL_MEM_SIZE (20 * SZ_1M)
-#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE32 0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE64 0xfc000000
+
+#define KGSL_IOMMU_GLOBAL_MEM_BASE(__mmu) \
+ (MMU_FEATURE(__mmu, KGSL_MMU_64BIT) ? \
+ KGSL_IOMMU_GLOBAL_MEM_BASE64 : KGSL_IOMMU_GLOBAL_MEM_BASE32)
#define KGSL_IOMMU_SECURE_SIZE SZ_256M
-#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE
-#define KGSL_IOMMU_SECURE_BASE \
- (KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)
+#define KGSL_IOMMU_SECURE_END(_mmu) KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)
+#define KGSL_IOMMU_SECURE_BASE(_mmu) \
+ (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) - KGSL_IOMMU_SECURE_SIZE)
#define KGSL_IOMMU_SVM_BASE32 0x300000
#define KGSL_IOMMU_SVM_END32 (0xC0000000 - SZ_16M)
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index c4ff22f..3f7ea18 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -910,12 +910,12 @@
min_level = pwr->thermal_pwrlevel_floor;
/* Thermal limit cannot be lower than lowest non-zero operating freq */
- for (level = 0; level < (pwr->num_pwrlevels - 1); level++)
+ for (level = 0; level < (pwr->num_pwrlevels - 1); level++) {
if (pwr->pwrlevels[level].gpu_freq == max_freq)
max_level = level;
if (pwr->pwrlevels[level].gpu_freq == min_freq)
min_level = level;
-
+ }
pwr->thermal_pwrlevel = max_level;
pwr->thermal_pwrlevel_floor = min_level;
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index d4165b3..a4de6a0 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -734,7 +734,8 @@
fput(sync_file->file);
else if (sfence)
fence_put(&sfence->fence);
- kgsl_syncsource_put(syncsource);
+ else
+ kgsl_syncsource_put(syncsource);
}
return ret;
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 16a3e7d..f3e16b3 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,9 +39,22 @@
#define PMI_CHG_SCALE_2 391750000000
#define QPNP_VADC_HC_VREF_CODE 0x4000
#define QPNP_VADC_HC_VDD_REFERENCE_MV 1875
+#define CHRG_SCALE_1 -250
+#define CHRG_SCALE_2 377500000
+#define DIE_SCALE_1 500
+#define DIE_SCALE_2 -273150000
+
/* Clamp negative ADC code to 0 */
#define QPNP_VADC_HC_MAX_CODE 0x7FFF
+/*Invalid current reading*/
+#define QPNP_IADC_INV 0x8000
+
+#define IADC_SCALE_1 0xffff
+#define IADC_SCALE_2 152593
+
+#define USBIN_I_SCALE 25
+
/*
* Units for temperature below (on x axis) is in 0.1DegC as
* required by the battery driver. Note the resolution used
@@ -590,6 +603,80 @@
{30, 125}
};
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_batt_therm[] = {
+ {1770, -400},
+ {1757, -380},
+ {1743, -360},
+ {1727, -340},
+ {1710, -320},
+ {1691, -300},
+ {1671, -280},
+ {1650, -260},
+ {1627, -240},
+ {1602, -220},
+ {1576, -200},
+ {1548, -180},
+ {1519, -160},
+ {1488, -140},
+ {1456, -120},
+ {1423, -100},
+ {1388, -80},
+ {1353, -60},
+ {1316, -40},
+ {1278, -20},
+ {1240, 0},
+ {1201, 20},
+ {1162, 40},
+ {1122, 60},
+ {1082, 80},
+ {1042, 100},
+ {1003, 120},
+ {964, 140},
+ {925, 160},
+ {887, 180},
+ {849, 200},
+ {812, 220},
+ {777, 240},
+ {742, 260},
+ {708, 280},
+ {675, 300},
+ {643, 320},
+ {613, 340},
+ {583, 360},
+ {555, 380},
+ {528, 400},
+ {502, 420},
+ {477, 440},
+ {453, 460},
+ {430, 480},
+ {409, 500},
+ {388, 520},
+ {369, 540},
+ {350, 560},
+ {333, 580},
+ {316, 600},
+ {300, 620},
+ {285, 640},
+ {271, 660},
+ {257, 680},
+ {245, 700},
+ {233, 720},
+ {221, 740},
+ {210, 760},
+ {200, 780},
+ {190, 800},
+ {181, 820},
+ {173, 840},
+ {164, 860},
+ {157, 880},
+ {149, 900},
+ {142, 920},
+ {136, 940},
+ {129, 960},
+ {124, 980}
+};
+
/*
* Voltage to temperature table for 100k pull up for NTCG104EF104 with
* 1.875V reference.
@@ -899,6 +986,36 @@
}
EXPORT_SYMBOL(qpnp_adc_tdkntcg_therm);
+int32_t qpnp_adc_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t batt_thm_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ if (adc_properties->adc_hc) {
+ /* (code * vref_vadc (1.875V) * 1000) / (scale_code * 1000) */
+ if (adc_code > QPNP_VADC_HC_MAX_CODE)
+ adc_code = 0;
+ batt_thm_voltage = (int64_t) adc_code;
+ batt_thm_voltage *= (adc_properties->adc_vdd_reference
+ * 1000);
+ batt_thm_voltage = div64_s64(batt_thm_voltage,
+ adc_properties->full_scale_code * 1000);
+ qpnp_adc_map_voltage_temp(adcmap_batt_therm,
+ ARRAY_SIZE(adcmap_batt_therm),
+ batt_thm_voltage, &adc_chan_result->physical);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_batt_therm);
+
int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
int32_t adc_code,
const struct qpnp_adc_properties *adc_properties,
@@ -920,6 +1037,70 @@
}
EXPORT_SYMBOL(qpnp_adc_scale_batt_therm);
+int32_t qpnp_adc_scale_chrg_temp(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int rc = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+ chan_properties, adc_chan_result);
+ if (rc < 0)
+ return rc;
+
+ pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+ adc_chan_result->physical);
+ adc_chan_result->physical = (int64_t) ((CHRG_SCALE_1) *
+ (adc_chan_result->physical));
+ adc_chan_result->physical = (int64_t) (adc_chan_result->physical +
+ CHRG_SCALE_2);
+ adc_chan_result->physical = (int64_t) adc_chan_result->physical;
+ adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+ 1000000);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_chrg_temp);
+
+int32_t qpnp_adc_scale_die_temp(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int rc = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+ chan_properties, adc_chan_result);
+ if (rc < 0)
+ return rc;
+
+ pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+ adc_chan_result->physical);
+ adc_chan_result->physical = (int64_t) ((DIE_SCALE_1) *
+ (adc_chan_result->physical));
+ adc_chan_result->physical = (int64_t) (adc_chan_result->physical +
+ DIE_SCALE_2);
+ adc_chan_result->physical = (int64_t) adc_chan_result->physical;
+ adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+ 1000000);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_die_temp);
+
int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *chip,
int32_t adc_code,
const struct qpnp_adc_properties *adc_properties,
@@ -1279,6 +1460,73 @@
}
EXPORT_SYMBOL(qpnp_adc_scale_default);
+int32_t qpnp_iadc_scale_default(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t scale_current = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ if (adc_properties->adc_hc) {
+
+ if (adc_code == QPNP_IADC_INV)
+ return -EINVAL;
+
+ scale_current = (int64_t) adc_code;
+
+ if (adc_code > QPNP_IADC_INV) {
+ scale_current = ((~scale_current) & IADC_SCALE_1);
+ scale_current++;
+ scale_current = -scale_current;
+ }
+ }
+
+ scale_current *= IADC_SCALE_2;
+ scale_current = div64_s64(scale_current,
+ 1000);
+ scale_current *= chan_properties->offset_gain_denominator;
+ scale_current = div64_s64(scale_current,
+ chan_properties->offset_gain_numerator);
+ adc_chan_result->measurement = scale_current;
+ /*
+ * Note: adc_chan_result->measurement is in uA.
+ */
+ adc_chan_result->physical = adc_chan_result->measurement;
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_scale_default);
+
+int qpnp_adc_scale_usbin_curr(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int rc = 0;
+
+ rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+ chan_properties, adc_chan_result);
+ if (rc < 0)
+ return rc;
+
+ pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+ adc_chan_result->physical);
+ adc_chan_result->physical = (int64_t) ((USBIN_I_SCALE) *
+ adc_chan_result->physical);
+ adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+ 10);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_usbin_curr);
+
int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
struct qpnp_adc_tm_btm_param *param,
uint32_t *low_threshold, uint32_t *high_threshold)
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 6fde46e..8b44c0f 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -219,6 +219,11 @@
[SCALE_NCP_03WF683_THERM] = {qpnp_adc_scale_therm_ncp03},
[SCALE_QRD_SKUT1_BATT_THERM] = {qpnp_adc_scale_qrd_skut1_batt_therm},
[SCALE_PMI_CHG_TEMP] = {qpnp_adc_scale_pmi_chg_temp},
+ [SCALE_BATT_THERM_TEMP] = {qpnp_adc_batt_therm},
+ [SCALE_CHRG_TEMP] = {qpnp_adc_scale_chrg_temp},
+ [SCALE_DIE_TEMP] = {qpnp_adc_scale_die_temp},
+ [SCALE_I_DEFAULT] = {qpnp_iadc_scale_default},
+ [SCALE_USBIN_I] = {qpnp_adc_scale_usbin_curr},
};
static struct qpnp_vadc_rscale_fn adc_vadc_rscale_fn[] = {
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 7ef2710..81889b6 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,17 +23,23 @@
static struct tmc_drvdata *tmcdrvdata;
static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, loff_t *ppos,
- size_t bytes, size_t *len)
+ size_t bytes, size_t *len, char **bufp)
{
- if (*len >= bytes) {
- atomic_dec(&byte_cntr_data->irq_cnt);
+
+ if (*bufp >= (char *)(tmcdrvdata->vaddr + tmcdrvdata->size))
+ *bufp = tmcdrvdata->vaddr;
+
+ if (*len >= bytes)
*len = bytes;
- } else {
- if (((uint32_t)*ppos % bytes) + *len > bytes)
- *len = bytes - ((uint32_t)*ppos % bytes);
- if ((*len + (uint32_t)*ppos) % bytes == 0)
- atomic_dec(&byte_cntr_data->irq_cnt);
- }
+ else if (((uint32_t)*ppos % bytes) + *len > bytes)
+ *len = bytes - ((uint32_t)*ppos % bytes);
+
+ if ((*bufp + *len) > (char *)(tmcdrvdata->vaddr +
+ tmcdrvdata->size))
+ *len = (char *)(tmcdrvdata->vaddr + tmcdrvdata->size) -
+ *bufp;
+ if (*len == bytes || (*len + (uint32_t)*ppos) % bytes == 0)
+ atomic_dec(&byte_cntr_data->irq_cnt);
}
static void tmc_etr_sg_read_pos(loff_t *ppos,
@@ -96,7 +102,7 @@
if (*len >= (bytes - ((uint32_t)*ppos % bytes)))
*len = bytes - ((uint32_t)*ppos % bytes);
- if ((*len + (uint32_t)*ppos) % bytes == 0)
+ if (*len == bytes || (*len + (uint32_t)*ppos) % bytes == 0)
atomic_dec(&tmcdrvdata->byte_cntr->irq_cnt);
}
@@ -153,11 +159,12 @@
if (!byte_cntr_data->read_active)
goto err0;
}
- bufp = (char *)(tmcdrvdata->vaddr + *ppos);
+ bufp = (char *)(tmcdrvdata->buf + *ppos);
if (tmcdrvdata->mem_type == TMC_ETR_MEM_TYPE_CONTIG)
tmc_etr_read_bytes(byte_cntr_data, ppos,
- byte_cntr_data->block_size, &len);
+ byte_cntr_data->block_size, &len,
+ &bufp);
else
tmc_etr_sg_read_pos(ppos, byte_cntr_data->block_size, 0,
&len, &bufp);
@@ -179,7 +186,7 @@
if (tmcdrvdata->mem_type == TMC_ETR_MEM_TYPE_CONTIG)
tmc_etr_read_bytes(byte_cntr_data, ppos,
byte_cntr_data->block_size,
- &len);
+ &len, &bufp);
else
tmc_etr_sg_read_pos(ppos,
byte_cntr_data->block_size,
@@ -229,7 +236,7 @@
mutex_lock(&byte_cntr_data->byte_cntr_lock);
byte_cntr_data->enable = false;
- coresight_csr_set_byte_cntr(0);
+ coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
}
@@ -243,7 +250,7 @@
mutex_lock(&byte_cntr_data->byte_cntr_lock);
byte_cntr_data->read_active = false;
- coresight_csr_set_byte_cntr(0);
+ coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return 0;
@@ -261,7 +268,8 @@
return -EINVAL;
}
- coresight_csr_set_byte_cntr(byte_cntr_data->block_size);
+ coresight_csr_set_byte_cntr(byte_cntr_data->csr,
+ byte_cntr_data->block_size);
fp->private_data = byte_cntr_data;
nonseekable_open(in, fp);
byte_cntr_data->enable = true;
@@ -364,6 +372,7 @@
tmcdrvdata = drvdata;
byte_cntr_data->byte_cntr_irq = byte_cntr_irq;
+ byte_cntr_data->csr = drvdata->csr;
atomic_set(&byte_cntr_data->irq_cnt, 0);
init_waitqueue_head(&byte_cntr_data->wq);
mutex_init(&byte_cntr_data->byte_cntr_lock);
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.h b/drivers/hwtracing/coresight/coresight-byte-cntr.h
index 94e9089..b104d92 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.h
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.h
@@ -16,6 +16,7 @@
atomic_t irq_cnt;
wait_queue_head_t wq;
struct mutex byte_cntr_lock;
+ struct coresight_csr *csr;
};
extern void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data);
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 1ec73a5..9069530 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, 2015-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, 2015-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/coresight.h>
+#include <linux/clk.h>
#include "coresight-priv.h"
@@ -77,15 +78,32 @@
struct device *dev;
struct coresight_device *csdev;
uint32_t blksize;
+ struct coresight_csr csr;
+ struct clk *clk;
+ spinlock_t spin_lock;
+ bool usb_bam_support;
+ bool hwctrl_set_support;
+ bool set_byte_cntr_support;
+ bool timestamp_support;
};
-static struct csr_drvdata *csrdrvdata;
+static LIST_HEAD(csr_list);
+#define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
-void msm_qdss_csr_enable_bam_to_usb(void)
+void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
{
- struct csr_drvdata *drvdata = csrdrvdata;
+ struct csr_drvdata *drvdata;
uint32_t usbbamctrl, usbflshctrl;
+ unsigned long flags;
+ if (csr == NULL)
+ return;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+ return;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
CSR_UNLOCK(drvdata);
usbbamctrl = csr_readl(drvdata, CSR_USBBAMCTRL);
@@ -102,14 +120,24 @@
csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
CSR_LOCK(drvdata);
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
}
EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
-void msm_qdss_csr_disable_bam_to_usb(void)
+void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr)
{
- struct csr_drvdata *drvdata = csrdrvdata;
+ struct csr_drvdata *drvdata;
uint32_t usbbamctrl;
+ unsigned long flags;
+ if (csr == NULL)
+ return;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+ return;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
CSR_UNLOCK(drvdata);
usbbamctrl = csr_readl(drvdata, CSR_USBBAMCTRL);
@@ -117,14 +145,24 @@
csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
CSR_LOCK(drvdata);
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
}
EXPORT_SYMBOL(msm_qdss_csr_disable_bam_to_usb);
-void msm_qdss_csr_disable_flush(void)
+void msm_qdss_csr_disable_flush(struct coresight_csr *csr)
{
- struct csr_drvdata *drvdata = csrdrvdata;
+ struct csr_drvdata *drvdata;
uint32_t usbflshctrl;
+ unsigned long flags;
+ if (csr == NULL)
+ return;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+ return;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
CSR_UNLOCK(drvdata);
usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
@@ -132,14 +170,25 @@
csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
CSR_LOCK(drvdata);
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
}
EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
-int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val)
+int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
+ uint32_t val)
{
- struct csr_drvdata *drvdata = csrdrvdata;
+ struct csr_drvdata *drvdata;
int ret = 0;
+ unsigned long flags;
+ if (csr == NULL)
+ return -EINVAL;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->hwctrl_set_support)
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
CSR_UNLOCK(drvdata);
if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL0))
@@ -154,15 +203,24 @@
ret = -EINVAL;
CSR_LOCK(drvdata);
-
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
return ret;
}
EXPORT_SYMBOL(coresight_csr_hwctrl_set);
-void coresight_csr_set_byte_cntr(uint32_t count)
+void coresight_csr_set_byte_cntr(struct coresight_csr *csr, uint32_t count)
{
- struct csr_drvdata *drvdata = csrdrvdata;
+ struct csr_drvdata *drvdata;
+ unsigned long flags;
+ if (csr == NULL)
+ return;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->set_byte_cntr_support)
+ return;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
CSR_UNLOCK(drvdata);
csr_writel(drvdata, count, CSR_BYTECNTVAL);
@@ -171,9 +229,85 @@
mb();
CSR_LOCK(drvdata);
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
}
EXPORT_SYMBOL(coresight_csr_set_byte_cntr);
+struct coresight_csr *coresight_csr_get(const char *name)
+{
+ struct coresight_csr *csr;
+
+ list_for_each_entry(csr, &csr_list, link) {
+ if (!strcmp(csr->name, name))
+ return csr;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(coresight_csr_get);
+
+static ssize_t csr_show_timestamp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t size = 0;
+ uint64_t time_tick = 0;
+ uint32_t val, time_val0, time_val1;
+ int ret;
+ unsigned long flags;
+
+ struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->timestamp_support) {
+ dev_err(dev, "Invalid param\n");
+ return 0;
+ }
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
+ CSR_UNLOCK(drvdata);
+
+ val = csr_readl(drvdata, CSR_TIMESTAMPCTRL);
+
+ val = val & ~BIT(0);
+ csr_writel(drvdata, val, CSR_TIMESTAMPCTRL);
+
+ val = val | BIT(0);
+ csr_writel(drvdata, val, CSR_TIMESTAMPCTRL);
+
+ time_val0 = csr_readl(drvdata, CSR_QDSSTIMEVAL0);
+ time_val1 = csr_readl(drvdata, CSR_QDSSTIMEVAL1);
+
+ CSR_LOCK(drvdata);
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ time_tick |= (uint64_t)time_val1 << 32;
+ time_tick |= (uint64_t)time_val0;
+ size = scnprintf(buf, PAGE_SIZE, "%llu\n", time_tick);
+ dev_dbg(dev, "timestamp : %s\n", buf);
+ return size;
+}
+
+static DEVICE_ATTR(timestamp, 0444, csr_show_timestamp, NULL);
+
+static struct attribute *csr_attrs[] = {
+ &dev_attr_timestamp.attr,
+ NULL,
+};
+
+static struct attribute_group csr_attr_grp = {
+ .attrs = csr_attrs,
+};
+static const struct attribute_group *csr_attr_grps[] = {
+ &csr_attr_grp,
+ NULL,
+};
+
static int csr_probe(struct platform_device *pdev)
{
int ret;
@@ -194,6 +328,10 @@
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
+ drvdata->clk = devm_clk_get(dev, "apb_pclk");
+ if (IS_ERR(drvdata->clk))
+ dev_dbg(dev, "csr not config clk\n");
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr-base");
if (!res)
return -ENODEV;
@@ -208,27 +346,65 @@
if (ret)
drvdata->blksize = BLKSIZE_256;
+ drvdata->usb_bam_support = of_property_read_bool(pdev->dev.of_node,
+ "qcom,usb-bam-support");
+ if (!drvdata->usb_bam_support)
+ dev_dbg(dev, "usb_bam support handled by other subsystem\n");
+ else
+ dev_dbg(dev, "usb_bam operation supported\n");
+
+ drvdata->hwctrl_set_support = of_property_read_bool(pdev->dev.of_node,
+ "qcom,hwctrl-set-support");
+ if (!drvdata->hwctrl_set_support)
+ dev_dbg(dev, "hwctrl_set_support handled by other subsystem\n");
+ else
+ dev_dbg(dev, "hwctrl_set_support operation supported\n");
+
+ drvdata->set_byte_cntr_support = of_property_read_bool(
+ pdev->dev.of_node, "qcom,set-byte-cntr-support");
+ if (!drvdata->set_byte_cntr_support)
+ dev_dbg(dev, "set byte_cntr_support handled by other subsystem\n");
+ else
+ dev_dbg(dev, "set_byte_cntr_support operation supported\n");
+
+ drvdata->timestamp_support = of_property_read_bool(pdev->dev.of_node,
+ "qcom,timestamp-support");
+ if (!drvdata->timestamp_support)
+ dev_dbg(dev, "timestamp_support handled by other subsystem\n");
+ else
+ dev_dbg(dev, "timestamp_support operation supported\n");
+
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->type = CORESIGHT_DEV_TYPE_NONE;
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
+ if (drvdata->timestamp_support)
+ desc->groups = csr_attr_grps;
+
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
/* Store the driver data pointer for use in exported functions */
- csrdrvdata = drvdata;
- dev_info(dev, "CSR initialized\n");
+ spin_lock_init(&drvdata->spin_lock);
+ drvdata->csr.name = ((struct coresight_platform_data *)
+ (pdev->dev.platform_data))->name;
+ list_add_tail(&drvdata->csr.link, &csr_list);
+
+ dev_info(dev, "CSR initialized: %s\n", drvdata->csr.name);
return 0;
}
static int csr_remove(struct platform_device *pdev)
{
+ unsigned long flags;
struct csr_drvdata *drvdata = platform_get_drvdata(pdev);
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
coresight_unregister(drvdata->csdev);
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-hwevent.c b/drivers/hwtracing/coresight/coresight-hwevent.c
index 22e9d6f..1e8872b 100644
--- a/drivers/hwtracing/coresight/coresight-hwevent.c
+++ b/drivers/hwtracing/coresight/coresight-hwevent.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,8 @@
struct regulator **hreg;
int nr_hmux;
struct hwevent_mux *hmux;
+ struct coresight_csr *csr;
+ const char *csr_name;
};
static int hwevent_enable(struct hwevent_drvdata *drvdata)
@@ -132,7 +134,7 @@
}
if (i == drvdata->nr_hmux) {
- ret = coresight_csr_hwctrl_set(addr, val);
+ ret = coresight_csr_hwctrl_set(drvdata->csr, addr, val);
if (ret) {
dev_err(dev, "invalid mux control register address\n");
ret = -EINVAL;
@@ -185,6 +187,17 @@
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
+ ret = of_get_coresight_csr_name(dev->of_node, &drvdata->csr_name);
+ if (ret) {
+ dev_err(dev, "No csr data\n");
+ } else {
+ drvdata->csr = coresight_csr_get(drvdata->csr_name);
+ if (IS_ERR(drvdata->csr)) {
+ dev_err(dev, "failed to get csr, defer probe\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
drvdata->nr_hmux = of_property_count_strings(pdev->dev.of_node,
"reg-names");
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index afe9f3d..ba721fd 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -1,5 +1,4 @@
-/* Copyright (c) 2011-2012, 2016-2017, The Linux Foundation.
- * All rights reserved.
+/* Copyright (c) 2011-2012, 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -73,6 +72,11 @@
CS_MODE_PERF,
};
+struct coresight_csr {
+ const char *name;
+ struct list_head link;
+};
+
/**
* struct cs_buffer - keep track of a recording session' specifics
* @cur: index of the current buffer
@@ -149,18 +153,24 @@
#endif
#ifdef CONFIG_CORESIGHT_CSR
-extern void msm_qdss_csr_enable_bam_to_usb(void);
-extern void msm_qdss_csr_disable_bam_to_usb(void);
-extern void msm_qdss_csr_disable_flush(void);
-extern int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val);
-extern void coresight_csr_set_byte_cntr(uint32_t count);
+extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
+extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
+ uint32_t val);
+extern void coresight_csr_set_byte_cntr(struct coresight_csr *csr,
+ uint32_t count);
+extern struct coresight_csr *coresight_csr_get(const char *name);
#else
-static inline void msm_qdss_csr_enable_bam_to_usb(void) {}
-static inline void msm_qdss_csr_disable_bam_to_usb(void) {}
-static inline void msm_qdss_csr_disable_flush(void) {}
-static inline int coresight_csr_hwctrl_set(uint64_t addr,
- uint32_t val) { return -EINVAL; }
-static inline void coresight_csr_set_byte_cntr(uint32_t count) {}
+static inline void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr) {}
+static inline void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr) {}
+static inline void msm_qdss_csr_disable_flush(struct coresight_csr *csr) {}
+static inline int coresight_csr_hwctrl_set(struct coresight_csr *csr,
+ uint64_t addr, uint32_t val) { return -EINVAL; }
+static inline void coresight_csr_set_byte_cntr(struct coresight_csr *csr,
+ uint32_t count) {}
+static inline struct coresight_csr *coresight_csr_get(const char *name)
+ { return NULL; }
#endif
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index eb70e7a..dcdc3f2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Copyright(C) 2016 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
@@ -579,7 +579,7 @@
return;
/* Configure and enable required CSR registers */
- msm_qdss_csr_enable_bam_to_usb();
+ msm_qdss_csr_enable_bam_to_usb(drvdata->csr);
/* Configure and enable ETR for usb bam output */
@@ -675,7 +675,7 @@
return;
/* Ensure periodic flush is disabled in CSR block */
- msm_qdss_csr_disable_flush();
+ msm_qdss_csr_disable_flush(drvdata->csr);
CS_UNLOCK(drvdata->base);
@@ -685,7 +685,7 @@
CS_LOCK(drvdata);
/* Disable CSR configuration */
- msm_qdss_csr_disable_bam_to_usb();
+ msm_qdss_csr_disable_bam_to_usb(drvdata->csr);
drvdata->enable_to_bam = false;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 6f13eb3..802d4f1 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Trace Memory Controller driver
*
@@ -611,6 +611,17 @@
dev_err(dev, "failed to get reset cti\n");
}
+ ret = of_get_coresight_csr_name(adev->dev.of_node, &drvdata->csr_name);
+ if (ret) {
+ dev_err(dev, "No csr data\n");
+ } else{
+ drvdata->csr = coresight_csr_get(drvdata->csr_name);
+ if (IS_ERR(drvdata->csr)) {
+ dev_err(dev, "failed to get csr, defer probe\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
desc.pdata = pdata;
desc.dev = dev;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index fe6bc76..36117ec 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -189,6 +189,8 @@
bool sticky_enable;
struct coresight_cti *cti_flush;
struct coresight_cti *cti_reset;
+ struct coresight_csr *csr;
+ const char *csr_name;
struct byte_cntr *byte_cntr;
};
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 5473fcf..be810fe 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -238,3 +238,21 @@
return ctidata;
}
EXPORT_SYMBOL(of_get_coresight_cti_data);
+
+int of_get_coresight_csr_name(struct device_node *node, const char **csr_name)
+{
+ int ret;
+ struct device_node *csr_node;
+
+ csr_node = of_parse_phandle(node, "coresight-csr", 0);
+ if (!csr_node)
+ return -EINVAL;
+
+ ret = of_property_read_string(csr_node, "coresight-name", csr_name);
+ of_node_put(csr_node);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(of_get_coresight_csr_name);
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index febcd9c..5fa0d4b 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -212,6 +212,9 @@
int warm_reset_poff_type;
int hard_reset_poff_type;
int shutdown_poff_type;
+ int resin_warm_reset_type;
+ int resin_hard_reset_type;
+ int resin_shutdown_type;
u16 base;
u8 subtype;
u8 pon_ver;
@@ -219,7 +222,12 @@
u8 warm_reset_reason2;
bool is_spon;
bool store_hard_reset_reason;
+ bool resin_hard_reset_disable;
+ bool resin_shutdown_disable;
+ bool ps_hold_hard_reset_disable;
+ bool ps_hold_shutdown_disable;
bool kpdpwr_dbc_enable;
+ bool resin_pon_reset;
ktime_t kpdpwr_last_release_time;
};
@@ -478,6 +486,7 @@
enum pon_power_off_type type)
{
int rc;
+ bool disable = false;
u16 rst_en_reg;
if (pon->pon_ver == QPNP_PON_GEN1_V1)
@@ -497,10 +506,12 @@
case PON_POWER_OFF_HARD_RESET:
if (pon->hard_reset_poff_type != -EINVAL)
type = pon->hard_reset_poff_type;
+ disable = pon->ps_hold_hard_reset_disable;
break;
case PON_POWER_OFF_SHUTDOWN:
if (pon->shutdown_poff_type != -EINVAL)
type = pon->shutdown_poff_type;
+ disable = pon->ps_hold_shutdown_disable;
break;
default:
break;
@@ -513,6 +524,13 @@
rst_en_reg, rc);
/*
+ * Check if ps-hold power off configuration needs to be disabled.
+ * If yes, then return without configuring.
+ */
+ if (disable)
+ return rc;
+
+ /*
* We need 10 sleep clock cycles here. But since the clock is
* internally generated, we need to add 50% tolerance to be
* conservative.
@@ -533,7 +551,80 @@
"Unable to write to addr=%hx, rc(%d)\n",
rst_en_reg, rc);
- dev_dbg(&pon->pdev->dev, "power off type = 0x%02X\n", type);
+ dev_dbg(&pon->pdev->dev, "ps_hold power off type = 0x%02X\n", type);
+ return rc;
+}
+
+static int qpnp_resin_pon_reset_config(struct qpnp_pon *pon,
+ enum pon_power_off_type type)
+{
+ int rc;
+ bool disable = false;
+ u16 rst_en_reg;
+
+ if (pon->pon_ver == QPNP_PON_GEN1_V1)
+ rst_en_reg = QPNP_PON_RESIN_S2_CNTL(pon);
+ else
+ rst_en_reg = QPNP_PON_RESIN_S2_CNTL2(pon);
+
+ /*
+ * Based on the poweroff type set for a PON device through device tree
+ * change the type being configured into PON_RESIN_S2_CTL.
+ */
+ switch (type) {
+ case PON_POWER_OFF_WARM_RESET:
+ if (pon->resin_warm_reset_type != -EINVAL)
+ type = pon->resin_warm_reset_type;
+ break;
+ case PON_POWER_OFF_HARD_RESET:
+ if (pon->resin_hard_reset_type != -EINVAL)
+ type = pon->resin_hard_reset_type;
+ disable = pon->resin_hard_reset_disable;
+ break;
+ case PON_POWER_OFF_SHUTDOWN:
+ if (pon->resin_shutdown_type != -EINVAL)
+ type = pon->resin_shutdown_type;
+ disable = pon->resin_shutdown_disable;
+ break;
+ default:
+ break;
+ }
+
+ rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_S2_CNTL_EN, 0);
+ if (rc)
+ dev_err(&pon->pdev->dev,
+ "Unable to write to addr=%hx, rc(%d)\n",
+ rst_en_reg, rc);
+
+ /*
+ * Check if resin power off configuration needs to be disabled.
+ * If yes, then return without configuring.
+ */
+ if (disable)
+ return rc;
+
+ /*
+ * We need 10 sleep clock cycles here. But since the clock is
+ * internally generated, we need to add 50% tolerance to be
+ * conservative.
+ */
+ udelay(500);
+
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_RESIN_S2_CNTL(pon),
+ QPNP_PON_S2_CNTL_TYPE_MASK, type);
+ if (rc)
+ dev_err(&pon->pdev->dev,
+ "Unable to write to addr=%x, rc(%d)\n",
+ QPNP_PON_RESIN_S2_CNTL(pon), rc);
+
+ rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_S2_CNTL_EN,
+ QPNP_PON_S2_CNTL_EN);
+ if (rc)
+ dev_err(&pon->pdev->dev,
+ "Unable to write to addr=%hx, rc(%d)\n",
+ rst_en_reg, rc);
+
+ dev_dbg(&pon->pdev->dev, "resin power off type = 0x%02X\n", type);
return rc;
}
@@ -588,6 +679,15 @@
rc);
goto out;
}
+ if (pon->resin_pon_reset) {
+ rc = qpnp_resin_pon_reset_config(pon, type);
+ if (rc) {
+ dev_err(&pon->pdev->dev,
+ "Error configuring secondary PON resin rc: %d\n",
+ rc);
+ goto out;
+ }
+ }
}
/* Set ship mode here if it has been requested */
if (!!pon_ship_mode_en) {
@@ -2337,6 +2437,69 @@
pon->shutdown_poff_type = -EINVAL;
}
+ pon->ps_hold_hard_reset_disable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ps-hold-hard-reset-disable");
+ pon->ps_hold_shutdown_disable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,ps-hold-shutdown-disable");
+
+
+ pon->resin_pon_reset = of_property_read_bool(pdev->dev.of_node,
+ "qcom,resin-pon-reset");
+
+ rc = of_property_read_u32(pon->pdev->dev.of_node,
+ "qcom,resin-warm-reset-type",
+ &pon->resin_warm_reset_type);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&pdev->dev, "Unable to read resin warm reset poweroff type rc: %d\n",
+ rc);
+ goto err_out;
+ }
+ pon->resin_warm_reset_type = -EINVAL;
+ } else if (pon->resin_warm_reset_type <= PON_POWER_OFF_RESERVED ||
+ pon->resin_warm_reset_type >= PON_POWER_OFF_MAX_TYPE) {
+ dev_err(&pdev->dev, "Invalid resin-warm-reset-type\n");
+ pon->resin_warm_reset_type = -EINVAL;
+ }
+
+ rc = of_property_read_u32(pon->pdev->dev.of_node,
+ "qcom,resin-hard-reset-type",
+ &pon->resin_hard_reset_type);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&pdev->dev, "Unable to read resin hard reset poweroff type rc: %d\n",
+ rc);
+ goto err_out;
+ }
+ pon->resin_hard_reset_type = -EINVAL;
+ } else if (pon->resin_hard_reset_type <= PON_POWER_OFF_RESERVED ||
+ pon->resin_hard_reset_type >= PON_POWER_OFF_MAX_TYPE) {
+ dev_err(&pdev->dev, "Invalid resin-hard-reset-type\n");
+ pon->resin_hard_reset_type = -EINVAL;
+ }
+
+ rc = of_property_read_u32(pon->pdev->dev.of_node,
+ "qcom,resin-shutdown-type",
+ &pon->resin_shutdown_type);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&pdev->dev, "Unable to read resin shutdown poweroff type rc: %d\n",
+ rc);
+ goto err_out;
+ }
+ pon->resin_shutdown_type = -EINVAL;
+ } else if (pon->resin_shutdown_type <= PON_POWER_OFF_RESERVED ||
+ pon->resin_shutdown_type >= PON_POWER_OFF_MAX_TYPE) {
+ dev_err(&pdev->dev, "Invalid resin-shutdown-type\n");
+ pon->resin_shutdown_type = -EINVAL;
+ }
+
+ pon->resin_hard_reset_disable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,resin-hard-reset-disable");
+ pon->resin_shutdown_disable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,resin-shutdown-disable");
+
rc = device_create_file(&pdev->dev, &dev_attr_debounce_us);
if (rc) {
dev_err(&pdev->dev, "sys file creation failed rc: %d\n", rc);
@@ -2365,7 +2528,8 @@
return 0;
err_out:
- sys_reset_dev = NULL;
+ if (sys_reset)
+ sys_reset_dev = NULL;
return rc;
}
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 36777b3..86168b9 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,6 +11,8 @@
if INPUT_TOUCHSCREEN
+source "drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig"
+
config TOUCHSCREEN_PROPERTIES
def_tristate INPUT
depends on INPUT
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 0caab59..7ac5a98 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -72,6 +72,7 @@
obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
obj-$(CONFIG_TOUCHSCREEN_SURFACE3_SPI) += surface3_spi.o
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX) += synaptics_dsx/
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_v26) += synaptics_dsx_2.6/
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/Kconfig b/drivers/input/touchscreen/synaptics_dsx/Kconfig
index b2fa115..b54e792 100644
--- a/drivers/input/touchscreen/synaptics_dsx/Kconfig
+++ b/drivers/input/touchscreen/synaptics_dsx/Kconfig
@@ -59,6 +59,16 @@
To compile this driver as a module, choose M here: the
module will be called synaptics_dsx_fw_update.
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+ bool "Synaptics DSX firmware update sysfs attributes"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+ help
+ Say Y here to enable support for sysfs attributes for
+ performing firmware update in a development environment.
+ This does not affect the core or other subsystem attributes.
+
+ If unsure, say N.
+
config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING
tristate "Synaptics DSX test reporting module"
depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
index 7f62e01..395def9 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -137,6 +137,7 @@
static int fwu_recovery_check_status(void);
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static ssize_t fwu_sysfs_show_image(struct file *data_file,
struct kobject *kobj, struct bin_attribute *attributes,
char *buf, loff_t pos, size_t count);
@@ -201,6 +202,8 @@
struct device_attribute *attr, char *buf);
#endif
+#endif
+
enum f34_version {
F34_V0 = 0,
F34_V1,
@@ -757,6 +760,7 @@
struct work_struct fwu_work;
};
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static struct bin_attribute dev_attr_data = {
.attr = {
.name = "data",
@@ -766,8 +770,10 @@
.read = fwu_sysfs_show_image,
.write = fwu_sysfs_store_image,
};
+#endif
static struct device_attribute attrs[] = {
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
__ATTR(dorecovery, 0220,
synaptics_rmi4_show_error,
fwu_sysfs_do_recovery_store),
@@ -821,13 +827,16 @@
fwu_sysfs_read_lockdown_code_show,
fwu_sysfs_write_lockdown_code_store),
#endif
+#endif
};
static struct synaptics_rmi4_fwu_handle *fwu;
DECLARE_COMPLETION(fwu_remove_complete);
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
DEFINE_MUTEX(fwu_sysfs_mutex);
+#endif
static void calculate_checksum(unsigned short *data, unsigned long len,
unsigned long *result)
@@ -3061,6 +3070,7 @@
return 0;
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static int fwu_check_pm_configuration_size(void)
{
unsigned short block_count;
@@ -3077,6 +3087,7 @@
return 0;
}
+#endif
static int fwu_check_bl_configuration_size(void)
{
@@ -3444,6 +3455,7 @@
return fwu_write_configuration();
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static int fwu_write_pm_configuration(void)
{
fwu->config_area = PM_CONFIG_AREA;
@@ -3469,6 +3481,7 @@
return 0;
}
#endif
+#endif
static int fwu_write_flash_configuration(void)
{
@@ -3757,6 +3770,7 @@
return retval;
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static int fwu_do_read_config(void)
{
int retval;
@@ -3984,6 +3998,7 @@
return retval;
}
#endif
+#endif
static int fwu_do_lockdown_v7(void)
{
@@ -4134,6 +4149,7 @@
}
#endif
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static int fwu_start_write_guest_code(void)
{
int retval;
@@ -4339,6 +4355,7 @@
return retval;
}
+#endif
static int fwu_start_reflash(void)
{
@@ -4970,6 +4987,7 @@
}
#endif
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static ssize_t fwu_sysfs_show_image(struct file *data_file,
struct kobject *kobj, struct bin_attribute *attributes,
char *buf, loff_t pos, size_t count)
@@ -5566,6 +5584,7 @@
return count;
}
#endif
+#endif
static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
unsigned char intr_mask)
{
@@ -5668,6 +5687,7 @@
if (ENABLE_SYS_REFLASH == false)
return 0;
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
&dev_attr_data);
if (retval < 0) {
@@ -5676,6 +5696,7 @@
__func__);
goto exit_free_mem;
}
+#endif
for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
@@ -5697,7 +5718,9 @@
&attrs[attr_count].attr);
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
exit_free_mem:
kfree(fwu->image_name);
@@ -5739,7 +5762,9 @@
&attrs[attr_count].attr);
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
exit:
complete(&fwu_remove_complete);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
index 8776d4a..7725cd3 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -402,11 +402,11 @@
struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
struct i2c_msg msg[2];
+ mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
if (retval < 0)
- return retval;
-
- mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+ goto exit;
retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
if (retval != PAGE_SELECT_LEN) {
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
index 61cf979..331274e 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -567,16 +567,22 @@
return -EBADF;
}
- if (count == 0)
- return 0;
+ mutex_lock(&(dev_data->file_mutex));
+
+ if (*f_pos > REG_ADDR_LIMIT) {
+ retval = -EFAULT;
+ goto clean_up;
+ }
if (count > (REG_ADDR_LIMIT - *f_pos))
count = REG_ADDR_LIMIT - *f_pos;
+ if (count == 0) {
+ retval = 0;
+ goto clean_up;
+ }
address = (unsigned short)(*f_pos);
- mutex_lock(&(dev_data->file_mutex));
-
rmidev_allocate_buffer(count);
retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
@@ -638,18 +644,26 @@
return -EBADF;
}
- if (count == 0)
- return 0;
+ mutex_lock(&(dev_data->file_mutex));
+
+ if (*f_pos > REG_ADDR_LIMIT) {
+ retval = -EFAULT;
+ goto unlock;
+ }
if (count > (REG_ADDR_LIMIT - *f_pos))
count = REG_ADDR_LIMIT - *f_pos;
- mutex_lock(&(dev_data->file_mutex));
-
+ if (count == 0) {
+ retval = 0;
+ goto unlock;
+ }
rmidev_allocate_buffer(count);
-	if (copy_from_user(rmidev->tmpbuf, buf, count))
-		return -EFAULT;
+	if (copy_from_user(rmidev->tmpbuf, buf, count)) {
+		retval = -EFAULT;
+		goto unlock;
+	}
retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
*f_pos,
@@ -658,6 +672,7 @@
if (retval >= 0)
*f_pos += retval;
+unlock:
mutex_unlock(&(dev_data->file_mutex));
return retval;
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig b/drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig
new file mode 100644
index 0000000..5389628
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig
@@ -0,0 +1,127 @@
+#
+# Synaptics DSX v2.6 touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX_v26
+ bool "Synaptics DSX v2.6 touchscreen"
+ default y
+ help
+ Say Y here if you have a Synaptics DSX touchscreen connected
+ to your system.
+
+ If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX_v26
+
+choice
+ default TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26
+ prompt "Synaptics DSX v2.6 bus interface"
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26
+ bool "RMI over I2C"
+ depends on I2C
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI_v26
+ bool "RMI over SPI"
+ depends on SPI_MASTER
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C_v26
+ bool "HID over I2C"
+ depends on I2C
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ tristate "Synaptics DSX v2.6 core driver module"
+ depends on I2C || SPI_MASTER
+ help
+ Say Y here to enable basic touch reporting functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26
+ tristate "Synaptics DSX v2.6 RMI device module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for direct RMI register access.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_rmi_dev.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26
+ tristate "Synaptics DSX v2.6 firmware update module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for doing firmware update.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_fw_update.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING_v26
+ tristate "Synaptics DSX v2.6 test reporting module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for retrieving production test reports.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_test_reporting.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY_v26
+ tristate "Synaptics DSX v2.6 proximity module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for proximity functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_proximity.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN_v26
+ tristate "Synaptics DSX v2.6 active pen module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for active pen functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_active_pen.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_GESTURE_v26
+ tristate "Synaptics DSX v2.6 user defined gesture module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for user defined gesture functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_gesture.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_VIDEO_v26
+ tristate "Synaptics DSX v2.6 video module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+ help
+ Say Y here to enable support for video communication functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_video.
+
+config SECURE_TOUCH_SYNAPTICS_DSX_V26
+ bool "Secure Touch support for Synaptics V2.6 Touchscreen"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26
+ help
+	  Say Y here if the Synaptics DSX V2.6 touch driver is
+	  connected and you want to enable secure touch support
+	  for it.
+
+	  If unsure, say N.
+
+endif
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/Makefile b/drivers/input/touchscreen/synaptics_dsx_2.6/Makefile
new file mode 100644
index 0000000..e5e7215
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI_v26) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C_v26) += synaptics_dsx_rmi_hid_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26) += synaptics_dsx_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26) += synaptics_dsx_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING_v26) += synaptics_dsx_test_reporting.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY_v26) += synaptics_dsx_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN_v26) += synaptics_dsx_active_pen.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_GESTURE_v26) += synaptics_dsx_gesture.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_VIDEO_v26) += synaptics_dsx_video.o
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_active_pen.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_active_pen.c
new file mode 100644
index 0000000..db5324a
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_active_pen.c
@@ -0,0 +1,624 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define APEN_PHYS_NAME "synaptics_dsx/active_pen"
+
+#define ACTIVE_PEN_MAX_PRESSURE_16BIT 65535
+#define ACTIVE_PEN_MAX_PRESSURE_8BIT 255
+
+struct synaptics_rmi4_f12_query_8 {
+ union {
+ struct {
+ unsigned char size_of_query9;
+ struct {
+ unsigned char data0_is_present:1;
+ unsigned char data1_is_present:1;
+ unsigned char data2_is_present:1;
+ unsigned char data3_is_present:1;
+ unsigned char data4_is_present:1;
+ unsigned char data5_is_present:1;
+ unsigned char data6_is_present:1;
+ unsigned char data7_is_present:1;
+ } __packed;
+ };
+ unsigned char data[2];
+ };
+};
+
+struct apen_data_8b_pressure {
+ union {
+ struct {
+ unsigned char status_pen:1;
+ unsigned char status_invert:1;
+ unsigned char status_barrel:1;
+ unsigned char status_reserved:5;
+ unsigned char x_lsb;
+ unsigned char x_msb;
+ unsigned char y_lsb;
+ unsigned char y_msb;
+ unsigned char pressure_msb;
+ unsigned char battery_state;
+ unsigned char pen_id_0_7;
+ unsigned char pen_id_8_15;
+ unsigned char pen_id_16_23;
+ unsigned char pen_id_24_31;
+ } __packed;
+ unsigned char data[11];
+ };
+};
+
+struct apen_data {
+ union {
+ struct {
+ unsigned char status_pen:1;
+ unsigned char status_invert:1;
+ unsigned char status_barrel:1;
+ unsigned char status_reserved:5;
+ unsigned char x_lsb;
+ unsigned char x_msb;
+ unsigned char y_lsb;
+ unsigned char y_msb;
+ unsigned char pressure_lsb;
+ unsigned char pressure_msb;
+ unsigned char battery_state;
+ unsigned char pen_id_0_7;
+ unsigned char pen_id_8_15;
+ unsigned char pen_id_16_23;
+ unsigned char pen_id_24_31;
+ } __packed;
+ unsigned char data[12];
+ };
+};
+
+struct synaptics_rmi4_apen_handle {
+ bool apen_present;
+ unsigned char intr_mask;
+ unsigned char battery_state;
+ unsigned short query_base_addr;
+ unsigned short control_base_addr;
+ unsigned short data_base_addr;
+ unsigned short command_base_addr;
+ unsigned short apen_data_addr;
+ unsigned short max_pressure;
+ unsigned int pen_id;
+ struct input_dev *apen_dev;
+ struct apen_data *apen_data;
+ struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_apen_handle *apen;
+
+DECLARE_COMPLETION(apen_remove_complete);
+
+static void apen_lift(void)
+{
+ input_report_key(apen->apen_dev, BTN_TOUCH, 0);
+ input_report_key(apen->apen_dev, BTN_TOOL_PEN, 0);
+ input_report_key(apen->apen_dev, BTN_TOOL_RUBBER, 0);
+ input_sync(apen->apen_dev);
+ apen->apen_present = false;
+
+ return;
+}
+
+static void apen_report(void)
+{
+ int retval;
+ int x;
+ int y;
+ int pressure;
+ static int invert = -1;
+ struct apen_data_8b_pressure *apen_data_8b;
+ struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ apen->apen_data_addr,
+ apen->apen_data->data,
+ sizeof(apen->apen_data->data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read active pen data\n",
+ __func__);
+ return;
+ }
+
+ if (apen->apen_data->status_pen == 0) {
+ if (apen->apen_present)
+ apen_lift();
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: No active pen data\n",
+ __func__);
+
+ return;
+ }
+
+ x = (apen->apen_data->x_msb << 8) | (apen->apen_data->x_lsb);
+ y = (apen->apen_data->y_msb << 8) | (apen->apen_data->y_lsb);
+
+	if ((x == 0xffff) && (y == 0xffff)) {
+ if (apen->apen_present)
+ apen_lift();
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Active pen in range but no valid x & y\n",
+ __func__);
+
+ return;
+ }
+
+ if (!apen->apen_present)
+ invert = -1;
+
+ if (invert != -1 && invert != apen->apen_data->status_invert)
+ apen_lift();
+
+ invert = apen->apen_data->status_invert;
+
+ if (apen->max_pressure == ACTIVE_PEN_MAX_PRESSURE_16BIT) {
+ pressure = (apen->apen_data->pressure_msb << 8) |
+ apen->apen_data->pressure_lsb;
+ apen->battery_state = apen->apen_data->battery_state;
+ apen->pen_id = (apen->apen_data->pen_id_24_31 << 24) |
+ (apen->apen_data->pen_id_16_23 << 16) |
+ (apen->apen_data->pen_id_8_15 << 8) |
+ apen->apen_data->pen_id_0_7;
+ } else {
+ apen_data_8b = (struct apen_data_8b_pressure *)apen->apen_data;
+ pressure = apen_data_8b->pressure_msb;
+ apen->battery_state = apen_data_8b->battery_state;
+ apen->pen_id = (apen_data_8b->pen_id_24_31 << 24) |
+ (apen_data_8b->pen_id_16_23 << 16) |
+ (apen_data_8b->pen_id_8_15 << 8) |
+ apen_data_8b->pen_id_0_7;
+ }
+
+ input_report_key(apen->apen_dev, BTN_TOUCH, pressure > 0 ? 1 : 0);
+ input_report_key(apen->apen_dev,
+ apen->apen_data->status_invert > 0 ?
+ BTN_TOOL_RUBBER : BTN_TOOL_PEN, 1);
+ input_report_key(apen->apen_dev,
+ BTN_STYLUS, apen->apen_data->status_barrel > 0 ?
+ 1 : 0);
+ input_report_abs(apen->apen_dev, ABS_X, x);
+ input_report_abs(apen->apen_dev, ABS_Y, y);
+ input_report_abs(apen->apen_dev, ABS_PRESSURE, pressure);
+
+ input_sync(apen->apen_dev);
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Active pen: status = %d, invert = %d, barrel = %d, x = %d, y = %d, pressure = %d\n",
+ __func__,
+ apen->apen_data->status_pen,
+ apen->apen_data->status_invert,
+ apen->apen_data->status_barrel,
+ x, y, pressure);
+
+ apen->apen_present = true;
+
+ return;
+}
+
+static void apen_set_params(void)
+{
+ input_set_abs_params(apen->apen_dev, ABS_X, 0,
+ apen->rmi4_data->sensor_max_x, 0, 0);
+ input_set_abs_params(apen->apen_dev, ABS_Y, 0,
+ apen->rmi4_data->sensor_max_y, 0, 0);
+ input_set_abs_params(apen->apen_dev, ABS_PRESSURE, 0,
+ apen->max_pressure, 0, 0);
+
+ return;
+}
+
+static int apen_pressure(struct synaptics_rmi4_f12_query_8 *query_8)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char data_reg_presence;
+ unsigned char size_of_query_9;
+ unsigned char *query_9;
+ unsigned char *data_desc;
+ struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+ data_reg_presence = query_8->data[1];
+
+ size_of_query_9 = query_8->size_of_query9;
+ query_9 = kmalloc(size_of_query_9, GFP_KERNEL);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ apen->query_base_addr + 9,
+ query_9,
+ size_of_query_9);
+ if (retval < 0)
+ goto exit;
+
+ data_desc = query_9;
+
+ for (ii = 0; ii < 6; ii++) {
+ if (!(data_reg_presence & (1 << ii)))
+ continue; /* The data register is not present */
+ data_desc++; /* Jump over the size entry */
+ while (*data_desc & (1 << 7))
+ data_desc++;
+ data_desc++; /* Go to the next descriptor */
+ }
+
+ data_desc++; /* Jump over the size entry */
+ /* Check for the presence of subpackets 1 and 2 */
+ if ((*data_desc & (3 << 1)) == (3 << 1))
+ apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_16BIT;
+ else
+ apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_8BIT;
+
+exit:
+ kfree(query_9);
+
+ return retval;
+}
+
+static int apen_reg_init(void)
+{
+ int retval;
+ unsigned char data_offset;
+ unsigned char size_of_query8;
+ struct synaptics_rmi4_f12_query_8 query_8;
+ struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ apen->query_base_addr + 7,
+ &size_of_query8,
+ sizeof(size_of_query8));
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ apen->query_base_addr + 8,
+ query_8.data,
+ sizeof(query_8.data));
+ if (retval < 0)
+ return retval;
+
+ if ((size_of_query8 >= 2) && (query_8.data6_is_present)) {
+ data_offset = query_8.data0_is_present +
+ query_8.data1_is_present +
+ query_8.data2_is_present +
+ query_8.data3_is_present +
+ query_8.data4_is_present +
+ query_8.data5_is_present;
+ apen->apen_data_addr = apen->data_base_addr + data_offset;
+ retval = apen_pressure(&query_8);
+ if (retval < 0)
+ return retval;
+ } else {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Active pen support unavailable\n",
+ __func__);
+ retval = -ENODEV;
+ }
+
+ return retval;
+}
+
+static int apen_scan_pdt(void)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char page;
+ unsigned char intr_count = 0;
+ unsigned char intr_off;
+ unsigned char intr_src;
+ unsigned short addr;
+ struct synaptics_rmi4_fn_desc fd;
+ struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+ for (page = 0; page < PAGES_TO_SERVICE; page++) {
+ for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+ addr |= (page << 8);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ addr,
+ (unsigned char *)&fd,
+ sizeof(fd));
+ if (retval < 0)
+ return retval;
+
+ addr &= ~(MASK_8BIT << 8);
+
+ if (fd.fn_number) {
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Found F%02x\n",
+ __func__, fd.fn_number);
+ switch (fd.fn_number) {
+ case SYNAPTICS_RMI4_F12:
+ goto f12_found;
+ break;
+ }
+ } else {
+ break;
+ }
+
+ intr_count += fd.intr_src_count;
+ }
+ }
+
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to find F12\n",
+ __func__);
+ return -EINVAL;
+
+f12_found:
+ apen->query_base_addr = fd.query_base_addr | (page << 8);
+ apen->control_base_addr = fd.ctrl_base_addr | (page << 8);
+ apen->data_base_addr = fd.data_base_addr | (page << 8);
+ apen->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+ retval = apen_reg_init();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to initialize active pen registers\n",
+ __func__);
+ return retval;
+ }
+
+ apen->intr_mask = 0;
+ intr_src = fd.intr_src_count;
+ intr_off = intr_count % 8;
+ for (ii = intr_off;
+ ii < (intr_src + intr_off);
+ ii++) {
+ apen->intr_mask |= 1 << ii;
+ }
+
+ rmi4_data->intr_mask[0] |= apen->intr_mask;
+
+ addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ addr,
+ &(rmi4_data->intr_mask[0]),
+ sizeof(rmi4_data->intr_mask[0]));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to set interrupt enable bit\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void synaptics_rmi4_apen_attn(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask)
+{
+ if (!apen)
+ return;
+
+ if (apen->intr_mask & intr_mask)
+ apen_report();
+
+ return;
+}
+
+static int synaptics_rmi4_apen_init(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+
+ if (apen) {
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Handle already exists\n",
+ __func__);
+ return 0;
+ }
+
+ apen = kzalloc(sizeof(*apen), GFP_KERNEL);
+ if (!apen) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for apen\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ apen->apen_data = kzalloc(sizeof(*(apen->apen_data)), GFP_KERNEL);
+ if (!apen->apen_data) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for apen_data\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit_free_apen;
+ }
+
+ apen->rmi4_data = rmi4_data;
+
+ retval = apen_scan_pdt();
+ if (retval < 0)
+ goto exit_free_apen_data;
+
+ apen->apen_dev = input_allocate_device();
+ if (apen->apen_dev == NULL) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to allocate active pen device\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit_free_apen_data;
+ }
+
+ apen->apen_dev->name = ACTIVE_PEN_DRIVER_NAME;
+ apen->apen_dev->phys = APEN_PHYS_NAME;
+ apen->apen_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+ apen->apen_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+ apen->apen_dev->dev.parent = rmi4_data->pdev->dev.parent;
+ input_set_drvdata(apen->apen_dev, rmi4_data);
+
+ set_bit(EV_KEY, apen->apen_dev->evbit);
+ set_bit(EV_ABS, apen->apen_dev->evbit);
+ set_bit(BTN_TOUCH, apen->apen_dev->keybit);
+ set_bit(BTN_TOOL_PEN, apen->apen_dev->keybit);
+ set_bit(BTN_TOOL_RUBBER, apen->apen_dev->keybit);
+ set_bit(BTN_STYLUS, apen->apen_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+ set_bit(INPUT_PROP_DIRECT, apen->apen_dev->propbit);
+#endif
+
+ apen_set_params();
+
+ retval = input_register_device(apen->apen_dev);
+ if (retval) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to register active pen device\n",
+ __func__);
+ goto exit_free_input_device;
+ }
+
+ return 0;
+
+exit_free_input_device:
+ input_free_device(apen->apen_dev);
+
+exit_free_apen_data:
+ kfree(apen->apen_data);
+
+exit_free_apen:
+ kfree(apen);
+ apen = NULL;
+
+exit:
+ return retval;
+}
+
+static void synaptics_rmi4_apen_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+ if (!apen)
+ goto exit;
+
+ input_unregister_device(apen->apen_dev);
+ kfree(apen->apen_data);
+ kfree(apen);
+ apen = NULL;
+
+exit:
+ complete(&apen_remove_complete);
+
+ return;
+}
+
+static void synaptics_rmi4_apen_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+ if (!apen) {
+ synaptics_rmi4_apen_init(rmi4_data);
+ return;
+ }
+
+ apen_lift();
+
+ apen_scan_pdt();
+
+ return;
+}
+
+static void synaptics_rmi4_apen_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+ if (!apen)
+ return;
+
+ apen_lift();
+
+ return;
+}
+
+static void synaptics_rmi4_apen_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+ if (!apen)
+ return;
+
+ apen_lift();
+
+ return;
+}
+
+static void synaptics_rmi4_apen_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+ if (!apen)
+ return;
+
+ apen_lift();
+
+ return;
+}
+
+static struct synaptics_rmi4_exp_fn active_pen_module = {
+ .fn_type = RMI_ACTIVE_PEN,
+ .init = synaptics_rmi4_apen_init,
+ .remove = synaptics_rmi4_apen_remove,
+ .reset = synaptics_rmi4_apen_reset,
+ .reinit = synaptics_rmi4_apen_reinit,
+ .early_suspend = synaptics_rmi4_apen_e_suspend,
+ .suspend = synaptics_rmi4_apen_suspend,
+ .resume = NULL,
+ .late_resume = NULL,
+ .attn = synaptics_rmi4_apen_attn,
+};
+
+static int __init rmi4_active_pen_module_init(void)
+{
+ synaptics_rmi4_new_function(&active_pen_module, true);
+
+ return 0;
+}
+
+static void __exit rmi4_active_pen_module_exit(void)
+{
+ synaptics_rmi4_new_function(&active_pen_module, false);
+
+ wait_for_completion(&apen_remove_complete);
+
+ return;
+}
+
+module_init(rmi4_active_pen_module_init);
+module_exit(rmi4_active_pen_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Active Pen Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
new file mode 100644
index 0000000..7633767
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
@@ -0,0 +1,4712 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#define INPUT_PHYS_NAME "synaptics_dsx/touch_input"
+#define STYLUS_PHYS_NAME "synaptics_dsx/stylus"
+
+#define VIRTUAL_KEY_MAP_FILE_NAME "virtualkeys." PLATFORM_DRIVER_NAME
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+#define WAKEUP_GESTURE false
+
+#define NO_0D_WHILE_2D
+#define REPORT_2D_Z
+#define REPORT_2D_W
+/*
+#define REPORT_2D_PRESSURE
+*/
+
+#define F12_DATA_15_WORKAROUND
+
+#define IGNORE_FN_INIT_FAILURE
+
+#define FB_READY_RESET
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define REBUILD_WORK_DELAY_MS 500 /* ms */
+
+#define EXP_FN_WORK_DELAY_MS 500 /* ms */
+#define MAX_F11_TOUCH_WIDTH 15
+#define MAX_F12_TOUCH_WIDTH 255
+#define MAX_F12_TOUCH_PRESSURE 255
+
+#define CHECK_STATUS_TIMEOUT_MS 100
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+
+#define STATUS_NO_ERROR 0x00
+#define STATUS_RESET_OCCURRED 0x01
+#define STATUS_INVALID_CONFIG 0x02
+#define STATUS_DEVICE_FAILURE 0x03
+#define STATUS_CONFIG_CRC_FAILURE 0x04
+#define STATUS_FIRMWARE_CRC_FAILURE 0x05
+#define STATUS_CRC_IN_PROGRESS 0x06
+
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+#define CONFIGURED (1 << 7)
+
+#define F11_CONTINUOUS_MODE 0x00
+#define F11_WAKEUP_GESTURE_MODE 0x04
+#define F12_CONTINUOUS_MODE 0x00
+#define F12_WAKEUP_GESTURE_MODE 0x02
+#define F12_UDG_DETECT 0x0f
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+ bool *was_in_bl_mode);
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+ bool rebuild);
+
+#ifdef CONFIG_FB
+static void synaptics_rmi4_fb_notify_resume_work(struct work_struct *work);
+static int synaptics_rmi4_fb_notifier_cb(struct notifier_block *self,
+ unsigned long event, void *data);
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#ifndef CONFIG_FB
+#define USE_EARLYSUSPEND
+#endif
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static void synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static void synaptics_rmi4_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+static ssize_t synaptics_rmi4_secure_touch_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_secure_touch_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_secure_touch_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+#endif
+
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data);
+
+struct synaptics_rmi4_f01_device_status {
+ union {
+ struct {
+ unsigned char status_code:4;
+ unsigned char reserved:2;
+ unsigned char flash_prog:1;
+ unsigned char unconfigured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f11_query_0_5 {
+ union {
+ struct {
+ /* query 0 */
+ unsigned char f11_query0_b0__2:3;
+ unsigned char has_query_9:1;
+ unsigned char has_query_11:1;
+ unsigned char has_query_12:1;
+ unsigned char has_query_27:1;
+ unsigned char has_query_28:1;
+
+ /* query 1 */
+ unsigned char num_of_fingers:3;
+ unsigned char has_rel:1;
+ unsigned char has_abs:1;
+ unsigned char has_gestures:1;
+ unsigned char has_sensitibity_adjust:1;
+ unsigned char f11_query1_b7:1;
+
+ /* query 2 */
+ unsigned char num_of_x_electrodes;
+
+ /* query 3 */
+ unsigned char num_of_y_electrodes;
+
+ /* query 4 */
+ unsigned char max_electrodes:7;
+ unsigned char f11_query4_b7:1;
+
+ /* query 5 */
+ unsigned char abs_data_size:2;
+ unsigned char has_anchored_finger:1;
+ unsigned char has_adj_hyst:1;
+ unsigned char has_dribble:1;
+ unsigned char has_bending_correction:1;
+ unsigned char has_large_object_suppression:1;
+ unsigned char has_jitter_filter:1;
+ } __packed;
+ unsigned char data[6];
+ };
+};
+
+struct synaptics_rmi4_f11_query_7_8 {
+ union {
+ struct {
+ /* query 7 */
+ unsigned char has_single_tap:1;
+ unsigned char has_tap_and_hold:1;
+ unsigned char has_double_tap:1;
+ unsigned char has_early_tap:1;
+ unsigned char has_flick:1;
+ unsigned char has_press:1;
+ unsigned char has_pinch:1;
+ unsigned char has_chiral_scroll:1;
+
+ /* query 8 */
+ unsigned char has_palm_detect:1;
+ unsigned char has_rotate:1;
+ unsigned char has_touch_shapes:1;
+ unsigned char has_scroll_zones:1;
+ unsigned char individual_scroll_zones:1;
+ unsigned char has_multi_finger_scroll:1;
+ unsigned char has_multi_finger_scroll_edge_motion:1;
+ unsigned char has_multi_finger_scroll_inertia:1;
+ } __packed;
+ unsigned char data[2];
+ };
+};
+
+struct synaptics_rmi4_f11_query_9 {
+ union {
+ struct {
+ unsigned char has_pen:1;
+ unsigned char has_proximity:1;
+ unsigned char has_large_object_sensitivity:1;
+ unsigned char has_suppress_on_large_object_detect:1;
+ unsigned char has_two_pen_thresholds:1;
+ unsigned char has_contact_geometry:1;
+ unsigned char has_pen_hover_discrimination:1;
+ unsigned char has_pen_hover_and_edge_filters:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f11_query_12 {
+ union {
+ struct {
+ unsigned char has_small_object_detection:1;
+ unsigned char has_small_object_detection_tuning:1;
+ unsigned char has_8bit_w:1;
+ unsigned char has_2d_adjustable_mapping:1;
+ unsigned char has_general_information_2:1;
+ unsigned char has_physical_properties:1;
+ unsigned char has_finger_limit:1;
+ unsigned char has_linear_cofficient_2:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f11_query_27 {
+ union {
+ struct {
+ unsigned char f11_query27_b0:1;
+ unsigned char has_pen_position_correction:1;
+ unsigned char has_pen_jitter_filter_coefficient:1;
+ unsigned char has_group_decomposition:1;
+ unsigned char has_wakeup_gesture:1;
+ unsigned char has_small_finger_correction:1;
+ unsigned char has_data_37:1;
+ unsigned char f11_query27_b7:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f11_ctrl_6_9 {
+ union {
+ struct {
+ unsigned char sensor_max_x_pos_7_0;
+ unsigned char sensor_max_x_pos_11_8:4;
+ unsigned char f11_ctrl7_b4__7:4;
+ unsigned char sensor_max_y_pos_7_0;
+ unsigned char sensor_max_y_pos_11_8:4;
+ unsigned char f11_ctrl9_b4__7:4;
+ } __packed;
+ unsigned char data[4];
+ };
+};
+
+struct synaptics_rmi4_f11_data_1_5 {
+ union {
+ struct {
+ unsigned char x_position_11_4;
+ unsigned char y_position_11_4;
+ unsigned char x_position_3_0:4;
+ unsigned char y_position_3_0:4;
+ unsigned char wx:4;
+ unsigned char wy:4;
+ unsigned char z;
+ } __packed;
+ unsigned char data[5];
+ };
+};
+
+struct synaptics_rmi4_f12_query_5 {
+ union {
+ struct {
+ unsigned char size_of_query6;
+ struct {
+ unsigned char ctrl0_is_present:1;
+ unsigned char ctrl1_is_present:1;
+ unsigned char ctrl2_is_present:1;
+ unsigned char ctrl3_is_present:1;
+ unsigned char ctrl4_is_present:1;
+ unsigned char ctrl5_is_present:1;
+ unsigned char ctrl6_is_present:1;
+ unsigned char ctrl7_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl8_is_present:1;
+ unsigned char ctrl9_is_present:1;
+ unsigned char ctrl10_is_present:1;
+ unsigned char ctrl11_is_present:1;
+ unsigned char ctrl12_is_present:1;
+ unsigned char ctrl13_is_present:1;
+ unsigned char ctrl14_is_present:1;
+ unsigned char ctrl15_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl16_is_present:1;
+ unsigned char ctrl17_is_present:1;
+ unsigned char ctrl18_is_present:1;
+ unsigned char ctrl19_is_present:1;
+ unsigned char ctrl20_is_present:1;
+ unsigned char ctrl21_is_present:1;
+ unsigned char ctrl22_is_present:1;
+ unsigned char ctrl23_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl24_is_present:1;
+ unsigned char ctrl25_is_present:1;
+ unsigned char ctrl26_is_present:1;
+ unsigned char ctrl27_is_present:1;
+ unsigned char ctrl28_is_present:1;
+ unsigned char ctrl29_is_present:1;
+ unsigned char ctrl30_is_present:1;
+ unsigned char ctrl31_is_present:1;
+ } __packed;
+ };
+ unsigned char data[5];
+ };
+};
+
+struct synaptics_rmi4_f12_query_8 {
+ union {
+ struct {
+ unsigned char size_of_query9;
+ struct {
+ unsigned char data0_is_present:1;
+ unsigned char data1_is_present:1;
+ unsigned char data2_is_present:1;
+ unsigned char data3_is_present:1;
+ unsigned char data4_is_present:1;
+ unsigned char data5_is_present:1;
+ unsigned char data6_is_present:1;
+ unsigned char data7_is_present:1;
+ } __packed;
+ struct {
+ unsigned char data8_is_present:1;
+ unsigned char data9_is_present:1;
+ unsigned char data10_is_present:1;
+ unsigned char data11_is_present:1;
+ unsigned char data12_is_present:1;
+ unsigned char data13_is_present:1;
+ unsigned char data14_is_present:1;
+ unsigned char data15_is_present:1;
+ } __packed;
+ struct {
+ unsigned char data16_is_present:1;
+ unsigned char data17_is_present:1;
+ unsigned char data18_is_present:1;
+ unsigned char data19_is_present:1;
+ unsigned char data20_is_present:1;
+ unsigned char data21_is_present:1;
+ unsigned char data22_is_present:1;
+ unsigned char data23_is_present:1;
+ } __packed;
+ };
+ unsigned char data[4];
+ };
+};
+
+struct synaptics_rmi4_f12_ctrl_8 {
+ union {
+ struct {
+ unsigned char max_x_coord_lsb;
+ unsigned char max_x_coord_msb;
+ unsigned char max_y_coord_lsb;
+ unsigned char max_y_coord_msb;
+ unsigned char rx_pitch_lsb;
+ unsigned char rx_pitch_msb;
+ unsigned char tx_pitch_lsb;
+ unsigned char tx_pitch_msb;
+ unsigned char low_rx_clip;
+ unsigned char high_rx_clip;
+ unsigned char low_tx_clip;
+ unsigned char high_tx_clip;
+ unsigned char num_of_rx;
+ unsigned char num_of_tx;
+ };
+ unsigned char data[14];
+ };
+};
+
+struct synaptics_rmi4_f12_ctrl_23 {
+ union {
+ struct {
+ unsigned char finger_enable:1;
+ unsigned char active_stylus_enable:1;
+ unsigned char palm_enable:1;
+ unsigned char unclassified_object_enable:1;
+ unsigned char hovering_finger_enable:1;
+ unsigned char gloved_finger_enable:1;
+ unsigned char f12_ctr23_00_b6__7:2;
+ unsigned char max_reported_objects;
+ unsigned char f12_ctr23_02_b0:1;
+ unsigned char report_active_stylus_as_finger:1;
+ unsigned char report_palm_as_finger:1;
+ unsigned char report_unclassified_object_as_finger:1;
+ unsigned char report_hovering_finger_as_finger:1;
+ unsigned char report_gloved_finger_as_finger:1;
+ unsigned char report_narrow_object_swipe_as_finger:1;
+ unsigned char report_handedge_as_finger:1;
+ unsigned char cover_enable:1;
+ unsigned char stylus_enable:1;
+ unsigned char eraser_enable:1;
+ unsigned char small_object_enable:1;
+ unsigned char f12_ctr23_03_b4__7:4;
+ unsigned char report_cover_as_finger:1;
+ unsigned char report_stylus_as_finger:1;
+ unsigned char report_eraser_as_finger:1;
+ unsigned char report_small_object_as_finger:1;
+ unsigned char f12_ctr23_04_b4__7:4;
+ };
+ unsigned char data[5];
+ };
+};
+
+struct synaptics_rmi4_f12_ctrl_31 {
+ union {
+ struct {
+ unsigned char max_x_coord_lsb;
+ unsigned char max_x_coord_msb;
+ unsigned char max_y_coord_lsb;
+ unsigned char max_y_coord_msb;
+ unsigned char rx_pitch_lsb;
+ unsigned char rx_pitch_msb;
+ unsigned char rx_clip_low;
+ unsigned char rx_clip_high;
+ unsigned char wedge_clip_low;
+ unsigned char wedge_clip_high;
+ unsigned char num_of_p;
+ unsigned char num_of_q;
+ };
+ unsigned char data[12];
+ };
+};
+
+struct synaptics_rmi4_f12_finger_data {
+ unsigned char object_type_and_status;
+ unsigned char x_lsb;
+ unsigned char x_msb;
+ unsigned char y_lsb;
+ unsigned char y_msb;
+#ifdef REPORT_2D_Z
+ unsigned char z;
+#endif
+#ifdef REPORT_2D_W
+ unsigned char wx;
+ unsigned char wy;
+#endif
+};
+
+struct synaptics_rmi4_f1a_query {
+ union {
+ struct {
+ unsigned char max_button_count:3;
+ unsigned char f1a_query0_b3__4:2;
+ unsigned char has_query4:1;
+ unsigned char has_query3:1;
+ unsigned char has_query2:1;
+ unsigned char has_general_control:1;
+ unsigned char has_interrupt_enable:1;
+ unsigned char has_multibutton_select:1;
+ unsigned char has_tx_rx_map:1;
+ unsigned char has_perbutton_threshold:1;
+ unsigned char has_release_threshold:1;
+ unsigned char has_strongestbtn_hysteresis:1;
+ unsigned char has_filter_strength:1;
+ } __packed;
+ unsigned char data[2];
+ };
+};
+
+struct synaptics_rmi4_f1a_query_4 {
+ union {
+ struct {
+ unsigned char has_ctrl19:1;
+ unsigned char f1a_query4_b1__4:4;
+ unsigned char has_ctrl24:1;
+ unsigned char f1a_query4_b6__7:2;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f1a_control_0 {
+ union {
+ struct {
+ unsigned char multibutton_report:2;
+ unsigned char filter_mode:2;
+ unsigned char reserved:4;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f1a_control {
+ struct synaptics_rmi4_f1a_control_0 general_control;
+ unsigned char button_int_enable;
+ unsigned char multi_button;
+ unsigned char *txrx_map;
+ unsigned char *button_threshold;
+ unsigned char button_release_threshold;
+ unsigned char strongest_button_hysteresis;
+ unsigned char filter_strength;
+};
+
+struct synaptics_rmi4_f1a_handle {
+ int button_bitmask_size;
+ unsigned char max_count;
+ unsigned char valid_button_count;
+ unsigned char *button_data_buffer;
+ unsigned char *button_map;
+ struct synaptics_rmi4_f1a_query button_query;
+ struct synaptics_rmi4_f1a_control button_control;
+};
+
+struct synaptics_rmi4_exp_fhandler {
+ struct synaptics_rmi4_exp_fn *exp_fn;
+ bool insert;
+ bool remove;
+ struct list_head link;
+};
+
+struct synaptics_rmi4_exp_fn_data {
+ bool initialized;
+ bool queue_work;
+ struct mutex mutex;
+ struct list_head list;
+ struct delayed_work work;
+ struct workqueue_struct *workqueue;
+ struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_exp_fn_data exp_data;
+
+static struct synaptics_dsx_button_map *vir_button_map;
+
+static struct device_attribute attrs[] = {
+ __ATTR(reset, 0220,
+ NULL,
+ synaptics_rmi4_f01_reset_store),
+ __ATTR(productinfo, 0444,
+ synaptics_rmi4_f01_productinfo_show,
+ NULL),
+ __ATTR(buildid, 0444,
+ synaptics_rmi4_f01_buildid_show,
+ NULL),
+ __ATTR(flashprog, 0444,
+ synaptics_rmi4_f01_flashprog_show,
+ NULL),
+ __ATTR(0dbutton, 0664,
+ synaptics_rmi4_0dbutton_show,
+ synaptics_rmi4_0dbutton_store),
+ __ATTR(suspend, 0220,
+ NULL,
+ synaptics_rmi4_suspend_store),
+ __ATTR(wake_gesture, 0664,
+ synaptics_rmi4_wake_gesture_show,
+ synaptics_rmi4_wake_gesture_store),
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+ __ATTR(secure_touch_enable, 0664,
+ synaptics_rmi4_secure_touch_enable_show,
+ synaptics_rmi4_secure_touch_enable_store),
+ __ATTR(secure_touch, 0444,
+ synaptics_rmi4_secure_touch_show,
+ NULL),
+#endif
+};
+
+static struct kobj_attribute virtual_key_map_attr = {
+ .attr = {
+ .name = VIRTUAL_KEY_MAP_FILE_NAME,
+ .mode = S_IRUGO,
+ },
+ .show = synaptics_rmi4_virtual_key_map_show,
+};
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+static void synaptics_secure_touch_init(struct synaptics_rmi4_data *data)
+{
+ data->st_initialized = 0;
+ init_completion(&data->st_powerdown);
+ init_completion(&data->st_irq_processed);
+
+ /* Get clocks */
+ data->core_clk = devm_clk_get(data->pdev->dev.parent, "core_clk");
+ if (IS_ERR(data->core_clk)) {
+ dev_warn(data->pdev->dev.parent,
+ "%s: error on clk_get(core_clk): %ld\n", __func__,
+ PTR_ERR(data->core_clk));
+ data->core_clk = NULL;
+ }
+
+ data->iface_clk = devm_clk_get(data->pdev->dev.parent, "iface_clk");
+ if (IS_ERR(data->iface_clk)) {
+ dev_warn(data->pdev->dev.parent,
+ "%s: error on clk_get(iface_clk): %ld\n", __func__,
+ PTR_ERR(data->iface_clk));
+ data->iface_clk = NULL;
+ }
+
+ data->st_initialized = 1;
+}
+
+static void synaptics_secure_touch_notify(struct synaptics_rmi4_data *rmi4_data)
+{
+ sysfs_notify(&rmi4_data->input_dev->dev.kobj, NULL, "secure_touch");
+}
+
+static irqreturn_t synaptics_filter_interrupt(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ if (atomic_read(&rmi4_data->st_enabled)) {
+ if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, 0, 1) == 0) {
+ reinit_completion(&rmi4_data->st_irq_processed);
+ synaptics_secure_touch_notify(rmi4_data);
+ wait_for_completion_interruptible(
+ &rmi4_data->st_irq_processed);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/*
+ * 'blocking' variable will have value 'true' when we want to prevent the driver
+ * from accessing the xPU/SMMU protected HW resources while the session is
+ * active.
+ */
+static void synaptics_secure_touch_stop(struct synaptics_rmi4_data *rmi4_data,
+ bool blocking)
+{
+ if (atomic_read(&rmi4_data->st_enabled)) {
+ atomic_set(&rmi4_data->st_pending_irqs, -1);
+ synaptics_secure_touch_notify(rmi4_data);
+ if (blocking)
+ wait_for_completion_interruptible(
+ &rmi4_data->st_powerdown);
+ }
+}
+
+#else
+static void synaptics_secure_touch_init(struct synaptics_rmi4_data *rmi4_data)
+{
+}
+
+static irqreturn_t synaptics_filter_interrupt(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ return IRQ_NONE;
+}
+
+static void synaptics_secure_touch_stop(struct synaptics_rmi4_data *rmi4_data,
+ bool blocking)
+{
+}
+#endif
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+static ssize_t synaptics_rmi4_secure_touch_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d",
+ atomic_read(&rmi4_data->st_enabled));
+}
+/*
+ * Accept only "0" and "1" valid values.
+ * "0" will reset the st_enabled flag, then wake up the reading process and
+ * the interrupt handler.
+ * The bus driver is notified via pm_runtime that it is not required to stay
+ * awake anymore.
+ * It will also make sure the queue of events is emptied in the controller,
+ * in case a touch happened in between the secure touch being disabled and
+ * the local ISR being ungated.
+ * "1" will set the st_enabled flag and clear the st_pending_irqs flag.
+ * The bus driver is requested via pm_runtime to stay awake.
+ */
+static ssize_t synaptics_rmi4_secure_touch_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ unsigned long value;
+ int err = 0;
+
+ if (count > 2)
+ return -EINVAL;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err != 0)
+ return err;
+
+ if (!rmi4_data->st_initialized)
+ return -EIO;
+
+ err = count;
+
+ switch (value) {
+ case 0:
+ if (atomic_read(&rmi4_data->st_enabled) == 0)
+ break;
+
+ synaptics_rmi4_bus_put(rmi4_data);
+ atomic_set(&rmi4_data->st_enabled, 0);
+ synaptics_secure_touch_notify(rmi4_data);
+ complete(&rmi4_data->st_irq_processed);
+ synaptics_rmi4_irq(rmi4_data->irq, rmi4_data);
+ complete(&rmi4_data->st_powerdown);
+
+ break;
+ case 1:
+ if (atomic_read(&rmi4_data->st_enabled)) {
+ err = -EBUSY;
+ break;
+ }
+
+ synchronize_irq(rmi4_data->irq);
+
+ if (synaptics_rmi4_bus_get(rmi4_data) < 0) {
+ dev_err(
+ rmi4_data->pdev->dev.parent,
+ "synaptics_rmi4_bus_get failed\n");
+ err = -EIO;
+ break;
+ }
+ reinit_completion(&rmi4_data->st_powerdown);
+ reinit_completion(&rmi4_data->st_irq_processed);
+ atomic_set(&rmi4_data->st_enabled, 1);
+ atomic_set(&rmi4_data->st_pending_irqs, 0);
+ break;
+ default:
+ dev_err(
+ rmi4_data->pdev->dev.parent,
+ "unsupported value: %lu\n", value);
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+/*
+ * This function returns whether there are pending interrupts, or
+ * other error conditions that need to be signaled to the userspace library,
+ * according to the following logic:
+ * - st_enabled is 0 if secure touch is not enabled, returning -EBADF
+ * - st_pending_irqs is -1 to signal that secure touch is being stopped,
+ * returning -EINVAL
+ * - st_pending_irqs is 1 to signal that there is a pending irq, returning
+ * the value "1" to the sysfs read operation
+ * - st_pending_irqs is 0 (only remaining case left) if the pending interrupt
+ * has been processed, so the interrupt handler can be allowed to continue.
+ */
+static ssize_t synaptics_rmi4_secure_touch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ int val = 0;
+
+ if (atomic_read(&rmi4_data->st_enabled) == 0)
+ return -EBADF;
+
+ if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, -1, 0) == -1)
+ return -EINVAL;
+
+ if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, 1, 0) == 1)
+ val = 1;
+ else
+ complete(&rmi4_data->st_irq_processed);
+
+ return scnprintf(buf, PAGE_SIZE, "%u", val);
+
+}
+#endif
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int reset;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ if (sscanf(buf, "%u", &reset) != 1)
+ return -EINVAL;
+
+ if (reset != 1)
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reset_device(rmi4_data, false);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to issue reset command, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return count;
+}
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
+ (rmi4_data->rmi4_mod_info.product_info[0]),
+ (rmi4_data->rmi4_mod_info.product_info[1]));
+}
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ rmi4_data->firmware_id);
+}
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ struct synaptics_rmi4_f01_device_status device_status;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ device_status.data,
+ sizeof(device_status.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read device status, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ device_status.flash_prog);
+}
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ rmi4_data->button_0d_enabled);
+}
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ unsigned char ii;
+ unsigned char intr_enable;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ input = input > 0 ? 1 : 0;
+
+ if (rmi4_data->button_0d_enabled == input)
+ return count;
+
+ if (list_empty(&rmi->support_fn_list))
+ return -ENODEV;
+
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+ ii = fhandler->intr_reg_num;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr + 1 + ii,
+ &intr_enable,
+ sizeof(intr_enable));
+ if (retval < 0)
+ return retval;
+
+ if (input == 1)
+ intr_enable |= fhandler->intr_mask;
+ else
+ intr_enable &= ~fhandler->intr_mask;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr + 1 + ii,
+ &intr_enable,
+ sizeof(intr_enable));
+ if (retval < 0)
+ return retval;
+ }
+ }
+
+ rmi4_data->button_0d_enabled = input;
+
+ return count;
+}
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input == 1)
+ synaptics_rmi4_suspend(dev);
+ else if (input == 0)
+ synaptics_rmi4_resume(dev);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ rmi4_data->enable_wakeup_gesture);
+}
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ input = input > 0 ? 1 : 0;
+
+ if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+ rmi4_data->enable_wakeup_gesture = input;
+
+ return count;
+}
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ii;
+ int cnt;
+ int count = 0;
+
+ for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+ cnt = snprintf(buf, PAGE_SIZE - count, "0x01:%d:%d:%d:%d:%d\n",
+ vir_button_map->map[ii * 5 + 0],
+ vir_button_map->map[ii * 5 + 1],
+ vir_button_map->map[ii * 5 + 2],
+ vir_button_map->map[ii * 5 + 3],
+ vir_button_map->map[ii * 5 + 4]);
+ buf += cnt;
+ count += cnt;
+ }
+
+ return count;
+}
+
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char touch_count = 0; /* number of touch points */
+ unsigned char reg_index;
+ unsigned char finger;
+ unsigned char fingers_supported;
+ unsigned char num_of_finger_status_regs;
+ unsigned char finger_shift;
+ unsigned char finger_status;
+ unsigned char finger_status_reg[3];
+ unsigned char detected_gestures;
+ unsigned short data_addr;
+ unsigned short data_offset;
+ int x;
+ int y;
+ int wx;
+ int wy;
+ int temp;
+ struct synaptics_rmi4_f11_data_1_5 data;
+ struct synaptics_rmi4_f11_extra_data *extra_data;
+
+ /*
+ * The number of finger status registers is determined by the
+ * maximum number of fingers supported - 2 bits per finger. So
+ * the number of finger status registers to read is:
+ * register_count = ceil(max_num_of_fingers / 4)
+ */
+ fingers_supported = fhandler->num_of_data_points;
+ num_of_finger_status_regs = (fingers_supported + 3) / 4;
+ data_addr = fhandler->full_addr.data_base;
+
+ extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+ if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr + extra_data->data38_offset,
+ &detected_gestures,
+ sizeof(detected_gestures));
+ if (retval < 0)
+ return 0;
+
+ if (detected_gestures) {
+ input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+ input_sync(rmi4_data->input_dev);
+ input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+ input_sync(rmi4_data->input_dev);
+ rmi4_data->suspend = false;
+ }
+
+ return 0;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr,
+ finger_status_reg,
+ num_of_finger_status_regs);
+ if (retval < 0)
+ return 0;
+
+ mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+ for (finger = 0; finger < fingers_supported; finger++) {
+ reg_index = finger / 4;
+ finger_shift = (finger % 4) * 2;
+ finger_status = (finger_status_reg[reg_index] >> finger_shift)
+ & MASK_2BIT;
+
+ /*
+ * Each 2-bit finger status field represents the following:
+ * 00 = finger not present
+ * 01 = finger present and data accurate
+ * 10 = finger present but data may be inaccurate
+ * 11 = reserved
+ */
+#ifdef TYPE_B_PROTOCOL
+ input_mt_slot(rmi4_data->input_dev, finger);
+ input_mt_report_slot_state(rmi4_data->input_dev,
+ MT_TOOL_FINGER, finger_status);
+#endif
+
+ if (finger_status) {
+ data_offset = data_addr +
+ num_of_finger_status_regs +
+ (finger * sizeof(data.data));
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_offset,
+ data.data,
+ sizeof(data.data));
+ if (retval < 0) {
+ touch_count = 0;
+ goto exit;
+ }
+
+ x = (data.x_position_11_4 << 4) | data.x_position_3_0;
+ y = (data.y_position_11_4 << 4) | data.y_position_3_0;
+ wx = data.wx;
+ wy = data.wy;
+
+ if (rmi4_data->hw_if->board_data->swap_axes) {
+ temp = x;
+ x = y;
+ y = temp;
+ temp = wx;
+ wx = wy;
+ wy = temp;
+ }
+
+ if (rmi4_data->hw_if->board_data->x_flip)
+ x = rmi4_data->sensor_max_x - x;
+ if (rmi4_data->hw_if->board_data->y_flip)
+ y = rmi4_data->sensor_max_y - y;
+
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 1);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 1);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_X, x);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MAJOR, max(wx, wy));
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+ __func__, finger,
+ finger_status,
+ x, y, wx, wy);
+
+ touch_count++;
+ }
+ }
+
+ if (touch_count == 0) {
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 0);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+ }
+
+ input_sync(rmi4_data->input_dev);
+
+exit:
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+ return touch_count;
+}
+
/*
 * Report 2D touch events from RMI4 function F12.
 *
 * Reads the F12 finger data registers and translates each reported
 * object (finger, gloved finger, palm, stylus, eraser) into input
 * events on rmi4_data->input_dev or rmi4_data->stylus_dev.  When the
 * device is suspended with wakeup gestures enabled, only the gesture
 * detection registers are read and a KEY_WAKEUP press/release pair is
 * injected instead of touch data.
 *
 * Returns the number of active touch points reported (0 on read errors
 * or in gesture mode).
 */
static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char index;
	unsigned char finger;
	unsigned char fingers_to_process;
	unsigned char finger_status;
	unsigned char size_of_2d_data;
	unsigned char gesture_type;
	unsigned short data_addr;
	int x;
	int y;
	int wx;
	int wy;
	int temp;
#ifdef REPORT_2D_PRESSURE
	int pressure;
#endif
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_finger_data *data;
	struct synaptics_rmi4_f12_finger_data *finger_data;
	/* Object presence is remembered across interrupts (static) */
	static unsigned char finger_presence;
	static unsigned char stylus_presence;
#ifdef F12_DATA_15_WORKAROUND
	static unsigned char objects_already_present;
#endif

	fingers_to_process = fhandler->num_of_data_points;
	data_addr = fhandler->full_addr.data_base;
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);

	/* Suspended with wakeup gestures: only check for a gesture event */
	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				data_addr + extra_data->data4_offset,
				rmi4_data->gesture_detection,
				sizeof(rmi4_data->gesture_detection));
		if (retval < 0)
			return 0;

		gesture_type = rmi4_data->gesture_detection[0];

		if (gesture_type && gesture_type != F12_UDG_DETECT) {
			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
			input_sync(rmi4_data->input_dev);
			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
			input_sync(rmi4_data->input_dev);
			rmi4_data->suspend = false;
		}

		return 0;
	}

	/* Determine the total number of fingers to process */
	if (extra_data->data15_size) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				data_addr + extra_data->data15_offset,
				extra_data->data15_data,
				extra_data->data15_size);
		if (retval < 0)
			return 0;

		/* Start checking from the highest bit */
		index = extra_data->data15_size - 1; /* Highest byte */
		finger = (fingers_to_process - 1) % 8; /* Highest bit */
		do {
			if (extra_data->data15_data[index] & (1 << finger))
				break;

			if (finger) {
				finger--;
			} else if (index > 0) {
				index--; /* Move to the next lower byte */
				finger = 7;
			}

			fingers_to_process--;
		} while (fingers_to_process);

		dev_dbg(rmi4_data->pdev->dev.parent,
				"%s: Number of fingers to process = %d\n",
				__func__, fingers_to_process);
	}

#ifdef F12_DATA_15_WORKAROUND
	/* Keep processing objects that were present on earlier interrupts */
	fingers_to_process = max(fingers_to_process, objects_already_present);
#endif

	/* No objects present: release everything and reset presence state */
	if (!fingers_to_process) {
		synaptics_rmi4_free_fingers(rmi4_data);
		finger_presence = 0;
		stylus_presence = 0;
		return 0;
	}

	retval = synaptics_rmi4_reg_read(rmi4_data,
			data_addr + extra_data->data1_offset,
			(unsigned char *)fhandler->data,
			fingers_to_process * size_of_2d_data);
	if (retval < 0)
		return 0;

	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;

#ifdef REPORT_2D_PRESSURE
	/* One pressure byte per finger from the data23 register block */
	if (rmi4_data->report_pressure) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				data_addr + extra_data->data23_offset,
				extra_data->data23_data,
				fingers_to_process);
		if (retval < 0)
			return 0;
	}
#endif

	mutex_lock(&(rmi4_data->rmi4_report_mutex));

	for (finger = 0; finger < fingers_to_process; finger++) {
		finger_data = data + finger;
		finger_status = finger_data->object_type_and_status;

#ifdef F12_DATA_15_WORKAROUND
		objects_already_present = finger + 1;
#endif

		x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
		y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
#ifdef REPORT_2D_W
		wx = finger_data->wx;
		wy = finger_data->wy;
#endif
		/*
		 * NOTE(review): wx/wy are swapped and printed below even when
		 * REPORT_2D_W is undefined, in which case they would be read
		 * uninitialized - confirm REPORT_2D_W is always enabled for
		 * this driver build.
		 */

		if (rmi4_data->hw_if->board_data->swap_axes) {
			temp = x;
			x = y;
			y = temp;
			temp = wx;
			wx = wy;
			wy = temp;
		}

		if (rmi4_data->hw_if->board_data->x_flip)
			x = rmi4_data->sensor_max_x - x;
		if (rmi4_data->hw_if->board_data->y_flip)
			y = rmi4_data->sensor_max_y - y;

		switch (finger_status) {
		case F12_FINGER_STATUS:
		case F12_GLOVED_FINGER_STATUS:
			/* Stylus has priority over fingers */
			if (stylus_presence)
				break;
#ifdef TYPE_B_PROTOCOL
			input_mt_slot(rmi4_data->input_dev, finger);
			input_mt_report_slot_state(rmi4_data->input_dev,
					MT_TOOL_FINGER, 1);
#endif

			input_report_key(rmi4_data->input_dev,
					BTN_TOUCH, 1);
			input_report_key(rmi4_data->input_dev,
					BTN_TOOL_FINGER, 1);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
#ifdef REPORT_2D_W
			/* Wedge sensors report only one width dimension */
			if (rmi4_data->wedge_sensor) {
				input_report_abs(rmi4_data->input_dev,
						ABS_MT_TOUCH_MAJOR, wx);
				input_report_abs(rmi4_data->input_dev,
						ABS_MT_TOUCH_MINOR, wx);
			} else {
				input_report_abs(rmi4_data->input_dev,
						ABS_MT_TOUCH_MAJOR,
						max(wx, wy));
				input_report_abs(rmi4_data->input_dev,
						ABS_MT_TOUCH_MINOR,
						min(wx, wy));
			}
#endif
#ifdef REPORT_2D_PRESSURE
			if (rmi4_data->report_pressure) {
				pressure = extra_data->data23_data[finger];
				input_report_abs(rmi4_data->input_dev,
						ABS_MT_PRESSURE, pressure);
			}
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif

			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
					__func__, finger,
					finger_status,
					x, y, wx, wy);

			finger_presence = 1;
			touch_count++;
			break;
		case F12_PALM_STATUS:
			/* Palms are logged but not reported as touches */
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Finger %d: x = %d, y = %d, wx = %d, wy = %d\n",
					__func__, finger,
					x, y, wx, wy);
			break;
		case F12_STYLUS_STATUS:
		case F12_ERASER_STATUS:
			if (!rmi4_data->stylus_enable)
				break;
			/* Stylus has priority over fingers */
			if (finger_presence) {
				/* Drop the lock: free_fingers reports too */
				mutex_unlock(&(rmi4_data->rmi4_report_mutex));
				synaptics_rmi4_free_fingers(rmi4_data);
				mutex_lock(&(rmi4_data->rmi4_report_mutex));
				finger_presence = 0;
			}
			if (stylus_presence) {/* Allow one stylus at a time */
				if (finger + 1 != stylus_presence)
					break;
			}
			input_report_key(rmi4_data->stylus_dev,
					BTN_TOUCH, 1);
			if (finger_status == F12_STYLUS_STATUS) {
				input_report_key(rmi4_data->stylus_dev,
						BTN_TOOL_PEN, 1);
			} else {
				input_report_key(rmi4_data->stylus_dev,
						BTN_TOOL_RUBBER, 1);
			}
			input_report_abs(rmi4_data->stylus_dev,
					ABS_X, x);
			input_report_abs(rmi4_data->stylus_dev,
					ABS_Y, y);
			input_sync(rmi4_data->stylus_dev);

			/* Remember which object index owns the stylus */
			stylus_presence = finger + 1;
			touch_count++;
			break;
		default:
#ifdef TYPE_B_PROTOCOL
			/* Object lifted: release its slot */
			input_mt_slot(rmi4_data->input_dev, finger);
			input_mt_report_slot_state(rmi4_data->input_dev,
					MT_TOOL_FINGER, 0);
#endif
			break;
		}
	}

	/* Nothing reported: release buttons/tools on both devices */
	if (touch_count == 0) {
		finger_presence = 0;
#ifdef F12_DATA_15_WORKAROUND
		objects_already_present = 0;
#endif
		input_report_key(rmi4_data->input_dev,
				BTN_TOUCH, 0);
		input_report_key(rmi4_data->input_dev,
				BTN_TOOL_FINGER, 0);
#ifndef TYPE_B_PROTOCOL
		input_mt_sync(rmi4_data->input_dev);
#endif

		if (rmi4_data->stylus_enable) {
			stylus_presence = 0;
			input_report_key(rmi4_data->stylus_dev,
					BTN_TOUCH, 0);
			input_report_key(rmi4_data->stylus_dev,
					BTN_TOOL_PEN, 0);
			if (rmi4_data->eraser_enable) {
				input_report_key(rmi4_data->stylus_dev,
						BTN_TOOL_RUBBER, 0);
			}
			input_sync(rmi4_data->stylus_dev);
		}
	}

	input_sync(rmi4_data->input_dev);

	mutex_unlock(&(rmi4_data->rmi4_report_mutex));

	return touch_count;
}
+
+static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char touch_count = 0;
+ unsigned char button;
+ unsigned char index;
+ unsigned char shift;
+ unsigned char status;
+ unsigned char *data;
+ unsigned short data_addr = fhandler->full_addr.data_base;
+ struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+ static unsigned char do_once = 1;
+ static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+ static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+ static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+ if (do_once) {
+ memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+ memset(before_2d_status, 0, sizeof(before_2d_status));
+ memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+ do_once = 0;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr,
+ f1a->button_data_buffer,
+ f1a->button_bitmask_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read button data registers\n",
+ __func__);
+ return;
+ }
+
+ data = f1a->button_data_buffer;
+
+ mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+ for (button = 0; button < f1a->valid_button_count; button++) {
+ index = button / 8;
+ shift = button % 8;
+ status = ((data[index] >> shift) & MASK_1BIT);
+
+ if (current_status[button] == status)
+ continue;
+ else
+ current_status[button] = status;
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Button %d (code %d) ->%d\n",
+ __func__, button,
+ f1a->button_map[button],
+ status);
+#ifdef NO_0D_WHILE_2D
+ if (rmi4_data->fingers_on_2d == false) {
+ if (status == 1) {
+ before_2d_status[button] = 1;
+ } else {
+ if (while_2d_status[button] == 1) {
+ while_2d_status[button] = 0;
+ continue;
+ } else {
+ before_2d_status[button] = 0;
+ }
+ }
+ touch_count++;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+ } else {
+ if (before_2d_status[button] == 1) {
+ before_2d_status[button] = 0;
+ touch_count++;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+ } else {
+ if (status == 1)
+ while_2d_status[button] = 1;
+ else
+ while_2d_status[button] = 0;
+ }
+ }
+#else
+ touch_count++;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+#endif
+ }
+
+ if (touch_count)
+ input_sync(rmi4_data->input_dev);
+
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+ return;
+}
+
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ unsigned char touch_count_2d;
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Function %02x reporting\n",
+ __func__, fhandler->fn_number);
+
+ switch (fhandler->fn_number) {
+ case SYNAPTICS_RMI4_F11:
+ touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+ fhandler);
+
+ if (touch_count_2d)
+ rmi4_data->fingers_on_2d = true;
+ else
+ rmi4_data->fingers_on_2d = false;
+ break;
+ case SYNAPTICS_RMI4_F12:
+ touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
+ fhandler);
+
+ if (touch_count_2d)
+ rmi4_data->fingers_on_2d = true;
+ else
+ rmi4_data->fingers_on_2d = false;
+ break;
+ case SYNAPTICS_RMI4_F1A:
+ synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
/*
 * Service an attention event from the sensor.
 *
 * Reads the F01 device status byte together with all interrupt status
 * registers in a single transfer, recovers from an in-progress firmware
 * CRC check, then - when @report is true - dispatches each flagged
 * interrupt source to the matching function handler and notifies any
 * registered expansion-function handlers.  Calling with @report false
 * just reads (and thereby clears) the interrupt status.
 */
static void synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
		bool report)
{
	int retval;
	/* data[0] = F01 device status; data[1..] = interrupt status regs */
	unsigned char data[MAX_INTR_REGISTERS + 1];
	unsigned char *intr = &data[1];
	bool was_in_bl_mode;
	struct synaptics_rmi4_f01_device_status status;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/* In stay-awake (e.g. firmware update) mode, do nothing */
	if (rmi4_data->stay_awake) {
		msleep(30);
		return;
	}

	/*
	 * Get interrupt status information from F01 Data1 register to
	 * determine the source(s) that are flagging the interrupt.
	 */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			rmi4_data->f01_data_base_addr,
			data,
			rmi4_data->num_of_intr_regs + 1);
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to read interrupt status\n",
				__func__);
		return;
	}

	status.data[0] = data[0];
	/* Wait out a firmware CRC check, then re-read the device status */
	if (status.status_code == STATUS_CRC_IN_PROGRESS) {
		retval = synaptics_rmi4_check_status(rmi4_data,
				&was_in_bl_mode);
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to check status\n",
					__func__);
			return;
		}
		retval = synaptics_rmi4_reg_read(rmi4_data,
				rmi4_data->f01_data_base_addr,
				status.data,
				sizeof(status.data));
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read device status\n",
					__func__);
			return;
		}
	}
	/* Unconfigured outside of flash programming means the chip reset */
	if (status.unconfigured && !status.flash_prog) {
		pr_notice("%s: spontaneous reset detected\n", __func__);
	}

	if (!report)
		return;

	/*
	 * Traverse the function handler list and service the source(s)
	 * of the interrupt accordingly.
	 */
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->num_of_data_sources) {
				if (fhandler->intr_mask &
						intr[fhandler->intr_reg_num]) {
					synaptics_rmi4_report_touch(rmi4_data,
							fhandler);
				}
			}
		}
	}

	/* Notify expansion handlers that registered an attention callback */
	mutex_lock(&exp_data.mutex);
	if (!list_empty(&exp_data.list)) {
		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
			if (!exp_fhandler->insert &&
					!exp_fhandler->remove &&
					(exp_fhandler->exp_fn->attn != NULL))
				exp_fhandler->exp_fn->attn(rmi4_data, intr[0]);
		}
	}
	mutex_unlock(&exp_data.mutex);

	return;
}
+
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+ struct synaptics_rmi4_data *rmi4_data = data;
+ const struct synaptics_dsx_board_data *bdata =
+ rmi4_data->hw_if->board_data;
+
+ if (synaptics_filter_interrupt(data) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ if (gpio_get_value(bdata->irq_gpio) != bdata->irq_on_state)
+ goto exit;
+
+ synaptics_rmi4_sensor_report(rmi4_data, true);
+
+exit:
+ return IRQ_HANDLED;
+}
+
+static int synaptics_rmi4_int_enable(struct synaptics_rmi4_data *rmi4_data,
+ bool enable)
+{
+ int retval = 0;
+ unsigned char ii;
+ unsigned char zero = 0x00;
+ unsigned char *intr_mask;
+ unsigned short intr_addr;
+
+ intr_mask = rmi4_data->intr_mask;
+
+ for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+ if (intr_mask[ii] != 0x00) {
+ intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+ if (enable) {
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ intr_addr,
+ &(intr_mask[ii]),
+ sizeof(intr_mask[ii]));
+ if (retval < 0)
+ return retval;
+ } else {
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ intr_addr,
+ &zero,
+ sizeof(zero));
+ if (retval < 0)
+ return retval;
+ }
+ }
+ }
+
+ return retval;
+}
+
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+ bool enable, bool attn_only)
+{
+ int retval = 0;
+ const struct synaptics_dsx_board_data *bdata =
+ rmi4_data->hw_if->board_data;
+
+ if (attn_only) {
+ retval = synaptics_rmi4_int_enable(rmi4_data, enable);
+ return retval;
+ }
+
+ if (enable) {
+ if (rmi4_data->irq_enabled)
+ return retval;
+
+ retval = synaptics_rmi4_int_enable(rmi4_data, false);
+ if (retval < 0)
+ return retval;
+
+ /* Process and clear interrupts */
+ synaptics_rmi4_sensor_report(rmi4_data, false);
+
+ retval = request_threaded_irq(rmi4_data->irq, NULL,
+ synaptics_rmi4_irq, bdata->irq_flags,
+ PLATFORM_DRIVER_NAME, rmi4_data);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to create irq thread\n",
+ __func__);
+ return retval;
+ }
+
+ retval = synaptics_rmi4_int_enable(rmi4_data, true);
+ if (retval < 0)
+ return retval;
+
+ rmi4_data->irq_enabled = true;
+ } else {
+ if (rmi4_data->irq_enabled) {
+ disable_irq(rmi4_data->irq);
+ free_irq(rmi4_data->irq, rmi4_data);
+ rmi4_data->irq_enabled = false;
+ }
+ }
+
+ return retval;
+}
+
+static void synaptics_rmi4_set_intr_mask(struct synaptics_rmi4_fn *fhandler,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count)
+{
+ unsigned char ii;
+ unsigned char intr_offset;
+
+ fhandler->intr_reg_num = (intr_count + 7) / 8;
+ if (fhandler->intr_reg_num != 0)
+ fhandler->intr_reg_num -= 1;
+
+ /* Set an enable bit for each data source */
+ intr_offset = intr_count % 8;
+ fhandler->intr_mask = 0;
+ for (ii = intr_offset;
+ ii < (fd->intr_src_count + intr_offset);
+ ii++)
+ fhandler->intr_mask |= 1 << ii;
+
+ return;
+}
+
/*
 * Set up the handler for RMI4 function F01 (device control).
 *
 * F01 needs no per-function data or extra state; this records its
 * interrupt sources and caches its register base addresses in the
 * driver data for use by the rest of the driver.  Always returns 0.
 */
static int synaptics_rmi4_f01_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;
	fhandler->data = NULL;
	fhandler->extra = NULL;

	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);

	/* Cache the F01 base addresses for device status/control access */
	rmi4_data->f01_query_base_addr = fd->query_base_addr;
	rmi4_data->f01_ctrl_base_addr = fd->ctrl_base_addr;
	rmi4_data->f01_data_base_addr = fd->data_base_addr;
	rmi4_data->f01_cmd_base_addr = fd->cmd_base_addr;

	return 0;
}
+
/*
 * Set up the handler for RMI4 function F11 (legacy 2D touch).
 *
 * Reads the F11 query registers to discover the number of supported
 * fingers and sensor geometry, programs the interrupt mask, and - when
 * the controller reports wakeup-gesture support - walks the optional
 * F11 data registers to compute the offset of data register 38 (gesture
 * status), which is cached in fhandler->extra for the report path.
 *
 * Returns 0 (or the non-negative result of the last register read) on
 * success, a negative errno on failure.
 */
static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	int temp;
	unsigned char offset;
	unsigned char fingers_supported;
	struct synaptics_rmi4_f11_extra_data *extra_data;
	struct synaptics_rmi4_f11_query_0_5 query_0_5;
	struct synaptics_rmi4_f11_query_7_8 query_7_8;
	struct synaptics_rmi4_f11_query_9 query_9;
	struct synaptics_rmi4_f11_query_12 query_12;
	struct synaptics_rmi4_f11_query_27 query_27;
	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
	const struct synaptics_dsx_board_data *bdata =
			rmi4_data->hw_if->board_data;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;
	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
	if (!fhandler->extra) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->extra\n",
				__func__);
		return -ENOMEM;
	}
	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base,
			query_0_5.data,
			sizeof(query_0_5.data));
	if (retval < 0)
		return retval;

	/* Maximum number of fingers supported */
	/*
	 * Per-spec encoding: 0..4 mean (value + 1) fingers, 5 means 10.
	 * NOTE(review): values > 5 leave num_of_data_points unassigned -
	 * confirm the firmware never reports such a value.
	 */
	if (query_0_5.num_of_fingers <= 4)
		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
	else if (query_0_5.num_of_fingers == 5)
		fhandler->num_of_data_points = 10;

	rmi4_data->num_of_fingers = fhandler->num_of_data_points;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.ctrl_base + 6,
			control_6_9.data,
			sizeof(control_6_9.data));
	if (retval < 0)
		return retval;

	/* Maximum x and y */
	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
			(control_6_9.sensor_max_x_pos_11_8 << 8);
	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
			(control_6_9.sensor_max_y_pos_11_8 << 8);
	dev_dbg(rmi4_data->pdev->dev.parent,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__, fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;

	if (bdata->swap_axes) {
		temp = rmi4_data->sensor_max_x;
		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
		rmi4_data->sensor_max_y = temp;
	}

	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);

	fhandler->data = NULL;

	/*
	 * Walk the optional query registers; 'offset' tracks the running
	 * position relative to the query base.
	 */
	offset = sizeof(query_0_5.data);

	/* query 6 */
	if (query_0_5.has_rel)
		offset += 1;

	/* queries 7 8 */
	if (query_0_5.has_gestures) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_7_8.data,
				sizeof(query_7_8.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_7_8.data);
	}

	/* query 9 */
	if (query_0_5.has_query_9) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_9.data,
				sizeof(query_9.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_9.data);
	}

	/* query 10 */
	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
		offset += 1;

	/* query 11 */
	if (query_0_5.has_query_11)
		offset += 1;

	/* query 12 */
	if (query_0_5.has_query_12) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_12.data,
				sizeof(query_12.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_12.data);
	}

	/* query 13 */
	if (query_0_5.has_jitter_filter)
		offset += 1;

	/* query 14 */
	if (query_0_5.has_query_12 && query_12.has_general_information_2)
		offset += 1;

	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
	if (query_0_5.has_query_12 && query_12.has_physical_properties)
		offset += 12;

	/* query 27 */
	if (query_0_5.has_query_27) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_27.data,
				sizeof(query_27.data));
		if (retval < 0)
			return retval;

		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
	}

	/* Without wakeup gestures the data-register walk is unnecessary */
	if (!rmi4_data->f11_wakeup_gesture)
		return retval;

	/*
	 * Walk the optional data registers to locate data register 38
	 * (wakeup gesture status); 'offset' is reused relative to the
	 * data base.
	 */
	/* data 0 */
	fingers_supported = fhandler->num_of_data_points;
	offset = (fingers_supported + 3) / 4;

	/* data 1 2 3 4 5 */
	offset += 5 * fingers_supported;

	/* data 6 7 */
	if (query_0_5.has_rel)
		offset += 2 * fingers_supported;

	/* data 8 */
	if (query_0_5.has_gestures && query_7_8.data[0])
		offset += 1;

	/* data 9 */
	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
		offset += 1;

	/* data 10 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_pinch || query_7_8.has_flick))
		offset += 1;

	/* data 11 12 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_flick || query_7_8.has_rotate))
		offset += 2;

	/* data 13 */
	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
		offset += (fingers_supported + 3) / 4;

	/* data 14 15 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_scroll_zones ||
			query_7_8.has_multi_finger_scroll ||
			query_7_8.has_chiral_scroll))
		offset += 2;

	/* data 16 17 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_scroll_zones &&
			query_7_8.individual_scroll_zones))
		offset += 2;

	/* data 18 19 20 21 22 23 24 25 26 27 */
	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
		offset += 10 * fingers_supported;

	/* data 28 */
	if (query_0_5.has_bending_correction ||
			query_0_5.has_large_object_suppression)
		offset += 1;

	/* data 29 30 31 */
	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
		offset += 3;

	/* data 32 */
	if (query_0_5.has_query_12 &&
			query_12.has_small_object_detection_tuning)
		offset += 1;

	/* data 33 34 */
	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
		offset += 2;

	/* data 35 */
	if (query_0_5.has_query_12 && query_12.has_8bit_w)
		offset += fingers_supported;

	/* data 36 */
	if (query_0_5.has_bending_correction)
		offset += 1;

	/* data 37 */
	if (query_0_5.has_query_27 && query_27.has_data_37)
		offset += 1;

	/* data 38 */
	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
		extra_data->data38_offset = offset;

	return retval;
}
+
+static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
+ unsigned short ctrl28)
+{
+ int retval;
+ static unsigned short ctrl_28_address;
+
+ if (ctrl28)
+ ctrl_28_address = ctrl28;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ ctrl_28_address,
+ &rmi4_data->report_enable,
+ sizeof(rmi4_data->report_enable));
+ if (retval < 0)
+ return retval;
+
+ return retval;
+}
+
/*
 * Determine whether a subpacket of an F12 control register is present.
 *
 * F12 query 5 holds a bitmap of which control registers exist; query 6
 * holds, for each existing control register, a variable-length run of
 * size bytes where bit 7 set marks a continuation byte (MASK_7BIT
 * covers the size bits).  This walks the query 6 packet descriptions up
 * to @ctrlreg and then tests the presence bit of @subpacket.
 *
 * Returns 1 if the subpacket is present, 0 if not, or a negative errno
 * on failure.
 */
static int synaptics_rmi4_f12_ctrl_sub(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_f12_query_5 *query_5,
		unsigned char ctrlreg, unsigned char subpacket)
{
	int retval;
	unsigned char cnt;
	unsigned char regnum;
	unsigned char bitnum;
	unsigned char q5_index;
	unsigned char q6_index;
	unsigned char offset;
	unsigned char max_ctrlreg;
	unsigned char *query_6;

	/* query 5 byte 0 is its own size; the rest is the presence bitmap */
	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;

	if (ctrlreg > max_ctrlreg) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Control register number (%d) over limit\n",
				__func__, ctrlreg);
		return -EINVAL;
	}

	/* The requested control register itself must be present */
	q5_index = ctrlreg / 8 + 1;
	bitnum = ctrlreg % 8;
	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Control %d is not present\n",
				__func__, ctrlreg);
		return -EINVAL;
	}

	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
	if (!query_6) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for query 6\n",
				__func__);
		return -ENOMEM;
	}

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 6,
			query_6,
			query_5->size_of_query6);
	if (retval < 0)
		goto exit;

	q6_index = 0;

	/* Skip the query 6 packet description of every earlier register */
	for (regnum = 0; regnum < ctrlreg; regnum++) {
		q5_index = regnum / 8 + 1;
		bitnum = regnum % 8;
		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
			continue;

		/* A zero size byte is followed by a two-byte extended size */
		if (query_6[q6_index] == 0x00)
			q6_index += 3;
		else
			q6_index++;

		/* Skip continuation bytes (bit 7 set) of the subpacket map */
		while (query_6[q6_index] & ~MASK_7BIT)
			q6_index++;

		q6_index++;
	}

	/* q6_index now points at ctrlreg's subpacket presence bytes */
	cnt = 0;
	q6_index++;
	offset = subpacket / 7;
	bitnum = subpacket % 7;

	do {
		if (cnt == offset) {
			if (query_6[q6_index + cnt] & (1 << bitnum))
				retval = 1;
			else
				retval = 0;
			goto exit;
		}
		cnt++;
	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);

	/* Ran out of presence bytes before reaching the subpacket */
	retval = 0;

exit:
	kfree(query_6);

	return retval;
}
+
+static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count)
+{
+ int retval = 0;
+ int temp;
+ unsigned char subpacket;
+ unsigned char ctrl_23_size;
+ unsigned char size_of_2d_data;
+ unsigned char size_of_query8;
+ unsigned char ctrl_8_offset;
+ unsigned char ctrl_20_offset;
+ unsigned char ctrl_23_offset;
+ unsigned char ctrl_28_offset;
+ unsigned char ctrl_31_offset;
+ unsigned char num_of_fingers;
+ struct synaptics_rmi4_f12_extra_data *extra_data;
+ struct synaptics_rmi4_f12_query_5 *query_5 = NULL;
+ struct synaptics_rmi4_f12_query_8 *query_8 = NULL;
+ struct synaptics_rmi4_f12_ctrl_8 *ctrl_8 = NULL;
+ struct synaptics_rmi4_f12_ctrl_23 *ctrl_23 = NULL;
+ struct synaptics_rmi4_f12_ctrl_31 *ctrl_31 = NULL;
+ const struct synaptics_dsx_board_data *bdata =
+ rmi4_data->hw_if->board_data;
+
+ fhandler->fn_number = fd->fn_number;
+ fhandler->num_of_data_sources = fd->intr_src_count;
+ fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+ if (!fhandler->extra) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for fhandler->extra\n",
+ __func__);
+ return -ENOMEM;
+ }
+ extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+ size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+ query_5 = kmalloc(sizeof(*query_5), GFP_KERNEL);
+ if (!query_5) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for query_5\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ query_8 = kmalloc(sizeof(*query_8), GFP_KERNEL);
+ if (!query_8) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for query_8\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ ctrl_8 = kmalloc(sizeof(*ctrl_8), GFP_KERNEL);
+ if (!ctrl_8) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for ctrl_8\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ ctrl_23 = kmalloc(sizeof(*ctrl_23), GFP_KERNEL);
+ if (!ctrl_23) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for ctrl_23\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ ctrl_31 = kmalloc(sizeof(*ctrl_31), GFP_KERNEL);
+ if (!ctrl_31) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for ctrl_31\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.query_base + 5,
+ query_5->data,
+ sizeof(query_5->data));
+ if (retval < 0)
+ goto exit;
+
+ ctrl_8_offset = query_5->ctrl0_is_present +
+ query_5->ctrl1_is_present +
+ query_5->ctrl2_is_present +
+ query_5->ctrl3_is_present +
+ query_5->ctrl4_is_present +
+ query_5->ctrl5_is_present +
+ query_5->ctrl6_is_present +
+ query_5->ctrl7_is_present;
+
+ ctrl_20_offset = ctrl_8_offset +
+ query_5->ctrl8_is_present +
+ query_5->ctrl9_is_present +
+ query_5->ctrl10_is_present +
+ query_5->ctrl11_is_present +
+ query_5->ctrl12_is_present +
+ query_5->ctrl13_is_present +
+ query_5->ctrl14_is_present +
+ query_5->ctrl15_is_present +
+ query_5->ctrl16_is_present +
+ query_5->ctrl17_is_present +
+ query_5->ctrl18_is_present +
+ query_5->ctrl19_is_present;
+
+ ctrl_23_offset = ctrl_20_offset +
+ query_5->ctrl20_is_present +
+ query_5->ctrl21_is_present +
+ query_5->ctrl22_is_present;
+
+ ctrl_28_offset = ctrl_23_offset +
+ query_5->ctrl23_is_present +
+ query_5->ctrl24_is_present +
+ query_5->ctrl25_is_present +
+ query_5->ctrl26_is_present +
+ query_5->ctrl27_is_present;
+
+ ctrl_31_offset = ctrl_28_offset +
+ query_5->ctrl28_is_present +
+ query_5->ctrl29_is_present +
+ query_5->ctrl30_is_present;
+
+ ctrl_23_size = 2;
+ for (subpacket = 2; subpacket <= 4; subpacket++) {
+ retval = synaptics_rmi4_f12_ctrl_sub(rmi4_data,
+ fhandler, query_5, 23, subpacket);
+ if (retval == 1)
+ ctrl_23_size++;
+ else if (retval < 0)
+ goto exit;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.ctrl_base + ctrl_23_offset,
+ ctrl_23->data,
+ ctrl_23_size);
+ if (retval < 0)
+ goto exit;
+
+ /* Maximum number of fingers supported */
+ fhandler->num_of_data_points = min_t(unsigned char,
+ ctrl_23->max_reported_objects,
+ (unsigned char)F12_FINGERS_TO_SUPPORT);
+
+ num_of_fingers = fhandler->num_of_data_points;
+ rmi4_data->num_of_fingers = num_of_fingers;
+
+ rmi4_data->stylus_enable = ctrl_23->stylus_enable;
+ rmi4_data->eraser_enable = ctrl_23->eraser_enable;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.query_base + 7,
+ &size_of_query8,
+ sizeof(size_of_query8));
+ if (retval < 0)
+ goto exit;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.query_base + 8,
+ query_8->data,
+ size_of_query8);
+ if (retval < 0)
+ goto exit;
+
+ /* Determine the presence of the Data0 register */
+ extra_data->data1_offset = query_8->data0_is_present;
+
+ if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
+ extra_data->data15_offset = query_8->data0_is_present +
+ query_8->data1_is_present +
+ query_8->data2_is_present +
+ query_8->data3_is_present +
+ query_8->data4_is_present +
+ query_8->data5_is_present +
+ query_8->data6_is_present +
+ query_8->data7_is_present +
+ query_8->data8_is_present +
+ query_8->data9_is_present +
+ query_8->data10_is_present +
+ query_8->data11_is_present +
+ query_8->data12_is_present +
+ query_8->data13_is_present +
+ query_8->data14_is_present;
+ extra_data->data15_size = (num_of_fingers + 7) / 8;
+ } else {
+ extra_data->data15_size = 0;
+ }
+
+#ifdef REPORT_2D_PRESSURE
+ if ((size_of_query8 >= 4) && (query_8->data23_is_present)) {
+ extra_data->data23_offset = query_8->data0_is_present +
+ query_8->data1_is_present +
+ query_8->data2_is_present +
+ query_8->data3_is_present +
+ query_8->data4_is_present +
+ query_8->data5_is_present +
+ query_8->data6_is_present +
+ query_8->data7_is_present +
+ query_8->data8_is_present +
+ query_8->data9_is_present +
+ query_8->data10_is_present +
+ query_8->data11_is_present +
+ query_8->data12_is_present +
+ query_8->data13_is_present +
+ query_8->data14_is_present +
+ query_8->data15_is_present +
+ query_8->data16_is_present +
+ query_8->data17_is_present +
+ query_8->data18_is_present +
+ query_8->data19_is_present +
+ query_8->data20_is_present +
+ query_8->data21_is_present +
+ query_8->data22_is_present;
+ extra_data->data23_size = num_of_fingers;
+ rmi4_data->report_pressure = true;
+ } else {
+ extra_data->data23_size = 0;
+ rmi4_data->report_pressure = false;
+ }
+#endif
+
+ rmi4_data->report_enable = RPT_DEFAULT;
+#ifdef REPORT_2D_Z
+ rmi4_data->report_enable |= RPT_Z;
+#endif
+#ifdef REPORT_2D_W
+ rmi4_data->report_enable |= (RPT_WX | RPT_WY);
+#endif
+
+ retval = synaptics_rmi4_f12_set_enables(rmi4_data,
+ fhandler->full_addr.ctrl_base + ctrl_28_offset);
+ if (retval < 0)
+ goto exit;
+
+ if (query_5->ctrl8_is_present) {
+ rmi4_data->wedge_sensor = false;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.ctrl_base + ctrl_8_offset,
+ ctrl_8->data,
+ sizeof(ctrl_8->data));
+ if (retval < 0)
+ goto exit;
+
+ /* Maximum x and y */
+ rmi4_data->sensor_max_x =
+ ((unsigned int)ctrl_8->max_x_coord_lsb << 0) |
+ ((unsigned int)ctrl_8->max_x_coord_msb << 8);
+ rmi4_data->sensor_max_y =
+ ((unsigned int)ctrl_8->max_y_coord_lsb << 0) |
+ ((unsigned int)ctrl_8->max_y_coord_msb << 8);
+
+ rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+ } else {
+ rmi4_data->wedge_sensor = true;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.ctrl_base + ctrl_31_offset,
+ ctrl_31->data,
+ sizeof(ctrl_31->data));
+ if (retval < 0)
+ goto exit;
+
+ /* Maximum x and y */
+ rmi4_data->sensor_max_x =
+ ((unsigned int)ctrl_31->max_x_coord_lsb << 0) |
+ ((unsigned int)ctrl_31->max_x_coord_msb << 8);
+ rmi4_data->sensor_max_y =
+ ((unsigned int)ctrl_31->max_y_coord_lsb << 0) |
+ ((unsigned int)ctrl_31->max_y_coord_msb << 8);
+
+ rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+ }
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Function %02x max x = %d max y = %d\n",
+ __func__, fhandler->fn_number,
+ rmi4_data->sensor_max_x,
+ rmi4_data->sensor_max_y);
+
+ if (bdata->swap_axes) {
+ temp = rmi4_data->sensor_max_x;
+ rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+ rmi4_data->sensor_max_y = temp;
+ }
+
+ rmi4_data->f12_wakeup_gesture = query_5->ctrl27_is_present;
+ if (rmi4_data->f12_wakeup_gesture) {
+ extra_data->ctrl20_offset = ctrl_20_offset;
+ extra_data->data4_offset = query_8->data0_is_present +
+ query_8->data1_is_present +
+ query_8->data2_is_present +
+ query_8->data3_is_present;
+ }
+
+ synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+ /* Allocate memory for finger data storage space */
+ fhandler->data_size = num_of_fingers * size_of_2d_data;
+ fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
+ if (!fhandler->data) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for fhandler->data\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+exit:
+ kfree(query_5);
+ kfree(query_8);
+ kfree(ctrl_8);
+ kfree(ctrl_23);
+ kfree(ctrl_31);
+
+ return retval;
+}
+
+/*
+ * synaptics_rmi4_f1a_alloc_mem - set up the F1A (0D buttons) handle
+ *
+ * Allocates the F1A handle, reads the button query registers, and
+ * allocates the tx/rx map, the button-press bitmask buffer and the
+ * key-code map.  The handle is stored in fhandler->data.
+ *
+ * On failure the partially built handle is left in fhandler->data;
+ * the caller (synaptics_rmi4_f1a_init()) releases everything through
+ * synaptics_rmi4_f1a_kfree().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	struct synaptics_rmi4_f1a_handle *f1a;
+
+	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
+	if (!f1a) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for function handle\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	fhandler->data = (void *)f1a;
+	fhandler->extra = NULL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			f1a->button_query.data,
+			sizeof(f1a->button_query.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Register holds (number of buttons - 1) */
+	f1a->max_count = f1a->button_query.max_button_count + 1;
+
+	/* Two bytes (tx electrode, rx electrode) per button */
+	f1a->button_control.txrx_map = kzalloc(f1a->max_count * 2, GFP_KERNEL);
+	if (!f1a->button_control.txrx_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for tx rx mapping\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	/* One bit per button, rounded up to whole bytes */
+	f1a->button_bitmask_size = (f1a->max_count + 7) / 8;
+
+	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
+			sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
+	if (!f1a->button_data_buffer) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for data buffer\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	f1a->button_map = kcalloc(f1a->max_count,
+			sizeof(*(f1a->button_map)), GFP_KERNEL);
+	if (!f1a->button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for button map\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_f1a_button_map - build the button-to-keycode mapping
+ *
+ * Reads the optional tx/rx electrode map and query 4 (external AFE
+ * capability), then copies the platform-supplied capacitive key map
+ * into the F1A handle.  Register offsets are computed from the
+ * presence bits reported in the F1A button query.
+ *
+ * Returns 0 on success, -ENODEV if the board data lacks a key map,
+ * or a negative errno on bus failure.
+ */
+static int synaptics_rmi4_f1a_button_map(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char offset = 0;
+	struct synaptics_rmi4_f1a_query_4 query_4;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* Control registers before the tx/rx map, one per present feature */
+	offset = f1a->button_query.has_general_control +
+			f1a->button_query.has_interrupt_enable +
+			f1a->button_query.has_multibutton_select;
+
+	if (f1a->button_query.has_tx_rx_map) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + offset,
+				f1a->button_control.txrx_map,
+				f1a->max_count * 2);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tx rx mapping\n",
+					__func__);
+			return retval;
+		}
+
+		rmi4_data->button_txrx_mapping = f1a->button_control.txrx_map;
+	}
+
+	if (f1a->button_query.has_query4) {
+		/* Query 4 follows queries 0/1 plus optional queries 2 and 3 */
+		offset = 2 + f1a->button_query.has_query2 +
+				f1a->button_query.has_query3;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_4.data,
+				sizeof(query_4.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read button features 4\n",
+					__func__);
+			return retval;
+		}
+
+		/* Ctrl 24 presence indicates buttons on an external AFE */
+		if (query_4.has_ctrl24)
+			rmi4_data->external_afe_buttons = true;
+		else
+			rmi4_data->external_afe_buttons = false;
+	}
+
+	if (!bdata->cap_button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: cap_button_map is NULL in board file\n",
+				__func__);
+		return -ENODEV;
+	} else if (!bdata->cap_button_map->map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Button map is missing in board file\n",
+				__func__);
+		return -ENODEV;
+	} else {
+		/* Clamp to the smaller of firmware and board button counts */
+		if (bdata->cap_button_map->nbuttons != f1a->max_count) {
+			f1a->valid_button_count = min(f1a->max_count,
+					bdata->cap_button_map->nbuttons);
+		} else {
+			f1a->valid_button_count = f1a->max_count;
+		}
+
+		for (ii = 0; ii < f1a->valid_button_count; ii++)
+			f1a->button_map[ii] = bdata->cap_button_map->map[ii];
+	}
+
+	return 0;
+}
+
+/* Release the F1A handle and all buffers it owns; safe to call twice. */
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+	if (!f1a)
+		return;
+
+	kfree(f1a->button_control.txrx_map);
+	kfree(f1a->button_data_buffer);
+	kfree(f1a->button_map);
+	kfree(f1a);
+	fhandler->data = NULL;
+}
+
+/*
+ * synaptics_rmi4_f1a_init - initialize the F1A (0D buttons) function
+ *
+ * Records the function number and interrupt sources, allocates the
+ * F1A handle and builds the button map.  On any failure all F1A
+ * allocations are released before returning.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+	if (retval < 0)
+		goto error_exit;
+
+	retval = synaptics_rmi4_f1a_button_map(rmi4_data, fhandler);
+	if (retval < 0)
+		goto error_exit;
+
+	rmi4_data->button_0d_enabled = 1;
+
+	return 0;
+
+error_exit:
+	synaptics_rmi4_f1a_kfree(fhandler);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_empty_fn_list - tear down all registered RMI functions
+ *
+ * Walks the support_fn_list, freeing each handler's private data
+ * (F1A handlers own extra buffers and go through their dedicated
+ * kfree helper) and the handler itself, then re-initializes the list
+ * so it can be repopulated by a fresh device query.
+ */
+static void synaptics_rmi4_empty_fn_list(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_fn *fhandler_temp;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		/* _safe variant: entries are deleted while iterating */
+		list_for_each_entry_safe(fhandler,
+				fhandler_temp,
+				&rmi->support_fn_list,
+				link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+				synaptics_rmi4_f1a_kfree(fhandler);
+			} else {
+				kfree(fhandler->extra);
+				kfree(fhandler->data);
+			}
+			list_del(&fhandler->link);
+			kfree(fhandler);
+		}
+	}
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_check_status - wait for the device to leave CRC test
+ *
+ * Polls the F01 device status register every 20 ms for up to
+ * CHECK_STATUS_TIMEOUT_MS while the firmware CRC check is running.
+ * Sets *was_in_bl_mode if any waiting was needed (the PDT must then
+ * be rescanned by the caller) and latches flash_prog_mode from the
+ * status register.
+ *
+ * Returns 0 on success, -EINVAL on timeout, or a bus error code.
+ */
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode)
+{
+	int retval;
+	int timeout = CHECK_STATUS_TIMEOUT_MS;
+	struct synaptics_rmi4_f01_device_status status;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0)
+		return retval;
+
+	while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		if (timeout > 0)
+			msleep(20);
+		else
+			return -EINVAL;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0)
+			return retval;
+
+		timeout -= 20;
+	}
+
+	/* Any decrement means the CRC check was still running at entry */
+	if (timeout != CHECK_STATUS_TIMEOUT_MS)
+		*was_in_bl_mode = true;
+
+	if (status.flash_prog == 1) {
+		rmi4_data->flash_prog_mode = true;
+		pr_notice("%s: In flash prog mode, status = 0x%02x\n",
+				__func__,
+				status.status_code);
+	} else {
+		rmi4_data->flash_prog_mode = false;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_set_configured - assert the F01 "configured" bit
+ *
+ * Reads F01 device control, caches the current no-sleep setting for
+ * later restoration by the sleep-enable path, then writes the control
+ * register back with the CONFIGURED bit set.  Failures are logged but
+ * not propagated (best effort).
+ */
+static void synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char device_ctrl;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+		return;
+	}
+
+	/* Remember NO_SLEEP so sleep_enable() can restore it */
+	rmi4_data->no_sleep_setting = device_ctrl & NO_SLEEP_ON;
+	device_ctrl |= CONFIGURED;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+	}
+
+	return;
+}
+
+/*
+ * Allocate a function handler and fill in its base addresses, each
+ * adjusted with the PDT page number in the high byte.
+ */
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+	struct synaptics_rmi4_fn *fn;
+	unsigned short page_bits = page_number << 8;
+
+	*fhandler = kmalloc(sizeof(**fhandler), GFP_KERNEL);
+	if (*fhandler == NULL)
+		return -ENOMEM;
+
+	fn = *fhandler;
+	fn->full_addr.data_base = rmi_fd->data_base_addr | page_bits;
+	fn->full_addr.ctrl_base = rmi_fd->ctrl_base_addr | page_bits;
+	fn->full_addr.cmd_base = rmi_fd->cmd_base_addr | page_bits;
+	fn->full_addr.query_base = rmi_fd->query_base_addr | page_bits;
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_query_device - scan the PDT and register all functions
+ *
+ * Walks the page description table of every serviced page, allocating
+ * and initializing a handler for each supported function (F01, F11,
+ * F12, F1A), then reads the F01 identity/build information and builds
+ * the per-register interrupt masks.  If F01 reports it was in
+ * bootloader mode, the whole PDT is rescanned from scratch.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char page_number;
+	unsigned char intr_count;
+	unsigned char *f01_query;
+	unsigned short pdt_entry_addr;
+	bool f01found;
+	bool f35found;
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+rescan_pdt:
+	f01found = false;
+	f35found = false;
+	was_in_bl_mode = false;
+	intr_count = 0;
+	/*
+	 * NOTE(review): on a rescan, handlers already added to
+	 * support_fn_list in the previous pass appear to be orphaned by
+	 * this re-init rather than freed - possible leak; confirm.
+	 */
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	/* Scan the page description tables of the pages to service */
+	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
+		/* PDT entries are walked downward from PDT_START */
+		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+				pdt_entry_addr -= PDT_ENTRY_SIZE) {
+			pdt_entry_addr |= (page_number << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					pdt_entry_addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* Strip the page bits for the next loop iteration */
+			pdt_entry_addr &= ~(MASK_8BIT << 8);
+
+			fhandler = NULL;
+
+			if (rmi_fd.fn_number == 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Reached end of PDT\n",
+						__func__);
+				break;
+			}
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: F%02x found (page %d)\n",
+					__func__, rmi_fd.fn_number,
+					page_number);
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				f01found = true;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				/*
+				 * NOTE(review): error returns below leave the
+				 * freshly allocated fhandler unfreed - possible
+				 * leak; confirm against upstream driver.
+				 */
+				retval = synaptics_rmi4_f01_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+
+				retval = synaptics_rmi4_check_status(rmi4_data,
+						&was_in_bl_mode);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to check status\n",
+							__func__);
+					return retval;
+				}
+
+				if (was_in_bl_mode) {
+					kfree(fhandler);
+					fhandler = NULL;
+					goto rescan_pdt;
+				}
+
+				/* UI firmware not loaded; skip the scan */
+				if (rmi4_data->flash_prog_mode)
+					goto flash_prog_mode;
+
+				break;
+			case SYNAPTICS_RMI4_F11:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f11_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F12:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f12_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F1A:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f1a_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0) {
+#ifdef IGNORE_FN_INIT_FAILURE
+					kfree(fhandler);
+					fhandler = NULL;
+#else
+					return retval;
+#endif
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				/* Microbootloader mode recovery function */
+				f35found = true;
+				break;
+			}
+
+			/* Accumulate the interrupt count */
+			intr_count += rmi_fd.intr_src_count;
+
+			if (fhandler && rmi_fd.intr_src_count) {
+				list_add_tail(&fhandler->link,
+						&rmi->support_fn_list);
+			}
+		}
+	}
+
+	if (!f01found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F01\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			pr_notice("%s: In microbootloader mode\n",
+					__func__);
+			return 0;
+		}
+	}
+
+flash_prog_mode:
+	/* One interrupt status register per 8 interrupt sources */
+	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of interrupt registers = %d\n",
+			__func__, rmi4_data->num_of_intr_regs);
+
+	f01_query = kmalloc(F01_STD_QUERY_LEN, GFP_KERNEL);
+	if (!f01_query) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f01_query\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr,
+			f01_query,
+			F01_STD_QUERY_LEN);
+	if (retval < 0) {
+		kfree(f01_query);
+		return retval;
+	}
+
+	/* RMI Version 4.0 currently supported */
+	rmi->version_major = 4;
+	rmi->version_minor = 0;
+
+	rmi->manufacturer_id = f01_query[0];
+	rmi->product_props = f01_query[1];
+	rmi->product_info[0] = f01_query[2];
+	rmi->product_info[1] = f01_query[3];
+	/* Product ID string starts at query byte 11 */
+	retval = secure_memcpy(rmi->product_id_string,
+			sizeof(rmi->product_id_string),
+			&f01_query[11],
+			F01_STD_QUERY_LEN - 11,
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+
+	kfree(f01_query);
+
+	if (rmi->manufacturer_id != 1) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
+				__func__, rmi->manufacturer_id);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+			rmi->build_id,
+			sizeof(rmi->build_id));
+	if (retval < 0)
+		return retval;
+
+	/* Build ID is stored little-endian in three bytes */
+	rmi4_data->firmware_id = (unsigned int)rmi->build_id[0] +
+			(unsigned int)rmi->build_id[1] * 0x100 +
+			(unsigned int)rmi->build_id[2] * 0x10000;
+
+	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+	/*
+	 * Map out the interrupt bit masks for the interrupt sources
+	 * from the registered function handlers.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+						fhandler->intr_mask;
+			}
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = WAKEUP_GESTURE;
+	else
+		rmi4_data->enable_wakeup_gesture = false;
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_gpio_setup - request and configure (or free) a GPIO
+ *
+ * @gpio: GPIO number
+ * @config: true to request and set direction, false to release
+ * @dir: 0 for input, non-zero for output
+ * @state: initial output level when @dir selects output
+ *
+ * On failure the GPIO is released before returning, so the caller
+ * never has to undo a half-finished setup.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_gpio_setup(int gpio, bool config, int dir, int state)
+{
+	int retval = 0;
+	unsigned char buf[16];
+
+	if (config) {
+		retval = snprintf(buf, sizeof(buf), "dsx_gpio_%u\n", gpio);
+		if (retval >= sizeof(buf))
+			return -EINVAL;
+
+		retval = gpio_request(gpio, buf);
+		if (retval) {
+			pr_err("%s: Failed to get gpio %d (code: %d)",
+					__func__, gpio, retval);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval) {
+			pr_err("%s: Failed to set gpio %d direction",
+					__func__, gpio);
+			/* Do not leak the GPIO requested above */
+			gpio_free(gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_set_params - declare input device capabilities
+ *
+ * Publishes the multitouch axis ranges (and optional width/pressure
+ * axes), initializes type-B slots when enabled, and registers key
+ * capabilities for capacitive (F1A) buttons, virtual buttons and the
+ * wakeup-gesture key.  Must be called after the device query has
+ * populated sensor_max_x/y and the function list.
+ */
+static void synaptics_rmi4_set_params(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+	struct synaptics_rmi4_f1a_handle *f1a;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MAJOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MINOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+#endif
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		input_set_abs_params(rmi4_data->input_dev,
+				ABS_MT_PRESSURE, 0,
+				MAX_F12_TOUCH_PRESSURE, 0, 0);
+	}
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+#ifdef KERNEL_ABOVE_3_6
+	/* Slot API changed signature in 3.7 */
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers, INPUT_MT_DIRECT);
+#else
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers);
+#endif
+#endif
+
+	/* Look for an F1A handler to pick up capacitive button keycodes */
+	f1a = NULL;
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+				f1a = fhandler->data;
+		}
+	}
+
+	if (f1a) {
+		for (ii = 0; ii < f1a->valid_button_count; ii++) {
+			set_bit(f1a->button_map[ii],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, f1a->button_map[ii]);
+		}
+	}
+
+	/* Virtual button map entries are 5 values wide; keycode is first */
+	if (vir_button_map->nbuttons) {
+		for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+			set_bit(vir_button_map->map[ii * 5],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, vir_button_map->map[ii * 5]);
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture) {
+		set_bit(KEY_WAKEUP, rmi4_data->input_dev->keybit);
+		input_set_capability(rmi4_data->input_dev, EV_KEY, KEY_WAKEUP);
+	}
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_set_input_dev - create and register the input devices
+ *
+ * Allocates the main touch input device, queries the RMI device to
+ * populate its parameters, registers it, and - when the firmware
+ * reports stylus support - allocates and registers a secondary stylus
+ * input device.
+ *
+ * Error paths unwind in reverse order; an input device that was
+ * registered is released with input_unregister_device() (which frees
+ * it), while one that was only allocated is released with
+ * input_free_device().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_set_input_dev(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate input device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_input_device;
+	}
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_query_device;
+	}
+
+	rmi4_data->input_dev->name = PLATFORM_DRIVER_NAME;
+	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->input_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+	/* Board file may restrict the 2D area (e.g. virtual key strip) */
+	if (bdata->max_y_for_2d >= 0)
+		rmi4_data->sensor_max_y = bdata->max_y_for_2d;
+
+	synaptics_rmi4_set_params(rmi4_data);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register input device\n",
+				__func__);
+		goto err_register_input;
+	}
+
+	if (!rmi4_data->stylus_enable)
+		return 0;
+
+	rmi4_data->stylus_dev = input_allocate_device();
+	if (rmi4_data->stylus_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate stylus device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_stylus_device;
+	}
+
+	rmi4_data->stylus_dev->name = STYLUS_DRIVER_NAME;
+	rmi4_data->stylus_dev->phys = STYLUS_PHYS_NAME;
+	rmi4_data->stylus_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->stylus_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->stylus_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->stylus_dev, rmi4_data);
+
+	set_bit(EV_KEY, rmi4_data->stylus_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->stylus_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->stylus_dev->keybit);
+	set_bit(BTN_TOOL_PEN, rmi4_data->stylus_dev->keybit);
+	if (rmi4_data->eraser_enable)
+		set_bit(BTN_TOOL_RUBBER, rmi4_data->stylus_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->stylus_dev->propbit);
+#endif
+
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+
+	retval = input_register_device(rmi4_data->stylus_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register stylus device\n",
+				__func__);
+		goto err_register_stylus;
+	}
+
+	return 0;
+
+err_register_stylus:
+	/* Registration failed, so the device must be freed explicitly */
+	input_free_device(rmi4_data->stylus_dev);
+	rmi4_data->stylus_dev = NULL;
+
+err_stylus_device:
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+
+err_register_input:
+err_query_device:
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_free_device(rmi4_data->input_dev);
+
+err_input_device:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_set_gpio - claim and sequence the controller GPIOs
+ *
+ * Requests the attention (IRQ) GPIO as an input and, when provided by
+ * the board data, the power and reset GPIOs as outputs in their
+ * inactive states.  Then powers the device up and pulses the reset
+ * line with the board-specified delays.  On failure, previously
+ * requested GPIOs are released in reverse order.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_set_gpio(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	retval = synaptics_rmi4_gpio_setup(
+			bdata->irq_gpio,
+			true, 0, 0);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to configure attention GPIO\n",
+				__func__);
+		goto err_gpio_irq;
+	}
+
+	if (bdata->power_gpio >= 0) {
+		/* Start with power in the inactive state */
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure power GPIO\n",
+					__func__);
+			goto err_gpio_power;
+		}
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		/* Start with reset deasserted */
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure reset GPIO\n",
+					__func__);
+			goto err_gpio_reset;
+		}
+	}
+
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	/* Assert, hold, then release reset per board timing */
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_gpio_power:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+err_gpio_irq:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_get_reg - acquire (or release) the supply regulators
+ *
+ * With @get true, looks up the power and bus-pullup regulators named
+ * in the board data (either may be absent).  With @get false, jumps
+ * straight to the release path.  On any failure both regulator
+ * references are dropped and NULLed.
+ *
+ * NOTE(review): on a regulator_get() failure the ERR_PTR value is
+ * passed to regulator_put() at the cleanup label; mainline
+ * regulator_put() tolerates ERR_PTR/NULL - confirm for this tree.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool get)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!get) {
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if ((bdata->pwr_reg_name != NULL) && (*bdata->pwr_reg_name != 0)) {
+		rmi4_data->pwr_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(rmi4_data->pwr_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get power regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->pwr_reg);
+			goto regulator_put;
+		}
+	}
+
+	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
+		rmi4_data->bus_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(rmi4_data->bus_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get bus pullup regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->bus_reg);
+			goto regulator_put;
+		}
+	}
+
+	return 0;
+
+regulator_put:
+	if (rmi4_data->pwr_reg) {
+		regulator_put(rmi4_data->pwr_reg);
+		rmi4_data->pwr_reg = NULL;
+	}
+
+	if (rmi4_data->bus_reg) {
+		regulator_put(rmi4_data->bus_reg);
+		rmi4_data->bus_reg = NULL;
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_enable_reg - enable or disable the supply regulators
+ *
+ * Enable order is bus pullup first, then power (with the board's
+ * power-on delay).  The disable path enters at disable_pwr_reg and
+ * intentionally falls through to disable_bus_reg so both supplies are
+ * turned off in reverse order.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_enable_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!enable) {
+		retval = 0;
+		goto disable_pwr_reg;
+	}
+
+	if (rmi4_data->bus_reg) {
+		retval = regulator_enable(rmi4_data->bus_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable bus pullup regulator\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	if (rmi4_data->pwr_reg) {
+		retval = regulator_enable(rmi4_data->pwr_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable power regulator\n",
+					__func__);
+			goto disable_bus_reg;
+		}
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (rmi4_data->pwr_reg)
+		regulator_disable(rmi4_data->pwr_reg);
+
+	/* fallthrough - bus pullup is disabled after power */
+disable_bus_reg:
+	if (rmi4_data->bus_reg)
+		regulator_disable(rmi4_data->bus_reg);
+
+exit:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_free_fingers - report all contacts as released
+ *
+ * Lifts every multitouch slot (type-B) or syncs an empty frame
+ * (type-A), clears the touch/tool keys on the main input device and,
+ * when present, on the stylus device.  Serialized against the report
+ * path via rmi4_report_mutex.
+ *
+ * Always returns 0.
+ */
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+#ifdef TYPE_B_PROTOCOL
+	for (ii = 0; ii < rmi4_data->num_of_fingers; ii++) {
+		input_mt_slot(rmi4_data->input_dev, ii);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOUCH, 0);
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(rmi4_data->input_dev);
+#endif
+	input_sync(rmi4_data->input_dev);
+
+	if (rmi4_data->stylus_enable) {
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOOL_PEN, 0);
+		if (rmi4_data->eraser_enable) {
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_RUBBER, 0);
+		}
+		input_sync(rmi4_data->stylus_dev);
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	rmi4_data->fingers_on_2d = false;
+
+	return 0;
+}
+
+/*
+ * Issue the F01 software reset command, wait the board-specified
+ * reset delay, then re-run the hardware interface's UI init hook if
+ * one is provided.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_sw_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char reset_cmd = 0x01;
+	int err;
+
+	err = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_cmd_base_addr,
+			&reset_cmd,
+			sizeof(reset_cmd));
+	if (err < 0)
+		return err;
+
+	/* Give the firmware time to come back up */
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+
+	if (rmi4_data->hw_if->ui_hw_init) {
+		err = rmi4_data->hw_if->ui_hw_init(rmi4_data);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_rebuild_work - full teardown/rebuild of the driver
+ *
+ * Delayed-work handler: with interrupts disabled and both the reset
+ * and expansion-function mutexes held, it removes expansion modules,
+ * drops the sysfs attributes, releases all fingers and function
+ * handlers, unregisters the input devices, soft-resets the chip, and
+ * then re-creates everything (input device, sysfs files, expansion
+ * module init).  Interrupts are re-enabled even on failure.
+ */
+static void synaptics_rmi4_rebuild_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(delayed_work, struct synaptics_rmi4_data,
+			rb_work);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	mutex_lock(&exp_data.mutex);
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	/* Let expansion modules detach from the device being rebuilt */
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->remove != NULL)
+				exp_fhandler->exp_fn->remove(rmi4_data);
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto exit;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	/* Re-attach expansion modules to the rebuilt device */
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->init != NULL)
+				exp_fhandler->exp_fn->init(rmi4_data);
+	}
+
+	retval = 0;
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&exp_data.mutex);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_reset_device - reset the controller
+ *
+ * With @rebuild true, defers to the rebuild workqueue (full input
+ * device teardown/re-registration) and returns immediately.
+ * Otherwise performs an in-place reset under rmi4_reset_mutex with
+ * interrupts disabled: soft reset, release fingers, drop and re-query
+ * the function list, then notify expansion modules via their reset
+ * callbacks.  Interrupts are re-enabled even on failure.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	if (rebuild) {
+		queue_delayed_work(rmi4_data->rb_workqueue,
+				&rmi4_data->rb_work,
+				msecs_to_jiffies(REBUILD_WORK_DELAY_MS));
+		return 0;
+	}
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reset != NULL)
+				exp_fhandler->exp_fn->reset(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	retval = 0;
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	return retval;
+}
+
+#ifdef FB_READY_RESET
+/*
+ * synaptics_rmi4_reset_work - reset once the framebuffer is ready
+ *
+ * Work handler that polls fb_ready every FB_READY_WAIT_MS for up to
+ * FB_READY_TIMEOUT_S seconds, then issues a non-rebuild device reset
+ * under the expansion-function init mutex.  Gives up silently (with
+ * an error log) on timeout.
+ */
+static void synaptics_rmi4_reset_work(struct work_struct *work)
+{
+	int retval;
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(work, struct synaptics_rmi4_data,
+			reset_work);
+
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return;
+}
+#endif
+
+/*
+ * synaptics_rmi4_sleep_enable() - put the sensor to sleep or wake it
+ * @rmi4_data: driver context
+ * @enable: true for deep sleep (SENSOR_SLEEP), false to restore normal
+ *          operation with the cached no_sleep_setting
+ *
+ * Read-modify-writes the low 3 bits of the F01 device control register.
+ * rmi4_data->sensor_sleep is updated only when both register accesses
+ * succeed; on failure the cached state intentionally stays unchanged.
+ */
+static void synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned char no_sleep_setting = rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device control\n",
+				__func__);
+		return;
+	}
+
+	/* Clear the sleep-mode field, then set the requested mode bits. */
+	device_ctrl = device_ctrl & ~MASK_3BIT;
+	if (enable)
+		device_ctrl = device_ctrl | NO_SLEEP_OFF | SENSOR_SLEEP;
+	else
+		device_ctrl = device_ctrl | no_sleep_setting | NORMAL_OPERATION;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write device control\n",
+				__func__);
+		return;
+	}
+
+	rmi4_data->sensor_sleep = enable;
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_exp_fn_work() - service pending expansion-function changes
+ *
+ * Runs on exp_data.workqueue.  Walks exp_data.list and, for each handler,
+ * either calls init() (newly inserted) or remove() followed by unlinking
+ * and freeing (flagged for removal by synaptics_rmi4_new_function()).
+ * Lock order is exp_init -> reset -> exp_data, matching the other paths
+ * that take more than one of these mutexes.
+ */
+static void synaptics_rmi4_exp_fn_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler_temp;
+	struct synaptics_rmi4_data *rmi4_data = exp_data.rmi4_data;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+	mutex_lock(&rmi4_data->rmi4_reset_mutex);
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		/* _safe variant: remove() entries are deleted while walking. */
+		list_for_each_entry_safe(exp_fhandler,
+				exp_fhandler_temp,
+				&exp_data.list,
+				link) {
+			if ((exp_fhandler->exp_fn->init != NULL) &&
+					exp_fhandler->insert) {
+				exp_fhandler->exp_fn->init(rmi4_data);
+				exp_fhandler->insert = false;
+			} else if ((exp_fhandler->exp_fn->remove != NULL) &&
+					exp_fhandler->remove) {
+				exp_fhandler->exp_fn->remove(rmi4_data);
+				list_del(&exp_fhandler->link);
+				kfree(exp_fhandler);
+			}
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+	mutex_unlock(&rmi4_data->rmi4_reset_mutex);
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_new_function() - register or unregister an expansion fn
+ * @exp_fn: expansion function descriptor
+ * @insert: true to add it, false to flag the matching entry for removal
+ *
+ * Insertions allocate a handler and append it to exp_data.list; removals
+ * only mark the handler whose fn_type matches.  The actual init()/remove()
+ * callbacks run later from synaptics_rmi4_exp_fn_work(), which is queued
+ * here only once probe() has set exp_data.queue_work.  Safe to call before
+ * the core driver probes: exp_data is lazily initialized here too.
+ */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn,
+		bool insert)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (insert) {
+		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+		if (!exp_fhandler) {
+			pr_err("%s: Failed to alloc mem for expansion function\n",
+					__func__);
+			goto exit;
+		}
+		exp_fhandler->exp_fn = exp_fn;
+		exp_fhandler->insert = true;
+		exp_fhandler->remove = false;
+		list_add_tail(&exp_fhandler->link, &exp_data.list);
+	} else if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (exp_fhandler->exp_fn->fn_type == exp_fn->fn_type) {
+				exp_fhandler->insert = false;
+				exp_fhandler->remove = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&exp_data.mutex);
+
+	if (exp_data.queue_work) {
+		queue_delayed_work(exp_data.workqueue,
+				&exp_data.work,
+				msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+	}
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+/*
+ * synaptics_dsx_pinctrl_init() - look up the touch pinctrl states
+ *
+ * Acquires the devm pinctrl handle and resolves the "pmx_ts_active",
+ * "pmx_ts_suspend" and "pmx_ts_release" states.  Active and suspend are
+ * mandatory; release is optional (failure only logged at debug level).
+ * On any failure ts_pinctrl is cleared so callers fall back to running
+ * without pinctrl.
+ *
+ * Return: 0 on success, otherwise the PTR_ERR of the failed lookup.
+ */
+static int synaptics_dsx_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	rmi4_data->ts_pinctrl = devm_pinctrl_get((rmi4_data->pdev->dev.parent));
+	if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
+		/*
+		 * NOTE(review): PTR_ERR(NULL) is 0, so a NULL handle makes
+		 * this return 0 with ts_pinctrl cleared; probe() treats that
+		 * as "no pinctrl" and continues - confirm this is intended.
+		 */
+		retval = PTR_ERR(rmi4_data->ts_pinctrl);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	rmi4_data->pinctrl_state_active
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_active");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_active);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_suspend
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_suspend");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_suspend);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	/* Release state is optional; a lookup failure is not fatal. */
+	rmi4_data->pinctrl_state_release
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_release");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_release);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(rmi4_data->ts_pinctrl);
+err_pinctrl_get:
+	rmi4_data->ts_pinctrl = NULL;
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_probe() - bind the DSX core driver to the platform device
+ * @pdev: platform device carrying a synaptics_dsx_hw_interface (bus ops
+ *        plus board data) in platform_data
+ *
+ * Bring-up order: regulators, pinctrl, GPIOs, optional bus hardware init,
+ * input device, FB / early-suspend notifiers, attention IRQ, sysfs nodes,
+ * and the rebuild / expansion-function / reset workqueues.  Each failure
+ * unwinds everything acquired so far via the goto ladder at the bottom.
+ *
+ * Fixes over the previous revision:
+ *  - the sysfs error-path countdown used "attr_count >= 0" on an unsigned
+ *    type, which is always true and walked off the front of attrs[];
+ *  - a kobject_create_and_add() failure jumped to the error ladder with
+ *    retval still 0, making probe report success after failing;
+ *  - the pinctrl-release failure message was a copy-paste of the sysfs one.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int synaptics_rmi4_probe(struct platform_device *pdev)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	const struct synaptics_dsx_board_data *bdata;
+
+	hw_if = pdev->dev.platform_data;
+	if (!hw_if) {
+		dev_err(&pdev->dev,
+				"%s: No hardware interface found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	bdata = hw_if->board_data;
+	if (!bdata) {
+		dev_err(&pdev->dev,
+				"%s: No board data found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&pdev->dev,
+				"%s: Failed to alloc mem for rmi4_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->pdev = pdev;
+	rmi4_data->current_page = MASK_8BIT;
+	rmi4_data->hw_if = hw_if;
+	rmi4_data->suspend = false;
+	rmi4_data->irq_enabled = false;
+	rmi4_data->fingers_on_2d = false;
+
+	rmi4_data->reset_device = synaptics_rmi4_reset_device;
+	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+	rmi4_data->sleep_enable = synaptics_rmi4_sleep_enable;
+	rmi4_data->report_touch = synaptics_rmi4_report_touch;
+
+	mutex_init(&(rmi4_data->rmi4_reset_mutex));
+	mutex_init(&(rmi4_data->rmi4_report_mutex));
+	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+	mutex_init(&(rmi4_data->rmi4_exp_init_mutex));
+
+	platform_set_drvdata(pdev, rmi4_data);
+
+	vir_button_map = bdata->vir_button_map;
+
+	retval = synaptics_rmi4_get_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to get regulators\n",
+				__func__);
+		goto err_get_reg;
+	}
+
+	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable regulators\n",
+				__func__);
+		goto err_enable_reg;
+	}
+
+	retval = synaptics_dsx_pinctrl_init(rmi4_data);
+	if (!retval && rmi4_data->ts_pinctrl) {
+		/*
+		 * Pinctrl handle is optional. If pinctrl handle is found
+		 * let pins to be configured in active state. If not
+		 * found continue further without error.
+		 */
+		retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_active);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to select %s pinstate %d\n",
+					__func__, PINCTRL_STATE_ACTIVE, retval);
+		}
+	}
+	retval = synaptics_rmi4_set_gpio(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up GPIO's\n",
+				__func__);
+		goto err_set_gpio;
+	}
+
+	if (hw_if->ui_hw_init) {
+		retval = hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to initialize hardware interface\n",
+					__func__);
+			goto err_ui_hw_init;
+		}
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto err_set_input_dev;
+	}
+
+#ifdef CONFIG_FB
+	INIT_WORK(&rmi4_data->fb_notify_work,
+			synaptics_rmi4_fb_notify_resume_work);
+	rmi4_data->fb_notifier.notifier_call = synaptics_rmi4_fb_notifier_cb;
+	retval = fb_register_client(&rmi4_data->fb_notifier);
+	if (retval < 0) {
+		/* Non-fatal: driver still works without blank/unblank PM. */
+		dev_err(&pdev->dev,
+				"%s: Failed to register fb notifier client\n",
+				__func__);
+	}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+	register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	/* exp_data may already be initialized by an earlier-loaded module. */
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+
+	retval = synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable attention interrupt\n",
+				__func__);
+		goto err_enable_irq;
+	}
+
+	if (vir_button_map->nbuttons) {
+		rmi4_data->board_prop_dir = kobject_create_and_add(
+				"board_properties", NULL);
+		if (!rmi4_data->board_prop_dir) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create board_properties directory\n",
+					__func__);
+			/* Was missing: probe must not return 0 here. */
+			retval = -ENOMEM;
+			goto err_virtual_buttons;
+		} else {
+			retval = sysfs_create_file(rmi4_data->board_prop_dir,
+					&virtual_key_map_attr.attr);
+			if (retval < 0) {
+				dev_err(&pdev->dev,
+						"%s: Failed to create virtual key map file\n",
+						__func__);
+				goto err_virtual_buttons;
+			}
+		}
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto err_sysfs;
+		}
+	}
+
+	rmi4_data->rb_workqueue =
+			create_singlethread_workqueue("dsx_rebuild_workqueue");
+	if (!rmi4_data->rb_workqueue) {
+		retval = -ENOMEM;
+		goto err_rb_workqueue;
+	}
+	INIT_DELAYED_WORK(&rmi4_data->rb_work, synaptics_rmi4_rebuild_work);
+
+	exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+	if (!exp_data.workqueue) {
+		retval = -ENOMEM;
+		goto err_exp_data_workqueue;
+	}
+	INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
+	exp_data.rmi4_data = rmi4_data;
+	exp_data.queue_work = true;
+	queue_delayed_work(exp_data.workqueue, &exp_data.work, 0);
+
+#ifdef FB_READY_RESET
+	rmi4_data->reset_workqueue =
+			create_singlethread_workqueue("dsx_reset_workqueue");
+	if (!rmi4_data->reset_workqueue) {
+		retval = -ENOMEM;
+		goto err_reset_workqueue;
+	}
+	INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work);
+	queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work);
+#endif
+
+	/* Initialize secure touch */
+	synaptics_secure_touch_init(rmi4_data);
+	synaptics_secure_touch_stop(rmi4_data, true);
+
+	return 0;
+
+#ifdef FB_READY_RESET
+err_reset_workqueue:
+#endif
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+err_exp_data_workqueue:
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+err_rb_workqueue:
+err_sysfs:
+	/*
+	 * attr_count is unsigned, so a "for (attr_count--; attr_count >= 0;)"
+	 * countdown never terminates (the condition is always true and the
+	 * index wraps past the front of attrs[]); count down explicitly.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+err_virtual_buttons:
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+err_enable_irq:
+#ifdef CONFIG_FB
+	fb_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+err_set_input_dev:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_ui_hw_init:
+err_set_gpio:
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+	if (rmi4_data->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			retval = pinctrl_select_state(
+					rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release);
+			if (retval)
+				dev_err(&pdev->dev,
+						"%s: Failed to select release pinctrl state\n",
+						__func__);
+		}
+	}
+
+err_enable_reg:
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+err_get_reg:
+	kfree(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_remove() - unbind the core driver, undoing probe in reverse
+ * @pdev: platform device being removed
+ *
+ * Drains and destroys the reset/expansion/rebuild workqueues, removes the
+ * sysfs attributes and board_properties nodes, masks the interrupt,
+ * unregisters the FB / early-suspend notifiers and input devices, frees
+ * the GPIOs, releases (or puts) pinctrl, powers down the regulators and
+ * finally frees the driver context.
+ *
+ * Return: always 0.
+ */
+static int synaptics_rmi4_remove(struct platform_device *pdev)
+{
+	unsigned char attr_count;
+	int err;
+	struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+#ifdef FB_READY_RESET
+	cancel_work_sync(&rmi4_data->reset_work);
+	flush_workqueue(rmi4_data->reset_workqueue);
+	destroy_workqueue(rmi4_data->reset_workqueue);
+#endif
+
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+#ifdef CONFIG_FB
+	fb_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+
+	/* Prefer the release pinctrl state; fall back to putting the handle. */
+	if (rmi4_data->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			err = pinctrl_select_state(
+					rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release);
+			if (err)
+				dev_err(&pdev->dev,
+					"Failed to select release pinctrl state %d\n",
+					err);
+		}
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_f11_wg() - toggle F11 wakeup-gesture reporting
+ * @rmi4_data: driver context
+ * @enable: true for F11_WAKEUP_GESTURE_MODE, false for F11_CONTINUOUS_MODE
+ *
+ * Read-modify-writes the low 3 bits of the F11 control base register.
+ */
+static void synaptics_rmi4_f11_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char reporting_control;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * NOTE(review): assumes an F11 handler is always on the list when
+	 * this runs (callers gate on f11_wakeup_gesture); if the loop ends
+	 * without a match, fhandler does not reference a valid entry.
+	 */
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F11)
+			break;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	reporting_control = (reporting_control & ~MASK_3BIT);
+	if (enable)
+		reporting_control |= F11_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control |= F11_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_f12_wg() - toggle F12 wakeup-gesture reporting
+ * @rmi4_data: driver context
+ * @enable: true for F12_WAKEUP_GESTURE_MODE, false for F12_CONTINUOUS_MODE
+ *
+ * Reads the 3-byte block at ctrl_base + ctrl20_offset and rewrites only
+ * byte 2 (the report-mode byte) before writing the block back.
+ */
+static void synaptics_rmi4_f12_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char offset;
+	unsigned char reporting_control[3];
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * NOTE(review): assumes an F12 handler is always on the list when
+	 * this runs (callers gate on f12_wakeup_gesture); if the loop ends
+	 * without a match, fhandler does not reference a valid entry.
+	 */
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F12)
+			break;
+	}
+
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	offset = extra_data->ctrl20_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	if (enable)
+		reporting_control[2] = F12_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control[2] = F12_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	return;
+}
+
+/*
+ * Route wakeup-gesture configuration to whichever RMI function provides
+ * gesture support on this device, preferring F11 over F12.  No-op when
+ * neither function advertises wakeup gestures.
+ */
+static void synaptics_rmi4_wakeup_gesture(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	if (rmi4_data->f11_wakeup_gesture) {
+		synaptics_rmi4_f11_wg(rmi4_data, enable);
+		return;
+	}
+
+	if (rmi4_data->f12_wakeup_gesture)
+		synaptics_rmi4_f12_wg(rmi4_data, enable);
+}
+
+#ifdef CONFIG_FB
+/*
+ * Deferred resume used when board data requests resume_in_workqueue:
+ * performs the actual resume off the FB notifier path, then marks the
+ * framebuffer as ready.
+ */
+static void synaptics_rmi4_fb_notify_resume_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_data *rmi4_data =
+		container_of(work, struct synaptics_rmi4_data, fb_notify_work);
+	synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));
+	rmi4_data->fb_ready = true;
+}
+
+/*
+ * synaptics_rmi4_fb_notifier_cb() - framebuffer blank/unblank PM hook
+ *
+ * FB_EARLY_EVENT_BLANK only stops secure touch; FB_EVENT_BLANK drives
+ * suspend (FB_BLANK_POWERDOWN) or resume (FB_BLANK_UNBLANK).  The two
+ * branches differ only in how resume runs: via fb_notify_work when the
+ * board data sets resume_in_workqueue, otherwise inline in the notifier.
+ *
+ * Return: always 0 (notifier chain continues).
+ */
+static int synaptics_rmi4_fb_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	int *transition;
+	struct fb_event *evdata = data;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(self, struct synaptics_rmi4_data,
+			fb_notifier);
+
+	if (evdata && evdata->data && rmi4_data) {
+		if (rmi4_data->hw_if->board_data->resume_in_workqueue) {
+			if (event == FB_EARLY_EVENT_BLANK) {
+				synaptics_secure_touch_stop(rmi4_data, false);
+			} else if (event == FB_EVENT_BLANK) {
+				transition = evdata->data;
+				if (*transition == FB_BLANK_POWERDOWN) {
+					/* Flush a pending deferred resume first. */
+					flush_work(
+						&(rmi4_data->fb_notify_work));
+					synaptics_rmi4_suspend(
+						&rmi4_data->pdev->dev);
+					rmi4_data->fb_ready = false;
+				} else if (*transition == FB_BLANK_UNBLANK) {
+					schedule_work(
+						&(rmi4_data->fb_notify_work));
+				}
+			}
+		} else {
+			if (event == FB_EARLY_EVENT_BLANK) {
+				synaptics_secure_touch_stop(rmi4_data, false);
+			} else if (event == FB_EVENT_BLANK) {
+				transition = evdata->data;
+				if (*transition == FB_BLANK_POWERDOWN) {
+					synaptics_rmi4_suspend(
+						&rmi4_data->pdev->dev);
+					rmi4_data->fb_ready = false;
+				} else if (*transition == FB_BLANK_UNBLANK) {
+					synaptics_rmi4_resume(
+						&rmi4_data->pdev->dev);
+					rmi4_data->fb_ready = true;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+/*
+ * synaptics_rmi4_early_suspend() - legacy early-suspend hook
+ *
+ * With wakeup gestures enabled the sensor stays powered as a wake source;
+ * otherwise the IRQ is masked, the sensor put to sleep and fingers
+ * released.  Expansion functions get early_suspend() in both cases.
+ */
+static void synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	if (rmi4_data->stay_awake)
+		return;
+
+	/*
+	 * During early suspend/late resume, the driver doesn't access xPU/SMMU
+	 * protected HW resources. So, there is no compelling need to block,
+	 * but notifying the userspace that a power event has occurred is
+	 * enough. Hence 'blocking' variable can be set to false.
+	 */
+	synaptics_secure_touch_stop(rmi4_data, false);
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+	synaptics_rmi4_sleep_enable(rmi4_data, true);
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->early_suspend != NULL)
+				exp_fhandler->exp_fn->early_suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_late_resume() - legacy late-resume hook
+ *
+ * Mirror of early_suspend: disables the gesture wake source or wakes the
+ * sensor and re-enables the IRQ, optionally issuing a full reset when
+ * FB_READY_RESET is configured, then runs expansion late_resume() hooks.
+ */
+static void synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	if (rmi4_data->stay_awake)
+		return;
+
+	synaptics_secure_touch_stop(rmi4_data, false);
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Page select state is unknown after sleep; force a re-sync. */
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_sleep_enable(rmi4_data, false);
+		synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	if (rmi4_data->suspend) {
+		retval = synaptics_rmi4_reset_device(rmi4_data, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue reset command\n",
+					__func__);
+		}
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->late_resume != NULL)
+				exp_fhandler->exp_fn->late_resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return;
+}
+#endif
+
+/*
+ * synaptics_rmi4_suspend() - PM suspend hook (also called from the FB
+ * notifier path)
+ * @dev: device whose drvdata is the driver context
+ *
+ * With wakeup gestures enabled the sensor stays active as a wake source.
+ * Otherwise the IRQ is masked, the sensor put to sleep, fingers released,
+ * the suspend pinctrl state selected and (on first suspend) the
+ * regulators dropped.  Expansion functions get suspend() in all cases.
+ *
+ * Fix: the pinctrl failure check was an unbraced "if", so the goto ran
+ * unconditionally - a *successful* state change still jumped to
+ * err_pinctrl, re-enabling the IRQ and waking the sensor while returning
+ * 0.  The goto now fires only on error.
+ *
+ * Return: 0 on success, negative errno if the suspend pinctrl state
+ * cannot be selected (the sensor is woken back up in that case).
+ */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	int retval;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	synaptics_secure_touch_stop(rmi4_data, true);
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	if (!rmi4_data->suspend) {
+		synaptics_rmi4_irq_enable(rmi4_data, false, false);
+		synaptics_rmi4_sleep_enable(rmi4_data, true);
+		synaptics_rmi4_free_fingers(rmi4_data);
+	}
+
+	if (rmi4_data->ts_pinctrl) {
+		retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_suspend);
+		if (retval < 0) {
+			dev_err(dev, "Cannot get idle pinctrl state\n");
+			goto err_pinctrl;
+		}
+	}
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->suspend != NULL)
+				exp_fhandler->exp_fn->suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	if (!rmi4_data->suspend) {
+		synaptics_rmi4_enable_reg(rmi4_data, false);
+		synaptics_rmi4_get_reg(rmi4_data, false);
+	}
+	rmi4_data->suspend = true;
+
+	return 0;
+
+err_pinctrl:
+	/* Undo the sleep transition so the device is left usable. */
+	synaptics_rmi4_sleep_enable(rmi4_data, false);
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	return retval;
+
+}
+
+/*
+ * synaptics_rmi4_resume() - PM resume hook (also called from the FB
+ * notifier path)
+ * @dev: device whose drvdata is the driver context
+ *
+ * Mirror of suspend: disables the gesture wake source, or re-powers the
+ * regulators (when coming out of a real suspend), wakes the sensor,
+ * re-enables the IRQ and restores the active pinctrl state.  Optionally
+ * issues a full reset under FB_READY_RESET, then runs expansion resume()
+ * hooks.
+ *
+ * Fix: retval was declared only under #ifdef FB_READY_RESET but is also
+ * used for the unconditional pinctrl_select_state() call below, which
+ * broke the build whenever FB_READY_RESET was not defined.  It is now
+ * declared unconditionally.
+ *
+ * Return: always 0.
+ */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	synaptics_secure_touch_stop(rmi4_data, true);
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Page select state is unknown after sleep; force a re-sync. */
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_get_reg(rmi4_data, true);
+		synaptics_rmi4_enable_reg(rmi4_data, true);
+	}
+
+	synaptics_rmi4_sleep_enable(rmi4_data, false);
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	if (rmi4_data->ts_pinctrl) {
+		retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_active);
+		if (retval < 0)
+			dev_err(dev, "Cannot get default pinctrl state\n");
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->resume != NULL)
+				exp_fhandler->exp_fn->resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+#ifndef CONFIG_FB
+	/*
+	 * With CONFIG_FB the framebuffer notifier drives suspend/resume
+	 * instead, so the PM callbacks are intentionally left unset there.
+	 */
+	.suspend = synaptics_rmi4_suspend,
+	.resume = synaptics_rmi4_resume,
+#endif
+};
+#endif
+
+/* Platform driver glue; registered from synaptics_rmi4_init() below. */
+static struct platform_driver synaptics_rmi4_driver = {
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+	},
+	.probe = synaptics_rmi4_probe,
+	.remove = synaptics_rmi4_remove,
+};
+
+/*
+ * Module init: register the DSX bus glue first, then the platform driver.
+ * Fix: a platform_driver_register() failure previously leaked the bus
+ * registration; it is now unwound so a failed modprobe leaves no stale
+ * state behind.
+ */
+static int __init synaptics_rmi4_init(void)
+{
+	int retval;
+
+	retval = synaptics_rmi4_bus_init_v26();
+	if (retval)
+		return retval;
+
+	retval = platform_driver_register(&synaptics_rmi4_driver);
+	if (retval)
+		synaptics_rmi4_bus_exit_v26();
+
+	return retval;
+}
+
+/* Module exit: unregister the platform driver, then the DSX bus glue. */
+static void __exit synaptics_rmi4_exit(void)
+{
+	platform_driver_unregister(&synaptics_rmi4_driver);
+	synaptics_rmi4_bus_exit_v26();
+}
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
new file mode 100644
index 0000000..39fec9a
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
@@ -0,0 +1,501 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT (SYNAPTICS_DS4 | SYNAPTICS_DS5)
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x2061
+
+#include <linux/version.h>
+#ifdef CONFIG_FB
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_6
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+
+#define PDT_PROPS (0X00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x00D0)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+#define ADDRESS_WORD_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F12 (0x12)
+#define SYNAPTICS_RMI4_F1A (0x1A)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F35 (0x35)
+#define SYNAPTICS_RMI4_F38 (0x38)
+#define SYNAPTICS_RMI4_F51 (0x51)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+#define SYNAPTICS_RMI4_FDB (0xDB)
+
+#define PRODUCT_INFO_SIZE 2
+#define PRODUCT_ID_SIZE 10
+#define BUILD_ID_SIZE 3
+
+#define F12_FINGERS_TO_SUPPORT 10
+#define F12_NO_OBJECT_STATUS 0x00
+#define F12_FINGER_STATUS 0x01
+#define F12_ACTIVE_STYLUS_STATUS 0x02
+#define F12_PALM_STATUS 0x03
+#define F12_HOVERING_FINGER_STATUS 0x05
+#define F12_GLOVED_FINGER_STATUS 0x06
+#define F12_NARROW_OBJECT_STATUS 0x07
+#define F12_HAND_EDGE_STATUS 0x08
+#define F12_COVER_STATUS 0x0A
+#define F12_STYLUS_STATUS 0x0B
+#define F12_ERASER_STATUS 0x0C
+#define F12_SMALL_OBJECT_STATUS 0x0D
+
+#define F12_GESTURE_DETECTION_LEN 5
+
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE "pmx_ts_release"
+enum exp_fn {
+ RMI_DEV = 0,
+ RMI_FW_UPDATER,
+ RMI_TEST_REPORTING,
+ RMI_PROXIMITY,
+ RMI_ACTIVE_PEN,
+ RMI_GESTURE,
+ RMI_VIDEO,
+ RMI_DEBUG,
+ RMI_LAST,
+};
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT entry
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_version: version of function
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+ union {
+ struct {
+ unsigned char query_base_addr;
+ unsigned char cmd_base_addr;
+ unsigned char ctrl_base_addr;
+ unsigned char data_base_addr;
+ unsigned char intr_src_count:3;
+ unsigned char reserved_1:2;
+ unsigned char fn_version:2;
+ unsigned char reserved_2:1;
+ unsigned char fn_number;
+ } __packed;
+ unsigned char data[6];
+ };
+};
+
+/*
+ * synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+ unsigned short query_base;
+ unsigned short cmd_base;
+ unsigned short ctrl_base;
+ unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_f11_extra_data - extra data of F$11
+ * @data38_offset: offset to F11_2D_DATA38 register
+ */
+struct synaptics_rmi4_f11_extra_data {
+ unsigned char data38_offset;
+};
+
+/*
+ * struct synaptics_rmi4_f12_extra_data - extra data of F$12
+ * @data1_offset: offset to F12_2D_DATA01 register
+ * @data4_offset: offset to F12_2D_DATA04 register
+ * @data15_offset: offset to F12_2D_DATA15 register
+ * @data15_size: size of F12_2D_DATA15 register
+ * @data15_data: buffer for reading F12_2D_DATA15 register
+ * @data23_offset: offset to F12_2D_DATA23 register
+ * @data23_size: size of F12_2D_DATA23 register
+ * @data23_data: buffer for reading F12_2D_DATA23 register
+ * @ctrl20_offset: offset to F12_2D_CTRL20 register
+ */
+struct synaptics_rmi4_f12_extra_data {
+ unsigned char data1_offset;
+ unsigned char data4_offset;
+ unsigned char data15_offset;
+ unsigned char data15_size;
+ unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
+ unsigned char data23_offset;
+ unsigned char data23_size;
+ unsigned char data23_data[F12_FINGERS_TO_SUPPORT];
+ unsigned char ctrl20_offset;
+};
+
+/*
+ * struct synaptics_rmi4_fn - RMI function handler
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ * @extra: pointer to extra data
+ */
+struct synaptics_rmi4_fn {
+ unsigned char fn_number;
+ unsigned char num_of_data_sources;
+ unsigned char num_of_data_points;
+ unsigned char intr_reg_num;
+ unsigned char intr_mask;
+ struct synaptics_rmi4_fn_full_addr full_addr;
+ struct list_head link;
+ int data_size;
+ void *data;
+ void *extra;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: RMI protocol major version number
+ * @version_minor: RMI protocol minor version number
+ * @manufacturer_id: manufacturer ID
+ * @product_props: product properties
+ * @product_info: product information
+ * @product_id_string: product ID
+ * @build_id: firmware build ID
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+ unsigned int version_major;
+ unsigned int version_minor;
+ unsigned char manufacturer_id;
+ unsigned char product_props;
+ unsigned char product_info[PRODUCT_INFO_SIZE];
+ unsigned char product_id_string[PRODUCT_ID_SIZE + 1];
+ unsigned char build_id[BUILD_ID_SIZE];
+ struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - RMI4 device instance data
+ * @pdev: pointer to platform device
+ * @input_dev: pointer to associated input device
+ * @stylus_dev: pointer to associated stylus device
+ * @hw_if: pointer to hardware interface data
+ * @rmi4_mod_info: device information
+ * @board_prop_dir: /sys/board_properties directory for virtual key map file
+ * @pwr_reg: pointer to regulator for power control
+ * @bus_reg: pointer to regulator for bus pullup control
+ * @rmi4_reset_mutex: mutex for software reset
+ * @rmi4_report_mutex: mutex for input event reporting
+ * @rmi4_io_ctrl_mutex: mutex for communication interface I/O
+ * @rmi4_exp_init_mutex: mutex for expansion function module initialization
+ * @rb_work: work for rebuilding input device
+ * @rb_workqueue: workqueue for rebuilding input device
+ * @fb_notifier: framebuffer notifier client
+ * @reset_work: work for issuing reset after display framebuffer ready
+ * @reset_workqueue: workqueue for issuing reset after display framebuffer ready
+ * @early_suspend: early suspend power management
+ * @current_page: current RMI page for register access
+ * @button_0d_enabled: switch for enabling 0d button support
+ * @num_of_tx: number of Tx channels for 2D touch
+ * @num_of_rx: number of Rx channels for 2D touch
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @max_touch_width: maximum touch width
+ * @report_enable: input data to report for F$12
+ * @no_sleep_setting: default setting of NoSleep in F01_RMI_CTRL00 register
+ * @gesture_detection: detected gesture type and properties
+ * @intr_mask: interrupt enable mask
+ * @button_txrx_mapping: Tx Rx mapping of 0D buttons
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f$01
+ * @f01_cmd_base_addr: command base address for f$01
+ * @f01_ctrl_base_addr: control base address for f$01
+ * @f01_data_base_addr: data base address for f$01
+ * @firmware_id: firmware build ID
+ * @irq: attention interrupt
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @flash_prog_mode: flag to indicate flash programming mode status
+ * @irq_enabled: flag to indicate attention interrupt enable status
+ * @fingers_on_2d: flag to indicate presence of fingers in 2D area
+ * @suspend: flag to indicate whether in suspend state
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @stay_awake: flag to indicate whether to stay awake during suspend
+ * @fb_ready: flag to indicate whether display framebuffer in ready state
+ * @f11_wakeup_gesture: flag to indicate support for wakeup gestures in F$11
+ * @f12_wakeup_gesture: flag to indicate support for wakeup gestures in F$12
+ * @enable_wakeup_gesture: flag to indicate usage of wakeup gestures
+ * @wedge_sensor: flag to indicate use of wedge sensor
+ * @report_pressure: flag to indicate reporting of pressure data
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ * @external_afe_buttons: flag to indicate presence of external AFE buttons
+ * @reset_device: pointer to device reset function
+ * @irq_enable: pointer to interrupt enable function
+ * @sleep_enable: pointer to sleep enable function
+ * @report_touch: pointer to touch reporting function
+ */
+struct synaptics_rmi4_data {
+ struct platform_device *pdev;
+ struct input_dev *input_dev;
+ struct input_dev *stylus_dev;
+ const struct synaptics_dsx_hw_interface *hw_if;
+ struct synaptics_rmi4_device_info rmi4_mod_info;
+ struct kobject *board_prop_dir;
+ struct regulator *pwr_reg;
+ struct regulator *bus_reg;
+ struct mutex rmi4_reset_mutex;
+ struct mutex rmi4_report_mutex;
+ struct mutex rmi4_io_ctrl_mutex;
+ struct mutex rmi4_exp_init_mutex;
+ struct delayed_work rb_work;
+ struct workqueue_struct *rb_workqueue;
+#ifdef CONFIG_FB
+ struct work_struct fb_notify_work;
+ struct notifier_block fb_notifier;
+ struct work_struct reset_work;
+ struct workqueue_struct *reset_workqueue;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ unsigned char current_page;
+ unsigned char button_0d_enabled;
+ unsigned char num_of_tx;
+ unsigned char num_of_rx;
+ unsigned char num_of_fingers;
+ unsigned char max_touch_width;
+ unsigned char report_enable;
+ unsigned char no_sleep_setting;
+ unsigned char gesture_detection[F12_GESTURE_DETECTION_LEN];
+ unsigned char intr_mask[MAX_INTR_REGISTERS];
+ unsigned char *button_txrx_mapping;
+ unsigned short num_of_intr_regs;
+ unsigned short f01_query_base_addr;
+ unsigned short f01_cmd_base_addr;
+ unsigned short f01_ctrl_base_addr;
+ unsigned short f01_data_base_addr;
+ unsigned int firmware_id;
+ int irq;
+ int sensor_max_x;
+ int sensor_max_y;
+ bool flash_prog_mode;
+ bool irq_enabled;
+ bool fingers_on_2d;
+ bool suspend;
+ bool sensor_sleep;
+ bool stay_awake;
+ bool fb_ready;
+ bool f11_wakeup_gesture;
+ bool f12_wakeup_gesture;
+ bool enable_wakeup_gesture;
+ bool wedge_sensor;
+ bool report_pressure;
+ bool stylus_enable;
+ bool eraser_enable;
+ bool external_afe_buttons;
+ int (*reset_device)(struct synaptics_rmi4_data *rmi4_data,
+ bool rebuild);
+ int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable,
+ bool attn_only);
+ void (*sleep_enable)(struct synaptics_rmi4_data *rmi4_data,
+ bool enable);
+ void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler);
+ struct pinctrl *ts_pinctrl;
+ struct pinctrl_state *pinctrl_state_active;
+ struct pinctrl_state *pinctrl_state_suspend;
+ struct pinctrl_state *pinctrl_state_release;
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+ atomic_t st_enabled;
+ atomic_t st_pending_irqs;
+ struct completion st_powerdown;
+ struct completion st_irq_processed;
+ bool st_initialized;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+#endif
+};
+
+struct synaptics_dsx_bus_access {
+ unsigned char type;
+ int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+ unsigned char *data, unsigned short length);
+ int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+ unsigned char *data, unsigned short length);
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+ int (*get)(struct synaptics_rmi4_data *rmi4_data);
+ void (*put)(struct synaptics_rmi4_data *rmi4_data);
+#endif
+};
+
+struct synaptics_dsx_hw_interface {
+ struct synaptics_dsx_board_data *board_data;
+ const struct synaptics_dsx_bus_access *bus_access;
+ int (*bl_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+ int (*ui_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+};
+
+struct synaptics_rmi4_exp_fn {
+ enum exp_fn fn_type;
+ int (*init)(struct synaptics_rmi4_data *rmi4_data);
+ void (*remove)(struct synaptics_rmi4_data *rmi4_data);
+ void (*reset)(struct synaptics_rmi4_data *rmi4_data);
+ void (*reinit)(struct synaptics_rmi4_data *rmi4_data);
+ void (*early_suspend)(struct synaptics_rmi4_data *rmi4_data);
+ void (*suspend)(struct synaptics_rmi4_data *rmi4_data);
+ void (*resume)(struct synaptics_rmi4_data *rmi4_data);
+ void (*late_resume)(struct synaptics_rmi4_data *rmi4_data);
+ void (*attn)(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask);
+};
+
+int synaptics_rmi4_bus_init_v26(void);
+
+void synaptics_rmi4_bus_exit_v26(void);
+
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn_module,
+ bool insert);
+
+int synaptics_fw_updater(const unsigned char *fw_data);
+
+static inline int synaptics_rmi4_reg_read(
+ struct synaptics_rmi4_data *rmi4_data,
+ unsigned short addr,
+ unsigned char *data,
+ unsigned short len)
+{
+ return rmi4_data->hw_if->bus_access->read(rmi4_data, addr, data, len);
+}
+
+static inline int synaptics_rmi4_reg_write(
+ struct synaptics_rmi4_data *rmi4_data,
+ unsigned short addr,
+ unsigned char *data,
+ unsigned short len)
+{
+ return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
+}
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+static inline int synaptics_rmi4_bus_get(struct synaptics_rmi4_data *rmi4_data)
+{
+ return rmi4_data->hw_if->bus_access->get(rmi4_data);
+}
+static inline void synaptics_rmi4_bus_put(struct synaptics_rmi4_data *rmi4_data)
+{
+ rmi4_data->hw_if->bus_access->put(rmi4_data);
+}
+#endif
+
+static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
+ const unsigned char *src, unsigned int src_size,
+ unsigned int count)
+{
+ if (dest == NULL || src == NULL)
+ return -EINVAL;
+
+ if (count > dest_size || count > src_size)
+ return -EINVAL;
+
+ memcpy((void *)dest, (const void *)src, count);
+
+ return 0;
+}
+
+static inline void batohs(unsigned short *dest, unsigned char *src)
+{
+ *dest = src[1] * 0x100 + src[0];
+}
+
+static inline void hstoba(unsigned char *dest, unsigned short src)
+{
+ dest[0] = src % 0x100;
+ dest[1] = src / 0x100;
+}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
new file mode 100644
index 0000000..344f4c3
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
@@ -0,0 +1,4480 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define FW_IMAGE_NAME "synaptics/startup_fw_update.img"
+/*
+#define DO_STARTUP_FW_UPDATE
+*/
+/*
+#ifdef DO_STARTUP_FW_UPDATE
+#ifdef CONFIG_FB
+#define WAIT_FOR_FB_READY
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+#endif
+#endif
+*/
+#define FORCE_UPDATE false
+#define DO_LOCKDOWN false
+
+#define MAX_IMAGE_NAME_LEN 256
+#define MAX_FIRMWARE_ID_LEN 10
+
+#define IMAGE_HEADER_VERSION_05 0x05
+#define IMAGE_HEADER_VERSION_06 0x06
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define IMAGE_AREA_OFFSET 0x100
+#define LOCKDOWN_SIZE 0x50
+
+#define V5V6_BOOTLOADER_ID_OFFSET 0
+#define V5V6_CONFIG_ID_SIZE 4
+
+#define V5_PROPERTIES_OFFSET 2
+#define V5_BLOCK_SIZE_OFFSET 3
+#define V5_BLOCK_COUNT_OFFSET 5
+#define V5_BLOCK_NUMBER_OFFSET 0
+#define V5_BLOCK_DATA_OFFSET 2
+
+#define V6_PROPERTIES_OFFSET 1
+#define V6_BLOCK_SIZE_OFFSET 2
+#define V6_BLOCK_COUNT_OFFSET 3
+#define V6_PROPERTIES_2_OFFSET 4
+#define V6_GUEST_CODE_BLOCK_COUNT_OFFSET 5
+#define V6_BLOCK_NUMBER_OFFSET 0
+#define V6_BLOCK_DATA_OFFSET 1
+#define V6_FLASH_COMMAND_OFFSET 2
+#define V6_FLASH_STATUS_OFFSET 3
+
+#define V7_CONFIG_ID_SIZE 32
+
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+
+#define V7_PARTITION_SUPPORT_BYTES 4
+
+#define F35_ERROR_CODE_OFFSET 0
+#define F35_CHUNK_NUM_LSB_OFFSET 0
+#define F35_CHUNK_NUM_MSB_OFFSET 1
+#define F35_CHUNK_DATA_OFFSET 2
+#define F35_CHUNK_COMMAND_OFFSET 18
+
+#define F35_CHUNK_SIZE 16
+#define F35_ERASE_ALL_WAIT_MS 3000
+#define F35_RESET_WAIT_MS 250
+
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define INT_DISABLE_WAIT_MS 20
+#define ENTER_FLASH_PROG_WAIT_MS 20
+
+static int fwu_do_reflash(void);
+
+static int fwu_recovery_check_status(void);
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+#endif
+
+enum f34_version {
+ F34_V0 = 0,
+ F34_V1,
+ F34_V2,
+};
+
+enum bl_version {
+ BL_V5 = 5,
+ BL_V6 = 6,
+ BL_V7 = 7,
+ BL_V8 = 8,
+};
+
+enum flash_area {
+ NONE = 0,
+ UI_FIRMWARE,
+ UI_CONFIG,
+};
+
+enum update_mode {
+ NORMAL = 1,
+ FORCE = 2,
+ LOCKDOWN = 8,
+};
+
+enum config_area {
+ UI_CONFIG_AREA = 0,
+ PM_CONFIG_AREA,
+ BL_CONFIG_AREA,
+ DP_CONFIG_AREA,
+ FLASH_CONFIG_AREA,
+};
+
+enum v7_status {
+ SUCCESS = 0x00,
+ DEVICE_NOT_IN_BOOTLOADER_MODE,
+ INVALID_PARTITION,
+ INVALID_COMMAND,
+ INVALID_BLOCK_OFFSET,
+ INVALID_TRANSFER,
+ NOT_ERASED,
+ FLASH_PROGRAMMING_KEY_INCORRECT,
+ BAD_PARTITION_TABLE,
+ CHECKSUM_FAILED,
+ FLASH_HARDWARE_FAILURE = 0x1f,
+};
+
+enum v7_partition_id {
+ BOOTLOADER_PARTITION = 0x01,
+ DEVICE_CONFIG_PARTITION,
+ FLASH_CONFIG_PARTITION,
+ MANUFACTURING_BLOCK_PARTITION,
+ GUEST_SERIALIZATION_PARTITION,
+ GLOBAL_PARAMETERS_PARTITION,
+ CORE_CODE_PARTITION,
+ CORE_CONFIG_PARTITION,
+ GUEST_CODE_PARTITION,
+ DISPLAY_CONFIG_PARTITION,
+};
+
+enum v7_flash_command {
+ CMD_V7_IDLE = 0x00,
+ CMD_V7_ENTER_BL,
+ CMD_V7_READ,
+ CMD_V7_WRITE,
+ CMD_V7_ERASE,
+ CMD_V7_ERASE_AP,
+ CMD_V7_SENSOR_ID,
+};
+
+enum v5v6_flash_command {
+ CMD_V5V6_IDLE = 0x0,
+ CMD_V5V6_WRITE_FW = 0x2,
+ CMD_V5V6_ERASE_ALL = 0x3,
+ CMD_V5V6_WRITE_LOCKDOWN = 0x4,
+ CMD_V5V6_READ_CONFIG = 0x5,
+ CMD_V5V6_WRITE_CONFIG = 0x6,
+ CMD_V5V6_ERASE_UI_CONFIG = 0x7,
+ CMD_V5V6_ERASE_BL_CONFIG = 0x9,
+ CMD_V5V6_ERASE_DISP_CONFIG = 0xa,
+ CMD_V5V6_ERASE_GUEST_CODE = 0xb,
+ CMD_V5V6_WRITE_GUEST_CODE = 0xc,
+ CMD_V5V6_ENABLE_FLASH_PROG = 0xf,
+};
+
+enum flash_command {
+ CMD_IDLE = 0,
+ CMD_WRITE_FW,
+ CMD_WRITE_CONFIG,
+ CMD_WRITE_LOCKDOWN,
+ CMD_WRITE_GUEST_CODE,
+ CMD_READ_CONFIG,
+ CMD_ERASE_ALL,
+ CMD_ERASE_UI_FIRMWARE,
+ CMD_ERASE_UI_CONFIG,
+ CMD_ERASE_BL_CONFIG,
+ CMD_ERASE_DISP_CONFIG,
+ CMD_ERASE_FLASH_CONFIG,
+ CMD_ERASE_GUEST_CODE,
+ CMD_ENABLE_FLASH_PROG,
+};
+
+enum f35_flash_command {
+ CMD_F35_IDLE = 0x0,
+ CMD_F35_RESERVED = 0x1,
+ CMD_F35_WRITE_CHUNK = 0x2,
+ CMD_F35_ERASE_ALL = 0x3,
+ CMD_F35_RESET = 0x10,
+};
+
+enum container_id {
+ TOP_LEVEL_CONTAINER = 0,
+ UI_CONTAINER,
+ UI_CONFIG_CONTAINER,
+ BL_CONTAINER,
+ BL_IMAGE_CONTAINER,
+ BL_CONFIG_CONTAINER,
+ BL_LOCKDOWN_INFO_CONTAINER,
+ PERMANENT_CONFIG_CONTAINER,
+ GUEST_CODE_CONTAINER,
+ BL_PROTOCOL_DESCRIPTOR_CONTAINER,
+ UI_PROTOCOL_DESCRIPTOR_CONTAINER,
+ RMI_SELF_DISCOVERY_CONTAINER,
+ RMI_PAGE_CONTENT_CONTAINER,
+ GENERAL_INFORMATION_CONTAINER,
+ DEVICE_CONFIG_CONTAINER,
+ FLASH_CONFIG_CONTAINER,
+ GUEST_SERIALIZATION_CONTAINER,
+ GLOBAL_PARAMETERS_CONTAINER,
+ CORE_CODE_CONTAINER,
+ CORE_CONFIG_CONTAINER,
+ DISPLAY_CONFIG_CONTAINER,
+};
+
+struct pdt_properties {
+ union {
+ struct {
+ unsigned char reserved_1:6;
+ unsigned char has_bsr:1;
+ unsigned char reserved_2:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct partition_table {
+ unsigned char partition_id:5;
+ unsigned char byte_0_reserved:3;
+ unsigned char byte_1_reserved;
+ unsigned char partition_length_7_0;
+ unsigned char partition_length_15_8;
+ unsigned char start_physical_address_7_0;
+ unsigned char start_physical_address_15_8;
+ unsigned char partition_properties_7_0;
+ unsigned char partition_properties_15_8;
+} __packed;
+
+struct f01_device_control {
+ union {
+ struct {
+ unsigned char sleep_mode:2;
+ unsigned char nosleep:1;
+ unsigned char reserved:2;
+ unsigned char charger_connected:1;
+ unsigned char report_rate:1;
+ unsigned char configured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v7_query_0 {
+ union {
+ struct {
+ unsigned char subpacket_1_size:3;
+ unsigned char has_config_id:1;
+ unsigned char f34_query0_b4:1;
+ unsigned char has_thqa:1;
+ unsigned char f34_query0_b6__7:2;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v7_query_1_7 {
+ union {
+ struct {
+ /* query 1 */
+ unsigned char bl_minor_revision;
+ unsigned char bl_major_revision;
+
+ /* query 2 */
+ unsigned char bl_fw_id_7_0;
+ unsigned char bl_fw_id_15_8;
+ unsigned char bl_fw_id_23_16;
+ unsigned char bl_fw_id_31_24;
+
+ /* query 3 */
+ unsigned char minimum_write_size;
+ unsigned char block_size_7_0;
+ unsigned char block_size_15_8;
+ unsigned char flash_page_size_7_0;
+ unsigned char flash_page_size_15_8;
+
+ /* query 4 */
+ unsigned char adjustable_partition_area_size_7_0;
+ unsigned char adjustable_partition_area_size_15_8;
+
+ /* query 5 */
+ unsigned char flash_config_length_7_0;
+ unsigned char flash_config_length_15_8;
+
+ /* query 6 */
+ unsigned char payload_length_7_0;
+ unsigned char payload_length_15_8;
+
+ /* query 7 */
+ unsigned char f34_query7_b0:1;
+ unsigned char has_bootloader:1;
+ unsigned char has_device_config:1;
+ unsigned char has_flash_config:1;
+ unsigned char has_manufacturing_block:1;
+ unsigned char has_guest_serialization:1;
+ unsigned char has_global_parameters:1;
+ unsigned char has_core_code:1;
+ unsigned char has_core_config:1;
+ unsigned char has_guest_code:1;
+ unsigned char has_display_config:1;
+ unsigned char f34_query7_b11__15:5;
+ unsigned char f34_query7_b16__23;
+ unsigned char f34_query7_b24__31;
+ } __packed;
+ unsigned char data[21];
+ };
+};
+
+struct f34_v7_data0 {
+ union {
+ struct {
+ unsigned char operation_status:5;
+ unsigned char device_cfg_status:2;
+ unsigned char bl_mode:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v7_data_1_5 {
+ union {
+ struct {
+ unsigned char partition_id:5;
+ unsigned char f34_data1_b5__7:3;
+ unsigned char block_offset_7_0;
+ unsigned char block_offset_15_8;
+ unsigned char transfer_length_7_0;
+ unsigned char transfer_length_15_8;
+ unsigned char command;
+ unsigned char payload_0;
+ unsigned char payload_1;
+ } __packed;
+ unsigned char data[8];
+ };
+};
+
+struct f34_v5v6_flash_properties {
+ union {
+ struct {
+ unsigned char reg_map:1;
+ unsigned char unlocked:1;
+ unsigned char has_config_id:1;
+ unsigned char has_pm_config:1;
+ unsigned char has_bl_config:1;
+ unsigned char has_disp_config:1;
+ unsigned char has_ctrl1:1;
+ unsigned char has_query4:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v5v6_flash_properties_2 {
+ union {
+ struct {
+ unsigned char has_guest_code:1;
+ unsigned char reserved:7;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct register_offset {
+ unsigned char properties;
+ unsigned char properties_2;
+ unsigned char block_size;
+ unsigned char block_count;
+ unsigned char gc_block_count;
+ unsigned char flash_status;
+ unsigned char partition_id;
+ unsigned char block_number;
+ unsigned char transfer_length;
+ unsigned char flash_cmd;
+ unsigned char payload;
+};
+
+struct block_count {
+ unsigned short ui_firmware;
+ unsigned short ui_config;
+ unsigned short dp_config;
+ unsigned short pm_config;
+ unsigned short fl_config;
+ unsigned short bl_image;
+ unsigned short bl_config;
+ unsigned short lockdown;
+ unsigned short guest_code;
+ unsigned short total_count;
+};
+
+struct physical_address {
+ unsigned short ui_firmware;
+ unsigned short ui_config;
+ unsigned short dp_config;
+ unsigned short fl_config;
+ unsigned short guest_code;
+};
+
+struct container_descriptor {
+ unsigned char content_checksum[4];
+ unsigned char container_id[2];
+ unsigned char minor_version;
+ unsigned char major_version;
+ unsigned char reserved_08;
+ unsigned char reserved_09;
+ unsigned char reserved_0a;
+ unsigned char reserved_0b;
+ unsigned char container_option_flags[4];
+ unsigned char content_options_length[4];
+ unsigned char content_options_address[4];
+ unsigned char content_length[4];
+ unsigned char content_address[4];
+};
+
+/* Header of an image-header-version-0x10 firmware file; points at the
+ * top level container, which in turn lists all content containers.
+ */
+struct image_header_10 {
+ unsigned char checksum[4];
+ unsigned char reserved_04;
+ unsigned char reserved_05;
+ unsigned char minor_header_version;
+ unsigned char major_header_version;
+ unsigned char reserved_08;
+ unsigned char reserved_09;
+ unsigned char reserved_0a;
+ unsigned char reserved_0b;
+ unsigned char top_level_container_start_addr[4];
+};
+
+/* Header of an image-header-version-0x05/0x06 firmware file. The union
+ * at 0x40 is either a customer product ID string or (for TDDI images)
+ * the display config address/size — which one applies is decided by the
+ * option bits, see fwu_parse_image_header_05_06().
+ */
+struct image_header_05_06 {
+ /* 0x00 - 0x0f */
+ unsigned char checksum[4];
+ unsigned char reserved_04;
+ unsigned char reserved_05;
+ unsigned char options_firmware_id:1;
+ unsigned char options_bootloader:1;
+ unsigned char options_guest_code:1;
+ unsigned char options_tddi:1;
+ unsigned char options_reserved:4;
+ unsigned char header_version;
+ unsigned char firmware_size[4];
+ unsigned char config_size[4];
+ /* 0x10 - 0x1f */
+ unsigned char product_id[PRODUCT_ID_SIZE];
+ unsigned char package_id[2];
+ unsigned char package_id_revision[2];
+ unsigned char product_info[PRODUCT_INFO_SIZE];
+ /* 0x20 - 0x2f */
+ unsigned char bootloader_addr[4];
+ unsigned char bootloader_size[4];
+ unsigned char ui_addr[4];
+ unsigned char ui_size[4];
+ /* 0x30 - 0x3f */
+ unsigned char ds_id[16];
+ /* 0x40 - 0x4f */
+ union {
+ struct {
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE];
+ unsigned char reserved_4a_4f[6];
+ };
+ struct {
+ unsigned char dsp_cfg_addr[4];
+ unsigned char dsp_cfg_size[4];
+ unsigned char reserved_48_4f[8];
+ };
+ };
+ /* 0x50 - 0x53 */
+ unsigned char firmware_id[4];
+};
+
+/* A region inside the firmware image: a pointer into the image buffer
+ * plus its length in bytes (not owned; lifetime follows fwu->image).
+ */
+struct block_data {
+ unsigned int size;
+ const unsigned char *data;
+};
+
+/* Everything extracted from a parsed firmware image: which optional
+ * sections are present, identifiers, and the location/size of each
+ * flashable region, plus the image's own partition table data (v7/v8).
+ */
+struct image_metadata {
+ bool contains_firmware_id;
+ bool contains_bootloader;
+ bool contains_guest_code;
+ bool contains_disp_config;
+ bool contains_perm_config;
+ bool contains_flash_config;
+ unsigned int firmware_id;
+ unsigned int checksum;
+ unsigned int bootloader_size;
+ unsigned int disp_config_offset;
+ unsigned char bl_version;
+ unsigned char product_id[PRODUCT_ID_SIZE + 1];
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+ struct block_data bootloader;
+ struct block_data ui_firmware;
+ struct block_data ui_config;
+ struct block_data dp_config;
+ struct block_data pm_config;
+ struct block_data fl_config;
+ struct block_data bl_image;
+ struct block_data bl_config;
+ struct block_data lockdown;
+ struct block_data guest_code;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+};
+
+/* Firmware updater state: device-side flash geometry and status, the
+ * parsed image metadata, F34/F35 function descriptors, and the
+ * workqueue used to run the update. A single instance is kept in the
+ * file-scope 'fwu' pointer.
+ */
+struct synaptics_rmi4_fwu_handle {
+ enum bl_version bl_version;
+ bool initialized;
+ bool in_bl_mode;
+ bool in_ub_mode;
+ bool force_update;
+ bool do_lockdown;
+ bool has_guest_code;
+ bool new_partition_table;
+ unsigned int data_pos;
+ unsigned char *ext_data_source;
+ unsigned char *read_config_buf;
+ unsigned char intr_mask;
+ unsigned char command;
+ unsigned char bootloader_id[2];
+ unsigned char config_id[32];
+ unsigned char flash_status;
+ unsigned char partitions;
+ unsigned short block_size;
+ unsigned short config_size;
+ unsigned short config_area;
+ unsigned short config_block_count;
+ unsigned short flash_config_length;
+ unsigned short payload_length;
+ unsigned short partition_table_bytes;
+ unsigned short read_config_buf_size;
+ const unsigned char *config_data;
+ const unsigned char *image;
+ unsigned char *image_name;
+ unsigned int image_size;
+ struct image_metadata img;
+ struct register_offset off;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+ struct f34_v5v6_flash_properties flash_properties;
+ struct synaptics_rmi4_fn_desc f34_fd;
+ struct synaptics_rmi4_fn_desc f35_fd;
+ struct synaptics_rmi4_data *rmi4_data;
+ struct workqueue_struct *fwu_workqueue;
+ struct work_struct fwu_work;
+};
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* sysfs binary attribute "data": read/write window onto the firmware
+ * image/config buffer, gated by the EXTRA_SYSFS config option.
+ */
+static struct bin_attribute dev_attr_data = {
+ .attr = {
+ .name = "data",
+ .mode = (S_IRUGO | S_IWUGO),
+ },
+ .size = 0,
+ .read = fwu_sysfs_show_image,
+ .write = fwu_sysfs_store_image,
+};
+#endif
+
+/* Optional sysfs controls for the updater (store-only triggers, 0220,
+ * and read-only block-count queries, 0444); empty unless the
+ * EXTRA_SYSFS config option is enabled.
+ */
+static struct device_attribute attrs[] = {
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+ __ATTR(dorecovery, 0220,
+ NULL,
+ fwu_sysfs_do_recovery_store),
+ __ATTR(doreflash, 0220,
+ NULL,
+ fwu_sysfs_do_reflash_store),
+ __ATTR(writeconfig, 0220,
+ NULL,
+ fwu_sysfs_write_config_store),
+ __ATTR(readconfig, 0220,
+ NULL,
+ fwu_sysfs_read_config_store),
+ __ATTR(configarea, 0220,
+ NULL,
+ fwu_sysfs_config_area_store),
+ __ATTR(imagename, 0220,
+ NULL,
+ fwu_sysfs_image_name_store),
+ __ATTR(imagesize, 0220,
+ NULL,
+ fwu_sysfs_image_size_store),
+ __ATTR(blocksize, 0444,
+ fwu_sysfs_block_size_show,
+ NULL),
+ __ATTR(fwblockcount, 0444,
+ fwu_sysfs_firmware_block_count_show,
+ NULL),
+ __ATTR(configblockcount, 0444,
+ fwu_sysfs_configuration_block_count_show,
+ NULL),
+ __ATTR(dispconfigblockcount, 0444,
+ fwu_sysfs_disp_config_block_count_show,
+ NULL),
+ __ATTR(permconfigblockcount, 0444,
+ fwu_sysfs_perm_config_block_count_show,
+ NULL),
+ __ATTR(blconfigblockcount, 0444,
+ fwu_sysfs_bl_config_block_count_show,
+ NULL),
+ __ATTR(guestcodeblockcount, 0444,
+ fwu_sysfs_guest_code_block_count_show,
+ NULL),
+ __ATTR(writeguestcode, 0220,
+ NULL,
+ fwu_sysfs_write_guest_code_store),
+#endif
+};
+
+/* Singleton updater handle; NULL until the module initializes it. */
+static struct synaptics_rmi4_fwu_handle *fwu;
+
+/* Completed when the updater module finishes removal. */
+DECLARE_COMPLETION(fwu_remove_complete);
+
+/* Decode a 4-byte little-endian value at ptr into a host unsigned int. */
+static unsigned int le_to_uint(const unsigned char *ptr)
+{
+ return (unsigned int)ptr[0] +
+ (unsigned int)ptr[1] * 0x100 +
+ (unsigned int)ptr[2] * 0x10000 +
+ (unsigned int)ptr[3] * 0x1000000;
+}
+
+/* Grow fwu->read_config_buf (zero-filled) so it holds at least count
+ * bytes; a no-op when the current buffer is already big enough.
+ * Returns 0 on success or -ENOMEM (buffer size reset to 0) on failure.
+ */
+static int fwu_allocate_read_config_buf(unsigned int count)
+{
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ if (count > fwu->read_config_buf_size) {
+ kfree(fwu->read_config_buf);
+ fwu->read_config_buf = kzalloc(count, GFP_KERNEL);
+ if (!fwu->read_config_buf) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for fwu->read_config_buf\n",
+ __func__);
+ fwu->read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+ fwu->read_config_buf_size = count;
+ }
+
+ return 0;
+}
+
+/* Set fwu->new_partition_table when any tracked partition start address
+ * differs between the device (fwu->phyaddr) and the firmware image
+ * (fwu->img.phyaddr). Display config and guest code are only compared
+ * when the device reports having them.
+ */
+static void fwu_compare_partition_tables(void)
+{
+ if (fwu->phyaddr.ui_firmware != fwu->img.phyaddr.ui_firmware) {
+ fwu->new_partition_table = true;
+ return;
+ }
+
+ if (fwu->phyaddr.ui_config != fwu->img.phyaddr.ui_config) {
+ fwu->new_partition_table = true;
+ return;
+ }
+
+ if (fwu->flash_properties.has_disp_config) {
+ if (fwu->phyaddr.dp_config != fwu->img.phyaddr.dp_config) {
+ fwu->new_partition_table = true;
+ return;
+ }
+ }
+
+ if (fwu->has_guest_code) {
+ if (fwu->phyaddr.guest_code != fwu->img.phyaddr.guest_code) {
+ fwu->new_partition_table = true;
+ return;
+ }
+ }
+
+ fwu->new_partition_table = false;
+
+ return;
+}
+
+/* Walk a BL v7/v8 partition table (8-byte entries starting at byte
+ * offset 2; fwu->partitions entries total) and record each partition's
+ * block count into blkcount (total_count accumulates the sum) and,
+ * where tracked, its start physical address into phyaddr.
+ */
+static void fwu_parse_partition_table(const unsigned char *partition_table,
+ struct block_count *blkcount, struct physical_address *phyaddr)
+{
+ unsigned char ii;
+ unsigned char index;
+ unsigned char offset;
+ unsigned short partition_length;
+ unsigned short physical_address;
+ struct partition_table *ptable;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ for (ii = 0; ii < fwu->partitions; ii++) {
+ index = ii * 8 + 2;
+ ptable = (struct partition_table *)&partition_table[index];
+ partition_length = ptable->partition_length_15_8 << 8 |
+ ptable->partition_length_7_0;
+ physical_address = ptable->start_physical_address_15_8 << 8 |
+ ptable->start_physical_address_7_0;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Partition entry %d:\n",
+ __func__, ii);
+ for (offset = 0; offset < 8; offset++) {
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: 0x%02x\n",
+ __func__,
+ partition_table[index + offset]);
+ }
+ switch (ptable->partition_id) {
+ case CORE_CODE_PARTITION:
+ blkcount->ui_firmware = partition_length;
+ phyaddr->ui_firmware = physical_address;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Core code block count: %d\n",
+ __func__, blkcount->ui_firmware);
+ blkcount->total_count += partition_length;
+ break;
+ case CORE_CONFIG_PARTITION:
+ blkcount->ui_config = partition_length;
+ phyaddr->ui_config = physical_address;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Core config block count: %d\n",
+ __func__, blkcount->ui_config);
+ blkcount->total_count += partition_length;
+ break;
+ case BOOTLOADER_PARTITION:
+ blkcount->bl_image = partition_length;
+ /* Log the bootloader count (was a copy-paste of the
+ * core config message/value).
+ */
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Bootloader block count: %d\n",
+ __func__, blkcount->bl_image);
+ blkcount->total_count += partition_length;
+ break;
+ case DISPLAY_CONFIG_PARTITION:
+ blkcount->dp_config = partition_length;
+ phyaddr->dp_config = physical_address;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Display config block count: %d\n",
+ __func__, blkcount->dp_config);
+ blkcount->total_count += partition_length;
+ break;
+ case FLASH_CONFIG_PARTITION:
+ blkcount->fl_config = partition_length;
+ phyaddr->fl_config = physical_address;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Flash config block count: %d\n",
+ __func__, blkcount->fl_config);
+ blkcount->total_count += partition_length;
+ break;
+ case GUEST_CODE_PARTITION:
+ blkcount->guest_code = partition_length;
+ phyaddr->guest_code = physical_address;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Guest code block count: %d\n",
+ __func__, blkcount->guest_code);
+ blkcount->total_count += partition_length;
+ break;
+ case GUEST_SERIALIZATION_PARTITION:
+ blkcount->pm_config = partition_length;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Guest serialization block count: %d\n",
+ __func__, blkcount->pm_config);
+ blkcount->total_count += partition_length;
+ break;
+ case GLOBAL_PARAMETERS_PARTITION:
+ blkcount->bl_config = partition_length;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Global parameters block count: %d\n",
+ __func__, blkcount->bl_config);
+ blkcount->total_count += partition_length;
+ break;
+ case DEVICE_CONFIG_PARTITION:
+ blkcount->lockdown = partition_length;
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Device config block count: %d\n",
+ __func__, blkcount->lockdown);
+ blkcount->total_count += partition_length;
+ break;
+ default:
+ /* Unknown partition IDs are skipped silently. */
+ break;
+ }
+ }
+}
+
+/* Parse the sub-containers listed inside the BL container of a v10
+ * image. fwu->img.bootloader.data holds 4-byte entries; the first four
+ * bytes are skipped (entries start at index 1), each entry being the
+ * image offset of a container_descriptor. Records the bootloader image,
+ * bootloader config, and lockdown regions into fwu->img.
+ */
+static void fwu_parse_image_header_10_bl_container(const unsigned char *image)
+{
+ unsigned char ii;
+ unsigned char num_of_containers;
+ unsigned int addr;
+ unsigned int container_id;
+ unsigned int length;
+ const unsigned char *content;
+ struct container_descriptor *descriptor;
+
+ num_of_containers = (fwu->img.bootloader.size - 4) / 4;
+
+ for (ii = 1; ii <= num_of_containers; ii++) {
+ addr = le_to_uint(fwu->img.bootloader.data + (ii * 4));
+ descriptor = (struct container_descriptor *)(image + addr);
+ container_id = descriptor->container_id[0] |
+ descriptor->container_id[1] << 8;
+ content = image + le_to_uint(descriptor->content_address);
+ length = le_to_uint(descriptor->content_length);
+ switch (container_id) {
+ case BL_IMAGE_CONTAINER:
+ fwu->img.bl_image.data = content;
+ fwu->img.bl_image.size = length;
+ break;
+ case BL_CONFIG_CONTAINER:
+ case GLOBAL_PARAMETERS_CONTAINER:
+ fwu->img.bl_config.data = content;
+ fwu->img.bl_config.size = length;
+ break;
+ case BL_LOCKDOWN_INFO_CONTAINER:
+ case DEVICE_CONFIG_CONTAINER:
+ fwu->img.lockdown.data = content;
+ fwu->img.lockdown.size = length;
+ break;
+ default:
+ break;
+ };
+ }
+
+ return;
+}
+
+/* Parse an image-header-version-0x10 firmware file (fwu->image).
+ * Follows the top level container to its list of 4-byte container
+ * addresses, then records each recognized container's content region
+ * (and presence flag, where applicable) into fwu->img. The BL container
+ * is parsed further by fwu_parse_image_header_10_bl_container().
+ */
+static void fwu_parse_image_header_10(void)
+{
+ unsigned char ii;
+ unsigned char num_of_containers;
+ unsigned int addr;
+ unsigned int offset;
+ unsigned int container_id;
+ unsigned int length;
+ const unsigned char *image;
+ const unsigned char *content;
+ struct container_descriptor *descriptor;
+ struct image_header_10 *header;
+
+ image = fwu->image;
+ header = (struct image_header_10 *)image;
+
+ fwu->img.checksum = le_to_uint(header->checksum);
+
+ /* address of top level container */
+ offset = le_to_uint(header->top_level_container_start_addr);
+ descriptor = (struct container_descriptor *)(image + offset);
+
+ /* address of top level container content */
+ offset = le_to_uint(descriptor->content_address);
+ num_of_containers = le_to_uint(descriptor->content_length) / 4;
+
+ for (ii = 0; ii < num_of_containers; ii++) {
+ addr = le_to_uint(image + offset);
+ offset += 4;
+ descriptor = (struct container_descriptor *)(image + addr);
+ container_id = descriptor->container_id[0] |
+ descriptor->container_id[1] << 8;
+ content = image + le_to_uint(descriptor->content_address);
+ length = le_to_uint(descriptor->content_length);
+ switch (container_id) {
+ case UI_CONTAINER:
+ case CORE_CODE_CONTAINER:
+ fwu->img.ui_firmware.data = content;
+ fwu->img.ui_firmware.size = length;
+ break;
+ case UI_CONFIG_CONTAINER:
+ case CORE_CONFIG_CONTAINER:
+ fwu->img.ui_config.data = content;
+ fwu->img.ui_config.size = length;
+ break;
+ case BL_CONTAINER:
+ /* First content byte is the bootloader version. */
+ fwu->img.bl_version = *content;
+ fwu->img.bootloader.data = content;
+ fwu->img.bootloader.size = length;
+ fwu_parse_image_header_10_bl_container(image);
+ break;
+ case GUEST_CODE_CONTAINER:
+ fwu->img.contains_guest_code = true;
+ fwu->img.guest_code.data = content;
+ fwu->img.guest_code.size = length;
+ break;
+ case DISPLAY_CONFIG_CONTAINER:
+ fwu->img.contains_disp_config = true;
+ fwu->img.dp_config.data = content;
+ fwu->img.dp_config.size = length;
+ break;
+ case PERMANENT_CONFIG_CONTAINER:
+ case GUEST_SERIALIZATION_CONTAINER:
+ fwu->img.contains_perm_config = true;
+ fwu->img.pm_config.data = content;
+ fwu->img.pm_config.size = length;
+ break;
+ case FLASH_CONFIG_CONTAINER:
+ fwu->img.contains_flash_config = true;
+ fwu->img.fl_config.data = content;
+ fwu->img.fl_config.size = length;
+ break;
+ case GENERAL_INFORMATION_CONTAINER:
+ fwu->img.contains_firmware_id = true;
+ /* Firmware ID lives 4 bytes into the content. */
+ fwu->img.firmware_id = le_to_uint(content + 4);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return;
+}
+
+/* Parse an image-header-version-0x05/0x06 firmware file (fwu->image)
+ * into fwu->img: checksum, bootloader/UI firmware/config regions, the
+ * optional display config (BL v5 with bootloader, or BL v6 TDDI), the
+ * product ID strings, the optional firmware ID, and the fixed-size
+ * lockdown block preceding IMAGE_AREA_OFFSET.
+ */
+static void fwu_parse_image_header_05_06(void)
+{
+ int retval;
+ const unsigned char *image;
+ struct image_header_05_06 *header;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ image = fwu->image;
+ header = (struct image_header_05_06 *)image;
+
+ fwu->img.checksum = le_to_uint(header->checksum);
+
+ fwu->img.bl_version = header->header_version;
+
+ fwu->img.contains_bootloader = header->options_bootloader;
+ if (fwu->img.contains_bootloader)
+ fwu->img.bootloader_size = le_to_uint(header->bootloader_size);
+
+ fwu->img.ui_firmware.size = le_to_uint(header->firmware_size);
+ if (fwu->img.ui_firmware.size) {
+ fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+ /* A bundled bootloader precedes the UI firmware. */
+ if (fwu->img.contains_bootloader)
+ fwu->img.ui_firmware.data += fwu->img.bootloader_size;
+ }
+
+ if ((fwu->img.bl_version == BL_V6) && header->options_tddi)
+ fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+
+ fwu->img.ui_config.size = le_to_uint(header->config_size);
+ if (fwu->img.ui_config.size) {
+ fwu->img.ui_config.data = fwu->img.ui_firmware.data +
+ fwu->img.ui_firmware.size;
+ }
+
+ if ((fwu->img.bl_version == BL_V5 && fwu->img.contains_bootloader) ||
+ (fwu->img.bl_version == BL_V6 && header->options_tddi))
+ fwu->img.contains_disp_config = true;
+ else
+ fwu->img.contains_disp_config = false;
+
+ if (fwu->img.contains_disp_config) {
+ /* Union at 0x40 is the display config address/size. */
+ fwu->img.disp_config_offset = le_to_uint(header->dsp_cfg_addr);
+ fwu->img.dp_config.size = le_to_uint(header->dsp_cfg_size);
+ fwu->img.dp_config.data = image + fwu->img.disp_config_offset;
+ } else {
+ /* Union at 0x40 is the customer product ID string. */
+ retval = secure_memcpy(fwu->img.cstmr_product_id,
+ sizeof(fwu->img.cstmr_product_id),
+ header->cstmr_product_id,
+ sizeof(header->cstmr_product_id),
+ PRODUCT_ID_SIZE);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to copy custom product ID string\n",
+ __func__);
+ }
+ fwu->img.cstmr_product_id[PRODUCT_ID_SIZE] = 0;
+ }
+
+ fwu->img.contains_firmware_id = header->options_firmware_id;
+ if (fwu->img.contains_firmware_id)
+ fwu->img.firmware_id = le_to_uint(header->firmware_id);
+
+ retval = secure_memcpy(fwu->img.product_id,
+ sizeof(fwu->img.product_id),
+ header->product_id,
+ sizeof(header->product_id),
+ PRODUCT_ID_SIZE);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to copy product ID string\n",
+ __func__);
+ }
+ fwu->img.product_id[PRODUCT_ID_SIZE] = 0;
+
+ fwu->img.lockdown.size = LOCKDOWN_SIZE;
+ fwu->img.lockdown.data = image + IMAGE_AREA_OFFSET - LOCKDOWN_SIZE;
+
+ return;
+}
+
+/* Dispatch image parsing by major header version (0x10 vs 0x05/0x06)
+ * after clearing fwu->img. For BL v7/v8 devices the image must carry a
+ * flash config, whose partition table is parsed and compared against
+ * the device's. Returns 0 on success, -EINVAL on unsupported format or
+ * missing flash config.
+ */
+static int fwu_parse_image_info(void)
+{
+ struct image_header_10 *header;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ header = (struct image_header_10 *)fwu->image;
+
+ memset(&fwu->img, 0x00, sizeof(fwu->img));
+
+ switch (header->major_header_version) {
+ case IMAGE_HEADER_VERSION_10:
+ fwu_parse_image_header_10();
+ break;
+ case IMAGE_HEADER_VERSION_05:
+ case IMAGE_HEADER_VERSION_06:
+ fwu_parse_image_header_05_06();
+ break;
+ default:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Unsupported image file format (0x%02x)\n",
+ __func__, header->major_header_version);
+ return -EINVAL;
+ }
+
+ if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+ if (!fwu->img.contains_flash_config) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: No flash config found in firmware image\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ fwu_parse_partition_table(fwu->img.fl_config.data,
+ &fwu->img.blkcount, &fwu->img.phyaddr);
+
+ fwu_compare_partition_tables();
+ } else {
+ fwu->new_partition_table = false;
+ }
+
+ return 0;
+}
+
+/* Refresh fwu->in_bl_mode, fwu->flash_status, and fwu->command from the
+ * F34 data registers. Bit 7 of the status byte indicates bootloader
+ * mode; the status/command fields are masked per bootloader version.
+ * Returns 0 on success or the negative error from the register reads.
+ */
+static int fwu_read_flash_status(void)
+{
+ int retval;
+ unsigned char status;
+ unsigned char command;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+ &status,
+ sizeof(status));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read flash status\n",
+ __func__);
+ return retval;
+ }
+
+ fwu->in_bl_mode = status >> 7;
+
+ if (fwu->bl_version == BL_V5)
+ fwu->flash_status = (status >> 4) & MASK_3BIT;
+ else if (fwu->bl_version == BL_V6)
+ fwu->flash_status = status & MASK_3BIT;
+ else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+ fwu->flash_status = status & MASK_5BIT;
+
+ if (fwu->flash_status != 0x00) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Flash status = %d, command = 0x%02x\n",
+ __func__, fwu->flash_status, fwu->command);
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
+ &command,
+ sizeof(command));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read flash command\n",
+ __func__);
+ return retval;
+ }
+
+ if (fwu->bl_version == BL_V5)
+ fwu->command = command & MASK_4BIT;
+ else if (fwu->bl_version == BL_V6)
+ fwu->command = command & MASK_6BIT;
+ else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+ fwu->command = command;
+
+ return 0;
+}
+
+/* Wait up to timeout_ms for the flash controller to report CMD_IDLE
+ * with a clean status. When poll is true the status is re-read each
+ * iteration; otherwise only once at the final iteration (status may
+ * instead be updated by the interrupt path in the meantime — NOTE:
+ * inferred from the poll flag, confirm against the attention handler).
+ * Returns 0 when idle, -ETIMEDOUT otherwise.
+ */
+static int fwu_wait_for_idle(int timeout_ms, bool poll)
+{
+ int count = 0;
+ int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ do {
+ usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+ count++;
+ if (poll || (count == timeout_count))
+ fwu_read_flash_status();
+
+ if ((fwu->command == CMD_IDLE) && (fwu->flash_status == 0x00))
+ return 0;
+ } while (count < timeout_count);
+
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Timed out waiting for idle status\n",
+ __func__);
+
+ return -ETIMEDOUT;
+}
+
+/* Issue a v7 erase/enter-bootloader command as one register write: the
+ * partition ID, command code, and the 2-byte bootloader ID payload are
+ * packed into f34_v7_data_1_5 and written starting at the partition ID
+ * offset. Returns 0 on success or the negative write error.
+ */
+static int fwu_write_f34_v7_command_single_transaction(unsigned char cmd)
+{
+ int retval;
+ unsigned char base;
+ struct f34_v7_data_1_5 data_1_5;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ base = fwu->f34_fd.data_base_addr;
+
+ memset(data_1_5.data, 0x00, sizeof(data_1_5.data));
+
+ switch (cmd) {
+ case CMD_ERASE_ALL:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE_AP;
+ break;
+ case CMD_ERASE_UI_FIRMWARE:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case CMD_ERASE_BL_CONFIG:
+ data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case CMD_ERASE_UI_CONFIG:
+ data_1_5.partition_id = CORE_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case CMD_ERASE_DISP_CONFIG:
+ data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case CMD_ERASE_FLASH_CONFIG:
+ data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case CMD_ERASE_GUEST_CODE:
+ data_1_5.partition_id = GUEST_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case CMD_ENABLE_FLASH_PROG:
+ data_1_5.partition_id = BOOTLOADER_PARTITION;
+ data_1_5.command = CMD_V7_ENTER_BL;
+ break;
+ };
+
+ data_1_5.payload_0 = fwu->bootloader_id[0];
+ data_1_5.payload_1 = fwu->bootloader_id[1];
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.partition_id,
+ data_1_5.data,
+ sizeof(data_1_5.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write single transaction command\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+/* Translate a generic updater command into its v7 command code and
+ * issue it. Erase and enter-bootloader commands are routed through the
+ * single-transaction path (they need the partition ID and bootloader ID
+ * in the same write); the rest write only the command register. Records
+ * the issued code in fwu->command. Returns 0 on success, -EINVAL for an
+ * unknown command, or the negative write error.
+ */
+static int fwu_write_f34_v7_command(unsigned char cmd)
+{
+ int retval;
+ unsigned char base;
+ unsigned char command;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ base = fwu->f34_fd.data_base_addr;
+
+ switch (cmd) {
+ case CMD_WRITE_FW:
+ case CMD_WRITE_CONFIG:
+ case CMD_WRITE_LOCKDOWN:
+ case CMD_WRITE_GUEST_CODE:
+ command = CMD_V7_WRITE;
+ break;
+ case CMD_READ_CONFIG:
+ command = CMD_V7_READ;
+ break;
+ case CMD_ERASE_ALL:
+ command = CMD_V7_ERASE_AP;
+ break;
+ case CMD_ERASE_UI_FIRMWARE:
+ case CMD_ERASE_BL_CONFIG:
+ case CMD_ERASE_UI_CONFIG:
+ case CMD_ERASE_DISP_CONFIG:
+ case CMD_ERASE_FLASH_CONFIG:
+ case CMD_ERASE_GUEST_CODE:
+ command = CMD_V7_ERASE;
+ break;
+ case CMD_ENABLE_FLASH_PROG:
+ command = CMD_V7_ENTER_BL;
+ break;
+ default:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ };
+
+ fwu->command = command;
+
+ switch (cmd) {
+ case CMD_ERASE_ALL:
+ case CMD_ERASE_UI_FIRMWARE:
+ case CMD_ERASE_BL_CONFIG:
+ case CMD_ERASE_UI_CONFIG:
+ case CMD_ERASE_DISP_CONFIG:
+ case CMD_ERASE_FLASH_CONFIG:
+ case CMD_ERASE_GUEST_CODE:
+ case CMD_ENABLE_FLASH_PROG:
+ retval = fwu_write_f34_v7_command_single_transaction(cmd);
+ if (retval < 0)
+ return retval;
+ else
+ return 0;
+ default:
+ break;
+ };
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.flash_cmd,
+ &command,
+ sizeof(command));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write flash command\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+/* Translate a generic updater command into its v5/v6 command code and
+ * write it to the flash command register. For erase and
+ * enter-bootloader commands the 2-byte bootloader ID is first written
+ * to the payload area as the required unlock key. Records the issued
+ * code in fwu->command. Returns 0 on success, -EINVAL for an unknown
+ * command, or the negative write error.
+ */
+static int fwu_write_f34_v5v6_command(unsigned char cmd)
+{
+ int retval;
+ unsigned char base;
+ unsigned char command;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ base = fwu->f34_fd.data_base_addr;
+
+ switch (cmd) {
+ case CMD_IDLE:
+ command = CMD_V5V6_IDLE;
+ break;
+ case CMD_WRITE_FW:
+ command = CMD_V5V6_WRITE_FW;
+ break;
+ case CMD_WRITE_CONFIG:
+ command = CMD_V5V6_WRITE_CONFIG;
+ break;
+ case CMD_WRITE_LOCKDOWN:
+ command = CMD_V5V6_WRITE_LOCKDOWN;
+ break;
+ case CMD_WRITE_GUEST_CODE:
+ command = CMD_V5V6_WRITE_GUEST_CODE;
+ break;
+ case CMD_READ_CONFIG:
+ command = CMD_V5V6_READ_CONFIG;
+ break;
+ case CMD_ERASE_ALL:
+ command = CMD_V5V6_ERASE_ALL;
+ break;
+ case CMD_ERASE_UI_CONFIG:
+ command = CMD_V5V6_ERASE_UI_CONFIG;
+ break;
+ case CMD_ERASE_DISP_CONFIG:
+ command = CMD_V5V6_ERASE_DISP_CONFIG;
+ break;
+ case CMD_ERASE_GUEST_CODE:
+ command = CMD_V5V6_ERASE_GUEST_CODE;
+ break;
+ case CMD_ENABLE_FLASH_PROG:
+ command = CMD_V5V6_ENABLE_FLASH_PROG;
+ break;
+ default:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case CMD_ERASE_ALL:
+ case CMD_ERASE_UI_CONFIG:
+ case CMD_ERASE_DISP_CONFIG:
+ case CMD_ERASE_GUEST_CODE:
+ case CMD_ENABLE_FLASH_PROG:
+ /* Bootloader ID must precede destructive commands. */
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.payload,
+ fwu->bootloader_id,
+ sizeof(fwu->bootloader_id));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write bootloader ID\n",
+ __func__);
+ return retval;
+ }
+ break;
+ default:
+ break;
+ };
+
+ fwu->command = command;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.flash_cmd,
+ &command,
+ sizeof(command));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write command 0x%02x\n",
+ __func__, command);
+ return retval;
+ }
+
+ return 0;
+}
+
+/* Dispatch a flash command to the v7/v8 or v5/v6 writer based on the
+ * detected bootloader version. Returns the writer's result.
+ */
+static int fwu_write_f34_command(unsigned char cmd)
+{
+ int retval;
+
+ if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+ retval = fwu_write_f34_v7_command(cmd);
+ else
+ retval = fwu_write_f34_v5v6_command(cmd);
+
+ return retval;
+}
+
+/* Write the v7 partition ID register for the partition targeted by cmd.
+ * Config read/write commands select the partition from fwu->config_area.
+ * Returns 0 on success, -EINVAL for an unknown command or config area,
+ * or the negative write error.
+ */
+static int fwu_write_f34_v7_partition_id(unsigned char cmd)
+{
+ int retval;
+ unsigned char base;
+ unsigned char partition;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ base = fwu->f34_fd.data_base_addr;
+
+ switch (cmd) {
+ case CMD_WRITE_FW:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case CMD_WRITE_CONFIG:
+ case CMD_READ_CONFIG:
+ if (fwu->config_area == UI_CONFIG_AREA)
+ partition = CORE_CONFIG_PARTITION;
+ else if (fwu->config_area == DP_CONFIG_AREA)
+ partition = DISPLAY_CONFIG_PARTITION;
+ else if (fwu->config_area == PM_CONFIG_AREA)
+ partition = GUEST_SERIALIZATION_PARTITION;
+ else if (fwu->config_area == BL_CONFIG_AREA)
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ else if (fwu->config_area == FLASH_CONFIG_AREA)
+ partition = FLASH_CONFIG_PARTITION;
+ else {
+ /* Bail out instead of writing an indeterminate
+ * partition ID to the device (was uninitialized).
+ */
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Invalid config area\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ case CMD_WRITE_LOCKDOWN:
+ partition = DEVICE_CONFIG_PARTITION;
+ break;
+ case CMD_WRITE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case CMD_ERASE_ALL:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case CMD_ERASE_BL_CONFIG:
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ break;
+ case CMD_ERASE_UI_CONFIG:
+ partition = CORE_CONFIG_PARTITION;
+ break;
+ case CMD_ERASE_DISP_CONFIG:
+ partition = DISPLAY_CONFIG_PARTITION;
+ break;
+ case CMD_ERASE_FLASH_CONFIG:
+ partition = FLASH_CONFIG_PARTITION;
+ break;
+ case CMD_ERASE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case CMD_ENABLE_FLASH_PROG:
+ partition = BOOTLOADER_PARTITION;
+ break;
+ default:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.partition_id,
+ &partition,
+ sizeof(partition));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write partition ID\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+/* Write the partition ID for cmd on v7/v8 bootloaders; a no-op
+ * (returns 0) on v5/v6, which have no partition ID register.
+ */
+static int fwu_write_f34_partition_id(unsigned char cmd)
+{
+ int retval;
+
+ if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+ retval = fwu_write_f34_v7_partition_id(cmd);
+ else
+ retval = 0;
+
+ return retval;
+}
+
+/* Read the device's partition table out of the flash config area into
+ * partition_table (caller-provided, fwu->partition_table_bytes long):
+ * selects the flash config partition, programs block number 0 and the
+ * flash-config transfer length, issues CMD_READ_CONFIG, waits for idle,
+ * then reads the payload. Returns 0 on success or a negative error.
+ */
+static int fwu_read_f34_v7_partition_table(unsigned char *partition_table)
+{
+ int retval;
+ unsigned char base;
+ unsigned char length[2];
+ unsigned short block_number = 0;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ base = fwu->f34_fd.data_base_addr;
+
+ fwu->config_area = FLASH_CONFIG_AREA;
+
+ retval = fwu_write_f34_partition_id(CMD_READ_CONFIG);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.block_number,
+ (unsigned char *)&block_number,
+ sizeof(block_number));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write block number\n",
+ __func__);
+ return retval;
+ }
+
+ /* Transfer length is little-endian in the register pair. */
+ length[0] = (unsigned char)(fwu->flash_config_length & MASK_8BIT);
+ length[1] = (unsigned char)(fwu->flash_config_length >> 8);
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ base + fwu->off.transfer_length,
+ length,
+ sizeof(length));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write transfer length\n",
+ __func__);
+ return retval;
+ }
+
+ retval = fwu_write_f34_command(CMD_READ_CONFIG);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write command\n",
+ __func__);
+ return retval;
+ }
+
+ retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to wait for idle status\n",
+ __func__);
+ return retval;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ base + fwu->off.payload,
+ partition_table,
+ fwu->partition_table_bytes);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read block data\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+/* Read the F34 v7 query registers and initialize the updater's view of
+ * the flash: bootloader ID (upgrading bl_version to BL_V8 when the
+ * major revision says so), block/payload/flash-config sizes, fixed v7
+ * register offsets, the number of supported partitions (from the
+ * partition-support bitmap at the end of queries 1-7), and finally the
+ * device partition table, from which block counts, addresses, and the
+ * disp/pm/bl-config and guest-code capability flags are derived.
+ * Returns 0 on success or a negative error.
+ */
+static int fwu_read_f34_v7_queries(void)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char base;
+ unsigned char index;
+ unsigned char offset;
+ unsigned char *ptable;
+ struct f34_v7_query_0 query_0;
+ struct f34_v7_query_1_7 query_1_7;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ base = fwu->f34_fd.query_base_addr;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ base,
+ query_0.data,
+ sizeof(query_0.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read query 0\n",
+ __func__);
+ return retval;
+ }
+
+ /* Queries 1-7 follow query 0's variable-size subpacket. */
+ offset = query_0.subpacket_1_size + 1;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ base + offset,
+ query_1_7.data,
+ sizeof(query_1_7.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return retval;
+ }
+
+ fwu->bootloader_id[0] = query_1_7.bl_minor_revision;
+ fwu->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ if (fwu->bootloader_id[1] == BL_V8)
+ fwu->bl_version = BL_V8;
+
+ fwu->block_size = query_1_7.block_size_15_8 << 8 |
+ query_1_7.block_size_7_0;
+
+ fwu->flash_config_length = query_1_7.flash_config_length_15_8 << 8 |
+ query_1_7.flash_config_length_7_0;
+
+ fwu->payload_length = query_1_7.payload_length_15_8 << 8 |
+ query_1_7.payload_length_7_0;
+
+ fwu->off.flash_status = V7_FLASH_STATUS_OFFSET;
+ fwu->off.partition_id = V7_PARTITION_ID_OFFSET;
+ fwu->off.block_number = V7_BLOCK_NUMBER_OFFSET;
+ fwu->off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+ fwu->off.flash_cmd = V7_COMMAND_OFFSET;
+ fwu->off.payload = V7_PAYLOAD_OFFSET;
+
+ index = sizeof(query_1_7.data) - V7_PARTITION_SUPPORT_BYTES;
+
+ /* Count set bits in the partition-support bitmap. */
+ fwu->partitions = 0;
+ for (offset = 0; offset < V7_PARTITION_SUPPORT_BYTES; offset++) {
+ for (ii = 0; ii < 8; ii++) {
+ if (query_1_7.data[index + offset] & (1 << ii))
+ fwu->partitions++;
+ }
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Supported partitions: 0x%02x\n",
+ __func__, query_1_7.data[index + offset]);
+ }
+
+ /* 8 bytes per entry plus a 2-byte table header. */
+ fwu->partition_table_bytes = fwu->partitions * 8 + 2;
+
+ ptable = kzalloc(fwu->partition_table_bytes, GFP_KERNEL);
+ if (!ptable) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for partition table\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ retval = fwu_read_f34_v7_partition_table(ptable);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read partition table\n",
+ __func__);
+ kfree(ptable);
+ return retval;
+ }
+
+ fwu_parse_partition_table(ptable, &fwu->blkcount, &fwu->phyaddr);
+
+ if (fwu->blkcount.dp_config)
+ fwu->flash_properties.has_disp_config = 1;
+ else
+ fwu->flash_properties.has_disp_config = 0;
+
+ if (fwu->blkcount.pm_config)
+ fwu->flash_properties.has_pm_config = 1;
+ else
+ fwu->flash_properties.has_pm_config = 0;
+
+ if (fwu->blkcount.bl_config)
+ fwu->flash_properties.has_bl_config = 1;
+ else
+ fwu->flash_properties.has_bl_config = 0;
+
+ if (fwu->blkcount.guest_code)
+ fwu->has_guest_code = 1;
+ else
+ fwu->has_guest_code = 0;
+
+ kfree(ptable);
+
+ return 0;
+}
+
+/*
+ * Read the F34 (flash) query registers for bootloader v5/v6 parts:
+ * bootloader ID, per-version register offsets, flash block size, flash
+ * properties, per-partition block counts, and (v6 only) guest code support.
+ *
+ * Returns 0 on success or a negative error code from a failed register read.
+ */
+static int fwu_read_f34_v5v6_queries(void)
+{
+	int retval;
+	unsigned char count;
+	unsigned char base;
+	unsigned char buf[10];
+	struct f34_v5v6_flash_properties_2 properties_2;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + V5V6_BOOTLOADER_ID_OFFSET,
+			fwu->bootloader_id,
+			sizeof(fwu->bootloader_id));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read bootloader ID\n",
+				__func__);
+		return retval;
+	}
+
+	/* Register layout differs between BL v5 and BL v6; cache the offsets. */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.properties = V5_PROPERTIES_OFFSET;
+		fwu->off.block_size = V5_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V5_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V5_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V5_BLOCK_DATA_OFFSET;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.properties = V6_PROPERTIES_OFFSET;
+		fwu->off.properties_2 = V6_PROPERTIES_2_OFFSET;
+		fwu->off.block_size = V6_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V6_BLOCK_COUNT_OFFSET;
+		fwu->off.gc_block_count = V6_GUEST_CODE_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V6_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V6_BLOCK_DATA_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_size,
+			buf,
+			2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block size info\n",
+				__func__);
+		return retval;
+	}
+
+	/* Block size is a little-endian 16-bit value. */
+	batohs(&fwu->block_size, &(buf[0]));
+
+	if (fwu->bl_version == BL_V5) {
+		/* On v5 the command register follows the payload area. */
+		fwu->off.flash_cmd = fwu->off.payload + fwu->block_size;
+		fwu->off.flash_status = fwu->off.flash_cmd;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.flash_cmd = V6_FLASH_COMMAND_OFFSET;
+		fwu->off.flash_status = V6_FLASH_STATUS_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	/*
+	 * Block-count area: 2 bytes each for UI firmware and UI config,
+	 * plus 2 bytes per optional partition the properties advertise.
+	 */
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_bl_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_disp_config)
+		count += 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_count,
+			buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block count info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
+	batohs(&fwu->blkcount.ui_config, &(buf[2]));
+
+	/* Optional counts appear in pm/bl/disp order after the fixed pair. */
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config) {
+		batohs(&fwu->blkcount.pm_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_bl_config) {
+		batohs(&fwu->blkcount.bl_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_disp_config)
+		batohs(&fwu->blkcount.dp_config, &(buf[count]));
+
+	fwu->has_guest_code = false;
+
+	/* Query 4 (properties 2) reports guest code support on v6 parts. */
+	if (fwu->flash_properties.has_query4) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.properties_2,
+				properties_2.data,
+				sizeof(properties_2.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash properties 2\n",
+					__func__);
+			return retval;
+		}
+
+		if (properties_2.has_guest_code) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + fwu->off.gc_block_count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read guest code block count\n",
+						__func__);
+				return retval;
+			}
+
+			batohs(&fwu->blkcount.guest_code, &(buf[0]));
+			fwu->has_guest_code = true;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch the F34 query read to the bootloader-version-specific helper,
+ * clearing the cached block-count and physical-address tables first.
+ */
+static int fwu_read_f34_queries(void)
+{
+	int retval;
+
+	memset(&fwu->blkcount, 0x00, sizeof(fwu->blkcount));
+	memset(&fwu->phyaddr, 0x00, sizeof(fwu->phyaddr));
+
+	if (fwu->bl_version == BL_V7)
+		retval = fwu_read_f34_v7_queries();
+	else
+		retval = fwu_read_f34_v5v6_queries();
+
+	return retval;
+}
+
+/*
+ * Write block_cnt flash blocks from block_ptr to the partition selected by
+ * command, using the BL v7/v8 multi-block transfer interface: select the
+ * partition, reset the block number, then loop writing up to max_transfer
+ * blocks per command and waiting for the controller to go idle after each.
+ *
+ * Returns 0 on success or a negative error code on any register/wait failure.
+ */
+static int fwu_write_f34_v7_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short max_transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Cap a single transfer at one page worth of blocks. */
+	if (fwu->payload_length > (PAGE_SIZE / fwu->block_size))
+		max_transfer = PAGE_SIZE / fwu->block_size;
+	else
+		max_transfer = fwu->payload_length;
+
+	do {
+		/* Full-size chunk while at least max_transfer blocks remain. */
+		if (remaining / max_transfer)
+			transfer = max_transfer;
+		else
+			transfer = remaining;
+
+		/* Transfer length register is little-endian 16-bit. */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.payload,
+				block_ptr,
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		block_ptr += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * Write block_cnt flash blocks from block_ptr using the BL v5/v6 interface:
+ * one block per command, with an idle wait after each. The target config
+ * area is encoded in the upper bits of the block-number register.
+ *
+ * Returns 0 on success or a negative error code on any register/wait failure.
+ */
+static int fwu_write_f34_v5v6_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	/* Config area select lives in bits 7:5 of the high block-number byte. */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.payload,
+				block_ptr,
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command for block %d\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		block_ptr += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a flash-block write to the bootloader-version-specific helper
+ * (v7/v8 multi-block transfer vs. v5/v6 single-block loop).
+ */
+static int fwu_write_f34_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char cmd)
+{
+	int retval;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		retval = fwu_write_f34_v7_blocks(block_ptr, block_cnt, cmd);
+	else
+		retval = fwu_write_f34_v5v6_blocks(block_ptr, block_cnt, cmd);
+
+	return retval;
+}
+
+/*
+ * Read block_cnt flash blocks into fwu->read_config_buf using the BL v7/v8
+ * interface: select the partition via command, reset the block number, then
+ * loop issuing read commands for up to max_transfer blocks at a time.
+ *
+ * Caller must have sized fwu->read_config_buf for block_cnt * block_size
+ * bytes (see fwu_allocate_read_config_buf callers). Returns 0 on success.
+ */
+static int fwu_read_f34_v7_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short max_transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Cap a single transfer at one page worth of blocks. */
+	if (fwu->payload_length > (PAGE_SIZE / fwu->block_size))
+		max_transfer = PAGE_SIZE / fwu->block_size;
+	else
+		max_transfer = fwu->payload_length;
+
+	do {
+		/* Full-size chunk while at least max_transfer blocks remain. */
+		if (remaining / max_transfer)
+			transfer = max_transfer;
+		else
+			transfer = remaining;
+
+		/* Transfer length register is little-endian 16-bit. */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		index += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * Read block_cnt flash blocks into fwu->read_config_buf using the BL v5/v6
+ * interface: one read command + idle wait + payload read per block. The
+ * config area to read is encoded in the block-number register's high bits.
+ *
+ * Caller must have sized fwu->read_config_buf appropriately. Returns 0 on
+ * success or a negative error code.
+ */
+static int fwu_read_f34_v5v6_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	/* Config area select lives in bits 7:5 of the high block-number byte. */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write read config command\n",
+					__func__);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		index += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a flash-block read to the bootloader-version-specific helper
+ * (v7/v8 multi-block transfer vs. v5/v6 single-block loop).
+ */
+static int fwu_read_f34_blocks(unsigned short block_cnt, unsigned char cmd)
+{
+	int retval;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		retval = fwu_read_f34_v7_blocks(block_cnt, cmd);
+	else
+		retval = fwu_read_f34_v5v6_blocks(block_cnt, cmd);
+
+	return retval;
+}
+
+/*
+ * Obtain the firmware ID of the image to be flashed. If the parsed image
+ * header carries a firmware ID, use it directly; otherwise extract the
+ * numeric PR number ("PRxxxxxxx") from the image file name.
+ *
+ * On success *fw_id is set and 0 is returned; otherwise -EINVAL (no PR
+ * number / unparsable) or -ENOMEM.
+ */
+static int fwu_get_image_firmware_id(unsigned int *fw_id)
+{
+	int retval;
+	unsigned char index = 0;
+	char *strptr;
+	char *firmware_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.contains_firmware_id) {
+		*fw_id = fwu->img.firmware_id;
+	} else {
+		strptr = strnstr(fwu->image_name, "PR", MAX_IMAGE_NAME_LEN);
+		if (!strptr) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No valid PR number (PRxxxxxxx) found in image file name (%s)\n",
+					__func__, fwu->image_name);
+			return -EINVAL;
+		}
+
+		/* Skip the "PR" prefix and copy the digits that follow. */
+		strptr += 2;
+		firmware_id = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+		if (!firmware_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for firmware_id\n",
+					__func__);
+			return -ENOMEM;
+		}
+		while ((index < MAX_FIRMWARE_ID_LEN - 1) && strptr[index] >= '0'
+						&& strptr[index] <= '9') {
+			firmware_id[index] = strptr[index];
+			index++;
+		}
+		firmware_id[index] = '\0';
+
+		retval = sstrtoul(firmware_id, 10, (unsigned long *)fw_id);
+		kfree(firmware_id);
+		if (retval) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to obtain image firmware ID\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Read the device's config ID from the F34 control register into
+ * fwu->config_id. Size of the ID depends on the bootloader generation.
+ */
+static int fwu_get_device_config_id(void)
+{
+	int retval;
+	unsigned char config_id_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+				fwu->f34_fd.ctrl_base_addr,
+				fwu->config_id,
+				config_id_size);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Decide whether (and what) to reflash:
+ *  - forced update or device stuck in bootloader mode -> full UI firmware;
+ *  - image firmware ID newer than device -> full UI firmware;
+ *  - IDs equal -> byte-compare config IDs, flash UI config only if the
+ *    image config is newer;
+ *  - otherwise nothing.
+ * Returns the chosen flash_area (NONE / UI_FIRMWARE / UI_CONFIG).
+ */
+static enum flash_area fwu_go_nogo(void)
+{
+	int retval;
+	enum flash_area flash_area = NONE;
+	unsigned char ii;
+	unsigned char config_id_size;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->force_update) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Update both UI and config if device is in bootloader mode */
+	if (fwu->in_bl_mode) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Get device firmware ID */
+	device_fw_id = rmi4_data->firmware_id;
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Device firmware ID = %d\n",
+			__func__, device_fw_id);
+
+	/* Get image firmware ID */
+	retval = fwu_get_image_firmware_id(&image_fw_id);
+	if (retval < 0) {
+		flash_area = NONE;
+		goto exit;
+	}
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Image firmware ID = %d\n",
+			__func__, image_fw_id);
+
+	if (image_fw_id > device_fw_id) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Image firmware ID older than device firmware ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	/* Get device config ID */
+	retval = fwu_get_device_config_id();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device config ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	/* Big-endian-style byte compare: first differing byte decides. */
+	for (ii = 0; ii < config_id_size; ii++) {
+		if (fwu->img.ui_config.data[ii] > fwu->config_id[ii]) {
+			flash_area = UI_CONFIG;
+			goto exit;
+		} else if (fwu->img.ui_config.data[ii] < fwu->config_id[ii]) {
+			flash_area = NONE;
+			goto exit;
+		}
+	}
+
+	flash_area = NONE;
+
+exit:
+	if (flash_area == NONE) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: No need to do reflash\n",
+				__func__);
+	} else {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Updating %s\n",
+				__func__,
+				flash_area == UI_FIRMWARE ?
+				"UI firmware and config" :
+				"UI config only");
+	}
+
+	return flash_area;
+}
+
+/*
+ * Scan the RMI Page Description Table for the functions the updater needs:
+ * F01 (device control), F34 (flash, also determines bootloader version from
+ * the function version field) and F35 (microbootloader recovery). Records
+ * base addresses and the F34 interrupt mask, then enables that interrupt.
+ *
+ * If F01/F34 are missing but F35 is present, the device is in
+ * microbootloader mode: flag it, run the recovery status check and return 0.
+ * Returns 0 on success, -EINVAL on unrecognized F34 version or missing
+ * functions, or a negative error from register access.
+ */
+static int fwu_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	bool f01found = false;
+	bool f34found = false;
+	bool f35found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->in_ub_mode = false;
+
+	/* PDT is walked downward from PDT_START in fixed-size entries. */
+	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				addr,
+				(unsigned char *)&rmi_fd,
+				sizeof(rmi_fd));
+		if (retval < 0)
+			return retval;
+
+		if (rmi_fd.fn_number) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Found F%02x\n",
+					__func__, rmi_fd.fn_number);
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				f01found = true;
+
+				rmi4_data->f01_query_base_addr =
+						rmi_fd.query_base_addr;
+				rmi4_data->f01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				rmi4_data->f01_data_base_addr =
+						rmi_fd.data_base_addr;
+				rmi4_data->f01_cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			case SYNAPTICS_RMI4_F34:
+				f34found = true;
+				fwu->f34_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f34_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f34_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+
+				/* F34 function version maps to BL generation. */
+				switch (rmi_fd.fn_version) {
+				case F34_V0:
+					fwu->bl_version = BL_V5;
+					break;
+				case F34_V1:
+					fwu->bl_version = BL_V6;
+					break;
+				case F34_V2:
+					fwu->bl_version = BL_V7;
+					break;
+				default:
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Unrecognized F34 version\n",
+							__func__);
+					return -EINVAL;
+				}
+
+				/* Build the interrupt mask for F34's sources. */
+				fwu->intr_mask = 0;
+				intr_src = rmi_fd.intr_src_count;
+				intr_off = intr_count % 8;
+				for (ii = intr_off;
+						ii < (intr_src + intr_off);
+						ii++) {
+					fwu->intr_mask |= 1 << ii;
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				fwu->f35_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f35_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f35_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+				break;
+			}
+		} else {
+			/* Zero function number terminates the table. */
+			break;
+		}
+
+		intr_count += rmi_fd.intr_src_count;
+	}
+
+	if (!f01found || !f34found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find both F01 and F34\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			fwu->in_ub_mode = true;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: In microbootloader mode\n",
+					__func__);
+			fwu_recovery_check_status();
+			return 0;
+		}
+	}
+
+	/* Enable the F34 interrupt sources via F01 interrupt enable 0. */
+	rmi4_data->intr_mask[0] |= fwu->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Put the controller into flash-programming (bootloader) mode. No-op if the
+ * flash status already reports bootloader mode. Otherwise: disable the IRQ,
+ * issue the enable-flash-prog command, wait for idle, re-scan the PDT (base
+ * addresses change in BL mode), re-read F34 queries, and force F01 nosleep /
+ * normal sleep mode so the part stays awake during flashing.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_enter_flash_prog(void)
+{
+	int retval;
+	struct f01_device_control f01_device_control;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->in_bl_mode)
+		return 0;
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, true);
+	if (retval < 0)
+		return retval;
+
+	msleep(INT_DISABLE_WAIT_MS);
+
+	retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_wait_for_idle(ENABLE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	/* fwu_wait_for_idle refreshes in_bl_mode; verify the mode switch. */
+	if (!fwu->in_bl_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: BL mode not entered\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/* Optional board hook for bootloader-mode hardware setup. */
+	if (rmi4_data->hw_if->bl_hw_init) {
+		retval = rmi4_data->hw_if->bl_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_queries();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Keep the device awake for the duration of the flash operation. */
+	f01_device_control.nosleep = true;
+	f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(ENTER_FLASH_PROG_WAIT_MS);
+
+	return retval;
+}
+
+/*
+ * Verify the image's UI firmware partition occupies exactly the number of
+ * flash blocks the device reports. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_ui_firmware_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.ui_firmware.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.ui_firmware) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: UI firmware size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify the image's UI configuration partition matches the device's block
+ * count for that partition. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_ui_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.ui_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.ui_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: UI configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify the image's display configuration partition matches the device's
+ * block count for that partition. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_dp_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.dp_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.dp_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Display configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Verify the image's permanent configuration partition matches the device's
+ * block count for that partition. Returns 0 on match, -EINVAL otherwise.
+ * Only built when the extra sysfs flashing interface is enabled.
+ */
+static int fwu_check_pm_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.pm_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.pm_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Permanent configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Verify the image's bootloader configuration partition matches the device's
+ * block count for that partition. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_bl_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.bl_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.bl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify the image's guest code partition matches the device's block count
+ * for that partition. Returns 0 on match, -EINVAL otherwise.
+ */
+static int fwu_check_guest_code_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.guest_code.size / fwu->block_size;
+	if (block_count != fwu->blkcount.guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Flash the image's UI firmware partition to the device using the
+ * write-firmware command.
+ */
+static int fwu_write_firmware(void)
+{
+	unsigned short firmware_block_count;
+
+	firmware_block_count = fwu->img.ui_firmware.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.ui_firmware.data,
+			firmware_block_count, CMD_WRITE_FW);
+}
+
+/*
+ * Erase the configuration partition currently selected by fwu->config_area
+ * (UI, display, or bootloader) and wait for the flash to become idle.
+ *
+ * NOTE(review): the switch has no default case; if config_area is any other
+ * value no erase command is issued, yet the function still waits for idle
+ * and returns its status — confirm callers never pass other areas here.
+ */
+static int fwu_erase_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UI_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case BL_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return retval;
+}
+
+/*
+ * Erase the guest code partition and wait for the flash controller to
+ * report idle. Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+/*
+ * Erase every partition that will be reflashed. BL v7 has no erase-all
+ * command, so UI firmware and UI config are erased separately; other BL
+ * versions use the erase-all command (v8 returns early because erase-all
+ * already covers everything). Display config and guest code are erased
+ * only when both the device and the image have them.
+ */
+static int fwu_erase_all(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7) {
+		retval = fwu_write_f34_command(CMD_ERASE_UI_FIRMWARE);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = fwu_write_f34_command(CMD_ERASE_ALL);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase all command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		/*
+		 * On BL v8, a bad-partition-table status after erase-all is
+		 * tolerated (the partition table is rewritten afterwards).
+		 */
+		if (!(fwu->bl_version == BL_V8 &&
+				fwu->flash_status == BAD_PARTITION_TABLE)) {
+			if (retval < 0)
+				return retval;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		/* v8 erase-all covers all partitions; nothing more to do. */
+		if (fwu->bl_version == BL_V8)
+			return 0;
+	}
+
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		fwu->config_area = DP_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_erase_guest_code();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Flash the configuration staged in fwu->config_data/config_block_count
+ * (callers set config_area/config_data/config_size beforehand).
+ */
+static int fwu_write_configuration(void)
+{
+	return fwu_write_f34_blocks((unsigned char *)fwu->config_data,
+			fwu->config_block_count, CMD_WRITE_CONFIG);
+}
+
+/*
+ * Stage the image's UI configuration partition and flash it.
+ */
+static int fwu_write_ui_configuration(void)
+{
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->img.ui_config.data;
+	fwu->config_size = fwu->img.ui_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+/*
+ * Stage the image's display configuration partition and flash it.
+ */
+static int fwu_write_dp_configuration(void)
+{
+	fwu->config_area = DP_CONFIG_AREA;
+	fwu->config_data = fwu->img.dp_config.data;
+	fwu->config_size = fwu->img.dp_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Stage the image's permanent configuration partition and flash it.
+ * Only built when the extra sysfs flashing interface is enabled.
+ */
+static int fwu_write_pm_configuration(void)
+{
+	fwu->config_area = PM_CONFIG_AREA;
+	fwu->config_data = fwu->img.pm_config.data;
+	fwu->config_size = fwu->img.pm_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+#endif
+
+/*
+ * Erase and rewrite the flash configuration (partition table area) from the
+ * image, then reset the device so the new table takes effect. Fails with
+ * -EINVAL if the image's flash config size does not match the device's.
+ */
+static int fwu_write_flash_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_f34_command(CMD_ERASE_FLASH_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase flash configuration command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Reset so the controller reloads the new flash configuration. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * Flash the image's guest code partition to the device.
+ */
+static int fwu_write_guest_code(void)
+{
+	int retval;
+	unsigned short guest_code_block_count;
+
+	guest_code_block_count = fwu->img.guest_code.size / fwu->block_size;
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.guest_code.data,
+			guest_code_block_count, CMD_WRITE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Flash the image's lockdown data partition to the device.
+ */
+static int fwu_write_lockdown(void)
+{
+	unsigned short lockdown_block_count;
+
+	lockdown_block_count = fwu->img.lockdown.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.lockdown.data,
+			lockdown_block_count, CMD_WRITE_LOCKDOWN);
+}
+
+/*
+ * BL v8: write the image's flash configuration (which contains the
+ * partition table) and reset the device so it takes effect. No separate
+ * erase is needed here (v8 erase-all already cleared it).
+ */
+static int fwu_write_partition_table_v8(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Reset so the controller reloads the new partition table. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * BL v7: rewrite the partition table while preserving the bootloader
+ * configuration — read the current BL config into read_config_buf, erase
+ * it, write the new flash configuration (partition table), then restore
+ * the saved BL config. Returns 0 on success or a negative error code.
+ */
+static int fwu_write_partition_table_v7(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	block_count = fwu->blkcount.bl_config;
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	/* Save the current bootloader configuration before wiping it. */
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_flash_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Restore the saved bootloader configuration. */
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Perform a full reflash: validate partition sizes (when the partition
+ * table is unchanged), erase everything, rewrite the partition table if it
+ * changed (v7/v8), then program UI firmware, UI config, and — when present
+ * in both device and image — display config and guest code.
+ *
+ * Returns 0 on success or the first negative error encountered.
+ */
+static int fwu_do_reflash(void)
+{
+	int retval;
+
+	if (!fwu->new_partition_table) {
+		retval = fwu_check_ui_firmware_size();
+		if (retval < 0)
+			return retval;
+
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+
+		if (fwu->flash_properties.has_disp_config &&
+				fwu->img.contains_disp_config) {
+			retval = fwu_check_dp_configuration_size();
+			if (retval < 0)
+				return retval;
+		}
+
+		if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+			retval = fwu_check_guest_code_size();
+			if (retval < 0)
+				return retval;
+		}
+	} else if (fwu->bl_version == BL_V7) {
+		/* v7 needs the BL config size verified before preserving it. */
+		retval = fwu_check_bl_configuration_size();
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = fwu_erase_all();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->bl_version == BL_V7 && fwu->new_partition_table) {
+		retval = fwu_write_partition_table_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V8) {
+		retval = fwu_write_partition_table_v8();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	}
+
+	retval = fwu_write_firmware();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Firmware programmed\n", __func__);
+
+	fwu->config_area = UI_CONFIG_AREA;
+	retval = fwu_write_ui_configuration();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Configuration programmed\n", __func__);
+
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Display configuration programmed\n", __func__);
+	}
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_write_guest_code();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Guest code programmed\n", __func__);
+	}
+
+	return retval;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Read the configuration area selected by fwu->config_area from the device
+ * into fwu->read_config_buf. Validates that the requested area is supported
+ * and non-empty, enters flash-prog mode for the read, and always resets the
+ * device afterwards to leave bootloader mode. Serialized against other
+ * expansion-function init via rmi4_exp_init_mutex.
+ *
+ * Only built when the extra sysfs flashing interface is enabled.
+ */
+static int fwu_do_read_config(void)
+{
+	int retval;
+	unsigned short block_count;
+	unsigned short config_area;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		block_count = fwu->blkcount.ui_config;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.dp_config;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.pm_config;
+		break;
+	case BL_CONFIG_AREA:
+		if (!fwu->flash_properties.has_bl_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.bl_config;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (block_count == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid block count\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	/* Entering flash prog re-reads queries and may clobber config_area. */
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+
+exit:
+	/* Reset to leave bootloader mode regardless of the outcome. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return retval;
+}
+#endif
+
+/*
+ * Program the lockdown data on bootloader v7/v8 devices.  Skips
+ * programming when the flash status already reports the device as
+ * locked down.  Returns 0 on success or a negative error code.
+ */
+static int fwu_do_lockdown_v7(void)
+{
+	int retval;
+	struct f34_v7_data0 status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* device_cfg_status == 2 indicates an already locked-down device. */
+	if (status.device_cfg_status == 2) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+/*
+ * Program the lockdown data on bootloader v5/v6 devices.  A device
+ * whose flash properties report "unlocked == 0" is already locked down
+ * and is left untouched.  Returns 0 on success or a negative error code.
+ */
+static int fwu_do_lockdown_v5v6(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.query_base_addr + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->flash_properties.unlocked == 0) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Flash the guest code partition from the currently loaded firmware
+ * image.  Validates image and device support, refuses to run while the
+ * sensor is asleep, then erases and reprograms the guest code area.
+ * The device is reset (no rebuild) at the end regardless of outcome.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_start_write_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	if (!fwu->has_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!fwu->img.contains_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No guest code in firmware image\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Keep the device from suspending during the flash operation. */
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write guest code process\n", __func__);
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_check_guest_code_size();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_write_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	pr_notice("%s: Guest code programmed\n", __func__);
+
+exit:
+	rmi4_data->reset_device(rmi4_data, false);
+
+	pr_notice("%s: End of write guest code process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Flash the configuration area selected by fwu->config_area from the
+ * loaded firmware image.  UI config additionally requires the device
+ * and image firmware IDs to match.  The permanent (PM) area is written
+ * without a prior erase.  Returns 0 on success or a negative error code.
+ */
+static int fwu_start_write_config(void)
+{
+	int retval;
+	unsigned short config_area;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		/* UI config must pair with the firmware it was built for. */
+		device_fw_id = rmi4_data->firmware_id;
+		retval = fwu_get_image_firmware_id(&image_fw_id);
+		if (retval < 0)
+			return retval;
+		if (device_fw_id != image_fw_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Device and image firmware IDs don't match\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No display configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_dp_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_perm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No permanent configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_pm_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Configuration not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write config process\n", __func__);
+
+	/* Save/restore config_area across fwu_enter_flash_prog() in case
+	 * entering flash programming mode changes it.
+	 */
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
+	if (fwu->config_area != PM_CONFIG_AREA) {
+		retval = fwu_erase_configuration();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to erase config\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_ui_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case PM_CONFIG_AREA:
+		retval = fwu_write_pm_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	}
+
+	pr_notice("%s: Config written\n", __func__);
+
+exit:
+	/* NOTE(review): only a UI config update resets with the second
+	 * argument true (rebuild); confirm flag semantics against
+	 * reset_device() before changing.
+	 */
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case DP_CONFIG_AREA:
+	case PM_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	pr_notice("%s: End of write config process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+#endif
+
+/*
+ * Top-level reflash entry point.  When no image was supplied by the
+ * caller (fwu->image == NULL), the default image FW_IMAGE_NAME is
+ * fetched through the kernel firmware loader.  The image is validated
+ * (flash size, bootloader version, partition table), fwu_go_nogo()
+ * decides what needs reflashing, and optional lockdown data is
+ * programmed afterwards.  Returns 0 on success or a negative error code.
+ */
+static int fwu_start_reflash(void)
+{
+	int retval = 0;
+	enum flash_area flash_area;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of reflash process\n", __func__);
+
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IMAGE_NAME, sizeof(FW_IMAGE_NAME),
+				sizeof(FW_IMAGE_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy image file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware image %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware image %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+	}
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		goto exit;
+
+	/* Sanity-check that the image targets this device. */
+	if (fwu->blkcount.total_count != fwu->img.blkcount.total_count) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash size mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->bl_version != fwu->img.bl_version) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader version mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* A changed partition table is only accepted with force_update. */
+	if (!fwu->force_update && fwu->new_partition_table) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Partition table mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		goto exit;
+
+	if (fwu->in_bl_mode) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device in bootloader mode\n",
+				__func__);
+	}
+
+	/* Decide which areas (if any) actually need reflashing. */
+	flash_area = fwu_go_nogo();
+
+	if (flash_area != NONE) {
+		retval = fwu_enter_flash_prog();
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+
+	switch (flash_area) {
+	case UI_FIRMWARE:
+		retval = fwu_do_reflash();
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case UI_CONFIG:
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			break;
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			break;
+		retval = fwu_write_ui_configuration();
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case NONE:
+	default:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	/* Optionally program lockdown data present in the image. */
+	if (fwu->do_lockdown && (fwu->img.lockdown.data != NULL)) {
+		switch (fwu->bl_version) {
+		case BL_V5:
+		case BL_V6:
+			retval = fwu_do_lockdown_v5v6();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		case BL_V7:
+		case BL_V8:
+			retval = fwu_do_lockdown_v7();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		default:
+			break;
+		}
+	}
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	pr_notice("%s: End of reflash process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Read the F35 recovery-mode error code and fail if it is non-zero.
+ * Returns 0 when the status reports no error, a negative error code
+ * otherwise.
+ */
+static int fwu_recovery_check_status(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.data_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + F35_ERROR_CODE_OFFSET,
+			&status,
+			1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read status\n",
+				__func__);
+		return retval;
+	}
+
+	/* Only the low 7 bits carry the error code. */
+	status = status & MASK_7BIT;
+
+	if (status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Recovery mode status = %d\n",
+				__func__, status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Issue the F35 "erase all" command, wait the fixed erase time, then
+ * verify the recovery status.  Returns 0 on success or a negative
+ * error code.
+ */
+static int fwu_recovery_erase_all(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char command = CMD_F35_ERASE_ALL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue erase all command\n",
+				__func__);
+		return retval;
+	}
+
+	/* Fixed wait; the erase completion is not interrupt driven here. */
+	msleep(F35_ERASE_ALL_WAIT_MS);
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Stream the recovery image to the device in F35_CHUNK_SIZE chunks.
+ * Each transfer carries one chunk of data plus a trailing
+ * CMD_F35_WRITE_CHUNK command byte.  The chunk number register is
+ * written once with 0 up front; presumably the device auto-increments
+ * it per chunk write — TODO confirm against the F35 spec.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_recovery_write_chunk(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char chunk_number[] = {0, 0};
+	unsigned char chunk_spare;
+	unsigned char chunk_size;
+	unsigned char buf[F35_CHUNK_SIZE + 1];
+	unsigned short chunk;
+	unsigned short chunk_total;
+	unsigned short bytes_written = 0;
+	unsigned char *chunk_ptr = (unsigned char *)fwu->image;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + F35_CHUNK_NUM_LSB_OFFSET,
+			chunk_number,
+			sizeof(chunk_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Last byte of every transfer is the write-chunk command. */
+	buf[sizeof(buf) - 1] = CMD_F35_WRITE_CHUNK;
+
+	chunk_total = fwu->image_size / F35_CHUNK_SIZE;
+	chunk_spare = fwu->image_size % F35_CHUNK_SIZE;
+	if (chunk_spare)
+		chunk_total++;
+
+	for (chunk = 0; chunk < chunk_total; chunk++) {
+		if (chunk_spare && chunk == chunk_total - 1)
+			chunk_size = chunk_spare;
+		else
+			chunk_size = F35_CHUNK_SIZE;
+
+		/* Zero-pad the data portion; a short final chunk keeps the
+		 * command byte intact.
+		 */
+		memset(buf, 0x00, F35_CHUNK_SIZE);
+		secure_memcpy(buf, sizeof(buf), chunk_ptr,
+				fwu->image_size - bytes_written,
+				chunk_size);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + F35_CHUNK_DATA_OFFSET,
+				buf,
+				sizeof(buf));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write chunk data (chunk %d)\n",
+					__func__, chunk);
+			return retval;
+		}
+		chunk_ptr += chunk_size;
+		bytes_written += chunk_size;
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue the F35 reset command to leave recovery mode, then wait the
+ * fixed reset time.  Returns 0 on success or a negative error code.
+ */
+static int fwu_recovery_reset(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char command = CMD_F35_RESET;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(F35_RESET_WAIT_MS);
+
+	return 0;
+}
+
+/*
+ * Recover a device stuck in microbootloader (F35) mode: disable the
+ * interrupt, erase the external flash, stream the recovery image, and
+ * reset out of recovery mode.  NOTE(review): the interrupt disabled at
+ * the top is not explicitly re-enabled here — presumably
+ * reset_device(..., true) restores it; confirm before relying on it.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_start_recovery(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of recovery process\n", __func__);
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to disable interrupt\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = fwu_recovery_erase_all();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do erase all in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: External flash erased\n", __func__);
+
+	retval = fwu_recovery_write_chunk();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Chunk data programmed\n", __func__);
+
+	retval = fwu_recovery_reset();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to reset device in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Recovery mode reset issued\n", __func__);
+
+	rmi4_data->reset_device(rmi4_data, true);
+
+	retval = 0;
+
+exit:
+	pr_notice("%s: End of recovery process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+#endif
+
+/*
+ * Public entry point for triggering a reflash.
+ *
+ * @fw_data: in-memory firmware image, or NULL to load the default image
+ *           via the kernel firmware loader inside fwu_start_reflash().
+ *
+ * The caller's image pointer is only borrowed for the duration of the
+ * call (fwu->image is cleared afterwards).  Returns 0 on success,
+ * -ENODEV if the updater is absent/uninitialized or the device is in
+ * microbootloader mode, or a negative error code from the reflash.
+ */
+int synaptics_fw_updater(const unsigned char *fw_data)
+{
+	int retval;
+
+	if (!fwu)
+		return -ENODEV;
+
+	if (!fwu->initialized)
+		return -ENODEV;
+
+	if (fwu->in_ub_mode)
+		return -ENODEV;
+
+	fwu->image = fw_data;
+
+	retval = fwu_start_reflash();
+
+	fwu->image = NULL;
+
+	return retval;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+#ifdef DO_STARTUP_FW_UPDATE
+/*
+ * One-shot work item that performs the startup firmware update with the
+ * default image (synaptics_fw_updater(NULL)).  The static do_once flag
+ * makes repeat queuing a no-op.  When WAIT_FOR_FB_READY is defined, the
+ * update is deferred until the framebuffer reports ready or the timeout
+ * expires.
+ */
+static void fwu_startup_fw_update_work(struct work_struct *work)
+{
+	static unsigned char do_once = 1;
+#ifdef WAIT_FOR_FB_READY
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#endif
+
+	if (!do_once)
+		return;
+	do_once = 0;
+
+#ifdef WAIT_FOR_FB_READY
+	/* Poll fb_ready every FB_READY_WAIT_MS up to FB_READY_TIMEOUT_S. */
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+#endif
+
+	synaptics_fw_updater(NULL);
+
+	return;
+}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * sysfs bin read handler: despite the name, this returns the config
+ * data previously read back into fwu->read_config_buf (see
+ * fwu_do_read_config()), not the firmware image.
+ */
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (count < fwu->config_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	retval = secure_memcpy(buf, count, fwu->read_config_buf,
+			fwu->read_config_buf_size, fwu->config_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy config data\n",
+				__func__);
+		return retval;
+	}
+
+	return fwu->config_size;
+}
+
+/*
+ * sysfs bin write handler: append a chunk of the user-supplied image
+ * to fwu->ext_data_source at the current write position.  The buffer
+ * must have been allocated beforehand by writing "imagesize".
+ * Returns the number of bytes consumed or a negative error code.
+ */
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Guard against writing image data before "imagesize" allocated
+	 * the destination buffer; without this check secure_memcpy()
+	 * would be handed a NULL destination.
+	 */
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Image data buffer not allocated (write imagesize first)\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = secure_memcpy(&fwu->ext_data_source[fwu->data_pos],
+			fwu->image_size - fwu->data_pos, buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image data\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->data_pos += count;
+
+	return count;
+}
+
+/*
+ * sysfs "dorecovery" store: run the F35 recovery flow using the image
+ * previously staged via "imagesize"/"imagedata".  Only valid while the
+ * device is in microbootloader mode.  The staged buffer is freed on
+ * every exit path, success or failure.
+ */
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not in microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* No staged image: nothing to free, so return directly. */
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+	else
+		fwu->image = fwu->ext_data_source;
+
+	retval = fwu_start_recovery();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do recovery\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	return retval;
+}
+
+/*
+ * sysfs "doreflash" store: reflash from the staged image.  The written
+ * value is a bitmask: LOCKDOWN requests lockdown programming, and the
+ * remainder must be NORMAL or FORCE (FORCE skips go/no-go checks).
+ * force_update/do_lockdown are restored to their build defaults on exit,
+ * and the staged buffer is always freed.
+ */
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* No staged image: nothing to free, so return directly. */
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+	else
+		fwu->image = fwu->ext_data_source;
+
+	if (input & LOCKDOWN) {
+		fwu->do_lockdown = true;
+		input &= ~LOCKDOWN;
+	}
+
+	if ((input != NORMAL) && (input != FORCE)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input == FORCE)
+		fwu->force_update = true;
+
+	retval = synaptics_fw_updater(fwu->image);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	return retval;
+}
+
+/*
+ * sysfs "writeconfig" store: write the configuration area selected via
+ * the "configarea" attribute from the staged image.  Only accepts the
+ * value 1.  The staged buffer is always freed on exit.
+ */
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* No staged image: nothing to free, so return directly. */
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+	else
+		fwu->image = fwu->ext_data_source;
+
+	retval = fwu_start_write_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	return retval;
+}
+
+/*
+ * sysfs "readconfig" store: trigger a read-back of the selected config
+ * area into the internal buffer (retrieved afterwards through the
+ * "data" bin attribute).  Only accepts the value 1.
+ */
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_do_read_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read config\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * sysfs "configarea" store: select the config area for subsequent
+ * read/write operations.  No range check is done here; invalid values
+ * are rejected by the switch statements in fwu_do_read_config() /
+ * fwu_start_write_config().
+ */
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long config_area;
+
+	retval = sstrtoul(buf, 10, &config_area);
+	if (retval)
+		return retval;
+
+	fwu->config_area = config_area;
+
+	return count;
+}
+
+/*
+ * sysfs "imagename" store: set the firmware image file name requested
+ * from the firmware loader.  The name is length-checked and explicitly
+ * NUL-terminated; previously a write of exactly MAX_IMAGE_NAME_LEN
+ * bytes could leave fwu->image_name unterminated, and a shorter write
+ * could leave stale bytes from a longer earlier name.
+ */
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Reserve one byte for the NUL terminator. */
+	if (count >= MAX_IMAGE_NAME_LEN) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Image file name too long\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+		return retval;
+	}
+
+	/* Ensure the stored name is a valid C string. */
+	fwu->image_name[count] = '\0';
+
+	return count;
+}
+
+/*
+ * sysfs "imagesize" store: declare the size of the image about to be
+ * staged and (re)allocate the staging buffer, resetting the write
+ * position.  NOTE(review): a size of 0 or an allocation failure leaves
+ * fwu->image_size set with ext_data_source NULL; subsequent "imagedata"
+ * writes rely on the NULL check there — confirm callers handle this.
+ */
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &size);
+	if (retval)
+		return retval;
+
+	fwu->image_size = size;
+	fwu->data_pos = 0;
+
+	/* kfree(NULL) is a no-op, so a fresh handle is safe here too. */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	return count;
+}
+
+/* sysfs "blocksize" show: report the flash block size in bytes. */
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->block_size);
+}
+
+/* sysfs show: block count of the UI firmware partition. */
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_firmware);
+}
+
+/* sysfs show: block count of the UI configuration area. */
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_config);
+}
+
+/* sysfs show: block count of the display configuration area. */
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.dp_config);
+}
+
+/* sysfs show: block count of the permanent configuration area. */
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.pm_config);
+}
+
+/* sysfs show: block count of the bootloader configuration area. */
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.bl_config);
+}
+
+/* sysfs show: block count of the guest code partition. */
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.guest_code);
+}
+
+/*
+ * sysfs "writeguestcode" store: flash the guest code partition from the
+ * staged image.  Only accepts the value 1.  The staged buffer is always
+ * freed on exit.
+ */
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* No staged image: nothing to free, so return directly. */
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+	else
+		fwu->image = fwu->ext_data_source;
+
+	retval = fwu_start_write_guest_code();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write guest code\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	return retval;
+}
+#endif
+
+/*
+ * Attention (interrupt) callback: refresh the cached flash status when
+ * the asserted interrupt sources include the one owned by F34.
+ */
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (fwu && (fwu->intr_mask & intr_mask))
+		fwu_read_flash_status();
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* Create the "data" bin attribute used to stage/read image data. */
+static int synaptics_create_fwu_bin_file(struct synaptics_rmi4_data *rmi4_data)
+{
+	return sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+			&dev_attr_data);
+}
+
+/* Remove the "data" bin attribute created above. */
+static void synaptics_remove_fwu_bin_file(struct synaptics_rmi4_data *rmi4_data)
+{
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+}
+#else
+/* Stubs when the extra sysfs interface is configured out. */
+static int synaptics_create_fwu_bin_file(struct synaptics_rmi4_data *rmi4_data)
+{
+	return 0;
+}
+
+static void synaptics_remove_fwu_bin_file(struct synaptics_rmi4_data *rmi4_data)
+{
+}
+#endif
+
+/*
+ * Allocate and initialize the firmware updater handle: read PDT
+ * properties, scan the PDT, query F34, create the sysfs interface and,
+ * with DO_STARTUP_FW_UPDATE, queue a one-shot startup reflash.
+ * Returns 0 on success (including when a handle already exists) or a
+ * negative error code; all partial allocations are unwound on failure.
+ */
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+	struct pdt_properties pdt_props;
+
+	if (fwu) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+	if (!fwu) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	fwu->image_name = kzalloc(MAX_IMAGE_NAME_LEN, GFP_KERNEL);
+	if (!fwu->image_name) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image name\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_fwu;
+	}
+
+	fwu->rmi4_data = rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			PDT_PROPS,
+			pdt_props.data,
+			sizeof(pdt_props.data));
+	if (retval < 0) {
+		/* Non-fatal: treat unreadable PDT properties as all-zero. */
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read PDT properties, assuming 0x00\n",
+				__func__);
+	} else if (pdt_props.has_bsr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Reflash for LTS not currently supported\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_free_mem;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	/* F34 is only reachable outside microbootloader mode. */
+	if (!fwu->in_ub_mode) {
+		retval = fwu_read_f34_queries();
+		if (retval < 0)
+			goto exit_free_mem;
+
+		retval = fwu_get_device_config_id();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device config ID\n",
+					__func__);
+			goto exit_free_mem;
+		}
+	}
+
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	fwu->initialized = true;
+
+	retval = synaptics_create_fwu_bin_file(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+#ifdef DO_STARTUP_FW_UPDATE
+	fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+	if (!fwu->fwu_workqueue) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create workqueue\n",
+				__func__);
+		retval = -ENOMEM;
+		attr_count = ARRAY_SIZE(attrs);
+		goto exit_remove_attrs;
+	}
+	INIT_WORK(&fwu->fwu_work, fwu_startup_fw_update_work);
+	queue_work(fwu->fwu_workqueue,
+			&fwu->fwu_work);
+#endif
+
+	return 0;
+
+exit_remove_attrs:
+	/* attr_count is unsigned, so a descending "attr_count >= 0" loop
+	 * would never terminate and would index past attrs[]; count down
+	 * explicitly instead.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	synaptics_remove_fwu_bin_file(rmi4_data);
+
+exit_free_mem:
+	kfree(fwu->image_name);
+
+exit_free_fwu:
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the updater: cancel the startup work, remove the sysfs
+ * interface, free all buffers, and signal fwu_remove_complete so the
+ * module exit path can finish.  Safe to call when init never ran.
+ */
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!fwu)
+		goto exit;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	cancel_work_sync(&fwu->fwu_work);
+	flush_workqueue(fwu->fwu_workqueue);
+	destroy_workqueue(fwu->fwu_workqueue);
+#endif
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	synaptics_remove_fwu_bin_file(rmi4_data);
+
+	kfree(fwu->read_config_buf);
+	kfree(fwu->image_name);
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	complete(&fwu_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: (re)initialize the updater if it is missing,
+ * otherwise rescan the PDT and refresh the F34 queries when the device
+ * is not in microbootloader mode.
+ */
+static void synaptics_rmi4_fwu_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!fwu) {
+		synaptics_rmi4_fwu_init(rmi4_data);
+		return;
+	}
+
+	if (fwu_scan_pdt() < 0)
+		return;
+
+	if (!fwu->in_ub_mode)
+		fwu_read_f34_queries();
+}
+
+/* Expansion-function descriptor registering the updater's callbacks
+ * with the Synaptics DSX core driver.
+ */
+static struct synaptics_rmi4_exp_fn fwu_module = {
+	.fn_type = RMI_FW_UPDATER,
+	.init = synaptics_rmi4_fwu_init,
+	.remove = synaptics_rmi4_fwu_remove,
+	.reset = synaptics_rmi4_fwu_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_fwu_attn,
+};
+
+/* Register the updater expansion function with the core driver. */
+static int __init rmi4_fw_update_module_init(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, true);
+
+	return 0;
+}
+
+/* Unregister the expansion function and wait for remove to finish. */
+static void __exit rmi4_fw_update_module_exit(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, false);
+
+	wait_for_completion(&fwu_remove_complete);
+
+	return;
+}
+
+/* Module entry/exit hooks and metadata. */
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX FW Update Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_gesture.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_gesture.c
new file mode 100644
index 0000000..ae1a55af
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_gesture.c
@@ -0,0 +1,2309 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+/* Driver constants. */
+#define GESTURE_PHYS_NAME "synaptics_dsx/gesture"
+
+#define TUNING_SYSFS_DIR_NAME "tuning"
+
+/*
+ * When STORE_GESTURES is defined, template/valid data is also cached on
+ * the host (presumably so it can be rewritten after a device reset —
+ * the restore path is not in this chunk; confirm in init/reset code).
+ */
+#define STORE_GESTURES
+#ifdef STORE_GESTURES
+#define GESTURES_TO_STORE 10
+#endif
+
+/* Bit positions / mode values in the F12 control registers. */
+#define CTRL23_FINGER_REPORT_ENABLE_BIT 0
+#define CTRL27_UDG_ENABLE_BIT 4
+#define WAKEUP_GESTURE_MODE 0x02
+
+/* sysfs show/store handler forward declarations. */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+/* UDG engine helpers (defined later in this file). */
+static int udg_read_tuning_params(void);
+
+static int udg_write_tuning_params(void);
+
+static int udg_detection_enable(bool enable);
+
+static int udg_engine_enable(bool enable);
+
+static int udg_set_index(unsigned char index);
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void);
+static int udg_write_valid_data(void);
+static int udg_read_template_data(unsigned char index);
+static int udg_write_template_data(void);
+#endif
+
+/*
+ * Status codes reported by the device in gesture_detection[0]
+ * (decoded in udg_sysfs_template_detection_show()).
+ */
+enum gesture_type {
+ DETECTION = 0x0f,
+ REGISTRATION = 0x10,
+};
+
+/*
+ * UDG engine tuning parameters as laid out in the device register
+ * block; each *_lsb/*_msb pair forms a little-endian 16-bit value.
+ * Accessed through udg_read_tuning_params()/udg_write_tuning_params().
+ */
+struct udg_tuning {
+ union {
+ struct {
+ unsigned char maximum_number_of_templates;
+ unsigned char template_size;
+ unsigned char template_disp_lsb;
+ unsigned char template_disp_msb;
+ unsigned char rotation_inv_lsb;
+ unsigned char rotation_inv_msb;
+ unsigned char scale_inv_lsb;
+ unsigned char scale_inv_msb;
+ unsigned char thres_factor_lsb;
+ unsigned char thres_factor_msb;
+ unsigned char metric_thres_lsb;
+ unsigned char metric_thres_msb;
+ unsigned char inter_stroke_lsb;
+ unsigned char inter_stroke_msb;
+ } __packed;
+ unsigned char data[14];
+ };
+};
+
+/*
+ * Resolved register addresses for the UDG-related F12 data/control
+ * registers and the template/trace data areas (filled in at init,
+ * outside this chunk).
+ */
+struct udg_addr {
+ unsigned short data_4;
+ unsigned short ctrl_18;
+ unsigned short ctrl_20;
+ unsigned short ctrl_23;
+ unsigned short ctrl_27;
+ unsigned short ctrl_41;
+ unsigned short trace_x;
+ unsigned short trace_y;
+ unsigned short trace_segment;
+ unsigned short template_helper;
+ unsigned short template_data;
+ unsigned short template_flags;
+};
+
+/* F12 query 0 register layout: packed bitfields over the raw bytes. */
+struct synaptics_rmi4_f12_query_0 {
+ union {
+ struct {
+ struct {
+ unsigned char has_register_descriptors:1;
+ unsigned char has_closed_cover:1;
+ unsigned char has_fast_glove_detect:1;
+ unsigned char has_dribble:1;
+ unsigned char has_4p4_jitter_filter_strength:1;
+ unsigned char f12_query0_s0_b5__7:3;
+ } __packed;
+ struct {
+ unsigned char max_num_templates:4;
+ unsigned char f12_query0_s1_b4__7:4;
+ unsigned char template_size_lsb;
+ unsigned char template_size_msb;
+ } __packed;
+ };
+ unsigned char data[4];
+ };
+};
+
+/*
+ * F12 query 5 register layout: presence bits for control registers
+ * 0-47, used to compute the offsets of the sparse control registers.
+ */
+struct synaptics_rmi4_f12_query_5 {
+ union {
+ struct {
+ unsigned char size_of_query6;
+ struct {
+ unsigned char ctrl0_is_present:1;
+ unsigned char ctrl1_is_present:1;
+ unsigned char ctrl2_is_present:1;
+ unsigned char ctrl3_is_present:1;
+ unsigned char ctrl4_is_present:1;
+ unsigned char ctrl5_is_present:1;
+ unsigned char ctrl6_is_present:1;
+ unsigned char ctrl7_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl8_is_present:1;
+ unsigned char ctrl9_is_present:1;
+ unsigned char ctrl10_is_present:1;
+ unsigned char ctrl11_is_present:1;
+ unsigned char ctrl12_is_present:1;
+ unsigned char ctrl13_is_present:1;
+ unsigned char ctrl14_is_present:1;
+ unsigned char ctrl15_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl16_is_present:1;
+ unsigned char ctrl17_is_present:1;
+ unsigned char ctrl18_is_present:1;
+ unsigned char ctrl19_is_present:1;
+ unsigned char ctrl20_is_present:1;
+ unsigned char ctrl21_is_present:1;
+ unsigned char ctrl22_is_present:1;
+ unsigned char ctrl23_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl24_is_present:1;
+ unsigned char ctrl25_is_present:1;
+ unsigned char ctrl26_is_present:1;
+ unsigned char ctrl27_is_present:1;
+ unsigned char ctrl28_is_present:1;
+ unsigned char ctrl29_is_present:1;
+ unsigned char ctrl30_is_present:1;
+ unsigned char ctrl31_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl32_is_present:1;
+ unsigned char ctrl33_is_present:1;
+ unsigned char ctrl34_is_present:1;
+ unsigned char ctrl35_is_present:1;
+ unsigned char ctrl36_is_present:1;
+ unsigned char ctrl37_is_present:1;
+ unsigned char ctrl38_is_present:1;
+ unsigned char ctrl39_is_present:1;
+ } __packed;
+ struct {
+ unsigned char ctrl40_is_present:1;
+ unsigned char ctrl41_is_present:1;
+ unsigned char ctrl42_is_present:1;
+ unsigned char ctrl43_is_present:1;
+ unsigned char ctrl44_is_present:1;
+ unsigned char ctrl45_is_present:1;
+ unsigned char ctrl46_is_present:1;
+ unsigned char ctrl47_is_present:1;
+ } __packed;
+ };
+ unsigned char data[7];
+ };
+};
+
+/*
+ * F12 query 8 register layout: presence bits for data registers 0-23,
+ * used to compute the offsets of the sparse data registers.
+ */
+struct synaptics_rmi4_f12_query_8 {
+ union {
+ struct {
+ unsigned char size_of_query9;
+ struct {
+ unsigned char data0_is_present:1;
+ unsigned char data1_is_present:1;
+ unsigned char data2_is_present:1;
+ unsigned char data3_is_present:1;
+ unsigned char data4_is_present:1;
+ unsigned char data5_is_present:1;
+ unsigned char data6_is_present:1;
+ unsigned char data7_is_present:1;
+ } __packed;
+ struct {
+ unsigned char data8_is_present:1;
+ unsigned char data9_is_present:1;
+ unsigned char data10_is_present:1;
+ unsigned char data11_is_present:1;
+ unsigned char data12_is_present:1;
+ unsigned char data13_is_present:1;
+ unsigned char data14_is_present:1;
+ unsigned char data15_is_present:1;
+ } __packed;
+ struct {
+ unsigned char data16_is_present:1;
+ unsigned char data17_is_present:1;
+ unsigned char data18_is_present:1;
+ unsigned char data19_is_present:1;
+ unsigned char data20_is_present:1;
+ unsigned char data21_is_present:1;
+ unsigned char data22_is_present:1;
+ unsigned char data23_is_present:1;
+ } __packed;
+ };
+ unsigned char data[4];
+ };
+};
+
+/*
+ * F12 control 41: gesture registration control — enable bit, target
+ * template index, and the begin-registration trigger bit.
+ */
+struct synaptics_rmi4_f12_control_41 {
+ union {
+ struct {
+ unsigned char enable_registration:1;
+ unsigned char template_index:4;
+ unsigned char begin:1;
+ unsigned char f12_ctrl41_b6__7:2;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+/*
+ * Per-driver state for the user-defined-gesture (UDG) module: cached
+ * detection/registration results, register addresses and offsets, and
+ * the buffers used for control, trace and template transfers.
+ */
+struct synaptics_rmi4_udg_handle {
+ /* set by the attention handler, consumed by template_detection */
+ atomic_t attn_event;
+ unsigned char intr_mask;
+ unsigned char report_flags;
+ /* object reporting masks saved so registration can restore them */
+ unsigned char object_type_enable1;
+ unsigned char object_type_enable2;
+ unsigned char trace_size;
+ unsigned char template_index;
+ unsigned char max_num_templates;
+ unsigned char detection_score;
+ unsigned char detection_index;
+ unsigned char detection_status;
+ unsigned char registration_status;
+ unsigned char *ctrl_buf;
+ unsigned char *trace_data_buf;
+ unsigned char *template_data_buf;
+#ifdef STORE_GESTURES
+ unsigned char gestures_to_store;
+ unsigned char *storage_buf;
+ unsigned char valid_buf[2];
+#endif
+ unsigned short trace_data_buf_size;
+ unsigned short template_size;
+ unsigned short template_data_size;
+ unsigned short query_base_addr;
+ unsigned short control_base_addr;
+ unsigned short data_base_addr;
+ unsigned short command_base_addr;
+ /* offsets of subpackets inside the shared control registers */
+ unsigned short ctrl_18_sub10_off;
+ unsigned short ctrl_20_sub1_off;
+ unsigned short ctrl_23_sub3_off;
+ unsigned short ctrl_27_sub5_off;
+ struct input_dev *udg_dev;
+ struct kobject *tuning_dir;
+ struct udg_addr addr;
+ struct udg_tuning tuning;
+ struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* sysfs attributes exposed on the UDG input device. */
+static struct device_attribute attrs[] = {
+ __ATTR(engine_enable, 0220,
+ NULL,
+ udg_sysfs_engine_enable_store),
+ __ATTR(detection_enable, 0220,
+ NULL,
+ udg_sysfs_detection_enable_store),
+ __ATTR(detection_score, 0444,
+ udg_sysfs_detection_score_show,
+ NULL),
+ __ATTR(detection_index, 0444,
+ udg_sysfs_detection_index_show,
+ NULL),
+ __ATTR(registration_enable, 0220,
+ NULL,
+ udg_sysfs_registration_enable_store),
+ __ATTR(registration_begin, 0220,
+ NULL,
+ udg_sysfs_registration_begin_store),
+ __ATTR(registration_status, 0444,
+ udg_sysfs_registration_status_show,
+ NULL),
+ __ATTR(template_size, 0444,
+ udg_sysfs_template_size_show,
+ NULL),
+ __ATTR(template_max_index, 0444,
+ udg_sysfs_template_max_index_show,
+ NULL),
+ __ATTR(template_detection, 0444,
+ udg_sysfs_template_detection_show,
+ NULL),
+ __ATTR(template_index, 0220,
+ NULL,
+ udg_sysfs_template_index_store),
+ __ATTR(template_valid, 0664,
+ udg_sysfs_template_valid_show,
+ udg_sysfs_template_valid_store),
+ __ATTR(template_clear, 0220,
+ NULL,
+ udg_sysfs_template_clear_store),
+ __ATTR(trace_size, 0444,
+ udg_sysfs_trace_size_show,
+ NULL),
+};
+
+/* Binary sysfs file for reading/writing raw template data. */
+static struct bin_attribute template_data = {
+ .attr = {
+ .name = "template_data",
+ .mode = 0664,
+ },
+ .size = 0,
+ .read = udg_sysfs_template_data_show,
+ .write = udg_sysfs_template_data_store,
+};
+
+/* Binary sysfs file for reading the last gesture's trace data. */
+static struct bin_attribute trace_data = {
+ .attr = {
+ .name = "trace_data",
+ .mode = 0444,
+ },
+ .size = 0,
+ .read = udg_sysfs_trace_data_show,
+ .write = NULL,
+};
+
+/* Tuning parameters exposed under the "tuning" sysfs directory. */
+static struct device_attribute params[] = {
+ __ATTR(template_displacement, 0664,
+ udg_sysfs_template_displacement_show,
+ udg_sysfs_template_displacement_store),
+ __ATTR(rotation_invariance, 0664,
+ udg_sysfs_rotation_invariance_show,
+ udg_sysfs_rotation_invariance_store),
+ __ATTR(scale_invariance, 0664,
+ udg_sysfs_scale_invariance_show,
+ udg_sysfs_scale_invariance_store),
+ __ATTR(threshold_factor, 0664,
+ udg_sysfs_threshold_factor_show,
+ udg_sysfs_threshold_factor_store),
+ __ATTR(match_metric_threshold, 0664,
+ udg_sysfs_match_metric_threshold_show,
+ udg_sysfs_match_metric_threshold_store),
+ __ATTR(max_inter_stroke_time, 0664,
+ udg_sysfs_max_inter_stroke_time_show,
+ udg_sysfs_max_inter_stroke_time_store),
+};
+
+/* Singleton UDG handle; allocated at init (outside this chunk). */
+static struct synaptics_rmi4_udg_handle *udg;
+
+/*
+ * Byte sizes of the subpackets of F12 control registers 18/20/23/27 —
+ * presumably used to compute the ctrl_*_sub*_off offsets; confirm
+ * against the init code (not shown).
+ */
+static unsigned char ctrl_18_sub_size[] = {10, 10, 10, 2, 3, 4, 3, 3, 1, 1};
+static unsigned char ctrl_20_sub_size[] = {2};
+static unsigned char ctrl_23_sub_size[] = {1, 1, 1};
+static unsigned char ctrl_27_sub_size[] = {1, 5, 2, 1, 7};
+
+/* Signalled by the remove callback; module_exit waits on it. */
+DECLARE_COMPLETION(udg_remove_complete);
+
+/*
+ * sysfs: enable ("1") or disable ("0") the UDG engine.
+ * Returns count on success, -EINVAL on bad input, or the error from
+ * udg_engine_enable().
+ */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ bool enable;
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input == 1)
+ enable = true;
+ else if (input == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ retval = udg_engine_enable(enable);
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+/*
+ * sysfs: enable ("1") or disable ("0") gesture detection.
+ * Any cached detection status is discarded before switching modes.
+ */
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ bool enable;
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input == 1)
+ enable = true;
+ else if (input == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ udg->detection_status = 0;
+
+ retval = udg_detection_enable(enable);
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+/* sysfs: report the match score of the last detected gesture. */
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t length;
+
+ length = snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_score);
+
+ return length;
+}
+
+/* sysfs: report the template index of the last detected gesture. */
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t length;
+
+ length = snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_index);
+
+ return length;
+}
+
+/*
+ * sysfs: enable ("1") or disable ("0") gesture registration.
+ *
+ * On enable, F12_CTRL23 is programmed to report fingers only; on
+ * disable, the object-type enable masks saved at init time are
+ * restored.  In both cases the registration bit in F12_CTRL41 is
+ * updated last.  Returns count on success, -EINVAL on bad input, or a
+ * negative errno from the register accessors.
+ *
+ * The CTRL23 read-modify-write was duplicated verbatim in both
+ * branches of the original; only the payload differs, so the common
+ * read and write are hoisted out of the conditional.
+ */
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ bool enable;
+ unsigned int input;
+ struct synaptics_rmi4_f12_control_41 control_41;
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input == 1)
+ enable = true;
+ else if (input == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.ctrl_23,
+ udg->ctrl_buf,
+ udg->ctrl_23_sub3_off + 1);
+ if (retval < 0)
+ return retval;
+
+ if (enable) {
+ /* report fingers only while registering a gesture */
+ udg->ctrl_buf[0] = 1 << CTRL23_FINGER_REPORT_ENABLE_BIT;
+ if (udg->ctrl_23_sub3_off)
+ udg->ctrl_buf[udg->ctrl_23_sub3_off] = 0;
+ } else {
+ /* restore the object reporting masks saved at init */
+ udg->ctrl_buf[0] = udg->object_type_enable1;
+ if (udg->ctrl_23_sub3_off) {
+ udg->ctrl_buf[udg->ctrl_23_sub3_off] =
+ udg->object_type_enable2;
+ }
+ }
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ udg->addr.ctrl_23,
+ udg->ctrl_buf,
+ udg->ctrl_23_sub3_off + 1);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.ctrl_41,
+ control_41.data,
+ sizeof(control_41.data));
+ if (retval < 0)
+ return retval;
+
+ control_41.enable_registration = enable ? 1 : 0;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ udg->addr.ctrl_41,
+ control_41.data,
+ sizeof(control_41.data));
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+/*
+ * sysfs: set ("1") or clear ("0") the begin-registration bit in
+ * F12_CTRL41 via a read-modify-write.
+ */
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ bool begin;
+ unsigned int input;
+ struct synaptics_rmi4_f12_control_41 control_41;
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input == 1)
+ begin = true;
+ else if (input == 0)
+ begin = false;
+ else
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.ctrl_41,
+ control_41.data,
+ sizeof(control_41.data));
+ if (retval < 0)
+ return retval;
+
+ control_41.begin = begin ? 1 : 0;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ udg->addr.ctrl_41,
+ control_41.data,
+ sizeof(control_41.data));
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+/* sysfs: report the status of the last registration attempt. */
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%02x\n", udg->registration_status);
+}
+
+/* sysfs: report the size in bytes of one gesture template. */
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", udg->template_size);
+}
+
+/* sysfs: report the highest usable template index. */
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", udg->max_num_templates - 1);
+}
+
+/*
+ * sysfs: report and consume the last gesture event.
+ * Returns "0" when no attention event is pending or the status byte is
+ * unrecognized; otherwise decodes DETECTION/REGISTRATION results from
+ * the cached gesture_detection bytes and reports the status code.
+ * Both the attention flag and the cached status are cleared here, so
+ * each event is reported exactly once.
+ */
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ int attn_event;
+ unsigned char detection_status;
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ attn_event = atomic_read(&udg->attn_event);
+ atomic_set(&udg->attn_event, 0);
+
+ if (attn_event == 0)
+ return snprintf(buf, PAGE_SIZE, "0\n");
+
+ if (udg->detection_status == 0) {
+ /* status not cached by the attention handler; read F12 data */
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.data_4,
+ rmi4_data->gesture_detection,
+ sizeof(rmi4_data->gesture_detection));
+ if (retval < 0)
+ return retval;
+
+ udg->detection_status = rmi4_data->gesture_detection[0];
+ }
+
+ detection_status = udg->detection_status;
+ udg->detection_status = 0;
+
+ switch (detection_status) {
+ case DETECTION:
+ udg->detection_score = rmi4_data->gesture_detection[1];
+ udg->detection_index = rmi4_data->gesture_detection[4];
+ udg->trace_size = rmi4_data->gesture_detection[3];
+ break;
+ case REGISTRATION:
+ udg->registration_status = rmi4_data->gesture_detection[1];
+ udg->trace_size = rmi4_data->gesture_detection[3];
+ break;
+ default:
+ return snprintf(buf, PAGE_SIZE, "0\n");
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%02x\n", detection_status);
+}
+
+/*
+ * sysfs: select the template index subsequent template operations act
+ * on.  NOTE(review): the index is not range-checked against
+ * max_num_templates here — presumably udg_set_index() validates it;
+ * confirm in its definition (not in this chunk).
+ */
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long index;
+
+ retval = sstrtoul(buf, 10, &index);
+ if (retval)
+ return retval;
+
+ retval = udg_set_index((unsigned char)index);
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+/*
+ * sysfs: report whether the currently selected template is marked
+ * valid.  The valid bits live one-per-template in the two-byte
+ * template_flags register.
+ */
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ unsigned char valid;
+ unsigned char offset;
+ unsigned char byte_num;
+ unsigned char template_flags[2];
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ byte_num = udg->template_index / 8;
+ offset = udg->template_index % 8;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.template_flags,
+ template_flags,
+ sizeof(template_flags));
+ if (retval < 0)
+ return retval;
+
+ valid = (template_flags[byte_num] & (1 << offset)) >> offset;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", valid);
+}
+
+/*
+ * sysfs: mark the currently selected template valid (non-zero input)
+ * or invalid (0) via a read-modify-write of the template_flags
+ * register; the host-side copy of the valid bits is refreshed when
+ * STORE_GESTURES is enabled.
+ */
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long valid;
+ unsigned char offset;
+ unsigned char byte_num;
+ unsigned char template_flags[2];
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ retval = sstrtoul(buf, 10, &valid);
+ if (retval)
+ return retval;
+
+ if (valid > 0)
+ valid = 1;
+
+ byte_num = udg->template_index / 8;
+ offset = udg->template_index % 8;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.template_flags,
+ template_flags,
+ sizeof(template_flags));
+ if (retval < 0)
+ return retval;
+
+ if (valid)
+ template_flags[byte_num] |= (1 << offset);
+ else
+ template_flags[byte_num] &= ~(1 << offset);
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ udg->addr.template_flags,
+ template_flags,
+ sizeof(template_flags));
+ if (retval < 0)
+ return retval;
+
+#ifdef STORE_GESTURES
+ udg_read_valid_data();
+#endif
+
+ return count;
+}
+
+/*
+ * sysfs: clear the currently selected template.  Only "1" is accepted.
+ * Zeroes the template data on the device, then reuses the valid-bit
+ * store handler with input "0" to mark the template invalid; the
+ * host-side copies are refreshed when STORE_GESTURES is enabled.
+ */
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ const char cmd[] = {'0', 0};
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input != 1)
+ return -EINVAL;
+
+ memset(udg->template_data_buf, 0x00, udg->template_data_size);
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ udg->addr.template_data,
+ udg->template_data_buf,
+ udg->template_data_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to clear template data\n",
+ __func__);
+ return retval;
+ }
+
+ retval = udg_sysfs_template_valid_store(dev, attr, cmd, 1);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to clear valid bit\n",
+ __func__);
+ return retval;
+ }
+
+#ifdef STORE_GESTURES
+ udg_read_template_data(udg->template_index);
+ udg_read_valid_data();
+#endif
+
+ return count;
+}
+
+/* sysfs: report the number of trace points of the last gesture. */
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", udg->trace_size);
+}
+
+/*
+ * sysfs (binary): dump the trace of the last gesture — X and Y
+ * coordinates (2 bytes per point each) followed by segment data
+ * (1 byte per point), i.e. 5 bytes per trace point total.
+ * Returns the number of bytes copied or a negative errno.
+ *
+ * Fixes vs. original: the size_t count is printed with %u (it was %d
+ * for an unsigned argument), and the else-after-return branches are
+ * flattened.
+ */
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count)
+{
+ int retval;
+ unsigned short index = 0;
+ unsigned short trace_data_size;
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ trace_data_size = udg->trace_size * 5;
+
+ if (trace_data_size == 0)
+ return -EINVAL;
+
+ if (count < trace_data_size) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Not enough space (%u bytes) in buffer\n",
+ __func__, (unsigned int)count);
+ return -EINVAL;
+ }
+
+ /* grow the staging buffer on demand; it is kept for reuse */
+ if (udg->trace_data_buf_size < trace_data_size) {
+ if (udg->trace_data_buf_size)
+ kfree(udg->trace_data_buf);
+ udg->trace_data_buf = kzalloc(trace_data_size, GFP_KERNEL);
+ if (!udg->trace_data_buf) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for trace data buffer\n",
+ __func__);
+ udg->trace_data_buf_size = 0;
+ return -ENOMEM;
+ }
+ udg->trace_data_buf_size = trace_data_size;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.trace_x,
+ &udg->trace_data_buf[index],
+ udg->trace_size * 2);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read trace X data\n",
+ __func__);
+ return retval;
+ }
+ index += udg->trace_size * 2;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.trace_y,
+ &udg->trace_data_buf[index],
+ udg->trace_size * 2);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read trace Y data\n",
+ __func__);
+ return retval;
+ }
+ index += udg->trace_size * 2;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.trace_segment,
+ &udg->trace_data_buf[index],
+ udg->trace_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read trace segment data\n",
+ __func__);
+ return retval;
+ }
+
+ retval = secure_memcpy(buf, count, udg->trace_data_buf,
+ udg->trace_data_buf_size, trace_data_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to copy trace data\n",
+ __func__);
+ return retval;
+ }
+
+ return trace_data_size;
+}
+
+/*
+ * sysfs (binary): read the raw template data of the currently selected
+ * template from the device into the user buffer.  Returns the template
+ * data size on success or a negative errno.
+ *
+ * Fix vs. original: the size_t count is printed with %u (it was %d for
+ * an unsigned argument).
+ */
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count)
+{
+ int retval;
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ if (count < udg->template_data_size) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Not enough space (%u bytes) in buffer\n",
+ __func__, (unsigned int)count);
+ return -EINVAL;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ udg->addr.template_data,
+ udg->template_data_buf,
+ udg->template_data_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read template data\n",
+ __func__);
+ return retval;
+ }
+
+ retval = secure_memcpy(buf, count, udg->template_data_buf,
+ udg->template_data_size, udg->template_data_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to copy template data\n",
+ __func__);
+ return retval;
+ }
+
+#ifdef STORE_GESTURES
+ /* refresh the host-side copies of template and valid data */
+ udg_read_template_data(udg->template_index);
+ udg_read_valid_data();
+#endif
+
+ return udg->template_data_size;
+}
+
+/*
+ * sysfs (binary): write raw template data for the currently selected
+ * template to the device.  secure_memcpy() bounds-checks the incoming
+ * count against the template_data_buf size before the device write.
+ */
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count)
+{
+ int retval;
+ struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+ retval = secure_memcpy(udg->template_data_buf, udg->template_data_size,
+ buf, count, count);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to copy template data\n",
+ __func__);
+ return retval;
+ }
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ udg->addr.template_data,
+ udg->template_data_buf,
+ count);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write template data\n",
+ __func__);
+ return retval;
+ }
+
+#ifdef STORE_GESTURES
+ /* refresh the host-side copies of template and valid data */
+ udg_read_template_data(udg->template_index);
+ udg_read_valid_data();
+#endif
+
+ return count;
+}
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ unsigned short template_displacement;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ template_displacement =
+ ((unsigned short)udg->tuning.template_disp_lsb << 0) |
+ ((unsigned short)udg->tuning.template_disp_msb << 8);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", template_displacement);
+}
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long input;
+
+ retval = sstrtoul(buf, 10, &input);
+ if (retval)
+ return retval;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ udg->tuning.template_disp_lsb = (unsigned char)(input >> 0);
+ udg->tuning.template_disp_msb = (unsigned char)(input >> 8);
+
+ retval = udg_write_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ unsigned short rotation_invariance;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ rotation_invariance =
+ ((unsigned short)udg->tuning.rotation_inv_lsb << 0) |
+ ((unsigned short)udg->tuning.rotation_inv_msb << 8);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", rotation_invariance);
+}
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long input;
+
+ retval = sstrtoul(buf, 10, &input);
+ if (retval)
+ return retval;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ udg->tuning.rotation_inv_lsb = (unsigned char)(input >> 0);
+ udg->tuning.rotation_inv_msb = (unsigned char)(input >> 8);
+
+ retval = udg_write_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ unsigned short scale_invariance;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ scale_invariance =
+ ((unsigned short)udg->tuning.scale_inv_lsb << 0) |
+ ((unsigned short)udg->tuning.scale_inv_msb << 8);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", scale_invariance);
+}
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long input;
+
+ retval = sstrtoul(buf, 10, &input);
+ if (retval)
+ return retval;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ udg->tuning.scale_inv_lsb = (unsigned char)(input >> 0);
+ udg->tuning.scale_inv_msb = (unsigned char)(input >> 8);
+
+ retval = udg_write_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ unsigned short threshold_factor;
+
+ retval = udg_read_tuning_params();
+ if (retval < 0)
+ return retval;
+
+ threshold_factor =
+ ((unsigned short)udg->tuning.thres_factor_lsb << 0) |
+ ((unsigned short)udg->tuning.thres_factor_msb << 8);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", threshold_factor);
+}
+
+/*
+ * Sysfs store: parse a decimal value into the threshold factor tuning
+ * bytes (LSB/MSB) and flush the tuning block back to the device.
+ */
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.thres_factor_lsb = (unsigned char)(input >> 0);
+	udg->tuning.thres_factor_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs show: report the 16-bit match metric threshold tuning value,
+ * assembled from the LSB/MSB byte pair refreshed from the device.
+ */
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short match_metric_threshold;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	match_metric_threshold =
+			((unsigned short)udg->tuning.metric_thres_lsb << 0) |
+			((unsigned short)udg->tuning.metric_thres_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", match_metric_threshold);
+}
+
+/*
+ * Sysfs store: parse a decimal value into the match metric threshold
+ * tuning bytes (LSB/MSB) and flush the tuning block back to the device.
+ */
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.metric_thres_lsb = (unsigned char)(input >> 0);
+	udg->tuning.metric_thres_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs show: report the 16-bit maximum inter-stroke time tuning value,
+ * assembled from the LSB/MSB byte pair refreshed from the device.
+ */
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short max_inter_stroke_time;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	max_inter_stroke_time =
+			((unsigned short)udg->tuning.inter_stroke_lsb << 0) |
+			((unsigned short)udg->tuning.inter_stroke_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", max_inter_stroke_time);
+}
+
+/*
+ * Sysfs store: parse a decimal value into the maximum inter-stroke time
+ * tuning bytes (LSB/MSB) and flush the tuning block back to the device.
+ */
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.inter_stroke_lsb = (unsigned char)(input >> 0);
+	udg->tuning.inter_stroke_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * udg_ctrl_subpacket() - test whether a sub-packet of an F12 control
+ * register is present on this device
+ *
+ * @ctrlreg: F12 control register number to inspect
+ * @subpacket: sub-packet index within that register
+ * @query_5: query 5 contents (per-register presence bitmap)
+ *
+ * Query 5 holds one presence bit per control register; query 6 encodes,
+ * for each present register, a variable-length list of sub-packet
+ * presence bytes (7 presence bits per byte, MSB set means another byte
+ * follows).  This walks query 6 past all registers preceding @ctrlreg,
+ * then checks the requested sub-packet bit.
+ *
+ * Returns 1 if the sub-packet is present, 0 if not, or a negative error
+ * code on bus failure / invalid register number.
+ */
+static int udg_ctrl_subpacket(unsigned char ctrlreg,
+		unsigned char subpacket,
+		struct synaptics_rmi4_f12_query_5 *query_5)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* data[0] of query 5 is its own size byte; the rest is the bitmap. */
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	/* Check the register's presence bit in query 5. */
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	/* Skip the query 6 entries of every present register before ours. */
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		/* Size byte 0x00 means a 3-byte extended size follows. */
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		/* MSB set on a presence byte means more presence bytes follow. */
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	/* q6_index now points at our register's size byte; step past it. */
+	cnt = 0;
+	q6_index++;
+	offset = subpacket / 7;
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	/* Presence list ended before reaching the requested sub-packet. */
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+/*
+ * udg_read_tuning_params() - refresh the cached tuning block
+ *
+ * Reads F12 ctrl 18 up to and including the tuning sub-packet into
+ * udg->ctrl_buf, then copies the tuning bytes into udg->tuning.data.
+ * Returns 0 on success or a negative error code from the bus read.
+ */
+static int udg_read_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	secure_memcpy(udg->tuning.data,
+			sizeof(udg->tuning.data),
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			sizeof(struct udg_tuning));
+
+	return 0;
+}
+
+/*
+ * udg_write_tuning_params() - flush the cached tuning block to the device
+ *
+ * Copies udg->tuning.data into the tuning sub-packet slot of ctrl_buf
+ * and writes F12 ctrl 18 back.  ctrl_buf is expected to still hold the
+ * preceding sub-packets from the last udg_read_tuning_params() call.
+ * Returns 0 on success or a negative error code from the bus write.
+ */
+static int udg_write_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	secure_memcpy((unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			udg->tuning.data,
+			sizeof(udg->tuning.data),
+			sizeof(struct udg_tuning));
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * udg_detection_enable() - switch gesture detection reporting on or off
+ *
+ * Read-modify-write of the report flags byte in F12 ctrl 20: when
+ * enabling, the byte is set to WAKEUP_GESTURE_MODE; when disabling, the
+ * flags saved at init time (udg->report_flags) are restored.
+ * Returns 0 on success or a negative error code from the bus access.
+ */
+static int udg_detection_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = WAKEUP_GESTURE_MODE;
+	else
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = udg->report_flags;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * udg_engine_enable() - turn the user defined gesture engine on or off
+ *
+ * Read-modify-write of the UDG enable bit in F12 ctrl 27: the register
+ * block up to and including the sub-packet holding the enable bit is
+ * read into udg->ctrl_buf, the bit is set or cleared, and the block is
+ * written back.  (The original duplicated the whole read/write sequence
+ * in both branches; only the bit operation actually differs.)
+ *
+ * Returns 0 on success or a negative error code from the bus access.
+ */
+static int udg_engine_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] |=
+				(1 << CTRL27_UDG_ENABLE_BIT);
+	else
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] &=
+				~(1 << CTRL27_UDG_ENABLE_BIT);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * udg_report() - attention handler work for a gesture interrupt
+ *
+ * Marks that an attention event arrived, and while the device is
+ * suspended reads the gesture detection status (unless the core already
+ * captured it into rmi4_data->gesture_detection).  On a DETECTION
+ * status a KEY_WAKEUP press/release pair is injected and the suspend
+ * flag is cleared so the system wakes.
+ */
+static void udg_report(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	atomic_set(&udg->attn_event, 1);
+
+	if (rmi4_data->suspend) {
+		/* Byte 0 == 0 means the core has not cached the status yet. */
+		if (rmi4_data->gesture_detection[0] == 0) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					udg->addr.data_4,
+					rmi4_data->gesture_detection,
+					sizeof(rmi4_data->gesture_detection));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read gesture detection\n",
+						__func__);
+				return;
+			}
+		}
+
+		/* Consume the cached status so it is not reported twice. */
+		udg->detection_status = rmi4_data->gesture_detection[0];
+		rmi4_data->gesture_detection[0] = 0;
+
+		if (udg->detection_status == DETECTION) {
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 1);
+			input_sync(udg->udg_dev);
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 0);
+			input_sync(udg->udg_dev);
+			rmi4_data->suspend = false;
+		}
+	}
+
+	return;
+}
+
+/*
+ * udg_set_index() - select the active gesture template
+ *
+ * @index: template slot to select; must be below udg->max_num_templates
+ *
+ * Updates the template index field of F12 ctrl 41 via read-modify-write
+ * so subsequent template data accesses address the chosen slot.
+ * Returns 0 on success, -EINVAL for an out-of-range index, or a
+ * negative error code from the bus access.
+ */
+static int udg_set_index(unsigned char index)
+{
+	int retval;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (index >= udg->max_num_templates)
+		return -EINVAL;
+
+	udg->template_index = index;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.template_index = udg->template_index;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+#ifdef STORE_GESTURES
+/*
+ * udg_read_valid_data() - cache the template validity flags
+ *
+ * Reads the template flags register into udg->valid_buf so the flags
+ * can be restored after a device reset (STORE_GESTURES support).
+ * Returns 0 on success or a negative error code from the bus read.
+ */
+static int udg_read_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * udg_write_valid_data() - restore the cached template validity flags
+ *
+ * Writes udg->valid_buf back to the template flags register, typically
+ * after a reset/reinit (STORE_GESTURES support).
+ * Returns 0 on success or a negative error code from the bus write.
+ */
+static int udg_write_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * udg_read_template_data() - cache one gesture template from the device
+ *
+ * @index: template slot to read
+ *
+ * Selects the template via udg_set_index() and reads its data into the
+ * slot's region of udg->storage_buf (STORE_GESTURES support).
+ * Returns 0 on success or a negative error code from the bus read.
+ */
+static int udg_read_template_data(unsigned char index)
+{
+	int retval;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* NOTE(review): udg_set_index() return is ignored; an out-of-range
+	 * index would silently read the previously selected template. */
+	udg_set_index(index);
+	storage = &(udg->storage_buf[index * udg->template_data_size]);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			storage,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * udg_write_template_data() - restore all cached gesture templates
+ *
+ * Iterates over the stored slots, selecting each template and writing
+ * its cached data from udg->storage_buf back to the device
+ * (STORE_GESTURES support).  Returns 0 on success or the first negative
+ * error code encountered.
+ */
+static int udg_write_template_data(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (ii = 0; ii < udg->gestures_to_store; ii++) {
+		udg_set_index(ii);
+		storage = &(udg->storage_buf[ii * udg->template_data_size]);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.template_data,
+				storage,
+				udg->template_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write template data\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * udg_reg_init() - discover F12 register layout for gesture support
+ *
+ * F12 packs its data/control registers: a register's address is the
+ * base address plus the count of present lower-numbered registers
+ * (presence bits in query 8 for data, query 5 for control).  This
+ * computes the addresses of data 4 and the trace/template registers,
+ * of controls 18/20/23/27/41, and the byte offsets of the sub-packets
+ * used elsewhere (ctrl 18 sub 10, ctrl 20 sub 1, ctrl 23 sub 3,
+ * ctrl 27 sub 5).  It also caches template geometry from query 0 and
+ * the initial report flags / object type enables.
+ *
+ * Returns 0 on success, -ENODEV when required registers are missing,
+ * or a negative error code from a bus access.
+ */
+static int udg_reg_init(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_offset;
+	unsigned char size_of_query;
+	unsigned char ctrl_18_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_27_offset;
+	unsigned char ctrl_41_offset;
+	struct synaptics_rmi4_f12_query_0 query_0;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Query 7 holds the size of query 8; need at least 4 bytes of
+	 * presence bits to cover data registers 16..21. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 7,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 4) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Data 16..21 (trace x/y/segment, template helper/data/flags) must
+	 * all be present for gesture support. */
+	if ((query_8.data16_is_present) &&
+			(query_8.data17_is_present) &&
+			(query_8.data18_is_present) &&
+			(query_8.data19_is_present) &&
+			(query_8.data20_is_present) &&
+			(query_8.data21_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present;
+		udg->addr.data_4 = udg->data_base_addr + data_offset;
+		data_offset = data_offset +
+				query_8.data4_is_present +
+				query_8.data5_is_present +
+				query_8.data6_is_present +
+				query_8.data7_is_present +
+				query_8.data8_is_present +
+				query_8.data9_is_present +
+				query_8.data10_is_present +
+				query_8.data11_is_present +
+				query_8.data12_is_present +
+				query_8.data13_is_present +
+				query_8.data14_is_present +
+				query_8.data15_is_present;
+		udg->addr.trace_x = udg->data_base_addr + data_offset;
+		udg->addr.trace_y = udg->addr.trace_x + 1;
+		udg->addr.trace_segment = udg->addr.trace_y + 1;
+		udg->addr.template_helper = udg->addr.trace_segment + 1;
+		udg->addr.template_data = udg->addr.template_helper + 1;
+		udg->addr.template_flags = udg->addr.template_data + 1;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	/* Query 4 holds the size of query 5; need at least 7 bytes of
+	 * presence bits to cover control registers up to 41. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 4,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 7) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing control registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Each control register offset = number of present lower registers. */
+	ctrl_18_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present;
+
+	ctrl_20_offset = ctrl_18_offset +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_27_offset = ctrl_23_offset+
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present;
+
+	ctrl_41_offset = ctrl_27_offset+
+			query_5.ctrl27_is_present +
+			query_5.ctrl28_is_present +
+			query_5.ctrl29_is_present +
+			query_5.ctrl30_is_present +
+			query_5.ctrl31_is_present +
+			query_5.ctrl32_is_present +
+			query_5.ctrl33_is_present +
+			query_5.ctrl34_is_present +
+			query_5.ctrl35_is_present +
+			query_5.ctrl36_is_present +
+			query_5.ctrl37_is_present +
+			query_5.ctrl38_is_present +
+			query_5.ctrl39_is_present +
+			query_5.ctrl40_is_present;
+
+	udg->addr.ctrl_18 = udg->control_base_addr + ctrl_18_offset;
+	udg->addr.ctrl_20 = udg->control_base_addr + ctrl_20_offset;
+	udg->addr.ctrl_23 = udg->control_base_addr + ctrl_23_offset;
+	udg->addr.ctrl_27 = udg->control_base_addr + ctrl_27_offset;
+	udg->addr.ctrl_41 = udg->control_base_addr + ctrl_41_offset;
+
+	/* Sub-packet offsets: sum the sizes of the present sub-packets
+	 * preceding the one of interest in each control register. */
+	udg->ctrl_18_sub10_off = 0;
+	for (ii = 0; ii < 10; ii++) {
+		retval = udg_ctrl_subpacket(18, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_18_sub10_off += ctrl_18_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_20_sub1_off = 0;
+	for (ii = 0; ii < 1; ii++) {
+		retval = udg_ctrl_subpacket(20, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_20_sub1_off += ctrl_20_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_23_sub3_off = 0;
+	for (ii = 0; ii < 3; ii++) {
+		retval = udg_ctrl_subpacket(23, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_23_sub3_off += ctrl_23_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* If ctrl 23 sub-packet 3 itself is absent the offset is unused. */
+	retval = udg_ctrl_subpacket(23, 3, &query_5);
+	if (retval == 0)
+		udg->ctrl_23_sub3_off = 0;
+	else if (retval < 0)
+		return retval;
+
+	udg->ctrl_27_sub5_off = 0;
+	for (ii = 0; ii < 5; ii++) {
+		retval = udg_ctrl_subpacket(27, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_27_sub5_off += ctrl_27_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 0,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0)
+		return retval;
+
+	/* Template geometry: points * (x,y) * 2 bytes + header/footer. */
+	udg->max_num_templates = query_0.max_num_templates;
+	udg->template_size =
+			((unsigned short)query_0.template_size_lsb << 0) |
+			((unsigned short)query_0.template_size_msb << 8);
+	udg->template_data_size = udg->template_size * 4 * 2 + 4 + 1;
+
+#ifdef STORE_GESTURES
+	udg->gestures_to_store = udg->max_num_templates;
+	if (GESTURES_TO_STORE < udg->gestures_to_store)
+		udg->gestures_to_store = GESTURES_TO_STORE;
+#endif
+
+	/* Save the default report flags so detection disable can restore them. */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->report_flags = udg->ctrl_buf[udg->ctrl_20_sub1_off];
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_23,
+			udg->ctrl_buf,
+			udg->ctrl_23_sub3_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->object_type_enable1 = udg->ctrl_buf[0];
+	if (udg->ctrl_23_sub3_off)
+		udg->object_type_enable2 = udg->ctrl_buf[udg->ctrl_23_sub3_off];
+
+	return retval;
+}
+
+/*
+ * udg_scan_pdt() - locate F12 in the Page Description Table
+ *
+ * Walks the PDT downward on each page looking for function F12.  When
+ * found, records its base addresses, initializes the gesture register
+ * map via udg_reg_init(), computes the interrupt mask from the
+ * interrupt sources preceding F12, and enables those interrupt bits in
+ * F01 control 1.
+ *
+ * Returns 0 on success, -EINVAL when F12 is not found, or a negative
+ * error code from a bus access.
+ */
+static int udg_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			/* Strip the page bits again for the next iteration. */
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* Empty entry: no more functions on this page. */
+				break;
+			}
+
+			/* Accumulate interrupt sources of functions before F12. */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	udg->query_base_addr = fd.query_base_addr | (page << 8);
+	udg->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	udg->data_base_addr = fd.data_base_addr | (page << 8);
+	udg->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = udg_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize user defined gesture registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* F12's interrupt bits start after those of preceding functions. */
+	udg->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		udg->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= udg->intr_mask;
+
+	/* F01 ctrl 1 is the interrupt enable register. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&rmi4_data->intr_mask[0],
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Attention callback: dispatch to udg_report() when the interrupt
+ * status intersects the gesture function's interrupt mask.
+ */
+static void synaptics_rmi4_udg_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!udg)
+		return;
+
+	if (udg->intr_mask & intr_mask)
+		udg_report();
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_udg_init() - set up the user defined gesture module
+ *
+ * Allocates the global udg handle and its buffers, scans the PDT for
+ * F12, registers an input device reporting KEY_WAKEUP, creates the
+ * template/trace bin files, the sysfs attributes and the tuning
+ * directory, and enables the gesture engine.
+ *
+ * Returns 0 on success or a negative error code; on failure everything
+ * allocated so far is released and the global udg pointer is reset to
+ * NULL.
+ *
+ * Fixes versus the previous revision:
+ *  - the error-path cleanup loops used "for (x--; x >= 0; x--)" with
+ *    unsigned char counters; the comparison is always true, so the loop
+ *    never terminated once the counter wrapped at 0 and indexed out of
+ *    bounds.  They now count down with an explicit > 0 test.
+ *  - a kobject_create_and_add() failure jumped to cleanup with retval
+ *    still 0 from the preceding successful call, reporting success to
+ *    the caller after freeing everything; retval is now set to -ENOMEM.
+ */
+static int synaptics_rmi4_udg_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size;
+	unsigned char attr_count;
+	unsigned char param_count;
+
+	if (udg) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	udg = kzalloc(sizeof(*udg), GFP_KERNEL);
+	if (!udg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for udg\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* ctrl_buf must hold all ctrl 18 sub-packets plus the tuning block. */
+	size = 0;
+	for (ii = 0; ii < sizeof(ctrl_18_sub_size); ii++)
+		size += ctrl_18_sub_size[ii];
+	size += sizeof(struct udg_tuning);
+	udg->ctrl_buf = kzalloc(size, GFP_KERNEL);
+	if (!udg->ctrl_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_udg;
+	}
+
+	udg->rmi4_data = rmi4_data;
+
+	/* Locate F12 and compute the register map / template geometry. */
+	retval = udg_scan_pdt();
+	if (retval < 0)
+		goto exit_free_ctrl_buf;
+
+	udg->template_data_buf = kzalloc(udg->template_data_size, GFP_KERNEL);
+	if (!udg->template_data_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for template_data_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+
+#ifdef STORE_GESTURES
+	udg->storage_buf = kzalloc(
+			udg->template_data_size * udg->gestures_to_store,
+			GFP_KERNEL);
+	if (!udg->storage_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for storage_buf\n",
+				__func__);
+		kfree(udg->template_data_buf);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+#endif
+
+	udg->udg_dev = input_allocate_device();
+	if (udg->udg_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate gesture device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_template_data_buf;
+	}
+
+	udg->udg_dev->name = GESTURE_DRIVER_NAME;
+	udg->udg_dev->phys = GESTURE_PHYS_NAME;
+	udg->udg_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	udg->udg_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	udg->udg_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(udg->udg_dev, rmi4_data);
+
+	set_bit(EV_KEY, udg->udg_dev->evbit);
+	set_bit(KEY_WAKEUP, udg->udg_dev->keybit);
+	input_set_capability(udg->udg_dev, EV_KEY, KEY_WAKEUP);
+
+	retval = input_register_device(udg->udg_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register gesture device\n",
+				__func__);
+		input_free_device(udg->udg_dev);
+		goto exit_free_template_data_buf;
+	}
+
+	udg->tuning_dir = kobject_create_and_add(TUNING_SYSFS_DIR_NAME,
+			&udg->udg_dev->dev.kobj);
+	if (!udg->tuning_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create tuning sysfs directory\n",
+				__func__);
+		/* Previously missing: without this the function returned the
+		 * stale 0 from input_register_device() after tearing down. */
+		retval = -ENOMEM;
+		goto exit_unregister_input_device;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create template data bin file\n",
+				__func__);
+		goto exit_remove_sysfs_directory;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create trace data bin file\n",
+				__func__);
+		goto exit_remove_bin_file;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	for (param_count = 0; param_count < ARRAY_SIZE(params); param_count++) {
+		retval = sysfs_create_file(udg->tuning_dir,
+				&params[param_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create tuning parameters\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_params;
+		}
+	}
+
+	retval = udg_engine_enable(true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to enable gesture engine\n",
+				__func__);
+		goto exit_remove_params;
+	}
+
+	return 0;
+
+exit_remove_params:
+	/* Counters are unsigned; count down with an explicit > 0 test so
+	 * the loop terminates (the old "x >= 0" form never did). */
+	while (param_count > 0) {
+		param_count--;
+		sysfs_remove_file(udg->tuning_dir,
+				&params[param_count].attr);
+	}
+
+exit_remove_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+
+exit_remove_bin_file:
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+
+exit_remove_sysfs_directory:
+	kobject_put(udg->tuning_dir);
+
+exit_unregister_input_device:
+	input_unregister_device(udg->udg_dev);
+
+exit_free_template_data_buf:
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+
+exit_free_ctrl_buf:
+	kfree(udg->ctrl_buf);
+
+exit_free_udg:
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Remove callback: tear down sysfs entries, unregister the input
+ * device, free all buffers and the udg handle, then signal
+ * udg_remove_complete so module exit can proceed.
+ */
+static void synaptics_rmi4_udg_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char count;
+
+	if (!udg)
+		goto exit;
+
+	for (count = 0; count < ARRAY_SIZE(params); count++) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[count].attr);
+	}
+
+	for (count = 0; count < ARRAY_SIZE(attrs); count++) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	kobject_put(udg->tuning_dir);
+
+	input_unregister_device(udg->udg_dev);
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+	/* trace_data_buf is allocated lazily by the trace bin-file read
+	 * path (not visible in this chunk); kfree(NULL) is a no-op. */
+	kfree(udg->trace_data_buf);
+	kfree(udg->ctrl_buf);
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	complete(&udg_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: re-scan the PDT (register addresses may change after
+ * firmware reflash), re-enable the engine, and restore stored templates.
+ * If the handle was never created, fall back to full init.
+ * NOTE(review): return values of the re-init calls are ignored; a
+ * failed rescan leaves stale register addresses in place.
+ */
+static void synaptics_rmi4_udg_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg) {
+		synaptics_rmi4_udg_init(rmi4_data);
+		return;
+	}
+
+	udg_scan_pdt();
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+/*
+ * Reinit callback: re-enable the gesture engine and restore stored
+ * templates after a soft reinitialization (no PDT rescan needed).
+ */
+static void synaptics_rmi4_udg_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+/*
+ * Early-suspend callback: keep the device awake (sleep disabled), keep
+ * the IRQ enabled and wake-capable, and arm gesture detection so a
+ * gesture can wake the system.
+ */
+static void synaptics_rmi4_udg_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+/*
+ * Suspend callback: identical to the early-suspend path — keep the
+ * device awake and IRQ wake-capable, and arm gesture detection.
+ */
+static void synaptics_rmi4_udg_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+/*
+ * Resume callback: drop the IRQ wake capability and disarm gesture
+ * detection (restoring the normal report flags).
+ */
+static void synaptics_rmi4_udg_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+/*
+ * Late-resume callback: identical to the resume path — drop the IRQ
+ * wake capability and disarm gesture detection.
+ */
+static void synaptics_rmi4_udg_l_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+/* Expansion-function descriptor hooking the gesture module into the
+ * DSX core's lifecycle (init/remove/reset/suspend/resume/attention). */
+static struct synaptics_rmi4_exp_fn gesture_module = {
+	.fn_type = RMI_GESTURE,
+	.init = synaptics_rmi4_udg_init,
+	.remove = synaptics_rmi4_udg_remove,
+	.reset = synaptics_rmi4_udg_reset,
+	.reinit = synaptics_rmi4_udg_reinit,
+	.early_suspend = synaptics_rmi4_udg_e_suspend,
+	.suspend = synaptics_rmi4_udg_suspend,
+	.resume = synaptics_rmi4_udg_resume,
+	.late_resume = synaptics_rmi4_udg_l_resume,
+	.attn = synaptics_rmi4_udg_attn,
+};
+
+/* Module entry: register the gesture expansion function with the core. */
+static int __init rmi4_gesture_module_init(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, true);
+
+	return 0;
+}
+
+/* Module exit: unregister from the core and wait until the remove
+ * callback has finished tearing down the udg handle. */
+static void __exit rmi4_gesture_module_exit(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, false);
+
+	wait_for_completion(&udg_remove_complete);
+
+	return;
+}
+
+/* Kernel module registration and metadata. */
+module_init(rmi4_gesture_module_init);
+module_exit(rmi4_gesture_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX User Defined Gesture Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
new file mode 100644
index 0000000..df17a0b
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
@@ -0,0 +1,703 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+/*
+#define I2C_BURST_LIMIT 255
+*/
+
+#define XFER_MSGS_LIMIT 8
+
+static unsigned char *wr_buf;
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate @bdata from the "synaptics,*" device-tree
+ * properties of @dev's node.
+ *
+ * Missing optional properties fall back to a default (0, NULL or -1);
+ * a present-but-unreadable sub-property of an optional group (e.g. a
+ * power GPIO without a readable "synaptics,power-on-state") is an error.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	/* IRQ line plus its DT-provided flags */
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	bdata->resume_in_workqueue = of_property_read_bool(np,
+			"synaptics,resume-in-workqueue");
+
+	/* Optional regulator names (NULL when absent) */
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	/* Optional power GPIO and its active state */
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	/* Optional reset GPIO, its active state and pulse width */
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+
+	/* Optional alternate (microbootloader) I2C address */
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	/* Optional 0D capacitive button key codes (one u32 per button) */
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/*
+	 * Optional virtual buttons: 5 u32s per button.  NOTE(review): the
+	 * meaning of the 5 fields is not visible here — confirm against the
+	 * core driver's consumer of vir_button_map.
+	 */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Grow the shared write buffer (wr_buf) to at least @count bytes.  The
+ * current capacity is tracked in a function-local static; the buffer is
+ * only reallocated when a larger transfer is requested.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int synaptics_rmi4_i2c_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int count)
+{
+	static unsigned int buf_size;
+
+	if (count <= buf_size)
+		return 0;
+
+	kfree(wr_buf);	/* kfree(NULL) is a no-op on the first allocation */
+	wr_buf = kzalloc(count, GFP_KERNEL);
+	if (!wr_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for buffer\n",
+				__func__);
+		buf_size = 0;
+		return -ENOMEM;
+	}
+	buf_size = count;
+
+	return 0;
+}
+
+/*
+ * Toggle the slave address used for transfers between the normal client
+ * address and the microbootloader (ub) address; used as a recovery step
+ * when transfers keep failing.  No-op when no ub address was configured.
+ */
+static void synaptics_rmi4_i2c_check_addr(struct synaptics_rmi4_data *rmi4_data,
+		struct i2c_client *i2c)
+{
+	struct synaptics_dsx_board_data *bdata = hw_if.board_data;
+
+	if (bdata->ub_i2c_addr == -1)
+		return;
+
+	if (bdata->i2c_addr == i2c->addr)
+		bdata->i2c_addr = bdata->ub_i2c_addr;
+	else
+		bdata->i2c_addr = i2c->addr;
+}
+
+/*
+ * synaptics_rmi4_i2c_set_page() - select the RMI register page for @addr.
+ *
+ * RMI addresses are 16-bit; the high byte is a page number that must be
+ * written to the page-select register before 8-bit register accesses.
+ * The write is skipped when the cached current_page already matches, and
+ * is otherwise retried up to SYN_I2C_RETRY_TIMES, toggling to the
+ * alternate I2C address halfway through the retries.
+ *
+ * Returns PAGE_SELECT_LEN on success, 0 when all retries failed.
+ */
+static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char page;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[1];
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = PAGE_SELECT_LEN;
+	msg[0].buf = buf;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	/* 0xFF: page-select register — NOTE(review): RMI4 convention, confirm */
+	buf[0] = MASK_8BIT;
+	buf[1] = page;
+
+	if (page != rmi4_data->current_page) {
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+				rmi4_data->current_page = page;
+				retval = PAGE_SELECT_LEN;
+				break;
+			}
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through the retries, try the other address */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				msg[0].addr = hw_if.board_data->i2c_addr;
+			}
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_read() - read @length bytes from RMI address @addr
+ * into @data.
+ *
+ * Builds one address-write message followed by one or more read messages
+ * (chunked to I2C_BURST_LIMIT bytes when that limit is compiled in), then
+ * issues them in batches of at most XFER_MSGS_LIMIT per i2c_transfer()
+ * call.  Each batch is retried up to SYN_I2C_RETRY_TIMES, toggling to the
+ * alternate I2C address halfway through the retries.
+ *
+ * Returns @length on success or -EIO.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char buf;
+#ifdef I2C_BURST_LIMIT
+	unsigned char ii;
+	unsigned char rd_msgs = ((length - 1) / I2C_BURST_LIMIT) + 1;
+#else
+	unsigned char rd_msgs = 1;
+#endif
+	unsigned char index = 0;
+	unsigned char xfer_msgs;
+	unsigned char remaining_msgs;
+	unsigned short i2c_addr;
+	unsigned short data_offset = 0;
+	unsigned short remaining_length = length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_adapter *adap = i2c->adapter;
+	struct i2c_msg msg[XFER_MSGS_LIMIT + 1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* msg[0]: write the 8-bit in-page register address */
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+
+#ifdef I2C_BURST_LIMIT
+	/* Full-size burst reads for all but the final chunk */
+	for (ii = 0; ii < (rd_msgs - 1); ii++) {
+		msg[ii + 1].addr = hw_if.board_data->i2c_addr;
+		msg[ii + 1].flags = I2C_M_RD;
+		msg[ii + 1].len = I2C_BURST_LIMIT;
+		msg[ii + 1].buf = &data[data_offset];
+		data_offset += I2C_BURST_LIMIT;
+		remaining_length -= I2C_BURST_LIMIT;
+	}
+#endif
+
+	/* Final (or only) read message picks up the remaining bytes */
+	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
+	msg[rd_msgs].flags = I2C_M_RD;
+	msg[rd_msgs].len = remaining_length;
+	msg[rd_msgs].buf = &data[data_offset];
+
+	buf = addr & MASK_8BIT;
+
+	remaining_msgs = rd_msgs + 1;
+
+	while (remaining_msgs) {
+		if (remaining_msgs > XFER_MSGS_LIMIT)
+			xfer_msgs = XFER_MSGS_LIMIT;
+		else
+			xfer_msgs = remaining_msgs;
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			retval = i2c_transfer(adap, &msg[index], xfer_msgs);
+			if (retval == xfer_msgs)
+				break;
+
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through retries, switch slave address and
+			 * repoint every message at it.
+			 */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				i2c_addr = hw_if.board_data->i2c_addr;
+				msg[0].addr = i2c_addr;
+#ifdef I2C_BURST_LIMIT
+				for (ii = 0; ii < (rd_msgs - 1); ii++)
+					msg[ii + 1].addr = i2c_addr;
+#endif
+				msg[rd_msgs].addr = i2c_addr;
+			}
+		}
+
+		if (retry == SYN_I2C_RETRY_TIMES) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C read over retry limit\n",
+					__func__);
+			retval = -EIO;
+			goto exit;
+		}
+
+		remaining_msgs -= xfer_msgs;
+		index += xfer_msgs;
+	}
+
+	retval = length;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_write() - write @length bytes from @data to RMI
+ * address @addr.
+ *
+ * The in-page register address byte and the payload are combined in the
+ * shared wr_buf (grown on demand) and sent as a single message, retried
+ * up to SYN_I2C_RETRY_TIMES with the alternate-address fall-back halfway
+ * through.  Serialized against reads via rmi4_io_ctrl_mutex.
+ *
+ * Returns @length on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char retry;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* +1 byte for the leading register address */
+	retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = length + 1;
+	msg[0].buf = wr_buf;
+
+	wr_buf[0] = addr & MASK_8BIT;
+	retval = secure_memcpy(&wr_buf[1], length, &data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+			retval = length;
+			break;
+		}
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+
+		/* Halfway through the retries, try the other address */
+		if (retry == SYN_I2C_RETRY_TIMES / 2) {
+			synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+			msg[0].addr = hw_if.board_data->i2c_addr;
+		}
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C write over retry limit\n",
+				__func__);
+		retval = -EIO;
+	}
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+/*
+ * Enable the I2C controller's interface and core clocks (secure-touch
+ * path); on core-clock failure the already-enabled interface clock is
+ * rolled back.  Returns 0 on success or the clk error code.
+ */
+static int synaptics_rmi4_clk_prepare_enable(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	int ret;
+
+	ret = clk_prepare_enable(rmi4_data->iface_clk);
+	if (ret) {
+		dev_err(rmi4_data->pdev->dev.parent,
+			"error on clk_prepare_enable(iface_clk):%d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(rmi4_data->core_clk);
+	if (ret) {
+		/* Undo the interface clock before reporting failure */
+		clk_disable_unprepare(rmi4_data->iface_clk);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"error clk_prepare_enable(core_clk):%d\n", ret);
+	}
+	return ret;
+}
+
+/* Disable the clocks enabled by synaptics_rmi4_clk_prepare_enable(). */
+static void synaptics_rmi4_clk_disable_unprepare(
+		struct synaptics_rmi4_data *rmi4_data)
+{
+	clk_disable_unprepare(rmi4_data->core_clk);
+	clk_disable_unprepare(rmi4_data->iface_clk);
+}
+
+/*
+ * Secure-touch "get": runtime-resume the I2C adapter and enable the bus
+ * clocks before direct I/O.  If clock enabling fails, the runtime PM
+ * reference is dropped again.  Returns the pm_runtime/clk status code.
+ */
+static int synaptics_rmi4_i2c_get(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+	retval = pm_runtime_get_sync(i2c->adapter->dev.parent);
+	if (retval >= 0 && rmi4_data->core_clk != NULL &&
+			rmi4_data->iface_clk != NULL) {
+		retval = synaptics_rmi4_clk_prepare_enable(rmi4_data);
+		if (retval)
+			pm_runtime_put_sync(i2c->adapter->dev.parent);
+	}
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* Reverse of synaptics_rmi4_i2c_get(): disable clocks, drop the PM ref. */
+static void synaptics_rmi4_i2c_put(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+	if (rmi4_data->core_clk != NULL && rmi4_data->iface_clk != NULL)
+		synaptics_rmi4_clk_disable_unprepare(rmi4_data);
+	pm_runtime_put_sync(i2c->adapter->dev.parent);
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+}
+#endif
+
+/* I2C implementation of the DSX bus-access operations handed to the core. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
+	.get = synaptics_rmi4_i2c_get,
+	.put = synaptics_rmi4_i2c_put,
+#endif
+};
+
+/*
+ * Release callback for the platform device: frees the kzalloc'd device
+ * structure once its last reference is dropped.
+ */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+}
+
+/*
+ * synaptics_rmi4_i2c_probe() - I2C probe: parse board data (DT or
+ * platform data), then register a platform device that the DSX core
+ * driver binds to, passing the hardware interface via platform_data.
+ *
+ * Fixes over the original: synaptics_dsx_i2c_device is no longer leaked
+ * on the -ENOMEM / parse_dt() error paths, parse_dt() failures are no
+ * longer ignored, and a platform_device_register() failure drops the
+ * device reference instead of leaking it.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		/* Propagate DT parsing errors instead of ignoring them */
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	/* NOTE(review): board_data may be NULL when CONFIG_OF is set but the
+	 * client has no of_node — pre-existing behavior, confirm intent.
+	 */
+	hw_if.board_data->i2c_addr = client->addr;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/*
+		 * platform_device_register() initializes the device before
+		 * adding it, so drop the reference rather than kfree();
+		 * the release callback frees the structure.
+		 */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_device:
+	/* Not yet registered: safe to free directly */
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/* I2C remove: tear down the platform device registered at probe time. */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	platform_device_unregister(synaptics_dsx_i2c_device);
+	return 0;
+}
+
+/* Legacy (non-DT) I2C device ID table. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/*
+ * Device-tree match table for "synaptics,dsx-i2c" nodes.  Declared const:
+ * struct device_driver's .of_match_table is a const pointer, and OF match
+ * tables are never modified at runtime.
+ */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C driver glue; matches by DT compatible or legacy ID table. */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/*
+ * synaptics_rmi4_bus_init_v26() - register the I2C bus driver; exported
+ * for the DSX core module to call.  Returns i2c_add_driver()'s status.
+ */
+int synaptics_rmi4_bus_init_v26(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init_v26);
+
+/*
+ * synaptics_rmi4_bus_exit_v26() - unregister the I2C bus driver and free
+ * the shared write buffer.
+ *
+ * Fix over the original: the driver is removed *before* wr_buf is freed,
+ * so no in-flight synaptics_rmi4_i2c_write() can still be using the
+ * buffer; the pointer is also cleared to guard against a later re-init.
+ */
+void synaptics_rmi4_bus_exit_v26(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	kfree(wr_buf);
+	wr_buf = NULL;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit_v26);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_proximity.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_proximity.c
new file mode 100644
index 0000000..ce8979c
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_proximity.c
@@ -0,0 +1,692 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define PROX_PHYS_NAME "synaptics_dsx/proximity"
+
+#define HOVER_Z_MAX (255)
+
+#define HOVERING_FINGER_EN (1 << 4)
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+/* sysfs attributes exposed on the core driver's input device. */
+static struct device_attribute attrs[] = {
+	__ATTR(hover_finger_en, 0664,
+			synaptics_rmi4_hover_finger_en_show,
+			synaptics_rmi4_hover_finger_en_store),
+};
+
+/*
+ * F12 query register 5: presence bitmap for control registers 0-23.
+ * Summing the *_is_present bits yields the byte offset of a given control
+ * register from the F12 control base (see prox_reg_init()).
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];	/* raw register image */
+	};
+};
+
+/* F12 query register 8: presence bitmap for data registers 0-7. */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];	/* raw register image */
+	};
+};
+
+/*
+ * Raw F12 hovering-finger data record: status byte, little-endian 16-bit
+ * X/Y coordinates and a Z (distance) byte.
+ */
+struct prox_finger_data {
+	union {
+		struct {
+			unsigned char object_type_and_status;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char z;
+		} __packed;
+		unsigned char proximity_data[6];	/* raw register image */
+	};
+};
+
+/* Runtime state for the proximity (hover) handler singleton ("prox"). */
+struct synaptics_rmi4_prox_handle {
+	bool hover_finger_present;	/* a hover is currently reported */
+	bool hover_finger_en;		/* hover reporting enabled */
+	unsigned char intr_mask;	/* F12 bits in the F01 interrupt mask */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short hover_finger_en_addr;	/* F12 ctrl 23 address */
+	unsigned short hover_finger_data_addr;	/* F12 data 1 address */
+	struct input_dev *prox_dev;
+	struct prox_finger_data *finger_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_prox_handle *prox;
+
+DECLARE_COMPLETION(prox_remove_complete);
+
+/* Report "no hovering finger": release touch/tool keys and sync. */
+static void prox_hover_finger_lift(void)
+{
+	struct input_dev *dev = prox->prox_dev;
+
+	input_report_key(dev, BTN_TOUCH, 0);
+	input_report_key(dev, BTN_TOOL_FINGER, 0);
+	input_sync(dev);
+	prox->hover_finger_present = false;
+}
+
+/*
+ * Read the hovering-finger data registers and forward the position to the
+ * proximity input device.  When the status byte no longer reports a
+ * hovering finger, a pending hover is lifted instead.
+ */
+static void prox_hover_finger_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int z;
+	struct prox_finger_data *data;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	data = prox->finger_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_data_addr,
+			data->proximity_data,
+			sizeof(data->proximity_data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read hovering finger data\n",
+				__func__);
+		return;
+	}
+
+	if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) {
+		if (prox->hover_finger_present)
+			prox_hover_finger_lift();
+
+		return;
+	}
+
+	x = (data->x_msb << 8) | (data->x_lsb);
+	y = (data->y_msb << 8) | (data->y_lsb);
+	/* Invert so reported Z shrinks as raw Z grows — NOTE(review):
+	 * confirm intended ABS_DISTANCE polarity.
+	 */
+	z = HOVER_Z_MAX - data->z;
+
+	/* BTN_TOUCH stays 0: finger hovers, it is not touching */
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1);
+	input_report_abs(prox->prox_dev, ABS_X, x);
+	input_report_abs(prox->prox_dev, ABS_Y, y);
+	input_report_abs(prox->prox_dev, ABS_DISTANCE, z);
+
+	input_sync(prox->prox_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: x = %d y = %d z = %d\n",
+			__func__, x, y, z);
+
+	prox->hover_finger_present = true;
+
+	return;
+}
+
+/*
+ * Read-modify-write the F12 object report enable register so its
+ * HOVERING_FINGER_EN bit matches prox->hover_finger_en.
+ * Returns 0 on success or a negative errno from the register access.
+ */
+static int prox_set_hover_finger_en(void)
+{
+	int retval;
+	unsigned char object_report_enable;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read from object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	if (prox->hover_finger_en)
+		object_report_enable |= HOVERING_FINGER_EN;
+	else
+		object_report_enable &= ~HOVERING_FINGER_EN;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write to object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Configure the ABS axis ranges of the proximity (hover) input device. */
+static void prox_set_params(void)
+{
+	struct input_dev *dev = prox->prox_dev;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	input_set_abs_params(dev, ABS_X, 0, rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(dev, ABS_Y, 0, rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(dev, ABS_DISTANCE, 0, HOVER_Z_MAX, 0, 0);
+}
+
+/*
+ * Derive the addresses of the hovering-finger enable register (F12
+ * control 23) and the hovering-finger data register (F12 data 1) by
+ * summing the presence bits from query registers 5 and 8.
+ * Returns a negative errno on register-read failure.
+ */
+static int prox_reg_init(void)
+{
+	int retval;
+	unsigned char ctrl_23_offset;
+	unsigned char data_1_offset;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Control 23 offset = number of present controls before it */
+	ctrl_23_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Data 1 offset = 1 if data register 0 is present, else 0 */
+	data_1_offset = query_8.data0_is_present;
+	prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset;
+
+	return retval;
+}
+
+/*
+ * Walk the RMI Page Description Table looking for function F12, record
+ * its register base addresses, compute its bits in the F01 interrupt
+ * mask from its position in the interrupt-source list, and enable those
+ * bits in the F01 interrupt-enable register.
+ *
+ * Returns 0 on success, -EINVAL when F12 is absent, or a negative errno
+ * from a register access.
+ */
+static int prox_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		/* PDT entries are scanned downward from PDT_START */
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* Empty entry ends this page's table */
+				break;
+			}
+
+			/* Count interrupt sources of functions before F12 */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	prox->query_base_addr = fd.query_base_addr | (page << 8);
+	prox->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	prox->data_base_addr = fd.data_base_addr | (page << 8);
+	prox->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = prox_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize proximity registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* F12's interrupt bits start at intr_count within the mask byte */
+	prox->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		prox->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= prox->intr_mask;
+
+	/* F01 control 1 = interrupt enable */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* sysfs read: report the current hovering-finger enable state (0/1). */
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (prox == NULL)
+		return -ENODEV;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", prox->hover_finger_en);
+}
+
+/*
+ * sysfs write: accept "1"/"0" (hex-parsed) to enable/disable hovering-
+ * finger reporting and push the setting to the controller.
+ *
+ * Fix over the original: prox was dereferenced in the rmi4_data
+ * initializer *before* the NULL check, so writing to the attribute with
+ * no proximity handler present dereferenced a NULL pointer.
+ */
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data;
+
+	if (!prox)
+		return -ENODEV;
+
+	rmi4_data = prox->rmi4_data;
+
+	if (sscanf(buf, "%x", &input) != 1)
+		return -EINVAL;
+
+	if (input == 1)
+		prox->hover_finger_en = true;
+	else if (input == 0)
+		prox->hover_finger_en = false;
+	else
+		return -EINVAL;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change hovering finger enable setting\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * Externally visible switch for hovering-finger reporting.  Returns 0 on
+ * success, -ENODEV when the proximity handler is absent, or a negative
+ * error from the register update.
+ */
+int synaptics_rmi4_prox_hover_finger_en(bool enable)
+{
+	if (!prox)
+		return -ENODEV;
+
+	prox->hover_finger_en = enable;
+
+	/* prox_set_hover_finger_en() returns 0 or a negative errno */
+	return prox_set_hover_finger_en();
+}
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+
+/*
+ * Attention (IRQ) hook from the core: service a hover report when any of
+ * our interrupt bits fired.
+ */
+static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (prox && (prox->intr_mask & intr_mask))
+		prox_hover_finger_report();
+}
+
+/*
+ * Initialize the proximity handle: allocate state, scan the PDT, enable
+ * hover reporting, register the proximity input device, and create the
+ * sysfs attributes.  Returns 0 on success or a negative error code;
+ * everything acquired so far is released on failure.
+ */
+static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (prox) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	prox = kzalloc(sizeof(*prox), GFP_KERNEL);
+	if (!prox) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for prox\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL);
+	if (!prox->finger_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for finger_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_prox;
+	}
+
+	prox->rmi4_data = rmi4_data;
+
+	retval = prox_scan_pdt();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->hover_finger_en = true;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		/* was "return retval": leaked prox and finger_data */
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev = input_allocate_device();
+	if (prox->prox_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate proximity device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev->name = PROXIMITY_DRIVER_NAME;
+	prox->prox_dev->phys = PROX_PHYS_NAME;
+	prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(prox->prox_dev, rmi4_data);
+
+	set_bit(EV_KEY, prox->prox_dev->evbit);
+	set_bit(EV_ABS, prox->prox_dev->evbit);
+	set_bit(BTN_TOUCH, prox->prox_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit);
+#endif
+
+	prox_set_params();
+
+	retval = input_register_device(prox->prox_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register proximity device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit_free_sysfs;
+		}
+	}
+
+	return 0;
+
+exit_free_sysfs:
+	/*
+	 * attr_count is unsigned, so the original "attr_count >= 0"
+	 * countdown never terminated and indexed past attrs[].  Count
+	 * down with an explicit pre-decrement check instead.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	prox->prox_dev = NULL;
+
+exit_free_input_device:
+	/* NULL after unregister above; only free if never registered */
+	if (prox->prox_dev)
+		input_free_device(prox->prox_dev);
+
+exit_free_finger_data:
+	kfree(prox->finger_data);
+
+exit_free_prox:
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the proximity handle: remove sysfs attributes, unregister
+ * the input device, and free all state.  Always signals
+ * prox_remove_complete so module exit can proceed.
+ */
+static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!prox)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	kfree(prox->finger_data);
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	complete(&prox_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: (re)initialize if no handle exists yet; otherwise
+ * lift any reported hover finger, rescan the PDT, and restore the
+ * enable setting.  Errors from the helpers are ignored here since the
+ * callback returns void.
+ */
+static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox) {
+		synaptics_rmi4_prox_init(rmi4_data);
+		return;
+	}
+
+	prox_hover_finger_lift();
+
+	prox_scan_pdt();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+/*
+ * Reinit callback: lift any reported hover finger and re-apply the
+ * hover enable setting.  No-op when the handle does not exist.
+ */
+static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (prox) {
+		prox_hover_finger_lift();
+		prox_set_hover_finger_en();
+	}
+}
+
+/* Early-suspend callback: lift any reported hover finger */
+static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	return;
+}
+
+/* Suspend callback: lift any reported hover finger */
+static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	return;
+}
+
+/* Expansion-function registration for the core driver's callback table */
+static struct synaptics_rmi4_exp_fn proximity_module = {
+	.fn_type = RMI_PROXIMITY,
+	.init = synaptics_rmi4_prox_init,
+	.remove = synaptics_rmi4_prox_remove,
+	.reset = synaptics_rmi4_prox_reset,
+	.reinit = synaptics_rmi4_prox_reinit,
+	.early_suspend = synaptics_rmi4_prox_e_suspend,
+	.suspend = synaptics_rmi4_prox_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_prox_attn,
+};
+
+/* Module init: register the proximity expansion function with the core */
+static int __init rmi4_proximity_module_init(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, true);
+
+	return 0;
+}
+
+/*
+ * Module exit: deregister and wait for the remove callback to signal
+ * completion so state is freed before the module text goes away.
+ */
+static void __exit rmi4_proximity_module_exit(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, false);
+
+	wait_for_completion(&prox_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_proximity_module_init);
+module_exit(rmi4_proximity_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Proximity Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_dev.c
new file mode 100644
index 0000000..4392374
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_dev.c
@@ -0,0 +1,1074 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define SYSFS_FOLDER_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+/* Per-driver state for the rmidev char-device / sysfs interface */
+struct rmidev_handle {
+	dev_t dev_no;			/* device number (NOTE: see remove path) */
+	pid_t pid;			/* PID of userspace logging tool, 0 = none */
+	unsigned char intr_mask;	/* interrupt bits that trigger SIGIO */
+	unsigned char *tmpbuf;		/* scratch buffer for read/write xfers */
+	unsigned int tmpbuf_size;	/* current tmpbuf allocation size */
+	struct device dev;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;	/* "rmidev" folder under the input dev */
+	struct siginfo interrupt_signal;	/* SIGIO template */
+	struct siginfo terminate_signal;	/* SIGTERM template */
+	struct task_struct *task;	/* task resolved from pid */
+	void *data;			/* back-pointer to rmidev_data */
+	bool irq_enabled;		/* sysfs-mode attn irq requested */
+	bool concurrent;		/* also dispatch touch reports on reads */
+};
+
+/* Per-open char-device bookkeeping */
+struct rmidev_data {
+	int ref_count;			/* single-open enforcement counter */
+	struct cdev main_dev;
+	struct class *device_class;
+	struct mutex file_mutex;	/* serializes llseek/read/write/open */
+	struct rmidev_handle *rmi_dev;
+};
+
+/* Binary sysfs file exposing raw RMI register access */
+static struct bin_attribute attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = rmidev_sysfs_data_show,
+	.write = rmidev_sysfs_data_store,
+};
+
+static struct device_attribute attrs[] = {
+	__ATTR(open, 0220,
+			NULL,
+			rmidev_sysfs_open_store),
+	__ATTR(release, 0220,
+			NULL,
+			rmidev_sysfs_release_store),
+	__ATTR(attn_state, 0444,
+			rmidev_sysfs_attn_state_show,
+			NULL),
+	__ATTR(pid, 0664,
+			rmidev_sysfs_pid_show,
+			rmidev_sysfs_pid_store),
+	__ATTR(term, 0220,
+			NULL,
+			rmidev_sysfs_term_store),
+	/*
+	 * intr_mask and concurrent register store handlers, so they need
+	 * writable modes; the previous 0444 made the stores unreachable.
+	 */
+	__ATTR(intr_mask, 0664,
+			rmidev_sysfs_intr_mask_show,
+			rmidev_sysfs_intr_mask_store),
+	__ATTR(concurrent, 0664,
+			rmidev_sysfs_concurrent_show,
+			rmidev_sysfs_concurrent_store),
+};
+
+static int rmidev_major_num;
+
+static struct class *rmidev_device_class;
+
+static struct rmidev_handle *rmidev;
+
+DECLARE_COMPLETION(rmidev_remove_complete_v26);
+
+/* Threaded irq handler: wake sysfs pollers of the attn_state attribute */
+static irqreturn_t rmidev_sysfs_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj,
+			SYSFS_FOLDER_NAME, "attn_state");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Switch the attention irq between driver mode and sysfs-notify mode.
+ * Enabling clears pending interrupt status (read of F01 data + 1) and
+ * requests a threaded irq routed to rmidev_sysfs_irq; disabling frees
+ * it again.  Idempotent in both directions via the irq_enabled flag.
+ */
+static int rmidev_sysfs_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char intr_status[MAX_INTR_REGISTERS];
+	unsigned long irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+			IRQF_ONESHOT;
+
+	if (enable) {
+		if (rmidev->irq_enabled)
+			return retval;
+
+		/* Clear interrupts first */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				intr_status,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0)
+			return retval;
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				rmidev_sysfs_irq, irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			return retval;
+		}
+
+		rmidev->irq_enabled = true;
+	} else {
+		if (rmidev->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmidev->irq_enabled = false;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Binary sysfs read: read `count` bytes of RMI registers starting at
+ * offset `pos` into buf.  In concurrent mode, a read that starts at the
+ * F01 data base and includes the interrupt status byte also dispatches
+ * touch reports for any handler whose interrupt bit is set.
+ */
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned char intr_status = 0;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	if (!rmidev->concurrent)
+		goto exit;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto exit;
+
+	if (length <= 1)
+		goto exit;
+
+	/* buf[1] is the first interrupt status register */
+	intr_status = buf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+exit:
+	return length;
+}
+
+/*
+ * Binary sysfs write: write `count` bytes from buf to RMI registers
+ * starting at offset `pos`.  Rejects zero-length and out-of-map writes.
+ */
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return length;
+}
+
+/*
+ * sysfs "open": writing 1 hands the attention irq over to sysfs-notify
+ * mode (driver irq off, rmidev irq on) and keeps the device awake.
+ * Refused while the sensor is sleeping.
+ */
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	rmidev_sysfs_irq_enable(rmi4_data, true);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	return count;
+}
+
+/*
+ * sysfs "release": writing 1 returns the attention irq to the driver,
+ * resets the device, and clears stay_awake.  Mirror of open_store.
+ */
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	rmidev_sysfs_irq_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt enabled\n",
+			__func__);
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return count;
+}
+
+/* sysfs show: current level of the attention GPIO line */
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int attn_state;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	attn_state = gpio_get_value(bdata->irq_gpio);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", attn_state);
+}
+
+/* sysfs show: PID registered to receive SIGIO/SIGTERM (0 = none) */
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", rmidev->pid);
+}
+
+/*
+ * sysfs store: register the PID of a userspace tool to be signalled on
+ * attention interrupts.  A non-zero PID is resolved to its task struct
+ * immediately; an unknown PID is rejected.
+ * NOTE(review): pid_task() is called without RCU/tasklist protection
+ * and the cached task pointer is not reference-counted — verify
+ * against the signalling paths.
+ */
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	rmidev->pid = input;
+
+	if (rmidev->pid) {
+		rmidev->task = pid_task(find_vpid(rmidev->pid), PIDTYPE_PID);
+		if (!rmidev->task) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to locate PID of data logging tool\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+/* sysfs store: writing 1 sends SIGTERM to the registered PID, if any */
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmidev->pid)
+		send_sig_info(SIGTERM, &rmidev->terminate_signal, rmidev->task);
+
+	return count;
+}
+
+/* sysfs show: interrupt mask that triggers SIGIO, as 0xNN */
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", rmidev->intr_mask);
+}
+
+/* sysfs store: set the interrupt mask (decimal input, low byte kept) */
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	rmidev->intr_mask = (unsigned char)input;
+
+	return count;
+}
+
+/* sysfs show: concurrent-reporting flag (1 or 0) */
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", rmidev->concurrent);
+}
+
+/* sysfs store: any value > 0 enables concurrent touch reporting */
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	rmidev->concurrent = input > 0 ? true : false;
+
+	return count;
+}
+
+/*
+ * Ensure rmidev->tmpbuf can hold count + 1 bytes, growing (never
+ * shrinking) it as needed.  Returns 0 on success, -ENOMEM on failure,
+ * in which case tmpbuf is NULL and tmpbuf_size is reset to 0.
+ */
+static int rmidev_allocate_buffer(int count)
+{
+	if (count + 1 > rmidev->tmpbuf_size) {
+		if (rmidev->tmpbuf_size)
+			kfree(rmidev->tmpbuf);
+		rmidev->tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+		if (!rmidev->tmpbuf) {
+			dev_err(rmidev->rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			rmidev->tmpbuf_size = 0;
+			return -ENOMEM;
+		}
+		rmidev->tmpbuf_size = count + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * rmidev_llseek - set register address to access for RMI device
+ *
+ * @filp: pointer to file structure
+ * @off:
+ *	if whence == SEEK_SET,
+ *		off: 16-bit RMI register address
+ *	if whence == SEEK_CUR,
+ *		off: offset from current position
+ *	if whence == SEEK_END,
+ *		off: offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ *
+ * Returns the new position, or -EINVAL for a bad whence/out-of-range
+ * target, or -EBADF for invalid private data.
+ */
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	switch (whence) {
+	case SEEK_SET:
+		newpos = off;
+		break;
+	case SEEK_CUR:
+		newpos = filp->f_pos + off;
+		break;
+	case SEEK_END:
+		newpos = REG_ADDR_LIMIT + off;
+		break;
+	default:
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	/* Positions must stay within the 16-bit register map */
+	if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: New position 0x%04x is invalid\n",
+				__func__, (unsigned int)newpos);
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	filp->f_pos = newpos;
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return newpos;
+}
+
+/*
+ * rmidev_read: read register data from RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to read
+ * @f_pos: starting RMI register address
+ *
+ * Reads into the shared tmpbuf, copies out to userspace, and advances
+ * *f_pos.  In concurrent mode, a read from the F01 data base that
+ * includes the interrupt status byte also dispatches touch reports.
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	unsigned char intr_status = 0;
+	unsigned short address;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	if (*f_pos > REG_ADDR_LIMIT) {
+		retval = -EFAULT;
+		goto clean_up;
+	}
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	if (count == 0) {
+		retval = 0;
+		goto clean_up;
+	}
+	address = (unsigned short)(*f_pos);
+
+	/* Previously unchecked: tmpbuf is NULL if allocation failed */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto clean_up;
+
+	retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval < 0)
+		goto clean_up;
+
+	if (copy_to_user(buf, rmidev->tmpbuf, count))
+		retval = -EFAULT;
+	else
+		*f_pos += retval;
+
+	if (!rmidev->concurrent)
+		goto clean_up;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto clean_up;
+
+	if (count <= 1)
+		goto clean_up;
+
+	/* tmpbuf[1] is the first interrupt status register */
+	intr_status = rmidev->tmpbuf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_write: write register data to RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to write
+ * @f_pos: starting RMI register address
+ *
+ * Copies the user data into the shared tmpbuf, writes it to the device,
+ * and advances *f_pos.  All exits release file_mutex.
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	struct rmidev_data *dev_data = filp->private_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	if (*f_pos > REG_ADDR_LIMIT) {
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	if (count == 0) {
+		retval = 0;
+		goto unlock;
+	}
+
+	/* Previously unchecked: tmpbuf is NULL if allocation failed */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto unlock;
+
+	if (copy_from_user(rmidev->tmpbuf, buf, count)) {
+		/*
+		 * Was "return -EFAULT;" followed by an unreachable goto,
+		 * which exited with file_mutex still held.
+		 */
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval >= 0)
+		*f_pos += retval;
+
+unlock:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * Char device open: single-opener policy enforced via ref_count.
+ * Disables the driver's attention irq and keeps the device awake for
+ * the duration of the session.  Refused while the sensor sleeps.
+ */
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	filp->private_data = dev_data;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	/* Allow only one concurrent opener */
+	if (dev_data->ref_count < 1)
+		dev_data->ref_count++;
+	else
+		retval = -EACCES;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * Char device release: drop the open reference, restore the driver's
+ * attention irq, reset the device, and clear stay_awake.
+ */
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	dev_data->ref_count--;
+	if (dev_data->ref_count < 0)
+		dev_data->ref_count = 0;
+
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt enabled\n",
+			__func__);
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return 0;
+}
+
+/* File operations for the /dev/rmi* char device */
+static const struct file_operations rmidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = rmidev_llseek,
+	.read = rmidev_read,
+	.write = rmidev_write,
+	.open = rmidev_open,
+	.release = rmidev_release,
+};
+
+/*
+ * Tear down the char device: destroy the device node (if created),
+ * delete the cdev, and release its chrdev region.  Safe to call with
+ * NULL dev_data.
+ */
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+	dev_t devno;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (dev_data) {
+		devno = dev_data->main_dev.dev;
+
+		if (dev_data->device_class)
+			device_destroy(dev_data->device_class, devno);
+
+		cdev_del(&dev_data->main_dev);
+
+		unregister_chrdev_region(devno, 1);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: rmidev device removed\n",
+				__func__);
+	}
+
+	return;
+}
+
+/* Class devnode callback: place nodes under /dev/rmi/ with 0666 mode */
+static char *rmi_char_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	*mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+	return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+/*
+ * Create the "rmidev" device class once; subsequent calls are no-ops.
+ * Returns 0 on success or -ENODEV if class creation failed.
+ */
+static int rmidev_create_device_class(void)
+{
+	if (rmidev_device_class != NULL)
+		return 0;
+
+	rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+	if (IS_ERR(rmidev_device_class)) {
+		pr_err("%s: Failed to create /dev/%s\n",
+				__func__, CHAR_DEVICE_NAME);
+		return -ENODEV;
+	}
+
+	rmidev_device_class->devnode = rmi_char_devnode;
+
+	return 0;
+}
+
+/* Attention callback: SIGIO the registered task when a masked bit fires */
+static void rmidev_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!rmidev)
+		return;
+
+	if (rmidev->pid && (rmidev->intr_mask & intr_mask))
+		send_sig_info(SIGIO, &rmidev->interrupt_signal, rmidev->task);
+
+	return;
+}
+
+/*
+ * Initialize the rmidev interface: allocate the handle, prepare the
+ * SIGIO/SIGTERM templates, create the device class, register the char
+ * device region, add the cdev and device node, export the attention
+ * GPIO, and create the sysfs folder and attributes.  Returns 0 on
+ * success or a negative error code, unwinding on failure.
+ */
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	dev_t dev_no;
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	struct device *device_ptr;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmidev) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+	if (!rmidev) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for rmidev\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_rmidev;
+	}
+
+	rmidev->rmi4_data = rmi4_data;
+
+	memset(&rmidev->interrupt_signal, 0, sizeof(rmidev->interrupt_signal));
+	rmidev->interrupt_signal.si_signo = SIGIO;
+	rmidev->interrupt_signal.si_code = SI_USER;
+
+	memset(&rmidev->terminate_signal, 0, sizeof(rmidev->terminate_signal));
+	rmidev->terminate_signal.si_signo = SIGTERM;
+	rmidev->terminate_signal.si_code = SI_USER;
+
+	retval = rmidev_create_device_class();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create device class\n",
+				__func__);
+		goto err_device_class;
+	}
+
+	if (rmidev_major_num) {
+		dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+		retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+	} else {
+		retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to allocate char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+
+		rmidev_major_num = MAJOR(dev_no);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Major number of rmidev = %d\n",
+				__func__, rmidev_major_num);
+	}
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for dev_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_dev_data;
+	}
+
+	mutex_init(&dev_data->file_mutex);
+	dev_data->rmi_dev = rmidev;
+	rmidev->data = dev_data;
+
+	cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+	retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to add rmi char device\n",
+				__func__);
+		goto err_char_device;
+	}
+
+	dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+	dev_data->device_class = rmidev_device_class;
+
+	device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+			NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+	if (IS_ERR(device_ptr)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create rmi char device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_char_device;
+	}
+
+	/* GPIO export failures are logged but not fatal */
+	retval = gpio_export(bdata->irq_gpio, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to export attention gpio\n",
+				__func__);
+	} else {
+		retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+				"attn", bdata->irq_gpio);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s Failed to create gpio symlink\n",
+					__func__);
+		} else {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Exported attention gpio %d\n",
+					__func__, bdata->irq_gpio);
+		}
+	}
+
+	rmidev->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!rmidev->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_sysfs_dir;
+	}
+
+	retval = sysfs_create_bin_file(rmidev->sysfs_dir,
+			&attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto err_sysfs_bin;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(rmidev->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto err_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+err_sysfs_attrs:
+	/*
+	 * attr_count is unsigned, so the original "attr_count >= 0"
+	 * countdown never terminated and indexed past attrs[].
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+err_sysfs_bin:
+	kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+err_char_device:
+	rmidev_device_cleanup(dev_data);
+	kfree(dev_data);
+	/*
+	 * rmidev_device_cleanup() already released the chrdev region;
+	 * previously this fell through to err_dev_data and released it
+	 * a second time.
+	 */
+	goto err_device_region;
+
+err_dev_data:
+	unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+err_device_class:
+	kfree(rmidev);
+	rmidev = NULL;
+
+err_rmidev:
+	return retval;
+}
+
+/*
+ * Tear down the rmidev interface: remove sysfs entries, unexport the
+ * attention GPIO, destroy the char device, and free all state.
+ * Always signals rmidev_remove_complete_v26 so module exit proceeds.
+ */
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!rmidev)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+	kobject_put(rmidev->sysfs_dir);
+
+	gpio_unexport(bdata->irq_gpio);
+
+	dev_data = rmidev->data;
+	if (dev_data) {
+		rmidev_device_cleanup(dev_data);
+		kfree(dev_data);
+	}
+
+	/*
+	 * NOTE(review): rmidev->dev_no is never assigned in the visible
+	 * init path (the region was registered with a local dev_no, and
+	 * rmidev_device_cleanup() already unregistered it), so this call
+	 * appears to release region 0 — verify against init.
+	 */
+	unregister_chrdev_region(rmidev->dev_no, 1);
+
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+	kfree(rmidev->tmpbuf);
+
+	kfree(rmidev);
+	rmidev = NULL;
+
+exit:
+	complete(&rmidev_remove_complete_v26);
+
+	return;
+}
+
+/* Expansion-function registration for the core driver's callback table */
+static struct synaptics_rmi4_exp_fn rmidev_module = {
+	.fn_type = RMI_DEV,
+	.init = rmidev_init_device,
+	.remove = rmidev_remove_device,
+	.reset = NULL,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = rmidev_attn,
+};
+
+/* Module init: register the rmidev expansion function with the core */
+static int __init rmidev_module_init(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, true);
+
+	return 0;
+}
+
+/*
+ * Module exit: deregister and wait for the remove callback to signal
+ * completion so state is freed before the module text goes away.
+ */
+static void __exit rmidev_module_exit(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, false);
+
+	wait_for_completion(&rmidev_remove_complete_v26);
+
+	return;
+}
+
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX RMI Dev Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_hid_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_hid_i2c.c
new file mode 100644
index 0000000..7e02487
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_hid_i2c.c
@@ -0,0 +1,1006 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+/* Default HID report IDs, used if the report descriptor cannot be parsed. */
+#define REPORT_ID_GET_BLOB 0x07
+#define REPORT_ID_WRITE 0x09
+#define REPORT_ID_READ_ADDRESS 0x0a
+#define REPORT_ID_READ_DATA 0x0b
+#define REPORT_ID_SET_RMI_MODE 0x0f
+
+/* HID report-descriptor item prefixes (short items, per the HID spec). */
+#define PREFIX_USAGE_PAGE_1BYTE 0x05
+#define PREFIX_USAGE_PAGE_2BYTES 0x06
+#define PREFIX_USAGE 0x09
+#define PREFIX_REPORT_ID 0x85
+#define PREFIX_REPORT_COUNT_1BYTE 0x95
+#define PREFIX_REPORT_COUNT_2BYTES 0x96
+
+/* Vendor-defined usages recognized inside VENDOR_DEFINED_PAGE. */
+#define USAGE_GET_BLOB 0xc5
+#define USAGE_WRITE 0x02
+#define USAGE_READ_ADDRESS 0x03
+#define USAGE_READ_DATA 0x04
+#define USAGE_SET_MODE 0x06
+
+#define FEATURE_REPORT_TYPE 0x03
+
+#define VENDOR_DEFINED_PAGE 0xff00
+
+/* Fallback blob size when the descriptor does not report one. */
+#define BLOB_REPORT_SIZE 256
+
+/* HID over I2C command opcodes. */
+#define RESET_COMMAND 0x01
+#define GET_REPORT_COMMAND 0x02
+#define SET_REPORT_COMMAND 0x03
+#define SET_POWER_COMMAND 0x08
+
+/* Device operating modes selected through the set-mode feature report. */
+#define FINGER_MODE 0x00
+#define RMI_MODE 0x02
+
+/*
+ * Report IDs discovered by parsing the device's HID report descriptor,
+ * plus the size of the "blob" feature report. Populated (or defaulted)
+ * in parse_report_descriptor().
+ */
+struct hid_report_info {
+	unsigned char get_blob_id;
+	unsigned char write_id;
+	unsigned char read_addr_id;
+	unsigned char read_data_id;
+	unsigned char set_mode_id;
+	unsigned int blob_size;
+};
+
+/* Single-instance state: this bus module supports one device at a time. */
+static struct hid_report_info hid_report;
+
+/*
+ * HID over I2C device descriptor, read verbatim from the register address
+ * given by the "synaptics,dev-dscrptr-addr" DT property. Field layout and
+ * order follow the HID-over-I2C protocol specification.
+ */
+struct hid_device_descriptor {
+	unsigned short device_descriptor_length;
+	unsigned short format_version;
+	unsigned short report_descriptor_length;
+	unsigned short report_descriptor_index;
+	unsigned short input_register_index;
+	unsigned short input_report_max_length;
+	unsigned short output_register_index;
+	unsigned short output_report_max_length;
+	unsigned short command_register_index;
+	unsigned short data_register_index;
+	unsigned short vendor_id;
+	unsigned short product_id;
+	unsigned short version_id;
+	unsigned int reserved;
+};
+
+static struct hid_device_descriptor hid_dd;
+
+/*
+ * Lazily-grown scratch buffers shared by all transfers; resized on demand
+ * by check_buffer(). Access is serialized by rmi4_io_ctrl_mutex at the
+ * call sites, never by this module itself.
+ */
+struct i2c_rw_buffer {
+	unsigned char *read;
+	unsigned char *write;
+	unsigned short read_size;
+	unsigned short write_size;
+};
+
+static struct i2c_rw_buffer buffer;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate board data from "synaptics,*" device-tree
+ * properties. Optional properties fall back to documented defaults
+ * (0, -1 or NULL); a present-but-unreadable property is an error.
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,dev-dscrptr-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,dev-dscrptr-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,dev-dscrptr-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->device_descriptor_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->device_descriptor_addr = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/*
+	 * Boolean properties: presence alone enables the flag. Test the
+	 * pointer for non-NULL; relational comparison of a pointer against
+	 * 0 (prop > 0) is not valid C.
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		/* Each virtual button is described by 5 consecutive u32s. */
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Issue a single i2c_msg, retrying up to SYN_I2C_RETRY_TIMES with a 20 ms
+ * back-off between attempts. Returns 0 on success, -EIO when all retries
+ * are exhausted.
+ */
+static int do_i2c_transfer(struct i2c_client *client, struct i2c_msg *msg)
+{
+	unsigned char retry;
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			break;
+		dev_err(&client->dev,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+	}
+
+	/* Loop ran to completion without a successful transfer. */
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(&client->dev,
+				"%s: I2C transfer over retry limit\n",
+				__func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * check_buffer() - ensure *buffer holds at least @length bytes, growing it
+ * with kzalloc() when needed. Returns 0 on success, -ENOMEM on allocation
+ * failure. On failure the old buffer has already been freed, so the size
+ * is reset to 0 to keep the (pointer, size) pair consistent; otherwise a
+ * later call could see a stale non-zero size alongside a NULL pointer and
+ * skip reallocation, leading callers to dereference NULL.
+ */
+static int check_buffer(unsigned char **buffer, unsigned short *buffer_size,
+		unsigned short length)
+{
+	if (*buffer_size < length) {
+		if (*buffer_size)
+			kfree(*buffer);
+		*buffer = kzalloc(length, GFP_KERNEL);
+		if (!(*buffer)) {
+			*buffer_size = 0;
+			return -ENOMEM;
+		}
+		*buffer_size = length;
+	}
+
+	return 0;
+}
+
+/*
+ * generic_read() - read @length bytes from the device into the shared
+ * read buffer. Caller must hold rmi4_io_ctrl_mutex. Returns 0 on success
+ * or a negative errno.
+ */
+static int generic_read(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+		}
+	};
+
+	/*
+	 * Propagate allocation failure instead of handing a NULL buffer to
+	 * the I2C core (the original ignored this return value).
+	 */
+	retval = check_buffer(&buffer.read, &buffer.read_size, length);
+	if (retval < 0)
+		return retval;
+	msg[0].buf = buffer.read;
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+/*
+ * generic_write() - send the first @length bytes of the shared write
+ * buffer to the device. The caller must have already sized and filled
+ * buffer.write, and must hold rmi4_io_ctrl_mutex. Returns 0 on success
+ * or a negative errno from do_i2c_transfer().
+ */
+static int generic_write(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buffer.write,
+		}
+	};
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+/*
+ * Advance *index past one HID short item in the report descriptor held in
+ * buffer.read. The low 2 bits of the item prefix encode the data size
+ * (0, 1, 2 or 4 bytes); the prefix byte itself accounts for the +1.
+ */
+static void traverse_report_descriptor(unsigned int *index)
+{
+	unsigned char size;
+	unsigned char *buf = buffer.read;
+
+	size = buf[*index] & MASK_2BIT;
+	switch (size) {
+	case 0: /* 0 bytes */
+		*index += 1;
+		break;
+	case 1: /* 1 byte */
+		*index += 2;
+		break;
+	case 2: /* 2 bytes */
+		*index += 3;
+		break;
+	case 3: /* 4 bytes */
+		*index += 5;
+		break;
+	default:
+		/* Unreachable: size is masked to 2 bits above. */
+		break;
+	}
+
+	return;
+}
+
+/*
+ * Scan the report descriptor from @index (the blob report's Report ID
+ * item) for the next Report Count item and record its value as the blob
+ * size. Leaves hid_report.blob_size untouched if none is found before
+ * the end of the descriptor. Multi-byte counts are little-endian.
+ */
+static void find_blob_size(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		if (buf[ii] == PREFIX_REPORT_COUNT_1BYTE) {
+			hid_report.blob_size = buf[ii + 1];
+			return;
+		} else if (buf[ii] == PREFIX_REPORT_COUNT_2BYTES) {
+			hid_report.blob_size = buf[ii + 1] | (buf[ii + 2] << 8);
+			return;
+		}
+		traverse_report_descriptor(&ii);
+	}
+
+	return;
+}
+
+/*
+ * Inspect the single descriptor item at @index and update hid_report with
+ * any vendor-defined report IDs found.
+ *
+ * NOTE: the static locals carry the most recently seen report ID and
+ * usage page across successive calls while the caller walks the
+ * descriptor item by item — this function is stateful and not reentrant.
+ */
+static void find_reports(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+	static unsigned int report_id_index;
+	static unsigned char report_id;
+	static unsigned short usage_page;
+
+	/* Remember the current Report ID (and where it was declared). */
+	if (buf[ii] == PREFIX_REPORT_ID) {
+		report_id = buf[ii + 1];
+		report_id_index = ii;
+		return;
+	}
+
+	/* Track the current Usage Page (1- or 2-byte little-endian data). */
+	if (buf[ii] == PREFIX_USAGE_PAGE_1BYTE) {
+		usage_page = buf[ii + 1];
+		return;
+	} else if (buf[ii] == PREFIX_USAGE_PAGE_2BYTES) {
+		usage_page = buf[ii + 1] | (buf[ii + 2] << 8);
+		return;
+	}
+
+	/* Only vendor-defined usages map to the RMI access reports. */
+	if ((usage_page == VENDOR_DEFINED_PAGE) && (buf[ii] == PREFIX_USAGE)) {
+		switch (buf[ii + 1]) {
+		case USAGE_GET_BLOB:
+			hid_report.get_blob_id = report_id;
+			find_blob_size(report_id_index);
+			break;
+		case USAGE_WRITE:
+			hid_report.write_id = report_id;
+			break;
+		case USAGE_READ_ADDRESS:
+			hid_report.read_addr_id = report_id;
+			break;
+		case USAGE_READ_DATA:
+			hid_report.read_data_id = report_id;
+			break;
+		case USAGE_SET_MODE:
+			hid_report.set_mode_id = report_id;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * parse_report_descriptor() - fetch the HID report descriptor and extract
+ * the report IDs used for RMI register access. Defaults are installed
+ * first so the driver still works if the walk finds nothing. Caller must
+ * hold rmi4_io_ctrl_mutex and have sized buffer.write for >= 2 bytes.
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_report_descriptor(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned int ii = 0;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	/* Request the report descriptor by its register index (LE16). */
+	buffer.write[0] = hid_dd.report_descriptor_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.report_descriptor_index >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		return retval;
+	retval = generic_read(i2c, hid_dd.report_descriptor_length);
+	if (retval < 0)
+		return retval;
+
+	/* Fallback IDs, overwritten by whatever the descriptor declares. */
+	hid_report.get_blob_id = REPORT_ID_GET_BLOB;
+	hid_report.write_id = REPORT_ID_WRITE;
+	hid_report.read_addr_id = REPORT_ID_READ_ADDRESS;
+	hid_report.read_data_id = REPORT_ID_READ_DATA;
+	hid_report.set_mode_id = REPORT_ID_SET_RMI_MODE;
+	hid_report.blob_size = BLOB_REPORT_SIZE;
+
+	/* find_reports() reads buffer.read directly; the local copy of the
+	 * buffer pointer in the original was never used and is dropped. */
+	while (ii < hid_dd.report_descriptor_length) {
+		find_reports(ii);
+		traverse_report_descriptor(&ii);
+	}
+
+	return 0;
+}
+
+/*
+ * switch_to_rmi() - issue a SET_REPORT feature command that puts the
+ * device into RMI register-access mode. Bytes 0-1: command register,
+ * 2-3: SET_REPORT opcode for the set-mode report, 4: report ID,
+ * 5-6: data register, 7-8: payload length (4, LE), 9-10: payload
+ * (report ID + RMI_MODE). Returns 0 on success or a negative errno.
+ */
+static int switch_to_rmi(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* NOTE(review): check_buffer() failure is ignored here; if the
+	 * allocation ever fails, buffer.write would be NULL below. */
+	check_buffer(&buffer.write, &buffer.write_size, 11);
+
+	/* set rmi mode */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = SET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+	buffer.write[7] = 0x04;
+	buffer.write[8] = 0x00;
+	buffer.write[9] = hid_report.set_mode_id;
+	buffer.write[10] = RMI_MODE;
+
+	retval = generic_write(i2c, 11);
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * check_report_mode() - query the device's current report mode via a
+ * GET_REPORT feature command. The first read returns the 2-byte report
+ * length; the command is reissued and the full report read, with the mode
+ * in byte 3. Returns the mode (e.g. RMI_MODE) on success or a negative
+ * errno on transfer failure.
+ */
+static int check_report_mode(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned short report_size;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* NOTE(review): check_buffer() failure is ignored here. */
+	check_buffer(&buffer.write, &buffer.write_size, 7);
+
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = GET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	/* First response: the report length, little-endian. */
+	retval = generic_read(i2c, 2);
+	if (retval < 0)
+		goto exit;
+
+	report_size = (buffer.read[1] << 8) | buffer.read[0];
+
+	/* Reissue the command, then read the full report. */
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, report_size);
+	if (retval < 0)
+		goto exit;
+
+	/* Byte 3 of the report carries the current mode. */
+	retval = buffer.read[3];
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Report mode = %d\n",
+			__func__, retval);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * hid_i2c_init() - bring up the HID over I2C interface: read the device
+ * descriptor, parse the report descriptor, power on, reset, drain the
+ * post-reset input report, fetch the blob report, and finally switch the
+ * device into RMI mode. The step order matters and follows the HID-I2C
+ * protocol. Returns 0 on success or a negative errno.
+ */
+static int hid_i2c_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* NOTE(review): check_buffer() failure is ignored here. */
+	check_buffer(&buffer.write, &buffer.write_size, 6);
+
+	/* read device descriptor */
+	buffer.write[0] = bdata->device_descriptor_addr & MASK_8BIT;
+	buffer.write[1] = bdata->device_descriptor_addr >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		goto exit;
+	retval = generic_read(i2c, sizeof(hid_dd));
+	if (retval < 0)
+		goto exit;
+	retval = secure_memcpy((unsigned char *)&hid_dd,
+			sizeof(struct hid_device_descriptor),
+			buffer.read,
+			buffer.read_size,
+			sizeof(hid_dd));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy device descriptor data\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = parse_report_descriptor(rmi4_data);
+	if (retval < 0)
+		goto exit;
+
+	/* set power */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = SET_POWER_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* reset */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = RESET_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* Wait for the device to assert attention after reset.
+	 * NOTE(review): no timeout — a dead device hangs this loop. */
+	while (gpio_get_value(bdata->irq_gpio))
+		msleep(20);
+
+	/* Drain the reset-complete input report. */
+	retval = generic_read(i2c, hid_dd.input_report_max_length);
+	if (retval < 0)
+		goto exit;
+
+	/* get blob */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.get_blob_id;
+	buffer.write[3] = 0x02;
+	buffer.write[4] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[5] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 6);
+	if (retval < 0)
+		goto exit;
+
+	msleep(20);
+
+	/* Blob payload plus 2-byte length prefix and report ID. */
+	retval = generic_read(i2c, hid_report.blob_size + 3);
+	if (retval < 0)
+		goto exit;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize HID/I2C interface\n",
+				__func__);
+		return retval;
+	}
+
+	/* switch_to_rmi() takes the mutex itself, so call it unlocked. */
+	retval = switch_to_rmi(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_read() - read @length bytes from RMI register @addr
+ * through the HID read-address output report, then poll for the matching
+ * input report. Returns @length on success or a negative errno. If the
+ * transfer fails, makes one attempt to recover by re-initializing the
+ * HID interface when the device has dropped out of RMI mode.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char recover = 1;	/* one-shot recovery budget */
+	unsigned short report_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = hid_dd.output_report_max_length + 2,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			.len = length + 4,	/* length prefix + report hdr */
+		},
+	};
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* NOTE(review): check_buffer() failures are ignored here. */
+	check_buffer(&buffer.write, &buffer.write_size,
+			hid_dd.output_report_max_length + 2);
+	msg[0].buf = buffer.write;
+	/* Output report: register index, length, report ID, then addr/len. */
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.read_addr_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = length & MASK_8BIT;
+	buffer.write[9] = length >> 8;
+
+	check_buffer(&buffer.read, &buffer.read_size, length + 4);
+	msg[1].buf = buffer.read;
+
+	retval = do_i2c_transfer(i2c, &msg[0]);
+	if (retval != 0)
+		goto exit;
+
+	/* Poll until the read-data input report arrives (full-length). */
+	retry = 0;
+	do {
+		retval = do_i2c_transfer(i2c, &msg[1]);
+		if (retval == 0)
+			retval = length;
+		else
+			goto exit;
+
+		report_length = (buffer.read[1] << 8) | buffer.read[0];
+		if (report_length == hid_dd.input_report_max_length) {
+			/* Payload starts after the 4-byte report header. */
+			retval = secure_memcpy(&data[0], length,
+					&buffer.read[4], buffer.read_size - 4,
+					length);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy data\n",
+						__func__);
+			} else {
+				retval = length;
+			}
+			goto exit;
+		}
+
+		msleep(20);
+		retry++;
+	} while (retry < SYN_I2C_RETRY_TIMES);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to receive read report\n",
+			__func__);
+	retval = -EIO;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* One recovery attempt: if the device left RMI mode, re-init and
+	 * retry the whole transaction once. */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_write() - write @length bytes of @data to RMI
+ * register @addr through the HID write output report. Returns @length on
+ * success or a negative errno; makes one recovery attempt (re-init) if
+ * the device has dropped out of RMI mode.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char recover = 1;	/* one-shot recovery budget */
+	unsigned short msg_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+		}
+	};
+
+	/*
+	 * Message is the larger of (payload + 10-byte header) and the
+	 * device's minimum output report size. msg_length must be
+	 * unsigned short (i2c_msg.len is __u16): the original used
+	 * unsigned char, silently truncating any size above 255.
+	 */
+	if ((length + 10) < (hid_dd.output_report_max_length + 2))
+		msg_length = hid_dd.output_report_max_length + 2;
+	else
+		msg_length = length + 10;
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* NOTE(review): check_buffer() failure is ignored here. */
+	check_buffer(&buffer.write, &buffer.write_size, msg_length);
+	msg[0].len = msg_length;
+	msg[0].buf = buffer.write;
+	/* Output report: register index, length, report ID, addr, len. */
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.write_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = length & MASK_8BIT;
+	buffer.write[9] = length >> 8;
+	retval = secure_memcpy(&buffer.write[10], buffer.write_size - 10,
+			&data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+	} else {
+		retval = do_i2c_transfer(i2c, msg);
+		if (retval == 0)
+			retval = length;
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* One recovery attempt, mirroring synaptics_rmi4_i2c_read(). */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/* Bus-access vtable handed to the DSX core via hw_if. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/* Hardware interface passed to the core as the platform device's
+ * platform_data; populated in probe. */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* Platform device that carries hw_if to the DSX core driver. */
+static struct platform_device *synaptics_dsx_i2c_device;
+
+/*
+ * Device release callback: frees the manually kzalloc'd platform device
+ * once its last reference is dropped (after platform_device_unregister).
+ */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+/*
+ * I2C probe: verify adapter functionality, build the board data (from DT
+ * when available), and register a platform device that hands hw_if to
+ * the DSX core driver. Returns 0 on success or a negative errno.
+ *
+ * The original leaked synaptics_dsx_i2c_device on every error path after
+ * its allocation — the release callback only runs for a registered
+ * device — so the device is now freed explicitly on those paths.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		/* Board data and button maps are devm-managed; only the
+		 * platform device needs manual cleanup on failure. */
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		parse_dt(&client->dev, hw_if.board_data);
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.bl_hw_init = switch_to_rmi;
+	hw_if.ui_hw_init = hid_i2c_init;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* NOTE(review): not freed here — registration may have
+		 * initialized the device, in which case the release
+		 * callback owns the memory; freeing could double-free.
+		 * Converting to platform_device_alloc()/put() would make
+		 * this path clean. */
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_device:
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/*
+ * I2C remove: free the shared transfer buffers and unregister the
+ * platform device (whose release callback frees the device struct).
+ *
+ * The buffer pointers and sizes are reset after freeing: the original
+ * left the sizes non-zero, so a driver unbind/rebind would make
+ * check_buffer() believe the freed buffers were still valid and the
+ * next transfer would use dangling pointers.
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	kfree(buffer.read);
+	buffer.read = NULL;
+	buffer.read_size = 0;
+
+	kfree(buffer.write);
+	buffer.write = NULL;
+	buffer.write_size = 0;
+
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* I2C device ID table for non-DT matching and module autoloading. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device-tree match table; const per kernel convention
+ * (driver.of_match_table is a pointer to const). */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-rmi-hid-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C driver definition; registered by synaptics_rmi4_bus_init_v26(). */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Register the I2C driver; called by the DSX core during its init. */
+int synaptics_rmi4_bus_init_v26(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init_v26);
+
+/* Unregister the I2C driver; called by the DSX core during its exit. */
+void synaptics_rmi4_bus_exit_v26(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit_v26);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_spi.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_spi.c
new file mode 100644
index 0000000..382a3dd
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_spi.c
@@ -0,0 +1,634 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SPI_READ 0x80
+#define SPI_WRITE 0x00
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate @bdata from the "synaptics,*" properties of
+ * @dev's device-tree node.
+ *
+ * Optional scalar properties fall back to a default (0 or -1) when the
+ * property is absent; a property that is present but unreadable is an
+ * error.  Button-map storage is devm-allocated against @dev, so no
+ * explicit cleanup is needed on failure paths.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,byte-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->byte_delay_us = value;
+		}
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,block-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->block_delay_us = value;
+		}
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/*
+	 * Presence-only boolean properties: test the pointer against NULL.
+	 * (An ordered comparison such as "prop > 0" between a pointer and
+	 * an integer constant is not valid C.)
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	/* 0D capacitive button key codes (one u32 key code per button). */
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			/* Malformed array: fall back to "no buttons". */
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/* Virtual buttons: 5 u32 cells per button. */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Select the RMI register page for @addr (high byte above bit 7).
+ *
+ * Writes the 3-byte page-select sequence (write opcode, 0xFF register,
+ * page number) as one spi_transfer per byte, with the DT-configured
+ * inter-byte delay; the last byte optionally carries the block delay.
+ * The write is skipped when the cached current_page already matches.
+ *
+ * Caller must hold rmi4_io_ctrl_mutex (see the read/write paths below).
+ * Returns PAGE_SELECT_LEN on success or a negative errno from spi_sync().
+ */
+static int synaptics_rmi4_spi_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval;
+	unsigned int index;
+	unsigned int xfer_count = PAGE_SELECT_LEN + 1;
+	unsigned char txbuf[xfer_count];
+	unsigned char page;
+	struct spi_message msg;
+	struct spi_transfer xfers[xfer_count];
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	page = ((addr >> 8) & ~MASK_7BIT);
+	if (page != rmi4_data->current_page) {
+		spi_message_init(&msg);
+
+		txbuf[0] = SPI_WRITE;
+		txbuf[1] = MASK_8BIT;
+		txbuf[2] = page;
+
+		for (index = 0; index < xfer_count; index++) {
+			memset(&xfers[index], 0, sizeof(struct spi_transfer));
+			xfers[index].len = 1;
+			xfers[index].delay_usecs = bdata->byte_delay_us;
+			xfers[index].tx_buf = &txbuf[index];
+			spi_message_add_tail(&xfers[index], &msg);
+		}
+
+		if (bdata->block_delay_us)
+			xfers[index - 1].delay_usecs = bdata->block_delay_us;
+
+		retval = spi_sync(spi, &msg);
+		if (retval == 0) {
+			rmi4_data->current_page = page;
+			retval = PAGE_SELECT_LEN;
+		} else {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete SPI transfer, error = %d\n",
+					__func__, retval);
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * Read @length bytes from RMI register @addr into @data.
+ *
+ * Builds one single-byte spi_transfer per wire byte: two TX address
+ * bytes (high byte OR'd with the SPI_READ opcode) followed by @length
+ * RX bytes, each spaced by the per-byte delay; the final transfer may
+ * carry the block delay instead.  The page select and the transfer are
+ * serialized under rmi4_io_ctrl_mutex.
+ *
+ * Returns @length on success or a negative errno.
+ */
+static int synaptics_rmi4_spi_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int xfer_count = length + ADDRESS_WORD_LEN;
+	unsigned char txbuf[ADDRESS_WORD_LEN];
+	unsigned char *rxbuf = NULL;
+	struct spi_message msg;
+	struct spi_transfer *xfers = NULL;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	/* kcalloc zero-fills, so every spi_transfer starts cleared. */
+	xfers = kcalloc(xfer_count, sizeof(struct spi_transfer), GFP_KERNEL);
+	if (!xfers) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for xfers\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	txbuf[0] = (addr >> 8) | SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+
+	rxbuf = kmalloc(length, GFP_KERNEL);
+	if (!rxbuf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for rxbuf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* First ADDRESS_WORD_LEN transfers send the address; rest receive. */
+	for (index = 0; index < xfer_count; index++) {
+		xfers[index].len = 1;
+		xfers[index].delay_usecs = bdata->byte_delay_us;
+		if (index < ADDRESS_WORD_LEN)
+			xfers[index].tx_buf = &txbuf[index];
+		else
+			xfers[index].rx_buf = &rxbuf[index - ADDRESS_WORD_LEN];
+		spi_message_add_tail(&xfers[index], &msg);
+	}
+
+	if (bdata->block_delay_us)
+		xfers[index - 1].delay_usecs = bdata->block_delay_us;
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = secure_memcpy(data, length, rxbuf, length, length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+		} else {
+			retval = length;
+		}
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+exit:
+	kfree(rxbuf);
+	kfree(xfers);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes from @data to RMI register @addr.
+ *
+ * txbuf holds the 2-byte address word (write opcode in the high bits)
+ * followed by the payload; each byte goes out as its own spi_transfer
+ * with the per-byte delay, the last one optionally with the block
+ * delay.  Page select and the transfer are serialized under
+ * rmi4_io_ctrl_mutex.
+ *
+ * Returns @length on success or a negative errno.
+ */
+static int synaptics_rmi4_spi_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int xfer_count = length + ADDRESS_WORD_LEN;
+	unsigned char *txbuf = NULL;
+	struct spi_message msg;
+	struct spi_transfer *xfers = NULL;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	xfers = kcalloc(xfer_count, sizeof(struct spi_transfer), GFP_KERNEL);
+	if (!xfers) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for xfers\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	txbuf = kmalloc(xfer_count, GFP_KERNEL);
+	if (!txbuf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for txbuf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* Clearing SPI_READ (bit 7) marks this as a write transaction. */
+	txbuf[0] = (addr >> 8) & ~SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+	retval = secure_memcpy(&txbuf[ADDRESS_WORD_LEN],
+			xfer_count - ADDRESS_WORD_LEN, data, length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		retval = -EIO;
+		goto exit;
+	}
+
+	for (index = 0; index < xfer_count; index++) {
+		xfers[index].len = 1;
+		xfers[index].delay_usecs = bdata->byte_delay_us;
+		xfers[index].tx_buf = &txbuf[index];
+		spi_message_add_tail(&xfers[index], &msg);
+	}
+
+	if (bdata->block_delay_us)
+		xfers[index - 1].delay_usecs = bdata->block_delay_us;
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+exit:
+	kfree(txbuf);
+	kfree(xfers);
+
+	return retval;
+}
+
+/* Bus ops exposed to the core driver through hw_if. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_SPI,
+	.read = synaptics_rmi4_spi_read,
+	.write = synaptics_rmi4_spi_write,
+};
+
+/* Hardware interface handed to the core driver as platform data. */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* Platform device binding the core driver; kfree'd in dev_release. */
+static struct platform_device *synaptics_dsx_spi_device;
+
+/*
+ * Release callback for the platform device: frees the kzalloc'd
+ * device once the driver core drops its last reference.
+ */
+static void synaptics_rmi4_spi_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_spi_device);
+
+	return;
+}
+
+/*
+ * SPI probe: allocate the core platform device, gather board data
+ * (from DT when available, otherwise from platform_data), configure
+ * the SPI link (8 bits/word, mode 3) and register the platform device
+ * that binds the DSX core driver.
+ *
+ * All error paths release the kzalloc'd platform device, which the
+ * original code leaked; the parse_dt() result is now checked as well.
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	/* The byte-per-transfer protocol needs full-duplex SPI. */
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		dev_err(&spi->dev,
+				"%s: Full duplex not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_spi_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_spi_device) {
+		dev_err(&spi->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_spi_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (spi->dev.of_node) {
+		/* Board data is devm-managed; only the pdev needs cleanup. */
+		hw_if.board_data = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		retval = parse_dt(&spi->dev, hw_if.board_data);
+		if (retval < 0) {
+			dev_err(&spi->dev,
+					"%s: Failed to parse device tree data\n",
+					__func__);
+			goto err_free_pdev;
+		}
+	}
+#else
+	hw_if.board_data = spi->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		dev_err(&spi->dev,
+				"%s: Failed to perform SPI setup\n",
+				__func__);
+		goto err_free_pdev;
+	}
+
+	synaptics_dsx_spi_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_spi_device->id = 0;
+	synaptics_dsx_spi_device->num_resources = 0;
+	synaptics_dsx_spi_device->dev.parent = &spi->dev;
+	synaptics_dsx_spi_device->dev.platform_data = &hw_if;
+	synaptics_dsx_spi_device->dev.release = synaptics_rmi4_spi_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_spi_device);
+	if (retval) {
+		dev_err(&spi->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/*
+		 * register initializes the kobject even on failure, so
+		 * drop the reference; release() performs the kfree.
+		 */
+		platform_device_put(synaptics_dsx_spi_device);
+		synaptics_dsx_spi_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_pdev:
+	/* dev.release is not active before registration; free directly. */
+	kfree(synaptics_dsx_spi_device);
+	synaptics_dsx_spi_device = NULL;
+	return retval;
+}
+
+/* SPI remove: unregister the core platform device (its release
+ * callback frees the allocation).  Always returns 0.
+ */
+static int synaptics_rmi4_spi_remove(struct spi_device *spi)
+{
+	platform_device_unregister(synaptics_dsx_spi_device);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+/*
+ * Device-tree match table.  Declared const: the driver core's
+ * of_match_table member takes a const pointer and the table is
+ * never written at runtime.
+ */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* SPI driver descriptor; matched via the OF table above. */
+static struct spi_driver synaptics_rmi4_spi_driver = {
+	.driver = {
+		.name = SPI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_spi_probe,
+	.remove = synaptics_rmi4_spi_remove,
+};
+
+
+/*
+ * Register the SPI bus driver on behalf of the core module.
+ * Returns the spi_register_driver() result (0 or negative errno).
+ */
+int synaptics_rmi4_bus_init_v26(void)
+{
+	return spi_register_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init_v26);
+
+/* Unregister the SPI bus driver; counterpart of the init above. */
+void synaptics_rmi4_bus_exit_v26(void)
+{
+	spi_unregister_driver(&synaptics_rmi4_spi_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit_v26);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX SPI Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_test_reporting.c
new file mode 100644
index 0000000..49bec56
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_test_reporting.c
@@ -0,0 +1,4163 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "f54"
+
+#define GET_REPORT_TIMEOUT_S 3
+#define CALIBRATION_TIMEOUT_S 10
+#define COMMAND_TIMEOUT_100MS 20
+
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+
+#define STATUS_IDLE 0
+#define STATUS_BUSY 1
+#define STATUS_ERROR 2
+
+#define REPORT_INDEX_OFFSET 1
+#define REPORT_DATA_OFFSET 3
+
+#define SENSOR_RX_MAPPING_OFFSET 1
+#define SENSOR_TX_MAPPING_OFFSET 2
+
+#define COMMAND_GET_REPORT 1
+#define COMMAND_FORCE_CAL 2
+#define COMMAND_FORCE_UPDATE 4
+
+#define CONTROL_NO_AUTO_CAL 1
+
+#define CONTROL_0_SIZE 1
+#define CONTROL_1_SIZE 1
+#define CONTROL_2_SIZE 2
+#define CONTROL_3_SIZE 1
+#define CONTROL_4_6_SIZE 3
+#define CONTROL_7_SIZE 1
+#define CONTROL_8_9_SIZE 3
+#define CONTROL_10_SIZE 1
+#define CONTROL_11_SIZE 2
+#define CONTROL_12_13_SIZE 2
+#define CONTROL_14_SIZE 1
+#define CONTROL_15_SIZE 1
+#define CONTROL_16_SIZE 1
+#define CONTROL_17_SIZE 1
+#define CONTROL_18_SIZE 1
+#define CONTROL_19_SIZE 1
+#define CONTROL_20_SIZE 1
+#define CONTROL_21_SIZE 2
+#define CONTROL_22_26_SIZE 7
+#define CONTROL_27_SIZE 1
+#define CONTROL_28_SIZE 2
+#define CONTROL_29_SIZE 1
+#define CONTROL_30_SIZE 1
+#define CONTROL_31_SIZE 1
+#define CONTROL_32_35_SIZE 8
+#define CONTROL_36_SIZE 1
+#define CONTROL_37_SIZE 1
+#define CONTROL_38_SIZE 1
+#define CONTROL_39_SIZE 1
+#define CONTROL_40_SIZE 1
+#define CONTROL_41_SIZE 1
+#define CONTROL_42_SIZE 2
+#define CONTROL_43_54_SIZE 13
+#define CONTROL_55_56_SIZE 2
+#define CONTROL_57_SIZE 1
+#define CONTROL_58_SIZE 1
+#define CONTROL_59_SIZE 2
+#define CONTROL_60_62_SIZE 3
+#define CONTROL_63_SIZE 1
+#define CONTROL_64_67_SIZE 4
+#define CONTROL_68_73_SIZE 8
+#define CONTROL_74_SIZE 2
+#define CONTROL_75_SIZE 1
+#define CONTROL_76_SIZE 1
+#define CONTROL_77_78_SIZE 2
+#define CONTROL_79_83_SIZE 5
+#define CONTROL_84_85_SIZE 2
+#define CONTROL_86_SIZE 1
+#define CONTROL_87_SIZE 1
+#define CONTROL_88_SIZE 1
+#define CONTROL_89_SIZE 1
+#define CONTROL_90_SIZE 1
+#define CONTROL_91_SIZE 1
+#define CONTROL_92_SIZE 1
+#define CONTROL_93_SIZE 1
+#define CONTROL_94_SIZE 1
+#define CONTROL_95_SIZE 1
+#define CONTROL_96_SIZE 1
+#define CONTROL_97_SIZE 1
+#define CONTROL_98_SIZE 1
+#define CONTROL_99_SIZE 1
+#define CONTROL_100_SIZE 1
+#define CONTROL_101_SIZE 1
+#define CONTROL_102_SIZE 1
+#define CONTROL_103_SIZE 1
+#define CONTROL_104_SIZE 1
+#define CONTROL_105_SIZE 1
+#define CONTROL_106_SIZE 1
+#define CONTROL_107_SIZE 1
+#define CONTROL_108_SIZE 1
+#define CONTROL_109_SIZE 1
+#define CONTROL_110_SIZE 1
+#define CONTROL_111_SIZE 1
+#define CONTROL_112_SIZE 1
+#define CONTROL_113_SIZE 1
+#define CONTROL_114_SIZE 1
+#define CONTROL_115_SIZE 1
+#define CONTROL_116_SIZE 1
+#define CONTROL_117_SIZE 1
+#define CONTROL_118_SIZE 1
+#define CONTROL_119_SIZE 1
+#define CONTROL_120_SIZE 1
+#define CONTROL_121_SIZE 1
+#define CONTROL_122_SIZE 1
+#define CONTROL_123_SIZE 1
+#define CONTROL_124_SIZE 1
+#define CONTROL_125_SIZE 1
+#define CONTROL_126_SIZE 1
+#define CONTROL_127_SIZE 1
+#define CONTROL_128_SIZE 1
+#define CONTROL_129_SIZE 1
+#define CONTROL_130_SIZE 1
+#define CONTROL_131_SIZE 1
+#define CONTROL_132_SIZE 1
+#define CONTROL_133_SIZE 1
+#define CONTROL_134_SIZE 1
+#define CONTROL_135_SIZE 1
+#define CONTROL_136_SIZE 1
+#define CONTROL_137_SIZE 1
+#define CONTROL_138_SIZE 1
+#define CONTROL_139_SIZE 1
+#define CONTROL_140_SIZE 1
+#define CONTROL_141_SIZE 1
+#define CONTROL_142_SIZE 1
+#define CONTROL_143_SIZE 1
+#define CONTROL_144_SIZE 1
+#define CONTROL_145_SIZE 1
+#define CONTROL_146_SIZE 1
+#define CONTROL_147_SIZE 1
+#define CONTROL_148_SIZE 1
+#define CONTROL_149_SIZE 1
+#define CONTROL_163_SIZE 1
+#define CONTROL_165_SIZE 1
+#define CONTROL_167_SIZE 1
+#define CONTROL_176_SIZE 1
+#define CONTROL_179_SIZE 1
+#define CONTROL_188_SIZE 1
+
+#define HIGH_RESISTANCE_DATA_SIZE 6
+#define FULL_RAW_CAP_MIN_MAX_DATA_SIZE 4
+#define TRX_OPEN_SHORT_DATA_SIZE 7
+
+/* Token-paste helper used to build the test_sysfs_* symbol names. */
+#define concat(a, b) a##b
+
+/* Shorthand for a device_attribute's attr member, for attribute lists. */
+#define attrify(propname) (&dev_attr_##propname.attr)
+
+/* Declare the show handler prototype for a sysfs property. */
+#define show_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);
+
+/* Declare the store handler prototype for a sysfs property. */
+#define store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);
+
+/* Declare both handlers and define the 0664 device attribute. */
+#define show_store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0664,\
+				concat(test_sysfs, _##propname##_show),\
+				concat(test_sysfs, _##propname##_store));
+
+/*
+ * Read-modify-write the given F54 control register to clear its
+ * cbc_tx_carrier_selection field.  Expands inside a function with
+ * "retval" and "rmi4_data" in scope and returns from that function on
+ * failure.  NOTE: ctrl_num is evaluated multiple times -- pass a plain
+ * member name only.
+ */
+#define disable_cbc(ctrl_num)\
+do {\
+	retval = synaptics_rmi4_reg_read(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+	f54->control.ctrl_num->cbc_tx_carrier_selection = 0;\
+	retval = synaptics_rmi4_reg_write(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+} while (0)
+
+/*
+ * F54 diagnostic report types.  Values are the firmware-defined report
+ * numbers (non-contiguous by design), written to the report-type
+ * register to request a given diagnostic image.
+ */
+enum f54_report_types {
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_HIGH_RESISTANCE = 4,
+	F54_TX_TO_TX_SHORTS = 5,
+	F54_RX_TO_RX_SHORTS_1 = 7,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP_MIN_MAX = 13,
+	F54_RX_OPENS_1 = 14,
+	F54_TX_OPENS = 15,
+	F54_TX_TO_GND_SHORTS = 16,
+	F54_RX_TO_RX_SHORTS_2 = 17,
+	F54_RX_OPENS_2 = 18,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_NO_RX_COUPLING = 20,
+	F54_SENSOR_SPEED = 22,
+	F54_ADC_RANGE = 23,
+	F54_TRX_OPENS = 24,
+	F54_TRX_TO_GND_SHORTS = 25,
+	F54_TRX_SHORTS = 26,
+	F54_ABS_RAW_CAP = 38,
+	F54_ABS_DELTA_CAP = 40,
+	F54_ABS_HYBRID_DELTA_CAP = 59,
+	F54_ABS_HYBRID_RAW_CAP = 63,
+	F54_AMP_FULL_RAW_CAP = 78,
+	F54_AMP_RAW_ADC = 83,
+	INVALID_REPORT_TYPE = -1,
+};
+
+/* AFE calibration operations (perform vs. query completion). */
+enum f54_afe_cal {
+	F54_AFE_CAL,
+	F54_AFE_IS_CAL,
+};
+
+/*
+ * F54 base query register block (queries 0-12).  The union overlays a
+ * packed bitfield view onto the 14 raw bytes read from the device;
+ * has_* bits gate which optional control/data registers (and further
+ * query registers such as query 13/15) exist on this firmware.
+ */
+struct f54_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char f54_query2_b0__1:2;
+			unsigned char has_baseline:1;
+			unsigned char has_image8:1;
+			unsigned char f54_query2_b4__5:2;
+			unsigned char has_image16:1;
+			unsigned char f54_query2_b7:1;
+
+			/* queries 3.0 and 3.1 */
+			unsigned short clock_rate;
+
+			/* query 4 */
+			unsigned char touch_controller_family;
+
+			/* query 5 */
+			unsigned char has_pixel_touch_threshold_adjustment:1;
+			unsigned char f54_query5_b1__7:7;
+
+			/* query 6 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_interference_metric:1;
+			unsigned char has_sense_frequency_control:1;
+			unsigned char has_firmware_noise_mitigation:1;
+			unsigned char has_ctrl11:1;
+			unsigned char has_two_byte_report_rate:1;
+			unsigned char has_one_byte_report_rate:1;
+			unsigned char has_relaxation_control:1;
+
+			/* query 7 */
+			unsigned char curve_compensation_mode:2;
+			unsigned char f54_query7_b2__7:6;
+
+			/* query 8 */
+			unsigned char f54_query8_b0:1;
+			unsigned char has_iir_filter:1;
+			unsigned char has_cmn_removal:1;
+			unsigned char has_cmn_maximum:1;
+			unsigned char has_touch_hysteresis:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char has_per_frequency_noise_control:1;
+			unsigned char has_enhanced_stretch:1;
+
+			/* query 9 */
+			unsigned char has_force_fast_relaxation:1;
+			unsigned char has_multi_metric_state_machine:1;
+			unsigned char has_signal_clarity:1;
+			unsigned char has_variance_metric:1;
+			unsigned char has_0d_relaxation_control:1;
+			unsigned char has_0d_acquisition_control:1;
+			unsigned char has_status:1;
+			unsigned char has_slew_metric:1;
+
+			/* query 10 */
+			unsigned char has_h_blank:1;
+			unsigned char has_v_blank:1;
+			unsigned char has_long_h_blank:1;
+			unsigned char has_startup_fast_relaxation:1;
+			unsigned char has_esd_control:1;
+			unsigned char has_noise_mitigation2:1;
+			unsigned char has_noise_state:1;
+			unsigned char has_energy_ratio_relaxation:1;
+
+			/* query 11 */
+			unsigned char has_excessive_noise_reporting:1;
+			unsigned char has_slew_option:1;
+			unsigned char has_two_overhead_bursts:1;
+			unsigned char has_query13:1;
+			unsigned char has_one_overhead_burst:1;
+			unsigned char f54_query11_b5:1;
+			unsigned char has_ctrl88:1;
+			unsigned char has_query15:1;
+
+			/* query 12 */
+			unsigned char number_of_sensing_frequencies:4;
+			unsigned char f54_query12_b4__7:4;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+/*
+ * Extended F54 query registers 13-25.  Each is one raw byte overlaid
+ * with packed presence bits that gate further optional registers; each
+ * register's existence is itself gated by a has_queryNN bit in an
+ * earlier query (e.g. has_query13 / has_query15 in struct f54_query).
+ */
+
+/* Query 13 presence flags. */
+struct f54_query_13 {
+	union {
+		struct {
+			unsigned char has_ctrl86:1;
+			unsigned char has_ctrl87:1;
+			unsigned char has_ctrl87_sub0:1;
+			unsigned char has_ctrl87_sub1:1;
+			unsigned char has_ctrl87_sub2:1;
+			unsigned char has_cidim:1;
+			unsigned char has_noise_mitigation_enhancement:1;
+			unsigned char has_rail_im:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 15 presence flags. */
+struct f54_query_15 {
+	union {
+		struct {
+			unsigned char has_ctrl90:1;
+			unsigned char has_transmit_strength:1;
+			unsigned char has_ctrl87_sub3:1;
+			unsigned char has_query16:1;
+			unsigned char has_query20:1;
+			unsigned char has_query21:1;
+			unsigned char has_query22:1;
+			unsigned char has_query25:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 16 presence flags. */
+struct f54_query_16 {
+	union {
+		struct {
+			unsigned char has_query17:1;
+			unsigned char has_data17:1;
+			unsigned char has_ctrl92:1;
+			unsigned char has_ctrl93:1;
+			unsigned char has_ctrl94_query18:1;
+			unsigned char has_ctrl95_query19:1;
+			unsigned char has_ctrl99:1;
+			unsigned char has_ctrl100:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 21 presence flags. */
+struct f54_query_21 {
+	union {
+		struct {
+			unsigned char has_abs_rx:1;
+			unsigned char has_abs_tx:1;
+			unsigned char has_ctrl91:1;
+			unsigned char has_ctrl96:1;
+			unsigned char has_ctrl97:1;
+			unsigned char has_ctrl98:1;
+			unsigned char has_data19:1;
+			unsigned char has_query24_data18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 22 presence flags. */
+struct f54_query_22 {
+	union {
+		struct {
+			unsigned char has_packed_image:1;
+			unsigned char has_ctrl101:1;
+			unsigned char has_dynamic_sense_display_ratio:1;
+			unsigned char has_query23:1;
+			unsigned char has_ctrl103_query26:1;
+			unsigned char has_ctrl104:1;
+			unsigned char has_ctrl105:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 23 presence flags (ctrl102 sub-register map). */
+struct f54_query_23 {
+	union {
+		struct {
+			unsigned char has_ctrl102:1;
+			unsigned char has_ctrl102_sub1:1;
+			unsigned char has_ctrl102_sub2:1;
+			unsigned char has_ctrl102_sub4:1;
+			unsigned char has_ctrl102_sub5:1;
+			unsigned char has_ctrl102_sub9:1;
+			unsigned char has_ctrl102_sub10:1;
+			unsigned char has_ctrl102_sub11:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 25 presence flags. */
+struct f54_query_25 {
+	union {
+		struct {
+			unsigned char has_ctrl106:1;
+			unsigned char has_ctrl102_sub12:1;
+			unsigned char has_ctrl107:1;
+			unsigned char has_ctrl108:1;
+			unsigned char has_ctrl109:1;
+			unsigned char has_data20:1;
+			unsigned char f54_query25_b6:1;
+			unsigned char has_query27:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * Extended F54 query registers 27-39, continuing the chained layout:
+ * one raw byte each, overlaid with packed presence bits; each struct's
+ * register exists only when the previous query's has_queryNN bit is
+ * set.
+ */
+
+/* Query 27 presence flags. */
+struct f54_query_27 {
+	union {
+		struct {
+			unsigned char has_ctrl110:1;
+			unsigned char has_data21:1;
+			unsigned char has_ctrl111:1;
+			unsigned char has_ctrl112:1;
+			unsigned char has_ctrl113:1;
+			unsigned char has_data22:1;
+			unsigned char has_ctrl114:1;
+			unsigned char has_query29:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 29 presence flags. */
+struct f54_query_29 {
+	union {
+		struct {
+			unsigned char has_ctrl115:1;
+			unsigned char has_ground_ring_options:1;
+			unsigned char has_lost_bursts_tuning:1;
+			unsigned char has_aux_exvcom2_select:1;
+			unsigned char has_ctrl116:1;
+			unsigned char has_data23:1;
+			unsigned char has_ctrl117:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 30 presence flags. */
+struct f54_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl118:1;
+			unsigned char has_ctrl119:1;
+			unsigned char has_ctrl120:1;
+			unsigned char has_ctrl121:1;
+			unsigned char has_ctrl122_query31:1;
+			unsigned char has_ctrl123:1;
+			unsigned char f54_query30_b6:1;
+			unsigned char has_query32:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 32 presence flags. */
+struct f54_query_32 {
+	union {
+		struct {
+			unsigned char has_ctrl125:1;
+			unsigned char has_ctrl126:1;
+			unsigned char has_ctrl127:1;
+			unsigned char has_abs_charge_pump_disable:1;
+			unsigned char has_query33:1;
+			unsigned char has_data24:1;
+			unsigned char has_query34:1;
+			unsigned char has_query35:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 33 presence flags. */
+struct f54_query_33 {
+	union {
+		struct {
+			unsigned char f54_query33_b0:1;
+			unsigned char f54_query33_b1:1;
+			unsigned char f54_query33_b2:1;
+			unsigned char f54_query33_b3:1;
+			unsigned char has_ctrl132:1;
+			unsigned char has_ctrl133:1;
+			unsigned char has_ctrl134:1;
+			unsigned char has_query36:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 35 presence flags. */
+struct f54_query_35 {
+	union {
+		struct {
+			unsigned char has_data25:1;
+			unsigned char f54_query35_b1:1;
+			unsigned char f54_query35_b2:1;
+			unsigned char has_ctrl137:1;
+			unsigned char has_ctrl138:1;
+			unsigned char has_ctrl139:1;
+			unsigned char has_data26:1;
+			unsigned char has_ctrl140:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 36 presence flags. */
+struct f54_query_36 {
+	union {
+		struct {
+			unsigned char f54_query36_b0:1;
+			unsigned char has_ctrl142:1;
+			unsigned char has_query37:1;
+			unsigned char has_ctrl143:1;
+			unsigned char has_ctrl144:1;
+			unsigned char has_ctrl145:1;
+			unsigned char has_ctrl146:1;
+			unsigned char has_query38:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 38 presence flags. */
+struct f54_query_38 {
+	union {
+		struct {
+			unsigned char has_ctrl147:1;
+			unsigned char has_ctrl148:1;
+			unsigned char has_ctrl149:1;
+			unsigned char f54_query38_b3__6:4;
+			unsigned char has_query39:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 39 presence flags. */
+struct f54_query_39 {
+	union {
+		struct {
+			unsigned char f54_query39_b0__6:7;
+			unsigned char has_query40:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_40 {
+ union {
+ struct {
+ unsigned char f54_query40_b0:1;
+ unsigned char has_ctrl163_query41:1;
+ unsigned char f54_query40_b2:1;
+ unsigned char has_ctrl165_query42:1;
+ unsigned char f54_query40_b4:1;
+ unsigned char has_ctrl167:1;
+ unsigned char f54_query40_b6:1;
+ unsigned char has_query43:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_43 {
+ union {
+ struct {
+ unsigned char f54_query43_b0__6:7;
+ unsigned char has_query46:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_46 {
+ union {
+ struct {
+ unsigned char has_ctrl176:1;
+ unsigned char f54_query46_b1:1;
+ unsigned char has_ctrl179:1;
+ unsigned char f54_query46_b3:1;
+ unsigned char has_data27:1;
+ unsigned char has_data28:1;
+ unsigned char f54_query46_b6:1;
+ unsigned char has_query47:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_47 {
+ union {
+ struct {
+ unsigned char f54_query47_b0__6:7;
+ unsigned char has_query49:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_49 {
+ union {
+ struct {
+ unsigned char f54_query49_b0__1:2;
+ unsigned char has_ctrl188:1;
+ unsigned char has_data31:1;
+ unsigned char f54_query49_b4__6:3;
+ unsigned char has_query50:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_50 {
+ union {
+ struct {
+ unsigned char f54_query50_b0__6:7;
+ unsigned char has_query51:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_51 {
+ union {
+ struct {
+ unsigned char f54_query51_b0__4:5;
+ unsigned char has_query53_query54_ctrl198:1;
+ unsigned char f54_query51_b6__7:2;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_data_31 { /* F54 data reg 31: calibration CRC status + register address */
+	union {
+		struct {
+			unsigned char is_calibration_crc:1; /* image-sensing cal CRC flag */
+			unsigned char calibration_crc:1; /* standard cal CRC flag */
+			unsigned char short_test_row_number:5;
+		} __packed;
+		struct {
+			unsigned char data[1]; /* raw byte view */
+			unsigned short address; /* register address, cached at probe time */
+		} __packed;
+	};
+};
+
+struct f54_control_7 { /* F54 ctrl 7: CBC settings (touch controller family 1) */
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl7_b5__7:3; /* reserved bits 5-7 */
+		} __packed;
+		struct {
+			unsigned char data[1]; /* raw byte view */
+			unsigned short address; /* register address, cached at probe time */
+		} __packed;
+	};
+};
+
+struct f54_control_41 { /* F54 ctrl 41: signal clarity enable/disable */
+	union {
+		struct {
+			unsigned char no_signal_clarity:1; /* 1 = signal clarity disabled */
+			unsigned char f54_ctrl41_b1__7:7; /* reserved bits 1-7 */
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_57 { /* F54 ctrl 57: 0D (button) CBC settings */
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl57_b5__7:3; /* reserved bits 5-7 */
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_86 { /* F54 ctrl 86: noise state / dynamic sense-display ratio */
+	union {
+		struct {
+			unsigned char enable_high_noise_state:1;
+			unsigned char dynamic_sense_display_ratio:2;
+			unsigned char f54_ctrl86_b3__7:5; /* reserved bits 3-7 */
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_88 { /* F54 ctrl 88: reference polarities / CBC / charge pump */
+	union {
+		struct {
+			unsigned char tx_low_reference_polarity:1;
+			unsigned char tx_high_reference_polarity:1;
+			unsigned char abs_low_reference_polarity:1;
+			unsigned char abs_polarity:1;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char charge_pump_enable:1;
+			unsigned char cbc_abs_auto_servo:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_110 { /* F54 ctrl 110: active stylus tuning (13 bytes) */
+	union {
+		struct {
+			unsigned char active_stylus_rx_feedback_cap;
+			unsigned char active_stylus_rx_feedback_cap_reference;
+			unsigned char active_stylus_low_reference;
+			unsigned char active_stylus_high_reference;
+			unsigned char active_stylus_gain_control;
+			unsigned char active_stylus_gain_control_reference;
+			unsigned char active_stylus_timing_mode;
+			unsigned char active_stylus_discovery_bursts;
+			unsigned char active_stylus_detection_bursts;
+			unsigned char active_stylus_discovery_noise_multiplier;
+			unsigned char active_stylus_detection_envelope_min;
+			unsigned char active_stylus_detection_envelope_max;
+			unsigned char active_stylus_lose_count;
+		} __packed;
+		struct {
+			unsigned char data[13]; /* raw byte view of the 13 fields above */
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_149 { /* F54 ctrl 149: trans CBC global cap enable */
+	union {
+		struct {
+			unsigned char trans_cbc_global_cap_enable:1;
+			unsigned char f54_ctrl149_b1__7:7; /* reserved bits 1-7 */
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_188 { /* F54 ctrl 188: calibration / production test triggers */
+	union {
+		struct {
+			unsigned char start_calibration:1; /* self-clears on completion */
+			unsigned char start_is_calibration:1; /* self-clears on completion */
+			unsigned char frequency:2;
+			unsigned char start_production_test:1;
+			unsigned char short_test_calibration:1;
+			unsigned char f54_ctrl188_b7:1; /* reserved bit */
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control { /* pointers to the F54 control registers present on this chip */
+	struct f54_control_7 *reg_7;
+	struct f54_control_41 *reg_41;
+	struct f54_control_57 *reg_57;
+	struct f54_control_86 *reg_86;
+	struct f54_control_88 *reg_88;
+	struct f54_control_110 *reg_110;
+	struct f54_control_149 *reg_149;
+	struct f54_control_188 *reg_188;
+};
+
+struct synaptics_rmi4_f54_handle { /* per-device state for F54 test reporting */
+	bool no_auto_cal;
+	bool skip_preparation; /* skip CBC/force-cal preparation before reports */
+	unsigned char status; /* STATUS_IDLE / STATUS_BUSY / STATUS_ERROR */
+	unsigned char intr_mask;
+	unsigned char intr_reg_num;
+	unsigned char tx_assigned; /* number of assigned TX electrodes */
+	unsigned char rx_assigned; /* number of assigned RX electrodes */
+	unsigned char *report_data; /* buffer for the latest captured report */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short fifoindex;
+	unsigned int report_size; /* size in bytes of the pending/last report */
+	unsigned int data_buffer_size;
+	unsigned int data_pos; /* read offset into report_data for sysfs reads */
+	enum f54_report_types report_type;
+	struct f54_query query;
+	struct f54_query_13 query_13;
+	struct f54_query_15 query_15;
+	struct f54_query_16 query_16;
+	struct f54_query_21 query_21;
+	struct f54_query_22 query_22;
+	struct f54_query_23 query_23;
+	struct f54_query_25 query_25;
+	struct f54_query_27 query_27;
+	struct f54_query_29 query_29;
+	struct f54_query_30 query_30;
+	struct f54_query_32 query_32;
+	struct f54_query_33 query_33;
+	struct f54_query_35 query_35;
+	struct f54_query_36 query_36;
+	struct f54_query_38 query_38;
+	struct f54_query_39 query_39;
+	struct f54_query_40 query_40;
+	struct f54_query_43 query_43;
+	struct f54_query_46 query_46;
+	struct f54_query_47 query_47;
+	struct f54_query_49 query_49;
+	struct f54_query_50 query_50;
+	struct f54_query_51 query_51;
+	struct f54_data_31 data_31;
+	struct f54_control control;
+	struct mutex status_mutex; /* serializes status transitions and sysfs ops */
+	struct kobject *sysfs_dir;
+	struct hrtimer watchdog; /* times out get_report requests */
+	struct work_struct timeout_work;
+	struct work_struct test_report_work;
+	struct workqueue_struct *test_report_workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+struct f55_query { /* F55 query regs 0-2: electrode counts and capabilities */
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char curve_compensation_mode:2;
+			unsigned char has_ctrl6:1;
+			unsigned char has_alternate_transmitter_assignment:1;
+			unsigned char has_single_layer_multi_touch:1;
+			unsigned char has_query5:1;
+		} __packed;
+		unsigned char data[3]; /* raw view of queries 0-2 */
+	};
+};
+
+struct f55_query_3 { /* F55 query reg 3 */
+	union {
+		struct {
+			unsigned char has_ctrl8:1;
+			unsigned char has_ctrl9:1;
+			unsigned char has_oncell_pattern_support:1;
+			unsigned char has_data0:1;
+			unsigned char has_single_wide_pattern_support:1;
+			unsigned char has_mirrored_tx_pattern_support:1;
+			unsigned char has_discrete_pattern_support:1;
+			unsigned char has_query9:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_5 { /* F55 query reg 5 */
+	union {
+		struct {
+			unsigned char has_corner_compensation:1;
+			unsigned char has_ctrl12:1;
+			unsigned char has_trx_configuration:1;
+			unsigned char has_ctrl13:1;
+			unsigned char f55_query5_b4:1; /* reserved bit */
+			unsigned char has_ctrl14:1;
+			unsigned char has_basis_function:1;
+			unsigned char has_query17:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_17 { /* F55 query reg 17 */
+	union {
+		struct {
+			unsigned char f55_query17_b0:1;
+			unsigned char has_ctrl16:1;
+			unsigned char f55_query17_b2:1;
+			unsigned char has_ctrl17:1;
+			unsigned char f55_query17_b4__6:3; /* reserved bits 4-6 */
+			unsigned char has_query18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_18 { /* F55 query reg 18 */
+	union {
+		struct {
+			unsigned char f55_query18_b0__6:7; /* reserved bits 0-6 */
+			unsigned char has_query22:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_22 { /* F55 query reg 22 */
+	union {
+		struct {
+			unsigned char f55_query22_b0:1;
+			unsigned char has_query23:1;
+			unsigned char has_guard_disable:1;
+			unsigned char has_ctrl30:1;
+			unsigned char f55_query22_b4__7:4; /* reserved bits 4-7 */
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_23 { /* F55 query reg 23: amp sensor layout description */
+	union {
+		struct {
+			unsigned char amp_sensor_enabled:1;
+			unsigned char image_transposed:1;
+			unsigned char first_column_at_left_side:1;
+			unsigned char size_of_column2mux:5;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f55_handle { /* per-device state for F55 sensor layout */
+	bool amp_sensor;
+	unsigned char size_of_column2mux;
+	unsigned char *tx_assignment; /* per-electrode TX mapping (0xff = unmapped) */
+	unsigned char *rx_assignment; /* per-electrode RX mapping (0xff = unmapped) */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct f55_query query;
+	struct f55_query_3 query_3;
+	struct f55_query_5 query_5;
+	struct f55_query_17 query_17;
+	struct f55_query_18 query_18;
+	struct f55_query_22 query_22;
+	struct f55_query_23 query_23;
+};
+
+show_prototype(num_of_mapped_tx) /* generate sysfs show/store handler prototypes */
+show_prototype(num_of_mapped_rx)
+show_prototype(tx_mapping)
+show_prototype(rx_mapping)
+show_prototype(report_size)
+show_prototype(status)
+store_prototype(do_preparation)
+store_prototype(force_cal)
+store_prototype(get_report)
+store_prototype(resume_touch)
+store_prototype(do_afe_calibration)
+show_store_prototype(report_type)
+show_store_prototype(fifoindex)
+show_store_prototype(no_auto_cal)
+show_store_prototype(read_report)
+
+static struct attribute *attrs[] = { /* attributes exposed under the F54 sysfs dir */
+	attrify(num_of_mapped_tx),
+	attrify(num_of_mapped_rx),
+	attrify(tx_mapping),
+	attrify(rx_mapping),
+	attrify(report_size),
+	attrify(status),
+	attrify(do_preparation),
+	attrify(force_cal),
+	attrify(get_report),
+	attrify(resume_touch),
+	attrify(do_afe_calibration),
+	attrify(report_type),
+	attrify(fifoindex),
+	attrify(no_auto_cal),
+	attrify(read_report),
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static struct bin_attribute test_report_data = { /* read-only binary report buffer */
+	.attr = {
+		.name = "report_data",
+		.mode = S_IRUGO,
+	},
+	.size = 0,
+	.read = test_sysfs_data_read,
+};
+
+static struct synaptics_rmi4_f54_handle *f54; /* singleton handles, set at probe */
+static struct synaptics_rmi4_f55_handle *f55;
+
+DECLARE_COMPLETION(test_remove_complete);
+
+/* Return true if report_type is supported; on failure also resets the cached
+ * report type/size in the F54 handle (deliberate side effect). */
+static bool test_report_type_valid(enum f54_report_types report_type)
+{
+	switch (report_type) {
+	case F54_8BIT_IMAGE:
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_HIGH_RESISTANCE:
+	case F54_TX_TO_TX_SHORTS:
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+	case F54_RX_OPENS_1:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+		return true;
+	default:
+		f54->report_type = INVALID_REPORT_TYPE;
+		f54->report_size = 0;
+		return false;
+	}
+}
+
+static void test_set_report_size(void) /* compute report_size for the current report type */
+{
+	int retval;
+	unsigned char tx = f54->tx_assigned;
+	unsigned char rx = f54->rx_assigned;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		f54->report_size = tx * rx; /* one byte per channel */
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+		f54->report_size = 2 * tx * rx; /* two bytes per channel */
+		break;
+	case F54_HIGH_RESISTANCE:
+		f54->report_size = HIGH_RESISTANCE_DATA_SIZE;
+		break;
+	case F54_TX_TO_TX_SHORTS:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+		f54->report_size = (tx + 7) / 8; /* one bit per TX, rounded up */
+		break;
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_RX_OPENS_1:
+		if (rx < tx)
+			f54->report_size = 2 * rx * rx;
+		else
+			f54->report_size = 2 * tx * rx;
+		break;
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		f54->report_size = FULL_RAW_CAP_MIN_MAX_DATA_SIZE;
+		break;
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+		if (rx <= tx)
+			f54->report_size = 0; /* part 2 only covers RX beyond TX count */
+		else
+			f54->report_size = 2 * rx * (rx - tx);
+		break;
+	case F54_ADC_RANGE:
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					f54->control.reg_41->data,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read control reg_41\n",
+						__func__);
+				f54->report_size = 0;
+				break;
+			}
+			if (!f54->control.reg_41->no_signal_clarity) {
+				if (tx % 4)
+					tx += 4 - (tx % 4); /* round tx up to a multiple of 4 */
+			}
+		}
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+		f54->report_size = TRX_OPEN_SHORT_DATA_SIZE;
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		f54->report_size = 4 * (tx + rx); /* 32-bit value per electrode */
+		break;
+	default:
+		f54->report_size = 0;
+	}
+
+	return;
+}
+
+static int test_set_interrupt(bool set) /* set=true: route interrupts to F54 only; false: restore normal masks */
+{
+	int retval;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short f01_ctrl_reg;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	intr_mask = rmi4_data->intr_mask;
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (!set) { /* clear the F54 interrupt enable first when restoring */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&zero,
+				sizeof(zero));
+		if (retval < 0)
+			return retval;
+	}
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (set) { /* mask out all other interrupt sources */
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			} else { /* restore the saved interrupt masks */
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (set) { /* finally enable only the F54 interrupt */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&f54->intr_mask,
+				1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+static int test_wait_for_command_completion(void) /* poll command reg until it self-clears */
+{
+	int retval;
+	unsigned char value;
+	unsigned char timeout_count;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	timeout_count = 0;
+	do { /* poll every 100 ms up to COMMAND_TIMEOUT_100MS iterations */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&value,
+				sizeof(value));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+			return retval;
+		}
+
+		if (value == 0x00) /* firmware clears the reg when done */
+			break;
+
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < COMMAND_TIMEOUT_100MS);
+
+	if (timeout_count == COMMAND_TIMEOUT_100MS) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for command completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int test_do_command(unsigned char command) /* issue a command and wait for completion */
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int test_do_preparation(void) /* condition the chip before capturing a report */
+{
+	int retval;
+	unsigned char value;
+	unsigned char zero = 0x00;
+	unsigned char device_ctrl;
+	struct f54_control_86 reg_86;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data, /* read-modify-write F01 ctrl 0 */
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl |= NO_SLEEP_ON; /* keep the controller awake during testing */
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	if ((f54->query.has_query13) &&
+			(f54->query_13.has_ctrl86)) {
+		reg_86.data[0] = f54->control.reg_86->data[0]; /* local copy; original restored on resume */
+		reg_86.dynamic_sense_display_ratio = 1;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f54->control.reg_86->address,
+				reg_86.data,
+				sizeof(reg_86.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set sense display ratio\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	if (f54->skip_preparation)
+		return 0;
+
+	switch (f54->report_type) {
+	case F54_16BIT_IMAGE: /* these report types need no extra preparation */
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		break;
+	case F54_AMP_RAW_ADC: /* requires production test mode via ctrl 188 */
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 1;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default: /* remaining types: disable CBC + signal clarity, then force update/cal */
+		if (f54->query.touch_controller_family == 1)
+			disable_cbc(reg_7);
+		else if (f54->query.has_ctrl88)
+			disable_cbc(reg_88);
+
+		if (f54->query.has_0d_acquisition_control)
+			disable_cbc(reg_57);
+
+		if ((f54->query.has_query15) && /* ctrl 149 exists only if this chain holds */
+				(f54->query_15.has_query25) &&
+				(f54->query_25.has_query27) &&
+				(f54->query_27.has_query29) &&
+				(f54->query_29.has_query30) &&
+				(f54->query_30.has_query32) &&
+				(f54->query_32.has_query33) &&
+				(f54->query_33.has_query36) &&
+				(f54->query_36.has_query38) &&
+				(f54->query_38.has_ctrl149)) {
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_149->address,
+					&zero,
+					sizeof(f54->control.reg_149->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable global CBC\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+			value |= 0x01; /* set no_signal_clarity (bit 0) */
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		retval = test_do_command(COMMAND_FORCE_UPDATE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force update\n",
+					__func__);
+			return retval;
+		}
+
+		retval = test_do_command(COMMAND_FORCE_CAL);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force cal\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+static int test_do_afe_calibration(enum f54_afe_cal mode) /* run AFE (or image-sensing) calibration via ctrl 188 and verify CRC */
+{
+	int retval;
+	unsigned char timeout = CALIBRATION_TIMEOUT_S;
+	unsigned char timeout_count = 0;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL)
+		f54->control.reg_188->start_calibration = 1;
+	else if (mode == F54_AFE_IS_CAL)
+		f54->control.reg_188->start_is_calibration = 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	do { /* poll once per second; the start bit self-clears on completion */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_188->address,
+				f54->control.reg_188->data,
+				sizeof(f54->control.reg_188->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete calibration\n",
+					__func__);
+			return retval;
+		}
+
+		if (mode == F54_AFE_CAL) {
+			if (!f54->control.reg_188->start_calibration)
+				break;
+		} else if (mode == F54_AFE_IS_CAL) {
+			if (!f54->control.reg_188->start_is_calibration)
+				break;
+		}
+
+		if (timeout_count == timeout) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for calibration completion\n",
+					__func__);
+			return -EBUSY;
+		}
+
+		timeout_count++;
+		msleep(1000);
+	} while (true);
+
+	/* check CRC */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_31.address,
+			f54->data_31.data,
+			sizeof(f54->data_31.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read calibration CRC\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL) {
+		if (f54->data_31.calibration_crc == 0) /* 0 = CRC good */
+			return 0;
+	} else if (mode == F54_AFE_IS_CAL) {
+		if (f54->data_31.is_calibration_crc == 0)
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to read calibration CRC\n",
+			__func__);
+
+	return -EINVAL;
+}
+
+static int test_check_for_idle_status(void) /* 0 if idle; -EINVAL if busy/error/unknown */
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->status) {
+	case STATUS_IDLE:
+		retval = 0;
+		break;
+	case STATUS_BUSY:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status busy\n",
+				__func__);
+		retval = -EINVAL;
+		break;
+	case STATUS_ERROR:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status error\n",
+				__func__);
+		retval = -EINVAL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid status (%d)\n",
+				__func__, f54->status);
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+static void test_timeout_work(struct work_struct *work) /* watchdog expiry: recover or retry a stuck get-report */
+{
+	int retval;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status == STATUS_BUSY) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+		} else if (command & COMMAND_GET_REPORT) { /* still pending: FW never completed it */
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Report type not supported by FW\n",
+					__func__);
+		} else { /* command completed but interrupt was missed; fetch the report */
+			queue_work(f54->test_report_workqueue,
+					&f54->test_report_work);
+			goto exit;
+		}
+		f54->status = STATUS_ERROR;
+		f54->report_size = 0;
+	}
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return;
+}
+
+static enum hrtimer_restart test_get_report_timeout(struct hrtimer *timer) /* hrtimer cb: defer to process context */
+{
+	schedule_work(&(f54->timeout_work));
+
+	return HRTIMER_NORESTART;
+}
+
+static ssize_t test_sysfs_num_of_mapped_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf) /* sysfs: assigned TX count */
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->tx_assigned);
+}
+
+static ssize_t test_sysfs_num_of_mapped_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf) /* sysfs: assigned RX count */
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->rx_assigned);
+}
+
+static ssize_t test_sysfs_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf) /* sysfs: print F55 TX assignment table */
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes = f54->query.num_of_tx_electrodes;
+
+	if (!f55) /* mapping is only known when F55 is present */
+		return -EINVAL;
+
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		tx_num = f55->tx_assignment[ii];
+		if (tx_num == 0xff) /* 0xff marks an unmapped electrode */
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", tx_num);
+		buf += cnt; /* advance the output cursor */
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+static ssize_t test_sysfs_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf) /* sysfs: print F55 RX assignment table */
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char rx_num;
+	unsigned char rx_electrodes = f54->query.num_of_rx_electrodes;
+
+	if (!f55) /* mapping is only known when F55 is present */
+		return -EINVAL;
+
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		rx_num = f55->rx_assignment[ii];
+		if (rx_num == 0xff) /* 0xff marks an unmapped electrode */
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", rx_num);
+		buf += cnt; /* advance the output cursor */
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+static ssize_t test_sysfs_report_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf) /* sysfs: size of last report */
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_size);
+}
+
+static ssize_t test_sysfs_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf) /* sysfs: current F54 state */
+{
+	int retval;
+
+	mutex_lock(&f54->status_mutex); /* read status consistently with state changes */
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", f54->status);
+
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_do_preparation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count) /* sysfs: write 1 to run preparation */
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1) /* only "1" is accepted */
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_preparation();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do preparation\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count; /* success: consumed the whole write */
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_force_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count) /* sysfs: write 1 to force calibration */
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1) /* only "1" is accepted */
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_command(COMMAND_FORCE_CAL);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do force cal\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count; /* success: consumed the whole write */
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_get_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count) /* sysfs: write 1 to trigger report capture */
+{
+	int retval;
+	unsigned char command;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1) /* only "1" is accepted */
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid(f54->report_type)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid report type\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	test_set_interrupt(true); /* NOTE(review): return value ignored here */
+
+	command = (unsigned char)COMMAND_GET_REPORT;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write get report command\n",
+				__func__);
+		goto exit;
+	}
+
+	f54->status = STATUS_BUSY; /* cleared by report completion or timeout work */
+	f54->report_size = 0;
+	f54->data_pos = 0;
+
+	hrtimer_start(&f54->watchdog, /* arm the get-report watchdog */
+			ktime_set(GET_REPORT_TIMEOUT_S, 0),
+			HRTIMER_MODE_REL);
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_resume_touch_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count) /* sysfs: write 1 to undo preparation and resume touch */
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1) /* only "1" is accepted */
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl = device_ctrl & ~NO_SLEEP_ON; /* drop the forced no-sleep bit */
+	device_ctrl |= rmi4_data->no_sleep_setting; /* restore the configured setting */
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	if ((f54->query.has_query13) &&
+			(f54->query_13.has_ctrl86)) { /* restore ctrl 86 saved before testing */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to restore sense display ratio\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	test_set_interrupt(false); /* restore normal interrupt routing */
+
+	if (f54->skip_preparation)
+		return count;
+
+	switch (f54->report_type) {
+	case F54_16BIT_IMAGE: /* these types changed nothing else; done */
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		break;
+	case F54_AMP_RAW_ADC: /* leave production test mode via ctrl 188 */
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 0;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default: /* other types modified CBC/clarity; reset to restore defaults */
+		rmi4_data->reset_device(rmi4_data, false);
+	}
+
+	return count;
+}
+
+/*
+ * test_sysfs_do_afe_calibration_store - sysfs store handler that triggers an
+ * AFE calibration.
+ *
+ * The written value selects the calibration mode (0 or 1, cast to
+ * enum f54_afe_cal); any other value is rejected.  Requires the firmware to
+ * expose F54_ANALOG_Ctrl188.
+ *
+ * Returns @count on success or a negative errno on failure.
+ */
+static ssize_t test_sysfs_do_afe_calibration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (!f54->query_49.has_ctrl188) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: F54_ANALOG_Ctrl188 not found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (setting == 0 || setting == 1)
+		retval = test_do_afe_calibration((enum f54_afe_cal)setting);
+	else
+		return -EINVAL;
+
+	if (retval)
+		return retval;
+	else
+		return count;
+}
+
+/* Show the currently selected F54 report type as a decimal number. */
+static ssize_t test_sysfs_report_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_type);
+}
+
+/*
+ * test_sysfs_report_type_store - select the F54 report type.
+ *
+ * Validates the written value against the report types this driver
+ * supports, then writes it to the first byte of the F54 data register
+ * block.  Serialized against report capture via status_mutex; rejected
+ * while a capture is in flight.
+ *
+ * Returns @count on success or a negative errno on failure.
+ */
+static ssize_t test_sysfs_report_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid((enum f54_report_types)setting)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type not supported by driver\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	f54->report_type = (enum f54_report_types)setting;
+	data = (unsigned char)setting;
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report type\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * test_sysfs_fifoindex_show - read the 16-bit report FIFO index from the
+ * device (little-endian byte pair converted via batohs) and print it.
+ *
+ * Returns the number of bytes written to @buf or a negative errno.
+ */
+static ssize_t test_sysfs_fifoindex_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char data[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report index\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&f54->fifoindex, data);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->fifoindex);
+}
+
+/*
+ * test_sysfs_fifoindex_store - set the 16-bit report FIFO index.
+ *
+ * Caches the value in f54->fifoindex and writes it to the device as a
+ * little-endian byte pair (hstoba).
+ *
+ * Returns @count on success or a negative errno on failure.
+ */
+static ssize_t test_sysfs_fifoindex_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data[2];
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	f54->fifoindex = setting;
+
+	hstoba(data, (unsigned short)setting);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report index\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/* Show the cached no-auto-calibration flag (0 or 1). */
+static ssize_t test_sysfs_no_auto_cal_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->no_auto_cal);
+}
+
+/*
+ * test_sysfs_no_auto_cal_store - enable/disable automatic calibration.
+ *
+ * Accepts 0 or 1.  Performs a read-modify-write of the first F54 control
+ * byte, setting or clearing CONTROL_NO_AUTO_CAL, and caches the result in
+ * f54->no_auto_cal.
+ *
+ * Returns @count on success or a negative errno on failure.
+ */
+static ssize_t test_sysfs_no_auto_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting > 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	if (setting)
+		data |= CONTROL_NO_AUTO_CAL;
+	else
+		data &= ~CONTROL_NO_AUTO_CAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	f54->no_auto_cal = (setting == 1);
+
+	return count;
+}
+
+/*
+ * test_sysfs_read_report_show - format the most recently captured report
+ * into @buf, with a layout chosen by the report type:
+ *
+ *   - 8-bit image: one "index: value" line per byte;
+ *   - 2D image types: tx/rx header followed by a tx x rx matrix;
+ *   - high resistance / min-max: one line per 16-bit word;
+ *   - absolute cap types: rx row then tx row of per-electrode values;
+ *   - anything else: a hex dump.
+ *
+ * NOTE(review): the snprintf return values are accumulated without checking
+ * for truncation, so a report larger than PAGE_SIZE would advance @buf past
+ * the sysfs page — presumably report sizes are small enough in practice;
+ * verify against the supported panel geometries.
+ */
+static ssize_t test_sysfs_read_report_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int ii;
+	unsigned int jj;
+	int cnt;
+	int count = 0;
+	int tx_num = f54->tx_assigned;
+	int rx_num = f54->rx_assigned;
+	char *report_data_8;
+	short *report_data_16;
+	int *report_data_32;
+	unsigned short *report_data_u16;
+	unsigned int *report_data_u32;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		report_data_8 = (char *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii, *report_data_8);
+			report_data_8++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_AMP_RAW_ADC:
+		/* Unsigned 16-bit matrix, rx_num values per tx row */
+		report_data_u16 = (unsigned short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_u16);
+				report_data_u16++;
+				buf += cnt;
+				count += cnt;
+			}
+			/* Last column of the row gets a newline, no space */
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_u16);
+			report_data_u16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+		/* Signed 16-bit matrix, same layout as above */
+		report_data_16 = (short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_16);
+				report_data_16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_HIGH_RESISTANCE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		/* Flat list of 16-bit words, indexed by word number */
+		report_data_16 = (short *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii += 2) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii / 2, *report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_ABS_RAW_CAP:
+		/* Per-electrode unsigned 32-bit values: rx row then tx row */
+		report_data_u32 = (unsigned int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* Same layout as F54_ABS_RAW_CAP but with signed values */
+		report_data_32 = (int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	default:
+		/* Unknown report type: raw hex dump */
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: 0x%02x\n",
+					ii, f54->report_data[ii]);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * test_sysfs_read_report_store - one-shot report capture.
+ *
+ * Convenience store handler that chains the individual steps: select the
+ * report type from @buf, run preparation, trigger the get-report command,
+ * poll (100 ms steps) until the capture completes or GET_REPORT_TIMEOUT_S
+ * expires, then resume normal touch operation.  Any failure resets the
+ * device before returning the error.
+ *
+ * Returns @count on success or a negative errno on failure.
+ */
+static ssize_t test_sysfs_read_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char timeout = GET_REPORT_TIMEOUT_S * 10;
+	unsigned char timeout_count;
+	const char cmd[] = {'1', 0};
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = test_sysfs_report_type_store(dev, attr, buf, count);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_do_preparation_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_get_report_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	/* Poll for completion; the work item flips status off STATUS_BUSY */
+	timeout_count = 0;
+	do {
+		if (f54->status != STATUS_BUSY)
+			break;
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < timeout);
+
+	if ((f54->status != STATUS_IDLE) || (f54->report_size == 0)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = test_sysfs_resume_touch_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	return count;
+
+exit:
+	/* Leave the controller in a known-good state after any failure */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return retval;
+}
+
+/*
+ * test_sysfs_data_read - sysfs binary-attribute read handler for raw report
+ * data.
+ *
+ * Copies up to @count bytes of the captured report starting at the current
+ * read position (f54->data_pos, advanced on each call so sequential reads
+ * stream the whole report).  Fails while a capture is in flight or when no
+ * report has been captured yet.
+ *
+ * Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int read_size;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!f54->report_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type %d data not available\n",
+				__func__, f54->report_type);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Clamp the copy to the remaining unread portion of the report */
+	if ((f54->data_pos + count) > f54->report_size)
+		read_size = f54->report_size - f54->data_pos;
+	else
+		read_size = min_t(unsigned int, count, f54->report_size);
+
+	retval = secure_memcpy(buf, count, f54->report_data + f54->data_pos,
+			f54->data_buffer_size - f54->data_pos, read_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy report data\n",
+				__func__);
+		goto exit;
+	}
+	f54->data_pos += read_size;
+	retval = read_size;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * test_report_work - deferred work that retrieves a completed report.
+ *
+ * Waits for the get-report command to finish, computes the report size,
+ * (re)allocates the report buffer if the cached one is too small, rewinds
+ * the device's report index to zero, and reads the full report into
+ * f54->report_data.  f54->status ends up as STATUS_IDLE on success or
+ * STATUS_ERROR (with report_size forced to 0) on any failure.
+ */
+static void test_report_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char report_index[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	/* Only run if a capture was actually started */
+	if (f54->status != STATUS_BUSY) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	test_set_report_size();
+	if (f54->report_size == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report data size = 0\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	/* Grow the report buffer on demand; never shrink it */
+	if (f54->data_buffer_size < f54->report_size) {
+		if (f54->data_buffer_size)
+			kfree(f54->report_data);
+		f54->report_data = kzalloc(f54->report_size, GFP_KERNEL);
+		if (!f54->report_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for data buffer\n",
+					__func__);
+			f54->data_buffer_size = 0;
+			retval = STATUS_ERROR;
+			goto exit;
+		}
+		f54->data_buffer_size = f54->report_size;
+	}
+
+	/* Rewind the device-side FIFO before reading the report out */
+	report_index[0] = 0;
+	report_index[1] = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			report_index,
+			sizeof(report_index));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report data index\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_DATA_OFFSET,
+			f54->report_data,
+			f54->report_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report data\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = STATUS_IDLE;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	if (retval == STATUS_ERROR)
+		f54->report_size = 0;
+
+	f54->status = retval;
+
+	return;
+}
+
+/* Tear down the sysfs interface created by test_set_sysfs(). */
+static void test_remove_sysfs(void)
+{
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+	kobject_put(f54->sysfs_dir);
+
+	return;
+}
+
+/*
+ * test_set_sysfs - create the F54 diagnostics sysfs interface.
+ *
+ * Creates the sysfs directory under the input device's kobject, then the
+ * binary report_data file, then the attribute group.  On failure, only the
+ * objects that were actually created are torn down before returning.
+ *
+ * Returns 0 on success or -ENODEV on failure.
+ */
+static int test_set_sysfs(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	f54->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!f54->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		goto exit_directory;
+	}
+
+	retval = sysfs_create_bin_file(f54->sysfs_dir, &test_report_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_bin_file;
+	}
+
+	retval = sysfs_create_group(f54->sysfs_dir, &attr_group);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs attributes\n",
+				__func__);
+		goto exit_attributes;
+	}
+
+	return 0;
+
+exit_attributes:
+	/*
+	 * The attribute group was never created here, so do not remove it;
+	 * sysfs_remove_group() on a non-existent group triggers a WARN.
+	 * Only the bin file needs to be removed before dropping the kobject.
+	 */
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+
+exit_bin_file:
+	kobject_put(f54->sysfs_dir);
+
+exit_directory:
+	return -ENODEV;
+}
+
+/*
+ * test_free_control_mem - free the control register structures allocated in
+ * test_set_controls().  kfree(NULL) is a no-op, so registers the firmware
+ * never exposed are safely skipped.
+ */
+static void test_free_control_mem(void)
+{
+	struct f54_control control = f54->control;
+
+	kfree(control.reg_7);
+	kfree(control.reg_41);
+	kfree(control.reg_57);
+	kfree(control.reg_86);
+	kfree(control.reg_88);
+	kfree(control.reg_110);
+	kfree(control.reg_149);
+	kfree(control.reg_188);
+
+	return;
+}
+
+/*
+ * test_set_data - compute the register address of F54 data 31.
+ *
+ * Walks the variable-layout F54 data register block: starting just past the
+ * report data register, each optional data register present in this
+ * firmware (as reported by the query registers) advances the running
+ * address by its size.  The statement order mirrors the register order in
+ * the F54 spec and must not be changed.  Only data_31's address is
+ * recorded; the intermediate registers are skipped over.
+ */
+static void test_set_data(void)
+{
+	unsigned short reg_addr;
+
+	reg_addr = f54->data_base_addr + REPORT_DATA_OFFSET + 1;
+
+	/* data 4 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr++;
+
+	/* data 5 reserved */
+
+	/* data 6 */
+	if (f54->query.has_interference_metric)
+		reg_addr += 2;
+
+	/* data 7 */
+	if (f54->query.has_one_byte_report_rate |
+			f54->query.has_two_byte_report_rate)
+		reg_addr++;
+	if (f54->query.has_two_byte_report_rate)
+		reg_addr++;
+
+	/* data 8 */
+	if (f54->query.has_variance_metric)
+		reg_addr += 2;
+
+	/* data 9 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 10 */
+	if (f54->query.has_multi_metric_state_machine |
+			f54->query.has_noise_state)
+		reg_addr++;
+
+	/* data 11 */
+	if (f54->query.has_status)
+		reg_addr++;
+
+	/* data 12 */
+	if (f54->query.has_slew_metric)
+		reg_addr += 2;
+
+	/* data 13 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 14 */
+	if (f54->query_13.has_cidim)
+		reg_addr++;
+
+	/* data 15 */
+	if (f54->query_13.has_rail_im)
+		reg_addr++;
+
+	/* data 16 */
+	if (f54->query_13.has_noise_mitigation_enhancement)
+		reg_addr++;
+
+	/* data 17 */
+	if (f54->query_16.has_data17)
+		reg_addr++;
+
+	/* data 18 */
+	if (f54->query_21.has_query24_data18)
+		reg_addr++;
+
+	/* data 19 */
+	if (f54->query_21.has_data19)
+		reg_addr++;
+
+	/* data_20 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr++;
+
+	/* data 21 */
+	if (f54->query_27.has_data21)
+		reg_addr++;
+
+	/* data 22 */
+	if (f54->query_27.has_data22)
+		reg_addr++;
+
+	/* data 23 */
+	if (f54->query_29.has_data23)
+		reg_addr++;
+
+	/* data 24 */
+	if (f54->query_32.has_data24)
+		reg_addr++;
+
+	/* data 25 */
+	if (f54->query_35.has_data25)
+		reg_addr++;
+
+	/* data 26 */
+	if (f54->query_35.has_data26)
+		reg_addr++;
+
+	/* data 27 */
+	if (f54->query_46.has_data27)
+		reg_addr++;
+
+	/* data 28 */
+	if (f54->query_46.has_data28)
+		reg_addr++;
+
+	/* data 29 30 reserved */
+
+	/* data 31 */
+	if (f54->query_49.has_data31) {
+		f54->data_31.address = reg_addr;
+		reg_addr++;
+	}
+
+	return;
+}
+
+/*
+ * test_set_controls - map the variable-layout F54 control register block.
+ *
+ * Walks control registers 0..188 in spec order, advancing a running
+ * address by the size of every register this firmware exposes (per the
+ * query registers).  Registers the driver later accesses directly (7, 41,
+ * 57, 86, 88, 110, 149, 188) get a structure allocated and their address
+ * recorded; control 86's current value is also cached so it can be
+ * restored after diagnostics.  The statement order mirrors the register
+ * order and must not be changed.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or a negative errno
+ * if reading control 86 fails.  Allocations made before a failure are
+ * released later by test_free_control_mem().
+ */
+static int test_set_controls(void)
+{
+	int retval;
+	unsigned char length;
+	unsigned char num_of_sensing_freqs;
+	unsigned short reg_addr = f54->control_base_addr;
+	struct f54_control *control = &f54->control;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	num_of_sensing_freqs = f54->query.number_of_sensing_frequencies;
+
+	/* control 0 */
+	reg_addr += CONTROL_0_SIZE;
+
+	/* control 1 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_1_SIZE;
+
+	/* control 2 */
+	reg_addr += CONTROL_2_SIZE;
+
+	/* control 3 */
+	if (f54->query.has_pixel_touch_threshold_adjustment)
+		reg_addr += CONTROL_3_SIZE;
+
+	/* controls 4 5 6 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_4_6_SIZE;
+
+	/* control 7 */
+	if (f54->query.touch_controller_family == 1) {
+		control->reg_7 = kzalloc(sizeof(*(control->reg_7)),
+				GFP_KERNEL);
+		if (!control->reg_7)
+			goto exit_no_mem;
+		control->reg_7->address = reg_addr;
+		reg_addr += CONTROL_7_SIZE;
+	}
+
+	/* controls 8 9 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_8_9_SIZE;
+
+	/* control 10 */
+	if (f54->query.has_interference_metric)
+		reg_addr += CONTROL_10_SIZE;
+
+	/* control 11 */
+	if (f54->query.has_ctrl11)
+		reg_addr += CONTROL_11_SIZE;
+
+	/* controls 12 13 */
+	if (f54->query.has_relaxation_control)
+		reg_addr += CONTROL_12_13_SIZE;
+
+	/* controls 14 15 16: per-electrode sensor assignment tables */
+	if (f54->query.has_sensor_assignment) {
+		reg_addr += CONTROL_14_SIZE;
+		reg_addr += CONTROL_15_SIZE * f54->query.num_of_rx_electrodes;
+		reg_addr += CONTROL_16_SIZE * f54->query.num_of_tx_electrodes;
+	}
+
+	/* controls 17 18 19: one entry per sensing frequency */
+	if (f54->query.has_sense_frequency_control) {
+		reg_addr += CONTROL_17_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_18_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_19_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 20 */
+	reg_addr += CONTROL_20_SIZE;
+
+	/* control 21 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr += CONTROL_21_SIZE;
+
+	/* controls 22 23 24 25 26 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_22_26_SIZE;
+
+	/* control 27 */
+	if (f54->query.has_iir_filter)
+		reg_addr += CONTROL_27_SIZE;
+
+	/* control 28 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_28_SIZE;
+
+	/* control 29 */
+	if (f54->query.has_cmn_removal)
+		reg_addr += CONTROL_29_SIZE;
+
+	/* control 30 */
+	if (f54->query.has_cmn_maximum)
+		reg_addr += CONTROL_30_SIZE;
+
+	/* control 31 */
+	if (f54->query.has_touch_hysteresis)
+		reg_addr += CONTROL_31_SIZE;
+
+	/* controls 32 33 34 35 */
+	if (f54->query.has_edge_compensation)
+		reg_addr += CONTROL_32_35_SIZE;
+
+	/* control 36: length depends on the curve compensation mode */
+	if ((f54->query.curve_compensation_mode == 1) ||
+			(f54->query.curve_compensation_mode == 2)) {
+		if (f54->query.curve_compensation_mode == 1) {
+			length = max(f54->query.num_of_rx_electrodes,
+					f54->query.num_of_tx_electrodes);
+		} else if (f54->query.curve_compensation_mode == 2) {
+			length = f54->query.num_of_rx_electrodes;
+		}
+		reg_addr += CONTROL_36_SIZE * length;
+	}
+
+	/* control 37 */
+	if (f54->query.curve_compensation_mode == 2)
+		reg_addr += CONTROL_37_SIZE * f54->query.num_of_tx_electrodes;
+
+	/* controls 38 39 40: one entry per sensing frequency */
+	if (f54->query.has_per_frequency_noise_control) {
+		reg_addr += CONTROL_38_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_39_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_40_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 41: accessed later, so allocate and record its address */
+	if (f54->query.has_signal_clarity) {
+		control->reg_41 = kzalloc(sizeof(*(control->reg_41)),
+				GFP_KERNEL);
+		if (!control->reg_41)
+			goto exit_no_mem;
+		control->reg_41->address = reg_addr;
+		reg_addr += CONTROL_41_SIZE;
+	}
+
+	/* control 42 */
+	if (f54->query.has_variance_metric)
+		reg_addr += CONTROL_42_SIZE;
+
+	/* controls 43 44 45 46 47 48 49 50 51 52 53 54 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += CONTROL_43_54_SIZE;
+
+	/* controls 55 56 */
+	if (f54->query.has_0d_relaxation_control)
+		reg_addr += CONTROL_55_56_SIZE;
+
+	/* control 57 */
+	if (f54->query.has_0d_acquisition_control) {
+		control->reg_57 = kzalloc(sizeof(*(control->reg_57)),
+				GFP_KERNEL);
+		if (!control->reg_57)
+			goto exit_no_mem;
+		control->reg_57->address = reg_addr;
+		reg_addr += CONTROL_57_SIZE;
+	}
+
+	/* control 58 */
+	if (f54->query.has_0d_acquisition_control)
+		reg_addr += CONTROL_58_SIZE;
+
+	/* control 59 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_59_SIZE;
+
+	/* controls 60 61 62 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_60_62_SIZE;
+
+	/* control 63 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank) ||
+			(f54->query.has_slew_metric) ||
+			(f54->query.has_slew_option) ||
+			(f54->query.has_noise_mitigation2))
+		reg_addr += CONTROL_63_SIZE;
+
+	/* controls 64 65 66 67: 7 blocks for h-blank, 1 otherwise */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_64_67_SIZE * 7;
+	else if ((f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_64_67_SIZE;
+
+	/* controls 68 69 70 71 72 73 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_68_73_SIZE;
+
+	/* control 74 */
+	if (f54->query.has_slew_metric)
+		reg_addr += CONTROL_74_SIZE;
+
+	/* control 75: one entry per sensing frequency */
+	if (f54->query.has_enhanced_stretch)
+		reg_addr += CONTROL_75_SIZE * num_of_sensing_freqs;
+
+	/* control 76 */
+	if (f54->query.has_startup_fast_relaxation)
+		reg_addr += CONTROL_76_SIZE;
+
+	/* controls 77 78 */
+	if (f54->query.has_esd_control)
+		reg_addr += CONTROL_77_78_SIZE;
+
+	/* controls 79 80 81 82 83 */
+	if (f54->query.has_noise_mitigation2)
+		reg_addr += CONTROL_79_83_SIZE;
+
+	/* controls 84 85 */
+	if (f54->query.has_energy_ratio_relaxation)
+		reg_addr += CONTROL_84_85_SIZE;
+
+	/* control 86: cache current value for later restore on resume */
+	if (f54->query_13.has_ctrl86) {
+		control->reg_86 = kzalloc(sizeof(*(control->reg_86)),
+				GFP_KERNEL);
+		if (!control->reg_86)
+			goto exit_no_mem;
+		control->reg_86->address = reg_addr;
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read sense display ratio\n",
+					__func__);
+			return retval;
+		}
+		reg_addr += CONTROL_86_SIZE;
+	}
+
+	/* control 87 */
+	if (f54->query_13.has_ctrl87)
+		reg_addr += CONTROL_87_SIZE;
+
+	/* control 88 */
+	if (f54->query.has_ctrl88) {
+		control->reg_88 = kzalloc(sizeof(*(control->reg_88)),
+				GFP_KERNEL);
+		if (!control->reg_88)
+			goto exit_no_mem;
+		control->reg_88->address = reg_addr;
+		reg_addr += CONTROL_88_SIZE;
+	}
+
+	/* control 89 */
+	if (f54->query_13.has_cidim ||
+			f54->query_13.has_noise_mitigation_enhancement ||
+			f54->query_13.has_rail_im)
+		reg_addr += CONTROL_89_SIZE;
+
+	/* control 90 */
+	if (f54->query_15.has_ctrl90)
+		reg_addr += CONTROL_90_SIZE;
+
+	/* control 91 */
+	if (f54->query_21.has_ctrl91)
+		reg_addr += CONTROL_91_SIZE;
+
+	/* control 92 */
+	if (f54->query_16.has_ctrl92)
+		reg_addr += CONTROL_92_SIZE;
+
+	/* control 93 */
+	if (f54->query_16.has_ctrl93)
+		reg_addr += CONTROL_93_SIZE;
+
+	/* control 94 */
+	if (f54->query_16.has_ctrl94_query18)
+		reg_addr += CONTROL_94_SIZE;
+
+	/* control 95 */
+	if (f54->query_16.has_ctrl95_query19)
+		reg_addr += CONTROL_95_SIZE;
+
+	/* control 96 */
+	if (f54->query_21.has_ctrl96)
+		reg_addr += CONTROL_96_SIZE;
+
+	/* control 97 */
+	if (f54->query_21.has_ctrl97)
+		reg_addr += CONTROL_97_SIZE;
+
+	/* control 98 */
+	if (f54->query_21.has_ctrl98)
+		reg_addr += CONTROL_98_SIZE;
+
+	/* control 99 */
+	if (f54->query.touch_controller_family == 2)
+		reg_addr += CONTROL_99_SIZE;
+
+	/* control 100 */
+	if (f54->query_16.has_ctrl100)
+		reg_addr += CONTROL_100_SIZE;
+
+	/* control 101 */
+	if (f54->query_22.has_ctrl101)
+		reg_addr += CONTROL_101_SIZE;
+
+
+	/* control 102 */
+	if (f54->query_23.has_ctrl102)
+		reg_addr += CONTROL_102_SIZE;
+
+	/* control 103: its presence means no report preparation is needed */
+	if (f54->query_22.has_ctrl103_query26) {
+		f54->skip_preparation = true;
+		reg_addr += CONTROL_103_SIZE;
+	}
+
+	/* control 104 */
+	if (f54->query_22.has_ctrl104)
+		reg_addr += CONTROL_104_SIZE;
+
+	/* control 105 */
+	if (f54->query_22.has_ctrl105)
+		reg_addr += CONTROL_105_SIZE;
+
+	/* control 106 */
+	if (f54->query_25.has_ctrl106)
+		reg_addr += CONTROL_106_SIZE;
+
+	/* control 107 */
+	if (f54->query_25.has_ctrl107)
+		reg_addr += CONTROL_107_SIZE;
+
+	/* control 108 */
+	if (f54->query_25.has_ctrl108)
+		reg_addr += CONTROL_108_SIZE;
+
+	/* control 109 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr += CONTROL_109_SIZE;
+
+	/* control 110 */
+	if (f54->query_27.has_ctrl110) {
+		control->reg_110 = kzalloc(sizeof(*(control->reg_110)),
+				GFP_KERNEL);
+		if (!control->reg_110)
+			goto exit_no_mem;
+		control->reg_110->address = reg_addr;
+		reg_addr += CONTROL_110_SIZE;
+	}
+
+	/* control 111 */
+	if (f54->query_27.has_ctrl111)
+		reg_addr += CONTROL_111_SIZE;
+
+	/* control 112 */
+	if (f54->query_27.has_ctrl112)
+		reg_addr += CONTROL_112_SIZE;
+
+	/* control 113 */
+	if (f54->query_27.has_ctrl113)
+		reg_addr += CONTROL_113_SIZE;
+
+	/* control 114 */
+	if (f54->query_27.has_ctrl114)
+		reg_addr += CONTROL_114_SIZE;
+
+	/* control 115 */
+	if (f54->query_29.has_ctrl115)
+		reg_addr += CONTROL_115_SIZE;
+
+	/* control 116 */
+	if (f54->query_29.has_ctrl116)
+		reg_addr += CONTROL_116_SIZE;
+
+	/* control 117 */
+	if (f54->query_29.has_ctrl117)
+		reg_addr += CONTROL_117_SIZE;
+
+	/* control 118 */
+	if (f54->query_30.has_ctrl118)
+		reg_addr += CONTROL_118_SIZE;
+
+	/* control 119 */
+	if (f54->query_30.has_ctrl119)
+		reg_addr += CONTROL_119_SIZE;
+
+	/* control 120 */
+	if (f54->query_30.has_ctrl120)
+		reg_addr += CONTROL_120_SIZE;
+
+	/* control 121 */
+	if (f54->query_30.has_ctrl121)
+		reg_addr += CONTROL_121_SIZE;
+
+	/* control 122 */
+	if (f54->query_30.has_ctrl122_query31)
+		reg_addr += CONTROL_122_SIZE;
+
+	/* control 123 */
+	if (f54->query_30.has_ctrl123)
+		reg_addr += CONTROL_123_SIZE;
+
+	/* control 124 reserved */
+
+	/* control 125 */
+	if (f54->query_32.has_ctrl125)
+		reg_addr += CONTROL_125_SIZE;
+
+	/* control 126 */
+	if (f54->query_32.has_ctrl126)
+		reg_addr += CONTROL_126_SIZE;
+
+	/* control 127 */
+	if (f54->query_32.has_ctrl127)
+		reg_addr += CONTROL_127_SIZE;
+
+	/* controls 128 129 130 131 reserved */
+
+	/* control 132 */
+	if (f54->query_33.has_ctrl132)
+		reg_addr += CONTROL_132_SIZE;
+
+	/* control 133 */
+	if (f54->query_33.has_ctrl133)
+		reg_addr += CONTROL_133_SIZE;
+
+	/* control 134 */
+	if (f54->query_33.has_ctrl134)
+		reg_addr += CONTROL_134_SIZE;
+
+	/* controls 135 136 reserved */
+
+	/* control 137 */
+	if (f54->query_35.has_ctrl137)
+		reg_addr += CONTROL_137_SIZE;
+
+	/* control 138 */
+	if (f54->query_35.has_ctrl138)
+		reg_addr += CONTROL_138_SIZE;
+
+	/* control 139 */
+	if (f54->query_35.has_ctrl139)
+		reg_addr += CONTROL_139_SIZE;
+
+	/* control 140 */
+	if (f54->query_35.has_ctrl140)
+		reg_addr += CONTROL_140_SIZE;
+
+	/* control 141 reserved */
+
+	/* control 142 */
+	if (f54->query_36.has_ctrl142)
+		reg_addr += CONTROL_142_SIZE;
+
+	/* control 143 */
+	if (f54->query_36.has_ctrl143)
+		reg_addr += CONTROL_143_SIZE;
+
+	/* control 144 */
+	if (f54->query_36.has_ctrl144)
+		reg_addr += CONTROL_144_SIZE;
+
+	/* control 145 */
+	if (f54->query_36.has_ctrl145)
+		reg_addr += CONTROL_145_SIZE;
+
+	/* control 146 */
+	if (f54->query_36.has_ctrl146)
+		reg_addr += CONTROL_146_SIZE;
+
+	/* control 147 */
+	if (f54->query_38.has_ctrl147)
+		reg_addr += CONTROL_147_SIZE;
+
+	/* control 148 */
+	if (f54->query_38.has_ctrl148)
+		reg_addr += CONTROL_148_SIZE;
+
+	/* control 149 */
+	if (f54->query_38.has_ctrl149) {
+		control->reg_149 = kzalloc(sizeof(*(control->reg_149)),
+				GFP_KERNEL);
+		if (!control->reg_149)
+			goto exit_no_mem;
+		control->reg_149->address = reg_addr;
+		reg_addr += CONTROL_149_SIZE;
+	}
+
+	/* controls 150 to 162 reserved */
+
+	/* control 163 */
+	if (f54->query_40.has_ctrl163_query41)
+		reg_addr += CONTROL_163_SIZE;
+
+	/* control 164 reserved */
+
+	/* control 165 */
+	if (f54->query_40.has_ctrl165_query42)
+		reg_addr += CONTROL_165_SIZE;
+
+	/* control 166 reserved */
+
+	/* control 167 */
+	if (f54->query_40.has_ctrl167)
+		reg_addr += CONTROL_167_SIZE;
+
+	/* controls 168 to 175 reserved */
+
+	/* control 176 */
+	if (f54->query_46.has_ctrl176)
+		reg_addr += CONTROL_176_SIZE;
+
+	/* controls 177 178 reserved */
+
+	/* control 179 */
+	if (f54->query_46.has_ctrl179)
+		reg_addr += CONTROL_179_SIZE;
+
+	/* controls 180 to 187 reserved */
+
+	/* control 188 */
+	if (f54->query_49.has_ctrl188) {
+		control->reg_188 = kzalloc(sizeof(*(control->reg_188)),
+				GFP_KERNEL);
+		if (!control->reg_188)
+			goto exit_no_mem;
+		control->reg_188->address = reg_addr;
+		reg_addr += CONTROL_188_SIZE;
+	}
+
+	return 0;
+
+exit_no_mem:
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to alloc mem for control registers\n",
+			__func__);
+	return -ENOMEM;
+}
+
+/*
+ * test_set_queries() - read the F54 query registers into the f54 handle
+ *
+ * Reads the fixed base query block first, then walks the optional extended
+ * query registers.  The running @offset is the address of the next query
+ * register relative to f54->query_base_addr; it must be advanced for every
+ * register the capability bits report as present, including registers that
+ * are skipped rather than read.  The order of the checks below mirrors the
+ * register layout and must not be changed.
+ *
+ * Returns 0 on success or a negative value from a failed register read.
+ */
+static int test_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->query_base_addr,
+			f54->query.data,
+			sizeof(f54->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f54->query.data);
+
+	/* query 12 */
+	/* base block includes query 12; back up one byte when it is absent */
+	if (f54->query.has_sense_frequency_control == 0)
+		offset -= 1;
+
+	/* query 13 */
+	if (f54->query.has_query13) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_13.data,
+				sizeof(f54->query_13.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 14 */
+	if (f54->query_13.has_ctrl87)
+		offset += 1;
+
+	/* query 15 */
+	if (f54->query.has_query15) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_15.data,
+				sizeof(f54->query_15.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 16 */
+	if (f54->query_15.has_query16) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_16.data,
+				sizeof(f54->query_16.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 17 */
+	if (f54->query_16.has_query17)
+		offset += 1;
+
+	/* query 18 */
+	if (f54->query_16.has_ctrl94_query18)
+		offset += 1;
+
+	/* query 19 */
+	if (f54->query_16.has_ctrl95_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f54->query_15.has_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f54->query_15.has_query21) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_21.data,
+				sizeof(f54->query_21.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if (f54->query_15.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_22.data,
+				sizeof(f54->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f54->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_23.data,
+				sizeof(f54->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 24 */
+	if (f54->query_21.has_query24_data18)
+		offset += 1;
+
+	/* query 25 */
+	if (f54->query_15.has_query25) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_25.data,
+				sizeof(f54->query_25.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 26 */
+	if (f54->query_22.has_ctrl103_query26)
+		offset += 1;
+
+	/* query 27 */
+	if (f54->query_25.has_query27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_27.data,
+				sizeof(f54->query_27.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 28 */
+	if (f54->query_22.has_query28)
+		offset += 1;
+
+	/* query 29 */
+	if (f54->query_27.has_query29) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_29.data,
+				sizeof(f54->query_29.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 30 */
+	if (f54->query_29.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_30.data,
+				sizeof(f54->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 31 */
+	if (f54->query_30.has_ctrl122_query31)
+		offset += 1;
+
+	/* query 32 */
+	if (f54->query_30.has_query32) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_32.data,
+				sizeof(f54->query_32.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 33 */
+	if (f54->query_32.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_33.data,
+				sizeof(f54->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 34 */
+	if (f54->query_32.has_query34)
+		offset += 1;
+
+	/* query 35 */
+	if (f54->query_32.has_query35) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_35.data,
+				sizeof(f54->query_35.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 36 */
+	if (f54->query_33.has_query36) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_36.data,
+				sizeof(f54->query_36.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 37 */
+	if (f54->query_36.has_query37)
+		offset += 1;
+
+	/* query 38 */
+	if (f54->query_36.has_query38) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_38.data,
+				sizeof(f54->query_38.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 39 */
+	if (f54->query_38.has_query39) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_39.data,
+				sizeof(f54->query_39.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 40 */
+	if (f54->query_39.has_query40) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_40.data,
+				sizeof(f54->query_40.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 41 */
+	if (f54->query_40.has_ctrl163_query41)
+		offset += 1;
+
+	/* query 42 */
+	if (f54->query_40.has_ctrl165_query42)
+		offset += 1;
+
+	/* query 43 */
+	if (f54->query_40.has_query43) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_43.data,
+				sizeof(f54->query_43.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 44 45 reserved */
+
+	/* query 46 */
+	if (f54->query_43.has_query46) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_46.data,
+				sizeof(f54->query_46.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 47 */
+	if (f54->query_46.has_query47) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_47.data,
+				sizeof(f54->query_47.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 48 reserved */
+
+	/* query 49 */
+	if (f54->query_47.has_query49) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_49.data,
+				sizeof(f54->query_49.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 50 */
+	if (f54->query_49.has_query50) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_50.data,
+				sizeof(f54->query_50.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 51 */
+	if (f54->query_50.has_query51) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_51.data,
+				sizeof(f54->query_51.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	return 0;
+}
+
+/*
+ * test_f54_set_regs() - record the F54 register base addresses for the
+ * page the function was found on and build its interrupt mask from its
+ * position in the PDT scan.
+ */
+static void test_f54_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count,
+		unsigned char page)
+{
+	unsigned char bit;
+	unsigned char first_bit;
+	unsigned short page_bits = page << 8;
+
+	f54->query_base_addr = fd->query_base_addr | page_bits;
+	f54->control_base_addr = fd->ctrl_base_addr | page_bits;
+	f54->data_base_addr = fd->data_base_addr | page_bits;
+	f54->command_base_addr = fd->cmd_base_addr | page_bits;
+
+	/* index of the interrupt status register covering this function */
+	f54->intr_reg_num = (intr_count + 7) / 8;
+	if (f54->intr_reg_num != 0)
+		f54->intr_reg_num -= 1;
+
+	/* mask of this function's interrupt source bits in that register */
+	f54->intr_mask = 0;
+	first_bit = intr_count % 8;
+	for (bit = first_bit; bit < first_bit + fd->intr_src_count; bit++)
+		f54->intr_mask |= 1 << bit;
+}
+
+/*
+ * test_f55_set_queries() - read the F55 query registers into the f55 handle
+ *
+ * Like test_set_queries(), the running @offset tracks the next query
+ * register relative to f55->query_base_addr and is advanced for optional
+ * registers whether or not they are actually read, so the ordering of the
+ * checks below is significant.  On success the amp-sensor configuration is
+ * cached in the handle for test_f55_init().
+ *
+ * Returns 0 on success or a negative value from a failed register read.
+ */
+static int test_f55_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->query_base_addr,
+			f55->query.data,
+			sizeof(f55->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f55->query.data);
+
+	/* query 3 */
+	if (f55->query.has_single_layer_multi_touch) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_3.data,
+				sizeof(f55->query_3.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 4 */
+	if ((f55->query.has_single_layer_multi_touch) &&
+			(f55->query_3.has_ctrl9))
+		offset += 1;
+
+	/* query 5 */
+	if (f55->query.has_query5) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_5.data,
+				sizeof(f55->query_5.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 6 7 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset += 2;
+
+	/* query 8 */
+	if ((f55->query.has_single_layer_multi_touch) &&
+			f55->query_3.has_ctrl8)
+		offset += 1;
+
+	/* query 9 */
+	if ((f55->query.has_single_layer_multi_touch) &&
+			f55->query_3.has_query9)
+		offset += 1;
+
+	/* queries 10 11 12 13 14 15 16 */
+	if ((f55->query.has_query5) && (f55->query_5.has_basis_function))
+		offset += 7;
+
+	/* query 17 */
+	if ((f55->query.has_query5) && (f55->query_5.has_query17)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_17.data,
+				sizeof(f55->query_17.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 18 */
+	if ((f55->query.has_query5) &&
+			(f55->query_5.has_query17) &&
+			(f55->query_17.has_query18)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_18.data,
+				sizeof(f55->query_18.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if ((f55->query.has_query5) &&
+			(f55->query_5.has_query17) &&
+			(f55->query_17.has_query18) &&
+			(f55->query_18.has_query22)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_22.data,
+				sizeof(f55->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if ((f55->query.has_query5) &&
+			(f55->query_5.has_query17) &&
+			(f55->query_17.has_query18) &&
+			(f55->query_18.has_query22) &&
+			(f55->query_22.has_query23)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_23.data,
+				sizeof(f55->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		/* cache amp-sensor layout for electrode accounting later */
+		f55->amp_sensor = f55->query_23.amp_sensor_enabled;
+		f55->size_of_column2mux = f55->query_23.size_of_column2mux;
+	}
+
+	return 0;
+}
+
+/*
+ * test_f55_init() - read the F55 query registers and the tx/rx sensor
+ * assignment tables, then derive the number of assigned electrodes used
+ * by the F54 reporting code.
+ *
+ * Failures are logged and leave the counts at the defaults set by the
+ * caller; this function has no return value.
+ */
+static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char rx_electrodes = f54->query.num_of_rx_electrodes;
+	unsigned char tx_electrodes = f54->query.num_of_tx_electrodes;
+
+	retval = test_f55_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f55 query registers\n",
+				__func__);
+		return;
+	}
+
+	if (!f55->query.has_sensor_assignment)
+		return;
+
+	f55->tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+	f55->rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+	if (!f55->tx_assignment || !f55->rx_assignment) {
+		/* partial allocations are released by the remove/reset paths */
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for tx/rx assignment\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_TX_MAPPING_OFFSET,
+			f55->tx_assignment,
+			tx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f55 tx assignment\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_RX_MAPPING_OFFSET,
+			f55->rx_assignment,
+			rx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f55 rx assignment\n",
+				__func__);
+		return;
+	}
+
+	/* 0xff marks an unassigned electrode slot */
+	f54->tx_assigned = 0;
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		if (f55->tx_assignment[ii] != 0xff)
+			f54->tx_assigned++;
+	}
+
+	f54->rx_assigned = 0;
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		if (f55->rx_assignment[ii] != 0xff)
+			f54->rx_assigned++;
+	}
+
+	if (f55->amp_sensor) {
+		f54->tx_assigned = f55->size_of_column2mux;
+		f54->rx_assigned /= 2;
+	}
+}
+
+/*
+ * test_f55_set_regs() - allocate the F55 handle and record its register
+ * base addresses for the page the function was found on.
+ */
+static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	unsigned short page_bits = page << 8;
+
+	f55 = kzalloc(sizeof(*f55), GFP_KERNEL);
+	if (!f55) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f55\n",
+				__func__);
+		return;
+	}
+
+	f55->query_base_addr = fd->query_base_addr | page_bits;
+	f55->control_base_addr = fd->ctrl_base_addr | page_bits;
+	f55->data_base_addr = fd->data_base_addr | page_bits;
+	f55->command_base_addr = fd->cmd_base_addr | page_bits;
+}
+
+/*
+ * test_scan_pdt() - walk the Page Description Table looking for F54
+ * (test reporting) and, optionally, F55 (sensor configuration).
+ *
+ * Registers are scanned page by page, with PDT entries read downwards
+ * from PDT_START.  The interrupt source count of every function seen
+ * before F54 is accumulated so the F54 interrupt mask can be placed
+ * correctly by test_f54_set_regs().
+ *
+ * Returns 0 on success (F55 is optional), -EINVAL if F54 is not found,
+ * or a negative value from a failed register read.
+ */
+static int test_scan_pdt(void)
+{
+	int retval;
+	unsigned char intr_count = 0;
+	unsigned char page;
+	unsigned short addr;
+	bool f54found = false;
+	bool f55found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			/* fold the page number into the register address */
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* strip the page bits again for the loop bound */
+			addr &= ~(MASK_8BIT << 8);
+
+			/* fn_number 0 marks the end of this page's PDT */
+			if (!rmi_fd.fn_number)
+				break;
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F54:
+				test_f54_set_regs(rmi4_data,
+						&rmi_fd, intr_count, page);
+				f54found = true;
+				break;
+			case SYNAPTICS_RMI4_F55:
+				test_f55_set_regs(rmi4_data,
+						&rmi_fd, page);
+				f55found = true;
+				break;
+			default:
+				break;
+			}
+
+			if (f54found && f55found)
+				goto pdt_done;
+
+			intr_count += rmi_fd.intr_src_count;
+		}
+	}
+
+	if (!f54found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F54\n",
+				__func__);
+		return -EINVAL;
+	}
+
+pdt_done:
+	return 0;
+}
+
+/*
+ * Attention handler: queue the report worker when one of F54's interrupt
+ * source bits is set in the asserted interrupt mask.
+ */
+static void synaptics_rmi4_test_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!f54 || !(f54->intr_mask & intr_mask))
+		return;
+
+	queue_work(f54->test_report_workqueue, &f54->test_report_work);
+}
+
+/*
+ * synaptics_rmi4_test_init() - allocate and initialize the F54 test
+ * reporting handle: scan the PDT, read query registers, set up control
+ * register bookkeeping, sysfs entries, the report workqueue and the
+ * watchdog timer.
+ *
+ * Returns 0 on success (including when a handle already exists) or a
+ * negative errno; on failure everything allocated here is released.
+ */
+static int synaptics_rmi4_test_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (f54) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	f54 = kzalloc(sizeof(*f54), GFP_KERNEL);
+	if (!f54) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f54\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	f54->rmi4_data = rmi4_data;
+
+	f55 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	/* defaults; test_f55_init() refines these when F55 is present */
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up f54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	retval = test_set_sysfs();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs entries\n",
+				__func__);
+		goto exit_sysfs;
+	}
+
+	f54->test_report_workqueue =
+			create_singlethread_workqueue("test_report_workqueue");
+	if (!f54->test_report_workqueue) {
+		/* queue_work() in the attn handler would deref NULL */
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create test report workqueue\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_remove_sysfs;
+	}
+	INIT_WORK(&f54->test_report_work, test_report_work);
+
+	hrtimer_init(&f54->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	f54->watchdog.function = test_get_report_timeout;
+	INIT_WORK(&f54->timeout_work, test_timeout_work);
+
+	mutex_init(&f54->status_mutex);
+	f54->status = STATUS_IDLE;
+
+	return 0;
+
+exit_remove_sysfs:
+	test_remove_sysfs();
+
+exit_sysfs:
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+	}
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	kfree(f55);
+	f55 = NULL;
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_test_remove() - tear down the test reporting module.
+ *
+ * The watchdog and worker are stopped before any memory is released so
+ * no callback can run against freed state.  Always signals
+ * test_remove_complete so module unload can finish.
+ */
+static void synaptics_rmi4_test_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!f54)
+		goto exit;
+
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	/* data_buffer_size doubles as a "report_data was allocated" flag */
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	complete(&test_remove_complete);
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_test_reset() - re-initialize the test reporting module
+ * after a device reset.
+ *
+ * If no handle exists yet this is a plain init.  Otherwise the F55 state
+ * and control register bookkeeping are rebuilt by re-scanning the PDT and
+ * re-reading the query/control registers.  On any failure the whole
+ * handle (workqueue, sysfs, buffers) is torn down, leaving f54/f55 NULL.
+ */
+static void synaptics_rmi4_test_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (!f54) {
+		synaptics_rmi4_test_init(rmi4_data);
+		return;
+	}
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	kfree(f55);
+	f55 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	/* defaults; test_f55_init() refines these when F55 is present */
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up f54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	f54->status = STATUS_IDLE;
+
+	return;
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	/* full teardown: stop the worker before freeing its state */
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+	return;
+}
+
+/* Hooks registered with the DSX core for the test reporting module */
+static struct synaptics_rmi4_exp_fn test_module = {
+	.fn_type = RMI_TEST_REPORTING,
+	.init = synaptics_rmi4_test_init,
+	.remove = synaptics_rmi4_test_remove,
+	.reset = synaptics_rmi4_test_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_test_attn,
+};
+
+/* Register the test reporting module with the DSX core on load. */
+static int __init rmi4_test_module_init(void)
+{
+	synaptics_rmi4_new_function(&test_module, true);
+
+	return 0;
+}
+
+/* Deregister and wait for the core to finish tearing the module down. */
+static void __exit rmi4_test_module_exit(void)
+{
+	synaptics_rmi4_new_function(&test_module, false);
+
+	wait_for_completion(&test_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_test_module_init);
+module_exit(rmi4_test_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Test Reporting Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_video.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_video.c
new file mode 100644
index 0000000..bfd03cf
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_video.c
@@ -0,0 +1,417 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "video"
+
+/*
+#define RMI_DCS_SUSPEND_RESUME
+*/
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static int video_send_dcs_command(unsigned char command_opcode);
+
+/*
+ * F38 command register block (7 bytes).  The packed bitfield layout
+ * mirrors the device register map; "data" provides raw byte access for
+ * the register write.  NOTE(review): field meanings assumed from names —
+ * confirm against the F38 register specification.
+ */
+struct f38_command {
+	union {
+		struct {
+			unsigned char command_opcode;
+			unsigned char register_access:1;
+			unsigned char gamma_page:1;
+			unsigned char f38_control1_b2__7:6;
+			unsigned char parameter_field_1;
+			unsigned char parameter_field_2;
+			unsigned char parameter_field_3;
+			unsigned char parameter_field_4;
+			unsigned char send_to_dcs:1;
+			unsigned char f38_command6_b1__7:7;
+		} __packed;
+		unsigned char data[7];
+	};
+};
+
+/* Per-device state for the video (F38/DCS) module */
+struct synaptics_rmi4_video_handle {
+	unsigned char param;		/* parameter byte staged via sysfs */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;	/* F38 command register */
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;	/* "video" directory under the input dev */
+};
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+/* A DCS command plus the delay (ms) to wait after sending it */
+struct dcs_command {
+	unsigned char command;
+	unsigned int wait_time;
+};
+
+/* Suspend: 0x28 / 0x10 (MIPI DCS display off, enter sleep) */
+static struct dcs_command suspend_sequence[] = {
+	{
+		.command = 0x28,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x10,
+		.wait_time = 200,
+	},
+};
+
+/* Resume: 0x11 / 0x29 (MIPI DCS exit sleep, display on) */
+static struct dcs_command resume_sequence[] = {
+	{
+		.command = 0x11,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x29,
+		.wait_time = 200,
+	},
+};
+#endif
+
+/* Write-only sysfs attributes exposed under the "video" directory */
+static struct device_attribute attrs[] = {
+	__ATTR(dcs_write, 0220,
+			NULL,
+			video_sysfs_dcs_write_store),
+	__ATTR(param, 0220,
+			NULL,
+			video_sysfs_param_store),
+};
+
+static struct synaptics_rmi4_video_handle *video;
+
+DECLARE_COMPLETION(video_remove_complete);
+
+/*
+ * Sysfs "dcs_write" store: parse a hex DCS opcode and send it to the
+ * panel via the F38 command register.
+ */
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	/* kstrtouint rejects trailing garbage, unlike the old sscanf("%x") */
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	/* opcode is a single byte; refuse values that would be truncated */
+	if (input > 0xff)
+		return -EINVAL;
+
+	retval = video_send_dcs_command((unsigned char)input);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs "param" store: stage the hex parameter byte used by the next
+ * DCS command sent through video_send_dcs_command().
+ */
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint rejects trailing garbage, unlike the old sscanf("%x") */
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	/* parameter is a single byte; refuse values that would be truncated */
+	if (input > 0xff)
+		return -EINVAL;
+
+	video->param = (unsigned char)input;
+
+	return count;
+}
+
+/*
+ * video_send_dcs_command() - issue a DCS command through the F38 command
+ * register, consuming the parameter byte previously staged via the
+ * "param" sysfs attribute.
+ *
+ * Returns 0 on success or a negative value from the register write.
+ */
+static int video_send_dcs_command(unsigned char command_opcode)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+	struct f38_command command = {
+		.command_opcode = command_opcode,
+		.parameter_field_1 = video->param,
+		.send_to_dcs = 1,
+	};
+
+	/* the staged parameter is one-shot */
+	video->param = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			video->command_base_addr,
+			command.data,
+			sizeof(command.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to send DCS command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * video_scan_pdt() - walk the Page Description Table looking for F38 and
+ * record its register base addresses.
+ *
+ * Returns 0 on success, -EINVAL if F38 is not present, or a negative
+ * value from a failed register read.
+ */
+static int video_scan_pdt(void)
+{
+	int retval;
+	unsigned char page;
+	unsigned short addr;
+	bool f38_found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			/* fold the page number into the register address */
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* strip the page bits again for the loop bound */
+			addr &= ~(MASK_8BIT << 8);
+
+			/* fn_number 0 marks the end of this page's PDT */
+			if (!rmi_fd.fn_number)
+				break;
+
+			if (rmi_fd.fn_number == SYNAPTICS_RMI4_F38) {
+				f38_found = true;
+				goto f38_found;
+			}
+		}
+	}
+
+	if (!f38_found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F38\n",
+				__func__);
+		return -EINVAL;
+	}
+
+f38_found:
+	/* rmi_fd and page still describe the matching PDT entry here */
+	video->query_base_addr = rmi_fd.query_base_addr | (page << 8);
+	video->control_base_addr = rmi_fd.ctrl_base_addr | (page << 8);
+	video->data_base_addr = rmi_fd.data_base_addr | (page << 8);
+	video->command_base_addr = rmi_fd.cmd_base_addr | (page << 8);
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_video_init() - allocate the video handle, locate F38 in
+ * the PDT and expose the dcs_write/param sysfs attributes.
+ *
+ * A missing F38 function is treated as "module not applicable" and
+ * reported as success with no handle.  Returns 0 on success or a
+ * negative errno on allocation/sysfs failure.
+ */
+static int synaptics_rmi4_video_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (video) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
+	if (!video) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for video\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	video->rmi4_data = rmi4_data;
+
+	retval = video_scan_pdt();
+	if (retval < 0) {
+		/* no F38 on this device: not an error, just no video module */
+		retval = 0;
+		goto exit_scan_pdt;
+	}
+
+	video->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!video->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_sysfs_dir;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(video->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+exit_sysfs_attrs:
+	/*
+	 * Remove only the attributes created so far.  The previous
+	 * "for (attr_count--; attr_count >= 0; ...)" loop never terminated:
+	 * attr_count is unsigned, so the condition is always true and the
+	 * index wraps past zero into out-of-bounds reads of attrs[].
+	 */
+	while (attr_count > 0)
+		sysfs_remove_file(video->sysfs_dir, &attrs[--attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+exit_sysfs_dir:
+exit_scan_pdt:
+	kfree(video);
+	video = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the video module's sysfs entries and release the handle.
+ * Always signals video_remove_complete so module unload can finish.
+ */
+static void synaptics_rmi4_video_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+
+	if (video) {
+		for (ii = 0; ii < ARRAY_SIZE(attrs); ii++)
+			sysfs_remove_file(video->sysfs_dir, &attrs[ii].attr);
+
+		kobject_put(video->sysfs_dir);
+
+		kfree(video);
+		video = NULL;
+	}
+
+	complete(&video_remove_complete);
+}
+
+/* Re-create the video handle after a device reset if it is missing. */
+static void synaptics_rmi4_video_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (video)
+		return;
+
+	synaptics_rmi4_video_init(rmi4_data);
+}
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+/*
+ * video_send_dcs_sequence() - send a list of DCS commands, honouring the
+ * post-command delay attached to each entry.  Stops at the first failed
+ * command.  No-op if the video handle does not exist.
+ */
+static void video_send_dcs_sequence(const struct dcs_command *sequence,
+		unsigned char num_of_cmds)
+{
+	int retval;
+	unsigned char ii;
+
+	if (!video)
+		return;
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		retval = video_send_dcs_command(sequence[ii].command);
+		if (retval < 0)
+			return;
+		msleep(sequence[ii].wait_time);
+	}
+}
+
+/* Suspend: display off then enter sleep (see suspend_sequence) */
+static void synaptics_rmi4_video_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	video_send_dcs_sequence(suspend_sequence, ARRAY_SIZE(suspend_sequence));
+}
+
+/* Resume: exit sleep then display on (see resume_sequence) */
+static void synaptics_rmi4_video_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	video_send_dcs_sequence(resume_sequence, ARRAY_SIZE(resume_sequence));
+}
+#endif
+
+/* Hooks registered with the DSX core for the video module */
+static struct synaptics_rmi4_exp_fn video_module = {
+	.fn_type = RMI_VIDEO,
+	.init = synaptics_rmi4_video_init,
+	.remove = synaptics_rmi4_video_remove,
+	.reset = synaptics_rmi4_video_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+#ifdef RMI_DCS_SUSPEND_RESUME
+	.suspend = synaptics_rmi4_video_suspend,
+	.resume = synaptics_rmi4_video_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.late_resume = NULL,
+	.attn = NULL,
+};
+
+/* Register the video module with the DSX core on load. */
+static int __init rmi4_video_module_init(void)
+{
+	synaptics_rmi4_new_function(&video_module, true);
+
+	return 0;
+}
+
+/* Deregister and wait for the core to finish tearing the module down. */
+static void __exit rmi4_video_module_exit(void)
+{
+	synaptics_rmi4_new_function(&video_module, false);
+
+	wait_for_completion(&video_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_video_module_init);
+module_exit(rmi4_video_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Video Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index d8d9011..85df514 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -56,6 +56,7 @@
#include <linux/ktime.h>
#include <trace/events/iommu.h>
#include <linux/notifier.h>
+#include <dt-bindings/arm/arm-smmu.h>
#include <linux/amba/bus.h>
#include <soc/qcom/msm_tz_smmu.h>
@@ -431,16 +432,6 @@
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
u32 features;
-#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
-#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
-#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
-#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
-#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
-#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
-#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 6)
-#define ARM_SMMU_OPT_MMU500_ERRATA1 (1 << 7)
-#define ARM_SMMU_OPT_STATIC_CB (1 << 8)
-#define ARM_SMMU_OPT_HALT (1 << 9)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -448,6 +439,7 @@
u32 num_context_banks;
u32 num_s2_context_banks;
DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+ DECLARE_BITMAP(secure_context_map, ARM_SMMU_MAX_CBS);
atomic_t irptndx;
u32 num_mapping_groups;
@@ -627,7 +619,7 @@
return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}
-static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
+static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
{
int ret;
int scm_ret = 0;
@@ -635,7 +627,7 @@
if (!arm_smmu_is_static_cb(smmu))
return 0;
- ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
+ ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
if (ret || scm_ret) {
pr_err("scm call IOMMU_SECURE_CFG failed\n");
return -EINVAL;
@@ -1555,6 +1547,19 @@
return IRQ_HANDLED;
}
+static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
+ struct iommu_fwspec *fwspec)
+{
+ int i, idx;
+
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (smmu->s2crs[idx].attach_count)
+ return true;
+ }
+
+ return false;
+}
+
static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
@@ -1965,6 +1970,10 @@
/* Publish page table ops for map/unmap */
smmu_domain->pgtbl_ops = pgtbl_ops;
+ if (arm_smmu_is_slave_side_secure(smmu_domain) &&
+ !arm_smmu_master_attached(smmu, dev->iommu_fwspec))
+ arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
+
return 0;
out_clear_smmu:
@@ -2033,6 +2042,11 @@
arm_smmu_unassign_table(smmu_domain);
arm_smmu_secure_domain_unlock(smmu_domain);
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+	/* Since the nonsecure context bank index is set to zero anyway,
+	 * directly clear the secure cb bitmap here.
+	 */
+ if (arm_smmu_is_slave_side_secure(smmu_domain))
+ __arm_smmu_free_bitmap(smmu->secure_context_map, cfg->cbndx);
arm_smmu_power_off(smmu->pwr);
arm_smmu_domain_reinit(smmu_domain);
@@ -2758,13 +2772,24 @@
bool arm_smmu_skip_write(void __iomem *addr)
{
struct arm_smmu_device *smmu;
+ int cb;
smmu = arm_smmu_get_by_addr(addr);
- if (smmu &&
- ((unsigned long)addr & (smmu->size - 1)) >= (smmu->size >> 1))
- return false;
- else
+
+ /* Skip write if smmu not available by now */
+ if (!smmu)
return true;
+
+ /* Do not write to global space */
+ if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
+ return true;
+
+ /* Finally skip writing to secure CB */
+ cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
+ if (test_bit(cb, smmu->secure_context_map))
+ return true;
+
+ return false;
}
#endif
@@ -3698,9 +3723,13 @@
cb = smmu->s2crs[idx].cbndx;
}
- if (cb >= 0 && arm_smmu_is_static_cb(smmu))
+ if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
smmu_domain->slave_side_secure = true;
+ if (arm_smmu_is_slave_side_secure(smmu_domain))
+ bitmap_set(smmu->secure_context_map, cb, 1);
+ }
+
if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
mutex_unlock(&smmu->stream_map_mutex);
return __arm_smmu_alloc_bitmap(smmu->context_map,
@@ -3875,7 +3904,7 @@
if (event == REGULATOR_EVENT_PRE_DISABLE)
qsmmuv2_halt(smmu);
else if (event == REGULATOR_EVENT_ENABLE) {
- if (arm_smmu_restore_sec_cfg(smmu))
+ if (arm_smmu_restore_sec_cfg(smmu, 0))
goto power_off;
qsmmuv2_resume(smmu);
}
@@ -4028,7 +4057,7 @@
bool cttw_dt, cttw_reg;
int i;
- if (arm_smmu_restore_sec_cfg(smmu))
+ if (arm_smmu_restore_sec_cfg(smmu, 0))
return -ENODEV;
dev_dbg(smmu->dev, "probing hardware configuration...\n");
@@ -4514,7 +4543,8 @@
if (arm_smmu_power_on(smmu->pwr))
return -EINVAL;
- if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
+ if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS) ||
+ !bitmap_empty(smmu->secure_context_map, ARM_SMMU_MAX_CBS))
dev_err(&pdev->dev, "removing device with active domains!\n");
idr_destroy(&smmu->asid_idr);
@@ -5282,6 +5312,9 @@
data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
smmu->archdata = data;
+ if (arm_smmu_is_static_cb(smmu))
+ return 0;
+
ret = qsmmuv500_parse_errata1(smmu);
if (ret)
return ret;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 22a708e..25b85ab 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,7 @@
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
-#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
+#if defined(CONFIG_IOMMU_TESTS)
static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
@@ -170,6 +170,8 @@
u64 phys;
size_t len;
struct list_head list;
+ struct mutex clk_lock;
+ unsigned int clk_count;
};
static int iommu_debug_build_phoney_sg_table(struct device *dev,
@@ -1195,6 +1197,7 @@
return -ENOMEM;
}
+ val = VMID_CP_CAMERA;
if (is_secure && iommu_domain_set_attr(ddev->domain,
DOMAIN_ATTR_SECURE_VMID,
&val)) {
@@ -1485,6 +1488,10 @@
ssize_t retval;
size_t buflen;
+ if (kptr_restrict != 0) {
+ pr_err("kptr_restrict needs to be disabled.\n");
+ return -EPERM;
+ }
if (!dev->archdata.mapping) {
pr_err("No mapping. Did you already attach?\n");
return -EINVAL;
@@ -1552,6 +1559,10 @@
ssize_t retval;
size_t buflen;
+ if (kptr_restrict != 0) {
+ pr_err("kptr_restrict needs to be disabled.\n");
+ return -EPERM;
+ }
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
return -EINVAL;
@@ -1600,6 +1611,10 @@
ssize_t retval;
size_t buflen;
+ if (kptr_restrict != 0) {
+ pr_err("kptr_restrict needs to be disabled.\n");
+ return -EPERM;
+ }
if (!dev->archdata.mapping) {
pr_err("No mapping. Did you already attach?\n");
return -EINVAL;
@@ -2046,20 +2061,34 @@
return -EFAULT;
}
+ mutex_lock(&ddev->clk_lock);
switch (buf) {
case '0':
+ if (ddev->clk_count == 0) {
+ dev_err(dev, "Config clocks already disabled\n");
+ break;
+ }
+
+ if (--ddev->clk_count > 0)
+ break;
+
dev_err(dev, "Disabling config clocks\n");
iommu_disable_config_clocks(ddev->domain);
break;
case '1':
+ if (ddev->clk_count++ > 0)
+ break;
+
dev_err(dev, "Enabling config clocks\n");
if (iommu_enable_config_clocks(ddev->domain))
dev_err(dev, "Failed!\n");
break;
default:
dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+ mutex_unlock(&ddev->clk_lock);
return -EINVAL;
}
+ mutex_unlock(&ddev->clk_lock);
return count;
}
@@ -2109,6 +2138,9 @@
if (!of_find_property(dev->of_node, "iommus", NULL))
return 0;
+ if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
+ return 0;
+
/* Hold a reference count */
if (!iommu_group_get(dev))
return 0;
@@ -2116,6 +2148,7 @@
ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
if (!ddev)
return -ENODEV;
+ mutex_init(&ddev->clk_lock);
ddev->dev = dev;
dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
if (!dir) {
diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig
index b892109..0038047 100644
--- a/drivers/irqchip/qcom/Kconfig
+++ b/drivers/irqchip/qcom/Kconfig
@@ -20,3 +20,10 @@
default y if ARCH_SDM670
help
QTI Power Domain Controller for SDM670
+
+config QTI_PDC_SDXPOORWILLS
+ bool "QTI PDC SDxPOORWILLS"
+ select QTI_PDC
+ default y if ARCH_SDXPOORWILLS
+ help
+ QTI Power Domain Controller for SDxPoorwills
diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile
index 5e99040..c4ff9ef 100644
--- a/drivers/irqchip/qcom/Makefile
+++ b/drivers/irqchip/qcom/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_QTI_PDC) += pdc.o
obj-$(CONFIG_QTI_PDC_SDM845) += pdc-sdm845.o
obj-$(CONFIG_QTI_PDC_SDM670) += pdc-sdm670.o
+obj-$(CONFIG_QTI_PDC_SDXPOORWILLS) += pdc-sdxpoorwills.o
diff --git a/drivers/irqchip/qcom/pdc-sdxpoorwills.c b/drivers/irqchip/qcom/pdc-sdxpoorwills.c
new file mode 100644
index 0000000..5bbca03
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc-sdxpoorwills.c
@@ -0,0 +1,76 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irqchip.h>
+#include "pdc.h"
+
+static struct pdc_pin sdxpoorwills_data[] = {
+ {0, 179}, /* rpmh_wake */
+ {1, 180}, /* ee0_apps_hlos_spmi_periph_irq */
+ {2, 181}, /* ee1_apps_trustzone_spmi_periph_irq */
+ {3, 182}, /* secure_wdog_expired */
+ {4, 183}, /* secure_wdog_bark_irq */
+ {5, 184}, /* aop_wdog_expired_irq */
+ {8, 187}, /* aoss_pmic_arb_mpu_xpu_summary_irq */
+ {9, 188}, /* rpmh_wake */
+ {12, 191}, /* pdc_apps_epcb_timeout_summary_irq */
+ {13, 192}, /* spmi_protocol_irq */
+ {14, 193}, /* tsense0_tsense_max_min_int */
+ {15, 194}, /* apps_pdc_irq_in_15 */
+ {16, 195}, /* tsense0_upper_lower_intr */
+ {17, 196}, /* apps_pdc_irq_in_17 */
+ {18, 197}, /* tsense0_critical_intr */
+ {19, 198}, /* apps_pdc_irq_in_19 */
+ {20, 199}, /* apps_pdc.gp_irq_mux[0] */
+ {21, 200}, /* apps_pdc.gp_irq_mux[1] */
+ {22, 201}, /* apps_pdc.gp_irq_mux[2] */
+ {23, 202}, /* apps_pdc.gp_irq_mux[3] */
+ {24, 203}, /* apps_pdc.gp_irq_mux[4] */
+ {25, 204}, /* apps_pdc.gp_irq_mux[5] */
+ {26, 205}, /* apps_pdc.gp_irq_mux[6] */
+ {27, 206}, /* apps_pdc.gp_irq_mux[7] */
+ {28, 207}, /* apps_pdc.gp_irq_mux[8] */
+ {29, 208}, /* apps_pdc.gp_irq_mux[9] */
+ {30, 209}, /* apps_pdc.gp_irq_mux[10] */
+ {31, 210}, /* apps_pdc.gp_irq_mux[11] */
+ {32, 211}, /* apps_pdc.gp_irq_mux[12] */
+ {33, 212}, /* apps_pdc.gp_irq_mux[13] */
+ {34, 213}, /* apps_pdc.gp_irq_mux[14] */
+ {35, 214}, /* apps_pdc.gp_irq_mux[15] */
+ {36, 215}, /* apps_pdc.gp_irq_mux[16] */
+ {37, 216}, /* apps_pdc.gp_irq_mux[17] */
+ {38, 217}, /* apps_pdc.gp_irq_mux[18] */
+ {39, 218}, /* apps_pdc.gp_irq_mux[19] */
+ {40, 219}, /* apps_pdc.gp_irq_mux[20] */
+ {41, 220}, /* apps_pdc.gp_irq_mux[21] */
+ {42, 221}, /* apps_pdc.gp_irq_mux[22] */
+ {43, 222}, /* apps_pdc.gp_irq_mux[23] */
+ {44, 223}, /* apps_pdc.gp_irq_mux[24] */
+ {45, 224}, /* apps_pdc.gp_irq_mux[25] */
+ {46, 225}, /* apps_pdc.gp_irq_mux[26] */
+ {47, 226}, /* apps_pdc.gp_irq_mux[27] */
+ {48, 227}, /* apps_pdc.gp_irq_mux[28] */
+ {49, 228}, /* apps_pdc.gp_irq_mux[29] */
+ {50, 229}, /* apps_pdc.gp_irq_mux[30] */
+ {51, 230}, /* apps_pdc.gp_irq_mux[31] */
+ {-1}
+};
+
+static int __init qcom_pdc_gic_init(struct device_node *node,
+		struct device_node *parent)
+{
+	pr_info("PDC sdxpoorwills initialized\n");
+	return qcom_pdc_init(node, parent, sdxpoorwills_data);
+}
+
+IRQCHIP_DECLARE(pdc_sdxpoorwills, "qcom,pdc-sdxpoorwills", qcom_pdc_gic_init);
diff --git a/drivers/irqchip/qcom/pdc.c b/drivers/irqchip/qcom/pdc.c
index 923552f..f7284bd 100644
--- a/drivers/irqchip/qcom/pdc.c
+++ b/drivers/irqchip/qcom/pdc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -95,6 +96,20 @@
return 0;
}
+static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool *state)
+{
+ return d->parent_data->chip->irq_get_irqchip_state(d,
+ which, state);
+}
+
+static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool value)
+{
+ return d->parent_data->chip->irq_set_irqchip_state(d,
+ which, value);
+}
+
static void qcom_pdc_gic_mask(struct irq_data *d)
{
pdc_enable_intr(d, false);
@@ -220,6 +235,8 @@
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
+ .irq_get_irqchip_state = qcom_pdc_gic_get_irqchip_state,
+ .irq_set_irqchip_state = qcom_pdc_gic_set_irqchip_state,
};
static int qcom_pdc_translate(struct irq_domain *d,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 26e03ba..787bda3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -625,6 +625,15 @@
To compile this driver as a module, choose 'm' here: the module
will be called leds-powernv.
+config LEDS_QTI_TRI_LED
+ tristate "LED support for Qualcomm Technologies, Inc. TRI_LED"
+ depends on LEDS_CLASS && MFD_SPMI_PMIC && PWM && OF
+ help
+ This driver supports the TRI_LED module found in Qualcomm
+ Technologies, Inc. PMIC chips. TRI_LED supports 3 LED drivers
+ at max and each is controlled by a PWM channel used for dimming
+ or blinking.
+
config LEDS_SYSCON
bool "LED support for LEDs on system controllers"
depends on LEDS_CLASS=y
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 5514391..e9eaa50 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -61,6 +61,7 @@
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
+obj-$(CONFIG_LEDS_QTI_TRI_LED) += leds-qti-tri-led.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
@@ -72,8 +73,8 @@
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o
-obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o
-obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o
+obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o leds-qpnp-flash-common.o
+obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o leds-qpnp-flash-common.o
obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o
obj-$(CONFIG_LEDS_QPNP_HAPTICS) += leds-qpnp-haptics.o
obj-$(CONFIG_LEDS_QPNP_VIBRATOR_LDO) += leds-qpnp-vibrator-ldo.o
diff --git a/drivers/leds/leds-qpnp-flash-common.c b/drivers/leds/leds-qpnp-flash-common.c
new file mode 100644
index 0000000..5aed910
--- /dev/null
+++ b/drivers/leds/leds-qpnp-flash-common.c
@@ -0,0 +1,16 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/leds-qpnp-flash.h>
+
+int (*qpnp_flash_led_prepare)(struct led_trigger *trig, int options,
+ int *max_current);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 1a2aea9..f3f9a1a 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -99,11 +99,8 @@
#define VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us) (val_us / 8)
#define VPH_DROOP_HYST_MV_TO_VAL(val_mv) (val_mv / 25)
-#define VPH_DROOP_THRESH_MV_TO_VAL(val_mv) ((val_mv / 100) - 25)
#define VPH_DROOP_THRESH_VAL_TO_UV(val) ((val + 25) * 100000)
#define MITIGATION_THRSH_MA_TO_VAL(val_ma) (val_ma / 100)
-#define CURRENT_MA_TO_REG_VAL(curr_ma, ires_ua) ((curr_ma * 1000) / ires_ua - 1)
-#define SAFETY_TMR_TO_REG_VAL(duration_ms) ((duration_ms / 10) - 1)
#define THERMAL_HYST_TEMP_TO_VAL(val, divisor) (val / divisor)
#define FLASH_LED_ISC_WARMUP_DELAY_SHIFT 6
@@ -317,6 +314,14 @@
FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
};
+static inline int get_current_reg_code(int target_curr_ma, int ires_ua)
+{
+ if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000)))
+ return 0;
+
+ return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1;
+}
+
static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
{
int rc;
@@ -542,7 +547,7 @@
return rc;
if (led->pdata->led1n2_iclamp_low_ma) {
- val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_low_ma,
+ val = get_current_reg_code(led->pdata->led1n2_iclamp_low_ma,
led->fnode[LED1].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base),
@@ -552,7 +557,7 @@
}
if (led->pdata->led1n2_iclamp_mid_ma) {
- val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_mid_ma,
+ val = get_current_reg_code(led->pdata->led1n2_iclamp_mid_ma,
led->fnode[LED1].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base),
@@ -562,7 +567,7 @@
}
if (led->pdata->led3_iclamp_low_ma) {
- val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_low_ma,
+ val = get_current_reg_code(led->pdata->led3_iclamp_low_ma,
led->fnode[LED3].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED3_ICLAMP_LOW(led->base),
@@ -572,7 +577,7 @@
}
if (led->pdata->led3_iclamp_mid_ma) {
- val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_mid_ma,
+ val = get_current_reg_code(led->pdata->led3_iclamp_mid_ma,
led->fnode[LED3].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED3_ICLAMP_MID(led->base),
@@ -992,7 +997,7 @@
}
fnode->current_ma = prgm_current_ma;
fnode->cdev.brightness = prgm_current_ma;
- fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
+ fnode->current_reg_val = get_current_reg_code(prgm_current_ma,
fnode->ires_ua);
fnode->led_on = prgm_current_ma != 0;
@@ -1103,10 +1108,11 @@
return rc;
}
- /* Iterate over all leds for this switch node */
+ /* Iterate over all active leds for this switch node */
val = 0;
for (i = 0; i < led->num_fnodes; i++)
- if (snode->led_mask & BIT(led->fnode[i].id))
+ if (led->fnode[i].led_on &&
+ snode->led_mask & BIT(led->fnode[i].id))
val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
@@ -1210,7 +1216,7 @@
return 0;
}
-int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+static int qpnp_flash_led_prepare_v2(struct led_trigger *trig, int options,
int *max_current)
{
struct led_classdev *led_cdev;
@@ -1430,6 +1436,22 @@
return atomic_notifier_chain_unregister(&irq_notifier_list, nb);
}
+static inline u8 get_safety_timer_code(u32 duration_ms)
+{
+ if (!duration_ms)
+ return 0;
+
+ return (duration_ms / 10) - 1;
+}
+
+static inline u8 get_vph_droop_thresh_code(u32 val_mv)
+{
+ if (!val_mv)
+ return 0;
+
+ return (val_mv / 100) - 25;
+}
+
static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
struct flash_node_data *fnode, struct device_node *node)
{
@@ -1521,8 +1543,9 @@
fnode->duration = FLASH_LED_SAFETY_TMR_DISABLED;
rc = of_property_read_u32(node, "qcom,duration-ms", &val);
if (!rc) {
- fnode->duration = (u8)(SAFETY_TMR_TO_REG_VAL(val) |
- FLASH_LED_SAFETY_TMR_ENABLE);
+ fnode->duration = get_safety_timer_code(val);
+ if (fnode->duration)
+ fnode->duration |= FLASH_LED_SAFETY_TMR_ENABLE;
} else if (rc == -EINVAL) {
if (fnode->type == FLASH_LED_TYPE_FLASH) {
pr_err("Timer duration is required for flash LED\n");
@@ -1968,7 +1991,7 @@
rc = of_property_read_u32(node, "qcom,vph-droop-threshold-mv", &val);
if (!rc) {
led->pdata->vph_droop_threshold =
- VPH_DROOP_THRESH_MV_TO_VAL(val);
+ get_vph_droop_thresh_code(val);
} else if (rc != -EINVAL) {
pr_err("Unable to read VPH droop threshold, rc=%d\n", rc);
return rc;
@@ -2194,6 +2217,7 @@
if (!led->pdata)
return -ENOMEM;
+ qpnp_flash_led_prepare = qpnp_flash_led_prepare_v2;
rc = qpnp_flash_led_parse_common_dt(led, node);
if (rc < 0) {
pr_err("Failed to parse common flash LED device tree\n");
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
index 3b07af8..ce2b055 100644
--- a/drivers/leds/leds-qpnp-flash.c
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1207,7 +1207,7 @@
return rc;
}
-int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+static int qpnp_flash_led_prepare_v1(struct led_trigger *trig, int options,
int *max_current)
{
struct led_classdev *led_cdev = trigger_to_lcdev(trig);
@@ -2468,6 +2468,7 @@
led->pdev = pdev;
led->current_addr = FLASH_LED0_CURRENT(led->base);
led->current2_addr = FLASH_LED1_CURRENT(led->base);
+ qpnp_flash_led_prepare = qpnp_flash_led_prepare_v1;
led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL);
if (!led->pdata)
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index ebdff87..fad36ea 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -1,5 +1,4 @@
-/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation.
- * All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -275,6 +274,7 @@
* @ last_rate_cfg - Last rate config updated
* @ wave_rep_cnt - waveform repeat count
* @ wave_s_rep_cnt - waveform sample repeat count
+ * @ wf_samp_len - waveform sample length
* @ ext_pwm_freq_khz - external pwm frequency in KHz
* @ ext_pwm_dtest_line - DTEST line for external pwm
* @ status_flags - status
@@ -330,6 +330,7 @@
u16 last_rate_cfg;
u32 wave_rep_cnt;
u32 wave_s_rep_cnt;
+ u32 wf_samp_len;
u32 ext_pwm_freq_khz;
u8 ext_pwm_dtest_line;
u32 status_flags;
@@ -445,6 +446,19 @@
return rc;
}
+static inline int get_buffer_mode_duration(struct hap_chip *chip)
+{
+ int sample_count, sample_duration;
+
+ sample_count = chip->wave_rep_cnt * chip->wave_s_rep_cnt *
+ chip->wf_samp_len;
+ sample_duration = sample_count * chip->wave_play_rate_us;
+ pr_debug("sample_count: %d sample_duration: %d\n", sample_count,
+ sample_duration);
+
+ return (sample_duration / 1000);
+}
+
static bool is_sw_lra_auto_resonance_control(struct hap_chip *chip)
{
if (chip->act_type != HAP_LRA)
@@ -735,11 +749,12 @@
goto out;
}
- if (chip->play_mode != HAP_BUFFER)
- hrtimer_start(&chip->stop_timer,
- ktime_set(time_ms / MSEC_PER_SEC,
- (time_ms % MSEC_PER_SEC) * NSEC_PER_MSEC),
- HRTIMER_MODE_REL);
+ if (chip->play_mode == HAP_BUFFER)
+ time_ms = get_buffer_mode_duration(chip);
+ hrtimer_start(&chip->stop_timer,
+ ktime_set(time_ms / MSEC_PER_SEC,
+ (time_ms % MSEC_PER_SEC) * NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
rc = qpnp_haptics_auto_res_enable(chip, true);
if (rc < 0) {
@@ -766,6 +781,9 @@
if (chip->play_mode == HAP_PWM)
pwm_disable(chip->pwm_data.pwm_dev);
+
+ if (chip->play_mode == HAP_BUFFER)
+ chip->wave_samp_idx = 0;
}
out:
@@ -1182,8 +1200,11 @@
if (time_ms <= 20) {
wave_samp[0] = HAP_WF_SAMP_MAX;
wave_samp[1] = HAP_WF_SAMP_MAX;
- if (time_ms > 15)
+ chip->wf_samp_len = 2;
+ if (time_ms > 15) {
wave_samp[2] = HAP_WF_SAMP_MAX;
+ chip->wf_samp_len = 3;
+ }
/* short pattern */
rc = qpnp_haptics_parse_buffer_dt(chip);
@@ -1302,33 +1323,20 @@
chip->wave_samp_idx += HAP_WAVE_SAMP_LEN;
if (chip->wave_samp_idx >= ARRAY_SIZE(chip->wave_samp)) {
pr_debug("Samples over\n");
- /* fall through to stop playing */
} else {
pr_debug("moving to next sample set %d\n",
chip->wave_samp_idx);
+ /* Moving to next set of wave sample */
rc = qpnp_haptics_buffer_config(chip, NULL, false);
if (rc < 0) {
pr_err("Error in configuring buffer, rc=%d\n",
rc);
goto irq_handled;
}
-
- /*
- * Moving to next set of wave sample. No need to stop
- * or change the play control. Just return.
- */
- goto irq_handled;
}
}
- rc = qpnp_haptics_play_control(chip, HAP_STOP);
- if (rc < 0) {
- pr_err("Error in disabling play, rc=%d\n", rc);
- goto irq_handled;
- }
- chip->wave_samp_idx = 0;
-
irq_handled:
return IRQ_HANDLED;
}
@@ -1638,6 +1646,7 @@
pos += bytes_read;
}
+ chip->wf_samp_len = i;
for (i = 0; i < ARRAY_SIZE(chip->wave_samp); i++)
chip->wave_samp[i] = samp[i];
@@ -1986,7 +1995,10 @@
/* Use default values */
for (i = 0; i < HAP_WAVE_SAMP_LEN; i++)
chip->wave_samp[i] = HAP_WF_SAMP_MAX;
+
+ wf_samp_len = HAP_WAVE_SAMP_LEN;
}
+ chip->wf_samp_len = wf_samp_len;
return 0;
}
diff --git a/drivers/leds/leds-qti-tri-led.c b/drivers/leds/leds-qti-tri-led.c
new file mode 100644
index 0000000..ab5e876
--- /dev/null
+++ b/drivers/leds/leds-qti-tri-led.c
@@ -0,0 +1,512 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define TRILED_REG_TYPE 0x04
+#define TRILED_REG_SUBTYPE 0x05
+#define TRILED_REG_EN_CTL 0x46
+
+/* TRILED_REG_EN_CTL */
+#define TRILED_EN_CTL_MASK GENMASK(7, 5)
+#define TRILED_EN_CTL_MAX_BIT 7
+
+#define TRILED_TYPE 0x19
+#define TRILED_SUBTYPE_LED3H0L12 0x02
+#define TRILED_SUBTYPE_LED2H0L12 0x03
+#define TRILED_SUBTYPE_LED1H2L12 0x04
+
+#define TRILED_NUM_MAX 3
+
+#define PWM_PERIOD_DEFAULT_NS 1000000
+#define LED_BLINK_ON_MS 125
+#define LED_BLINK_OFF_MS 875
+
+struct pwm_setting {
+ u32 pre_period_ns;
+ u32 period_ns;
+ u32 duty_ns;
+};
+
+struct led_setting {
+ u32 on_ms;
+ u32 off_ms;
+ enum led_brightness brightness;
+ bool blink;
+};
+
+struct qpnp_led_dev {
+ struct led_classdev cdev;
+ struct pwm_device *pwm_dev;
+ struct pwm_setting pwm_setting;
+ struct led_setting led_setting;
+ struct qpnp_tri_led_chip *chip;
+ struct mutex lock;
+ const char *label;
+ const char *default_trigger;
+ u8 id;
+ bool blinking;
+};
+
+struct qpnp_tri_led_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ struct qpnp_led_dev *leds;
+ struct mutex bus_lock;
+ int num_leds;
+ u16 reg_base;
+ u8 subtype;
+};
+
+static int qpnp_tri_led_read(struct qpnp_tri_led_chip *chip, u16 addr, u8 *val)
+{
+ int rc;
+ unsigned int tmp;
+
+ mutex_lock(&chip->bus_lock);
+ rc = regmap_read(chip->regmap, chip->reg_base + addr, &tmp);
+ if (rc < 0)
+ dev_err(chip->dev, "Read addr 0x%x failed, rc=%d\n", addr, rc);
+ else
+ *val = (u8)tmp;
+ mutex_unlock(&chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_tri_led_masked_write(struct qpnp_tri_led_chip *chip,
+ u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ mutex_lock(&chip->bus_lock);
+ rc = regmap_update_bits(chip->regmap, chip->reg_base + addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+ addr, val, mask, rc);
+ mutex_unlock(&chip->bus_lock);
+
+ return rc;
+}
+
+static int __tri_led_config_pwm(struct qpnp_led_dev *led,
+ struct pwm_setting *pwm)
+{
+ struct pwm_state pstate;
+ int rc;
+
+ pwm_get_state(led->pwm_dev, &pstate);
+ pstate.enabled = !!(pwm->duty_ns != 0);
+ pstate.period = pwm->period_ns;
+ pstate.duty_cycle = pwm->duty_ns;
+ rc = pwm_apply_state(led->pwm_dev, &pstate);
+
+ if (rc < 0)
+ dev_err(led->chip->dev, "Apply PWM state for %s led failed, rc=%d\n",
+ led->cdev.name, rc);
+
+ return rc;
+}
+
+static int __tri_led_set(struct qpnp_led_dev *led)
+{
+ int rc = 0;
+ u8 val = 0, mask = 0;
+
+ rc = __tri_led_config_pwm(led, &led->pwm_setting);
+ if (rc < 0) {
+ dev_err(led->chip->dev, "Configure PWM for %s led failed, rc=%d\n",
+ led->cdev.name, rc);
+ return rc;
+ }
+
+ mask |= 1 << (TRILED_EN_CTL_MAX_BIT - led->id);
+
+ if (led->pwm_setting.duty_ns == 0)
+ val = 0;
+ else
+ val = mask;
+
+ rc = qpnp_tri_led_masked_write(led->chip, TRILED_REG_EN_CTL,
+ mask, val);
+ if (rc < 0)
+ dev_err(led->chip->dev, "Update addr 0x%x failed, rc=%d\n",
+ TRILED_REG_EN_CTL, rc);
+
+ return rc;
+}
+
+static int qpnp_tri_led_set(struct qpnp_led_dev *led)
+{
+	u32 on_ms, off_ms, period_ns, duty_ns;
+	enum led_brightness brightness = led->led_setting.brightness;
+	int rc = 0;
+
+	if (led->led_setting.blink) {
+		on_ms = led->led_setting.on_ms;
+		off_ms = led->led_setting.off_ms;
+		if (on_ms > INT_MAX / NSEC_PER_MSEC)
+			duty_ns = INT_MAX - 1;
+		else
+			duty_ns = on_ms * NSEC_PER_MSEC;
+
+		if (on_ms + off_ms > INT_MAX / NSEC_PER_MSEC) {
+			period_ns = INT_MAX;
+			duty_ns = (period_ns / (on_ms + off_ms)) * on_ms;
+		} else {
+			period_ns = (on_ms + off_ms) * NSEC_PER_MSEC;
+		}
+
+		if (period_ns < duty_ns && duty_ns != 0)
+			period_ns = duty_ns + 1;
+	} else {
+		/* Use initial period if no blinking is required */
+		period_ns = led->pwm_setting.pre_period_ns;
+
+		if (brightness && period_ns > INT_MAX / brightness)
+			duty_ns = (period_ns / LED_FULL) * brightness;
+		else
+			duty_ns = (period_ns * brightness) / LED_FULL;
+
+		if (period_ns < duty_ns && duty_ns != 0)
+			period_ns = duty_ns + 1;
+	}
+	dev_dbg(led->chip->dev, "PWM settings for %s led: period = %dns, duty = %dns\n",
+		led->cdev.name, period_ns, duty_ns);
+
+	led->pwm_setting.duty_ns = duty_ns;
+	led->pwm_setting.period_ns = period_ns;
+
+	rc = __tri_led_set(led);
+	if (rc < 0) {
+		dev_err(led->chip->dev, "__tri_led_set %s failed, rc=%d\n",
+			led->cdev.name, rc);
+		return rc;
+	}
+
+	if (led->led_setting.blink) {
+		led->cdev.brightness = LED_FULL;
+		led->blinking = true;
+	} else {
+		led->cdev.brightness = led->led_setting.brightness;
+		led->blinking = false;
+	}
+
+	return rc;
+}
+
+static int qpnp_tri_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qpnp_led_dev *led =
+ container_of(led_cdev, struct qpnp_led_dev, cdev);
+ int rc = 0;
+
+ mutex_lock(&led->lock);
+ if (brightness > LED_FULL)
+ brightness = LED_FULL;
+
+ if (brightness == led->led_setting.brightness &&
+ !led->blinking) {
+ mutex_unlock(&led->lock);
+ return 0;
+ }
+
+ led->led_setting.brightness = brightness;
+ if (!!brightness)
+ led->led_setting.off_ms = 0;
+ else
+ led->led_setting.on_ms = 0;
+ led->led_setting.blink = false;
+
+ rc = qpnp_tri_led_set(led);
+ if (rc)
+ dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+ led->label, rc);
+
+ mutex_unlock(&led->lock);
+
+ return rc;
+}
+
+static enum led_brightness qpnp_tri_led_get_brightness(
+ struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+static int qpnp_tri_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *on_ms, unsigned long *off_ms)
+{
+ struct qpnp_led_dev *led =
+ container_of(led_cdev, struct qpnp_led_dev, cdev);
+ int rc = 0;
+
+ mutex_lock(&led->lock);
+ if (led->blinking && *on_ms == led->led_setting.on_ms &&
+ *off_ms == led->led_setting.off_ms) {
+ dev_dbg(led_cdev->dev, "Ignore, on/off setting is not changed: on %lums, off %lums\n",
+ *on_ms, *off_ms);
+ mutex_unlock(&led->lock);
+ return 0;
+ }
+
+ if (*on_ms == 0) {
+ led->led_setting.blink = false;
+ led->led_setting.brightness = LED_OFF;
+ } else if (*off_ms == 0) {
+ led->led_setting.blink = false;
+ led->led_setting.brightness = led->cdev.brightness;
+ } else {
+ led->led_setting.on_ms = *on_ms;
+ led->led_setting.off_ms = *off_ms;
+ led->led_setting.blink = true;
+ }
+
+ rc = qpnp_tri_led_set(led);
+ if (rc)
+ dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+ led->label, rc);
+
+ mutex_unlock(&led->lock);
+ return rc;
+}
+
+static int qpnp_tri_led_register(struct qpnp_tri_led_chip *chip)
+{
+	struct qpnp_led_dev *led;
+	int rc, i, j;
+
+	for (i = 0; i < chip->num_leds; i++) {
+		led = &chip->leds[i];
+		mutex_init(&led->lock);
+		led->cdev.name = led->label;
+		led->cdev.max_brightness = LED_FULL;
+		led->cdev.brightness_set_blocking = qpnp_tri_led_set_brightness;
+		led->cdev.brightness_get = qpnp_tri_led_get_brightness;
+		led->cdev.blink_set = qpnp_tri_led_set_blink;
+		led->cdev.default_trigger = led->default_trigger;
+		led->cdev.brightness = LED_OFF;
+		led->cdev.blink_delay_on = LED_BLINK_ON_MS;
+		led->cdev.blink_delay_off = LED_BLINK_OFF_MS;
+
+		rc = devm_led_classdev_register(chip->dev, &led->cdev);
+		if (rc < 0) {
+			dev_err(chip->dev, "%s led class device registering failed, rc=%d\n",
+					led->label, rc);
+			goto destroy;
+		}
+	}
+
+	return 0;
+destroy:
+	for (j = 0; j <= i; j++)
+		mutex_destroy(&chip->leds[j].lock);
+
+	return rc;
+}
+
+static int qpnp_tri_led_hw_init(struct qpnp_tri_led_chip *chip)
+{
+	int rc = 0;
+	u8 val;
+
+	rc = qpnp_tri_led_read(chip, TRILED_REG_TYPE, &val);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read REG_TYPE failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (val != TRILED_TYPE) {
+		dev_err(chip->dev, "invalid type(%d)\n", val);
+		return -ENODEV;
+	}
+
+	rc = qpnp_tri_led_read(chip, TRILED_REG_SUBTYPE, &val);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read REG_SUBTYPE failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->subtype = val;
+
+	return 0;
+}
+
+static int qpnp_tri_led_parse_dt(struct qpnp_tri_led_chip *chip)
+{
+ struct device_node *node = chip->dev->of_node, *child_node;
+ struct qpnp_led_dev *led;
+ struct pwm_args pargs;
+ const __be32 *addr;
+ int rc, id, i = 0;
+
+ addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(chip->dev, "Getting address failed\n");
+ return -EINVAL;
+ }
+ chip->reg_base = be32_to_cpu(addr[0]);
+
+ chip->num_leds = of_get_available_child_count(node);
+ if (chip->num_leds == 0) {
+ dev_err(chip->dev, "No led child node defined\n");
+ return -ENODEV;
+ }
+
+ if (chip->num_leds > TRILED_NUM_MAX) {
+ dev_err(chip->dev, "can't support %d leds(max %d)\n",
+ chip->num_leds, TRILED_NUM_MAX);
+ return -EINVAL;
+ }
+
+ chip->leds = devm_kcalloc(chip->dev, chip->num_leds,
+ sizeof(struct qpnp_led_dev), GFP_KERNEL);
+ if (!chip->leds)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(node, child_node) {
+ rc = of_property_read_u32(child_node, "led-sources", &id);
+ if (rc) {
+ dev_err(chip->dev, "Get led-sources failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (id >= TRILED_NUM_MAX) {
+ dev_err(chip->dev, "only support 0~%d current source\n",
+ TRILED_NUM_MAX - 1);
+ return -EINVAL;
+ }
+
+ led = &chip->leds[i++];
+ led->chip = chip;
+ led->id = id;
+ led->label =
+ of_get_property(child_node, "label", NULL) ? :
+ child_node->name;
+
+ led->pwm_dev =
+ devm_of_pwm_get(chip->dev, child_node, NULL);
+ if (IS_ERR(led->pwm_dev)) {
+ rc = PTR_ERR(led->pwm_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev, "Get pwm device for %s led failed, rc=%d\n",
+ led->label, rc);
+ return rc;
+ }
+
+ pwm_get_args(led->pwm_dev, &pargs);
+ if (pargs.period == 0)
+ led->pwm_setting.pre_period_ns = PWM_PERIOD_DEFAULT_NS;
+ else
+ led->pwm_setting.pre_period_ns = pargs.period;
+
+ led->default_trigger = of_get_property(child_node,
+ "linux,default-trigger", NULL);
+ }
+
+ return rc;
+}
+
+static int qpnp_tri_led_probe(struct platform_device *pdev)
+{
+ struct qpnp_tri_led_chip *chip;
+ int rc = 0;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ dev_err(chip->dev, "Getting regmap failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_tri_led_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Devicetree properties parsing failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ mutex_init(&chip->bus_lock);
+
+ rc = qpnp_tri_led_hw_init(chip);
+ if (rc) {
+ dev_err(chip->dev, "HW initialization failed, rc=%d\n", rc);
+ goto destroy;
+ }
+
+ dev_set_drvdata(chip->dev, chip);
+ rc = qpnp_tri_led_register(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Registering LED class devices failed, rc=%d\n",
+ rc);
+ goto destroy;
+ }
+
+ dev_dbg(chip->dev, "Tri-led module with subtype 0x%x is detected\n",
+ chip->subtype);
+ return 0;
+destroy:
+ mutex_destroy(&chip->bus_lock);
+ dev_set_drvdata(chip->dev, NULL);
+
+ return rc;
+}
+
+static int qpnp_tri_led_remove(struct platform_device *pdev)
+{
+ int i;
+ struct qpnp_tri_led_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&chip->bus_lock);
+ for (i = 0; i < chip->num_leds; i++)
+ mutex_destroy(&chip->leds[i].lock);
+ dev_set_drvdata(chip->dev, NULL);
+ return 0;
+}
+
+static const struct of_device_id qpnp_tri_led_of_match[] = {
+ { .compatible = "qcom,tri-led",},
+ { },
+};
+
+static struct platform_driver qpnp_tri_led_driver = {
+ .driver = {
+ .name = "qcom,tri-led",
+ .of_match_table = qpnp_tri_led_of_match,
+ },
+ .probe = qpnp_tri_led_probe,
+ .remove = qpnp_tri_led_remove,
+};
+module_platform_driver(qpnp_tri_led_driver);
+
+MODULE_DESCRIPTION("QTI TRI_LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:qpnp-tri-led");
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 7e6d999..d1222aa 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -196,6 +196,30 @@
return rc;
}
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *process_evt)
+{
+ int rc = 0;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.process_evt) {
+ rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx,
+ process_evt);
+ } else {
+ /* handling of this message is optional */
+ CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
int cam_context_handle_acquire_dev(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
{
@@ -257,10 +281,10 @@
int cam_context_handle_flush_dev(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd)
{
- int rc;
+ int rc = 0;
if (!ctx->state_machine) {
- CAM_ERR(CAM_CORE, "context is not ready");
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
@@ -274,9 +298,8 @@
rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
ctx, cmd);
} else {
- CAM_ERR(CAM_CORE, "No flush device in dev %d, state %d",
+ CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
ctx->dev_hdl, ctx->state);
- rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index c823b7a..af92b7e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -110,6 +110,7 @@
* @unlink: Unlink the context
* @apply_req: Apply setting for the context
* @flush_req: Flush request to remove request ids
+ * @process_evt: Handle event notification from CRM.(optional)
*
*/
struct cam_ctx_crm_ops {
@@ -123,6 +124,8 @@
struct cam_req_mgr_apply_request *apply);
int (*flush_req)(struct cam_context *ctx,
struct cam_req_mgr_flush_request *flush);
+ int (*process_evt)(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *evt_data);
};
@@ -273,6 +276,18 @@
struct cam_req_mgr_flush_request *apply);
/**
+ * cam_context_handle_crm_process_evt()
+ *
+ * @brief: Handle process event command
+ *
+ * @ctx: Object pointer for cam_context
+ * @process_evt: process event command payload
+ *
+ */
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *process_evt);
+
+/**
* cam_context_handle_acquire_dev()
*
* @brief: Handle acquire device command
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index aab1a1a..8ea920d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -430,6 +430,8 @@
uint32_t i;
int rc = 0;
+ CAM_DBG(CAM_CTXT, "E: NRT flush ctx");
+
/*
* flush pending requests, take the sync lock to synchronize with the
* sync callback thread so that the sync cb thread does not try to
@@ -444,23 +446,33 @@
while (!list_empty(&temp_list)) {
req = list_first_entry(&temp_list,
struct cam_ctx_request, list);
+
list_del_init(&req->list);
req->flushed = 1;
+
flush_args.flush_req_pending[flush_args.num_req_pending++] =
req->req_priv;
for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(req->out_map_entries[i].sync_id,
+ if (req->out_map_entries[i].sync_id != -1) {
+ rc = cam_sync_signal(
+ req->out_map_entries[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (rc == -EALREADY) {
+ CAM_ERR(CAM_CTXT,
+ "Req: %llu already signalled, sync_id:%d",
+ req->request_id,
+ req->out_map_entries[i].
+ sync_id);
+ break;
+ }
+ }
}
mutex_unlock(&ctx->sync_mutex);
if (ctx->hw_mgr_intf->hw_flush) {
flush_args.num_req_active = 0;
spin_lock(&ctx->lock);
- INIT_LIST_HEAD(&temp_list);
- list_splice_init(&ctx->active_req_list, &temp_list);
- list_for_each_entry(req, &temp_list, list) {
+ list_for_each_entry(req, &ctx->active_req_list, list) {
flush_args.flush_req_active[flush_args.num_req_active++]
= req->req_priv;
}
@@ -474,24 +486,42 @@
}
}
+ INIT_LIST_HEAD(&temp_list);
+ spin_lock(&ctx->lock);
+ list_splice_init(&ctx->active_req_list, &temp_list);
+ INIT_LIST_HEAD(&ctx->active_req_list);
+ spin_unlock(&ctx->lock);
+
while (!list_empty(&temp_list)) {
req = list_first_entry(&temp_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- for (i = 0; i < req->num_out_map_entries; i++)
+ for (i = 0; i < req->num_out_map_entries; i++) {
if (req->out_map_entries[i].sync_id != -1) {
- cam_sync_signal(req->out_map_entries[i].sync_id,
+ rc = cam_sync_signal(
+ req->out_map_entries[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (rc == -EALREADY) {
+ CAM_ERR(CAM_CTXT,
+ "Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
+ req->request_id, req->ctx,
+ req->ctx->dev_name,
+ req->ctx->dev_hdl,
+ req->ctx->state);
+ break;
+ }
}
+ }
spin_lock(&ctx->lock);
list_add_tail(&req->list, &ctx->free_req_list);
spin_unlock(&ctx->lock);
req->ctx = NULL;
}
- INIT_LIST_HEAD(&ctx->active_req_list);
- return rc;
+ CAM_DBG(CAM_CTXT, "X: NRT flush ctx");
+
+ return 0;
}
int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
@@ -502,6 +532,8 @@
uint32_t i;
int rc = 0;
+ CAM_DBG(CAM_CTXT, "E: NRT flush req");
+
flush_args.num_req_pending = 0;
flush_args.num_req_active = 0;
mutex_lock(&ctx->sync_mutex);
@@ -510,7 +542,9 @@
if (req->request_id != cmd->req_id)
continue;
+ list_del_init(&req->list);
req->flushed = 1;
+
flush_args.flush_req_pending[flush_args.num_req_pending++] =
req->req_priv;
break;
@@ -525,6 +559,8 @@
if (req->request_id != cmd->req_id)
continue;
+ list_del_init(&req->list);
+
flush_args.flush_req_active[
flush_args.num_req_active++] =
req->req_priv;
@@ -543,20 +579,31 @@
if (req) {
if (flush_args.num_req_pending || flush_args.num_req_active) {
- list_del_init(&req->list);
for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(
+ if (req->out_map_entries[i].sync_id != -1) {
+ rc = cam_sync_signal(
req->out_map_entries[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
- spin_lock(&ctx->lock);
- list_add_tail(&req->list, &ctx->free_req_list);
- spin_unlock(&ctx->lock);
- req->ctx = NULL;
+ if (rc == -EALREADY) {
+ CAM_ERR(CAM_CTXT,
+ "Req: %llu already signalled, sync_id:%d",
+ req->request_id,
+ req->out_map_entries[i].
+ sync_id);
+ break;
+ }
+ }
+ if (flush_args.num_req_active) {
+ spin_lock(&ctx->lock);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ spin_unlock(&ctx->lock);
+ req->ctx = NULL;
+ }
}
}
+ CAM_DBG(CAM_CTXT, "X: NRT flush req");
- return rc;
+ return 0;
}
int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index a5977b3..4e9034e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -220,7 +220,7 @@
rc = cam_context_handle_flush_dev(ctx, flush);
if (rc)
- CAM_ERR(CAM_CORE, "FLush failure for node %s", node->name);
+ CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
return rc;
}
@@ -342,6 +342,25 @@
return cam_context_handle_crm_flush_req(ctx, flush);
}
+static int __cam_node_crm_process_evt(
+ struct cam_req_mgr_link_evt_data *evt_data)
+{
+ struct cam_context *ctx = NULL;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_CORE, "Invalid process event request payload");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ evt_data->dev_hdl);
+ return -EINVAL;
+ }
+ return cam_context_handle_crm_process_evt(ctx, evt_data);
+}
+
int cam_node_deinit(struct cam_node *node)
{
if (node)
@@ -394,6 +413,7 @@
node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
+ node->crm_node_intf.process_evt = __cam_node_crm_process_evt;
mutex_init(&node->list_mutex);
INIT_LIST_HEAD(&node->free_ctx_list);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 00ead5d..b04bc23 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,12 +16,19 @@
#include <linux/msm-bus.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "cam_cpas_hw.h"
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_soc.h"
-#define CAM_CPAS_AXI_MIN_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
+
+static uint cam_min_camnoc_ib_bw;
+module_param(cam_min_camnoc_ib_bw, uint, 0644);
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
@@ -84,11 +91,19 @@
}
static int cam_cpas_util_vote_bus_client_bw(
- struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib)
+ struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib,
+ bool camnoc_bw)
{
struct msm_bus_paths *path;
struct msm_bus_scale_pdata *pdata;
int idx = 0;
+ uint64_t min_camnoc_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+
+ if (cam_min_camnoc_ib_bw > 0)
+ min_camnoc_ib_bw = (uint64_t)cam_min_camnoc_ib_bw * 1000000L;
+
+ CAM_DBG(CAM_CPAS, "cam_min_camnoc_ib_bw = %d, min_camnoc_ib_bw=%llu",
+ cam_min_camnoc_ib_bw, min_camnoc_ib_bw);
if (!bus_client->valid) {
CAM_ERR(CAM_CPAS, "bus client not valid");
@@ -118,11 +133,19 @@
bus_client->curr_vote_level = idx;
mutex_unlock(&bus_client->lock);
- if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_BW))
- ab = CAM_CPAS_AXI_MIN_BW;
+ if (camnoc_bw == true) {
+ if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_CAMNOC_AB_BW))
+ ab = CAM_CPAS_AXI_MIN_CAMNOC_AB_BW;
- if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_BW))
- ib = CAM_CPAS_AXI_MIN_BW;
+ if ((ib > 0) && (ib < min_camnoc_ib_bw))
+ ib = min_camnoc_ib_bw;
+ } else {
+ if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW))
+ ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW;
+
+ if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW))
+ ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW;
+ }
pdata = bus_client->pdata;
path = &(pdata->usecase[idx]);
@@ -205,7 +228,7 @@
return -EINVAL;
if (bus_client->dyn_vote)
- cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0);
+ cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0, false);
else
cam_cpas_util_vote_bus_client_level(bus_client, 0);
@@ -370,7 +393,7 @@
list_for_each_entry_safe(curr_port, temp_port,
&cpas_core->axi_ports_list_head, sibling_port) {
rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
- mnoc_bw, mnoc_bw);
+ mnoc_bw, mnoc_bw, false);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed in mnoc vote, enable=%d, rc=%d",
@@ -380,13 +403,13 @@
if (soc_private->axi_camnoc_based) {
cam_cpas_util_vote_bus_client_bw(
- &curr_port->camnoc_bus, 0, camnoc_bw);
+ &curr_port->camnoc_bus, 0, camnoc_bw, true);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed in mnoc vote, enable=%d, %d",
enable, rc);
cam_cpas_util_vote_bus_client_bw(
- &curr_port->mnoc_bus, 0, 0);
+ &curr_port->mnoc_bus, 0, 0, false);
goto remove_ahb_vote;
}
}
@@ -571,7 +594,7 @@
camnoc_bw, mnoc_bw);
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
- mnoc_bw, mnoc_bw);
+ mnoc_bw, mnoc_bw, false);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
@@ -581,7 +604,7 @@
if (soc_private->axi_camnoc_based) {
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
- 0, camnoc_bw);
+ 0, camnoc_bw, true);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
@@ -662,9 +685,10 @@
opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
if (IS_ERR(opp)) {
- CAM_ERR(CAM_CPAS, "Error on OPP freq :%ld, %pK",
+ CAM_DBG(CAM_CPAS, "OPP Ceil not available for freq :%ld, %pK",
corner_freq, opp);
- return -EINVAL;
+ *req_level = CAM_TURBO_VOTE;
+ return 0;
}
corner = dev_pm_opp_get_voltage(opp);
@@ -879,9 +903,11 @@
goto done;
if (cpas_core->streamon_clients == 0) {
+ atomic_set(&cpas_core->irq_count, 1);
rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
applied_level);
if (rc) {
+ atomic_set(&cpas_core->irq_count, 0);
CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc);
goto done;
}
@@ -889,14 +915,17 @@
if (cpas_core->internal_ops.power_on) {
rc = cpas_core->internal_ops.power_on(cpas_hw);
if (rc) {
+ atomic_set(&cpas_core->irq_count, 0);
cam_cpas_soc_disable_resources(
- &cpas_hw->soc_info);
+ &cpas_hw->soc_info, true, true);
CAM_ERR(CAM_CPAS,
"failed in power_on settings rc=%d",
rc);
goto done;
}
}
+ CAM_DBG(CAM_CPAS, "irq_count=%d\n",
+ atomic_read(&cpas_core->irq_count));
cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
}
@@ -911,6 +940,10 @@
return rc;
}
+static int _check_irq_count(struct cam_cpas *cpas_core)
+{
+ return (atomic_read(&cpas_core->irq_count) > 0) ? 0 : 1;
+}
static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
uint32_t arg_size)
@@ -923,6 +956,7 @@
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote;
int rc = 0;
+ long result;
if (!hw_priv || !stop_args) {
CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
@@ -971,11 +1005,29 @@
}
}
- rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+ rc = cam_cpas_soc_disable_irq(&cpas_hw->soc_info);
+ if (rc) {
+ CAM_ERR(CAM_CPAS, "disable_irq failed, rc=%d", rc);
+ goto done;
+ }
+
+ /* Wait for any IRQs still being handled */
+ atomic_dec(&cpas_core->irq_count);
+ result = wait_event_timeout(cpas_core->irq_count_wq,
+ _check_irq_count(cpas_core), HZ);
+ if (result == 0) {
+ CAM_ERR(CAM_CPAS, "Wait failed: irq_count=%d",
+ atomic_read(&cpas_core->irq_count));
+ }
+
+ rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info,
+ true, false);
if (rc) {
CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc);
goto done;
}
+ CAM_DBG(CAM_CPAS, "Disabled all the resources: irq_count=%d\n",
+ atomic_read(&cpas_core->irq_count));
cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
}
@@ -1426,6 +1478,8 @@
soc_private = (struct cam_cpas_private_soc *)
cpas_hw->soc_info.soc_private;
cpas_core->num_clients = soc_private->num_clients;
+ atomic_set(&cpas_core->irq_count, 0);
+ init_waitqueue_head(&cpas_core->irq_count_wq);
if (internal_ops->setup_regbase) {
rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
@@ -1481,7 +1535,7 @@
if (rc)
goto disable_soc_res;
- rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+ rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
if (rc) {
CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
goto remove_default_vote;
@@ -1499,7 +1553,7 @@
return 0;
disable_soc_res:
- cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+ cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
remove_default_vote:
cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
axi_cleanup:
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index aa3663d..05840bb 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,6 +187,8 @@
struct list_head axi_ports_list_head;
struct cam_cpas_internal_ops internal_ops;
struct workqueue_struct *work_queue;
+ atomic_t irq_count;
+ wait_queue_head_t irq_count_wq;
};
int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index d5108f6..0187a64 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -366,7 +366,6 @@
switch (cmd->op_code) {
case CAM_QUERY_CAP: {
struct cam_cpas_query_cap query;
- uint32_t cam_cpas;
rc = copy_from_user(&query, (void __user *) cmd->handle,
sizeof(query));
@@ -377,7 +376,8 @@
}
rc = cam_cpas_get_hw_info(&query.camera_family,
- &query.camera_version, &query.cpas_version, &cam_cpas);
+ &query.camera_version, &query.cpas_version,
+ &query.reserved);
if (rc)
break;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index f85f461..b18af0a 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -209,13 +209,26 @@
return rc;
}
-int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clocks, bool disable_irq)
 {
 int rc = 0;
- rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_disable_platform_resource(soc_info,
+ disable_clocks, disable_irq);
if (rc)
CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
return rc;
}
+
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_irq_disable(soc_info);
+ if (rc)
+ CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index d4fc039..fe0187e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -61,5 +61,7 @@
int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
enum cam_vote_level default_level);
-int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clocks, bool disable_irq);
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info);
#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 0e5ce85..0533ed8 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -354,6 +354,11 @@
cpas_core = (struct cam_cpas *) cpas_hw->core_info;
soc_info = &cpas_hw->soc_info;
+ if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+ CAM_ERR(CAM_CPAS, "CPAS off");
+ return;
+ }
+
for (i = 0; i < camnoc_info->irq_err_size; i++) {
if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
(camnoc_info->irq_err[i].enable)) {
@@ -398,6 +403,9 @@
~camnoc_info->irq_err[i].sbm_port;
}
}
+ atomic_dec(&cpas_core->irq_count);
+ wake_up(&cpas_core->irq_count_wq);
+ CAM_DBG(CAM_CPAS, "irq_count=%d\n", atomic_read(&cpas_core->irq_count));
if (payload->irq_status)
CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
@@ -414,9 +422,14 @@
int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
struct cam_cpas_work_payload *payload;
+ if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+ CAM_ERR(CAM_CPAS, "CPAS off");
+ return IRQ_HANDLED;
+ }
+
payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
if (!payload)
- return IRQ_HANDLED;
+ goto done;
payload->irq_status = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
@@ -433,6 +446,9 @@
cam_cpastop_reset_irq(cpas_hw);
queue_work(cpas_core->work_queue, &payload->work);
+done:
+ atomic_dec(&cpas_core->irq_count);
+ wake_up(&cpas_core->irq_count_wq);
return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index b9b59a1..178e734 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -110,7 +110,7 @@
/**
* cam_hfi_deinit() - cleanup HFI
*/
-void cam_hfi_deinit(void);
+void cam_hfi_deinit(void __iomem *icp_base);
/**
* hfi_set_debug_level() - set debug level
* @lvl: FW debug message level
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index 6909972..73663b3 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,7 @@
#define ICP_SHARED_MEM_IN_BYTES (1024 * 1024)
#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES (2 * 1024 * 1024)
#define ICP_HFI_MAX_PKT_SIZE_IN_WORDS 25600
+#define ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS 256
#define ICP_HFI_QTBL_HOSTID1 0x01000000
#define ICP_HFI_QTBL_STATUS_ENABLED 0x00000001
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
index 837efec..7b2cb8b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,30 @@
#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT 0x3
/**
+ * struct abort_data
+ * @num_req_ids: Number of req ids
+ * @num_req_id: array holding the specific req ids
+ *
+ * create abort data
+ */
+struct abort_data {
+ uint32_t num_req_ids;
+ uint32_t num_req_id[1];
+};
+
+/**
+ * struct hfi_cmd_abort
+ * @abort: abort data
+ * @user_data: user supplied argument
+ *
+ * create session abort data
+ */
+struct hfi_cmd_abort {
+ struct abort_data abort;
+ uint64_t user_data;
+} __packed;
+
+/**
* struct hfi_cmd_abort_destroy
* @user_data: user supplied data
*
@@ -197,6 +221,7 @@
} __packed;
#define MAX_NUM_OF_IMAGE_PLANES 2
+#define MAX_HFR_GROUP 16
enum hfi_ipe_io_images {
IPE_INPUT_IMAGE_FULL,
@@ -220,6 +245,40 @@
IPE_IO_IMAGES_MAX
};
+enum bps_io_images {
+ BPS_INPUT_IMAGE,
+ BPS_OUTPUT_IMAGE_FULL,
+ BPS_OUTPUT_IMAGE_DS4,
+ BPS_OUTPUT_IMAGE_DS16,
+ BPS_OUTPUT_IMAGE_DS64,
+ BPS_OUTPUT_IMAGE_STATS_BG,
+ BPS_OUTPUT_IMAGE_STATS_BHIST,
+ BPS_OUTPUT_IMAGE_REG1,
+ BPS_OUTPUT_IMAGE_REG2,
+ BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL,
+ BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2,
+ BPS_IO_IMAGES_MAX
+};
+
+struct frame_buffer {
+ uint32_t buffer_ptr[MAX_NUM_OF_IMAGE_PLANES];
+ uint32_t meta_buffer_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct bps_frame_process_data {
+ struct frame_buffer buffers[BPS_IO_IMAGES_MAX];
+ uint32_t max_num_cores;
+ uint32_t target_time;
+ uint32_t ubwc_stats_buffer_addr;
+ uint32_t ubwc_stats_buffer_size;
+ uint32_t cdm_buffer_addr;
+ uint32_t cdm_buffer_size;
+ uint32_t iq_settings_addr;
+ uint32_t strip_lib_out_addr;
+ uint32_t cdm_prog_addr;
+ uint32_t request_id;
+};
+
enum hfi_ipe_image_format {
IMAGE_FORMAT_INVALID,
IMAGE_FORMAT_MIPI_8,
@@ -361,6 +420,49 @@
struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
} __packed;
+struct ica_stab_coeff {
+ uint32_t coeffs[8];
+} __packed;
+
+struct ica_stab_params {
+ uint32_t mode;
+ struct ica_stab_coeff transforms[3];
+} __packed;
+
+struct frame_set {
+ struct frame_buffer buffers[IPE_IO_IMAGES_MAX];
+ struct ica_stab_params ica_params;
+ uint32_t cdm_ica1_addr;
+ uint32_t cdm_ica2_addr;
+} __packed;
+
+struct ipe_frame_process_data {
+ uint32_t strip_lib_out_addr;
+ uint32_t iq_settings_addr;
+ uint32_t scratch_buffer_addr;
+ uint32_t scratch_buffer_size;
+ uint32_t ubwc_stats_buffer_addr;
+ uint32_t ubwc_stats_buffer_size;
+ uint32_t cdm_buffer_addr;
+ uint32_t cdm_buffer_size;
+ uint32_t max_num_cores;
+ uint32_t target_time;
+ uint32_t cdm_prog_base;
+ uint32_t cdm_pre_ltm;
+ uint32_t cdm_post_ltm;
+ uint32_t cdm_anr_full_pass;
+ uint32_t cdm_anr_ds4;
+ uint32_t cdm_anr_ds16;
+ uint32_t cdm_anr_ds64;
+ uint32_t cdm_tf_full_pass;
+ uint32_t cdm_tf_ds4;
+ uint32_t cdm_tf_ds16;
+ uint32_t cdm_tf_ds64;
+ uint32_t request_id;
+ uint32_t frames_in_batch;
+ struct frame_set framesets[MAX_HFR_GROUP];
+} __packed;
+
/**
* struct hfi_cmd_ipe_config
* @images: images descreptions
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index eca16d6..f95f8eb 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -124,7 +124,7 @@
* firmware to process
*/
wmb();
- cam_io_w((uint32_t)INTR_ENABLE,
+ cam_io_w_mb((uint32_t)INTR_ENABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
mutex_unlock(&hfi_cmd_q_mutex);
@@ -138,6 +138,7 @@
struct hfi_q_hdr *q;
uint32_t new_read_idx, size_in_words, word_diff, temp;
uint32_t *read_q, *read_ptr, *write_ptr;
+ uint32_t size_upper_bound = 0;
int rc = 0;
if (!pmsg) {
@@ -175,10 +176,13 @@
goto err;
}
- if (q_id == Q_MSG)
+ if (q_id == Q_MSG) {
read_q = (uint32_t *)g_hfi->map.msg_q.kva;
- else
+ size_upper_bound = ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS;
+ } else {
read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
+ size_upper_bound = ICP_HFI_MAX_PKT_SIZE_IN_WORDS;
+ }
read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
@@ -196,7 +200,7 @@
}
if ((size_in_words == 0) ||
- (size_in_words > ICP_HFI_MAX_PKT_SIZE_IN_WORDS)) {
+ (size_in_words > size_upper_bound)) {
CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
size_in_words << BYTE_WORD_SHIFT);
q->qhdr_read_idx = q->qhdr_write_idx;
@@ -218,6 +222,10 @@
q->qhdr_read_idx = new_read_idx;
*words_read = size_in_words;
+ /* Memory Barrier to make sure message
+ * queue parameters are updated after read
+ */
+ wmb();
err:
mutex_unlock(&hfi_msg_q_mutex);
return rc;
@@ -441,17 +449,17 @@
val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
- cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+ cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
- cam_io_w(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+ cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
}
void cam_hfi_enable_cpu(void __iomem *icp_base)
{
- cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN,
+ cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
- cam_io_w((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+ cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
}
int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
@@ -460,23 +468,11 @@
int rc = 0;
uint32_t data;
uint32_t fw_version, status = 0;
+ uint32_t retry_cnt = 0;
cam_hfi_enable_cpu(icp_base);
g_hfi->csr_base = icp_base;
- rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
- status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
-
- if (rc) {
- CAM_ERR(CAM_HFI, "timed out , status = %u", status);
- return -EINVAL;
- }
-
- fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
- CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
-
- cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
-
if (debug) {
cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
@@ -495,20 +491,54 @@
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
}
+ while (retry_cnt < HFI_MAX_POLL_TRY) {
+ readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+ status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+ CAM_DBG(CAM_HFI, "1: status = %u", status);
+ status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+ CAM_DBG(CAM_HFI, "2: status = %u", status);
+ if (status == ICP_INIT_RESP_SUCCESS)
+ break;
+
+ if (status == ICP_INIT_RESP_FAILED) {
+ CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+ status);
+ fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+ CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+ return -EINVAL;
+ }
+ retry_cnt++;
+ }
+
+ if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+ (status == ICP_INIT_RESP_RESET)) {
+ CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+ status);
+ fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+ CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+ return -EINVAL;
+ }
+
+ cam_io_w_mb((uint32_t)INTR_ENABLE,
+ icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+ fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+ CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
+
data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);
- cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
- cam_io_w((uint32_t)hfi_mem->shmem.iova,
+ cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+ cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
icp_base + HFI_REG_SHARED_MEM_PTR);
- cam_io_w((uint32_t)hfi_mem->shmem.len,
+ cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
icp_base + HFI_REG_SHARED_MEM_SIZE);
- cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
+ cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
icp_base + HFI_REG_UNCACHED_HEAP_PTR);
- cam_io_w((uint32_t)hfi_mem->sec_heap.len,
+ cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
- cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
return rc;
}
@@ -520,6 +550,7 @@
struct hfi_qtbl_hdr *qtbl_hdr;
struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
uint32_t hw_version, soc_version, fw_version, status = 0;
+ uint32_t retry_cnt = 0;
mutex_lock(&hfi_cmd_q_mutex);
mutex_lock(&hfi_msg_q_mutex);
@@ -556,7 +587,7 @@
* disabling the clock gating on both V1 and V2 until the
* hardware team root causes this
*/
- cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
+ cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
ICP_FLAG_CSR_WAKE_UP_EN |
ICP_CSR_EN_CLKGATE_WFI,
icp_base + HFI_REG_A5_CSR_A5_CONTROL);
@@ -673,24 +704,48 @@
break;
}
- cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
- cam_io_w((uint32_t)hfi_mem->shmem.iova,
+ cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+ cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
icp_base + HFI_REG_SHARED_MEM_PTR);
- cam_io_w((uint32_t)hfi_mem->shmem.len,
+ cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
icp_base + HFI_REG_SHARED_MEM_SIZE);
- cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
+ cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
icp_base + HFI_REG_UNCACHED_HEAP_PTR);
- cam_io_w((uint32_t)hfi_mem->sec_heap.len,
+ cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
- cam_io_w((uint32_t)ICP_INIT_REQUEST_SET,
+ cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
- rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
- status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
- if (rc) {
- CAM_ERR(CAM_HFI, "timed out , status = %u", status);
+ while (retry_cnt < HFI_MAX_POLL_TRY) {
+ readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+ status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+ CAM_DBG(CAM_HFI, "1: status = %u rc = %d", status, rc);
+ status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+ CAM_DBG(CAM_HFI, "2: status = %u rc = %d", status, rc);
+ if (status == ICP_INIT_RESP_SUCCESS)
+ break;
+
+ if (status == ICP_INIT_RESP_FAILED) {
+ CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+ status);
+ fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+ CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+ goto regions_fail;
+ }
+ retry_cnt++;
+ }
+
+ if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+ (status == ICP_INIT_RESP_RESET)) {
+ CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+ status);
+ fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+ CAM_ERR(CAM_HFI,
+ "hw version : : [%x], fw version : [%x]",
+ hw_version, fw_version);
goto regions_fail;
}
@@ -702,7 +757,8 @@
g_hfi->hfi_state = HFI_READY;
g_hfi->cmd_q_state = true;
g_hfi->msg_q_state = true;
- cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+ cam_io_w_mb((uint32_t)INTR_ENABLE,
+ icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
mutex_unlock(&hfi_cmd_q_mutex);
mutex_unlock(&hfi_msg_q_mutex);
@@ -710,14 +766,14 @@
return rc;
regions_fail:
kfree(g_hfi);
+ g_hfi = NULL;
alloc_fail:
mutex_unlock(&hfi_cmd_q_mutex);
mutex_unlock(&hfi_msg_q_mutex);
return rc;
}
-
-void cam_hfi_deinit(void)
+void cam_hfi_deinit(void __iomem *icp_base)
{
mutex_lock(&hfi_cmd_q_mutex);
mutex_lock(&hfi_msg_q_mutex);
@@ -730,7 +786,10 @@
g_hfi->cmd_q_state = false;
g_hfi->msg_q_state = false;
- cam_io_w((uint32_t)INTR_DISABLE,
+ cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
+ icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+
+ cam_io_w_mb((uint32_t)INTR_DISABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
kzfree(g_hfi);
g_hfi = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 25e1ce7..5bd7f1c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -276,22 +276,29 @@
rc = cam_bps_handle_resume(bps_dev);
break;
case CAM_ICP_BPS_CMD_UPDATE_CLK: {
- uint32_t clk_rate = *(uint32_t *)cmd_args;
+ struct cam_a5_clk_update_cmd *clk_upd_cmd =
+ (struct cam_a5_clk_update_cmd *)cmd_args;
+ uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+
if (!core_info->clk_enable) {
- cam_bps_handle_pc(bps_dev);
- cam_cpas_reg_write(core_info->cpas_handle,
- CAM_CPAS_REG_CPASTOP,
- hw_info->pwr_ctrl, true, 0x0);
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ cam_bps_handle_pc(bps_dev);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x0);
+ }
rc = cam_bps_toggle_clk(soc_info, true);
if (rc)
CAM_ERR(CAM_ICP, "Enable failed");
else
core_info->clk_enable = true;
- rc = cam_bps_handle_resume(bps_dev);
- if (rc)
- CAM_ERR(CAM_ICP, "handle resume failed");
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ rc = cam_bps_handle_resume(bps_dev);
+ if (rc)
+ CAM_ERR(CAM_ICP, "BPS resume failed");
+ }
}
CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
rc = cam_bps_update_clk_rate(soc_info, clk_rate);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index b7b636c..d2314c4 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -138,9 +138,22 @@
int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
uint32_t clk_rate)
{
+ int32_t src_clk_idx;
+
if (!soc_info)
return -EINVAL;
+ src_clk_idx = soc_info->src_clk_idx;
+
+ if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+ (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+ (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+ CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+ clk_rate,
+ soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+ clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+ }
+
return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index f44fcc0..5dfb1bc 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -116,7 +117,12 @@
if (ctx_data->clk_info.clk_rate[i] >= base_clk)
return i;
- return 0;
+ /*
+ * Caller has to ensure returned index is within array
+ * size bounds while accessing that index.
+ */
+
+ return i;
}
static bool cam_icp_is_over_clk(struct cam_icp_hw_mgr *hw_mgr,
@@ -132,7 +138,7 @@
curr_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
hw_mgr_clk_info->curr_clk);
- CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %lld %lld",
+ CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %d %d",
base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk,
hw_mgr_clk_info->curr_clk);
@@ -192,9 +198,9 @@
struct cam_hw_info *dev = NULL;
if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
- dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ dev_intf = hw_mgr->bps_dev_intf;
else
- dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+ dev_intf = hw_mgr->ipe0_dev_intf;
if (!dev_intf) {
CAM_ERR(CAM_ICP, "dev_intf is invalid");
@@ -247,16 +253,16 @@
(struct cam_icp_clk_info *)task_data->data;
uint32_t id;
uint32_t i;
- uint32_t curr_clk_rate;
struct cam_icp_hw_ctx_data *ctx_data;
struct cam_hw_intf *ipe0_dev_intf = NULL;
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
struct cam_hw_intf *dev_intf = NULL;
+ struct cam_a5_clk_update_cmd clk_upd_cmd;
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
clk_info->base_clk = 0;
clk_info->curr_clk = 0;
@@ -290,19 +296,135 @@
CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
+ clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
- &curr_clk_rate, sizeof(curr_clk_rate));
+ &clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
if (clk_info->hw_type != ICP_CLK_HW_BPS)
if (ipe1_dev_intf)
ipe1_dev_intf->hw_ops.process_cmd(
ipe1_dev_intf->hw_priv, id,
- &curr_clk_rate, sizeof(curr_clk_rate));
+ &clk_upd_cmd,
+ sizeof(struct cam_a5_clk_update_cmd));
return 0;
}
-static void cam_icp_timer_cb(unsigned long data)
+static int32_t cam_icp_ctx_timer(void *priv, void *data)
+{
+ struct clk_work_data *task_data = (struct clk_work_data *)data;
+ struct cam_icp_hw_ctx_data *ctx_data =
+ (struct cam_icp_hw_ctx_data *)task_data->data;
+ struct cam_icp_hw_mgr *hw_mgr = &icp_hw_mgr;
+ uint32_t id;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ struct cam_hw_intf *dev_intf = NULL;
+ struct cam_icp_clk_info *clk_info;
+ struct cam_icp_cpas_vote clk_update;
+
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "ctx_data is NULL, failed to update clk");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ if ((ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) ||
+ (ctx_data->watch_dog_reset_counter == 0)) {
+ CAM_DBG(CAM_ICP, "state %d, counter=%d",
+ ctx_data->state, ctx_data->watch_dog_reset_counter);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+ }
+
+ CAM_DBG(CAM_ICP,
+ "E :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+ ctx_data->ctx_id,
+ ctx_data->clk_info.uncompressed_bw,
+ ctx_data->clk_info.compressed_bw,
+ ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+
+ if (!ctx_data->icp_dev_acquire_info) {
+ CAM_WARN(CAM_ICP, "NULL acquire info");
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ dev_intf = bps_dev_intf;
+ clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+ id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+ } else {
+ dev_intf = ipe0_dev_intf;
+ clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+ id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+ }
+
+ clk_info->compressed_bw -= ctx_data->clk_info.compressed_bw;
+ clk_info->uncompressed_bw -= ctx_data->clk_info.uncompressed_bw;
+ ctx_data->clk_info.uncompressed_bw = 0;
+ ctx_data->clk_info.compressed_bw = 0;
+ ctx_data->clk_info.curr_fc = 0;
+ ctx_data->clk_info.base_clk = 0;
+
+ clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+ clk_update.ahb_vote.vote.freq = clk_info->curr_clk;
+ clk_update.ahb_vote_valid = true;
+ clk_update.axi_vote.compressed_bw = clk_info->compressed_bw;
+ clk_update.axi_vote.uncompressed_bw = clk_info->uncompressed_bw;
+ clk_update.axi_vote_valid = true;
+ dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+ &clk_update, sizeof(clk_update));
+
+ CAM_DBG(CAM_ICP,
+ "X :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+ ctx_data->ctx_id,
+ ctx_data->clk_info.uncompressed_bw,
+ ctx_data->clk_info.compressed_bw,
+ ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+ mutex_unlock(&ctx_data->ctx_mutex);
+
+ return 0;
+}
+
+static void cam_icp_ctx_timer_cb(unsigned long data)
+{
+ unsigned long flags;
+ struct crm_workq_task *task;
+ struct clk_work_data *task_data;
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+ spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+ return;
+ }
+
+ task_data = (struct clk_work_data *)task->payload;
+ task_data->data = timer->parent;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_ctx_timer;
+ cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static void cam_icp_device_timer_cb(unsigned long data)
{
unsigned long flags;
struct crm_workq_task *task;
@@ -339,13 +461,29 @@
hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
hw_mgr->clk_info[i].hw_type = i;
+ hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
}
hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
return 0;
}
-static int cam_icp_timer_start(struct cam_icp_hw_mgr *hw_mgr)
+static int cam_icp_ctx_timer_start(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+
+ rc = crm_timer_init(&ctx_data->watch_dog,
+ 2000, ctx_data, &cam_icp_ctx_timer_cb);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to start timer");
+
+ ctx_data->watch_dog_reset_counter = 0;
+
+ CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+ return rc;
+}
+
+static int cam_icp_device_timer_start(struct cam_icp_hw_mgr *hw_mgr)
{
int rc = 0;
int i;
@@ -353,21 +491,70 @@
for (i = 0; i < ICP_CLK_HW_MAX; i++) {
if (!hw_mgr->clk_info[i].watch_dog) {
rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
- 3000, &hw_mgr->clk_info[i], &cam_icp_timer_cb);
+ 3000, &hw_mgr->clk_info[i],
+ &cam_icp_device_timer_cb);
+
if (rc)
CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+
+ hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
}
}
return rc;
}
-static void cam_icp_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+static int cam_icp_ctx_timer_stop(struct cam_icp_hw_ctx_data *ctx_data)
{
- if (!hw_mgr->bps_ctxt_cnt)
+ if (ctx_data->watch_dog) {
+ CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+ ctx_data->watch_dog_reset_counter = 0;
+ crm_timer_exit(&ctx_data->watch_dog);
+ ctx_data->watch_dog = NULL;
+ }
+
+ return 0;
+}
+
+static void cam_icp_device_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+ if (!hw_mgr->bps_ctxt_cnt &&
+ hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog) {
+ hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog_reset_counter = 0;
crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
- else if (!hw_mgr->ipe_ctxt_cnt)
+ hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog = NULL;
+ }
+
+ if (!hw_mgr->ipe_ctxt_cnt &&
+ hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog) {
+ hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog_reset_counter = 0;
crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+ hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog = NULL;
+ }
+}
+
+static int cam_icp_ctx_timer_reset(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ if (ctx_data && ctx_data->watch_dog) {
+ ctx_data->watch_dog_reset_counter++;
+ CAM_DBG(CAM_ICP, "reset timer : ctx_id = %d, counter=%d",
+ ctx_data->ctx_id, ctx_data->watch_dog_reset_counter);
+ crm_timer_reset(ctx_data->watch_dog);
+ }
+
+ return 0;
+}
+
+static void cam_icp_device_timer_reset(struct cam_icp_hw_mgr *hw_mgr,
+ int device_index)
+{
+ if ((device_index >= ICP_CLK_HW_MAX) || (!hw_mgr))
+ return;
+
+ if (hw_mgr->clk_info[device_index].watch_dog) {
+ crm_timer_reset(hw_mgr->clk_info[device_index].watch_dog);
+ hw_mgr->clk_info[device_index].watch_dog_reset_counter++;
+ }
}
static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
@@ -417,7 +604,9 @@
for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
ctx_data = &hw_mgr->ctx_data[i];
if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
- ctx_data->icp_dev_acquire_info->dev_type == dev_type)
+ ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx_data->icp_dev_acquire_info->dev_type) ==
+ ICP_DEV_TYPE_TO_CLK_TYPE(dev_type))
hw_mgr_clk_info->base_clk +=
ctx_data->clk_info.base_clk;
}
@@ -572,7 +761,8 @@
hw_mgr_clk_info->over_clked = 0;
rc = false;
} else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
- hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk;
+ hw_mgr_clk_info->curr_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+ ctx_data, hw_mgr_clk_info->base_clk);
rc = true;
}
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -630,7 +820,13 @@
* recalculate bandwidth of all contexts of same hardware and update
* voting of bandwidth
*/
- if (clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw)
+ CAM_DBG(CAM_ICP, "ubw ctx = %lld clk_info ubw = %lld busy = %d",
+ ctx_data->clk_info.uncompressed_bw,
+ clk_info->uncompressed_bw, busy);
+
+ if ((clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw) &&
+ (ctx_data->clk_info.uncompressed_bw ==
+ hw_mgr_clk_info->uncompressed_bw))
return false;
if (busy &&
@@ -644,13 +840,18 @@
for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
ctx = &hw_mgr->ctx_data[i];
if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
- ctx->icp_dev_acquire_info->dev_type ==
- ctx_data->icp_dev_acquire_info->dev_type) {
+ ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx->icp_dev_acquire_info->dev_type) ==
+ ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx_data->icp_dev_acquire_info->dev_type)) {
mutex_lock(&hw_mgr->hw_mgr_mutex);
hw_mgr_clk_info->uncompressed_bw +=
ctx->clk_info.uncompressed_bw;
hw_mgr_clk_info->compressed_bw +=
ctx->clk_info.compressed_bw;
+ CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld",
+ hw_mgr_clk_info->uncompressed_bw,
+ hw_mgr_clk_info->compressed_bw);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
}
}
@@ -668,12 +869,13 @@
uint64_t req_id;
struct cam_icp_clk_info *hw_mgr_clk_info;
+ cam_icp_ctx_timer_reset(ctx_data);
if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
- crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+ cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_BPS);
hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
CAM_DBG(CAM_ICP, "Reset bps timer");
} else {
- crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+ cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_IPE);
hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
CAM_DBG(CAM_ICP, "Reset ipe timer");
}
@@ -747,6 +949,7 @@
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
struct cam_hw_intf *dev_intf = NULL;
+ struct cam_a5_clk_update_cmd clk_upd_cmd;
ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
@@ -768,14 +971,18 @@
id = CAM_ICP_IPE_CMD_UPDATE_CLK;
}
+ clk_upd_cmd.curr_clk_rate = curr_clk_rate;
+ clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
- &curr_clk_rate, sizeof(curr_clk_rate));
+ &clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
if (ctx_data->icp_dev_acquire_info->dev_type != CAM_ICP_RES_TYPE_BPS)
if (ipe1_dev_intf)
ipe1_dev_intf->hw_ops.process_cmd(
ipe1_dev_intf->hw_priv, id,
- &curr_clk_rate, sizeof(curr_clk_rate));
+ &clk_upd_cmd,
+ sizeof(struct cam_a5_clk_update_cmd));
return 0;
}
@@ -819,11 +1026,13 @@
dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
&clk_update, sizeof(clk_update));
- if (ctx_data->icp_dev_acquire_info->dev_type != CAM_ICP_RES_TYPE_BPS)
- if (ipe1_dev_intf)
- ipe1_dev_intf->hw_ops.process_cmd(
- ipe1_dev_intf->hw_priv, id,
- &clk_update, sizeof(clk_update));
+ /*
+ * Consolidated bw needs to be voted on only one IPE client. Otherwise
+ * total bw that we vote at bus client would be doubled. So either
+ * remove voting on IPE1 or divide the vote for each IPE client
+ * and vote to cpas - cpas will add up and vote full bw to sf client
+ * anyway.
+ */
return 0;
}
@@ -862,8 +1071,12 @@
if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
if (hw_mgr->bps_ctxt_cnt++)
goto end;
- bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
- if (icp_hw_mgr.icp_pc_flag) {
+ if (!hw_mgr->bps_clk_state) {
+ bps_dev_intf->hw_ops.init(
+ bps_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = true;
+ }
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
bps_dev_intf->hw_ops.process_cmd(
bps_dev_intf->hw_priv,
CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
@@ -872,34 +1085,40 @@
} else {
if (hw_mgr->ipe_ctxt_cnt++)
goto end;
-
- ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
- if (icp_hw_mgr.icp_pc_flag) {
+ if (!hw_mgr->ipe_clk_state)
+ ipe0_dev_intf->hw_ops.init(
+ ipe0_dev_intf->hw_priv, NULL, 0);
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
ipe0_dev_intf->hw_ops.process_cmd(
ipe0_dev_intf->hw_priv,
CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
}
- if ((icp_hw_mgr.ipe1_enable) && (ipe1_dev_intf)) {
+ if ((icp_hw_mgr.ipe1_enable) &&
+ (ipe1_dev_intf) &&
+ (!hw_mgr->ipe_clk_state)) {
ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
NULL, 0);
- if (icp_hw_mgr.icp_pc_flag) {
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
ipe1_dev_intf->hw_ops.process_cmd(
ipe1_dev_intf->hw_priv,
CAM_ICP_IPE_CMD_POWER_RESUME,
NULL, 0);
}
}
- if (icp_hw_mgr.icp_pc_flag) {
+ hw_mgr->ipe_clk_state = true;
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
hw_mgr->core_info = hw_mgr->core_info |
(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1);
}
}
CAM_DBG(CAM_ICP, "core_info %X", hw_mgr->core_info);
- if (icp_hw_mgr.icp_pc_flag)
+ if (icp_hw_mgr.ipe_bps_pc_flag)
rc = hfi_enable_ipe_bps_pc(true, hw_mgr->core_info);
+ else if (icp_hw_mgr.icp_pc_flag)
+ rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
else
rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
end:
@@ -936,7 +1155,7 @@
if (hw_mgr->bps_ctxt_cnt)
goto end;
- if (icp_hw_mgr.icp_pc_flag) {
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
rc = bps_dev_intf->hw_ops.process_cmd(
bps_dev_intf->hw_priv,
CAM_ICP_BPS_CMD_POWER_COLLAPSE,
@@ -945,7 +1164,11 @@
hw_mgr->core_info & (~ICP_PWR_CLP_BPS);
}
- bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+ if (hw_mgr->bps_clk_state) {
+ bps_dev_intf->hw_ops.deinit
+ (bps_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = false;
+ }
} else {
CAM_DBG(CAM_ICP, "ipe ctx cnt %d", hw_mgr->ipe_ctxt_cnt);
if (ctx_data)
@@ -954,26 +1177,32 @@
if (hw_mgr->ipe_ctxt_cnt)
goto end;
- if (icp_hw_mgr.icp_pc_flag) {
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
rc = ipe0_dev_intf->hw_ops.process_cmd(
ipe0_dev_intf->hw_priv,
CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
}
- ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+
+ if (hw_mgr->ipe_clk_state)
+ ipe0_dev_intf->hw_ops.deinit(
+ ipe0_dev_intf->hw_priv, NULL, 0);
if (ipe1_dev_intf) {
- if (icp_hw_mgr.icp_pc_flag) {
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
rc = ipe1_dev_intf->hw_ops.process_cmd(
ipe1_dev_intf->hw_priv,
CAM_ICP_IPE_CMD_POWER_COLLAPSE,
NULL, 0);
}
+ if (hw_mgr->ipe_clk_state)
ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
NULL, 0);
}
- if (icp_hw_mgr.icp_pc_flag) {
+
+ hw_mgr->ipe_clk_state = false;
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
hw_mgr->core_info = hw_mgr->core_info &
(~(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1));
}
@@ -1031,7 +1260,18 @@
rc = -ENOMEM;
goto err;
}
- icp_hw_mgr.icp_pc_flag = 1;
+ icp_hw_mgr.icp_pc_flag = false;
+
+ if (!debugfs_create_bool("ipe_bps_pc",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.ipe_bps_pc_flag)) {
+ CAM_ERR(CAM_ICP, "failed to create ipe_bps_pc entry");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ icp_hw_mgr.ipe_bps_pc_flag = false;
if (!debugfs_create_file("icp_debug_clk",
0644,
@@ -1112,7 +1352,7 @@
ctx_data->hfi_frame_process.in_resource[i]);
cam_sync_destroy(
ctx_data->hfi_frame_process.in_resource[i]);
- ctx_data->hfi_frame_process.in_resource[i] = 0;
+ ctx_data->hfi_frame_process.in_free_resource[i] = 0;
}
hfi_frame_process->fw_process_flag[i] = false;
clear_bit(i, ctx_data->hfi_frame_process.bitmap);
@@ -1122,7 +1362,7 @@
if (!hfi_frame_process->in_free_resource[i])
continue;
- CAM_INFO(CAM_ICP, "Delete merged sync in object: %d",
+ CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
ctx_data->hfi_frame_process.in_free_resource[i]);
cam_sync_destroy(
ctx_data->hfi_frame_process.in_free_resource[i]);
@@ -1155,15 +1395,14 @@
clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(ctx_data->icp_dev_acquire_info->
dev_type);
- crm_timer_reset(icp_hw_mgr.clk_info[clk_type].watch_dog);
+ cam_icp_device_timer_reset(&icp_hw_mgr, clk_type);
mutex_lock(&ctx_data->ctx_mutex);
+ cam_icp_ctx_timer_reset(ctx_data);
if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+ CAM_DBG(CAM_ICP, "ctx %u is in %d state",
+ ctx_data->ctx_id, ctx_data->state);
mutex_unlock(&ctx_data->ctx_mutex);
- CAM_WARN(CAM_ICP,
- "ctx with id: %u not in the right state : %x",
- ctx_data->ctx_id,
- ctx_data->state);
return 0;
}
@@ -1284,6 +1523,7 @@
{
struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ int rc = 0;
create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
if (!create_handle_ack) {
@@ -1300,11 +1540,15 @@
if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
ctx_data->fw_handle = create_handle_ack->fw_handle;
CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
- complete(&ctx_data->wait_complete);
- } else
- CAM_WARN(CAM_ICP, "Timeout failed to create fw handle");
-
- return 0;
+ } else {
+ CAM_WARN(CAM_ICP,
+ "This ctx is no longer in use current state: %d",
+ ctx_data->state);
+ ctx_data->fw_handle = 0;
+ rc = -EPERM;
+ }
+ complete(&ctx_data->wait_complete);
+ return rc;
}
static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
@@ -1368,21 +1612,38 @@
{
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
int rc = 0;
+ a5_dev_intf = icp_hw_mgr.a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
switch (msg_ptr[ICP_PACKET_OPCODE]) {
- case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
- case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
- CAM_DBG(CAM_ICP, "received IPE/BPS_DESTROY/ABORT:");
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ ctx_data =
+ (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+ complete(&ctx_data->wait_complete);
+ CAM_DBG(CAM_ICP, "received IPE/BPS/ ABORT: ctx_state =%d",
+ ctx_data->state);
+ break;
+ case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
- (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE))
+ (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
complete(&ctx_data->wait_complete);
-
+ }
+ CAM_DBG(CAM_ICP, "received IPE/BPS/ DESTROY: ctx_state =%d",
+ ctx_data->state);
break;
default:
CAM_ERR(CAM_ICP, "Invalid opcode : %u",
@@ -1390,7 +1651,6 @@
rc = -EINVAL;
break;
}
-
return rc;
}
@@ -1791,12 +2051,20 @@
return 0;
}
- if (ipe1_dev_intf) {
+ if (ipe1_dev_intf && hw_mgr->ipe_clk_state) {
ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
NULL, 0);
}
- ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
- bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+
+ if (hw_mgr->ipe_clk_state)
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ if (hw_mgr->bps_clk_state)
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+
+
+ hw_mgr->bps_clk_state = false;
+ hw_mgr->ipe_clk_state = false;
+
return 0;
}
static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
@@ -1814,9 +2082,15 @@
}
a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
- rc = cam_icp_mgr_send_pc_prep(hw_mgr);
-
- cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ if (!hw_mgr->icp_pc_flag) {
+ cam_hfi_disable_cpu(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ rc = cam_icp_mgr_hw_close(hw_mgr, NULL);
+ } else {
+ rc = cam_icp_mgr_send_pc_prep(hw_mgr);
+ cam_hfi_disable_cpu(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ }
a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
CAM_DBG(CAM_ICP, "EXIT");
@@ -1871,40 +2145,13 @@
hw_mgr->a5_jtag_debug);
}
-static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
-{
- int rc = 0;
- struct cam_hw_intf *a5_dev_intf = NULL;
-
- CAM_DBG(CAM_ICP, "Enter");
- a5_dev_intf = hw_mgr->a5_dev_intf;
-
- if (!a5_dev_intf) {
- CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
- return -EINVAL;
- }
-
- rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
- if (rc)
- return -EINVAL;
-
- rc = cam_icp_mgr_hfi_resume(hw_mgr);
- if (rc)
- goto hfi_resume_failed;
-
- CAM_DBG(CAM_ICP, "Exit");
- return rc;
-hfi_resume_failed:
- cam_icp_mgr_icp_power_collapse(hw_mgr);
- return rc;
-}
static int cam_icp_mgr_abort_handle(
struct cam_icp_hw_ctx_data *ctx_data)
{
int rc = 0;
unsigned long rem_jiffies;
size_t packet_size;
- int timeout = 5000;
+ int timeout = 100;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async *abort_cmd;
struct crm_workq_task *task;
@@ -1915,9 +2162,10 @@
packet_size =
sizeof(struct hfi_cmd_ipebps_async) +
- sizeof(struct hfi_cmd_abort_destroy) -
+ sizeof(struct hfi_cmd_abort) -
sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
abort_cmd = kzalloc(packet_size, GFP_KERNEL);
+ CAM_DBG(CAM_ICP, "abort pkt size = %d", (int) packet_size);
if (!abort_cmd) {
rc = -ENOMEM;
return rc;
@@ -1935,8 +2183,6 @@
abort_cmd->fw_handles[0] = ctx_data->fw_handle;
abort_cmd->user_data1 = (uint64_t)ctx_data;
abort_cmd->user_data2 = (uint64_t)0x0;
- memcpy(abort_cmd->payload.direct, &ctx_data->temp_payload,
- sizeof(uint64_t));
task_data = (struct hfi_cmd_work_data *)task->payload;
task_data->data = (void *)abort_cmd;
@@ -1967,7 +2213,7 @@
struct cam_icp_hw_ctx_data *ctx_data)
{
int rc = 0;
- int timeout = 5000;
+ int timeout = 100;
unsigned long rem_jiffies;
size_t packet_size;
struct hfi_cmd_work_data *task_data;
@@ -2021,7 +2267,10 @@
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- CAM_ERR(CAM_ICP, "FW response timeout: %d", rc);
+ CAM_ERR(CAM_ICP, "FW response timeout: %d for %u",
+ rc, ctx_data->ctx_id);
+ if (icp_hw_mgr.a5_debug_q)
+ cam_icp_mgr_process_dbg_buf();
}
kfree(destroy_cmd);
@@ -2050,6 +2299,8 @@
cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
&hw_mgr->ctx_data[ctx_id], 0);
hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
+ CAM_DBG(CAM_ICP, "E: ctx_id = %d", ctx_id);
+ cam_icp_mgr_abort_handle(&hw_mgr->ctx_data[ctx_id]);
cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);
@@ -2066,8 +2317,10 @@
kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
+ cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+ CAM_DBG(CAM_ICP, "X: ctx_id = %d", ctx_id);
return 0;
}
@@ -2093,12 +2346,15 @@
ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = false;
+ hw_mgr->ipe_clk_state = false;
}
static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
{
struct cam_icp_hw_mgr *hw_mgr = hw_priv;
struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
struct cam_icp_a5_set_irq_cb irq_cb;
struct cam_icp_a5_set_fw_buf_info fw_buf_info;
int rc = 0;
@@ -2110,14 +2366,13 @@
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
}
-
a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return -EINVAL;
}
-
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
fw_buf_info.kva = 0;
fw_buf_info.iova = 0;
fw_buf_info.len = 0;
@@ -2128,9 +2383,8 @@
sizeof(fw_buf_info));
if (rc)
CAM_ERR(CAM_ICP, "nullify the fw buf failed");
-
- cam_hfi_deinit();
-
+ cam_hfi_deinit(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
irq_cb.icp_hw_mgr_cb = NULL;
irq_cb.data = NULL;
rc = a5_dev_intf->hw_ops.process_cmd(
@@ -2144,7 +2398,7 @@
hw_mgr->fw_download = false;
hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
+ CAM_DBG(CAM_ICP, "Exit");
return rc;
}
@@ -2185,11 +2439,16 @@
goto ipe1_dev_init_failed;
}
+ hw_mgr->bps_clk_state = true;
+ hw_mgr->ipe_clk_state = true;
+
return rc;
ipe1_dev_init_failed:
ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->ipe_clk_state = false;
ipe0_dev_init_failed:
bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = false;
bps_dev_init_failed:
a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
a5_dev_init_failed:
@@ -2321,11 +2580,50 @@
return rc;
}
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc = 0;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ bool downloadFromResume = true;
+
+ CAM_DBG(CAM_ICP, "Enter");
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
+ return -EINVAL;
+ }
+
+ if (hw_mgr->fw_download == false) {
+ CAM_DBG(CAM_ICP, "Downloading FW");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = cam_icp_mgr_hw_open(hw_mgr, &downloadFromResume);
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ CAM_DBG(CAM_ICP, "FW Download Done Exit");
+ return rc;
+ }
+
+ rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+ if (rc)
+ return -EINVAL;
+
+ rc = cam_icp_mgr_hfi_resume(hw_mgr);
+ if (rc)
+ goto hfi_resume_failed;
+
+ CAM_DBG(CAM_ICP, "Exit");
+ return rc;
+hfi_resume_failed:
+ cam_icp_mgr_icp_power_collapse(hw_mgr);
+ return rc;
+}
+
static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
{
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_info *a5_dev = NULL;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ bool icp_pc = false;
int rc = 0;
if (!hw_mgr) {
@@ -2348,58 +2646,64 @@
}
a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
rc = cam_icp_allocate_hfi_mem();
- if (rc) {
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (rc)
goto alloc_hfi_mem_failed;
- }
rc = cam_icp_mgr_device_init(hw_mgr);
- if (rc) {
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (rc)
goto dev_init_fail;
- }
rc = cam_icp_mgr_fw_download(hw_mgr);
- if (rc) {
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (rc)
goto fw_download_failed;
- }
rc = cam_icp_mgr_hfi_init(hw_mgr);
- if (rc) {
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (rc)
goto hfi_init_failed;
- }
rc = cam_icp_mgr_send_fw_init(hw_mgr);
- if (rc) {
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (rc)
goto fw_init_failed;
- }
hw_mgr->ctxt_cnt = 0;
+ hw_mgr->fw_download = true;
if (icp_hw_mgr.a5_debug_q)
hfi_set_debug_level(icp_hw_mgr.a5_dbg_lvl);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_INFO(CAM_ICP, "FW download done successfully");
+
+ rc = cam_ipe_bps_deint(hw_mgr);
+ if (download_fw_args)
+ icp_pc = *((bool *)download_fw_args);
+
+ if (download_fw_args && icp_pc == true && hw_mgr->icp_pc_flag) {
+ rc = cam_ipe_bps_deint(hw_mgr);
+ CAM_DBG(CAM_ICP, "deinit all clocks");
+ }
+
+ if (download_fw_args && icp_pc == true)
+ return rc;
rc = cam_ipe_bps_deint(hw_mgr);
rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
+ CAM_DBG(CAM_ICP, "deinit all clocks at boot up");
- hw_mgr->fw_download = true;
- CAM_DBG(CAM_ICP, "FW download done successfully");
return rc;
fw_init_failed:
- cam_hfi_deinit();
+ cam_hfi_deinit(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
hfi_init_failed:
- cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ cam_hfi_disable_cpu(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
fw_download_failed:
cam_icp_mgr_device_deinit(hw_mgr);
dev_init_fail:
cam_icp_free_hfi_mem();
alloc_hfi_mem_failed:
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
return rc;
}
@@ -2489,6 +2793,8 @@
rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
if (rc)
goto config_err;
+ CAM_DBG(CAM_ICP, "req_id = %lld %u",
+ req_id, ctx_data->ctx_id);
mutex_unlock(&ctx_data->ctx_mutex);
return 0;
@@ -2539,14 +2845,17 @@
}
static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
- struct cam_packet *packet,
+ struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data,
uint32_t *fw_cmd_buf_iova_addr)
{
int rc = 0;
- int i;
+ int i, j, k;
uint64_t addr;
size_t len;
struct cam_cmd_buf_desc *cmd_desc = NULL;
+ uint64_t cpu_addr = 0;
+ struct ipe_frame_process_data *frame_process_data = NULL;
+ struct bps_frame_process_data *bps_frame_process_data = NULL;
cmd_desc = (struct cam_cmd_buf_desc *)
((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
@@ -2564,6 +2873,67 @@
*fw_cmd_buf_iova_addr = addr;
*fw_cmd_buf_iova_addr =
(*fw_cmd_buf_iova_addr + cmd_desc[i].offset);
+ rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+ &cpu_addr, &len);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "get cmd buf failed %x",
+ hw_mgr->iommu_hdl);
+ *fw_cmd_buf_iova_addr = 0;
+ return rc;
+ }
+ cpu_addr = cpu_addr + cmd_desc[i].offset;
+ }
+ }
+
+ if (!cpu_addr) {
+ CAM_ERR(CAM_ICP, "Invalid cpu addr");
+ return -EINVAL;
+ }
+
+ if (ctx_data->icp_dev_acquire_info->dev_type !=
+ CAM_ICP_RES_TYPE_BPS) {
+ CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+ frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
+ CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
+ frame_process_data->target_time,
+ frame_process_data->frames_in_batch);
+ frame_process_data->strip_lib_out_addr = 0;
+ frame_process_data->iq_settings_addr = 0;
+ frame_process_data->scratch_buffer_addr = 0;
+ frame_process_data->ubwc_stats_buffer_addr = 0;
+ frame_process_data->cdm_buffer_addr = 0;
+ frame_process_data->cdm_prog_base = 0;
+ for (i = 0; i < frame_process_data->frames_in_batch; i++) {
+ for (j = 0; j < IPE_IO_IMAGES_MAX; j++) {
+ for (k = 0; k < MAX_NUM_OF_IMAGE_PLANES; k++) {
+ frame_process_data->
+ framesets[i].buffers[j].
+ buffer_ptr[k] = 0;
+ frame_process_data->
+ framesets[i].buffers[j].
+ meta_buffer_ptr[k] = 0;
+ }
+ }
+ }
+ } else {
+ CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+ bps_frame_process_data =
+ (struct bps_frame_process_data *)cpu_addr;
+ CAM_DBG(CAM_ICP, "%u %u",
+ bps_frame_process_data->max_num_cores,
+ bps_frame_process_data->target_time);
+ bps_frame_process_data->ubwc_stats_buffer_addr = 0;
+ bps_frame_process_data->cdm_buffer_addr = 0;
+ bps_frame_process_data->iq_settings_addr = 0;
+ bps_frame_process_data->strip_lib_out_addr = 0;
+ bps_frame_process_data->cdm_prog_addr = 0;
+ for (i = 0; i < BPS_IO_IMAGES_MAX; i++) {
+ for (j = 0; j < MAX_NUM_OF_IMAGE_PLANES; j++) {
+ bps_frame_process_data->
+ buffers[i].buffer_ptr[j] = 0;
+ bps_frame_process_data->
+ buffers[i].meta_buffer_ptr[j] = 0;
+ }
}
}
@@ -2766,12 +3136,13 @@
}
rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
- &fw_cmd_buf_iova_addr);
+ ctx_data, &fw_cmd_buf_iova_addr);
if (rc) {
mutex_unlock(&ctx_data->ctx_mutex);
return rc;
}
+ CAM_DBG(CAM_ICP, "E: req id = %lld", packet->header.request_id);
/* Update Buffer Address from handles and patch information */
rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
hw_mgr->iommu_sec_hdl);
@@ -2809,6 +3180,8 @@
prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
+ CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
+ packet->header.request_id, ctx_data->ctx_id);
mutex_unlock(&ctx_data->ctx_mutex);
return rc;
}
@@ -3025,6 +3398,7 @@
return -EINVAL;
}
+ CAM_DBG(CAM_ICP, "Enter");
ctx_data = release_hw->ctxt_to_hw_map;
if (!ctx_data) {
CAM_ERR(CAM_ICP, "NULL ctx data");
@@ -3054,15 +3428,19 @@
rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
if (!hw_mgr->ctxt_cnt) {
CAM_DBG(CAM_ICP, "Last Release");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
cam_icp_mgr_icp_power_collapse(hw_mgr);
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
cam_icp_hw_mgr_reset_clk_info(hw_mgr);
hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
+ rc = cam_ipe_bps_deint(hw_mgr);
}
mutex_unlock(&hw_mgr->hw_mgr_mutex);
if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
- cam_icp_timer_stop(hw_mgr);
+ cam_icp_device_timer_stop(hw_mgr);
+ CAM_DBG(CAM_ICP, "Exit");
return rc;
}
@@ -3149,6 +3527,11 @@
CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
+ if (ctx_data->fw_handle == 0) {
+ CAM_ERR(CAM_ICP, "Invalid handle created");
+ rc = -EINVAL;
+ }
+
return rc;
}
@@ -3302,6 +3685,7 @@
return -EINVAL;
}
+ CAM_DBG(CAM_ICP, "ENTER");
mutex_lock(&hw_mgr->hw_mgr_mutex);
ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
if (ctx_id >= CAM_ICP_CTX_MAX) {
@@ -3311,12 +3695,13 @@
}
ctx_data = &hw_mgr->ctx_data[ctx_id];
ctx_data->ctx_id = ctx_id;
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
mutex_lock(&ctx_data->ctx_mutex);
rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
- if (rc)
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
goto acquire_info_failed;
+ }
icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
rc = cam_mem_get_io_buf(
@@ -3325,6 +3710,7 @@
&io_buf_addr, &io_buf_size);
if (rc) {
CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
goto get_io_buf_failed;
}
@@ -3332,7 +3718,6 @@
icp_dev_acquire_info->io_config_cmd_handle,
(void *)io_buf_addr, io_buf_size);
- mutex_lock(&hw_mgr->hw_mgr_mutex);
if (!hw_mgr->ctxt_cnt) {
rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
if (rc) {
@@ -3346,6 +3731,9 @@
goto get_io_buf_failed;
}
+ if (icp_hw_mgr.a5_debug_q)
+ hfi_set_debug_level(icp_hw_mgr.a5_dbg_lvl);
+
rc = cam_icp_send_ubwc_cfg(hw_mgr);
if (rc) {
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -3354,7 +3742,9 @@
}
if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
- cam_icp_timer_start(hw_mgr);
+ cam_icp_device_timer_start(hw_mgr);
+
+ cam_icp_ctx_timer_start(ctx_data);
rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
if (rc) {
@@ -3421,6 +3811,7 @@
send_ping_failed:
cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
ipe_bps_resume_failed:
+ cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
ubwc_cfg_failed:
if (!hw_mgr->ctxt_cnt)
cam_icp_mgr_icp_power_collapse(hw_mgr);
@@ -3725,7 +4116,6 @@
goto icp_wq_create_failed;
init_completion(&icp_hw_mgr.a5_complete);
-
return rc;
icp_wq_create_failed:
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 43d7a4a..aac4a5e 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -183,6 +183,8 @@
* @temp_payload: Payload for destroy handle data
* @ctx_id: Context Id
* @clk_info: Current clock info of a context
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
*/
struct cam_icp_hw_ctx_data {
void *context_priv;
@@ -200,6 +202,8 @@
struct ipe_bps_destroy temp_payload;
uint32_t ctx_id;
struct cam_ctx_clk_info clk_info;
+ struct cam_req_mgr_timer *watch_dog;
+ uint32_t watch_dog_reset_counter;
};
/**
@@ -222,6 +226,7 @@
* @compressed_bw: Current compressed bandwidth voting
* @hw_type: IPE/BPS device type
* @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
*/
struct cam_icp_clk_info {
uint32_t base_clk;
@@ -232,6 +237,7 @@
uint64_t compressed_bw;
uint32_t hw_type;
struct cam_req_mgr_timer *watch_dog;
+ uint32_t watch_dog_reset_counter;
};
/**
@@ -258,6 +264,8 @@
* @dentry: Debugfs entry
* @a5_debug: A5 debug flag
* @icp_pc_flag: Flag to enable/disable power collapse
+ * @ipe_bps_pc_flag: Flag to enable/disable
+ * power collapse for ipe & bps
* @icp_debug_clk: Set clock based on debug value
* @icp_default_clk: Set this clok if user doesn't supply
* @clk_info: Clock info of hardware
@@ -269,6 +277,12 @@
* @ipe1_enable: Flag for IPE1
* @bps_enable: Flag for BPS
* @core_info: 32 bit value , tells IPE0/1 and BPS
+ * @a5_dev_intf : Device interface for A5
+ * @ipe0_dev_intf: Device interface for IPE0
+ * @ipe1_dev_intf: Device interface for IPE1
+ * @bps_dev_intf: Device interface for BPS
+ * @ipe_clk_state: IPE clock state flag
+ * @bps_clk_state: BPS clock state flag
*/
struct cam_icp_hw_mgr {
struct mutex hw_mgr_mutex;
@@ -295,6 +309,7 @@
struct dentry *dentry;
bool a5_debug;
bool icp_pc_flag;
+ bool ipe_bps_pc_flag;
uint64_t icp_debug_clk;
uint64_t icp_default_clk;
struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
@@ -310,6 +325,8 @@
struct cam_hw_intf *ipe0_dev_intf;
struct cam_hw_intf *ipe1_dev_intf;
struct cam_hw_intf *bps_dev_intf;
+ bool ipe_clk_state;
+ bool bps_clk_state;
};
static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
index 6915ad5..9e05f2b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
@@ -22,4 +22,15 @@
CAM_ICP_DEV_BPS,
CAM_ICP_DEV_MAX,
};
+
+/**
+ * struct cam_a5_clk_update_cmd - Payload for hw manager command
+ *
+ * @curr_clk_rate: clk rate to HW
+ * @ipe_bps_pc_enable power collpase enable flag
+ */
+struct cam_a5_clk_update_cmd {
+ uint32_t curr_clk_rate;
+ bool ipe_bps_pc_enable;
+};
#endif
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index d2e04ef..771c4ed 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,7 +21,7 @@
#define ICP_CLK_TURBO_HZ 600000000
#define ICP_CLK_SVS_HZ 400000000
-#define CAM_ICP_A5_BW_BYTES_VOTE 100000000
+#define CAM_ICP_A5_BW_BYTES_VOTE 40000000
#define CAM_ICP_CTX_MAX 36
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 5b4156a..87478af 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -267,22 +267,28 @@
rc = cam_ipe_handle_resume(ipe_dev);
break;
case CAM_ICP_IPE_CMD_UPDATE_CLK: {
- uint32_t clk_rate = *(uint32_t *)cmd_args;
+ struct cam_a5_clk_update_cmd *clk_upd_cmd =
+ (struct cam_a5_clk_update_cmd *)cmd_args;
+ uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
if (!core_info->clk_enable) {
- cam_ipe_handle_pc(ipe_dev);
- cam_cpas_reg_write(core_info->cpas_handle,
- CAM_CPAS_REG_CPASTOP,
- hw_info->pwr_ctrl, true, 0x0);
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ cam_ipe_handle_pc(ipe_dev);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x0);
+ }
rc = cam_ipe_toggle_clk(soc_info, true);
if (rc)
CAM_ERR(CAM_ICP, "Enable failed");
else
core_info->clk_enable = true;
- rc = cam_ipe_handle_resume(ipe_dev);
- if (rc)
- CAM_ERR(CAM_ICP, "handle resume failed");
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ rc = cam_ipe_handle_resume(ipe_dev);
+ if (rc)
+ CAM_ERR(CAM_ICP, "bps resume failed");
+ }
}
CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 289d7d4..d24305a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,9 +141,22 @@
int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
uint32_t clk_rate)
{
+ int32_t src_clk_idx;
+
if (!soc_info)
return -EINVAL;
+ src_clk_idx = soc_info->src_clk_idx;
+
+ if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+ (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+ (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+ CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+ clk_rate,
+ soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+ clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+ }
+
return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 01c0a02..d62344d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -86,7 +86,8 @@
struct cam_ctx_request, list);
req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
- if (req_isp_old->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+ if (req_isp_old->hw_update_data.packet_opcode_type ==
+ CAM_ISP_PACKET_INIT_DEV) {
if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
CAM_ISP_CTX_CFG_MAX) {
CAM_WARN(CAM_ISP, "Can not merge INIT pkt");
@@ -456,6 +457,9 @@
}
}
+ if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
+ request_id = 0;
+
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
} else {
@@ -612,7 +616,10 @@
req_isp->bubble_report = 0;
}
- request_id = req->request_id;
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ }
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_ERROR);
@@ -738,9 +745,18 @@
req_isp->bubble_report = 0;
}
- request_id = req->request_id;
- __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
- CAM_REQ_MGR_SOF_EVENT_ERROR);
+ if (!req_isp->bubble_report) {
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+ } else
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ } else
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
@@ -881,6 +897,29 @@
return rc;
}
+static int __cam_isp_ctx_sof_in_flush(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "in valid sof event data");
+ return -EINVAL;
+ }
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ if (--ctx_isp->frame_skip_count == 0)
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ else
+ CAM_ERR(CAM_ISP, "Skip currect SOF");
+
+ return rc;
+}
+
static struct cam_isp_ctx_irq_ops
cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
/* SOF */
@@ -952,6 +991,17 @@
/* HALT */
{
},
+ /* FLUSH */
+ {
+ .irq_ops = {
+ NULL,
+ __cam_isp_ctx_sof_in_flush,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_applied,
+ },
+ },
};
static int __cam_isp_ctx_apply_req_in_activated_state(
@@ -1022,6 +1072,7 @@
cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
cfg.hw_update_entries = req_isp->cfg;
cfg.num_hw_update_entries = req_isp->num_cfg;
+ cfg.priv = &req_isp->hw_update_data;
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc) {
@@ -1155,6 +1206,24 @@
return rc;
}
+static int __cam_isp_ctx_flush_req_in_activated(
+ struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush_req)
+{
+ int rc = 0;
+ struct cam_isp_context *ctx_isp;
+
+ ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+ spin_lock_bh(&ctx->lock);
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
+ ctx_isp->frame_skip_count = 2;
+ spin_unlock_bh(&ctx->lock);
+
+ CAM_DBG(CAM_ISP, "Flush request in state %d", ctx->state);
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+ return rc;
+}
+
static int __cam_isp_ctx_flush_req_in_ready(
struct cam_context *ctx,
struct cam_req_mgr_flush_request *flush_req)
@@ -1215,12 +1284,24 @@
.crm_ops = {},
.irq_ops = NULL,
},
+ /* HW ERROR */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
/* HALT */
{
.ioctl_ops = {},
.crm_ops = {},
.irq_ops = NULL,
},
+ /* FLUSH */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
};
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
@@ -1310,6 +1391,17 @@
struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
uint64_t request_id = 0;
+ /*
+ * Sof in bubble applied state means, reg update not received.
+ * before increment frame id and override time stamp value, send
+ * the previous sof time stamp that got captured in the
+ * sof in applied state.
+ */
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
ctx_isp->frame_id++;
ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
@@ -1359,9 +1451,18 @@
req_isp->bubble_report = 0;
}
- request_id = req->request_id;
- __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
- CAM_REQ_MGR_SOF_EVENT_ERROR);
+ if (!req_isp->bubble_report) {
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+ } else
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ } else
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
/* change the state to bubble, as reg update has not come */
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
@@ -1566,10 +1667,23 @@
__cam_isp_ctx_buf_done_in_bubble_applied,
},
},
-
+ /* HW ERROR */
+ {
+ },
/* HALT */
{
},
+ /* FLUSH */
+ {
+ .irq_ops = {
+ NULL,
+ __cam_isp_ctx_sof_in_flush,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_applied,
+ },
+ },
};
static int __cam_isp_ctx_rdi_only_apply_req_top_state(
@@ -1625,15 +1739,26 @@
.crm_ops = {},
.irq_ops = NULL,
},
+ /* HW ERROR */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
/* HALT */
{
.ioctl_ops = {},
.crm_ops = {},
.irq_ops = NULL,
},
+ /* FLUSHED */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
};
-
/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd)
@@ -1694,7 +1819,6 @@
struct cam_req_mgr_add_request add_req;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- struct cam_isp_prepare_hw_update_data hw_update_data;
CAM_DBG(CAM_ISP, "get free request object......");
@@ -1745,7 +1869,7 @@
cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
cfg.out_map_entries = req_isp->fence_map_out;
cfg.in_map_entries = req_isp->fence_map_in;
- cfg.priv = &hw_update_data;
+ cfg.priv = &req_isp->hw_update_data;
CAM_DBG(CAM_ISP, "try to prepare config packet......");
@@ -1760,7 +1884,6 @@
req_isp->num_fence_map_out = cfg.num_out_map_entries;
req_isp->num_fence_map_in = cfg.num_in_map_entries;
req_isp->num_acked = 0;
- req_isp->packet_opcode_type = hw_update_data.packet_opcode_type;
CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
req_isp->num_cfg, req_isp->num_fence_map_out,
@@ -1770,9 +1893,11 @@
req->status = 1;
CAM_DBG(CAM_ISP, "Packet request id 0x%llx packet opcode:%d",
- packet->header.request_id, req_isp->packet_opcode_type);
+ packet->header.request_id,
+ req_isp->hw_update_data.packet_opcode_type);
- if (req_isp->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+ if (req_isp->hw_update_data.packet_opcode_type ==
+ CAM_ISP_PACKET_INIT_DEV) {
if (ctx->state < CAM_CTX_ACTIVATED) {
rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
if (rc)
@@ -2017,7 +2142,7 @@
struct cam_start_stop_dev_cmd *cmd)
{
int rc = 0;
- struct cam_hw_start_args arg;
+ struct cam_hw_config_args arg;
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp =
@@ -2045,9 +2170,11 @@
rc = -EFAULT;
goto end;
}
+
arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
arg.hw_update_entries = req_isp->cfg;
arg.num_hw_update_entries = req_isp->num_cfg;
+ arg.priv = &req_isp->hw_update_data;
ctx_isp->frame_id = 0;
ctx_isp->active_req_cnt = 0;
@@ -2177,6 +2304,58 @@
return rc;
}
+static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
+{
+ int rc = 0;
+ struct cam_isp_hw_cmd_args hw_cmd_args;
+ struct cam_isp_context *ctx_isp =
+ (struct cam_isp_context *) ctx->ctx_priv;
+
+ hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+ rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+ &hw_cmd_args);
+
+ return rc;
+}
+
+static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
+{
+ int rc = 0;
+ struct cam_isp_hw_cmd_args hw_cmd_args;
+ struct cam_isp_context *ctx_isp =
+ (struct cam_isp_context *) ctx->ctx_priv;
+
+ hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+ rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+ &hw_cmd_args);
+
+ return rc;
+}
+
+static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *link_evt_data)
+{
+ int rc = 0;
+
+ switch (link_evt_data->evt_type) {
+ case CAM_REQ_MGR_LINK_EVT_ERR:
+ /* No need to handle this message now */
+ break;
+ case CAM_REQ_MGR_LINK_EVT_PAUSE:
+ __cam_isp_ctx_link_pause(ctx);
+ break;
+ case CAM_REQ_MGR_LINK_EVT_RESUME:
+ __cam_isp_ctx_link_resume(ctx);
+ break;
+ default:
+ CAM_WARN(CAM_ISP, "Unknown event from CRM");
+ break;
+ }
+ return rc;
+}
+
static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *unlink)
{
@@ -2308,7 +2487,8 @@
.crm_ops = {
.unlink = __cam_isp_ctx_unlink_in_activated,
.apply_req = __cam_isp_ctx_apply_req,
- .flush_req = __cam_isp_ctx_flush_req_in_top_state,
+ .flush_req = __cam_isp_ctx_flush_req_in_activated,
+ .process_evt = __cam_isp_ctx_process_evt,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
},
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 88ebc03..f1f3137d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
CAM_ISP_CTX_ACTIVATED_HW_ERROR,
CAM_ISP_CTX_ACTIVATED_HALT,
+ CAM_ISP_CTX_ACTIVATED_FLUSH,
CAM_ISP_CTX_ACTIVATED_MAX,
};
@@ -81,22 +82,22 @@
* the request has been completed.
* @bubble_report: Flag to track if bubble report is active on
* current request
- * @packet_opcode_type: Request packet opcode type,
- * ie INIT packet or update packet
+ * @hw_update_data: HW update data for this request
*
*/
struct cam_isp_ctx_req {
- struct cam_ctx_request *base;
+ struct cam_ctx_request *base;
- struct cam_hw_update_entry cfg[CAM_ISP_CTX_CFG_MAX];
- uint32_t num_cfg;
- struct cam_hw_fence_map_entry fence_map_out[CAM_ISP_CTX_RES_MAX];
- uint32_t num_fence_map_out;
- struct cam_hw_fence_map_entry fence_map_in[CAM_ISP_CTX_RES_MAX];
- uint32_t num_fence_map_in;
- uint32_t num_acked;
- int32_t bubble_report;
- uint32_t packet_opcode_type;
+ struct cam_hw_update_entry cfg[CAM_ISP_CTX_CFG_MAX];
+ uint32_t num_cfg;
+ struct cam_hw_fence_map_entry fence_map_out
+ [CAM_ISP_CTX_RES_MAX];
+ uint32_t num_fence_map_out;
+ struct cam_hw_fence_map_entry fence_map_in[CAM_ISP_CTX_RES_MAX];
+ uint32_t num_fence_map_in;
+ uint32_t num_acked;
+ int32_t bubble_report;
+ struct cam_isp_prepare_hw_update_data hw_update_data;
};
/**
@@ -116,6 +117,7 @@
* @subscribe_event: The irq event mask that CRM subscribes to, IFE will
* invoke CRM cb at those event.
* @last_applied_req_id: Last applied request id
+ * @frame_skip_count: Number of frame to skip before change state
*
*/
struct cam_isp_context {
@@ -135,6 +137,7 @@
int64_t reported_req_id;
uint32_t subscribe_event;
int64_t last_applied_req_id;
+ uint32_t frame_skip_count;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index ccab3a0..a6f1cf5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -860,10 +860,95 @@
return rc;
}
-static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
+static int cam_ife_mgr_acquire_cid_res(
struct cam_ife_hw_mgr_ctx *ife_ctx,
struct cam_isp_in_port_info *in_port,
- uint32_t cid_res_id)
+ uint32_t *cid_res_id,
+ enum cam_ife_pix_path_res_id csid_path)
+{
+ int rc = -1;
+ int i, j;
+ struct cam_ife_hw_mgr *ife_hw_mgr;
+ struct cam_ife_hw_mgr_res *cid_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_hw_reserve_resource_args csid_acquire;
+
+ ife_hw_mgr = ife_ctx->hw_mgr;
+
+ rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+ goto err;
+ }
+ cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
+
+ csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+ csid_acquire.in_port = in_port;
+ csid_acquire.res_id = csid_path;
+
+ for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+ if (!ife_hw_mgr->csid_devices[i])
+ continue;
+
+ hw_intf = ife_hw_mgr->csid_devices[i];
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
+ sizeof(csid_acquire));
+ if (rc)
+ continue;
+ else
+ break;
+ }
+
+ if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
+ CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
+ goto err;
+ }
+
+ cid_res->res_type = CAM_IFE_HW_MGR_RES_CID;
+ cid_res->res_id = csid_acquire.node_res->res_id;
+ cid_res->is_dual_vfe = in_port->usage_type;
+ cid_res->hw_res[0] = csid_acquire.node_res;
+ cid_res->hw_res[1] = NULL;
+ /* CID(DT_ID) value of acquired device, required for path */
+ *cid_res_id = csid_acquire.node_res->res_id;
+
+ if (cid_res->is_dual_vfe) {
+ csid_acquire.node_res = NULL;
+ csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+ csid_acquire.in_port = in_port;
+ for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+ if (!ife_hw_mgr->csid_devices[j])
+ continue;
+
+ hw_intf = ife_hw_mgr->csid_devices[j];
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &csid_acquire, sizeof(csid_acquire));
+ if (rc)
+ continue;
+ else
+ break;
+ }
+
+ if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resource");
+ goto err;
+ }
+ cid_res->hw_res[1] = csid_acquire.node_res;
+ }
+ cid_res->parent = &ife_ctx->res_list_ife_in;
+ ife_ctx->res_list_ife_in.child[
+ ife_ctx->res_list_ife_in.num_children++] = cid_res;
+
+ return 0;
+err:
+ return rc;
+
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port)
{
int rc = -1;
int i;
@@ -872,8 +957,17 @@
struct cam_ife_hw_mgr_res *csid_res;
struct cam_ife_hw_mgr_res *cid_res;
struct cam_hw_intf *hw_intf;
+ uint32_t cid_res_id;
struct cam_csid_hw_reserve_resource_args csid_acquire;
+ /* get cid resource */
+ rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
+ CAM_IFE_PIX_PATH_RES_IPP);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+ goto err;
+ }
+
ife_hw_mgr = ife_ctx->hw_mgr;
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
@@ -985,8 +1079,7 @@
static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
struct cam_ife_hw_mgr_ctx *ife_ctx,
- struct cam_isp_in_port_info *in_port,
- uint32_t cid_res_id)
+ struct cam_isp_in_port_info *in_port)
{
int rc = -1;
int i, j;
@@ -996,6 +1089,7 @@
struct cam_ife_hw_mgr_res *cid_res;
struct cam_hw_intf *hw_intf;
struct cam_isp_out_port_info *out_port;
+ uint32_t cid_res_id;
struct cam_csid_hw_reserve_resource_args csid_acquire;
ife_hw_mgr = ife_ctx->hw_mgr;
@@ -1005,6 +1099,15 @@
if (!cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
continue;
+ /* get cid resource */
+ rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
+ cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+ out_port->res_type));
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+ goto err;
+ }
+
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
&csid_res);
if (rc) {
@@ -1135,91 +1238,6 @@
return 0;
}
-static int cam_ife_mgr_acquire_cid_res(
- struct cam_ife_hw_mgr_ctx *ife_ctx,
- struct cam_isp_in_port_info *in_port,
- uint32_t *cid_res_id,
- int pixel_count)
-{
- int rc = -1;
- int i, j;
- struct cam_ife_hw_mgr *ife_hw_mgr;
- struct cam_ife_hw_mgr_res *cid_res;
- struct cam_hw_intf *hw_intf;
- struct cam_csid_hw_reserve_resource_args csid_acquire;
-
- ife_hw_mgr = ife_ctx->hw_mgr;
-
- rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
- if (rc) {
- CAM_ERR(CAM_ISP, "No more free hw mgr resource");
- goto err;
- }
- cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
-
- csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
- csid_acquire.in_port = in_port;
- csid_acquire.pixel_count = pixel_count;
-
- for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
- if (!ife_hw_mgr->csid_devices[i])
- continue;
-
- hw_intf = ife_hw_mgr->csid_devices[i];
- rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
- sizeof(csid_acquire));
- if (rc)
- continue;
- else
- break;
- }
-
- if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
- CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
- goto err;
- }
-
- cid_res->res_type = CAM_IFE_HW_MGR_RES_CID;
- cid_res->res_id = csid_acquire.node_res->res_id;
- cid_res->is_dual_vfe = in_port->usage_type;
- cid_res->hw_res[0] = csid_acquire.node_res;
- cid_res->hw_res[1] = NULL;
- /* CID(DT_ID) value of acquire device, require for path */
- *cid_res_id = csid_acquire.node_res->res_id;
-
- if (cid_res->is_dual_vfe) {
- csid_acquire.node_res = NULL;
- csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
- csid_acquire.in_port = in_port;
- for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
- if (!ife_hw_mgr->csid_devices[j])
- continue;
-
- hw_intf = ife_hw_mgr->csid_devices[j];
- rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
- &csid_acquire, sizeof(csid_acquire));
- if (rc)
- continue;
- else
- break;
- }
-
- if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- CAM_ERR(CAM_ISP,
- "Can not acquire ife csid rdi resource");
- goto err;
- }
- cid_res->hw_res[1] = csid_acquire.node_res;
- }
- cid_res->parent = &ife_ctx->res_list_ife_in;
- ife_ctx->res_list_ife_in.child[
- ife_ctx->res_list_ife_in.num_children++] = cid_res;
-
- return 0;
-err:
- return rc;
-
-}
static int cam_ife_mgr_acquire_hw_for_ctx(
struct cam_ife_hw_mgr_ctx *ife_ctx,
struct cam_isp_in_port_info *in_port,
@@ -1229,7 +1247,6 @@
int is_dual_vfe = 0;
int pixel_count = 0;
int rdi_count = 0;
- uint32_t cid_res_id = 0;
is_dual_vfe = in_port->usage_type;
@@ -1248,18 +1265,9 @@
return -EINVAL;
}
- /* get cid resource */
- rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
- pixel_count);
- if (rc) {
- CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
- goto err;
- }
-
if (pixel_count) {
/* get ife csid IPP resrouce */
- rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
- cid_res_id);
+ rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port);
if (rc) {
CAM_ERR(CAM_ISP,
"Acquire IFE CSID IPP resource Failed");
@@ -1269,8 +1277,7 @@
if (rdi_count) {
/* get ife csid rdi resource */
- rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port,
- cid_res_id);
+ rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port);
if (rc) {
CAM_ERR(CAM_ISP,
"Acquire IFE CSID RDI resource Failed");
@@ -1437,15 +1444,97 @@
return rc;
}
+static int cam_isp_blob_bw_update(
+ struct cam_isp_bw_config *bw_config,
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_bw_update_args bw_upd_args;
+ uint64_t cam_bw_bps = 0;
+ uint64_t ext_bw_bps = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+
+ CAM_DBG(CAM_ISP,
+ "usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
+ "right cam_bw_bps=%llu ext_bw_bps=%llu",
+ bw_config->usage_type,
+ bw_config->left_pix_vote.cam_bw_bps,
+ bw_config->left_pix_vote.ext_bw_bps,
+ bw_config->right_pix_vote.cam_bw_bps,
+ bw_config->right_pix_vote.ext_bw_bps);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ cam_bw_bps =
+ bw_config->left_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->left_pix_vote.ext_bw_bps;
+ } else {
+ cam_bw_bps =
+ bw_config->right_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->right_pix_vote.ext_bw_bps;
+ }
+ else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+ && (hw_mgr_res->res_id <=
+ CAM_ISP_HW_VFE_IN_RDI3)) {
+ uint32_t idx = hw_mgr_res->res_id -
+ CAM_ISP_HW_VFE_IN_RDI0;
+ if (idx >= bw_config->num_rdi)
+ continue;
+
+ cam_bw_bps =
+ bw_config->rdi_vote[idx].cam_bw_bps;
+ ext_bw_bps =
+ bw_config->rdi_vote[idx].ext_bw_bps;
+ } else
+ if (hw_mgr_res->hw_res[i]) {
+ CAM_ERR(CAM_ISP, "Invalid res_id %u",
+ hw_mgr_res->res_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ bw_upd_args.node_res =
+ hw_mgr_res->hw_res[i];
+
+ bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
+ bw_upd_args.external_bw_bytes = ext_bw_bps;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ &bw_upd_args,
+ sizeof(struct cam_vfe_bw_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "BW Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
/* entry function: config_hw */
static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
void *config_hw_args)
{
int rc = -1, i;
- struct cam_hw_start_args *cfg;
+ struct cam_hw_config_args *cfg;
struct cam_hw_update_entry *cmd;
struct cam_cdm_bl_request *cdm_cmd;
struct cam_ife_hw_mgr_ctx *ctx;
+ struct cam_isp_prepare_hw_update_data *hw_update_data;
CAM_DBG(CAM_ISP, "Enter");
if (!hw_mgr_priv || !config_hw_args) {
@@ -1467,6 +1556,18 @@
if (atomic_read(&ctx->overflow_pending))
return -EINVAL;
+ hw_update_data = (struct cam_isp_prepare_hw_update_data *) cfg->priv;
+
+ for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+ if (hw_update_data->bw_config_valid[i] == true) {
+ rc = cam_isp_blob_bw_update(
+ (struct cam_isp_bw_config *)
+ &hw_update_data->bw_config[i], ctx);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
+ }
+ }
+
CAM_DBG(CAM_ISP, "Enter ctx id:%d num_hw_upd_entries %d",
ctx->ctx_index, cfg->num_hw_update_entries);
@@ -1798,7 +1899,7 @@
static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
{
int rc = -1;
- struct cam_hw_start_args *start_args = start_hw_args;
+ struct cam_hw_config_args *start_args = start_hw_args;
struct cam_ife_hw_mgr_ctx *ctx;
struct cam_ife_hw_mgr_res *hw_mgr_res;
uint32_t i;
@@ -2204,92 +2305,6 @@
return rc;
}
-static int cam_isp_blob_bw_update(
- uint32_t blob_type,
- struct cam_isp_generic_blob_info *blob_info,
- struct cam_isp_bw_config *bw_config,
- struct cam_hw_prepare_update_args *prepare)
-{
- struct cam_ife_hw_mgr_ctx *ctx = NULL;
- struct cam_ife_hw_mgr_res *hw_mgr_res;
- struct cam_hw_intf *hw_intf;
- struct cam_vfe_bw_update_args bw_upd_args;
- uint64_t cam_bw_bps = 0;
- uint64_t ext_bw_bps = 0;
- int rc = -EINVAL;
- uint32_t i;
-
- ctx = prepare->ctxt_to_hw_map;
-
- CAM_DBG(CAM_ISP,
- "usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
- "right cam_bw_bps=%llu ext_bw_bps=%llu",
- bw_config->usage_type,
- bw_config->left_pix_vote.cam_bw_bps,
- bw_config->left_pix_vote.ext_bw_bps,
- bw_config->right_pix_vote.cam_bw_bps,
- bw_config->right_pix_vote.ext_bw_bps);
-
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
- if (!hw_mgr_res->hw_res[i])
- continue;
-
- if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
- if (i == CAM_ISP_HW_SPLIT_LEFT) {
- cam_bw_bps =
- bw_config->left_pix_vote.cam_bw_bps;
- ext_bw_bps =
- bw_config->left_pix_vote.ext_bw_bps;
- } else {
- cam_bw_bps =
- bw_config->right_pix_vote.cam_bw_bps;
- ext_bw_bps =
- bw_config->right_pix_vote.ext_bw_bps;
- }
- else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
- && (hw_mgr_res->res_id <=
- CAM_ISP_HW_VFE_IN_RDI3)) {
- uint32_t idx = hw_mgr_res->res_id -
- CAM_ISP_HW_VFE_IN_RDI0;
- if (idx >= bw_config->num_rdi)
- continue;
-
- cam_bw_bps =
- bw_config->rdi_vote[idx].cam_bw_bps;
- ext_bw_bps =
- bw_config->rdi_vote[idx].ext_bw_bps;
- } else
- if (hw_mgr_res->hw_res[i]) {
- CAM_ERR(CAM_ISP, "Invalid res_id %u",
- hw_mgr_res->res_id);
- rc = -EINVAL;
- return rc;
- }
-
- hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
- if (hw_intf && hw_intf->hw_ops.process_cmd) {
- bw_upd_args.node_res =
- hw_mgr_res->hw_res[i];
-
- bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
- bw_upd_args.external_bw_bytes = ext_bw_bps;
-
- rc = hw_intf->hw_ops.process_cmd(
- hw_intf->hw_priv,
- CAM_ISP_HW_CMD_BW_UPDATE,
- &bw_upd_args,
- sizeof(struct cam_vfe_bw_update_args));
- if (rc)
- CAM_ERR(CAM_ISP, "BW Update failed");
- } else
- CAM_WARN(CAM_ISP, "NULL hw_intf!");
- }
- }
-
- return rc;
-}
-
static int cam_isp_packet_generic_blob_handler(void *user_data,
uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{
@@ -2340,11 +2355,22 @@
case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
struct cam_isp_bw_config *bw_config =
(struct cam_isp_bw_config *)blob_data;
+ struct cam_isp_prepare_hw_update_data *prepare_hw_data;
- rc = cam_isp_blob_bw_update(blob_type, blob_info,
- bw_config, prepare);
- if (rc)
- CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
+ if (!prepare || !prepare->priv ||
+ (bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
+ CAM_ERR(CAM_ISP, "Invalid inputs");
+ rc = -EINVAL;
+ break;
+ }
+
+ prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
+ prepare->priv;
+
+ memcpy(&prepare_hw_data->bw_config[bw_config->usage_type],
+ bw_config, sizeof(prepare_hw_data->bw_config[0]));
+ prepare_hw_data->bw_config_valid[bw_config->usage_type] = true;
+
}
break;
default:
@@ -2375,6 +2401,9 @@
CAM_DBG(CAM_ISP, "enter");
+ prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
+ prepare->priv;
+
ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
@@ -2399,6 +2428,13 @@
prepare->num_in_map_entries = 0;
prepare->num_out_map_entries = 0;
+ memset(&prepare_hw_data->bw_config[0], 0x0,
+ sizeof(prepare_hw_data->bw_config[0]) *
+ CAM_IFE_HW_NUM_MAX);
+ memset(&prepare_hw_data->bw_config_valid[0], 0x0,
+ sizeof(prepare_hw_data->bw_config_valid[0]) *
+ CAM_IFE_HW_NUM_MAX);
+
for (i = 0; i < ctx->num_base; i++) {
CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
@@ -2452,8 +2488,6 @@
* bits to get the type of operation since UMD definition
* of op_code has some difference from KMD.
*/
- prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
- prepare->priv;
if (((prepare->packet->header.op_code + 1) & 0xF) ==
CAM_ISP_PACKET_INIT_DEV) {
prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_INIT_DEV;
@@ -2488,6 +2522,53 @@
return rc;
}
+static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
+ enum cam_vfe_bw_control_action action)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_bw_control_args bw_ctrl_args;
+ int rc = -EINVAL;
+ uint32_t i;
+
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ bw_ctrl_args.node_res =
+ hw_mgr_res->hw_res[i];
+ bw_ctrl_args.action = action;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_BW_CONTROL,
+ &bw_ctrl_args,
+ sizeof(struct cam_vfe_bw_control_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "BW Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
+static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+ return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
+}
+
+static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+ return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_INCLUDE);
+}
+
static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
{
int rc = 0;
@@ -2513,6 +2594,12 @@
hw_cmd_args->u.is_rdi_only_context = 0;
break;
+ case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+ cam_ife_mgr_pause_hw(ctx);
+ break;
+ case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+ cam_ife_mgr_resume_hw(ctx);
+ break;
default:
CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
hw_cmd_args->cmd_type);
@@ -3733,10 +3820,13 @@
struct cam_vfe_top_irq_evt_payload *evt_payload;
int rc = -EINVAL;
- if (!handler_priv)
+ if (!evt_payload_priv)
return rc;
evt_payload = evt_payload_priv;
+ if (!handler_priv)
+ goto put_payload;
+
ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
@@ -3764,7 +3854,7 @@
if (rc) {
CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
rc);
- return IRQ_HANDLED;
+ goto put_payload;
}
CAM_DBG(CAM_ISP, "Calling EOF");
@@ -3786,6 +3876,8 @@
cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
+put_payload:
+ cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 4d26138..c418a41 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,9 +18,6 @@
#include "cam_ife_csid_hw_intf.h"
#include "cam_tasklet_util.h"
-/* MAX IFE instance */
-#define CAM_IFE_HW_NUM_MAX 4
-
/* enum cam_ife_hw_mgr_res_type - manager resource node type */
enum cam_ife_hw_mgr_res_type {
CAM_IFE_HW_MGR_RES_UNINIT,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index 031b7b2..b632e77 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -607,6 +607,33 @@
CAM_DBG(CAM_ISP, "Exit");
}
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv)
+{
+ struct cam_irq_controller *controller = priv;
+ uint32_t i = 0;
+
+ if (!controller)
+ return IRQ_NONE;
+
+ for (i = 0; i < controller->num_registers; i++) {
+
+ cam_io_w_mb(0x0, controller->mem_base +
+ controller->irq_register_arr[i].clear_reg_offset);
+ }
+
+ if (controller->global_clear_offset)
+ cam_io_w_mb(controller->global_clear_bitmask,
+ controller->mem_base +
+ controller->global_clear_offset);
+
+ for (i = 0; i < controller->num_registers; i++) {
+ cam_io_w_mb(0x0, controller->mem_base +
+ controller->irq_register_arr[i].mask_reg_offset);
+ }
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
{
struct cam_irq_controller *controller = priv;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
index 7e307b5..e3071ac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -250,4 +250,18 @@
*/
int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
+/*
+ * cam_irq_controller_clear_and_mask()
+ *
+ * @brief: This function clears and masks all the irq bits
+ *
+ * @irq_num: Number of the IRQ line that was set, which led to this
+ * function being called
+ * @priv: Private data registered with request_irq is passed back
+ * here. This private data should be the irq_controller
+ * structure.
+ *
+ * @return: IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv);
#endif /* _CAM_IRQ_CONTROLLER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index cf044eb..78336d2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,10 @@
#include <uapi/media/cam_isp.h>
#include "cam_hw_mgr_intf.h"
+/* MAX IFE instance */
+#define CAM_IFE_HW_NUM_MAX 4
+#define CAM_IFE_RDI_NUM_MAX 4
+
/**
* enum cam_isp_hw_event_type - Collection of the ISP hardware events
*/
@@ -47,15 +51,38 @@
};
/**
+ * struct cam_isp_bw_config_internal - Internal Bandwidth configuration
+ *
+ * @usage_type: Usage type (Single/Dual)
+ * @num_rdi: Number of RDI votes
+ * @left_pix_vote: Bandwidth vote for left ISP
+ * @right_pix_vote: Bandwidth vote for right ISP
+ * @rdi_vote: RDI bandwidth requirements
+ */
+
+struct cam_isp_bw_config_internal {
+ uint32_t usage_type;
+ uint32_t num_rdi;
+ struct cam_isp_bw_vote left_pix_vote;
+ struct cam_isp_bw_vote right_pix_vote;
+ struct cam_isp_bw_vote rdi_vote[CAM_IFE_RDI_NUM_MAX];
+};
+
+/**
* struct cam_isp_prepare_hw_update_data - hw prepare data
*
* @packet_opcode_type: Packet header opcode in the packet header
- * this opcode defines, packet is init packet or
- * update packet
+ * this opcode defines, packet is init packet or
+ * update packet
+ * @bw_config: BW config information
+ * @bw_config_valid: Flag indicating whether the bw_config at the index
+ * is valid or not
*
*/
struct cam_isp_prepare_hw_update_data {
- uint32_t packet_opcode_type;
+ uint32_t packet_opcode_type;
+ struct cam_isp_bw_config_internal bw_config[CAM_IFE_HW_NUM_MAX];
+ bool bw_config_valid[CAM_IFE_HW_NUM_MAX];
};
@@ -130,6 +157,8 @@
/* enum cam_isp_hw_mgr_command - Hardware manager command type */
enum cam_isp_hw_mgr_command {
CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+ CAM_ISP_HW_MGR_CMD_PAUSE_HW,
+ CAM_ISP_HW_MGR_CMD_RESUME_HW,
CAM_ISP_HW_MGR_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 70c9c3b..ff0c91f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -287,7 +287,7 @@
static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
- uint32_t res_type, int pixel_count)
+ uint32_t res_type)
{
int rc = 0;
struct cam_ife_csid_cid_data *cid_data;
@@ -305,8 +305,7 @@
break;
}
} else {
- if (cid_data->vc == vc && cid_data->dt == dt &&
- cid_data->pixel_count == pixel_count) {
+ if (cid_data->vc == vc && cid_data->dt == dt) {
cid_data->cnt++;
*res = &csid_hw->cid_res[i];
break;
@@ -330,7 +329,6 @@
cid_data->vc = vc;
cid_data->dt = dt;
cid_data->cnt = 1;
- cid_data->pixel_count = pixel_count;
csid_hw->cid_res[j].res_state =
CAM_ISP_RESOURCE_STATE_RESERVED;
*res = &csid_hw->cid_res[j];
@@ -570,7 +568,6 @@
struct cam_csid_hw_reserve_resource_args *cid_reserv)
{
int rc = 0;
- uint32_t i;
struct cam_ife_csid_cid_data *cid_data;
CAM_DBG(CAM_ISP,
@@ -728,7 +725,6 @@
cid_data->vc = cid_reserv->in_port->vc;
cid_data->dt = cid_reserv->in_port->dt;
cid_data->cnt = 1;
- cid_data->pixel_count = cid_reserv->pixel_count;
cid_reserv->node_res = &csid_hw->cid_res[0];
csid_hw->csi2_reserve_cnt++;
@@ -737,27 +733,43 @@
csid_hw->hw_intf->hw_idx,
cid_reserv->node_res->res_id);
} else {
- if (cid_reserv->pixel_count > 0) {
- for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
- cid_data = (struct cam_ife_csid_cid_data *)
- csid_hw->cid_res[i].res_priv;
- if ((csid_hw->cid_res[i].res_state >=
- CAM_ISP_RESOURCE_STATE_RESERVED) &&
- cid_data->pixel_count > 0) {
- CAM_DBG(CAM_ISP,
- "CSID:%d IPP resource is full");
- rc = -EINVAL;
- goto end;
- }
+ switch (cid_reserv->res_id) {
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ if (csid_hw->ipp_res.res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource not available",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
}
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ if (csid_hw->rdi_res[cid_reserv->res_id].res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_DBG(CAM_ISP,
+ "CSID:%d RDI:%d resource not available",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID%d: Invalid csid path",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
}
rc = cam_ife_csid_cid_get(csid_hw,
&cid_reserv->node_res,
cid_reserv->in_port->vc,
cid_reserv->in_port->dt,
- cid_reserv->in_port->res_type,
- cid_reserv->pixel_count);
+ cid_reserv->in_port->res_type);
/* if success then increment the reserve count */
if (!rc) {
if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
@@ -1884,21 +1896,23 @@
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
!csid_reg->rdi_reg[res->res_id]) {
- CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
- csid_hw->hw_intf->hw_idx,
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid res_state%d",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d Res:%d Invalid res_state%d",
csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
@@ -2006,21 +2020,23 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
- csid_hw->hw_intf->hw_idx,
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d Res:%d Invalid state%d",
csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
@@ -2258,6 +2274,33 @@
return rc;
}
+static int cam_ife_csid_reset_retain_sw_reg(
+ struct cam_ife_csid_hw *csid_hw)
+{
+ int rc = 0;
+ struct cam_ife_csid_reg_offset *csid_reg =
+ csid_hw->csid_info->csid_reg;
+
+ cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+ csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+ CAM_DBG(CAM_ISP, " Waiting for SW reset complete from irq handler");
+ rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
+ msecs_to_jiffies(IFE_CSID_TIMEOUT));
+ if (rc <= 0) {
+ CAM_ERR(CAM_ISP, "CSID:%d reset completion in fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ } else {
+ rc = 0;
+ }
+
+ return rc;
+}
+
+
static int cam_ife_csid_init_hw(void *hw_priv,
void *init_args, uint32_t arg_size)
{
@@ -2290,7 +2333,6 @@
goto end;
}
-
if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
CAM_ERR(CAM_ISP,
@@ -2304,7 +2346,6 @@
CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
-
/* Initialize the csid hardware */
rc = cam_ife_csid_enable_hw(csid_hw);
if (rc)
@@ -2328,6 +2369,12 @@
break;
}
+ rc = cam_ife_csid_reset_retain_sw_reg(csid_hw);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "CSID: Failed in SW reset");
+ return rc;
+ }
+
if (rc)
cam_ife_csid_disable_hw(csid_hw);
end:
@@ -2489,8 +2536,7 @@
/*wait for the path to halt */
for (i = 0; i < csid_stop->num_res; i++) {
res = csid_stop->node_res[i];
- if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
- csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
+ if (csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
else
res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
@@ -2548,8 +2594,35 @@
}
+static int cam_ife_csid_halt_device(
+ struct cam_ife_csid_hw *csid_hw)
+{
+ uint32_t i;
+ int rc = 0;
+ struct cam_isp_resource_node *res_node;
+
+ res_node = &csid_hw->ipp_res;
+ if (res_node->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+ rc = cam_ife_csid_disable_ipp_path(csid_hw,
+ res_node, CAM_CSID_HALT_IMMEDIATELY);
+ res_node->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+ }
+
+ for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++) {
+ res_node = &csid_hw->rdi_res[i];
+ if (res_node->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+ rc = cam_ife_csid_disable_rdi_path(csid_hw,
+ res_node, CAM_CSID_HALT_IMMEDIATELY);
+ res_node->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+ }
+ }
+ return rc;
+}
+
+
irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
{
+ int rc = 0;
struct cam_ife_csid_hw *csid_hw;
struct cam_hw_soc_info *soc_info;
struct cam_ife_csid_reg_offset *csid_reg;
@@ -2623,22 +2696,52 @@
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
csid_hw->hw_intf->hw_idx);
+ rc = cam_ife_csid_halt_device(csid_hw);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d csid halt device fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ }
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
csid_hw->hw_intf->hw_idx);
+ rc = cam_ife_csid_halt_device(csid_hw);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d csid halt device fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ }
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
csid_hw->hw_intf->hw_idx);
+ rc = cam_ife_csid_halt_device(csid_hw);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d csid halt device fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ }
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
csid_hw->hw_intf->hw_idx);
+ rc = cam_ife_csid_halt_device(csid_hw);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d csid halt device fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ }
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW",
csid_hw->hw_intf->hw_idx);
+ rc = cam_ife_csid_halt_device(csid_hw);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d csid halt device fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ }
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION",
@@ -2664,7 +2767,7 @@
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d MMAPPED_VC_DT",
csid_hw->hw_intf->hw_idx);
}
- if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_STREAM_UNDERFLOW",
csid_hw->hw_intf->hw_idx);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index b400d14..4b546ea 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -368,7 +368,6 @@
uint32_t dt;
uint32_t cnt;
uint32_t tpg_set;
- int pixel_count;
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index df97bd6..ceeacbe 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,7 +75,6 @@
* @cid: cid (DT_ID) value for path, this is applicable for CSID path
* reserve
* @node_res : Reserved resource structure pointer
- * @pixel_count: Number of pixel resources
*
*/
struct cam_csid_hw_reserve_resource_args {
@@ -87,7 +86,6 @@
uint32_t master_idx;
uint32_t cid;
struct cam_isp_resource_node *node_res;
- int pixel_count;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 257a5ac..b9f6d77 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,7 @@
CAM_ISP_HW_CMD_STRIPE_UPDATE,
CAM_ISP_HW_CMD_CLOCK_UPDATE,
CAM_ISP_HW_CMD_BW_UPDATE,
+ CAM_ISP_HW_CMD_BW_CONTROL,
CAM_ISP_HW_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index b771ec6..8927d6a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -185,6 +185,22 @@
uint64_t external_bw_bytes;
};
+enum cam_vfe_bw_control_action {
+ CAM_VFE_BW_CONTROL_EXCLUDE = 0,
+ CAM_VFE_BW_CONTROL_INCLUDE = 1
+};
+
+/*
+ * struct cam_vfe_bw_control_args:
+ *
+ * @node_res: Resource to get the time stamp
+ * @action: Bandwidth control action
+ */
+struct cam_vfe_bw_control_args {
+ struct cam_isp_resource_node *node_res;
+ enum cam_vfe_bw_control_action action;
+};
+
/*
* struct cam_vfe_top_irq_evt_payload:
*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 4a7a4f2..2c4fe9d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -172,6 +172,8 @@
th_payload->evt_status_arr[1]);
cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
core_info->irq_err_handle);
+ cam_irq_controller_clear_and_mask(evt_id,
+ core_info->vfe_irq_controller);
}
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
@@ -695,6 +697,7 @@
case CAM_ISP_HW_CMD_GET_REG_UPDATE:
case CAM_ISP_HW_CMD_CLOCK_UPDATE:
case CAM_ISP_HW_CMD_BW_UPDATE:
+ case CAM_ISP_HW_CMD_BW_CONTROL:
rc = core_info->vfe_top->hw_ops.process_cmd(
core_info->vfe_top->top_priv, cmd_type, cmd_args,
arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index c166113..36ce652 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,7 +60,7 @@
static uint32_t bus_error_irq_mask[3] = {
0x7800,
0x0000,
- 0x00C0,
+ 0x0040,
};
enum cam_vfe_bus_packer_format {
@@ -106,6 +106,7 @@
struct cam_vfe_bus_irq_evt_payload evt_payload[
CAM_VFE_BUS_VER2_PAYLOAD_MAX];
struct list_head free_payload_list;
+ spinlock_t spin_lock;
struct mutex bus_mutex;
uint32_t secure_mode;
uint32_t num_sec_out;
@@ -214,16 +215,23 @@
struct cam_vfe_bus_ver2_common_data *common_data,
struct cam_vfe_bus_irq_evt_payload **evt_payload)
{
+ int rc;
+
+ spin_lock(&common_data->spin_lock);
if (list_empty(&common_data->free_payload_list)) {
*evt_payload = NULL;
CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
- return -ENODEV;
+ rc = -ENODEV;
+ goto done;
}
*evt_payload = list_first_entry(&common_data->free_payload_list,
struct cam_vfe_bus_irq_evt_payload, list);
list_del_init(&(*evt_payload)->list);
- return 0;
+ rc = 0;
+done:
+ spin_unlock(&common_data->spin_lock);
+ return rc;
}
static enum cam_vfe_bus_comp_grp_id
@@ -254,6 +262,7 @@
struct cam_vfe_bus_ver2_common_data *common_data = NULL;
uint32_t *ife_irq_regs = NULL;
uint32_t status_reg0, status_reg1, status_reg2;
+ unsigned long flags;
if (!core_info) {
CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
@@ -276,8 +285,12 @@
}
common_data = core_info;
+
+ spin_lock_irqsave(&common_data->spin_lock, flags);
list_add_tail(&(*evt_payload)->list,
&common_data->free_payload_list);
+ spin_unlock_irqrestore(&common_data->spin_lock, flags);
+
*evt_payload = NULL;
return 0;
@@ -2556,8 +2569,21 @@
CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
wm_data->index, reg_val_pair[j-1]);
- frame_inc = io_cfg->planes[i].plane_stride *
- io_cfg->planes[i].slice_height;
+ if (wm_data->en_ubwc) {
+ frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
+ io_cfg->planes[i].slice_height, 4096);
+ frame_inc += io_cfg->planes[i].meta_size;
+ CAM_DBG(CAM_ISP,
+ "WM %d frm %d: ht: %d stride %d meta: %d",
+ wm_data->index, frame_inc,
+ io_cfg->planes[i].slice_height,
+ io_cfg->planes[i].plane_stride,
+ io_cfg->planes[i].meta_size);
+ } else {
+ frame_inc = io_cfg->planes[i].plane_stride *
+ io_cfg->planes[i].slice_height;
+ }
+
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->frame_inc, frame_inc);
CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
@@ -2975,6 +3001,7 @@
}
}
+ spin_lock_init(&bus_priv->common_data.spin_lock);
INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 9848454..f427ab9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -355,7 +355,6 @@
CAM_DBG(CAM_ISP, "Received EPOCH");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- cam_vfe_put_evt_payload(payload->core_info, &payload);
break;
case CAM_ISP_HW_EVENT_REG_UPDATE:
if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
@@ -373,7 +372,6 @@
if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
CAM_DBG(CAM_ISP, "Received ERROR\n");
ret = CAM_ISP_HW_ERROR_OVERFLOW;
- cam_vfe_put_evt_payload(payload->core_info, &payload);
} else {
ret = CAM_ISP_HW_ERROR_NONE;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 28e99f2..50dca827 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -209,7 +209,6 @@
CAM_DBG(CAM_ISP, "Received REG UPDATE");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- cam_vfe_put_evt_payload(payload->core_info, &payload);
break;
default:
break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index f166025..f4aa5c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,8 +20,9 @@
#include "cam_cpas_api.h"
#include "cam_vfe_soc.h"
-#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00003F9F
-#define CAM_VFE_HW_RESET_HW_VAL 0x00003F87
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL 0x00003F87
+#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
struct cam_vfe_top_ver2_common_data {
struct cam_hw_soc_info *soc_info;
@@ -33,7 +34,11 @@
struct cam_vfe_top_ver2_common_data common_data;
struct cam_isp_resource_node mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
unsigned long hw_clk_rate;
- struct cam_axi_vote hw_axi_vote;
+ enum cam_vfe_bw_control_action axi_vote_control[
+ CAM_VFE_TOP_VER2_MUX_MAX];
+ struct cam_axi_vote to_be_applied_axi_vote;
+ struct cam_axi_vote applied_axi_vote;
+ uint32_t counter_to_update_axi_vote;
struct cam_axi_vote req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
unsigned long req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
};
@@ -119,7 +124,8 @@
}
static int cam_vfe_top_set_axi_bw_vote(
- struct cam_vfe_top_ver2_priv *top_priv)
+ struct cam_vfe_top_ver2_priv *top_priv,
+ bool start_stop)
{
struct cam_axi_vote sum = {0, 0};
int i, rc = 0;
@@ -127,6 +133,7 @@
top_priv->common_data.soc_info;
struct cam_vfe_soc_private *soc_private =
soc_info->soc_private;
+ bool apply_bw_update = false;
if (!soc_private) {
CAM_ERR(CAM_ISP, "Error soc_private NULL");
@@ -134,30 +141,98 @@
}
for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
- sum.uncompressed_bw +=
- top_priv->req_axi_vote[i].uncompressed_bw;
- sum.compressed_bw +=
- top_priv->req_axi_vote[i].compressed_bw;
+ if (top_priv->axi_vote_control[i] ==
+ CAM_VFE_BW_CONTROL_INCLUDE) {
+ sum.uncompressed_bw +=
+ top_priv->req_axi_vote[i].uncompressed_bw;
+ sum.compressed_bw +=
+ top_priv->req_axi_vote[i].compressed_bw;
+ }
}
- CAM_DBG(CAM_ISP, "BW Vote: u=%lld c=%lld",
+ CAM_DBG(CAM_ISP, "Updating BW from (%llu %llu) to (%llu %llu)",
+ top_priv->applied_axi_vote.uncompressed_bw,
+ top_priv->applied_axi_vote.compressed_bw,
sum.uncompressed_bw,
sum.compressed_bw);
- if ((top_priv->hw_axi_vote.uncompressed_bw ==
+ if ((top_priv->applied_axi_vote.uncompressed_bw ==
sum.uncompressed_bw) &&
- (top_priv->hw_axi_vote.compressed_bw ==
- sum.compressed_bw))
+ (top_priv->applied_axi_vote.compressed_bw ==
+ sum.compressed_bw)) {
+ CAM_DBG(CAM_ISP, "BW config unchanged %llu %llu",
+ top_priv->applied_axi_vote.uncompressed_bw,
+ top_priv->applied_axi_vote.compressed_bw);
+ top_priv->counter_to_update_axi_vote = 0;
return 0;
+ }
- rc = cam_cpas_update_axi_vote(
+ if ((top_priv->to_be_applied_axi_vote.uncompressed_bw !=
+ sum.uncompressed_bw) ||
+ (top_priv->to_be_applied_axi_vote.compressed_bw !=
+ sum.compressed_bw)) {
+ // we got a new bw value to apply
+ top_priv->counter_to_update_axi_vote = 0;
+
+ top_priv->to_be_applied_axi_vote.uncompressed_bw =
+ sum.uncompressed_bw;
+ top_priv->to_be_applied_axi_vote.compressed_bw =
+ sum.compressed_bw;
+ }
+
+ if (start_stop == true) {
+ CAM_DBG(CAM_ISP,
+ "New bw in start/stop, applying bw now, counter=%d",
+ top_priv->counter_to_update_axi_vote);
+ top_priv->counter_to_update_axi_vote = 0;
+ apply_bw_update = true;
+ } else if ((top_priv->to_be_applied_axi_vote.uncompressed_bw <
+ top_priv->applied_axi_vote.uncompressed_bw) ||
+ (top_priv->to_be_applied_axi_vote.compressed_bw <
+ top_priv->applied_axi_vote.compressed_bw)) {
+ if (top_priv->counter_to_update_axi_vote >=
+ (CAM_VFE_TOP_VER2_MUX_MAX *
+ CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES)) {
+ CAM_DBG(CAM_ISP,
+ "New bw is less, applying bw now, counter=%d",
+ top_priv->counter_to_update_axi_vote);
+ top_priv->counter_to_update_axi_vote = 0;
+ apply_bw_update = true;
+ } else {
+ CAM_DBG(CAM_ISP,
+ "New bw is less, Defer applying bw, counter=%d",
+ top_priv->counter_to_update_axi_vote);
+
+ top_priv->counter_to_update_axi_vote++;
+ apply_bw_update = false;
+ }
+ } else {
+ CAM_DBG(CAM_ISP,
+ "New bw is more, applying bw now, counter=%d",
+ top_priv->counter_to_update_axi_vote);
+ top_priv->counter_to_update_axi_vote = 0;
+ apply_bw_update = true;
+ }
+
+ CAM_DBG(CAM_ISP,
+ "counter=%d, apply_bw_update=%d",
+ top_priv->counter_to_update_axi_vote,
+ apply_bw_update);
+
+ if (apply_bw_update == true) {
+ rc = cam_cpas_update_axi_vote(
soc_private->cpas_handle,
- &sum);
- if (!rc) {
- top_priv->hw_axi_vote.uncompressed_bw = sum.uncompressed_bw;
- top_priv->hw_axi_vote.compressed_bw = sum.compressed_bw;
- } else
- CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+ &top_priv->to_be_applied_axi_vote);
+ if (!rc) {
+ top_priv->applied_axi_vote.uncompressed_bw =
+ top_priv->to_be_applied_axi_vote.uncompressed_bw;
+ top_priv->applied_axi_vote.compressed_bw =
+ top_priv->to_be_applied_axi_vote.compressed_bw;
+ } else {
+ CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+ }
+ top_priv->counter_to_update_axi_vote = 0;
+ }
return rc;
}
@@ -239,6 +314,8 @@
bw_update->camnoc_bw_bytes;
top_priv->req_axi_vote[i].compressed_bw =
bw_update->external_bw_bytes;
+ top_priv->axi_vote_control[i] =
+ CAM_VFE_BW_CONTROL_INCLUDE;
break;
}
}
@@ -248,7 +325,51 @@
res->hw_intf->hw_idx,
hw_info->hw_state);
} else
- rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv, false);
+
+ return rc;
+}
+
+static int cam_vfe_top_bw_control(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_bw_control_args *bw_ctrl = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int rc = 0;
+ int i;
+
+ bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
+ res = bw_ctrl->node_res;
+
+ if (!res || !res->hw_intf->hw_priv)
+ return -EINVAL;
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->axi_vote_control[i] = bw_ctrl->action;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_DBG(CAM_ISP, "VFE:%d Not ready to set BW yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else {
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
+ }
return rc;
}
@@ -412,7 +533,7 @@
return rc;
}
- rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
if (rc) {
CAM_ERR(CAM_ISP, "set_axi_bw_vote failed, rc=%d", rc);
return rc;
@@ -458,6 +579,8 @@
top_priv->req_clk_rate[i] = 0;
top_priv->req_axi_vote[i].compressed_bw = 0;
top_priv->req_axi_vote[i].uncompressed_bw = 0;
+ top_priv->axi_vote_control[i] =
+ CAM_VFE_BW_CONTROL_EXCLUDE;
break;
}
}
@@ -468,7 +591,7 @@
return rc;
}
- rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
if (rc) {
CAM_ERR(CAM_ISP, "set_axi_bw_vote failed, rc=%d", rc);
return rc;
@@ -518,6 +641,9 @@
rc = cam_vfe_top_bw_update(top_priv, cmd_args,
arg_size);
break;
+ case CAM_ISP_HW_CMD_BW_CONTROL:
+ rc = cam_vfe_top_bw_control(top_priv, cmd_args, arg_size);
+ break;
default:
rc = -EINVAL;
CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
@@ -554,8 +680,11 @@
}
vfe_top->top_priv = top_priv;
top_priv->hw_clk_rate = 0;
- top_priv->hw_axi_vote.compressed_bw = 0;
- top_priv->hw_axi_vote.uncompressed_bw = 0;
+ top_priv->to_be_applied_axi_vote.compressed_bw = 0;
+ top_priv->to_be_applied_axi_vote.uncompressed_bw = 0;
+ top_priv->applied_axi_vote.compressed_bw = 0;
+ top_priv->applied_axi_vote.uncompressed_bw = 0;
+ top_priv->counter_to_update_axi_vote = 0;
for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
@@ -565,6 +694,8 @@
top_priv->req_clk_rate[i] = 0;
top_priv->req_axi_vote[i].compressed_bw = 0;
top_priv->req_axi_vote[i].uncompressed_bw = 0;
+ top_priv->axi_vote_control[i] = CAM_VFE_BW_CONTROL_EXCLUDE;
+
if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
top_priv->mux_rsrc[i].res_id =
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index 65922dd..6e2e7e9 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -742,7 +742,7 @@
struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
uint32_t dev_type;
struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
- struct cam_jpeg_hw_cfg_req *cfg_req, *req_temp;
+ struct cam_jpeg_hw_cfg_req *cfg_req = NULL, *req_temp = NULL;
if (!hw_mgr || !ctx_data) {
CAM_ERR(CAM_JPEG, "Invalid args");
@@ -776,8 +776,8 @@
list_for_each_entry_safe(cfg_req, req_temp,
&hw_mgr->hw_config_req_list, list) {
- if ((struct cam_jpeg_hw_ctx_data *)cfg_req->
- hw_cfg_args.ctxt_to_hw_map != ctx_data)
+ if ((cfg_req) && ((struct cam_jpeg_hw_ctx_data *)cfg_req->
+ hw_cfg_args.ctxt_to_hw_map != ctx_data))
continue;
list_del_init(&cfg_req->list);
@@ -800,11 +800,14 @@
return -EINVAL;
}
- request_id = *(int64_t *)flush_args->flush_req_pending[0];
+ if (flush_args->num_req_pending)
+ return 0;
+
+ request_id = *(int64_t *)flush_args->flush_req_active[0];
list_for_each_entry_safe(cfg_req, req_temp,
&hw_mgr->hw_config_req_list, list) {
- if (cfg_req->hw_cfg_args.ctxt_to_hw_map
- != ctx_data)
+ if ((cfg_req) && (cfg_req->hw_cfg_args.ctxt_to_hw_map
+ != ctx_data))
continue;
if (cfg_req->req_id != request_id)
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index 3fc9032..21e66a2 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,8 +60,21 @@
hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
/* 5. unpack_cfg */
- cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
- hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x0);
+ if (io_buf->io_cfg->format == CAM_FORMAT_PD10)
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+ 0x0);
+ else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY)
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+ 0x1);
+ else if (io_buf->io_cfg->format == CAM_FORMAT_PLAIN16_10)
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+ 0x22);
+ else
+ CAM_ERR(CAM_LRME, "Unsupported format %d",
+ io_buf->io_cfg->format);
}
static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 9689698..02f03ea 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -761,7 +761,7 @@
"Buffer inactive at idx=%d, continuing", i);
continue;
} else {
- CAM_INFO(CAM_CRM,
+ CAM_DBG(CAM_CRM,
"Active buffer at idx=%d, possible leak needs unmapping",
i);
cam_mem_mgr_unmap_active_buf(i);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 784e90b..3100f91 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1138,9 +1138,8 @@
* @brief : Cleans up the mem allocated while linking
* @link : pointer to link, mem associated with this link is freed
*
- * @return : returns if unlink for any device was success or failure
*/
-static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
{
int32_t i = 0;
struct cam_req_mgr_connected_device *dev;
@@ -1157,12 +1156,13 @@
dev = &link->l_dev[i];
if (dev != NULL) {
link_data.dev_hdl = dev->dev_hdl;
- if (dev->ops && dev->ops->link_setup)
+ if (dev->ops && dev->ops->link_setup) {
rc = dev->ops->link_setup(&link_data);
if (rc)
CAM_ERR(CAM_CRM,
- "Unlink failed dev_hdl %d",
- dev->dev_hdl);
+ "Unlink failed dev_hdl 0x%x rc=%d",
+ dev->dev_hdl, rc);
+ }
dev->dev_hdl = 0;
dev->parent = NULL;
dev->ops = NULL;
@@ -1176,8 +1176,6 @@
link->pd_mask = 0;
link->num_devs = 0;
link->max_delay = 0;
-
- return rc;
}
/**
@@ -1263,45 +1261,71 @@
return NULL;
}
+/*
+ * __cam_req_mgr_free_link()
+ *
+ * @brief: Frees the link and its request queue
+ *
+ * @link: link identifier
+ *
+ */
+static void __cam_req_mgr_free_link(struct cam_req_mgr_core_link *link)
+{
+ kfree(link->req.in_q);
+ link->req.in_q = NULL;
+ kfree(link);
+}
+
/**
* __cam_req_mgr_unreserve_link()
*
- * @brief : Reserves one link data struct within session
+ * @brief : Removes the link data struct from the session and frees it
* @session: session identifier
* @link : link identifier
*
*/
static void __cam_req_mgr_unreserve_link(
struct cam_req_mgr_core_session *session,
- struct cam_req_mgr_core_link **link)
+ struct cam_req_mgr_core_link *link)
{
- int32_t i = 0;
+ int i;
- if (!session || !*link) {
+ if (!session || !link) {
CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
- session, *link);
+ session, link);
return;
}
mutex_lock(&session->lock);
- if (!session->num_links)
- CAM_WARN(CAM_CRM, "No active link or invalid state %d",
- session->num_links);
- else {
- for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
- if (session->links[i] == *link)
- session->links[i] = NULL;
- }
- session->num_links--;
- CAM_DBG(CAM_CRM, "Active session links (%d)",
- session->num_links);
+ if (!session->num_links) {
+ CAM_WARN(CAM_CRM, "No active link or invalid state: hdl %x",
+ link->link_hdl);
+ mutex_unlock(&session->lock);
+ return;
}
- kfree((*link)->req.in_q);
- (*link)->req.in_q = NULL;
- kfree(*link);
- *link = NULL;
- mutex_unlock(&session->lock);
+ for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ if (session->links[i] == link)
+ session->links[i] = NULL;
+ }
+
+ if ((session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
+ (link->sync_link)) {
+ /*
+ * make sure to unlink sync setup under the assumption
+ * of only having 2 links in a given session
+ */
+ session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+ for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ if (session->links[i])
+ session->links[i]->sync_link = NULL;
+ }
+ }
+
+ session->num_links--;
+ CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links);
+ mutex_unlock(&session->lock);
+ __cam_req_mgr_free_link(link);
}
/* Workqueue context processing section */
@@ -2145,11 +2169,58 @@
return rc;
}
+/**
+ * __cam_req_mgr_unlink()
+ *
+ * @brief : Unlink devices on a link structure from the session
+ * @link : Pointer to the link structure
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
+{
+ int rc;
+
+ mutex_lock(&link->lock);
+ spin_lock_bh(&link->link_state_spin_lock);
+ link->state = CAM_CRM_LINK_STATE_IDLE;
+ spin_unlock_bh(&link->link_state_spin_lock);
+ __cam_req_mgr_print_req_tbl(&link->req);
+
+ /* Destroy workq payload data */
+ kfree(link->workq->task.pool[0].payload);
+ link->workq->task.pool[0].payload = NULL;
+
+ /* Destroy workq and timer of link */
+ crm_timer_exit(&link->watchdog);
+
+ cam_req_mgr_workq_destroy(&link->workq);
+
+ /* Cleanup request tables and unlink devices */
+ __cam_req_mgr_destroy_link_info(link);
+
+ /* Free memory holding data of linked devs */
+ __cam_req_mgr_destroy_subdev(link->l_dev);
+
+ /* Destroy the link handle */
+ rc = cam_destroy_device_hdl(link->link_hdl);
+ if (rc < 0) {
+ CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
+ rc, link->link_hdl);
+ }
+
+ mutex_unlock(&link->lock);
+ return rc;
+}
+
int cam_req_mgr_destroy_session(
struct cam_req_mgr_session_info *ses_info)
{
int rc;
+ int i;
struct cam_req_mgr_core_session *cam_session = NULL;
+ struct cam_req_mgr_core_link *link;
if (!ses_info) {
CAM_DBG(CAM_CRM, "NULL session info pointer");
@@ -2167,10 +2238,20 @@
}
mutex_lock(&cam_session->lock);
if (cam_session->num_links) {
- CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
+ CAM_DBG(CAM_CRM, "destroy session %x num_active_links %d",
ses_info->session_hdl,
cam_session->num_links);
- /* @TODO : Go through active links and destroy ? */
+
+ for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ link = cam_session->links[i];
+
+ if (!link)
+ continue;
+
+ /* Ignore return value since session is going away */
+ __cam_req_mgr_unlink(link);
+ __cam_req_mgr_free_link(link);
+ }
}
list_del(&cam_session->entry);
mutex_unlock(&cam_session->lock);
@@ -2286,7 +2367,7 @@
link_info->link_hdl = 0;
link_hdl_fail:
mutex_unlock(&link->lock);
- __cam_req_mgr_unreserve_link(cam_session, &link);
+ __cam_req_mgr_unreserve_link(cam_session, link);
mutex_unlock(&g_crm_core_dev->crm_lock);
return rc;
}
@@ -2296,7 +2377,6 @@
int rc = 0;
struct cam_req_mgr_core_session *cam_session;
struct cam_req_mgr_core_link *link;
- int i;
if (!unlink_info) {
CAM_ERR(CAM_CRM, "NULL pointer");
@@ -2319,60 +2399,18 @@
link = cam_get_device_priv(unlink_info->link_hdl);
if (!link) {
CAM_ERR(CAM_CRM, "NULL pointer");
- mutex_unlock(&g_crm_core_dev->crm_lock);
- return -EINVAL;
+ rc = -EINVAL;
+ goto done;
}
- mutex_lock(&link->lock);
- spin_lock_bh(&link->link_state_spin_lock);
- link->state = CAM_CRM_LINK_STATE_IDLE;
- spin_unlock_bh(&link->link_state_spin_lock);
- __cam_req_mgr_print_req_tbl(&link->req);
-
- if ((cam_session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
- (link->sync_link)) {
- /*
- * make sure to unlink sync setup under the assumption
- * of only having 2 links in a given session
- */
- cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
- for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
- if (cam_session->links[i])
- cam_session->links[i]->sync_link = NULL;
- }
- }
-
- /* Destroy workq payload data */
- kfree(link->workq->task.pool[0].payload);
- link->workq->task.pool[0].payload = NULL;
-
- /* Destroy workq and timer of link */
- crm_timer_exit(&link->watchdog);
-
- cam_req_mgr_workq_destroy(&link->workq);
-
- /* Cleanup request tables and unlink devices */
- rc = __cam_req_mgr_destroy_link_info(link);
- if (rc) {
- CAM_ERR(CAM_CORE, "Unlink failed. Cannot proceed");
- return rc;
- }
-
- /* Free memory holding data of linked devs */
- __cam_req_mgr_destroy_subdev(link->l_dev);
-
- /* Destroy the link handle */
- rc = cam_destroy_device_hdl(unlink_info->link_hdl);
- if (rc < 0) {
- CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
- rc, link->link_hdl);
- }
+ rc = __cam_req_mgr_unlink(link);
/* Free curent link and put back into session's free pool of links */
- mutex_unlock(&link->lock);
- __cam_req_mgr_unreserve_link(cam_session, &link);
- mutex_unlock(&g_crm_core_dev->crm_lock);
+ if (!rc)
+ __cam_req_mgr_unreserve_link(cam_session, link);
+done:
+ mutex_unlock(&g_crm_core_dev->crm_lock);
return rc;
}
@@ -2449,6 +2487,12 @@
return -EINVAL;
}
+ if ((!sync_info->link_hdls[0]) || (!sync_info->link_hdls[1])) {
+ CAM_WARN(CAM_CRM, "Invalid link handles 0x%x 0x%x",
+ sync_info->link_hdls[0], sync_info->link_hdls[1]);
+ return -EINVAL;
+ }
+
mutex_lock(&g_crm_core_dev->crm_lock);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
@@ -2569,9 +2613,12 @@
int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
{
int rc = 0;
- int i;
+ int i, j;
struct cam_req_mgr_core_link *link = NULL;
+ struct cam_req_mgr_connected_device *dev = NULL;
+ struct cam_req_mgr_link_evt_data evt_data;
+
if (!control) {
CAM_ERR(CAM_CRM, "Control command is NULL");
rc = -EINVAL;
@@ -2601,9 +2648,29 @@
link->link_hdl);
rc = -EFAULT;
}
+ /* notify nodes */
+ for (j = 0; j < link->num_devs; j++) {
+ dev = &link->l_dev[j];
+ evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_RESUME;
+ evt_data.link_hdl = link->link_hdl;
+ evt_data.dev_hdl = dev->dev_hdl;
+ evt_data.req_id = 0;
+ if (dev->ops && dev->ops->process_evt)
+ dev->ops->process_evt(&evt_data);
+ }
} else if (control->ops == CAM_REQ_MGR_LINK_DEACTIVATE) {
/* Destroy SOF watchdog timer */
crm_timer_exit(&link->watchdog);
+ /* notify nodes */
+ for (j = 0; j < link->num_devs; j++) {
+ dev = &link->l_dev[j];
+ evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_PAUSE;
+ evt_data.link_hdl = link->link_hdl;
+ evt_data.dev_hdl = dev->dev_hdl;
+ evt_data.req_id = 0;
+ if (dev->ops && dev->ops->process_evt)
+ dev->ops->process_evt(&evt_data);
+ }
} else {
CAM_ERR(CAM_CRM, "Invalid link control command");
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index ce8dfa7..45ebc69 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -81,9 +81,9 @@
* @process_evt : payload to generic event
*/
struct cam_req_mgr_kmd_ops {
- cam_req_mgr_get_dev_info get_dev_info;
- cam_req_mgr_link_setup link_setup;
- cam_req_mgr_apply_req apply_req;
+ cam_req_mgr_get_dev_info get_dev_info;
+ cam_req_mgr_link_setup link_setup;
+ cam_req_mgr_apply_req apply_req;
cam_req_mgr_flush_req flush_req;
cam_req_mgr_process_evt process_evt;
};
@@ -182,6 +182,8 @@
*/
enum cam_req_mgr_link_evt_type {
CAM_REQ_MGR_LINK_EVT_ERR,
+ CAM_REQ_MGR_LINK_EVT_PAUSE,
+ CAM_REQ_MGR_LINK_EVT_RESUME,
CAM_REQ_MGR_LINK_EVT_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index e0d4502..23d25a4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -612,37 +612,36 @@
case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
CAM_DBG(CAM_FLASH,
"CAMERA_FLASH_CMD_TYPE_OPS case called");
- if ((fctrl->flash_state == CAM_FLASH_STATE_START) ||
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
(fctrl->flash_state ==
- CAM_FLASH_STATE_CONFIG)) {
- flash_operation_info =
- (struct cam_flash_set_on_off *) cmd_buf;
- if (!flash_operation_info) {
- CAM_ERR(CAM_FLASH,
- "flash_operation_info Null");
- return -EINVAL;
- }
-
- fctrl->per_frame[frame_offset].opcode =
- flash_operation_info->opcode;
- fctrl->per_frame[frame_offset].cmn_attr.count =
- flash_operation_info->count;
- for (i = 0;
- i < flash_operation_info->count; i++)
- fctrl->per_frame[frame_offset].
- led_current_ma[i]
- = flash_operation_info->
- led_current_ma[i];
-
- } else {
- CAM_ERR(CAM_FLASH,
- "Rxed Update packets without linking");
+ CAM_FLASH_STATE_ACQUIRE)) {
+ CAM_WARN(CAM_FLASH,
+ "Rxed Flash fire ops without linking");
fctrl->per_frame[frame_offset].
cmn_attr.is_settings_valid = false;
+ return 0;
+ }
+
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ if (!flash_operation_info) {
+ CAM_ERR(CAM_FLASH,
+ "flash_operation_info Null");
return -EINVAL;
}
+
+ fctrl->per_frame[frame_offset].opcode =
+ flash_operation_info->opcode;
+ fctrl->per_frame[frame_offset].cmn_attr.count =
+ flash_operation_info->count;
+ for (i = 0;
+ i < flash_operation_info->count; i++)
+ fctrl->per_frame[frame_offset].
+ led_current_ma[i]
+ = flash_operation_info->
+ led_current_ma[i];
+ }
break;
- }
default:
CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
cmn_hdr->cmd_type);
@@ -741,18 +740,18 @@
break;
}
case CAM_PKT_NOP_OPCODE: {
- if ((fctrl->flash_state == CAM_FLASH_STATE_START) ||
- (fctrl->flash_state == CAM_FLASH_STATE_CONFIG)) {
- CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
- csl_packet->header.request_id);
- goto update_req_mgr;
- } else {
- CAM_ERR(CAM_FLASH,
- "Rxed Update packets without linking");
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+ CAM_WARN(CAM_FLASH,
+ "Rxed NOP packets without linking");
fctrl->per_frame[frame_offset].
cmn_attr.is_settings_valid = false;
- return -EINVAL;
+ return 0;
}
+
+ CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+ csl_packet->header.request_id);
+ goto update_req_mgr;
}
default:
CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index eddbf97..085bcf6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -82,7 +82,8 @@
}
case CAM_RELEASE_DEV: {
CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
- if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START)) {
CAM_WARN(CAM_FLASH,
"Cannot apply Release dev: Prev state:%d",
fctrl->flash_state);
@@ -131,7 +132,8 @@
}
case CAM_START_DEV: {
CAM_DBG(CAM_FLASH, "CAM_START_DEV");
- if (fctrl->flash_state != CAM_FLASH_STATE_CONFIG) {
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START)) {
CAM_WARN(CAM_FLASH,
"Cannot apply Start Dev: Prev state: %d",
fctrl->flash_state);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 76f5b46..db80584 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -469,7 +469,7 @@
CAM_ERR(CAM_OIS, "invalid cmd buf");
return -EINVAL;
}
- cmd_buf += cmd_desc->offset / sizeof(uint32_t);
+ cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
cmm_hdr = (struct common_header *)cmd_buf;
switch (cmm_hdr->cmd_type) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 9ce7a21..d5bb1b0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,31 @@
add_req.req_id);
}
+static void cam_sensor_release_stream_rsc(
+ struct cam_sensor_ctrl_t *s_ctrl)
+{
+ struct i2c_settings_array *i2c_set = NULL;
+ int rc;
+
+ i2c_set = &(s_ctrl->i2c_data.streamoff_settings);
+ if (i2c_set->is_settings_valid == 1) {
+ i2c_set->is_settings_valid = -1;
+ rc = delete_request(i2c_set);
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR,
+ "failed while deleting Streamoff settings");
+ }
+
+ i2c_set = &(s_ctrl->i2c_data.streamon_settings);
+ if (i2c_set->is_settings_valid == 1) {
+ i2c_set->is_settings_valid = -1;
+ rc = delete_request(i2c_set);
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR,
+ "failed while deleting Streamon settings");
+ }
+}
+
static void cam_sensor_release_resource(
struct cam_sensor_ctrl_t *s_ctrl)
{
@@ -61,26 +86,10 @@
CAM_ERR(CAM_SENSOR,
"failed while deleting Res settings");
}
- i2c_set = &(s_ctrl->i2c_data.streamoff_settings);
- if (i2c_set->is_settings_valid == 1) {
- i2c_set->is_settings_valid = -1;
- rc = delete_request(i2c_set);
- if (rc < 0)
- CAM_ERR(CAM_SENSOR,
- "failed while deleting Streamoff settings");
- }
- i2c_set = &(s_ctrl->i2c_data.streamon_settings);
- if (i2c_set->is_settings_valid == 1) {
- i2c_set->is_settings_valid = -1;
- rc = delete_request(i2c_set);
- if (rc < 0)
- CAM_ERR(CAM_SENSOR,
- "failed while deleting Streamoff settings");
- }
+
if (s_ctrl->i2c_data.per_frame != NULL) {
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
-
if (i2c_set->is_settings_valid == 1) {
i2c_set->is_settings_valid = -1;
rc = delete_request(i2c_set);
@@ -165,42 +174,44 @@
}
case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: {
- if ((s_ctrl->sensor_state == CAM_SENSOR_CONFIG) ||
- (s_ctrl->sensor_state == CAM_SENSOR_START)) {
- i2c_reg_settings =
- &i2c_data->
- per_frame[csl_packet->header.request_id %
- MAX_PER_FRAME_ARRAY];
- CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
- csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
- if (i2c_reg_settings->is_settings_valid == 1) {
- CAM_ERR(CAM_SENSOR,
- "Already some pkt in offset req : %lld",
- csl_packet->header.request_id);
- rc = delete_request(i2c_reg_settings);
- if (rc < 0) {
- CAM_ERR(CAM_SENSOR,
- "Failed in Deleting the err: %d", rc);
- return rc;
- }
- }
- } else {
- CAM_ERR(CAM_SENSOR,
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+ CAM_WARN(CAM_SENSOR,
"Rxed Update packets without linking");
- return -EINVAL;
+ return 0;
}
- break;
+
+ i2c_reg_settings =
+ &i2c_data->
+ per_frame[csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY];
+ CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
+ csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
+ if (i2c_reg_settings->is_settings_valid == 1) {
+ CAM_ERR(CAM_SENSOR,
+ "Already some pkt in offset req : %lld",
+ csl_packet->header.request_id);
+ /*
+ * Update req mgr even in case of failure.
+ * This will help not to wait indefinitely
+ * and freeze. If this log is triggered then
+ * fix it.
+ */
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ return 0;
+ }
+ break;
}
case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
- if ((s_ctrl->sensor_state == CAM_SENSOR_CONFIG) ||
- (s_ctrl->sensor_state == CAM_SENSOR_START)) {
- cam_sensor_update_req_mgr(s_ctrl, csl_packet);
- } else {
- CAM_ERR(CAM_SENSOR,
- "Rxed Update packets without linking");
- rc = -EINVAL;
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+ CAM_WARN(CAM_SENSOR,
+ "Rxed NOP packets without linking");
+ return 0;
}
- return rc;
+
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ return 0;
}
default:
CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
@@ -489,7 +500,7 @@
return;
cam_sensor_release_resource(s_ctrl);
-
+ cam_sensor_release_stream_rsc(s_ctrl);
if (s_ctrl->sensor_state >= CAM_SENSOR_ACQUIRE)
cam_sensor_power_down(s_ctrl);
@@ -706,8 +717,8 @@
}
break;
case CAM_RELEASE_DEV: {
- if ((s_ctrl->sensor_state < CAM_SENSOR_ACQUIRE) ||
- (s_ctrl->sensor_state > CAM_SENSOR_CONFIG)) {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_START)) {
rc = -EINVAL;
CAM_WARN(CAM_SENSOR,
"Not in right state to release : %d",
@@ -722,6 +733,7 @@
}
cam_sensor_release_resource(s_ctrl);
+ cam_sensor_release_stream_rsc(s_ctrl);
if (s_ctrl->bridge_intf.device_hdl == -1) {
CAM_ERR(CAM_SENSOR,
"Invalid Handles: link hdl: %d device hdl: %d",
@@ -754,7 +766,8 @@
break;
}
case CAM_START_DEV: {
- if (s_ctrl->sensor_state != CAM_SENSOR_CONFIG) {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_START)) {
rc = -EINVAL;
CAM_WARN(CAM_SENSOR,
"Not in right state to start : %d",
@@ -793,6 +806,8 @@
"cannot apply streamoff settings");
}
}
+
+ cam_sensor_release_resource(s_ctrl);
s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
}
break;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 7a6d7fd..89aad4e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -98,6 +98,11 @@
return -EINVAL;
}
+ if (!write_setting->reg_setting) {
+ CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+ return -EINVAL;
+ }
+
if (io_master_info->master_type == CCI_MASTER) {
return cam_cci_i2c_write_table(io_master_info,
write_setting);
@@ -125,6 +130,11 @@
return -EINVAL;
}
+ if (!write_setting->reg_setting) {
+ CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+ return -EINVAL;
+ }
+
if (io_master_info->master_type == CCI_MASTER) {
return cam_cci_i2c_write_continuous_table(io_master_info,
write_setting, cam_sensor_i2c_write_flag);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index 72ca737..622dae6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,7 +26,7 @@
#define MAX_REGULATOR 5
#define MAX_POWER_CONFIG 12
-#define MAX_PER_FRAME_ARRAY 8
+#define MAX_PER_FRAME_ARRAY 32
#define CAM_SENSOR_NAME "cam-sensor"
#define CAM_ACTUATOR_NAME "cam-actuator"
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 7824102..e04c6b9 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -140,6 +140,7 @@
struct work_struct smmu_work;
struct mutex payload_list_lock;
struct list_head payload_list;
+ u32 non_fatal_fault;
};
static const struct of_device_id msm_cam_smmu_dt_match[] = {
@@ -434,7 +435,7 @@
CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
domain, dev);
CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
- return 0;
+ return -EINVAL;
}
cb_name = (char *)token;
@@ -448,12 +449,12 @@
CAM_ERR(CAM_SMMU,
"Error: index is not valid, index = %d, token = %s",
idx, cb_name);
- return 0;
+ return -EINVAL;
}
payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
if (!payload)
- return 0;
+ return -EINVAL;
payload->domain = domain;
payload->dev = dev;
@@ -468,7 +469,7 @@
schedule_work(&iommu_cb_set.smmu_work);
- return 0;
+ return -EINVAL;
}
static int cam_smmu_translate_dir_to_iommu_dir(
@@ -2902,6 +2903,15 @@
rc = -ENODEV;
goto end;
}
+
+ iommu_cb_set.non_fatal_fault = 1;
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_NON_FATAL_FAULTS,
+ &iommu_cb_set.non_fatal_fault) < 0) {
+ CAM_ERR(CAM_SMMU,
+ "Error: failed to set non fatal fault attribute");
+ }
+
} else {
CAM_ERR(CAM_SMMU, "Context bank does not have IO region");
rc = -ENODEV;
@@ -3140,12 +3150,10 @@
CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
goto cb_init_fail;
}
-
if (cb->io_support && cb->mapping)
iommu_set_fault_handler(cb->mapping->domain,
cam_smmu_iommu_fault_handler,
(void *)cb->name);
-
/* increment count to next bank */
iommu_cb_set.cb_init_count++;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
index 1b5fd9f..8d5f96a 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, 2017-2018, The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,6 +37,8 @@
/* Ensure previous writes are done */
wmb();
writel_relaxed_no_log(data, addr);
+ /* Ensure previous writes are done */
+ wmb();
return 0;
}
@@ -68,6 +71,8 @@
rmb();
data = readl_relaxed(addr);
CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+ /* Ensure previous read is done */
+ rmb();
return data;
}
@@ -113,6 +118,8 @@
CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
writel_relaxed(*s++, d++);
}
+ /* Ensure previous writes are done */
+ wmb();
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index bd56310..0b1896f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -859,7 +859,7 @@
count = of_property_count_strings(of_node, "reg-names");
if (count <= 0) {
- CAM_WARN(CAM_UTIL, "no reg-names found for: %s",
+ CAM_DBG(CAM_UTIL, "no reg-names found for: %s",
soc_info->dev_name);
count = 0;
}
@@ -896,7 +896,7 @@
rc = of_property_read_string_index(of_node, "interrupt-names", 0,
&soc_info->irq_name);
if (rc) {
- CAM_WARN(CAM_UTIL, "No interrupt line preset for: %s",
+ CAM_DBG(CAM_UTIL, "No interrupt line preset for: %s",
soc_info->dev_name);
rc = 0;
} else {
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 03dfde6..44cc7dc 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,9 @@
case HFI_ERR_SYS_FATAL:
vidc_err = VIDC_ERR_HW_FATAL;
break;
+ case HFI_ERR_SYS_NOC_ERROR:
+ vidc_err = VIDC_ERR_NOC_ERROR;
+ break;
case HFI_ERR_SYS_VERSION_MISMATCH:
case HFI_ERR_SYS_INVALID_PARAMETER:
case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
@@ -316,11 +319,14 @@
return 0;
}
-static int hfi_process_sys_error(u32 device_id, struct msm_vidc_cb_info *info)
+static int hfi_process_sys_error(u32 device_id,
+ struct hfi_msg_event_notify_packet *pkt,
+ struct msm_vidc_cb_info *info)
{
struct msm_vidc_cb_cmd_done cmd_done = {0};
cmd_done.device_id = device_id;
+ cmd_done.status = hfi_map_err_status(pkt->event_data1);
info->response_type = HAL_SYS_ERROR;
info->response.cmd = cmd_done;
@@ -373,7 +379,7 @@
case HFI_EVENT_SYS_ERROR:
dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d, %#x\n",
pkt->event_data1, pkt->event_data2);
- return hfi_process_sys_error(device_id, info);
+ return hfi_process_sys_error(device_id, pkt, info);
case HFI_EVENT_SESSION_ERROR:
dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR[%#x]\n",
pkt->session_id);
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index fa40091..42bf1ba 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,8 @@
static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
{
+ if (!filp->private_data)
+ return NULL;
return container_of(filp->private_data,
struct msm_vidc_inst, event_handler);
}
@@ -74,6 +76,7 @@
vidc_inst = get_vidc_inst(filp, NULL);
rc = msm_vidc_close(vidc_inst);
+ filp->private_data = NULL;
trace_msm_v4l2_vidc_close_end("msm_v4l2_close end");
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 7d4e4a1..f3ab5be 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE
#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
+#define TRIGGER_SSR_LOCK_RETRIES 5
+
const char *const mpeg_video_vidc_extradata[] = {
"Extradata none",
"Extradata MB Quantization",
@@ -2207,6 +2209,10 @@
}
/* handle the hw error before core released to get full debug info */
msm_vidc_handle_hw_error(core);
+ if (response->status == VIDC_ERR_NOC_ERROR) {
+ dprintk(VIDC_WARN, "Got NOC error");
+ MSM_VIDC_ERROR(true);
+ }
dprintk(VIDC_DBG, "Calling core_release\n");
rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
if (rc) {
@@ -3913,13 +3919,17 @@
struct eos_buf *binfo = NULL;
u32 smem_flags = 0;
- get_inst(inst->core, inst);
+ if (inst->state != MSM_VIDC_START_DONE) {
+ dprintk(VIDC_DBG,
+ "Inst = %pK is not ready for EOS\n", inst);
+ break;
+ }
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
dprintk(VIDC_ERR, "%s: Out of memory\n", __func__);
rc = -ENOMEM;
- goto exit;
+ break;
}
if (inst->flags & VIDC_SECURE)
@@ -3929,26 +3939,25 @@
SZ_4K, 1, smem_flags,
HAL_BUFFER_INPUT, 0, &binfo->smem);
if (rc) {
+ kfree(binfo);
dprintk(VIDC_ERR,
"Failed to allocate output memory\n");
rc = -ENOMEM;
- goto exit;
+ break;
}
mutex_lock(&inst->eosbufs.lock);
list_add_tail(&binfo->list, &inst->eosbufs.list);
mutex_unlock(&inst->eosbufs.lock);
- if (inst->state != MSM_VIDC_START_DONE) {
- dprintk(VIDC_DBG,
- "Inst = %pK is not ready for EOS\n", inst);
- goto exit;
- }
-
rc = msm_vidc_send_pending_eos_buffers(inst);
-
-exit:
- put_inst(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed pending_eos_buffers sending\n");
+ list_del(&binfo->list);
+ kfree(binfo);
+ break;
+ }
break;
}
default:
@@ -5305,6 +5314,7 @@
{
int rc = 0;
struct hfi_device *hdev;
+ int try_lock_counter = TRIGGER_SSR_LOCK_RETRIES;
if (!core || !core->device) {
dprintk(VIDC_WARN, "Invalid parameters: %pK\n", core);
@@ -5312,7 +5322,13 @@
}
hdev = core->device;
- mutex_lock(&core->lock);
+ while (try_lock_counter) {
+ if (mutex_trylock(&core->lock))
+ break;
+ try_lock_counter--;
+ if (!try_lock_counter)
+ return -EBUSY;
+ }
if (core->state == VIDC_CORE_INIT_DONE) {
/*
* In current implementation user-initiated SSR triggers
@@ -6045,6 +6061,16 @@
inst->prop.width[CAPTURE_PORT] = inst->reconfig_width;
inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
+ if (msm_comm_get_stream_output_mode(inst) ==
+ HAL_VIDEO_DECODER_SECONDARY) {
+ rc = msm_comm_queue_output_buffers(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to queue output buffers: %d\n",
+ rc);
+ goto sess_continue_fail;
+ }
+ }
} else if (inst->session_type == MSM_VIDC_ENCODER) {
dprintk(VIDC_DBG,
"session_continue not supported for encoder");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 215bb78..74fa3d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -131,7 +131,7 @@
static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos) {
unsigned long ssr_trigger_val = 0;
- int rc = 0;
+ int rc = 0, ret = 0;
struct msm_vidc_core *core = filp->private_data;
size_t size = MAX_SSR_STRING_LEN;
char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
@@ -156,8 +156,8 @@
dprintk(VIDC_WARN, "returning error err %d\n", rc);
rc = -EINVAL;
} else {
- msm_vidc_trigger_ssr(core, ssr_trigger_val);
- rc = count;
+ ret = msm_vidc_trigger_ssr(core, ssr_trigger_val);
+ rc = (ret == -EBUSY ? ret : count);
}
exit:
return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index b1a240d..530fe3a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -992,10 +992,11 @@
if (core->smmu_fault_handled) {
if (core->resources.non_fatal_pagefaults) {
- msm_vidc_noc_error_info(core);
- MSM_VIDC_ERROR(true);
+ dprintk(VIDC_ERR,
+ "%s: non-fatal pagefault address: %lx\n",
+ __func__, iova);
+ return 0;
}
- return -ENOSYS;
}
dprintk(VIDC_ERR, "%s - faulting address: %lx\n", __func__, iova);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 2260b55..54cbdfc 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,7 @@
VIDC_ERR_TIMEOUT,
VIDC_ERR_CMDQFULL,
VIDC_ERR_START_CODE_NOT_FOUND,
+ VIDC_ERR_NOC_ERROR,
VIDC_ERR_CLIENT_PRESENT = 0x90000001,
VIDC_ERR_CLIENT_FATAL,
VIDC_ERR_CMD_QUEUE_FULL,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index ca6d803..ea8cf1a 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,7 +45,7 @@
#define HFI_ERR_SYS_SESSION_IN_USE (HFI_COMMON_BASE + 0x7)
#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE (HFI_COMMON_BASE + 0x8)
#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x9)
-
+#define HFI_ERR_SYS_NOC_ERROR (HFI_COMMON_BASE + 0x11)
#define HFI_ERR_SESSION_FATAL (HFI_COMMON_BASE + 0x1001)
#define HFI_ERR_SESSION_INVALID_PARAMETER (HFI_COMMON_BASE + 0x1002)
#define HFI_ERR_SESSION_BAD_POINTER (HFI_COMMON_BASE + 0x1003)
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 146ca6f..b5ad125 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1,7 +1,7 @@
/*
* QTI Secure Execution Environment Communicator (QSEECOM) driver
*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1859,6 +1859,8 @@
struct qseecom_command_scm_resp continue_resp;
bool found_app = false;
unsigned long flags;
+ sigset_t new_sigset;
+ sigset_t old_sigset;
if (!resp || !data) {
pr_err("invalid resp or data pointer\n");
@@ -1900,23 +1902,23 @@
ptr_app->blocked_on_listener_id = resp->data;
/* sleep until listener is available */
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
do {
qseecom.app_block_ref_cnt++;
ptr_app->app_blocked = true;
mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
+ wait_event_freezable(
list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, app_id %d\n",
- resp->data, ptr_app->app_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
+ !list_ptr->listener_in_use);
mutex_lock(&app_access_lock);
ptr_app->app_blocked = false;
qseecom.app_block_ref_cnt--;
} while (list_ptr->listener_in_use);
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
@@ -1950,6 +1952,8 @@
struct qseecom_continue_blocked_request_ireq ireq;
struct qseecom_command_scm_resp continue_resp;
unsigned int session_id;
+ sigset_t new_sigset;
+ sigset_t old_sigset;
if (!resp) {
pr_err("invalid resp pointer\n");
@@ -1965,22 +1969,23 @@
}
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
+
/* sleep until listener is available */
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
do {
qseecom.app_block_ref_cnt++;
mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
+ wait_event_freezable(
list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, session_id %d\n",
- resp->data, session_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
+ !list_ptr->listener_in_use);
mutex_lock(&app_access_lock);
qseecom.app_block_ref_cnt--;
} while (list_ptr->listener_in_use);
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+
/* notify TZ that listener is available */
pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
resp->data, session_id);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bdc4e2a..ff4f84f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -3004,12 +3004,6 @@
struct mmc_card *card = host->card;
int ret;
- /*
- * In the case of recovery, we can't expect flushing the cache to work
- * always, but we have a go and ignore errors.
- */
- mmc_flush_cache(host->card);
-
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
mmc_can_reset(card)) {
mmc_host_clk_hold(host);
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 55ce946..01811d9 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -357,7 +357,7 @@
if (!cq_host->desc_base || !cq_host->trans_desc_base)
return -ENOMEM;
- pr_info("desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ pr_debug("desc-base: 0x%pK trans-base: 0x%pK\n desc_dma 0x%llx trans_dma: 0x%llx\n",
cq_host->desc_base, cq_host->trans_desc_base,
(unsigned long long)cq_host->desc_dma_base,
(unsigned long long) cq_host->trans_desc_dma_base);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 367c84f..5ed9b72 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2,7 +2,7 @@
* drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
* driver source file
*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@
#include <linux/iopoll.h>
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
+#include <linux/nvmem-consumer.h>
#include <trace/events/mmc.h>
#include "sdhci-msm.h"
@@ -1885,6 +1886,65 @@
}
}
+#ifdef CONFIG_NVMEM
+/* Parse qfprom data for deciding on errata work-arounds */
+static long qfprom_read(struct device *dev, const char *name)
+{
+ struct nvmem_cell *cell;
+ ssize_t len = 0;
+ u32 *buf, val = 0;
+ long err = 0;
+
+ cell = nvmem_cell_get(dev, name);
+ if (IS_ERR(cell)) {
+ err = PTR_ERR(cell);
+ dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
+ /* If entry does not exist, then that is not an error */
+ if (err == -ENOENT)
+ err = 0;
+ return err;
+ }
+
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf) || !len) {
+ dev_err(dev, "Failed reading nvmem cell, err: %u, bytes fetched: %zd\n",
+ *buf, len);
+ if (!IS_ERR(buf)) {
+ kfree(buf);
+ err = -EINVAL;
+ } else {
+ err = PTR_ERR(buf);
+ }
+ } else {
+ val = *buf;
+ kfree(buf);
+ }
+
+ nvmem_cell_put(cell);
+ return err ? err : (long) val;
+}
+
+/* Reads the SoC version */
+static int sdhci_msm_get_socrev(struct device *dev,
+ struct sdhci_msm_host *msm_host)
+{
+
+ msm_host->soc_min_rev = qfprom_read(dev, "minor_rev");
+
+ if (msm_host->soc_min_rev < 0)
+ dev_err(dev, "failed getting soc_min_rev, err : %d\n",
+ msm_host->soc_min_rev);
+ return msm_host->soc_min_rev;
+}
+#else
+/* Reads the SoC version */
+static int sdhci_msm_get_socrev(struct device *dev,
+ struct sdhci_msm_host *msm_host)
+{
+ return 0;
+}
+#endif
+
/* Parse platform data */
static
struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
@@ -2062,6 +2122,13 @@
if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
pdata->rclk_wa = true;
+ /*
+ * rclk_wa is not required if soc version is mentioned and
+ * is not base version.
+ */
+ if (msm_host->soc_min_rev != 0)
+ pdata->rclk_wa = false;
+
return pdata;
out:
return NULL;
@@ -4170,11 +4237,10 @@
group->latency = PM_QOS_DEFAULT_VALUE;
pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
group->latency);
- pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
+ pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
__func__, i,
group->req.cpus_affine.bits[0],
- group->latency,
- &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
+ group->latency);
}
msm_host->pm_qos_prev_cpu = -1;
msm_host->pm_qos_group_enable = true;
@@ -4530,6 +4596,12 @@
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
+ ret = sdhci_msm_get_socrev(&pdev->dev, msm_host);
+ if (ret == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
+ goto pltfm_free;
+ }
+
/* get the ice device vops if present */
ret = sdhci_msm_ice_get_dev(host);
if (ret == -EPROBE_DEFER) {
@@ -4735,8 +4807,6 @@
goto vreg_deinit;
}
writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
- dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
- &tlmm_memres->start, readl_relaxed(tlmm_mem));
}
/*
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 7c737cc..9c2442d 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -255,6 +255,7 @@
bool core_3_0v_support;
bool pltfm_init_done;
struct sdhci_msm_regs_restore regs_restore;
+ int soc_min_rev;
};
extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 566be69..b674b38 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3068,13 +3068,13 @@
struct sdhci_adma2_64_desc *dma_desc = desc;
if (host->flags & SDHCI_USE_64_BIT_DMA)
- DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ DBG("%s: %pK: DMA 0x%08x%08x, LEN 0x%04x,Attr=0x%02x\n",
name, desc, le32_to_cpu(dma_desc->addr_hi),
le32_to_cpu(dma_desc->addr_lo),
le16_to_cpu(dma_desc->len),
le16_to_cpu(dma_desc->cmd));
else
- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ DBG("%s: %pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
name, desc, le32_to_cpu(dma_desc->addr_lo),
le16_to_cpu(dma_desc->len),
le16_to_cpu(dma_desc->cmd));
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index c131b5e..d32c1f4 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -39,7 +40,8 @@
/* valid only for version 2 and above */
__le32 bl_assert_code; /* 0x880A58 BL Assert code */
__le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
- __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
+ __le32 bl_shutdown_handshake; /* 0x880A60 BL cleaner shutdown */
+ __le32 bl_reserved[21]; /* 0x880A64 - 0x880AB4 */
__le32 bl_magic_number; /* 0x880AB8 BL Magic number */
} __packed;
@@ -58,4 +60,9 @@
u8 mac_address[6]; /* 0x880A4c BL mac address */
} __packed;
+/* bits for bl_shutdown_handshake */
+#define BL_SHUTDOWN_HS_GRTD BIT(0)
+#define BL_SHUTDOWN_HS_RTD BIT(1)
+#define BL_SHUTDOWN_HS_PROT_VER(x) WIL_GET_BITS(x, 28, 31)
+
#endif /* BOOT_LOADER_EXPORT_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 51030c3..91d29f5 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1031,9 +1031,8 @@
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil_to_wdev(wil);
- wdev->preset_chandef = *chandef;
+ wil->monitor_chandef = *chandef;
return 0;
}
@@ -1807,9 +1806,8 @@
wil_dbg_pm(wil, "suspending\n");
mutex_lock(&wil->mutex);
- wil_p2p_stop_discovery(wil);
-
mutex_lock(&wil->p2p_wdev_mutex);
+ wil_p2p_stop_radio_operations(wil);
wil_abort_scan(wil, true);
mutex_unlock(&wil->p2p_wdev_mutex);
mutex_unlock(&wil->mutex);
@@ -1827,6 +1825,68 @@
return 0;
}
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int i, rc;
+
+ wil_dbg_misc(wil,
+ "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+ request->n_ssids, request->ie_len, request->flags);
+ for (i = 0; i < request->n_ssids; i++) {
+ wil_dbg_misc(wil, "SSID[%d]:", i);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
+ }
+ wil_dbg_misc(wil, "channels:");
+ for (i = 0; i < request->n_channels; i++)
+ wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+ i == request->n_channels - 1 ? "\n" : "");
+ wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+ request->n_match_sets, request->min_rssi_thold,
+ request->delay);
+ for (i = 0; i < request->n_match_sets; i++) {
+ struct cfg80211_match_set *ms = &request->match_sets[i];
+
+ wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+ i, ms->rssi_thold);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ ms->ssid.ssid,
+ ms->ssid.ssid_len, true);
+ }
+ wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+ for (i = 0; i < request->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+ wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+ i, sp->interval, sp->iterations);
+ }
+
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+ if (rc)
+ return rc;
+ return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ rc = wmi_stop_sched_scan(wil);
+ /* device would return error if it thinks PNO is already stopped.
+ * ignore the return code so user space and driver gets back in-sync
+ */
+ wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+ return 0;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1860,6 +1920,8 @@
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
+ .sched_scan_start = wil_cfg80211_sched_scan_start,
+ .sched_scan_stop = wil_cfg80211_sched_scan_stop,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 831780a..751e911 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -875,7 +875,6 @@
params.buf = frame;
params.len = len;
- params.chan = wdev->preset_chandef.chan;
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, ¶ms, NULL);
@@ -1682,8 +1681,6 @@
struct wil6210_priv *wil = file->private_data;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->suspend_stats.min_suspend_time = ULONG_MAX;
- wil->suspend_stats.collection_start = ktime_get();
return len;
}
@@ -1693,33 +1690,41 @@
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
- static char text[400];
- int n;
- unsigned long long stats_collection_time =
- ktime_to_us(ktime_sub(ktime_get(),
- wil->suspend_stats.collection_start));
+ char *text;
+ int n, ret, text_size = 500;
- n = snprintf(text, sizeof(text),
- "Suspend statistics:\n"
+ text = kmalloc(text_size, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+ n = snprintf(text, text_size,
+ "Radio on suspend statistics:\n"
"successful suspends:%ld failed suspends:%ld\n"
"successful resumes:%ld failed resumes:%ld\n"
- "rejected by host:%ld rejected by device:%ld\n"
- "total suspend time:%lld min suspend time:%lld\n"
- "max suspend time:%lld stats collection time: %lld\n",
- wil->suspend_stats.successful_suspends,
- wil->suspend_stats.failed_suspends,
- wil->suspend_stats.successful_resumes,
- wil->suspend_stats.failed_resumes,
- wil->suspend_stats.rejected_by_host,
+ "rejected by device:%ld\n"
+ "Radio off suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "General statistics:\n"
+ "rejected by host:%ld\n",
+ wil->suspend_stats.r_on.successful_suspends,
+ wil->suspend_stats.r_on.failed_suspends,
+ wil->suspend_stats.r_on.successful_resumes,
+ wil->suspend_stats.r_on.failed_resumes,
wil->suspend_stats.rejected_by_device,
- wil->suspend_stats.total_suspend_time,
- wil->suspend_stats.min_suspend_time,
- wil->suspend_stats.max_suspend_time,
- stats_collection_time);
+ wil->suspend_stats.r_off.successful_suspends,
+ wil->suspend_stats.r_off.failed_suspends,
+ wil->suspend_stats.r_off.successful_resumes,
+ wil->suspend_stats.r_off.failed_resumes,
+ wil->suspend_stats.rejected_by_host);
- n = min_t(int, n, sizeof(text));
+ n = min_t(int, n, text_size);
- return simple_read_from_buffer(user_buf, count, ppos, text, n);
+ ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
+
+ kfree(text);
+
+ return ret;
}
static const struct file_operations fops_suspend_stats = {
@@ -1888,8 +1893,6 @@
wil6210_debugfs_create_ITR_CNT(wil, dbg);
- wil->suspend_stats.collection_start = ktime_get();
-
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 2f2b910..2c7b24f 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,15 +59,30 @@
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
+/* Comment header - common for all comment record types */
+struct wil_fw_record_comment_hdr {
+ __le32 magic;
+};
+
/* FW capabilities encoded inside a comment record */
#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
/* identifies capabilities record */
- __le32 magic;
+ struct wil_fw_record_comment_hdr hdr;
/* capabilities (variable size), see enum wmi_fw_capability */
u8 capabilities[0];
};
+/* brd file info encoded inside a comment record */
+#define WIL_BRD_FILE_MAGIC (0xabcddcbb)
+struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
+ /* identifies brd file record */
+ struct wil_fw_record_comment_hdr hdr;
+ __le32 version;
+ __le32 base_addr;
+ __le32 max_size_bytes;
+} __packed;
+
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 77d1902..914c010 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -128,14 +129,13 @@
}
static int
-fw_handle_comment(struct wil6210_priv *wil, const void *data,
- size_t size)
+fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
+ size_t size)
{
const struct wil_fw_record_capabilities *rec = data;
size_t capa_size;
- if (size < sizeof(*rec) ||
- le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) {
+ if (size < sizeof(*rec)) {
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
data, size, true);
return 0;
@@ -151,8 +151,56 @@
return 0;
}
-static int fw_handle_data(struct wil6210_priv *wil, const void *data,
- size_t size)
+static int
+fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_brd_file *rec = data;
+
+ if (size < sizeof(*rec)) {
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+ data, size, true);
+ return 0;
+ }
+
+ wil->brd_file_addr = le32_to_cpu(rec->base_addr);
+ wil->brd_file_max_size = le32_to_cpu(rec->max_size_bytes);
+
+ wil_dbg_fw(wil, "brd_file_addr 0x%x, brd_file_max_size %d\n",
+ wil->brd_file_addr, wil->brd_file_max_size);
+
+ return 0;
+}
+
+static int
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_comment_hdr *hdr = data;
+ u32 magic;
+ int rc = 0;
+
+ if (size < sizeof(*hdr))
+ return 0;
+
+ magic = le32_to_cpu(hdr->magic);
+
+ switch (magic) {
+ case WIL_FW_CAPABILITIES_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n");
+ rc = fw_handle_capabilities(wil, data, size);
+ break;
+ case WIL_BRD_FILE_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n");
+ rc = fw_handle_brd_file(wil, data, size);
+ break;
+ }
+
+ return rc;
+}
+
+static int __fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size, __le32 addr)
{
const struct wil_fw_record_data *d = data;
void __iomem *dst;
@@ -163,16 +211,23 @@
return -EINVAL;
}
- if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ if (!wil_fw_addr_check(wil, &dst, addr, s, "address"))
return -EINVAL;
- wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
- s);
+ wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s);
wil_memcpy_toio_32(dst, d->data, s);
wmb(); /* finish before processing next record */
return 0;
}
+static int fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_data *d = data;
+
+ return __fw_handle_data(wil, data, size, d->addr);
+}
+
static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -552,6 +607,100 @@
}
/**
+ * wil_brd_process - process section from BRD file
+ *
+ * Return error code
+ */
+static int wil_brd_process(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ int rc = 0;
+ const struct wil_fw_record_head *hdr = data;
+ size_t s, hdr_sz;
+ u16 type;
+
+ /* Assuming the board file includes only one header record and one data
+ * record. Each record starts with wil_fw_record_head.
+ */
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ s = sizeof(*hdr) + le32_to_cpu(hdr->size);
+ if (s > size)
+ return -EINVAL;
+
+ /* Skip the header record and handle the data record */
+ hdr = (const void *)hdr + s;
+ size -= s;
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ hdr_sz = le32_to_cpu(hdr->size);
+
+ if (wil->brd_file_max_size && hdr_sz > wil->brd_file_max_size)
+ return -EINVAL;
+ if (sizeof(*hdr) + hdr_sz > size)
+ return -EINVAL;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ type = le16_to_cpu(hdr->type);
+ if (type != wil_fw_type_data) {
+ wil_err_fw(wil, "invalid record type for board file: %d\n",
+ type);
+ return -EINVAL;
+ }
+ if (hdr_sz < sizeof(struct wil_fw_record_data)) {
+ wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil, "using addr from fw file: [0x%08x]\n",
+ wil->brd_file_addr);
+
+ rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
+ cpu_to_le32(wil->brd_file_addr));
+
+ return rc;
+}
+
+/**
+ * wil_request_board - Request board file
+ *
+ * Request board image from the file
+ * board file address and max size are read from FW file
+ * during initialization.
+ * brd file shall include one header and one data section.
+ *
+ * Return error code
+ */
+int wil_request_board(struct wil6210_priv *wil, const char *name)
+{
+ int rc, dlen;
+ const struct firmware *brd;
+
+ rc = request_firmware(&brd, name, wil_to_dev(wil));
+ if (rc) {
+ wil_err_fw(wil, "Failed to load brd %s\n", name);
+ return rc;
+ }
+ wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size);
+
+ /* Verify the header */
+ dlen = wil_fw_verify(wil, brd->data, brd->size);
+ if (dlen < 0) {
+ rc = dlen;
+ goto out;
+ }
+ /* Process the data record */
+ rc = wil_brd_process(wil, brd->data, dlen);
+
+out:
+ release_firmware(brd);
+ return rc;
+}
+
+/**
* wil_fw_verify_file_exists - checks if firmware file exist
*
* @wil: driver context
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5cf3417..1835187 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -395,8 +396,9 @@
wil6210_mask_irq_misc(wil, false);
if (isr & ISR_MISC_FW_ERROR) {
- u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
- u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+ u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
+ u32 ucode_assert_code =
+ wil_r(wil, wil->rgf_ucode_assert_code_addr);
wil_err(wil,
"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
@@ -565,7 +567,7 @@
if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE;
- /* FIXME: IRQ mask debug */
+ /* IRQ mask debug */
if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index ae5a1b6..9cef0f0 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -583,7 +584,6 @@
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->suspend_stats.min_suspend_time = ULONG_MAX;
wil->vring_idle_trsh = 16;
return 0;
@@ -643,6 +643,98 @@
destroy_workqueue(wil->wmi_wq);
}
+static void wil_shutdown_bl(struct wil6210_priv *wil)
+{
+ u32 val;
+
+ wil_s(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD);
+
+ usleep_range(100, 150);
+
+ val = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ if (val & BL_SHUTDOWN_HS_RTD) {
+ wil_dbg_misc(wil, "BL is ready for halt\n");
+ return;
+ }
+
+ wil_err(wil, "BL did not report ready for halt\n");
+}
+
+/* this format is used by ARC embedded CPU for instruction memory */
+static inline u32 ARC_me_imm32(u32 d)
+{
+ return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16);
+}
+
+/* defines access to interrupt vectors for wil_freeze_bl */
+#define ARC_IRQ_VECTOR_OFFSET(N) ((N) * 8)
+/* ARC long jump instruction */
+#define ARC_JAL_INST (0x20200f80)
+
+static void wil_freeze_bl(struct wil6210_priv *wil)
+{
+ u32 jal, upc, saved;
+ u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3);
+
+ jal = wil_r(wil, wil->iccm_base + ivt3);
+ if (jal != ARC_me_imm32(ARC_JAL_INST)) {
+ wil_dbg_misc(wil, "invalid IVT entry found, skipping\n");
+ return;
+ }
+
+ /* prevent the target from entering deep sleep
+ * and disabling memory access
+ */
+ saved = wil_r(wil, RGF_USER_USAGE_8);
+ wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP);
+ usleep_range(20, 25); /* let the BL process the bit */
+
+ /* redirect to endless loop in the INT_L1 context and let it trap */
+ wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3));
+ usleep_range(20, 25); /* let the BL get into the trap */
+
+ /* verify the BL is frozen */
+ upc = wil_r(wil, RGF_USER_CPU_PC);
+ if (upc < ivt3 || (upc > (ivt3 + 8)))
+ wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc);
+
+ wil_w(wil, RGF_USER_USAGE_8, saved);
+}
+
+static void wil_bl_prepare_halt(struct wil6210_priv *wil)
+{
+ u32 tmp, ver;
+
+ /* before halting device CPU driver must make sure BL is not accessing
+ * host memory. This is done differently depending on BL version:
+ * 1. For very old BL versions the procedure is skipped
+ * (not supported).
+ * 2. For old BL version we use a special trick to freeze the BL
+ * 3. For new BL versions we shutdown the BL using handshake procedure.
+ */
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+ if (!tmp) {
+ wil_dbg_misc(wil, "old BL, skipping halt preperation\n");
+ return;
+ }
+
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ ver = BL_SHUTDOWN_HS_PROT_VER(tmp);
+
+ if (ver > 0)
+ wil_shutdown_bl(wil);
+ else
+ wil_freeze_bl(wil);
+}
+
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
@@ -676,7 +768,7 @@
}
}
-static int wil_target_reset(struct wil6210_priv *wil)
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
@@ -690,9 +782,16 @@
wil_halt_cpu(wil);
- /* clear all boot loader "ready" bits */
- wil_w(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
+ if (!no_flash) {
+ /* clear all boot loader "ready" bits */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready), 0);
+ /* this should be safe to write even with old BLs */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), 0);
+ }
/* Clear Fw Download notification */
wil_c(wil, RGF_USER_USAGE_6, BIT(0));
@@ -733,21 +832,33 @@
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0,
- boot_loader_ready));
- if (x1 != x) {
- wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
- x1 = x;
- }
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
- x);
- return -ETIME;
- }
- } while (x != BL_READY);
+ if (no_flash)
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, USER_EXT_USER_PMU_3);
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while ((x & BIT_PMU_DEVICE_RDY) == 0);
+ else
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -755,6 +866,21 @@
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+ if (no_flash) {
+ /* Reset OTP HW vectors to fit 40MHz */
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001);
+ wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
+ }
+
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@@ -792,6 +918,14 @@
else
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+ if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
if (wil->platform_ops.set_features) {
features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
wil->fw_capabilities) &&
@@ -904,6 +1038,27 @@
}
}
+static int wil_get_otp_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u8 mac[8];
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+ sizeof(mac));
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "Invalid MAC %pM\n", mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ return 0;
+}
+
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
@@ -997,6 +1152,7 @@
{
int rc;
unsigned long status_flags = BIT(wil_status_resetting);
+ int no_flash;
wil_dbg_misc(wil, "reset\n");
@@ -1075,20 +1231,28 @@
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
- wil_bl_crash_info(wil, false);
+ no_flash = test_bit(hw_capa_no_flash, wil->hw_capa);
+ if (!no_flash)
+ wil_bl_crash_info(wil, false);
wil_disable_irq(wil);
- rc = wil_target_reset(wil);
+ rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
wil_rx_fini(wil);
if (rc) {
- wil_bl_crash_info(wil, true);
+ if (!no_flash)
+ wil_bl_crash_info(wil, true);
goto out;
}
- rc = wil_get_bl_info(wil);
- if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
- rc = 0;
+ if (no_flash) {
+ rc = wil_get_otp_info(wil);
+ } else {
+ rc = wil_get_bl_info(wil);
+ if (rc == -EAGAIN && !load_fw)
+ /* ignore RF error if not going up */
+ rc = 0;
+ }
if (rc)
goto out;
@@ -1097,13 +1261,21 @@
wil_info(wil, "Use firmware <%s> + board <%s>\n",
wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ if (!no_flash)
+ wil_bl_prepare_halt(wil);
+
wil_halt_cpu(wil);
memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
goto out;
- rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
+ if (wil->brd_file_addr)
+ rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ else
+ rc = wil_request_firmware(wil,
+ WIL_BOARD_FILE_NAME,
+ true);
if (rc)
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 40cd32a..e2abe67 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -179,7 +179,7 @@
wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
/* default monitor channel */
ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
- cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+ cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
ndev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, wil_dev_setup);
if (!ndev) {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 370068a..1875387 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -39,15 +40,16 @@
#endif /* CONFIG_PM */
static
-void wil_set_capabilities(struct wil6210_priv *wil)
+int wil_set_capabilities(struct wil6210_priv *wil)
{
const char *wil_fw_name;
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
int platform_capa;
+ struct fw_map *iccm_section, *sct;
- bitmap_zero(wil->hw_capabilities, hw_capability_last);
+ bitmap_zero(wil->hw_capa, hw_capa_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
@@ -56,6 +58,8 @@
switch (jtag_id) {
case JTAG_DEV_ID_SPARROW:
+ memcpy(fw_mapping, sparrow_fw_mapping,
+ sizeof(sparrow_fw_mapping));
switch (chip_revision) {
case REVISION_ID_SPARROW_D0:
wil->hw_name = "Sparrow D0";
@@ -65,6 +69,12 @@
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
+ sct = wil_find_fw_mapping("mac_rgf_ext");
+ if (!sct) {
+ wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n");
+ return -EINVAL;
+ }
+ memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct));
break;
case REVISION_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
@@ -75,15 +85,36 @@
wil->hw_version = HW_VER_UNKNOWN;
break;
}
+ wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
+ break;
+ case JTAG_DEV_ID_TALYN:
+ wil->hw_name = "Talyn";
+ wil->hw_version = HW_VER_TALYN;
+ memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
+ BIT_NO_FLASH_INDICATION)
+ set_bit(hw_capa_no_flash, wil->hw_capa);
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
jtag_id, chip_revision);
wil->hw_name = "Unknown";
wil->hw_version = HW_VER_UNKNOWN;
+ return -EINVAL;
}
- wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+ iccm_section = wil_find_fw_mapping("fw_code");
+ if (!iccm_section) {
+ wil_err(wil, "fw_code section not found in fw_mapping\n");
+ return -EINVAL;
+ }
+ wil->iccm_base = iccm_section->host;
+
+ wil_info(wil, "Board hardware is %s, flash %sexist\n", wil->hw_name,
+ test_bit(hw_capa_no_flash, wil->hw_capa) ? "doesn't " : "");
/* Get platform capabilities */
if (wil->platform_ops.get_capa) {
@@ -96,6 +127,8 @@
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
wil_refresh_fw_capabilities(wil);
+
+ return 0;
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -211,6 +244,8 @@
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
+ int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+ int i;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@@ -246,21 +281,23 @@
}
/* rollback to err_plat */
- /* device supports 48bit addresses */
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ /* device supports >32bit addresses */
+ for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
- dev_err(dev,
- "dma_set_mask_and_coherent(32) failed: %d\n",
- rc);
- goto err_plat;
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
}
- } else {
- wil->use_extended_dma_addr = 1;
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
}
+ if (wil->dma_addr_size == 0)
+ goto err_plat;
+
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@@ -295,7 +332,11 @@
/* rollback to err_iounmap */
wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
- wil_set_capabilities(wil);
+ rc = wil_set_capabilities(wil);
+ if (rc) {
+ wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
+ goto err_iounmap;
+ }
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
@@ -381,6 +422,7 @@
static const struct pci_device_id wil6210_pcie_ids[] = {
{ PCI_DEVICE(0x1ae9, 0x0310) },
{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
+ { PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
@@ -393,6 +435,9 @@
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
@@ -400,16 +445,18 @@
if (rc)
goto out;
- rc = wil_suspend(wil, is_runtime);
+ rc = wil_suspend(wil, is_runtime, keep_radio_on);
if (!rc) {
- wil->suspend_stats.successful_suspends++;
-
- /* If platform device supports keep_radio_on_during_sleep
- * it will control PCIe master
+ /* In case radio stays on, platform device will control
+ * PCIe master
*/
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on) {
/* disable bus mastering */
pci_clear_master(pdev);
+ wil->suspend_stats.r_off.successful_suspends++;
+ } else {
+ wil->suspend_stats.r_on.successful_suspends++;
+ }
}
out:
return rc;
@@ -420,23 +467,32 @@
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
- /* If platform device supports keep_radio_on_during_sleep it will
- * control PCIe master
+ /* In case radio stays on, platform device will control
+ * PCIe master
*/
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on)
/* allow master */
pci_set_master(pdev);
- rc = wil_resume(wil, is_runtime);
+ rc = wil_resume(wil, is_runtime, keep_radio_on);
if (rc) {
wil_err(wil, "device failed to resume (%d)\n", rc);
- wil->suspend_stats.failed_resumes++;
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on) {
pci_clear_master(pdev);
+ wil->suspend_stats.r_off.failed_resumes++;
+ } else {
+ wil->suspend_stats.r_on.failed_resumes++;
+ }
} else {
- wil->suspend_stats.successful_resumes++;
+ if (keep_radio_on)
+ wil->suspend_stats.r_on.successful_resumes++;
+ else
+ wil->suspend_stats.r_off.successful_resumes++;
}
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 153c1cf..14533ed 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -186,7 +186,7 @@
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -202,7 +202,7 @@
*/
if (!wil_is_wmi_idle(wil)) {
wil_err(wil, "suspend failed due to pending WMI events\n");
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
@@ -216,7 +216,7 @@
if (rc) {
wil_err(wil, "platform device failed to suspend (%d)\n",
rc);
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
goto resume_after_fail;
@@ -272,6 +272,7 @@
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down : %d\n", rc);
+ wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
@@ -284,6 +285,7 @@
rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
+ wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
@@ -317,12 +319,9 @@
return rc;
}
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- bool keep_radio_on = ndev->flags & IFF_UP &&
- wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
@@ -339,19 +338,12 @@
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
- if (!rc)
- wil->suspend_stats.suspend_start_time = ktime_get();
-
return rc;
}
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- bool keep_radio_on = ndev->flags & IFF_UP &&
- wil->keep_radio_on_during_sleep;
- unsigned long long suspend_time_usec = 0;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
@@ -369,21 +361,9 @@
else
rc = wil_resume_radio_off(wil);
- if (rc)
- goto out;
-
- suspend_time_usec =
- ktime_to_us(ktime_sub(ktime_get(),
- wil->suspend_stats.suspend_start_time));
- wil->suspend_stats.total_suspend_time += suspend_time_usec;
- if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
- wil->suspend_stats.min_suspend_time = suspend_time_usec;
- if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
- wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
out:
- wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
- is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+ wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+ rc);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 2e301b6..4ea27b0 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -111,14 +111,14 @@
*
* HW has limitation that all vrings addresses must share the same
* upper 16 msb bits part of 48 bits address. To workaround that,
- * if we are using 48 bit addresses switch to 32 bit allocation
- * before allocating vring memory.
+ * if we are using more than 32 bit addresses switch to 32 bit
+ * allocation before allocating vring memory.
*
* There's no check for the return value of dma_set_mask_and_coherent,
* since we assume if we were able to set the mask during
* initialization in this system it will not fail if we set it again
*/
- if (wil->use_extended_dma_addr)
+ if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@
&pmc->pring_pa,
GFP_KERNEL);
- if (wil->use_extended_dma_addr)
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
wil_dbg_misc(wil,
"pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8fe2239..62d1d07 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -178,14 +178,14 @@
*
* HW has limitation that all vrings addresses must share the same
* upper 16 msb bits part of 48 bits address. To workaround that,
- * if we are using 48 bit addresses switch to 32 bit allocation
- * before allocating vring memory.
+ * if we are using more than 32 bit addresses switch to 32 bit
+ * allocation before allocating vring memory.
*
* There's no check for the return value of dma_set_mask_and_coherent,
* since we assume if we were able to set the mask during
* initialization in this system it will not fail if we set it again
*/
- if (wil->use_extended_dma_addr)
+ if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@
return -ENOMEM;
}
- if (wil->use_extended_dma_addr)
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
/* initially, all descriptors are SW owned
* For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
- struct wireless_dev *wdev = wil->wdev;
struct wil6210_rtap {
struct ieee80211_radiotap_header rthdr;
/* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@
int rtap_len = sizeof(struct wil6210_rtap);
int phy_length = 0; /* phy info header size, bytes */
static char phy_data[128];
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
if (rtap_include_phy_info) {
rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index bb43f3f..2f6d6c9 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -83,18 +84,18 @@
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
unsigned long successful_suspends;
- unsigned long failed_suspends;
unsigned long successful_resumes;
+ unsigned long failed_suspends;
unsigned long failed_resumes;
- unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+ struct wil_suspend_count_stats r_off;
+ struct wil_suspend_count_stats r_on;
+ unsigned long rejected_by_device; /* only radio on */
unsigned long rejected_by_host;
- unsigned long long total_suspend_time;
- unsigned long long min_suspend_time;
- unsigned long long max_suspend_time;
- ktime_t collection_start;
- ktime_t suspend_start_time;
};
/* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -170,6 +171,7 @@
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
#define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */
+#define RGF_USER_CPU_PC (0x8801e8)
#define RGF_USER_MAC_CPU_0 (0x8801fc)
#define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
@@ -195,6 +197,19 @@
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
+#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
+#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
+#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
+#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
+#define RGF_USER_XPM_IFC_RD_TIME4 (0x880cf8)
+#define RGF_USER_XPM_IFC_RD_TIME5 (0x880cfc)
+#define RGF_USER_XPM_IFC_RD_TIME6 (0x880d00)
+#define RGF_USER_XPM_IFC_RD_TIME7 (0x880d04)
+#define RGF_USER_XPM_IFC_RD_TIME8 (0x880d08)
+#define RGF_USER_XPM_IFC_RD_TIME9 (0x880d0c)
+#define RGF_USER_XPM_IFC_RD_TIME10 (0x880d10)
+#define RGF_USER_XPM_RD_DOUT_SAMPLE_TIME (0x880d64)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@@ -285,22 +300,33 @@
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define USER_EXT_USER_PMU_3 (0x88d00c)
+ #define BIT_PMU_DEVICE_RDY BIT(0)
+
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
+ #define JTAG_DEV_ID_TALYN (0x7e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
+#define RGF_OTP_MAC (0x8a0620)
+
/* crash codes for FW/Ucode stored here */
-#define RGF_FW_ASSERT_CODE (0x91f020)
-#define RGF_UCODE_ASSERT_CODE (0x91f028)
+
+/* ASSERT RGFs */
+#define SPARROW_RGF_FW_ASSERT_CODE (0x91f020)
+#define SPARROW_RGF_UCODE_ASSERT_CODE (0x91f028)
+#define TALYN_RGF_FW_ASSERT_CODE (0xa37020)
+#define TALYN_RGF_UCODE_ASSERT_CODE (0xa37028)
enum {
HW_VER_UNKNOWN,
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
+ HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
};
/* popular locations */
@@ -316,6 +342,10 @@
#define WIL_DATA_COMPLETION_TO_MS 200
/* Hardware definitions end */
+#define SPARROW_FW_MAPPING_TABLE_SIZE 10
+#define TALYN_FW_MAPPING_TABLE_SIZE 13
+#define MAX_FW_MAPPING_TABLE_SIZE 13
+
struct fw_map {
u32 from; /* linker address - from, inclusive */
u32 to; /* linker address - to, exclusive */
@@ -325,7 +355,10 @@
};
/* array size should be in sync with actual definition in the wmi.c */
-extern const struct fw_map fw_mapping[10];
+extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map sparrow_d0_mac_rgf_ext;
+extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
* mk_cidxtid - construct @cidxtid field
@@ -440,7 +473,7 @@
wil_status_fwconnected,
wil_status_dontscan,
wil_status_mbox_ready, /* MBOX structures ready */
- wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+ wil_status_irqen, /* interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
wil_status_suspending, /* suspend in progress */
@@ -572,7 +605,8 @@
};
enum {
- hw_capability_last
+ hw_capa_no_flash,
+ hw_capa_last
};
struct wil_probe_client_req {
@@ -648,7 +682,10 @@
u8 chip_revision;
const char *hw_name;
const char *wil_fw_name;
- DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ char *board_file;
+ u32 brd_file_addr;
+ u32 brd_file_max_size;
+ DECLARE_BITMAP(hw_capa, hw_capa_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
@@ -657,6 +694,7 @@
unsigned long last_fw_recovery; /* jiffies of last fw recovery */
wait_queue_head_t wq; /* for all wait_event() use */
/* profile */
+ struct cfg80211_chan_def monitor_chandef;
u32 monitor_flags;
u32 privacy; /* secure connection? */
u8 hidden_ssid; /* relevant in AP mode */
@@ -712,7 +750,7 @@
struct wil_sta_info sta[WIL6210_MAX_CID];
int bcast_vring;
u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
- bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+ u32 dma_addr_size; /* indicates dma addr size */
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -721,7 +759,7 @@
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
struct dentry *debug;
- struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ struct wil_blob_wrapper blobs[MAX_FW_MAPPING_TABLE_SIZE];
u8 discovery_mode;
u8 abft_len;
u8 wakeup_trigger;
@@ -769,6 +807,10 @@
bool suspend_resp_comp;
u32 bus_request_kbps;
u32 bus_request_kbps_pre_suspend;
+
+ u32 rgf_fw_assert_code_addr;
+ u32 rgf_ucode_assert_code_addr;
+ u32 iccm_base;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -894,6 +936,7 @@
int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void wil_set_ethtoolops(struct net_device *ndev);
+struct fw_map *wil_find_fw_mapping(const char *section);
void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
@@ -1033,6 +1076,7 @@
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
+int wil_request_board(struct wil6210_priv *wil, const char *name);
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
void wil_pm_runtime_allow(struct wil6210_priv *wil);
@@ -1041,8 +1085,8 @@
void wil_pm_runtime_put(struct wil6210_priv *wil);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
@@ -1076,4 +1120,9 @@
bool fst_link_loss);
int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct);
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 9520c39..f2dba31 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -39,6 +40,7 @@
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
/**
* WMI event receiving - theory of operations
@@ -70,23 +72,23 @@
* On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
* AHB addresses starting from 0x880000
*
- * Internally, firmware uses addresses that allows faster access but
+ * Internally, firmware uses addresses that allow faster access but
* are invisible from the host. To read from these addresses, alternative
* AHB address must be used.
- *
- * Memory mapping
- * Linker address PCI/Host address
- * 0x880000 .. 0xa80000 2Mb BAR0
- * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
- * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
*/
/**
- * @fw_mapping provides memory remapping table
+ * @sparrow_fw_mapping provides memory remapping table for sparrow
*
* array size should be in sync with the declaration in the wil6210.h
+ *
+ * Sparrow memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x808000 0x900000 .. 0x908000 32k DCCM
+ * 0x840000 .. 0x860000 0x908000 .. 0x928000 128k PERIPH
*/
-const struct fw_map fw_mapping[] = {
+const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
{0x000000, 0x040000, 0x8c0000, "fw_code", true},
/* FW data RAM 32k */
@@ -112,6 +114,59 @@
{0x800000, 0x804000, 0x940000, "uc_data", false},
};
+/**
+ * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
+ * it is a bit larger to support extra features
+ */
+const struct fw_map sparrow_d0_mac_rgf_ext = {
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+};
+
+/**
+ * @talyn_fw_mapping provides memory remapping table for Talyn
+ *
+ * array size should be in sync with the declaration in the wil6210.h
+ *
+ * Talyn memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_fw_mapping[] = {
+ /* FW code RAM 1M */
+ {0x000000, 0x100000, 0x900000, "fw_code", true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ /* mac_ext_rgf 1344b */
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false},
+};
+
+struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
+
struct blink_on_off_time led_blink_time[] = {
{WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
{WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
@@ -139,6 +194,24 @@
}
/**
+ * find fw_mapping entry by section name
+ * @section - section name
+ *
+ * Return pointer to section or NULL if not found
+ */
+struct fw_map *wil_find_fw_mapping(const char *section)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++)
+ if (fw_mapping[i].name &&
+ !strcmp(section, fw_mapping[i].name))
+ return &fw_mapping[i];
+
+ return NULL;
+}
+
+/**
* Check address validity for WMI buffer; remap if needed
* @ptr - internal (linker) fw/ucode address
* @size - if non zero, validate the block does not
@@ -208,6 +281,242 @@
return 0;
}
+static const char *cmdid2name(u16 cmdid)
+{
+ switch (cmdid) {
+ case WMI_NOTIFY_REQ_CMDID:
+ return "WMI_NOTIFY_REQ_CMD";
+ case WMI_START_SCAN_CMDID:
+ return "WMI_START_SCAN_CMD";
+ case WMI_CONNECT_CMDID:
+ return "WMI_CONNECT_CMD";
+ case WMI_DISCONNECT_CMDID:
+ return "WMI_DISCONNECT_CMD";
+ case WMI_SW_TX_REQ_CMDID:
+ return "WMI_SW_TX_REQ_CMD";
+ case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+ case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_BRP_SET_ANT_LIMIT_CMDID:
+ return "WMI_BRP_SET_ANT_LIMIT_CMD";
+ case WMI_TOF_SESSION_START_CMDID:
+ return "WMI_TOF_SESSION_START_CMD";
+ case WMI_AOA_MEAS_CMDID:
+ return "WMI_AOA_MEAS_CMD";
+ case WMI_PMC_CMDID:
+ return "WMI_PMC_CMD";
+ case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+ case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+ case WMI_VRING_CFG_CMDID:
+ return "WMI_VRING_CFG_CMD";
+ case WMI_BCAST_VRING_CFG_CMDID:
+ return "WMI_BCAST_VRING_CFG_CMD";
+ case WMI_TRAFFIC_SUSPEND_CMDID:
+ return "WMI_TRAFFIC_SUSPEND_CMD";
+ case WMI_TRAFFIC_RESUME_CMDID:
+ return "WMI_TRAFFIC_RESUME_CMD";
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMD";
+ case WMI_SET_MAC_ADDRESS_CMDID:
+ return "WMI_SET_MAC_ADDRESS_CMD";
+ case WMI_LED_CFG_CMDID:
+ return "WMI_LED_CFG_CMD";
+ case WMI_PCP_START_CMDID:
+ return "WMI_PCP_START_CMD";
+ case WMI_PCP_STOP_CMDID:
+ return "WMI_PCP_STOP_CMD";
+ case WMI_SET_SSID_CMDID:
+ return "WMI_SET_SSID_CMD";
+ case WMI_GET_SSID_CMDID:
+ return "WMI_GET_SSID_CMD";
+ case WMI_SET_PCP_CHANNEL_CMDID:
+ return "WMI_SET_PCP_CHANNEL_CMD";
+ case WMI_GET_PCP_CHANNEL_CMDID:
+ return "WMI_GET_PCP_CHANNEL_CMD";
+ case WMI_P2P_CFG_CMDID:
+ return "WMI_P2P_CFG_CMD";
+ case WMI_START_LISTEN_CMDID:
+ return "WMI_START_LISTEN_CMD";
+ case WMI_START_SEARCH_CMDID:
+ return "WMI_START_SEARCH_CMD";
+ case WMI_DISCOVERY_STOP_CMDID:
+ return "WMI_DISCOVERY_STOP_CMD";
+ case WMI_DELETE_CIPHER_KEY_CMDID:
+ return "WMI_DELETE_CIPHER_KEY_CMD";
+ case WMI_ADD_CIPHER_KEY_CMDID:
+ return "WMI_ADD_CIPHER_KEY_CMD";
+ case WMI_SET_APPIE_CMDID:
+ return "WMI_SET_APPIE_CMD";
+ case WMI_CFG_RX_CHAIN_CMDID:
+ return "WMI_CFG_RX_CHAIN_CMD";
+ case WMI_TEMP_SENSE_CMDID:
+ return "WMI_TEMP_SENSE_CMD";
+ case WMI_DEL_STA_CMDID:
+ return "WMI_DEL_STA_CMD";
+ case WMI_DISCONNECT_STA_CMDID:
+ return "WMI_DISCONNECT_STA_CMD";
+ case WMI_VRING_BA_EN_CMDID:
+ return "WMI_VRING_BA_EN_CMD";
+ case WMI_VRING_BA_DIS_CMDID:
+ return "WMI_VRING_BA_DIS_CMD";
+ case WMI_RCP_DELBA_CMDID:
+ return "WMI_RCP_DELBA_CMD";
+ case WMI_RCP_ADDBA_RESP_CMDID:
+ return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_PS_DEV_PROFILE_CFG_CMDID:
+ return "WMI_PS_DEV_PROFILE_CFG_CMD";
+ case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_ABORT_SCAN_CMDID:
+ return "WMI_ABORT_SCAN_CMD";
+ case WMI_NEW_STA_CMDID:
+ return "WMI_NEW_STA_CMD";
+ case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+ case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+ case WMI_START_SCHED_SCAN_CMDID:
+ return "WMI_START_SCHED_SCAN_CMD";
+ case WMI_STOP_SCHED_SCAN_CMDID:
+ return "WMI_STOP_SCHED_SCAN_CMD";
+ default:
+ return "Untracked CMD";
+ }
+}
+
+static const char *eventid2name(u16 eventid)
+{
+ switch (eventid) {
+ case WMI_NOTIFY_REQ_DONE_EVENTID:
+ return "WMI_NOTIFY_REQ_DONE_EVENT";
+ case WMI_DISCONNECT_EVENTID:
+ return "WMI_DISCONNECT_EVENT";
+ case WMI_SW_TX_COMPLETE_EVENTID:
+ return "WMI_SW_TX_COMPLETE_EVENT";
+ case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+ return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+ case WMI_FW_READY_EVENTID:
+ return "WMI_FW_READY_EVENT";
+ case WMI_TRAFFIC_RESUME_EVENTID:
+ return "WMI_TRAFFIC_RESUME_EVENT";
+ case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+ case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+ case WMI_VRING_CFG_DONE_EVENTID:
+ return "WMI_VRING_CFG_DONE_EVENT";
+ case WMI_READY_EVENTID:
+ return "WMI_READY_EVENT";
+ case WMI_RX_MGMT_PACKET_EVENTID:
+ return "WMI_RX_MGMT_PACKET_EVENT";
+ case WMI_TX_MGMT_PACKET_EVENTID:
+ return "WMI_TX_MGMT_PACKET_EVENT";
+ case WMI_SCAN_COMPLETE_EVENTID:
+ return "WMI_SCAN_COMPLETE_EVENT";
+ case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+ return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+ case WMI_CONNECT_EVENTID:
+ return "WMI_CONNECT_EVENT";
+ case WMI_EAPOL_RX_EVENTID:
+ return "WMI_EAPOL_RX_EVENT";
+ case WMI_BA_STATUS_EVENTID:
+ return "WMI_BA_STATUS_EVENT";
+ case WMI_RCP_ADDBA_REQ_EVENTID:
+ return "WMI_RCP_ADDBA_REQ_EVENT";
+ case WMI_DELBA_EVENTID:
+ return "WMI_DELBA_EVENT";
+ case WMI_VRING_EN_EVENTID:
+ return "WMI_VRING_EN_EVENT";
+ case WMI_DATA_PORT_OPEN_EVENTID:
+ return "WMI_DATA_PORT_OPEN_EVENT";
+ case WMI_AOA_MEAS_EVENTID:
+ return "WMI_AOA_MEAS_EVENT";
+ case WMI_TOF_SESSION_END_EVENTID:
+ return "WMI_TOF_SESSION_END_EVENT";
+ case WMI_TOF_GET_CAPABILITIES_EVENTID:
+ return "WMI_TOF_GET_CAPABILITIES_EVENT";
+ case WMI_TOF_SET_LCR_EVENTID:
+ return "WMI_TOF_SET_LCR_EVENT";
+ case WMI_TOF_SET_LCI_EVENTID:
+ return "WMI_TOF_SET_LCI_EVENT";
+ case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+ return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+ case WMI_TOF_CHANNEL_INFO_EVENTID:
+ return "WMI_TOF_CHANNEL_INFO_EVENT";
+ case WMI_TRAFFIC_SUSPEND_EVENTID:
+ return "WMI_TRAFFIC_SUSPEND_EVENT";
+ case WMI_ECHO_RSP_EVENTID:
+ return "WMI_ECHO_RSP_EVENT";
+ case WMI_LED_CFG_DONE_EVENTID:
+ return "WMI_LED_CFG_DONE_EVENT";
+ case WMI_PCP_STARTED_EVENTID:
+ return "WMI_PCP_STARTED_EVENT";
+ case WMI_PCP_STOPPED_EVENTID:
+ return "WMI_PCP_STOPPED_EVENT";
+ case WMI_GET_SSID_EVENTID:
+ return "WMI_GET_SSID_EVENT";
+ case WMI_GET_PCP_CHANNEL_EVENTID:
+ return "WMI_GET_PCP_CHANNEL_EVENT";
+ case WMI_P2P_CFG_DONE_EVENTID:
+ return "WMI_P2P_CFG_DONE_EVENT";
+ case WMI_LISTEN_STARTED_EVENTID:
+ return "WMI_LISTEN_STARTED_EVENT";
+ case WMI_SEARCH_STARTED_EVENTID:
+ return "WMI_SEARCH_STARTED_EVENT";
+ case WMI_DISCOVERY_STOPPED_EVENTID:
+ return "WMI_DISCOVERY_STOPPED_EVENT";
+ case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+ return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+ case WMI_TEMP_SENSE_DONE_EVENTID:
+ return "WMI_TEMP_SENSE_DONE_EVENT";
+ case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+ return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+ case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+ return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+ case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+ case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+ case WMI_START_SCHED_SCAN_EVENTID:
+ return "WMI_START_SCHED_SCAN_EVENT";
+ case WMI_STOP_SCHED_SCAN_EVENTID:
+ return "WMI_STOP_SCHED_SCAN_EVENT";
+ case WMI_SCHED_SCAN_RESULT_EVENTID:
+ return "WMI_SCHED_SCAN_RESULT_EVENT";
+ default:
+ return "Untracked EVENT";
+ }
+}
+
static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
struct {
@@ -304,7 +613,8 @@
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
- wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+ cmdid2name(cmdid), cmdid, len);
wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
sizeof(cmd), true);
wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -581,8 +891,6 @@
}
}
- /* FIXME FW can transmit only ucast frames to peer */
- /* FIXME real ring_id instead of hard coded 0 */
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].status = wil_sta_conn_pending;
@@ -869,6 +1177,75 @@
wil_ftm_evt_per_dest_res(wil, evt);
}
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_sched_scan_result_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ struct cfg80211_bss *bss;
+
+ if (flen < 0) {
+ wil_err(wil, "sched scan result event too short, len %d\n",
+ len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "sched scan result length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ fc = rx_mgmt_frame->frame_control;
+ if (!ieee80211_is_probe_resp(fc)) {
+ wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+ fc);
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ signal = 100 * data->info.rssi;
+ else
+ signal = data->info.sqi;
+
+ wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+ data->info.channel, data->info.mcs, data->info.rssi);
+ wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+ d_len, data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+ d_len, signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+ }
+
+ cfg80211_sched_scan_results(wiphy);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -903,6 +1280,7 @@
{WMI_TOF_SET_LCI_EVENTID, wmi_evt_ignore},
{WMI_TOF_FTM_PER_DEST_RES_EVENTID, wmi_evt_per_dest_res},
{WMI_TOF_CHANNEL_INFO_EVENTID, wmi_evt_ignore},
+ {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
};
/*
@@ -1009,8 +1387,8 @@
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
- wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
- id, wmi->mid, tstamp);
+ wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+ eventid2name(id), id, wmi->mid, tstamp);
trace_wil6210_wmi_event(wmi, &wmi[1],
len - sizeof(*wmi));
}
@@ -1513,7 +1891,7 @@
int rc;
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
if (ch)
@@ -1969,6 +2347,16 @@
return rc;
}
+static const char *suspend_status2name(u8 status)
+{
+ switch (status) {
+ case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+ return "LINK_NOT_IDLE";
+ default:
+ return "Untracked status";
+ }
+}
+
int wmi_suspend(struct wil6210_priv *wil)
{
int rc;
@@ -1984,7 +2372,7 @@
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
- reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+ reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -2016,8 +2404,9 @@
}
wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
- if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
- wil_dbg_pm(wil, "device rejected the suspend\n");
+ if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+ wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+ suspend_status2name(reply.evt.status));
wil->suspend_stats.rejected_by_device++;
}
rc = reply.evt.status;
@@ -2029,21 +2418,50 @@
return rc;
}
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+ string[0] = '\0';
+
+ if (!triggers) {
+ strlcat(string, " UNKNOWN", str_size);
+ return;
+ }
+
+ if (triggers & WMI_RESUME_TRIGGER_HOST)
+ strlcat(string, " HOST", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+ strlcat(string, " UCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+ strlcat(string, " BCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+ strlcat(string, " WMI_EVT", str_size);
+}
+
int wmi_resume(struct wil6210_priv *wil)
{
int rc;
+ char string[100];
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_resume_event evt;
} __packed reply;
reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+ reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
if (rc)
return rc;
+ resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+ sizeof(string));
+ wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+ reply.evt.status ? "failed" : "passed", string,
+ le32_to_cpu(reply.evt.resume_triggers));
return reply.evt.status;
}
@@ -2074,8 +2492,8 @@
void *evt_data = (void *)(&wmi[1]);
u16 id = le16_to_cpu(wmi->command_id);
- wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
- id, wil->reply_id);
+ wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+ eventid2name(id), id, wil->reply_id);
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id) {
WARN_ON(wil->reply_buf);
@@ -2199,3 +2617,159 @@
return 0;
}
+
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_sets,
+ int n_match_sets)
+{
+ int i;
+
+ if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+ wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+ n_match_sets, WMI_MAX_PNO_SSID_NUM);
+ n_match_sets = WMI_MAX_PNO_SSID_NUM;
+ }
+ cmd->num_of_ssids = n_match_sets;
+
+ for (i = 0; i < n_match_sets; i++) {
+ struct wmi_sched_scan_ssid_match *wmi_match =
+ &cmd->ssid_for_match[i];
+ struct cfg80211_match_set *cfg_match = &match_sets[i];
+ int j;
+
+ wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+ memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+ min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+ wmi_match->rssi_threshold = S8_MIN;
+ if (cfg_match->rssi_thold >= S8_MIN &&
+ cfg_match->rssi_thold <= S8_MAX)
+ wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+ for (j = 0; j < n_ssids; j++)
+ if (wmi_match->ssid_len == ssids[j].ssid_len &&
+ memcmp(wmi_match->ssid, ssids[j].ssid,
+ wmi_match->ssid_len) == 0)
+ wmi_match->add_ssid_to_probe = true;
+ }
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ u32 n_channels,
+ struct ieee80211_channel **channels)
+{
+ int i;
+
+ if (n_channels > WMI_MAX_CHANNEL_NUM) {
+ wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+ n_channels, WMI_MAX_CHANNEL_NUM);
+ n_channels = WMI_MAX_CHANNEL_NUM;
+ }
+ cmd->num_of_channels = n_channels;
+
+ for (i = 0; i < n_channels; i++) {
+ struct ieee80211_channel *cfg_chan = channels[i];
+
+ cmd->channel_list[i] = cfg_chan->hw_value - 1;
+ }
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_sched_scan_plan *scan_plans,
+ int n_scan_plans)
+{
+ int i;
+
+ if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+ wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+ n_scan_plans, WMI_MAX_PLANS_NUM);
+ n_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ for (i = 0; i < n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+ cmd->scan_plans[i].interval_sec =
+ cpu_to_le16(cfg_plan->interval);
+ cmd->scan_plans[i].num_of_iterations =
+ cpu_to_le16(cfg_plan->iterations);
+ }
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request)
+{
+ int rc;
+ struct wmi_start_sched_scan_cmd cmd = {
+ .min_rssi_threshold = S8_MIN,
+ .initial_delay_sec = cpu_to_le16(request->delay),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_start_sched_scan_event evt;
+ } __packed reply;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (request->min_rssi_thold >= S8_MIN &&
+ request->min_rssi_thold <= S8_MAX)
+ cmd.min_rssi_threshold = request->min_rssi_thold;
+
+ wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+ request->match_sets, request->n_match_sets);
+ wmi_sched_scan_set_channels(wil, &cmd,
+ request->n_channels, request->channels);
+ wmi_sched_scan_set_plans(wil, &cmd,
+ request->scan_plans, request->n_scan_plans);
+
+ reply.evt.result = WMI_PNO_REJECT;
+
+ rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+ WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "start sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_stop_sched_scan_event evt;
+ } __packed reply;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ reply.evt.result = WMI_PNO_REJECT;
+
+ rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+ WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "stop sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 809e320..28568dc 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,7 @@
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_PNO = 15,
WMI_FW_CAPABILITY_CONNECT_SNR_THR = 16,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_MAX,
@@ -89,6 +90,8 @@
WMI_CONNECT_CMDID = 0x01,
WMI_DISCONNECT_CMDID = 0x03,
WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCHED_SCAN_CMDID = 0x05,
+ WMI_STOP_SCHED_SCAN_CMDID = 0x06,
WMI_START_SCAN_CMDID = 0x07,
WMI_SET_BSS_FILTER_CMDID = 0x09,
WMI_SET_PROBED_SSID_CMDID = 0x0A,
@@ -387,6 +390,38 @@
} channel_list[0];
} __packed;
+#define WMI_MAX_PNO_SSID_NUM (16)
+#define WMI_MAX_CHANNEL_NUM (6)
+#define WMI_MAX_PLANS_NUM (2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ s8 rssi_threshold;
+ /* boolean */
+ u8 add_ssid_to_probe;
+ u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+ __le16 interval_sec;
+ __le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+ struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+ u8 num_of_ssids;
+ s8 min_rssi_threshold;
+ u8 channel_list[WMI_MAX_CHANNEL_NUM];
+ u8 num_of_channels;
+ u8 reserved;
+ __le16 initial_delay_sec;
+ struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@@ -1240,6 +1275,9 @@
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_START_SCHED_SCAN_EVENTID = 0x1005,
+ WMI_STOP_SCHED_SCAN_EVENTID = 0x1006,
+ WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
@@ -1602,6 +1640,49 @@
__le32 status;
} __packed;
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 rssi;
+ u8 range;
+ u8 sqi;
+ __le16 stype;
+ __le16 snr;
+ __le32 len;
+ /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+ u8 qid;
+ /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+ u8 mid;
+ u8 cid;
+ /* From Radio MNGR */
+ u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+ WMI_PNO_SUCCESS = 0x00,
+ WMI_PNO_REJECT = 0x01,
+ WMI_PNO_INVALID_PARAMETERS = 0x02,
+ WMI_PNO_NOT_ENABLED = 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+ /* pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+ /* pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
enum wmi_acs_info_bitmask {
WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
@@ -1816,24 +1897,6 @@
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
- u8 mcs;
- s8 rssi;
- u8 range;
- u8 sqi;
- __le16 stype;
- __le16 snr;
- __le32 len;
- /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
- u8 qid;
- /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
- u8 mid;
- u8 cid;
- /* From Radio MNGR */
- u8 channel;
-} __packed;
-
/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
struct wmi_rf_xpm_read_result_event {
/* enum wmi_fw_status_e - success=0 or fail=1 */
@@ -2269,8 +2332,8 @@
} __packed;
enum wmi_traffic_suspend_status {
- WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
- WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
};
/* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2284,10 +2347,21 @@
WMI_TRAFFIC_RESUME_FAILED = 0x1,
};
+enum wmi_resume_trigger {
+ WMI_RESUME_TRIGGER_UNKNOWN = 0x0,
+ WMI_RESUME_TRIGGER_HOST = 0x1,
+ WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
+ WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
+ WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
+};
+
/* WMI_TRAFFIC_RESUME_EVENTID */
struct wmi_traffic_resume_event {
- /* enum wmi_traffic_resume_status_e */
+ /* enum wmi_traffic_resume_status */
u8 status;
+ u8 reserved[3];
+ /* enum wmi_resume_trigger bitmap */
+ __le32 resume_triggers;
} __packed;
/* Power Save command completion status codes */
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index ea4bedf..0280d42 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,8 @@
unsigned int firm_gpio;
unsigned int ese_gpio;
const char *clk_src_name;
+ /* NFC_CLK pin voting state */
+ bool clk_pin_voting;
};
static const struct of_device_id msm_match_table[] = {
@@ -469,39 +471,47 @@
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value disable: %s: info: %p\n",
__func__, nqx_dev);
- if (gpio_is_valid(nqx_dev->firm_gpio))
+ if (gpio_is_valid(nqx_dev->firm_gpio)) {
gpio_set_value(nqx_dev->firm_gpio, 0);
+ usleep_range(10000, 10100);
+ }
if (gpio_is_valid(nqx_dev->ese_gpio)) {
if (!gpio_get_value(nqx_dev->ese_gpio)) {
dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
gpio_set_value(nqx_dev->en_gpio, 0);
+ usleep_range(10000, 10100);
} else {
dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
}
} else {
dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
gpio_set_value(nqx_dev->en_gpio, 0);
+ usleep_range(10000, 10100);
}
- r = nqx_clock_deselect(nqx_dev);
- if (r < 0)
- dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+ if (nqx_dev->pdata->clk_pin_voting) {
+ r = nqx_clock_deselect(nqx_dev);
+ if (r < 0)
+ dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+ }
nqx_dev->nfc_ven_enabled = false;
- /* hardware dependent delay */
- msleep(100);
} else if (arg == 1) {
nqx_enable_irq(nqx_dev);
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value enable: %s: info: %p\n",
__func__, nqx_dev);
- if (gpio_is_valid(nqx_dev->firm_gpio))
+ if (gpio_is_valid(nqx_dev->firm_gpio)) {
gpio_set_value(nqx_dev->firm_gpio, 0);
+ usleep_range(10000, 10100);
+ }
gpio_set_value(nqx_dev->en_gpio, 1);
- r = nqx_clock_select(nqx_dev);
- if (r < 0)
- dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+ usleep_range(10000, 10100);
+ if (nqx_dev->pdata->clk_pin_voting) {
+ r = nqx_clock_select(nqx_dev);
+ if (r < 0)
+ dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+ }
nqx_dev->nfc_ven_enabled = true;
- msleep(20);
} else if (arg == 2) {
/*
* We are switching to Dowload Mode, toggle the enable pin
@@ -515,14 +525,15 @@
}
}
gpio_set_value(nqx_dev->en_gpio, 1);
- msleep(20);
- if (gpio_is_valid(nqx_dev->firm_gpio))
+ usleep_range(10000, 10100);
+ if (gpio_is_valid(nqx_dev->firm_gpio)) {
gpio_set_value(nqx_dev->firm_gpio, 1);
- msleep(20);
+ usleep_range(10000, 10100);
+ }
gpio_set_value(nqx_dev->en_gpio, 0);
- msleep(100);
+ usleep_range(10000, 10100);
gpio_set_value(nqx_dev->en_gpio, 1);
- msleep(20);
+ usleep_range(10000, 10100);
} else {
r = -ENOIOCTLCMD;
}
@@ -648,13 +659,14 @@
unsigned char nci_reset_rsp[6];
unsigned char init_rsp_len = 0;
unsigned int enable_gpio = nqx_dev->en_gpio;
+
/* making sure that the NFCC starts in a clean state. */
gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
/* hardware dependent delay */
- msleep(20);
+ usleep_range(10000, 10100);
gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
/* hardware dependent delay */
- msleep(20);
+ usleep_range(10000, 10100);
/* send NCI CORE RESET CMD with Keep Config parameters */
ret = i2c_master_send(client, raw_nci_reset_cmd,
@@ -670,21 +682,17 @@
/* Read Response of RESET command */
ret = i2c_master_recv(client, nci_reset_rsp,
sizeof(nci_reset_rsp));
- dev_err(&client->dev,
- "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
- __func__, nci_reset_rsp[0],
- nci_reset_rsp[1], nci_reset_rsp[2]);
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_recv Error\n", __func__);
goto err_nfcc_hw_check;
}
- ret = i2c_master_send(client, raw_nci_init_cmd,
- sizeof(raw_nci_init_cmd));
+ ret = nqx_standby_write(nqx_dev, raw_nci_init_cmd,
+ sizeof(raw_nci_init_cmd));
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_send Error\n", __func__);
- goto err_nfcc_hw_check;
+ goto err_nfcc_core_init_fail;
}
/* hardware dependent delay */
msleep(30);
@@ -694,7 +702,7 @@
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_recv Error\n", __func__);
- goto err_nfcc_hw_check;
+ goto err_nfcc_core_init_fail;
}
init_rsp_len = 2 + nci_init_rsp[2]; /*payload + len*/
if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
@@ -707,6 +715,11 @@
nqx_dev->nqx_info.info.fw_minor =
nci_init_rsp[init_rsp_len];
}
+ dev_dbg(&client->dev,
+ "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+ __func__, nci_reset_rsp[0],
+ nci_reset_rsp[1], nci_reset_rsp[2]);
+
dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
nqx_dev->nqx_info.info.chip_type);
dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
@@ -746,6 +759,12 @@
ret = 0;
goto done;
+err_nfcc_core_init_fail:
+ dev_err(&client->dev,
+ "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+ __func__, nci_reset_rsp[0],
+ nci_reset_rsp[1], nci_reset_rsp[2]);
+
err_nfcc_hw_check:
ret = -ENXIO;
dev_err(&client->dev,
@@ -828,12 +847,13 @@
pdata->ese_gpio = -EINVAL;
}
- r = of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name);
+ if (of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name))
+ pdata->clk_pin_voting = false;
+ else
+ pdata->clk_pin_voting = true;
pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
- if (r)
- return -EINVAL;
return r;
}
@@ -1213,6 +1233,7 @@
.owner = THIS_MODULE,
.name = "nq-nci",
.of_match_table = msm_match_table,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &nfc_pm_ops,
},
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index b5305f0..c25d7dc 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -98,7 +98,26 @@
.of_match_table = qfprom_of_match,
},
};
-module_platform_driver(qfprom_driver);
+
+static int __init qfprom_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&qfprom_driver);
+ if (ret != 0)
+ pr_err("Failed to register qfprom driver\n");
+
+ return ret;
+}
+
+static void __exit qfprom_exit(void)
+{
+ return platform_driver_unregister(&qfprom_driver);
+}
+
+subsys_initcall(qfprom_init);
+module_exit(qfprom_exit);
+
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_DESCRIPTION("Qualcomm QFPROM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 7d59613..b897813 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -36,6 +36,7 @@
#include <linux/reset.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
+#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -49,8 +50,7 @@
#include <linux/ipc_logging.h>
#include <linux/msm_pcie.h>
-#define PCIE_VENDOR_ID_RCP 0x17cb
-#define PCIE_DEVICE_ID_RCP 0x0106
+#define PCIE_VENDOR_ID_QCOM 0x17cb
#define PCIE20_L1SUB_CONTROL1 0x1E4
#define PCIE20_PARF_DBI_BASE_ADDR 0x350
@@ -62,7 +62,6 @@
#define PCIE_N_SW_RESET(n) (PCS_PORT(n) + 0x00)
#define PCIE_N_POWER_DOWN_CONTROL(n) (PCS_PORT(n) + 0x04)
-#define PCIE_N_PCS_STATUS(n) (PCS_PORT(n) + 0x174)
#define PCIE_GEN3_COM_INTEGLOOP_GAIN1_MODE0 0x0154
#define PCIE_GEN3_L0_DRVR_CTRL0 0x080c
@@ -70,7 +69,6 @@
#define PCIE_GEN3_L0_BIST_ERR_CNT1_STATUS 0x08a8
#define PCIE_GEN3_L0_BIST_ERR_CNT2_STATUS 0x08ac
#define PCIE_GEN3_L0_DEBUG_BUS_STATUS4 0x08bc
-#define PCIE_GEN3_PCIE_PHY_PCS_STATUS 0x1aac
#define PCIE20_PARF_SYS_CTRL 0x00
#define PCIE20_PARF_PM_CTRL 0x20
@@ -121,6 +119,15 @@
#define PCIE20_PLR_IATU_LTAR 0x918
#define PCIE20_PLR_IATU_UTAR 0x91c
+#define PCIE_IATU_BASE(n) (n * 0x200)
+
+#define PCIE_IATU_CTRL1(n) (PCIE_IATU_BASE(n) + 0x00)
+#define PCIE_IATU_CTRL2(n) (PCIE_IATU_BASE(n) + 0x04)
+#define PCIE_IATU_LBAR(n) (PCIE_IATU_BASE(n) + 0x08)
+#define PCIE_IATU_UBAR(n) (PCIE_IATU_BASE(n) + 0x0c)
+#define PCIE_IATU_LAR(n) (PCIE_IATU_BASE(n) + 0x10)
+#define PCIE_IATU_LTAR(n) (PCIE_IATU_BASE(n) + 0x14)
+#define PCIE_IATU_UTAR(n) (PCIE_IATU_BASE(n) + 0x18)
#define PCIE20_PORT_LINK_CTRL_REG 0x710
#define PCIE20_GEN3_RELATED_REG 0x890
@@ -177,7 +184,7 @@
#define MAX_PROP_SIZE 32
#define MAX_RC_NAME_LEN 15
#define MSM_PCIE_MAX_VREG 4
-#define MSM_PCIE_MAX_CLK 12
+#define MSM_PCIE_MAX_CLK 13
#define MSM_PCIE_MAX_PIPE_CLK 1
#define MAX_RC_NUM 3
#define MAX_DEVICE_NUM 20
@@ -185,7 +192,6 @@
#define PCIE_TLP_RD_SIZE 0x5
#define PCIE_MSI_NR_IRQS 256
#define MSM_PCIE_MAX_MSI 32
-#define MAX_MSG_LEN 80
#define PCIE_LOG_PAGES (50)
#define PCIE_CONF_SPACE_DW 1024
#define PCIE_CLEAR 0xDEADBEEF
@@ -217,6 +223,9 @@
#endif
#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
+#define PCIE_BUS_PRIV_DATA(bus) \
+ (struct msm_pcie_dev_t *)(bus->sysdata)
+
/* Config Space Offsets */
#define BDF_OFFSET(bus, devfn) \
((bus << 24) | (devfn << 16))
@@ -287,6 +296,7 @@
MSM_PCIE_RES_PHY,
MSM_PCIE_RES_DM_CORE,
MSM_PCIE_RES_ELBI,
+ MSM_PCIE_RES_IATU,
MSM_PCIE_RES_CONF,
MSM_PCIE_RES_IO,
MSM_PCIE_RES_BARS,
@@ -363,6 +373,76 @@
MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
};
+enum msm_pcie_debugfs_option {
+ MSM_PCIE_OUTPUT_PCIE_INFO,
+ MSM_PCIE_DISABLE_LINK,
+ MSM_PCIE_ENABLE_LINK,
+ MSM_PCIE_DISABLE_ENABLE_LINK,
+ MSM_PCIE_DUMP_SHADOW_REGISTER,
+ MSM_PCIE_DISABLE_L0S,
+ MSM_PCIE_ENABLE_L0S,
+ MSM_PCIE_DISABLE_L1,
+ MSM_PCIE_ENABLE_L1,
+ MSM_PCIE_DISABLE_L1SS,
+ MSM_PCIE_ENABLE_L1SS,
+ MSM_PCIE_ENUMERATION,
+ MSM_PCIE_READ_PCIE_REGISTER,
+ MSM_PCIE_WRITE_PCIE_REGISTER,
+ MSM_PCIE_DUMP_PCIE_REGISTER_SPACE,
+ MSM_PCIE_ALLOCATE_DDR_MAP_LBAR,
+ MSM_PCIE_FREE_DDR_UNMAP_LBAR,
+ MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS,
+ MSM_PCIE_CONFIGURE_LOOPBACK,
+ MSM_PCIE_SETUP_LOOPBACK_IATU,
+ MSM_PCIE_READ_DDR,
+ MSM_PCIE_READ_LBAR,
+ MSM_PCIE_WRITE_DDR,
+ MSM_PCIE_WRITE_LBAR,
+ MSM_PCIE_DISABLE_AER,
+ MSM_PCIE_ENABLE_AER,
+ MSM_PCIE_GPIO_STATUS,
+ MSM_PCIE_ASSERT_PERST,
+ MSM_PCIE_DEASSERT_PERST,
+ MSM_PCIE_KEEP_RESOURCES_ON,
+ MSM_PCIE_FORCE_GEN1,
+ MSM_PCIE_MAX_DEBUGFS_OPTION
+};
+
+static const char * const
+ msm_pcie_debugfs_option_desc[MSM_PCIE_MAX_DEBUGFS_OPTION] = {
+ "OUTPUT PCIE INFO",
+ "DISABLE LINK",
+ "ENABLE LINK",
+ "DISABLE AND ENABLE LINK",
+ "DUMP PCIE SHADOW REGISTER",
+ "DISABLE L0S",
+ "ENABLE L0S",
+ "DISABLE L1",
+ "ENABLE L1",
+ "DISABLE L1SS",
+ "ENABLE L1SS",
+ "ENUMERATE",
+ "READ A PCIE REGISTER",
+ "WRITE TO PCIE REGISTER",
+ "DUMP PCIE REGISTER SPACE",
+ "ALLOCATE DDR AND MAP LBAR",
+ "FREE DDR AND UNMAP LBAR",
+ "OUTPUT DDR AND LBAR VIR ADDRESS",
+ "CONFIGURE PCIE LOOPBACK",
+ "SETUP LOOPBACK IATU",
+ "READ DDR",
+ "READ LBAR",
+ "WRITE DDR",
+ "WRITE LBAR",
+ "SET AER ENABLE FLAG",
+ "CLEAR AER ENABLE FLAG",
+ "OUTPUT PERST AND WAKE GPIO STATUS",
+ "ASSERT PERST",
+ "DE-ASSERT PERST",
+ "SET KEEP_RESOURCES_ON FLAG",
+ "FORCE GEN 1 SPEED FOR LINK TRAINING"
+};
+
/* gpio info structure */
struct msm_pcie_gpio_info_t {
char *name;
@@ -452,6 +532,7 @@
void __iomem *parf;
void __iomem *phy;
void __iomem *elbi;
+ void __iomem *iatu;
void __iomem *dm_core;
void __iomem *conf;
void __iomem *bars;
@@ -506,6 +587,7 @@
uint32_t switch_latency;
uint32_t wr_halt_size;
uint32_t slv_addr_space_size;
+ uint32_t phy_status_offset;
uint32_t cpl_timeout;
uint32_t current_bdf;
uint32_t perst_delay_us_min;
@@ -587,13 +669,32 @@
module_param_named(keep_resources_on, msm_pcie_keep_resources_on,
int, 0644);
+/*
+ * For each bit set, force the corresponding root complex
+ * to do link training at gen1 speed.
+ */
+static int msm_pcie_force_gen1;
+module_param_named(force_gen1, msm_pcie_force_gen1,
+ int, 0644);
+
+
+/*
+ * For each bit set in BIT[3:0] determines which corresponding
+ * root complex will use the value in BIT[31:4] to override the
+ * default (LINK_UP_CHECK_MAX_COUNT) max check count for link training.
+ * Each iteration is LINK_UP_TIMEOUT_US_MIN long.
+ */
+static int msm_pcie_link_check_max_count;
+module_param_named(link_check_max_count, msm_pcie_link_check_max_count,
+ int, 0644);
+
/* debugfs values */
-static u32 rc_sel;
+static u32 rc_sel = BIT(0);
static u32 base_sel;
static u32 wr_offset;
static u32 wr_mask;
static u32 wr_value;
-static ulong corr_counter_limit = 5;
+static u32 corr_counter_limit = 5;
/* Table to track info of PCIe devices */
static struct msm_pcie_device_info
@@ -675,6 +776,7 @@
{NULL, "pcie_0_ldo", 0, false, true},
{NULL, "pcie_0_smmu_clk", 0, false, false},
{NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_0_sleep_clk", 0, false, false},
{NULL, "pcie_phy_refgen_clk", 0, false, false},
{NULL, "pcie_tbu_clk", 0, false, false},
{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
@@ -689,6 +791,7 @@
{NULL, "pcie_1_ldo", 0, false, true},
{NULL, "pcie_1_smmu_clk", 0, false, false},
{NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_1_sleep_clk", 0, false, false},
{NULL, "pcie_phy_refgen_clk", 0, false, false},
{NULL, "pcie_tbu_clk", 0, false, false},
{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
@@ -703,6 +806,7 @@
{NULL, "pcie_2_ldo", 0, false, true},
{NULL, "pcie_2_smmu_clk", 0, false, false},
{NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_2_sleep_clk", 0, false, false},
{NULL, "pcie_phy_refgen_clk", 0, false, false},
{NULL, "pcie_tbu_clk", 0, false, false},
{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
@@ -730,6 +834,7 @@
{"phy", NULL, NULL},
{"dm_core", NULL, NULL},
{"elbi", NULL, NULL},
+ {"iatu", NULL, NULL},
{"conf", NULL, NULL},
{"io", NULL, NULL},
{"bars", NULL, NULL},
@@ -766,36 +871,21 @@
};
static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l0s_disable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l0s_enable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1_disable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1_enable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1ss_disable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1ss_enable(struct pci_dev *dev, void *pdev);
static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
struct pci_dev *pdev, bool enable);
#ifdef CONFIG_ARM
-#define PCIE_BUS_PRIV_DATA(bus) \
- (((struct pci_sys_data *)bus->sysdata)->private_data)
-
-static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
-
-static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
-{
- msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
- msm_pcie_sys_data[dev->rc_idx].private_data = dev;
-
- return &msm_pcie_sys_data[dev->rc_idx];
-}
-
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
#else
-#define PCIE_BUS_PRIV_DATA(bus) \
- (struct msm_pcie_dev_t *)(bus->sysdata)
-
-static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
-{
- return dev;
-}
-
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
@@ -904,11 +994,7 @@
static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
{
- u32 pos = (dev->max_link_speed == GEN2_SPEED) ?
- PCIE_N_PCS_STATUS(dev->rc_idx) :
- PCIE_GEN3_PCIE_PHY_PCS_STATUS;
-
- if (readl_relaxed(dev->phy + pos) & BIT(6))
+ if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
return false;
else
return true;
@@ -1166,6 +1252,8 @@
dev->wr_halt_size);
PCIE_DBG_FS(dev, "slv_addr_space_size: 0x%x\n",
dev->slv_addr_space_size);
+ PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
+ dev->phy_status_offset);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
dev->cpl_timeout);
PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
@@ -1234,56 +1322,24 @@
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
u32 testcase)
{
- phys_addr_t dbi_base_addr =
- dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+ u32 dbi_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
phys_addr_t loopback_lbar_phy =
- dbi_base_addr + LOOPBACK_BASE_ADDR_OFFSET;
+ dev->res[MSM_PCIE_RES_DM_CORE].resource->start +
+ LOOPBACK_BASE_ADDR_OFFSET;
static uint32_t loopback_val = 0x1;
- static u64 loopback_ddr_phy;
+ static dma_addr_t loopback_ddr_phy;
static uint32_t *loopback_ddr_vir;
static void __iomem *loopback_lbar_vir;
int ret, i;
u32 base_sel_size = 0;
- u32 val = 0;
- u32 current_offset = 0;
- u32 ep_l1sub_ctrl1_offset = 0;
- u32 ep_l1sub_cap_reg1_offset = 0;
- u32 ep_link_ctrlstts_offset = 0;
- u32 ep_dev_ctrl2stts2_offset = 0;
-
- if (testcase >= 5 && testcase <= 10) {
- current_offset =
- readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
-
- while (current_offset) {
- val = readl_relaxed(dev->conf + current_offset);
- if ((val & 0xff) == PCIE20_CAP_ID) {
- ep_link_ctrlstts_offset = current_offset +
- 0x10;
- ep_dev_ctrl2stts2_offset = current_offset +
- 0x28;
- break;
- }
- current_offset = (val >> 8) & 0xff;
- }
-
- if (!ep_link_ctrlstts_offset)
- PCIE_DBG(dev,
- "RC%d endpoint does not support PCIe capability registers\n",
- dev->rc_idx);
- else
- PCIE_DBG(dev,
- "RC%d: ep_link_ctrlstts_offset: 0x%x\n",
- dev->rc_idx, ep_link_ctrlstts_offset);
- }
switch (testcase) {
- case 0: /* output status */
+ case MSM_PCIE_OUTPUT_PCIE_INFO:
PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
dev->rc_idx);
msm_pcie_show_status(dev);
break;
- case 1: /* disable link */
+ case MSM_PCIE_DISABLE_LINK:
PCIE_DBG_FS(dev,
"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
@@ -1296,7 +1352,7 @@
PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
__func__);
break;
- case 2: /* enable link and recover config space for RC and EP */
+ case MSM_PCIE_ENABLE_LINK:
PCIE_DBG_FS(dev,
"\n\nPCIe: RC%d: enable link and recover config space\n\n",
dev->rc_idx);
@@ -1311,10 +1367,7 @@
msm_pcie_recover_config(dev->dev);
}
break;
- case 3: /*
- * disable and enable link, recover config space for
- * RC and EP
- */
+ case MSM_PCIE_DISABLE_ENABLE_LINK:
PCIE_DBG_FS(dev,
"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
dev->rc_idx);
@@ -1337,7 +1390,7 @@
msm_pcie_recover_config(dev->dev);
}
break;
- case 4: /* dump shadow registers for RC and EP */
+ case MSM_PCIE_DUMP_SHADOW_REGISTER:
PCIE_DBG_FS(dev,
"\n\nPCIe: RC%d: dumping RC shadow registers\n",
dev->rc_idx);
@@ -1348,236 +1401,97 @@
dev->rc_idx);
msm_pcie_shadow_dump(dev, false);
break;
- case 5: /* disable L0s */
+ case MSM_PCIE_DISABLE_L0S:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
dev->rc_idx);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS,
- BIT(0), 0);
- msm_pcie_write_mask(dev->conf +
- ep_link_ctrlstts_offset,
- BIT(0), 0);
- if (dev->shadow_en) {
- dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS);
- dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset);
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ struct pci_bus *bus, *c_bus;
+ struct list_head *children = &dev->dev->bus->children;
+
+ msm_pcie_config_l0s_disable(dev->dev, dev);
+
+ list_for_each_entry_safe(bus, c_bus, children, node)
+ pci_walk_bus(bus,
+ &msm_pcie_config_l0s_disable, dev);
}
- PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS));
- PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset));
+ dev->l0s_supported = false;
break;
- case 6: /* enable L0s */
+ case MSM_PCIE_ENABLE_L0S:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
dev->rc_idx);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS,
- 0, BIT(0));
- msm_pcie_write_mask(dev->conf +
- ep_link_ctrlstts_offset,
- 0, BIT(0));
- if (dev->shadow_en) {
- dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS);
- dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset);
+ dev->l0s_supported = true;
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ struct pci_bus *bus, *c_bus;
+ struct list_head *children = &dev->dev->bus->children;
+
+ list_for_each_entry_safe(bus, c_bus, children, node)
+ pci_walk_bus(bus,
+ &msm_pcie_config_l0s_enable, dev);
+
+ msm_pcie_config_l0s_enable(dev->dev, dev);
}
- PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS));
- PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset));
break;
- case 7: /* disable L1 */
+ case MSM_PCIE_DISABLE_L1:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
dev->rc_idx);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS,
- BIT(1), 0);
- msm_pcie_write_mask(dev->conf +
- ep_link_ctrlstts_offset,
- BIT(1), 0);
- if (dev->shadow_en) {
- dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS);
- dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset);
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ struct pci_bus *bus, *c_bus;
+ struct list_head *children = &dev->dev->bus->children;
+
+ msm_pcie_config_l1_disable(dev->dev, dev);
+
+ list_for_each_entry_safe(bus, c_bus, children, node)
+ pci_walk_bus(bus,
+ &msm_pcie_config_l1_disable, dev);
}
- PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS));
- PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset));
+ dev->l1_supported = false;
break;
- case 8: /* enable L1 */
+ case MSM_PCIE_ENABLE_L1:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
dev->rc_idx);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS,
- 0, BIT(1));
- msm_pcie_write_mask(dev->conf +
- ep_link_ctrlstts_offset,
- 0, BIT(1));
- if (dev->shadow_en) {
- dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS);
- dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset);
+ dev->l1_supported = true;
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ struct pci_bus *bus, *c_bus;
+ struct list_head *children = &dev->dev->bus->children;
+
+ list_for_each_entry_safe(bus, c_bus, children, node)
+ pci_walk_bus(bus,
+ &msm_pcie_config_l1_enable, dev);
+
+ msm_pcie_config_l1_enable(dev->dev, dev);
}
- PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS));
- PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
- readl_relaxed(dev->conf +
- ep_link_ctrlstts_offset));
break;
- case 9: /* disable L1ss */
+ case MSM_PCIE_DISABLE_L1SS:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
dev->rc_idx);
- current_offset = PCIE_EXT_CAP_OFFSET;
- while (current_offset) {
- val = readl_relaxed(dev->conf + current_offset);
- if ((val & 0xffff) == L1SUB_CAP_ID) {
- ep_l1sub_ctrl1_offset =
- current_offset + 0x8;
- break;
- }
- current_offset = val >> 20;
- }
- if (!ep_l1sub_ctrl1_offset) {
- PCIE_DBG_FS(dev,
- "PCIe: RC%d endpoint does not support l1ss registers\n",
- dev->rc_idx);
- break;
- }
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ struct pci_bus *bus, *c_bus;
+ struct list_head *children = &dev->dev->bus->children;
- PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
- dev->rc_idx, ep_l1sub_ctrl1_offset);
+ msm_pcie_config_l1ss_disable(dev->dev, dev);
- msm_pcie_write_reg_field(dev->dm_core,
- PCIE20_L1SUB_CONTROL1,
- 0xf, 0);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_DEVICE_CONTROL2_STATUS2,
- BIT(10), 0);
- msm_pcie_write_reg_field(dev->conf,
- ep_l1sub_ctrl1_offset,
- 0xf, 0);
- msm_pcie_write_mask(dev->conf +
- ep_dev_ctrl2stts2_offset,
- BIT(10), 0);
- if (dev->shadow_en) {
- dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_L1SUB_CONTROL1);
- dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_DEVICE_CONTROL2_STATUS2);
- dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
- readl_relaxed(dev->conf +
- ep_l1sub_ctrl1_offset);
- dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
- readl_relaxed(dev->conf +
- ep_dev_ctrl2stts2_offset);
+ list_for_each_entry_safe(bus, c_bus, children, node)
+ pci_walk_bus(bus,
+ &msm_pcie_config_l1ss_disable, dev);
}
- PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_L1SUB_CONTROL1));
- PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_DEVICE_CONTROL2_STATUS2));
- PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
- readl_relaxed(dev->conf +
- ep_l1sub_ctrl1_offset));
- PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
- readl_relaxed(dev->conf +
- ep_dev_ctrl2stts2_offset));
+ dev->l1ss_supported = false;
break;
- case 10: /* enable L1ss */
+ case MSM_PCIE_ENABLE_L1SS:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
dev->rc_idx);
- current_offset = PCIE_EXT_CAP_OFFSET;
- while (current_offset) {
- val = readl_relaxed(dev->conf + current_offset);
- if ((val & 0xffff) == L1SUB_CAP_ID) {
- ep_l1sub_cap_reg1_offset =
- current_offset + 0x4;
- ep_l1sub_ctrl1_offset =
- current_offset + 0x8;
- break;
- }
- current_offset = val >> 20;
+ dev->l1ss_supported = true;
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ struct pci_bus *bus, *c_bus;
+ struct list_head *children = &dev->dev->bus->children;
+
+ list_for_each_entry_safe(bus, c_bus, children, node)
+ pci_walk_bus(bus,
+ &msm_pcie_config_l1ss_enable, dev);
+
+ msm_pcie_config_l1ss_enable(dev->dev, dev);
}
- if (!ep_l1sub_ctrl1_offset) {
- PCIE_DBG_FS(dev,
- "PCIe: RC%d endpoint does not support l1ss registers\n",
- dev->rc_idx);
- break;
- }
-
- val = readl_relaxed(dev->conf +
- ep_l1sub_cap_reg1_offset);
-
- PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
- val);
- PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
- dev->rc_idx, ep_l1sub_ctrl1_offset);
-
- val &= 0xf;
-
- msm_pcie_write_reg_field(dev->dm_core,
- PCIE20_L1SUB_CONTROL1,
- 0xf, val);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_DEVICE_CONTROL2_STATUS2,
- 0, BIT(10));
- msm_pcie_write_reg_field(dev->conf,
- ep_l1sub_ctrl1_offset,
- 0xf, val);
- msm_pcie_write_mask(dev->conf +
- ep_dev_ctrl2stts2_offset,
- 0, BIT(10));
- if (dev->shadow_en) {
- dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_L1SUB_CONTROL1);
- dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
- readl_relaxed(dev->dm_core +
- PCIE20_DEVICE_CONTROL2_STATUS2);
- dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
- readl_relaxed(dev->conf +
- ep_l1sub_ctrl1_offset);
- dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
- readl_relaxed(dev->conf +
- ep_dev_ctrl2stts2_offset);
- }
- PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_L1SUB_CONTROL1));
- PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
- readl_relaxed(dev->dm_core +
- PCIE20_DEVICE_CONTROL2_STATUS2));
- PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
- readl_relaxed(dev->conf +
- ep_l1sub_ctrl1_offset));
- PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
- readl_relaxed(dev->conf +
- ep_dev_ctrl2stts2_offset));
break;
- case 11: /* enumerate PCIe */
+ case MSM_PCIE_ENUMERATION:
PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
dev->rc_idx);
if (dev->enumerated)
@@ -1594,7 +1508,41 @@
dev->rc_idx);
}
break;
- case 12: /* write a value to a register */
+ case MSM_PCIE_READ_PCIE_REGISTER:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: read a PCIe register\n\n",
+ dev->rc_idx);
+ if (!base_sel) {
+ PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+ break;
+ }
+
+ PCIE_DBG_FS(dev, "base: %s: 0x%pK\nwr_offset: 0x%x\n",
+ dev->res[base_sel - 1].name,
+ dev->res[base_sel - 1].base,
+ wr_offset);
+
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset)) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ } else {
+ phys_addr_t wr_register =
+ dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+
+ wr_register += wr_offset;
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: register: 0x%pa value: 0x%x\n",
+ dev->rc_idx, &wr_register,
+ readl_relaxed(dev->res[base_sel - 1].base +
+ wr_offset));
+ }
+
+ break;
+ case MSM_PCIE_WRITE_PCIE_REGISTER:
PCIE_DBG_FS(dev,
"\n\nPCIe: RC%d: writing a value to a register\n\n",
dev->rc_idx);
@@ -1605,7 +1553,7 @@
}
PCIE_DBG_FS(dev,
- "base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
+ "base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
dev->res[base_sel - 1].name,
dev->res[base_sel - 1].base,
wr_offset, wr_mask, wr_value);
@@ -1622,7 +1570,7 @@
wr_offset, wr_mask, wr_value);
break;
- case 13: /* dump all registers of base_sel */
+ case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
if (!base_sel) {
PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
break;
@@ -1655,7 +1603,7 @@
readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
}
break;
- case 14:
+ case MSM_PCIE_ALLOCATE_DDR_MAP_LBAR:
PCIE_DBG_FS(dev,
"PCIe: RC%d: Allocate 4K DDR memory and map LBAR.\n",
dev->rc_idx);
@@ -1671,25 +1619,25 @@
"PCIe: RC%d: VIR DDR memory address: 0x%pK\n",
dev->rc_idx, loopback_ddr_vir);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PHY DDR memory address: 0x%llx\n",
- dev->rc_idx, loopback_ddr_phy);
+ "PCIe: RC%d: PHY DDR memory address: %pad\n",
+ dev->rc_idx, &loopback_ddr_phy);
}
- PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: 0x%llx\n",
- dev->rc_idx, loopback_lbar_phy);
+ PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: %pa\n",
+ dev->rc_idx, &loopback_lbar_phy);
loopback_lbar_vir = devm_ioremap(&dev->pdev->dev,
loopback_lbar_phy, SZ_4K);
if (!loopback_lbar_vir) {
- PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map 0x%llx\n",
- dev->rc_idx, loopback_lbar_phy);
+ PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map %pa\n",
+ dev->rc_idx, &loopback_lbar_phy);
} else {
PCIE_DBG_FS(dev,
- "PCIe: RC%d: successfully mapped 0x%llx to 0x%pK\n",
- dev->rc_idx, loopback_lbar_phy,
+ "PCIe: RC%d: successfully mapped %pa to 0x%pK\n",
+ dev->rc_idx, &loopback_lbar_phy,
loopback_lbar_vir);
}
break;
- case 15:
+ case MSM_PCIE_FREE_DDR_UNMAP_LBAR:
PCIE_DBG_FS(dev,
"PCIe: RC%d: Release 4K DDR memory and unmap LBAR.\n",
dev->rc_idx);
@@ -1706,7 +1654,7 @@
loopback_lbar_vir = NULL;
}
break;
- case 16:
+ case MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS:
PCIE_DBG_FS(dev,
"PCIe: RC%d: Print DDR and LBAR addresses.\n",
dev->rc_idx);
@@ -1719,19 +1667,19 @@
}
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PHY DDR address: 0x%llx\n",
- dev->rc_idx, loopback_ddr_phy);
+ "PCIe: RC%d: PHY DDR address: %pad\n",
+ dev->rc_idx, &loopback_ddr_phy);
PCIE_DBG_FS(dev,
"PCIe: RC%d: VIR DDR address: 0x%pK\n",
dev->rc_idx, loopback_ddr_vir);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PHY LBAR address: 0x%llx\n",
- dev->rc_idx, loopback_lbar_phy);
+ "PCIe: RC%d: PHY LBAR address: %pa\n",
+ dev->rc_idx, &loopback_lbar_phy);
PCIE_DBG_FS(dev,
"PCIe: RC%d: VIR LBAR address: 0x%pK\n",
dev->rc_idx, loopback_lbar_vir);
break;
- case 17:
+ case MSM_PCIE_CONFIGURE_LOOPBACK:
PCIE_DBG_FS(dev,
"PCIe: RC%d: Configure Loopback.\n",
dev->rc_idx);
@@ -1739,7 +1687,7 @@
writel_relaxed(0x10000,
dev->dm_core + PCIE20_GEN3_RELATED_REG);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: 0x%llx: 0x%x\n",
+ "PCIe: RC%d: 0x%x: 0x%x\n",
dev->rc_idx,
dbi_base_addr + PCIE20_GEN3_RELATED_REG,
readl_relaxed(dev->dm_core +
@@ -1748,7 +1696,7 @@
writel_relaxed(0x80000001,
dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: 0x%llx: 0x%x\n",
+ "PCIe: RC%d: 0x%x: 0x%x\n",
dev->rc_idx,
dbi_base_addr + PCIE20_PIPE_LOOPBACK_CONTROL,
readl_relaxed(dev->dm_core +
@@ -1757,13 +1705,13 @@
writel_relaxed(0x00010124,
dev->dm_core + PCIE20_PORT_LINK_CTRL_REG);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: 0x%llx: 0x%x\n",
+ "PCIe: RC%d: 0x%x: 0x%x\n",
dev->rc_idx,
dbi_base_addr + PCIE20_PORT_LINK_CTRL_REG,
readl_relaxed(dev->dm_core +
PCIE20_PORT_LINK_CTRL_REG));
break;
- case 18:
+ case MSM_PCIE_SETUP_LOOPBACK_IATU:
PCIE_DBG_FS(dev, "PCIe: RC%d: Setup iATU.\n", dev->rc_idx);
if (!loopback_ddr_vir) {
@@ -1775,57 +1723,57 @@
writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_VIEWPORT);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_VIEWPORT,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_CTRL1);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL1,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
writel_relaxed(loopback_lbar_phy,
dev->dm_core + PCIE20_PLR_IATU_LBAR);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LBAR,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_UBAR);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UBAR,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
writel_relaxed(loopback_lbar_phy + 0xfff,
dev->dm_core + PCIE20_PLR_IATU_LAR);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LAR,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
writel_relaxed(loopback_ddr_phy,
dev->dm_core + PCIE20_PLR_IATU_LTAR);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LTAR,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
writel_relaxed(0, dev->dm_core + PCIE20_PLR_IATU_UTAR);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UTAR,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
writel_relaxed(0x80000000,
dev->dm_core + PCIE20_PLR_IATU_CTRL2);
PCIE_DBG_FS(dev,
- "PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%llx: 0x%x\n",
+ "PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%x: 0x%x\n",
dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL2,
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
break;
- case 19:
+ case MSM_PCIE_READ_DDR:
PCIE_DBG_FS(dev,
"PCIe: RC%d: Read DDR values.\n",
dev->rc_idx);
@@ -1851,7 +1799,7 @@
loopback_ddr_vir[i + 7]);
}
break;
- case 20:
+ case MSM_PCIE_READ_LBAR:
PCIE_DBG_FS(dev,
"PCIe: RC%d: Read LBAR values.\n",
dev->rc_idx);
@@ -1877,7 +1825,7 @@
readl_relaxed(loopback_lbar_vir + (i + 28)));
}
break;
- case 21:
+ case MSM_PCIE_WRITE_DDR:
PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to DDR.\n",
dev->rc_idx, loopback_val);
@@ -1896,7 +1844,7 @@
else
loopback_val++;
break;
- case 22:
+ case MSM_PCIE_WRITE_LBAR:
PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to LBAR.\n",
dev->rc_idx, loopback_val);
@@ -1931,6 +1879,53 @@
else
loopback_val++;
break;
+ case MSM_PCIE_DISABLE_AER:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: clear AER enable flag\n\n",
+ dev->rc_idx);
+ dev->aer_enable = false;
+ break;
+ case MSM_PCIE_ENABLE_AER:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set AER enable flag\n\n",
+ dev->rc_idx);
+ dev->aer_enable = true;
+ break;
+ case MSM_PCIE_GPIO_STATUS:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: PERST and WAKE status\n\n",
+ dev->rc_idx);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PERST: gpio%u value: %d\n",
+ dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ gpio_get_value(dev->gpio[MSM_PCIE_GPIO_PERST].num));
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: WAKE: gpio%u value: %d\n",
+ dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_WAKE].num,
+ gpio_get_value(dev->gpio[MSM_PCIE_GPIO_WAKE].num));
+ break;
+ case MSM_PCIE_ASSERT_PERST:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+ break;
+ case MSM_PCIE_DEASSERT_PERST:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+ break;
+ case MSM_PCIE_KEEP_RESOURCES_ON:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: set keep resources on flag\n\n",
+ dev->rc_idx);
+ msm_pcie_keep_resources_on |= BIT(dev->rc_idx);
+ break;
+ case MSM_PCIE_FORCE_GEN1:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set force gen1 flag\n\n",
+ dev->rc_idx);
+ msm_pcie_force_gen1 |= BIT(dev->rc_idx);
+ break;
default:
PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
break;
@@ -1948,8 +1943,10 @@
return -ENODEV;
}
- if (option == 12 || option == 13) {
- if (!base || base > 5) {
+ if (option == MSM_PCIE_READ_PCIE_REGISTER ||
+ option == MSM_PCIE_WRITE_PCIE_REGISTER ||
+ option == MSM_PCIE_DUMP_PCIE_REGISTER_SPACE) {
+ if (!base || base >= MSM_PCIE_MAX_RES) {
PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
PCIE_DBG_FS(pdev,
"PCIe: base_sel is still 0x%x\n", base_sel);
@@ -1959,7 +1956,8 @@
base_sel = base;
PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
- if (option == 12) {
+ if (option == MSM_PCIE_READ_PCIE_REGISTER ||
+ option == MSM_PCIE_WRITE_PCIE_REGISTER) {
wr_offset = offset;
wr_mask = mask;
wr_value = value;
@@ -1974,7 +1972,7 @@
}
pdev = PCIE_BUS_PRIV_DATA(dev->bus);
- rc_sel = 1 << pdev->rc_idx;
+ rc_sel = BIT(pdev->rc_idx);
msm_pcie_sel_debug_testcase(pdev, option);
@@ -2039,59 +2037,87 @@
static u32 rc_sel_max;
-static ssize_t msm_pcie_cmd_debug(struct file *file,
+static int msm_pcie_debugfs_parse_input(const char __user *buf,
+ size_t count, unsigned int *data)
+{
+ unsigned long ret;
+ char *str, *str_temp;
+
+ str = kmalloc(count + 1, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ ret = copy_from_user(str, buf, count);
+ if (ret) {
+ kfree(str);
+ return -EFAULT;
+ }
+
+ str[count] = 0;
+ str_temp = str;
+
+ ret = get_option(&str_temp, data);
+ kfree(str);
+ if (ret != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int msm_pcie_debugfs_case_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ for (i = 0; i < MSM_PCIE_MAX_DEBUGFS_OPTION; i++)
+ seq_printf(m, "\t%d:\t %s\n", i,
+ msm_pcie_debugfs_option_desc[i]);
+
+ return 0;
+}
+
+static int msm_pcie_debugfs_case_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_pcie_debugfs_case_show, NULL);
+}
+
+static ssize_t msm_pcie_debugfs_case_select(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
+ int i, ret;
unsigned int testcase = 0;
- int i;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
+ ret = msm_pcie_debugfs_parse_input(buf, count, &testcase);
if (ret)
- return -EFAULT;
-
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- testcase = (testcase * 10) + (str[i] - '0');
-
- if (!rc_sel)
- rc_sel = 1;
+ return ret;
pr_alert("PCIe: TEST: %d\n", testcase);
for (i = 0; i < MAX_RC_NUM; i++) {
- if (!((rc_sel >> i) & 0x1))
- continue;
- msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
+ if (rc_sel & BIT(i))
+ msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
}
return count;
}
-static const struct file_operations msm_pcie_cmd_debug_ops = {
- .write = msm_pcie_cmd_debug,
+static const struct file_operations msm_pcie_debugfs_case_ops = {
+ .open = msm_pcie_debugfs_case_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = msm_pcie_debugfs_case_select,
};
-static ssize_t msm_pcie_set_rc_sel(struct file *file,
+static ssize_t msm_pcie_debugfs_rc_select(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
- int i;
+ int i, ret;
u32 new_rc_sel = 0;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_rc_sel);
if (ret)
- return -EFAULT;
-
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
+ return ret;
if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
@@ -2102,232 +2128,151 @@
}
pr_alert("PCIe: the following RC(s) will be tested:\n");
- for (i = 0; i < MAX_RC_NUM; i++) {
- if (!rc_sel) {
+ for (i = 0; i < MAX_RC_NUM; i++)
+ if (rc_sel & BIT(i))
pr_alert("RC %d\n", i);
- break;
- } else if (rc_sel & (1 << i)) {
- pr_alert("RC %d\n", i);
- }
- }
return count;
}
-static const struct file_operations msm_pcie_rc_sel_ops = {
- .write = msm_pcie_set_rc_sel,
+static const struct file_operations msm_pcie_debugfs_rc_select_ops = {
+ .write = msm_pcie_debugfs_rc_select,
};
-static ssize_t msm_pcie_set_base_sel(struct file *file,
+static ssize_t msm_pcie_debugfs_base_select(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
- int i;
+ int ret;
u32 new_base_sel = 0;
- char *base_sel_name;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_base_sel);
if (ret)
- return -EFAULT;
+ return ret;
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- new_base_sel = (new_base_sel * 10) + (str[i] - '0');
-
- if (!new_base_sel || new_base_sel > 5) {
+ if (!new_base_sel || new_base_sel > MSM_PCIE_MAX_RES) {
pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
new_base_sel);
pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
} else {
base_sel = new_base_sel;
pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
+ pr_alert("%s\n", msm_pcie_res_info[base_sel - 1].name);
}
- switch (base_sel) {
- case 1:
- base_sel_name = "PARF";
- break;
- case 2:
- base_sel_name = "PHY";
- break;
- case 3:
- base_sel_name = "RC CONFIG SPACE";
- break;
- case 4:
- base_sel_name = "ELBI";
- break;
- case 5:
- base_sel_name = "EP CONFIG SPACE";
- break;
- default:
- base_sel_name = "INVALID";
- break;
- }
-
- pr_alert("%s\n", base_sel_name);
-
return count;
}
-static const struct file_operations msm_pcie_base_sel_ops = {
- .write = msm_pcie_set_base_sel,
+static const struct file_operations msm_pcie_debugfs_base_select_ops = {
+ .write = msm_pcie_debugfs_base_select,
};
-static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
+static ssize_t msm_pcie_debugfs_linkdown_panic(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
+ int i, ret;
u32 new_linkdown_panic = 0;
- int i;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_linkdown_panic);
if (ret)
- return -EFAULT;
+ return ret;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
- new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
+ new_linkdown_panic = !!new_linkdown_panic;
- if (new_linkdown_panic <= 1) {
- for (i = 0; i < MAX_RC_NUM; i++) {
- if (!rc_sel) {
- msm_pcie_dev[0].linkdown_panic =
- new_linkdown_panic;
- PCIE_DBG_FS(&msm_pcie_dev[0],
- "PCIe: RC0: linkdown_panic is now %d\n",
- msm_pcie_dev[0].linkdown_panic);
- break;
- } else if (rc_sel & (1 << i)) {
- msm_pcie_dev[i].linkdown_panic =
- new_linkdown_panic;
- PCIE_DBG_FS(&msm_pcie_dev[i],
- "PCIe: RC%d: linkdown_panic is now %d\n",
- i, msm_pcie_dev[i].linkdown_panic);
- }
+ for (i = 0; i < MAX_RC_NUM; i++) {
+ if (rc_sel & BIT(i)) {
+ msm_pcie_dev[i].linkdown_panic =
+ new_linkdown_panic;
+ PCIE_DBG_FS(&msm_pcie_dev[i],
+ "PCIe: RC%d: linkdown_panic is now %d\n",
+ i, msm_pcie_dev[i].linkdown_panic);
}
- } else {
- pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
- new_linkdown_panic);
}
return count;
}
-static const struct file_operations msm_pcie_linkdown_panic_ops = {
- .write = msm_pcie_set_linkdown_panic,
+static const struct file_operations msm_pcie_debugfs_linkdown_panic_ops = {
+ .write = msm_pcie_debugfs_linkdown_panic,
};
-static ssize_t msm_pcie_set_wr_offset(struct file *file,
+static ssize_t msm_pcie_debugfs_wr_offset(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
- int i;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
-
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
- if (ret)
- return -EFAULT;
+ int ret;
wr_offset = 0;
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- wr_offset = (wr_offset * 10) + (str[i] - '0');
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &wr_offset);
+ if (ret)
+ return ret;
pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
return count;
}
-static const struct file_operations msm_pcie_wr_offset_ops = {
- .write = msm_pcie_set_wr_offset,
+static const struct file_operations msm_pcie_debugfs_wr_offset_ops = {
+ .write = msm_pcie_debugfs_wr_offset,
};
-static ssize_t msm_pcie_set_wr_mask(struct file *file,
+static ssize_t msm_pcie_debugfs_wr_mask(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
- int i;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
-
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
- if (ret)
- return -EFAULT;
+ int ret;
wr_mask = 0;
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- wr_mask = (wr_mask * 10) + (str[i] - '0');
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &wr_mask);
+ if (ret)
+ return ret;
pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
return count;
}
-static const struct file_operations msm_pcie_wr_mask_ops = {
- .write = msm_pcie_set_wr_mask,
+static const struct file_operations msm_pcie_debugfs_wr_mask_ops = {
+ .write = msm_pcie_debugfs_wr_mask,
};
-static ssize_t msm_pcie_set_wr_value(struct file *file,
+static ssize_t msm_pcie_debugfs_wr_value(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
- int i;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
-
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
- if (ret)
- return -EFAULT;
+ int ret;
wr_value = 0;
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- wr_value = (wr_value * 10) + (str[i] - '0');
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &wr_value);
+ if (ret)
+ return ret;
pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
return count;
}
-static const struct file_operations msm_pcie_wr_value_ops = {
- .write = msm_pcie_set_wr_value,
+static const struct file_operations msm_pcie_debugfs_wr_value_ops = {
+ .write = msm_pcie_debugfs_wr_value,
};
-static ssize_t msm_pcie_set_boot_option(struct file *file,
+static ssize_t msm_pcie_debugfs_boot_option(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
+ int i, ret;
u32 new_boot_option = 0;
- int i;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_boot_option);
if (ret)
- return -EFAULT;
+ return ret;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
- new_boot_option = (new_boot_option * 10) + (str[i] - '0');
-
- if (new_boot_option <= 1) {
+ if (new_boot_option <= (BIT(0) | BIT(1))) {
for (i = 0; i < MAX_RC_NUM; i++) {
- if (!rc_sel) {
- msm_pcie_dev[0].boot_option = new_boot_option;
- PCIE_DBG_FS(&msm_pcie_dev[0],
- "PCIe: RC0: boot_option is now 0x%x\n",
- msm_pcie_dev[0].boot_option);
- break;
- } else if (rc_sel & (1 << i)) {
+ if (rc_sel & BIT(i)) {
msm_pcie_dev[i].boot_option = new_boot_option;
PCIE_DBG_FS(&msm_pcie_dev[i],
"PCIe: RC%d: boot_option is now 0x%x\n",
@@ -2342,42 +2287,25 @@
return count;
}
-static const struct file_operations msm_pcie_boot_option_ops = {
- .write = msm_pcie_set_boot_option,
+static const struct file_operations msm_pcie_debugfs_boot_option_ops = {
+ .write = msm_pcie_debugfs_boot_option,
};
-static ssize_t msm_pcie_set_aer_enable(struct file *file,
+static ssize_t msm_pcie_debugfs_aer_enable(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
+ int i, ret;
u32 new_aer_enable = 0;
- u32 temp_rc_sel;
- int i;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_aer_enable);
if (ret)
- return -EFAULT;
+ return ret;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
- new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
-
- if (new_aer_enable > 1) {
- pr_err(
- "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
- new_aer_enable);
- return count;
- }
-
- if (rc_sel)
- temp_rc_sel = rc_sel;
- else
- temp_rc_sel = 0x1;
+ new_aer_enable = !!new_aer_enable;
for (i = 0; i < MAX_RC_NUM; i++) {
- if (temp_rc_sel & (1 << i)) {
+ if (rc_sel & BIT(i)) {
msm_pcie_dev[i].aer_enable = new_aer_enable;
PCIE_DBG_FS(&msm_pcie_dev[i],
"PCIe: RC%d: aer_enable is now %d\n",
@@ -2398,35 +2326,29 @@
return count;
}
-static const struct file_operations msm_pcie_aer_enable_ops = {
- .write = msm_pcie_set_aer_enable,
+static const struct file_operations msm_pcie_debugfs_aer_enable_ops = {
+ .write = msm_pcie_debugfs_aer_enable,
};
-static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
+static ssize_t msm_pcie_debugfs_corr_counter_limit(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ret;
- char str[MAX_MSG_LEN];
- int i;
- u32 size = sizeof(str) < count ? sizeof(str) : count;
-
- memset(str, 0, size);
- ret = copy_from_user(str, buf, size);
- if (ret)
- return -EFAULT;
+ int ret;
corr_counter_limit = 0;
- for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
- corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
- pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
+ ret = msm_pcie_debugfs_parse_input(buf, count, &corr_counter_limit);
+ if (ret)
+ return ret;
+
+ pr_info("PCIe: corr_counter_limit is now %u\n", corr_counter_limit);
return count;
}
-static const struct file_operations msm_pcie_corr_counter_limit_ops = {
- .write = msm_pcie_set_corr_counter_limit,
+static const struct file_operations msm_pcie_debugfs_corr_counter_limit_ops = {
+ .write = msm_pcie_debugfs_corr_counter_limit,
};
static void msm_pcie_debugfs_init(void)
@@ -2442,7 +2364,7 @@
dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_rc_sel_ops);
+ &msm_pcie_debugfs_rc_select_ops);
if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
goto rc_sel_error;
@@ -2450,7 +2372,7 @@
dfile_case = debugfs_create_file("case", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_cmd_debug_ops);
+ &msm_pcie_debugfs_case_ops);
if (!dfile_case || IS_ERR(dfile_case)) {
pr_err("PCIe: fail to create the file for debug_fs case.\n");
goto case_error;
@@ -2458,7 +2380,7 @@
dfile_base_sel = debugfs_create_file("base_sel", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_base_sel_ops);
+ &msm_pcie_debugfs_base_select_ops);
if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
goto base_sel_error;
@@ -2466,7 +2388,7 @@
dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
dent_msm_pcie, NULL,
- &msm_pcie_linkdown_panic_ops);
+ &msm_pcie_debugfs_linkdown_panic_ops);
if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
goto linkdown_panic_error;
@@ -2474,7 +2396,7 @@
dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_wr_offset_ops);
+ &msm_pcie_debugfs_wr_offset_ops);
if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
goto wr_offset_error;
@@ -2482,7 +2404,7 @@
dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_wr_mask_ops);
+ &msm_pcie_debugfs_wr_mask_ops);
if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
goto wr_mask_error;
@@ -2490,7 +2412,7 @@
dfile_wr_value = debugfs_create_file("wr_value", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_wr_value_ops);
+ &msm_pcie_debugfs_wr_value_ops);
if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
goto wr_value_error;
@@ -2498,7 +2420,7 @@
dfile_boot_option = debugfs_create_file("boot_option", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_boot_option_ops);
+ &msm_pcie_debugfs_boot_option_ops);
if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
goto boot_option_error;
@@ -2506,15 +2428,15 @@
dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
dent_msm_pcie, NULL,
- &msm_pcie_aer_enable_ops);
+ &msm_pcie_debugfs_aer_enable_ops);
if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
goto aer_enable_error;
}
dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
- 0664, dent_msm_pcie, NULL,
- &msm_pcie_corr_counter_limit_ops);
+ 0664, dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_corr_counter_limit_ops);
if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
goto corr_counter_limit_error;
@@ -2586,9 +2508,38 @@
unsigned long host_addr, u32 host_end,
unsigned long target_addr)
{
- void __iomem *pcie20 = dev->dm_core;
+ void __iomem *iatu_base = dev->iatu ? dev->iatu : dev->dm_core;
- if (dev->shadow_en) {
+ u32 iatu_viewport_offset;
+ u32 iatu_ctrl1_offset;
+ u32 iatu_ctrl2_offset;
+ u32 iatu_lbar_offset;
+ u32 iatu_ubar_offset;
+ u32 iatu_lar_offset;
+ u32 iatu_ltar_offset;
+ u32 iatu_utar_offset;
+
+ if (dev->iatu) {
+ iatu_viewport_offset = 0;
+ iatu_ctrl1_offset = PCIE_IATU_CTRL1(nr);
+ iatu_ctrl2_offset = PCIE_IATU_CTRL2(nr);
+ iatu_lbar_offset = PCIE_IATU_LBAR(nr);
+ iatu_ubar_offset = PCIE_IATU_UBAR(nr);
+ iatu_lar_offset = PCIE_IATU_LAR(nr);
+ iatu_ltar_offset = PCIE_IATU_LTAR(nr);
+ iatu_utar_offset = PCIE_IATU_UTAR(nr);
+ } else {
+ iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
+ iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
+ iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
+ iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
+ iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
+ iatu_lar_offset = PCIE20_PLR_IATU_LAR;
+ iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
+ iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
+ }
+
+ if (dev->shadow_en && iatu_viewport_offset) {
dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
nr;
dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
@@ -2608,28 +2559,30 @@
}
/* select region */
- writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
- /* ensure that hardware locks it */
- wmb();
+ if (iatu_viewport_offset) {
+ writel_relaxed(nr, iatu_base + iatu_viewport_offset);
+ /* ensure that hardware locks it */
+ wmb();
+ }
/* switch off region before changing it */
- writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
+ writel_relaxed(0, iatu_base + iatu_ctrl2_offset);
/* and wait till it propagates to the hardware */
wmb();
- writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
+ writel_relaxed(type, iatu_base + iatu_ctrl1_offset);
writel_relaxed(lower_32_bits(host_addr),
- pcie20 + PCIE20_PLR_IATU_LBAR);
+ iatu_base + iatu_lbar_offset);
writel_relaxed(upper_32_bits(host_addr),
- pcie20 + PCIE20_PLR_IATU_UBAR);
- writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
+ iatu_base + iatu_ubar_offset);
+ writel_relaxed(host_end, iatu_base + iatu_lar_offset);
writel_relaxed(lower_32_bits(target_addr),
- pcie20 + PCIE20_PLR_IATU_LTAR);
+ iatu_base + iatu_ltar_offset);
writel_relaxed(upper_32_bits(target_addr),
- pcie20 + PCIE20_PLR_IATU_UTAR);
+ iatu_base + iatu_utar_offset);
/* ensure that changes propagated to the hardware */
wmb();
- writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);
+ writel_relaxed(BIT(31), iatu_base + iatu_ctrl2_offset);
/* ensure that changes propagated to the hardware */
wmb();
@@ -2639,22 +2592,24 @@
dev->pcidev_table[nr].bdf >> 24,
dev->pcidev_table[nr].bdf >> 19 & 0x1f,
dev->pcidev_table[nr].bdf >> 16 & 0x07);
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
- PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
- readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
+ if (iatu_viewport_offset)
+ PCIE_DBG2(dev, "IATU_VIEWPORT:0x%x\n",
+ readl_relaxed(dev->dm_core +
+ PCIE20_PLR_IATU_VIEWPORT));
+ PCIE_DBG2(dev, "IATU_CTRL1:0x%x\n",
+ readl_relaxed(iatu_base + iatu_ctrl1_offset));
+ PCIE_DBG2(dev, "IATU_LBAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_lbar_offset));
+ PCIE_DBG2(dev, "IATU_UBAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_ubar_offset));
+ PCIE_DBG2(dev, "IATU_LAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_lar_offset));
+ PCIE_DBG2(dev, "IATU_LTAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_ltar_offset));
+ PCIE_DBG2(dev, "IATU_UTAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_utar_offset));
+ PCIE_DBG2(dev, "IATU_CTRL2:0x%x\n\n",
+ readl_relaxed(iatu_base + iatu_ctrl2_offset));
}
}
@@ -3385,8 +3340,8 @@
PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
- cnt = of_property_count_strings((&pdev->dev)->of_node,
- "clock-names");
+ cnt = of_property_count_elems_of_size((&pdev->dev)->of_node,
+ "max-clock-frequency-hz", sizeof(u32));
if (cnt > 0) {
clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
sizeof(*clkfreq), GFP_KERNEL);
@@ -3704,6 +3659,7 @@
dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
+ dev->iatu = dev->res[MSM_PCIE_RES_IATU].base;
dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
@@ -3724,6 +3680,7 @@
{
dev->parf = NULL;
dev->elbi = NULL;
+ dev->iatu = NULL;
dev->dm_core = NULL;
dev->conf = NULL;
dev->bars = NULL;
@@ -3769,6 +3726,7 @@
long int retries = 0;
int link_check_count = 0;
unsigned long ep_up_timeout = 0;
+ u32 link_check_max_count;
PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
@@ -3935,6 +3893,11 @@
if (dev->max_link_speed == GEN3_SPEED)
msm_pcie_setup_gen3(dev);
+ if (msm_pcie_force_gen1 & BIT(dev->rc_idx))
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_CAP + PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCAP_SLS, GEN1_SPEED);
+
/* set max tlp read size */
msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
0x7000, dev->tlp_rd_size);
@@ -3944,6 +3907,11 @@
PCIE_DBG(dev, "%s", "check if link is up\n");
+ if (msm_pcie_link_check_max_count & BIT(dev->rc_idx))
+ link_check_max_count = msm_pcie_link_check_max_count >> 4;
+ else
+ link_check_max_count = LINK_UP_CHECK_MAX_COUNT;
+
/* Wait for up to 100ms for the link to come up */
do {
usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
@@ -3952,7 +3920,7 @@
dev->rc_idx, (val >> 12) & 0x3f);
} while ((!(val & XMLH_LINK_UP) ||
!msm_pcie_confirm_linkup(dev, false, false, NULL))
- && (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
+ && (link_check_count++ < link_check_max_count));
if ((val & XMLH_LINK_UP) &&
msm_pcie_confirm_linkup(dev, false, false, NULL)) {
@@ -4328,9 +4296,7 @@
}
bus = pci_create_root_bus(&dev->pdev->dev, 0,
- &msm_pcie_ops,
- msm_pcie_setup_sys_data(dev),
- &res);
+ &msm_pcie_ops, dev, &res);
if (!bus) {
PCIE_ERR(dev,
"PCIe: failed to create root bus for RC%d\n",
@@ -4342,7 +4308,7 @@
scan_ret = pci_scan_child_bus(bus);
PCIE_DBG(dev,
"PCIe: RC%d: The max subordinate bus number discovered is %d\n",
- dev->rc_idx, ret);
+ dev->rc_idx, scan_ret);
msm_pcie_fixup_irqs(dev);
pci_assign_unassigned_bus_resources(bus);
@@ -4833,7 +4799,8 @@
}
static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
- struct pci_dev *pdev)
+ struct pci_dev *pdev,
+ struct msi_desc *entry)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
int bypass_en = 0;
@@ -4847,30 +4814,20 @@
iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
if (!bypass_en) {
- int ret;
- phys_addr_t pcie_base_addr =
- dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
- dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
+ dma_addr_t iova = entry->msg.address_lo;
- ret = iommu_unmap(domain, iova, PAGE_SIZE);
- if (ret != PAGE_SIZE)
- PCIE_ERR(dev,
- "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
- dev->rc_idx, ret);
+ PCIE_DBG(dev, "PCIe: RC%d: unmap QGIC MSI IOVA\n", dev->rc_idx);
+
+ dma_unmap_resource(&pdev->dev, iova, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, 0);
}
}
-static void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
+static void msm_pcie_destroy_irq(struct msi_desc *entry, unsigned int irq)
{
int pos;
- struct msi_desc *entry = irq_get_msi_desc(irq);
- struct msi_desc *firstentry;
struct msm_pcie_dev_t *dev;
- u32 nvec;
- int firstirq;
-
- if (!pdev)
- pdev = irq_get_chip_data(irq);
+ struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
if (!pdev) {
pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
@@ -4883,24 +4840,10 @@
return;
}
- if (!entry) {
- PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
- dev->rc_idx, irq);
- return;
- }
-
- firstentry = first_pci_msi_entry(pdev);
- if (!firstentry) {
- PCIE_ERR(dev,
- "PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
- dev->rc_idx, irq);
- return;
- }
-
- firstirq = firstentry->irq;
- nvec = (1 << entry->msi_attrib.multiple);
-
if (dev->msi_gicm_addr) {
+ int firstirq = entry->irq;
+ u32 nvec = (1 << entry->msi_attrib.multiple);
+
PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
if (irq < firstirq || irq > firstirq + nvec - 1) {
@@ -4910,7 +4853,7 @@
return;
}
if (irq == firstirq + nvec - 1)
- msm_pcie_unmap_qgic_addr(dev, pdev);
+ msm_pcie_unmap_qgic_addr(dev, pdev, entry);
pos = irq - firstirq;
} else {
PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
@@ -4929,8 +4872,12 @@
/* hookup to linux pci msi framework */
void arch_teardown_msi_irq(unsigned int irq)
{
+ struct msi_desc *entry = irq_get_msi_desc(irq);
+
PCIE_GEN_DBG("irq %d deallocated\n", irq);
- msm_pcie_destroy_irq(irq, NULL);
+
+ if (entry)
+ msm_pcie_destroy_irq(entry, irq);
}
void arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -4950,7 +4897,7 @@
continue;
nvec = 1 << entry->msi_attrib.multiple;
for (i = 0; i < nvec; i++)
- msm_pcie_destroy_irq(entry->irq + i, dev);
+ msm_pcie_destroy_irq(entry, entry->irq + i);
}
}
@@ -5064,9 +5011,8 @@
struct msi_msg *msg)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
- struct iommu_domain_geometry geometry;
- int fastmap_en = 0, bypass_en = 0;
- dma_addr_t iova, addr;
+ int bypass_en = 0;
+ dma_addr_t iova;
msg->address_hi = 0;
msg->address_lo = dev->msi_gicm_addr;
@@ -5088,35 +5034,15 @@
if (bypass_en)
return 0;
- iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
- if (fastmap_en) {
- iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
- iova = geometry.aperture_start;
- PCIE_DBG(dev,
- "PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
- dev->rc_idx, iova);
- } else {
- phys_addr_t pcie_base_addr;
-
- /*
- * Use PCIe DBI address as the IOVA since client cannot
- * use this address for their IOMMU mapping. This will
- * prevent any conflicts between PCIe host and
- * client's mapping.
- */
- pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
- iova = rounddown(pcie_base_addr, PAGE_SIZE);
- }
-
- addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
+ iova = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL, 0);
- if (dma_mapping_error(&pdev->dev, addr)) {
+ if (dma_mapping_error(&pdev->dev, iova)) {
PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
dev->rc_idx);
return -EIO;
}
- msg->address_lo = iova + addr;
+ msg->address_lo = iova;
return 0;
}
@@ -5404,6 +5330,22 @@
PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
}
+static int msm_pcie_config_l0s_disable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l0s(pcie_dev, pdev, false);
+ return 0;
+}
+
+static int msm_pcie_config_l0s_enable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l0s(pcie_dev, pdev, true);
+ return 0;
+}
+
static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
struct pci_dev *pdev, bool enable)
{
@@ -5430,6 +5372,22 @@
PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
}
+static int msm_pcie_config_l1_disable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1(pcie_dev, pdev, false);
+ return 0;
+}
+
+static int msm_pcie_config_l1_enable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1(pcie_dev, pdev, true);
+ return 0;
+}
+
static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
struct pci_dev *pdev, bool enable)
{
@@ -5499,6 +5457,22 @@
dev->rc_idx, val2);
}
+static int msm_pcie_config_l1ss_disable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1ss(pcie_dev, pdev, false);
+ return 0;
+}
+
+static int msm_pcie_config_l1ss_enable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1ss(pcie_dev, pdev, true);
+ return 0;
+}
+
static void msm_pcie_config_clock_power_management(struct msm_pcie_dev_t *dev,
struct pci_dev *pdev)
{
@@ -5834,6 +5808,21 @@
"RC%d: slv-addr-space-size: 0x%x.\n",
rc_idx, msm_pcie_dev[rc_idx].slv_addr_space_size);
+ msm_pcie_dev[rc_idx].phy_status_offset = 0;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,phy-status-offset",
+ &msm_pcie_dev[rc_idx].phy_status_offset);
+ if (ret) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "RC%d: failed to get PCIe PHY status offset.\n",
+ rc_idx);
+ goto decrease_rc_num;
+ } else {
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: phy-status-offset: 0x%x.\n",
+ rc_idx, msm_pcie_dev[rc_idx].phy_status_offset);
+ }
+
msm_pcie_dev[rc_idx].cpl_timeout = 0;
ret = of_property_read_u32((&pdev->dev)->of_node,
"qcom,cpl-timeout",
@@ -6230,10 +6219,10 @@
struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
- if (dev->hdr_type == 1)
+ if (pci_is_root_bus(dev->bus))
dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
}
-DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
msm_pcie_fixup_early);
/* Suspend the PCIe link */
@@ -6316,7 +6305,8 @@
PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
- if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
+ if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED ||
+ !pci_is_root_bus(dev->bus))
return;
spin_lock_irqsave(&pcie_dev->cfg_lock,
@@ -6341,7 +6331,7 @@
mutex_unlock(&pcie_dev->recovery_lock);
}
-DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
msm_pcie_fixup_suspend);
/* Resume the PCIe link */
@@ -6415,7 +6405,7 @@
PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
- pcie_dev->user_suspend)
+ pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
return;
mutex_lock(&pcie_dev->recovery_lock);
@@ -6427,7 +6417,7 @@
mutex_unlock(&pcie_dev->recovery_lock);
}
-DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
msm_pcie_fixup_resume);
static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
@@ -6438,7 +6428,7 @@
PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
- pcie_dev->user_suspend)
+ pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
return;
mutex_lock(&pcie_dev->recovery_lock);
@@ -6449,7 +6439,7 @@
mutex_unlock(&pcie_dev->recovery_lock);
}
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
msm_pcie_fixup_resume_early);
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index d39a17f..6515ce4 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -89,6 +89,16 @@
Technologies Inc MSM8953 platform.
If unsure say N.
+config PINCTRL_MSM8937
+ tristate "Qualcomm Technologies Inc MSM8937 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc MSM8937 platform.
+ If unsure say N.
+
config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index d92db11..db71677 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -12,6 +12,7 @@
obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
+obj-$(CONFIG_PINCTRL_MSM8937) += pinctrl-msm8937.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index bf95849..1441678 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,7 @@
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs;
+ void __iomem *pdc_regs;
};
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
@@ -777,6 +778,102 @@
.irq_set_wake = msm_gpio_irq_set_wake,
};
+static struct irq_chip msm_dirconn_irq_chip;
+
+static void msm_gpio_dirconn_handler(struct irq_desc *desc)
+{
+ struct irq_data *irqd = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ generic_handle_irq(irqd->irq);
+ chained_irq_exit(chip, desc);
+}
+
+static void setup_pdc_gpio(struct irq_domain *domain,
+ unsigned int parent_irq, unsigned int gpio)
+{
+ int irq;
+
+ if (gpio != 0) {
+ irq = irq_find_mapping(domain, gpio);
+ irq_set_parent(irq, parent_irq);
+ irq_set_chip(irq, &msm_dirconn_irq_chip);
+ irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
+ }
+
+ __irq_set_handler(parent_irq, msm_gpio_dirconn_handler, false, NULL);
+}
+
+static void request_dc_interrupt(struct irq_domain *domain,
+ struct irq_domain *parent, irq_hw_number_t hwirq,
+ unsigned int gpio)
+{
+ struct irq_fwspec fwspec;
+ unsigned int parent_irq;
+
+ fwspec.fwnode = parent->fwnode;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = hwirq;
+ fwspec.param[2] = IRQ_TYPE_NONE;
+ fwspec.param_count = 3;
+
+ parent_irq = irq_create_fwspec_mapping(&fwspec);
+
+ setup_pdc_gpio(domain, parent_irq, gpio);
+}
+
+/**
+ * gpio_muxed_to_pdc: Mux the GPIO to a PDC IRQ
+ *
+ * @pdc_domain: the PDC's domain
+ * @d: the GPIO's IRQ data
+ *
+ * Find a free PDC port for the GPIO and map the GPIO's mux information to the
+ * PDC registers, so the GPIO can be used as a wakeup source.
+ */
+static void gpio_muxed_to_pdc(struct irq_domain *pdc_domain, struct irq_data *d)
+{
+ int i, j;
+ unsigned int mux;
+ struct irq_desc *desc = irq_data_to_desc(d);
+ struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
+ struct msm_pinctrl *pctrl;
+ unsigned int irq;
+
+ if (!gc || !parent_data)
+ return;
+
+ pctrl = gpiochip_get_data(gc);
+
+ for (i = 0; i < pctrl->soc->n_gpio_mux_in; i++) {
+ if (gpio != pctrl->soc->gpio_mux_in[i].gpio)
+ continue;
+ mux = pctrl->soc->gpio_mux_in[i].mux;
+ for (j = 0; j < pctrl->soc->n_pdc_mux_out; j++) {
+ struct msm_pdc_mux_output *pdc_out =
+ &pctrl->soc->pdc_mux_out[j];
+
+ if (pdc_out->mux == mux)
+ break;
+ if (pdc_out->mux)
+ continue;
+ pdc_out->mux = gpio;
+ irq = irq_find_mapping(pdc_domain, pdc_out->hwirq + 32);
+ /* setup the IRQ parent for the GPIO */
+ setup_pdc_gpio(pctrl->chip.irqdomain, irq, gpio);
+ /* program pdc select grp register */
+ writel_relaxed((mux & 0x3F), pctrl->pdc_regs +
+ (0x14 * j));
+ break;
+ }
+ /* We have no more PDC port available */
+ WARN_ON(j == pctrl->soc->n_pdc_mux_out);
+ }
+}
+
static bool is_gpio_dual_edge(struct irq_data *d, irq_hw_number_t *dir_conn_irq)
{
struct irq_desc *desc = irq_data_to_desc(d);
@@ -797,6 +894,17 @@
return true;
}
}
+
+ for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) {
+ struct msm_pdc_mux_output *dir_conn =
+ &pctrl->soc->pdc_mux_out[i];
+
+ if (dir_conn->mux == d->hwirq && (dir_conn->hwirq + 32)
+ != parent_data->hwirq) {
+ *dir_conn_irq = dir_conn->hwirq + 32;
+ return true;
+ }
+ }
return false;
}
@@ -814,13 +922,48 @@
irq_get_irq_data(irq_find_mapping(parent_data->domain,
dir_conn_irq));
- if (dir_conn_data && dir_conn_data->chip->irq_mask)
+ if (!dir_conn_data)
+ return;
+ if (dir_conn_data->chip->irq_mask)
dir_conn_data->chip->irq_mask(dir_conn_data);
}
+
if (parent_data->chip->irq_mask)
parent_data->chip->irq_mask(parent_data);
}
+static void msm_dirconn_irq_enable(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_data_to_desc(d);
+ struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+ irq_hw_number_t dir_conn_irq = 0;
+
+ if (!parent_data)
+ return;
+
+ if (is_gpio_dual_edge(d, &dir_conn_irq)) {
+ struct irq_data *dir_conn_data =
+ irq_get_irq_data(irq_find_mapping(parent_data->domain,
+ dir_conn_irq));
+
+ if (dir_conn_data &&
+ dir_conn_data->chip->irq_set_irqchip_state)
+ dir_conn_data->chip->irq_set_irqchip_state(
+ dir_conn_data,
+ IRQCHIP_STATE_PENDING, 0);
+
+ if (dir_conn_data && dir_conn_data->chip->irq_unmask)
+ dir_conn_data->chip->irq_unmask(dir_conn_data);
+ }
+
+ if (parent_data->chip->irq_set_irqchip_state)
+ parent_data->chip->irq_set_irqchip_state(parent_data,
+ IRQCHIP_STATE_PENDING, 0);
+
+ if (parent_data->chip->irq_unmask)
+ parent_data->chip->irq_unmask(parent_data);
+}
+
static void msm_dirconn_irq_unmask(struct irq_data *d)
{
struct irq_desc *desc = irq_data_to_desc(d);
@@ -835,7 +978,9 @@
irq_get_irq_data(irq_find_mapping(parent_data->domain,
dir_conn_irq));
- if (dir_conn_data && dir_conn_data->chip->irq_unmask)
+ if (!dir_conn_data)
+ return;
+ if (dir_conn_data->chip->irq_unmask)
dir_conn_data->chip->irq_unmask(dir_conn_data);
}
if (parent_data->chip->irq_unmask)
@@ -1058,6 +1203,7 @@
static struct irq_chip msm_dirconn_irq_chip = {
.name = "msmgpio-dc",
.irq_mask = msm_dirconn_irq_mask,
+ .irq_enable = msm_dirconn_irq_enable,
.irq_unmask = msm_dirconn_irq_unmask,
.irq_eoi = msm_dirconn_irq_eoi,
.irq_ack = msm_dirconn_irq_ack,
@@ -1103,57 +1249,53 @@
chained_irq_exit(chip, desc);
}
-static void msm_gpio_dirconn_handler(struct irq_desc *desc)
-{
- struct irq_data *irqd = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
- generic_handle_irq(irqd->irq);
- chained_irq_exit(chip, desc);
-}
-
static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl)
{
struct device_node *parent_node;
- struct irq_domain *parent_domain;
- struct irq_fwspec fwspec;
+ struct irq_domain *pdc_domain;
unsigned int i;
parent_node = of_irq_find_parent(pctrl->dev->of_node);
-
if (!parent_node)
return;
- parent_domain = irq_find_host(parent_node);
- if (!parent_domain)
+ pdc_domain = irq_find_host(parent_node);
+ if (!pdc_domain)
return;
- fwspec.fwnode = parent_domain->fwnode;
for (i = 0; i < pctrl->soc->n_dir_conns; i++) {
const struct msm_dir_conn *dirconn = &pctrl->soc->dir_conn[i];
- unsigned int parent_irq;
- int irq;
- fwspec.param[0] = 0; /* SPI */
- fwspec.param[1] = dirconn->hwirq;
- fwspec.param[2] = IRQ_TYPE_NONE;
- fwspec.param_count = 3;
- parent_irq = irq_create_fwspec_mapping(&fwspec);
+ request_dc_interrupt(pctrl->chip.irqdomain, pdc_domain,
+ dirconn->hwirq, dirconn->gpio);
+ }
- if (dirconn->gpio != 0) {
- irq = irq_find_mapping(pctrl->chip.irqdomain,
- dirconn->gpio);
+ for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) {
+ struct msm_pdc_mux_output *pdc_out =
+ &pctrl->soc->pdc_mux_out[i];
- irq_set_parent(irq, parent_irq);
- irq_set_chip(irq, &msm_dirconn_irq_chip);
- __irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
- false, NULL);
- irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
- } else {
- __irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
- false, NULL);
- }
+ request_dc_interrupt(pctrl->chip.irqdomain, pdc_domain,
+ pdc_out->hwirq, 0);
+ }
+
+ /*
+ * Statically choose the GPIOs for mapping to PDC. Dynamic mux mapping
+ * is very difficult.
+ */
+ for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) {
+ unsigned int irq;
+ struct irq_data *d;
+ struct msm_gpio_mux_input *gpio_in =
+ &pctrl->soc->gpio_mux_in[i];
+ if (!gpio_in->init)
+ continue;
+
+ irq = irq_find_mapping(pctrl->chip.irqdomain, gpio_in->gpio);
+ d = irq_get_irq_data(irq);
+ if (!d)
+ continue;
+
+ gpio_muxed_to_pdc(pdc_domain, d);
}
}
@@ -1263,6 +1405,9 @@
if (IS_ERR(pctrl->regs))
return PTR_ERR(pctrl->regs);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pctrl->pdc_regs = devm_ioremap_resource(&pdev->dev, res);
+
msm_pinctrl_setup_pm_reset(pctrl);
pctrl->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 1c6df2f..9fc6660 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013, Sony Mobile Communications AB.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -99,13 +100,35 @@
unsigned intr_detection_bit:5;
unsigned intr_detection_width:5;
unsigned dir_conn_en_bit:8;
-}
+};
+
+/**
+ * struct msm_gpio_mux_input - Map GPIO to Mux pin
+ * @mux: The mux pin to which the GPIO is connected
+ * @gpio: GPIO pin number
+ * @init: Setup PDC connection at probe
+ */
+struct msm_gpio_mux_input {
+ unsigned int mux;
+ unsigned int gpio;
+ bool init;
+};
+
+/**
+ * struct msm_pdc_mux_output - GPIO mux pin to PDC port
+ * @mux: GPIO mux pin number
+ * @hwirq: The PDC port (hwirq) that GPIO is connected to
+ */
+struct msm_pdc_mux_output {
+ unsigned int mux;
+ irq_hw_number_t hwirq;
+};
/**
* struct msm_dir_conn - Direct GPIO connect configuration
* @gpio: GPIO pin number
* @hwirq: The GIC interrupt that the pin is connected to
- */;
+ */
struct msm_dir_conn {
unsigned int gpio;
irq_hw_number_t hwirq;
@@ -122,8 +145,12 @@
* @ngpio: The number of pingroups the driver should expose as GPIOs.
* @dir_conn: An array describing all the pins directly connected to GIC.
* @ndirconns: The number of pins directly connected to GIC
- * @dir_conn_offsets: Direct connect register offsets for each tile.
* @dir_conn_irq_base: Direct connect interrupt base register for kpss.
+ * @gpio_mux_in: Map of GPIO pin to the mux pin.
+ * @n_gpio_mux_in: The number of entries in @gpio_mux_in.
+ * @pdc_mux_out: Map of GPIO mux to PDC port.
+ * @n_pdc_mux_out: The number of entries in @pdc_mux_out.
+ * @n_pdc_mux_offset: The offset for the PDC mux pins
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -136,6 +163,11 @@
const struct msm_dir_conn *dir_conn;
unsigned int n_dir_conns;
unsigned int dir_conn_irq_base;
+ struct msm_pdc_mux_output *pdc_mux_out;
+ unsigned int n_pdc_mux_out;
+ struct msm_gpio_mux_input *gpio_mux_in;
+ unsigned int n_gpio_mux_in;
+ unsigned int n_pdc_mux_offset;
};
int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8937.c b/drivers/pinctrl/qcom/pinctrl-msm8937.c
new file mode 100644
index 0000000..2b72c54
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8937.c
@@ -0,0 +1,1479 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+static const struct pinctrl_pin_desc msm8937_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "SDC1_CLK"),
+ PINCTRL_PIN(135, "SDC1_CMD"),
+ PINCTRL_PIN(136, "SDC1_DATA"),
+ PINCTRL_PIN(137, "SDC1_RCLK"),
+ PINCTRL_PIN(138, "SDC2_CLK"),
+ PINCTRL_PIN(139, "SDC2_CMD"),
+ PINCTRL_PIN(140, "SDC2_DATA"),
+ PINCTRL_PIN(141, "QDSD_CLK"),
+ PINCTRL_PIN(142, "QDSD_CMD"),
+ PINCTRL_PIN(143, "QDSD_DATA0"),
+ PINCTRL_PIN(144, "QDSD_DATA1"),
+ PINCTRL_PIN(145, "QDSD_DATA2"),
+ PINCTRL_PIN(146, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+
+static const unsigned int sdc1_clk_pins[] = { 134 };
+static const unsigned int sdc1_cmd_pins[] = { 135 };
+static const unsigned int sdc1_data_pins[] = { 136 };
+static const unsigned int sdc1_rclk_pins[] = { 137 };
+static const unsigned int sdc2_clk_pins[] = { 138 };
+static const unsigned int sdc2_cmd_pins[] = { 139 };
+static const unsigned int sdc2_data_pins[] = { 140 };
+static const unsigned int qdsd_clk_pins[] = { 141 };
+static const unsigned int qdsd_cmd_pins[] = { 142 };
+static const unsigned int qdsd_data0_pins[] = { 143 };
+static const unsigned int qdsd_data1_pins[] = { 144 };
+static const unsigned int qdsd_data2_pins[] = { 145 };
+static const unsigned int qdsd_data3_pins[] = { 146 };
+
+enum msm8937_functions {
+ msm_mux_qdss_tracedata_b,
+ msm_mux_blsp_uart1,
+ msm_mux_gpio,
+ msm_mux_blsp_spi1,
+ msm_mux_adsp_ext,
+ msm_mux_blsp_i2c1,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_ldo_update,
+ msm_mux_atest_combodac_to_gpio_native,
+ msm_mux_ldo_en,
+ msm_mux_blsp_i2c2,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_pbs2,
+ msm_mux_atest_gpsadc_dtest0_native,
+ msm_mux_blsp_spi3,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_uart4,
+ msm_mux_sec_mi2s,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_codec_mad,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_uart5,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_atest_bbrx1,
+ msm_mux_m_voc,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_blsp_i2c6,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_atest_bbrx0,
+ msm_mux_blsp_i2c5,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_atest_gpsadc_dtest1_native,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart6,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_mdp_vsync,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_cam_mclk,
+ msm_mux_cci_i2c,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cam1_standby,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_cam1_rst,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_forced_usb,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_cam2_rst,
+ msm_mux_webcam_standby,
+ msm_mux_cci_async,
+ msm_mux_webcam_rst,
+ msm_mux_ov_ldo,
+ msm_mux_sd_write,
+ msm_mux_accel_int,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_alsp_int,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_mag_int,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_blsp6_spi,
+ msm_mux_fp_int,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_uim_batt,
+ msm_mux_cam2_standby,
+ msm_mux_uim1_data,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_present,
+ msm_mux_uim2_data,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_reset,
+ msm_mux_uim2_present,
+ msm_mux_sensor_rst,
+ msm_mux_mipi_dsi0,
+ msm_mux_smb_int,
+ msm_mux_cam0_ldo,
+ msm_mux_us_euro,
+ msm_mux_atest_char3,
+ msm_mux_dbg_out,
+ msm_mux_bimc_dte0,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_pri_mi2s,
+ msm_mux_sdcard_det,
+ msm_mux_atest_char1,
+ msm_mux_ebi_cdc,
+ msm_mux_audio_reset,
+ msm_mux_atest_char0,
+ msm_mux_audio_ref,
+ msm_mux_cdc_pdm0,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_wcss_bt,
+ msm_mux_atest_char2,
+ msm_mux_ebi_ch0,
+ msm_mux_wcss_wlan2,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_fm,
+ msm_mux_ext_lpass,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng1,
+ msm_mux_cri_trng0,
+ msm_mux_blsp_spi7,
+ msm_mux_blsp_uart7,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_blsp_i2c7,
+ msm_mux_gcc_tlmm,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_key_volp,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_us_emitter,
+ msm_mux_wsa_irq,
+ msm_mux_wsa_io,
+ msm_mux_wsa_reset,
+ msm_mux_blsp_spi8,
+ msm_mux_blsp_uart8,
+ msm_mux_blsp_i2c8,
+ msm_mux_gcc_plltest,
+ msm_mux_nav_pps_in_a,
+ msm_mux_pa_indicator,
+ msm_mux_modem_tsync,
+ msm_mux_nav_tsync,
+ msm_mux_nav_pps_in_b,
+ msm_mux_nav_pps,
+ msm_mux_gsm0_tx,
+ msm_mux_atest_char,
+ msm_mux_atest_tsens,
+ msm_mux_bimc_dte1,
+ msm_mux_ssbi_wtr1,
+ msm_mux_fp_gpio,
+ msm_mux_coex_uart,
+ msm_mux_key_snapshot,
+ msm_mux_key_focus,
+ msm_mux_nfc_pwr,
+ msm_mux_blsp8_spi,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_NA,
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio0", "gpio1", "gpio6", "gpio7", "gpio12", "gpio13", "gpio23",
+ "gpio42", "gpio43", "gpio44", "gpio47", "gpio66", "gpio86", "gpio87",
+ "gpio88", "gpio92",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio2",
+};
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio2",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const pbs0_groups[] = {
+ "gpio8",
+};
+static const char * const pbs1_groups[] = {
+ "gpio9",
+};
+static const char * const pwr_modem_enabled_b_groups[] = {
+ "gpio9",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio10",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio4",
+};
+static const char * const atest_combodac_to_gpio_native_groups[] = {
+ "gpio4", "gpio12", "gpio13", "gpio20", "gpio21", "gpio28", "gpio29",
+ "gpio30", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio44",
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio67", "gpio115",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio6",
+};
+static const char * const pbs2_groups[] = {
+ "gpio7",
+};
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+ "gpio7",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio11",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio12", "gpio13", "gpio94", "gpio95",
+};
+static const char * const pwr_nav_enabled_b_groups[] = {
+ "gpio12",
+};
+static const char * const codec_mad_groups[] = {
+ "gpio13",
+};
+static const char * const pwr_crypto_enabled_b_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio16",
+};
+static const char * const atest_bbrx1_groups[] = {
+ "gpio16",
+};
+static const char * const m_voc_groups[] = {
+ "gpio17", "gpio21",
+};
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio17",
+};
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio21",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio22",
+};
+static const char * const atest_wlan0_groups[] = {
+ "gpio22",
+};
+static const char * const atest_wlan1_groups[] = {
+ "gpio23",
+};
+static const char * const atest_bbrx0_groups[] = {
+ "gpio17",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio18",
+};
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+ "gpio18",
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio19", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio38", "gpio39",
+ "gpio40", "gpio50",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio20",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+static const char * const pwr_modem_enabled_a_groups[] = {
+ "gpio29",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio33",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio34",
+};
+static const char * const cam1_standby_groups[] = {
+ "gpio35",
+};
+static const char * const pwr_nav_enabled_a_groups[] = {
+ "gpio35",
+};
+static const char * const cam1_rst_groups[] = {
+ "gpio36",
+};
+static const char * const pwr_crypto_enabled_a_groups[] = {
+ "gpio36",
+};
+static const char * const forced_usb_groups[] = {
+ "gpio37",
+};
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio37",
+};
+static const char * const cam2_rst_groups[] = {
+ "gpio38",
+};
+static const char * const webcam_standby_groups[] = {
+ "gpio39",
+};
+static const char * const cci_async_groups[] = {
+ "gpio39",
+};
+static const char * const webcam_rst_groups[] = {
+ "gpio40",
+};
+static const char * const ov_ldo_groups[] = {
+ "gpio41",
+};
+static const char * const sd_write_groups[] = {
+ "gpio41",
+};
+static const char * const accel_int_groups[] = {
+ "gpio42",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio42",
+};
+static const char * const alsp_int_groups[] = {
+ "gpio43",
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio43",
+};
+static const char * const mag_int_groups[] = {
+ "gpio44",
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio44",
+};
+static const char * const blsp6_spi_groups[] = {
+ "gpio47",
+};
+static const char * const fp_int_groups[] = {
+ "gpio48",
+};
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio48",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio49",
+};
+static const char * const cam2_standby_groups[] = {
+ "gpio50",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio51",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio52",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio53",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio54",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio55",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio56",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio57",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio58",
+};
+static const char * const sensor_rst_groups[] = {
+ "gpio59",
+};
+static const char * const mipi_dsi0_groups[] = {
+ "gpio60",
+};
+static const char * const smb_int_groups[] = {
+ "gpio61",
+};
+static const char * const cam0_ldo_groups[] = {
+ "gpio62",
+};
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio63",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio63",
+};
+static const char * const bimc_dte0_groups[] = {
+ "gpio63", "gpio65",
+};
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio66", "gpio85", "gpio86", "gpio88", "gpio94", "gpio95",
+};
+static const char * const sdcard_det_groups[] = {
+ "gpio67",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio67",
+};
+static const char * const ebi_cdc_groups[] = {
+ "gpio67", "gpio69", "gpio118", "gpio119", "gpio120", "gpio123",
+};
+static const char * const audio_reset_groups[] = {
+ "gpio68",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio68",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio69",
+};
+static const char * const cdc_pdm0_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74",
+};
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio69",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio70",
+};
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio71",
+};
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio72",
+};
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+static const char * const wcss_bt_groups[] = {
+ "gpio75", "gpio83", "gpio84",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio75",
+};
+static const char * const ebi_ch0_groups[] = {
+ "gpio75",
+};
+static const char * const wcss_wlan2_groups[] = {
+ "gpio76",
+};
+static const char * const wcss_wlan1_groups[] = {
+ "gpio77",
+};
+static const char * const wcss_wlan0_groups[] = {
+ "gpio78",
+};
+static const char * const wcss_wlan_groups[] = {
+ "gpio79", "gpio80",
+};
+static const char * const wcss_fm_groups[] = {
+ "gpio81", "gpio82",
+};
+static const char * const ext_lpass_groups[] = {
+ "gpio81",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio82",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio83",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio84",
+};
+static const char * const blsp_spi7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const blsp_uart7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio87",
+};
+static const char * const blsp_i2c7_groups[] = {
+ "gpio87", "gpio88",
+};
+static const char * const gcc_tlmm_groups[] = {
+ "gpio87",
+};
+static const char * const dmic0_clk_groups[] = {
+ "gpio89",
+};
+static const char * const dmic0_data_groups[] = {
+ "gpio90",
+};
+static const char * const key_volp_groups[] = {
+ "gpio91",
+};
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio91",
+};
+static const char * const us_emitter_groups[] = {
+ "gpio92",
+};
+static const char * const wsa_irq_groups[] = {
+ "gpio93",
+};
+static const char * const wsa_io_groups[] = {
+ "gpio94", "gpio95",
+};
+static const char * const wsa_reset_groups[] = {
+ "gpio96",
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+static const char * const blsp_uart8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio98", "gpio99",
+};
+static const char * const gcc_plltest_groups[] = {
+ "gpio98", "gpio99",
+};
+static const char * const nav_pps_in_a_groups[] = {
+ "gpio115",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio116",
+};
+static const char * const modem_tsync_groups[] = {
+ "gpio117",
+};
+static const char * const nav_tsync_groups[] = {
+ "gpio117",
+};
+static const char * const nav_pps_in_b_groups[] = {
+ "gpio117",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio117",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio119",
+};
+static const char * const atest_char_groups[] = {
+ "gpio120",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio120",
+};
+static const char * const bimc_dte1_groups[] = {
+ "gpio121", "gpio122",
+};
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio122", "gpio123",
+};
+static const char * const fp_gpio_groups[] = {
+ "gpio124",
+};
+static const char * const coex_uart_groups[] = {
+ "gpio124", "gpio127",
+};
+static const char * const key_snapshot_groups[] = {
+ "gpio127",
+};
+static const char * const key_focus_groups[] = {
+ "gpio128",
+};
+static const char * const nfc_pwr_groups[] = {
+ "gpio129",
+};
+static const char * const blsp8_spi_groups[] = {
+ "gpio130",
+};
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio132",
+};
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio133",
+};
+
+static const struct msm_function msm8937_functions[] = {
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(blsp_uart1),
+ FUNCTION(gpio),
+ FUNCTION(blsp_spi1),
+ FUNCTION(adsp_ext),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(ldo_update),
+ FUNCTION(atest_combodac_to_gpio_native),
+ FUNCTION(ldo_en),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(pbs2),
+ FUNCTION(atest_gpsadc_dtest0_native),
+ FUNCTION(blsp_spi3),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_uart4),
+ FUNCTION(sec_mi2s),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(codec_mad),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uart5),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(m_voc),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(atest_gpsadc_dtest1_native),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart6),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(mdp_vsync),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(sec_mi2s_mclk_a),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_i2c),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cam1_standby),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(cam1_rst),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(forced_usb),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(cam2_rst),
+ FUNCTION(webcam_standby),
+ FUNCTION(cci_async),
+ FUNCTION(webcam_rst),
+ FUNCTION(ov_ldo),
+ FUNCTION(sd_write),
+ FUNCTION(accel_int),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(alsp_int),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(mag_int),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(blsp6_spi),
+ FUNCTION(fp_int),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(uim_batt),
+ FUNCTION(cam2_standby),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_present),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim2_present),
+ FUNCTION(sensor_rst),
+ FUNCTION(mipi_dsi0),
+ FUNCTION(smb_int),
+ FUNCTION(cam0_ldo),
+ FUNCTION(us_euro),
+ FUNCTION(atest_char3),
+ FUNCTION(dbg_out),
+ FUNCTION(bimc_dte0),
+ FUNCTION(ts_resout),
+ FUNCTION(ts_sample),
+ FUNCTION(sec_mi2s_mclk_b),
+ FUNCTION(pri_mi2s),
+ FUNCTION(sdcard_det),
+ FUNCTION(atest_char1),
+ FUNCTION(ebi_cdc),
+ FUNCTION(audio_reset),
+ FUNCTION(atest_char0),
+ FUNCTION(audio_ref),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(lpass_slimbus0),
+ FUNCTION(lpass_slimbus1),
+ FUNCTION(codec_int1),
+ FUNCTION(codec_int2),
+ FUNCTION(wcss_bt),
+ FUNCTION(atest_char2),
+ FUNCTION(ebi_ch0),
+ FUNCTION(wcss_wlan2),
+ FUNCTION(wcss_wlan1),
+ FUNCTION(wcss_wlan0),
+ FUNCTION(wcss_wlan),
+ FUNCTION(wcss_fm),
+ FUNCTION(ext_lpass),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng1),
+ FUNCTION(cri_trng0),
+ FUNCTION(blsp_spi7),
+ FUNCTION(blsp_uart7),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(blsp_i2c7),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(key_volp),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(us_emitter),
+ FUNCTION(wsa_irq),
+ FUNCTION(wsa_io),
+ FUNCTION(wsa_reset),
+ FUNCTION(blsp_spi8),
+ FUNCTION(blsp_uart8),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(gcc_plltest),
+ FUNCTION(nav_pps_in_a),
+ FUNCTION(pa_indicator),
+ FUNCTION(modem_tsync),
+ FUNCTION(nav_tsync),
+ FUNCTION(nav_pps_in_b),
+ FUNCTION(nav_pps),
+ FUNCTION(gsm0_tx),
+ FUNCTION(atest_char),
+ FUNCTION(atest_tsens),
+ FUNCTION(bimc_dte1),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(fp_gpio),
+ FUNCTION(coex_uart),
+ FUNCTION(key_snapshot),
+ FUNCTION(key_focus),
+ FUNCTION(nfc_pwr),
+ FUNCTION(blsp8_spi),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+};
+
+static const struct msm_pingroup msm8937_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, qdss_tracedata_b, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, adsp_ext, NA, NA, NA, NA, NA,
+ qdss_tracedata_b),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, prng_rosc, NA, NA, NA,
+ NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, ldo_update, NA,
+ atest_combodac_to_gpio_native, NA, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, ldo_en, NA, NA, NA, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, gcc_gp1_clk_b,
+ qdss_tracedata_b, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, pbs2, NA,
+ qdss_tracedata_b, NA, atest_gpsadc_dtest0_native, NA),
+ PINGROUP(8, blsp_spi3, blsp_uart3, pbs0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, blsp_uart3, pbs1, pwr_modem_enabled_b, NA, NA,
+ NA, NA, NA),
+ PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp2_clk_b, NA, NA,
+ NA, NA, NA),
+ PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp3_clk_b, NA, NA,
+ NA, NA, NA),
+ PINGROUP(12, blsp_spi4, blsp_uart4, sec_mi2s, pwr_nav_enabled_b, NA,
+ NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi4, blsp_uart4, sec_mi2s, pwr_crypto_enabled_b, NA,
+ NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi5, blsp_uart5, NA, NA, NA, NA, qdss_traceclk_a,
+ NA, atest_bbrx1),
+ PINGROUP(17, blsp_spi5, blsp_uart5, m_voc, qdss_cti_trig_in_a0, NA,
+ atest_bbrx0, NA, NA, NA),
+ PINGROUP(18, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracectl_a, NA,
+ atest_gpsadc_dtest1_native, NA, NA, NA),
+ PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracedata_a, NA,
+ NA, NA, NA, NA),
+ PINGROUP(20, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA,
+ qdss_tracectl_b),
+ PINGROUP(21, blsp_spi6, blsp_uart6, m_voc, NA, NA, NA, NA, NA,
+ qdss_cti_trig_in_b0),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_b, NA,
+ atest_wlan0, NA, NA, NA),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_tracedata_b, NA,
+ atest_wlan1, NA, NA, NA),
+ PINGROUP(24, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, mdp_vsync, pri_mi2s_mclk_a, sec_mi2s_mclk_a, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(26, cam_mclk, NA, NA, NA, NA, NA, qdss_tracedata_a, NA, NA),
+ PINGROUP(27, cam_mclk, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(28, cam_mclk, NA, NA, NA, NA, NA, qdss_tracedata_a, NA,
+ atest_combodac_to_gpio_native),
+ PINGROUP(29, cci_i2c, pwr_modem_enabled_a, NA, NA, NA, NA, NA,
+ qdss_tracedata_a, NA),
+ PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(31, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(32, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(33, cci_timer0, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(34, cci_timer1, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_a),
+ PINGROUP(35, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA,
+ qdss_tracedata_a),
+ PINGROUP(36, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA,
+ qdss_tracedata_a),
+ PINGROUP(37, NA, NA, NA, NA, NA, qdss_cti_trig_out_b1, NA, NA, NA),
+ PINGROUP(38, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, cci_async, NA, NA, NA, NA, NA, qdss_tracedata_a, NA,
+ atest_combodac_to_gpio_native),
+ PINGROUP(40, NA, NA, NA, NA, qdss_tracedata_a, NA,
+ atest_combodac_to_gpio_native, NA, NA),
+ PINGROUP(41, sd_write, NA, NA, NA, NA, NA, NA, NA,
+ atest_combodac_to_gpio_native),
+ PINGROUP(42, gcc_gp1_clk_a, qdss_tracedata_b, NA,
+ atest_combodac_to_gpio_native, NA, NA, NA, NA, NA),
+ PINGROUP(43, gcc_gp2_clk_a, qdss_tracedata_b, NA,
+ atest_combodac_to_gpio_native, NA, NA, NA, NA, NA),
+ PINGROUP(44, gcc_gp3_clk_a, qdss_tracedata_b, NA,
+ atest_combodac_to_gpio_native, NA, NA, NA, NA, NA),
+ PINGROUP(45, NA, NA, atest_combodac_to_gpio_native, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(46, NA, NA, atest_combodac_to_gpio_native, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(47, blsp6_spi, NA, qdss_tracedata_b, NA,
+ atest_combodac_to_gpio_native, NA, NA, NA, NA),
+ PINGROUP(48, NA, qdss_cti_trig_in_b1, NA,
+ atest_combodac_to_gpio_native, NA, NA, NA, NA, NA),
+ PINGROUP(49, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, atest_char3, dbg_out, bimc_dte0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, bimc_dte0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, sec_mi2s_mclk_b, pri_mi2s, NA, qdss_tracedata_b, NA, NA,
+ NA, NA, NA),
+ PINGROUP(67, atest_char1, ebi_cdc, NA, atest_combodac_to_gpio_native,
+ NA, NA, NA, NA, NA),
+ PINGROUP(68, atest_char0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, audio_ref, cdc_pdm0, pri_mi2s_mclk_b, ebi_cdc, NA, NA, NA,
+ NA, NA),
+ PINGROUP(70, lpass_slimbus, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, lpass_slimbus0, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, lpass_slimbus1, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, wcss_bt, atest_char2, NA, ebi_ch0, NA, NA, NA, NA, NA),
+ PINGROUP(76, wcss_wlan2, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, wcss_wlan1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, wcss_wlan0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, wcss_wlan, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, wcss_wlan, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, wcss_fm, ext_lpass, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, wcss_fm, cri_trng, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, wcss_bt, cri_trng1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, wcss_bt, cri_trng0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, pri_mi2s, blsp_spi7, blsp_uart7, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, pri_mi2s, blsp_spi7, blsp_uart7, qdss_tracedata_b, NA, NA,
+ NA, NA, NA),
+ PINGROUP(87, pri_mi2s_ws, blsp_spi7, blsp_uart7, blsp_i2c7,
+ qdss_tracedata_b, gcc_tlmm, NA, NA, NA),
+ PINGROUP(88, pri_mi2s, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA,
+ NA, NA),
+ PINGROUP(89, dmic0_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, dmic0_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, qdss_cti_trig_in_a1, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, wsa_io, sec_mi2s, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, wsa_io, sec_mi2s, pri_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, NA, NA, NA,
+ NA, NA),
+ PINGROUP(99, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, NA, NA, NA,
+ NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, nav_pps_in_a, NA, atest_combodac_to_gpio_native,
+ NA, NA, NA, NA),
+ PINGROUP(116, NA, pa_indicator, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(117, NA, modem_tsync, nav_tsync, nav_pps_in_b, nav_pps, NA,
+ NA, NA, NA),
+ PINGROUP(118, NA, ebi_cdc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, gsm0_tx, NA, ebi_cdc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, NA, atest_char, ebi_cdc, NA, atest_tsens, NA, NA, NA, NA),
+ PINGROUP(121, NA, NA, NA, bimc_dte1, NA, NA, NA, NA, NA),
+ PINGROUP(122, NA, ssbi_wtr1, NA, NA, bimc_dte1, NA, NA, NA, NA),
+ PINGROUP(123, NA, ssbi_wtr1, ebi_cdc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, coex_uart, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(127, coex_uart, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, blsp8_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, qdss_cti_trig_out_a0, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, qdss_cti_trig_out_a1, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data msm8937_pinctrl = {
+ .pins = msm8937_pins,
+ .npins = ARRAY_SIZE(msm8937_pins),
+ .functions = msm8937_functions,
+ .nfunctions = ARRAY_SIZE(msm8937_functions),
+ .groups = msm8937_groups,
+ .ngroups = ARRAY_SIZE(msm8937_groups),
+ .ngpios = 134,
+};
+
+static int msm8937_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8937_pinctrl);
+}
+
+static const struct of_device_id msm8937_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8937-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8937_pinctrl_driver = {
+ .driver = {
+ .name = "msm8937-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = msm8937_pinctrl_of_match,
+ },
+ .probe = msm8937_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8937_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8937_pinctrl_driver);
+}
+arch_initcall(msm8937_pinctrl_init);
+
+static void __exit msm8937_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8937_pinctrl_driver);
+}
+module_exit(msm8937_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI msm8937 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8937_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
index 6ceb39a..c5f1307 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1105,6 +1105,97 @@
[106] = SDC_QDSD_PINGROUP(sdc2_data, 0x0, 9, 0),
};
+static struct msm_gpio_mux_input sdxpoorwills_mux_in[] = {
+ {0, 1},
+ {1, 2},
+ {2, 5},
+ {3, 6},
+ {4, 9},
+ {5, 10},
+ {6, 11},
+ {7, 12},
+ {8, 13},
+ {9, 14},
+ {10, 15},
+ {11, 16},
+ {12, 17},
+ {13, 18},
+ {14, 19},
+ {15, 21},
+ {16, 22},
+ {17, 24},
+ {18, 25},
+ {19, 35},
+ {20, 42, 1},
+ {21, 43},
+ {22, 45},
+ {23, 46},
+ {24, 48},
+ {25, 50},
+ {26, 52},
+ {27, 53},
+ {28, 54},
+ {29, 55},
+ {30, 56},
+ {31, 57},
+ {32, 60},
+ {33, 61},
+ {34, 64},
+ {35, 65},
+ {36, 68},
+ {37, 71},
+ {38, 75},
+ {39, 76},
+ {40, 78},
+ {41, 79},
+ {42, 80},
+ {43, 82},
+ {44, 83},
+ {45, 84},
+ {46, 86},
+ {47, 87},
+ {48, 88},
+ {49, 89},
+ {50, 90},
+ {51, 93},
+ {52, 94},
+ {53, 95},
+ {54, 97},
+ {55, 98},
+};
+
+static struct msm_pdc_mux_output sdxpoorwills_mux_out[] = {
+ {0, 167},
+ {0, 168},
+ {0, 169},
+ {0, 170},
+ {0, 171},
+ {0, 172},
+ {0, 173},
+ {0, 174},
+ {0, 175},
+ {0, 176},
+ {0, 177},
+ {0, 178},
+ {0, 179},
+ {0, 180},
+ {0, 181},
+ {0, 182},
+ {0, 183},
+ {0, 184},
+ {0, 185},
+ {0, 186},
+ {0, 187},
+ {0, 188},
+ {0, 189},
+ {0, 190},
+ {0, 191},
+ {0, 192},
+ {0, 193},
+ {0, 194},
+ {0, 195},
+};
+
static const struct msm_pinctrl_soc_data sdxpoorwills_pinctrl = {
.pins = sdxpoorwills_pins,
.npins = ARRAY_SIZE(sdxpoorwills_pins),
@@ -1112,6 +1203,11 @@
.nfunctions = ARRAY_SIZE(sdxpoorwills_functions),
.groups = sdxpoorwills_groups,
.ngroups = ARRAY_SIZE(sdxpoorwills_groups),
+ .gpio_mux_in = sdxpoorwills_mux_in,
+ .n_gpio_mux_in = ARRAY_SIZE(sdxpoorwills_mux_in),
+ .pdc_mux_out = sdxpoorwills_mux_out,
+ .n_pdc_mux_out = ARRAY_SIZE(sdxpoorwills_mux_out),
+ .n_pdc_mux_offset = 20,
.ngpios = 100,
};
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index d274490..e23541c 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -72,6 +72,11 @@
IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
+enum ipa_usb_direction {
+ IPA_USB_DIR_UL,
+ IPA_USB_DIR_DL,
+};
+
struct ipa_usb_xdci_connect_params_internal {
enum ipa_usb_max_usb_packet_size max_pkt_size;
u32 ipa_to_usb_clnt_hdl;
@@ -167,7 +172,8 @@
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
void *user_data;
enum ipa3_usb_state state;
- struct ipa_usb_xdci_chan_params ch_params;
+ struct ipa_usb_xdci_chan_params ul_ch_params;
+ struct ipa_usb_xdci_chan_params dl_ch_params;
struct ipa3_usb_teth_prot_conn_params teth_conn_params;
};
@@ -741,8 +747,8 @@
&ipa3_usb_ctx->ttype_ctx[ttype];
int result;
- /* create PM resources for the first tethering protocol only */
- if (ipa3_usb_ctx->num_init_prot > 0)
+ /* there is one PM resource for teth and one for DPL */
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype) && ipa3_usb_ctx->num_init_prot > 0)
return 0;
memset(&ttype_ctx->pm_ctx.reg_params, 0,
@@ -1073,8 +1079,6 @@
params->gevntcount_hi_addr);
IPA_USB_DBG_LOW("dir = %d\n", params->dir);
IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len);
- IPA_USB_DBG_LOW("xfer_ring_base_addr = %llx\n",
- params->xfer_ring_base_addr);
IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n",
params->xfer_scratch.last_trb_addr_iova);
IPA_USB_DBG_LOW("const_buffer_size = %d\n",
@@ -1178,15 +1182,16 @@
ipa3_usb_ctx->smmu_reg_map.cnt--;
}
+
result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova,
- params->xfer_ring_base_addr, params->xfer_ring_len, map);
+ params->xfer_ring_len, map, params->sgt_xfer_rings);
if (result) {
IPA_USB_ERR("failed to map Xfer ring %d\n", result);
return result;
}
result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova,
- params->data_buff_base_addr, params->data_buff_base_len, map);
+ params->data_buff_base_len, map, params->sgt_data_buff);
if (result) {
IPA_USB_ERR("failed to map TRBs buff %d\n", result);
return result;
@@ -1195,13 +1200,52 @@
return 0;
}
+static int ipa3_usb_smmu_store_sgt(struct sg_table **out_ch_ptr,
+ struct sg_table *in_sgt_ptr)
+{
+ unsigned int nents;
+
+ if (in_sgt_ptr != NULL) {
+ *out_ch_ptr = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (*out_ch_ptr == NULL)
+ return -ENOMEM;
+
+ nents = in_sgt_ptr->nents;
+
+ (*out_ch_ptr)->sgl =
+ kcalloc(nents, sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if ((*out_ch_ptr)->sgl == NULL)
+ return -ENOMEM;
+
+ memcpy((*out_ch_ptr)->sgl, in_sgt_ptr->sgl,
+ nents*sizeof((*out_ch_ptr)->sgl));
+ (*out_ch_ptr)->nents = nents;
+ (*out_ch_ptr)->orig_nents = in_sgt_ptr->orig_nents;
+ }
+ return 0;
+}
+
+static int ipa3_usb_smmu_free_sgt(struct sg_table **out_sgt_ptr)
+{
+ if (*out_sgt_ptr != NULL) {
+ kfree((*out_sgt_ptr)->sgl);
+ (*out_sgt_ptr)->sgl = NULL;
+ kfree(*out_sgt_ptr);
+ *out_sgt_ptr = NULL;
+ }
+ return 0;
+}
+
static int ipa3_usb_request_xdci_channel(
struct ipa_usb_xdci_chan_params *params,
+ enum ipa_usb_direction dir,
struct ipa_req_chan_out_params *out_params)
{
int result = -EFAULT;
struct ipa_request_gsi_channel_params chan_params;
enum ipa3_usb_transport_type ttype;
+ struct ipa_usb_xdci_chan_params *xdci_ch_params;
IPA_USB_DBG_LOW("entry\n");
if (params == NULL || out_params == NULL ||
@@ -1277,8 +1321,26 @@
}
/* store channel params for SMMU unmap */
- ipa3_usb_ctx->ttype_ctx[ttype].ch_params = *params;
+ if (dir == IPA_USB_DIR_UL)
+ xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params;
+ else
+ xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params;
+ *xdci_ch_params = *params;
+ result = ipa3_usb_smmu_store_sgt(
+ &xdci_ch_params->sgt_xfer_rings,
+ params->sgt_xfer_rings);
+ if (result) {
+ ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings);
+ return result;
+ }
+ result = ipa3_usb_smmu_store_sgt(
+ &xdci_ch_params->sgt_data_buff,
+ params->sgt_data_buff);
+ if (result) {
+ ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_data_buff);
+ return result;
+ }
chan_params.keep_ipa_awake = params->keep_ipa_awake;
chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV;
chan_params.evt_ring_params.intr = GSI_INTR_IRQ;
@@ -1286,7 +1348,7 @@
chan_params.evt_ring_params.ring_len = params->xfer_ring_len -
chan_params.evt_ring_params.re_size;
chan_params.evt_ring_params.ring_base_addr =
- params->xfer_ring_base_addr;
+ params->xfer_ring_base_addr_iova;
chan_params.evt_ring_params.ring_base_vaddr = NULL;
chan_params.evt_ring_params.int_modt = 0;
chan_params.evt_ring_params.int_modt = 0;
@@ -1306,7 +1368,7 @@
chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
chan_params.chan_params.ring_len = params->xfer_ring_len;
chan_params.chan_params.ring_base_addr =
- params->xfer_ring_base_addr;
+ params->xfer_ring_base_addr_iova;
chan_params.chan_params.ring_base_vaddr = NULL;
chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
@@ -1332,6 +1394,10 @@
chan_params.chan_scratch.xdci.outstanding_threshold =
((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) *
chan_params.chan_params.re_size;
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ chan_params.chan_scratch.xdci.outstanding_threshold = 0;
+
/* max_outstanding_tre is set in ipa3_request_gsi_channel() */
result = ipa3_request_gsi_channel(&chan_params, out_params);
if (result) {
@@ -1345,9 +1411,11 @@
}
static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
+ enum ipa_usb_direction dir,
enum ipa3_usb_transport_type ttype)
{
int result = 0;
+ struct ipa_usb_xdci_chan_params *xdci_ch_params;
IPA_USB_DBG_LOW("entry\n");
if (ttype < 0 || ttype >= IPA_USB_TRANSPORT_MAX) {
@@ -1367,8 +1435,17 @@
return result;
}
- result = ipa3_usb_smmu_map_xdci_channel(
- &ipa3_usb_ctx->ttype_ctx[ttype].ch_params, false);
+ if (dir == IPA_USB_DIR_UL)
+ xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params;
+ else
+ xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params;
+
+ result = ipa3_usb_smmu_map_xdci_channel(xdci_ch_params, false);
+
+ if (xdci_ch_params->sgt_xfer_rings != NULL)
+ ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings);
+ if (xdci_ch_params->sgt_data_buff != NULL)
+ ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_data_buff);
/* Change ipa_usb state to INITIALIZED */
if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
@@ -2131,14 +2208,15 @@
if (connect_params->teth_prot != IPA_USB_DIAG) {
result = ipa3_usb_request_xdci_channel(ul_chan_params,
- ul_out_params);
+ IPA_USB_DIR_UL, ul_out_params);
if (result) {
IPA_USB_ERR("failed to allocate UL channel.\n");
goto bad_params;
}
}
- result = ipa3_usb_request_xdci_channel(dl_chan_params, dl_out_params);
+ result = ipa3_usb_request_xdci_channel(dl_chan_params, IPA_USB_DIR_DL,
+ dl_out_params);
if (result) {
IPA_USB_ERR("failed to allocate DL/DPL channel.\n");
goto alloc_dl_chan_fail;
@@ -2174,11 +2252,12 @@
return 0;
connect_fail:
- ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl,
+ ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl, IPA_USB_DIR_DL,
IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot));
alloc_dl_chan_fail:
if (connect_params->teth_prot != IPA_USB_DIAG)
ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
+ IPA_USB_DIR_UL,
IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot));
bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
@@ -2250,14 +2329,16 @@
IPA_USB_ERR("failed to change state to stopped\n");
if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
- result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
+ result = ipa3_usb_release_xdci_channel(ul_clnt_hdl,
+ IPA_USB_DIR_UL, ttype);
if (result) {
IPA_USB_ERR("failed to release UL channel.\n");
return result;
}
}
- result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
+ result = ipa3_usb_release_xdci_channel(dl_clnt_hdl,
+ IPA_USB_DIR_DL, ttype);
if (result) {
IPA_USB_ERR("failed to release DL channel.\n");
return result;
@@ -2465,9 +2546,11 @@
if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
(ipa3_usb_ctx->num_init_prot == 0)) {
if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
- IPA_USB_ERR("failed to change state to invalid\n");
+ IPA_USB_ERR(
+ "failed to change state to invalid\n");
if (ipa_pm_is_used()) {
ipa3_usb_deregister_pm(ttype);
+ ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
} else {
ipa_rm_delete_resource(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
@@ -2868,9 +2951,8 @@
pr_debug("entry\n");
ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
if (ipa3_usb_ctx == NULL) {
- pr_err("failed to allocate memory\n");
pr_err(":ipa_usb init failed\n");
- return -EFAULT;
+ return -ENOMEM;
}
memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
index 613bed3..2723a35 100644
--- a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -140,7 +140,7 @@
ipa_rm_it_handles[resource_name].work_in_progress = false;
pwlock = &(ipa_rm_it_handles[resource_name].w_lock);
name = ipa_rm_it_handles[resource_name].w_lock_name;
- snprintf(name, MAX_WS_NAME, "IPA_RM%d\n", resource_name);
+ snprintf(name, MAX_WS_NAME, "IPA_RM%d", resource_name);
wakeup_source_init(pwlock, name);
INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
ipa_rm_inactivity_timer_func);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 37614cc..f062ed2 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1455,7 +1455,11 @@
pr_err("Table Size:%d\n",
ipa_ctx->nat_mem.size_base_tables);
- pr_err("Expansion Table Size:%d\n",
+ if (!ipa_ctx->nat_mem.size_expansion_tables)
+ pr_err("Expansion Table Size:%d\n",
+ ipa_ctx->nat_mem.size_expansion_tables);
+ else
+ pr_err("Expansion Table Size:%d\n",
ipa_ctx->nat_mem.size_expansion_tables-1);
if (!ipa_ctx->nat_mem.is_sys_mem)
@@ -1470,6 +1474,8 @@
pr_err("\nBase Table:\n");
} else {
+ if (!ipa_ctx->nat_mem.size_expansion_tables)
+ continue;
tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1;
base_tbl =
(u32 *)ipa_ctx->nat_mem.ipv4_expansion_rules_addr;
@@ -1569,6 +1575,8 @@
pr_err("\nIndex Table:\n");
} else {
+ if (!ipa_ctx->nat_mem.size_expansion_tables)
+ continue;
tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1;
indx_tbl =
(u32 *)ipa_ctx->nat_mem.index_table_expansion_addr;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index a454382..cf8f0b8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -583,7 +583,8 @@
{
IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
&pa, iova, len);
- wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+ wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res),
+ GFP_KERNEL);
if (!wdi_res[res_idx].res)
BUG();
wdi_res[res_idx].nents = 1;
@@ -609,8 +610,8 @@
return;
}
- wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
- GFP_KERNEL);
+ wdi_res[res_idx].res = kcalloc(sgt->nents,
+ sizeof(*wdi_res[res_idx].res), GFP_KERNEL);
if (!wdi_res[res_idx].res)
BUG();
wdi_res[res_idx].nents = sgt->nents;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 9f0cec9..068c6c5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1661,6 +1661,7 @@
IPAWANERR("Failed to allocate memory.\n");
return -ENOMEM;
}
+ extend_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0';
len = sizeof(wan_msg->upstream_ifname) >
sizeof(extend_ioctl_data.u.if_name) ?
sizeof(extend_ioctl_data.u.if_name) :
@@ -2808,7 +2809,8 @@
if (rc) {
kfree(sap_stats);
return rc;
- } else if (reset) {
+ } else if (data == NULL) {
+ IPAWANDBG("only reset wlan stats\n");
kfree(sap_stats);
return 0;
}
@@ -2881,6 +2883,7 @@
kfree(resp);
return rc;
} else if (data == NULL) {
+ IPAWANDBG("only reset modem stats\n");
kfree(req);
kfree(resp);
return 0;
@@ -3075,11 +3078,8 @@
int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
- struct wan_ioctl_query_tether_stats tether_stats;
int rc = 0;
- memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
-
/* prevent string buffer overflows */
data->upstreamIface[IFNAMSIZ-1] = '\0';
@@ -3100,7 +3100,7 @@
} else {
IPAWANDBG(" reset modem-backhaul stats\n");
rc = rmnet_ipa_query_tethering_stats_modem(
- &tether_stats, true);
+ NULL, true);
if (rc) {
IPAWANERR("reset MODEM stats failed\n");
return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index ae24675..01c0736 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4394,6 +4394,8 @@
/* Prevent consequent calls from trying to load the FW again. */
if (ipa3_ctx->ipa_initialization_complete)
return 0;
+ /* move proxy vote for modem on ipa3_post_init */
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
/*
* indication whether working in MHI config or non MHI config is given
@@ -4858,7 +4860,6 @@
int result = 0;
int i;
struct ipa3_rt_tbl_set *rset;
- struct ipa_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -5049,8 +5050,7 @@
}
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
- ipa3_active_clients_log_inc(&log_info, false);
+ /* move proxy vote for modem to ipa3_post_init() */
atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
/* Create workqueues for power management */
@@ -5296,6 +5296,8 @@
IPADBG("ipa cdev added successful. major:%d minor:%d\n",
MAJOR(ipa3_ctx->dev_num),
MINOR(ipa3_ctx->dev_num));
+ /* proxy vote for modem is added in ipa3_post_init() phase */
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
fail_cdev_add:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 8872c24..17e4838 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -548,10 +548,17 @@
return 0;
}
-int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, u32 size, bool map)
+int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt)
{
struct iommu_domain *smmu_domain;
int res;
+ phys_addr_t phys;
+ unsigned long va;
+ struct scatterlist *sg;
+ int count = 0;
+ size_t len;
+ int i;
+ struct page *page;
if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP])
return 0;
@@ -562,33 +569,53 @@
return -EINVAL;
}
+ /*
+ * The USB GSI driver updates sgt regardless of whether USB S1
+ * is enabled or bypassed.
+ * If USB S1 is enabled using IOMMU, iova != pa.
+ * If USB S1 is bypassed, iova == pa.
+ */
if (map) {
- res = ipa3_iommu_map(smmu_domain,
- rounddown(iova, PAGE_SIZE),
- rounddown(phys_addr, PAGE_SIZE),
- roundup(size + iova - rounddown(iova, PAGE_SIZE),
- PAGE_SIZE),
- IOMMU_READ | IOMMU_WRITE);
- if (res) {
- IPAERR("Fail to map 0x%llx->0x%pa\n", iova, &phys_addr);
- return -EINVAL;
+ if (sgt != NULL) {
+ va = rounddown(iova, PAGE_SIZE);
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ page = sg_page(sg);
+ phys = page_to_phys(page);
+ len = PAGE_ALIGN(sg->offset + sg->length);
+ res = ipa3_iommu_map(smmu_domain, va, phys,
+ len, IOMMU_READ | IOMMU_WRITE);
+ if (res) {
+ IPAERR("Fail to map pa=%pa\n", &phys);
+ return -EINVAL;
+ }
+ va += len;
+ count++;
+ }
+ } else {
+ res = ipa3_iommu_map(smmu_domain,
+ rounddown(iova, PAGE_SIZE),
+ rounddown(iova, PAGE_SIZE),
+ roundup(size + iova -
+ rounddown(iova, PAGE_SIZE),
+ PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE);
+ if (res) {
+ IPAERR("Fail to map 0x%llx\n", iova);
+ return -EINVAL;
+ }
}
} else {
res = iommu_unmap(smmu_domain,
- rounddown(iova, PAGE_SIZE),
- roundup(size + iova - rounddown(iova, PAGE_SIZE),
- PAGE_SIZE));
+ rounddown(iova, PAGE_SIZE),
+ roundup(size + iova - rounddown(iova, PAGE_SIZE),
+ PAGE_SIZE));
if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
PAGE_SIZE)) {
- IPAERR("Fail to unmap 0x%llx->0x%pa\n",
- iova, &phys_addr);
+ IPAERR("Fail to unmap 0x%llx\n", iova);
return -EINVAL;
}
}
-
- IPADBG("Peer buff %s 0x%llx->0x%pa\n", map ? "map" : "unmap",
- iova, &phys_addr);
-
+ IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova);
return 0;
}
@@ -714,6 +741,10 @@
sizeof(union __packed gsi_channel_scratch));
ep->chan_scratch.xdci.max_outstanding_tre =
params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ ep->chan_scratch.xdci.max_outstanding_tre = 0;
+
gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
params->chan_scratch);
if (gsi_res != GSI_STATUS_SUCCESS) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 3aaae8d..90edd2b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -222,7 +222,13 @@
tx_pkt->no_unmap_dma = true;
tx_pkt->sys = sys;
spin_lock_bh(&sys->spinlock);
+ if (unlikely(!sys->nop_pending)) {
+ spin_unlock_bh(&sys->spinlock);
+ kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+ return;
+ }
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+ sys->nop_pending = false;
spin_unlock_bh(&sys->spinlock);
memset(&nop_xfer, 0, sizeof(nop_xfer));
@@ -236,6 +242,8 @@
return;
}
sys->len_pending_xfer = 0;
+ /* make sure TAG process is sent before clocks are gated */
+ ipa3_ctx->tag_process_before_gating = true;
}
@@ -271,6 +279,7 @@
int result;
u32 mem_flag = GFP_ATOMIC;
const struct ipa_gsi_ep_config *gsi_ep_cfg;
+ bool send_nop = false;
if (unlikely(!in_atomic))
mem_flag = GFP_KERNEL;
@@ -408,10 +417,14 @@
}
kfree(gsi_xfer_elem_array);
+ if (sys->use_comm_evt_ring && !sys->nop_pending) {
+ sys->nop_pending = true;
+ send_nop = true;
+ }
spin_unlock_bh(&sys->spinlock);
/* set the timer for sending the NOP descriptor */
- if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
+ if (send_nop) {
ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
IPADBG_LOW("scheduling timer for ch %lu\n",
@@ -419,6 +432,9 @@
hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
}
+ /* make sure TAG process is sent before clocks are gated */
+ ipa3_ctx->tag_process_before_gating = true;
+
return 0;
failure_dma_map:
@@ -823,7 +839,7 @@
* pipe will be unsuspended as part of
* enabling IPA clocks
*/
- ipa_pm_activate_sync(sys->pm_hdl);
+ ipa_pm_activate(sys->pm_hdl);
ipa_pm_deferred_deactivate(sys->pm_hdl);
break;
default:
@@ -3679,6 +3695,12 @@
ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
GSI_CHAN_RE_SIZE_16B;
ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ ch_scratch.gpi.max_outstanding_tre = 0;
+ ch_scratch.gpi.outstanding_threshold = 0;
+ }
+
result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write scratch %d\n", result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index d7d74a3..7bd1731 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -626,6 +626,7 @@
struct delayed_work switch_to_intr_work;
enum ipa3_sys_pipe_policy policy;
bool use_comm_evt_ring;
+ bool nop_pending;
int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
void (*free_skb)(struct sk_buff *skb);
@@ -2300,8 +2301,7 @@
int ipa3_rx_poll(u32 clnt_hdl, int budget);
void ipa3_recycle_wan_skb(struct sk_buff *skb);
int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
-int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
- u32 size, bool map);
+int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt);
void ipa3_reset_freeze_vote(void);
int ipa3_ntn_init(void);
int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index cb970ba..82cd8187 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -302,6 +302,10 @@
ep_cfg->ipa_if_tlv * ch_props.re_size;
ch_scratch.mhi.outstanding_threshold =
min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ ch_scratch.mhi.max_outstanding_tre = 0;
+ ch_scratch.mhi.outstanding_threshold = 0;
+ }
ch_scratch.mhi.oob_mod_threshold = 4;
if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index bafc3ca..140afa8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,7 @@
* @activate_work: work for activate (blocking case)
* @deactivate work: delayed work for deferred_deactivate function
* @complete: generic wait-for-completion handler
+ * @wlock: wake source to prevent AP suspend
*/
struct ipa_pm_client {
char name[IPA_PM_MAX_EX_CL];
@@ -170,6 +171,7 @@
struct work_struct activate_work;
struct delayed_work deactivate_work;
struct completion complete;
+ struct wakeup_source wlock;
};
/*
@@ -240,7 +242,7 @@
struct ipa_pm_client *client;
/* Create a basic array to hold throughputs*/
- for (i = 0, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+ for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
client = ipa_pm_ctx->clients[i];
if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
/* default case */
@@ -395,8 +397,11 @@
unsigned long flags;
client = container_of(work, struct ipa_pm_client, activate_work);
- if (!client->skip_clk_vote)
+ if (!client->skip_clk_vote) {
IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
+ if (client->group == IPA_PM_GROUP_APPS)
+ __pm_stay_awake(&client->wlock);
+ }
spin_lock_irqsave(&client->state_lock, flags);
IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
@@ -414,9 +419,11 @@
complete_all(&client->complete);
if (dec_clk) {
- ipa_set_tag_process_before_gating(true);
- if (!client->skip_clk_vote)
+ if (!client->skip_clk_vote) {
IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+ if (client->group == IPA_PM_GROUP_APPS)
+ __pm_relax(&client->wlock);
+ }
IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
return;
@@ -465,9 +472,11 @@
client->state = IPA_PM_DEACTIVATED;
IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
spin_unlock_irqrestore(&client->state_lock, flags);
- ipa_set_tag_process_before_gating(true);
- if (!client->skip_clk_vote)
+ if (!client->skip_clk_vote) {
IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+ if (client->group == IPA_PM_GROUP_APPS)
+ __pm_relax(&client->wlock);
+ }
deactivate_client(client->hdl);
do_clk_scaling();
@@ -489,7 +498,8 @@
n = -ENOBUFS;
- for (i = IPA_PM_MAX_CLIENTS - 1; i >= 0; i--) {
+ /* 0 is not a valid handle */
+ for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) {
if (ipa_pm_ctx->clients[i] == NULL) {
n = i;
continue;
@@ -670,7 +680,7 @@
}
/**
- * ipa_rm_delete_register() - register an IPA PM client with the PM
+ * ipa_pm_register() - register an IPA PM client with the PM
* @register_params: params for a client like throughput, callback, etc.
* @hdl: int pointer that will be used as an index to access the client
*
@@ -682,6 +692,7 @@
int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
{
struct ipa_pm_client *client;
+ struct wakeup_source *wlock;
if (ipa_pm_ctx == NULL) {
IPA_PM_ERR("PM_ctx is null\n");
@@ -730,6 +741,8 @@
client->group = params->group;
client->hdl = *hdl;
client->skip_clk_vote = params->skip_clk_vote;
+ wlock = &client->wlock;
+ wakeup_source_init(wlock, client->name);
/* add client to exception list */
if (add_client_to_exception_list(*hdl)) {
@@ -793,6 +806,7 @@
if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl])
ipa_pm_ctx->clients_by_pipe[i] = NULL;
}
+ wakeup_source_trash(&client->wlock);
kfree(client);
ipa_pm_ctx->clients[hdl] = NULL;
@@ -910,6 +924,8 @@
/* we got the clocks */
if (result == 0) {
client->state = IPA_PM_ACTIVATED;
+ if (client->group == IPA_PM_GROUP_APPS)
+ __pm_stay_awake(&client->wlock);
spin_unlock_irqrestore(&client->state_lock, flags);
activate_client(client->hdl);
if (sync)
@@ -1043,7 +1059,7 @@
return -EINVAL;
}
- for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+ for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
client = ipa_pm_ctx->clients[i];
if (client == NULL)
@@ -1073,9 +1089,11 @@
IPA_PM_DBG_STATE(client->hdl, client->name,
client->state);
spin_unlock_irqrestore(&client->state_lock, flags);
- ipa_set_tag_process_before_gating(true);
- if (!client->skip_clk_vote)
+ if (!client->skip_clk_vote) {
IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+ if (client->group == IPA_PM_GROUP_APPS)
+ __pm_relax(&client->wlock);
+ }
deactivate_client(client->hdl);
} else /* if activated or deactivated, we do nothing */
spin_unlock_irqrestore(&client->state_lock, flags);
@@ -1126,9 +1144,11 @@
spin_unlock_irqrestore(&client->state_lock, flags);
/* else case (Deactivates all Activated cases)*/
- ipa_set_tag_process_before_gating(true);
- if (!client->skip_clk_vote)
+ if (!client->skip_clk_vote) {
IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+ if (client->group == IPA_PM_GROUP_APPS)
+ __pm_relax(&client->wlock);
+ }
spin_lock_irqsave(&client->state_lock, flags);
client->state = IPA_PM_DEACTIVATED;
@@ -1280,7 +1300,7 @@
cnt += result;
- for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+ for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
client = ipa_pm_ctx->clients[i];
if (client == NULL)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index b2f203a..205e7a5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,7 @@
#include <linux/msm_ipa.h>
/* internal to ipa */
-#define IPA_PM_MAX_CLIENTS 10
+#define IPA_PM_MAX_CLIENTS 11 /* usable max is 10: handle 0 is reserved, clients start at 1 */
#define IPA_PM_MAX_EX_CL 64
#define IPA_PM_THRESHOLD_MAX 5
#define IPA_PM_EXCEPTION_MAX 2
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 941e489..648db5e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1209,8 +1209,6 @@
IPADBG("Skipping endpoint configuration.\n");
}
- ipa3_enable_data_path(ipa_ep_idx);
-
out->clnt_hdl = ipa_ep_idx;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
@@ -1316,6 +1314,7 @@
struct ipa3_ep_context *ep;
union IpaHwWdiCommonChCmdData_t enable;
struct ipa_ep_cfg_holb holb_cfg;
+ struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1348,6 +1347,20 @@
goto uc_timeout;
}
+ /* Assign the resource group for pipe */
+ memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+ rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+ if (rsrc_grp.rsrc_grp == -1) {
+ IPAERR("invalid group for client %d\n", ep->client);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPADBG("Setting group %d for pipe %d\n",
+ rsrc_grp.rsrc_grp, clnt_hdl);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+ &rsrc_grp);
+
if (IPA_CLIENT_IS_CONS(ep->client)) {
memset(&holb_cfg, 0, sizeof(holb_cfg));
holb_cfg.en = IPA_HOLB_TMR_DIS;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index de3711e..9974b87 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1117,7 +1117,7 @@
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 7, 0, 8, 16, IPA_EE_UC } },
+ { 6, 2, 8, 16, IPA_EE_UC } },
[IPA_4_0][IPA_CLIENT_USB_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
@@ -1153,13 +1153,7 @@
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 9, 1, 8, 16, IPA_EE_UC } },
- [IPA_4_0][IPA_CLIENT_Q6_LAN_PROD] = {
- true, IPA_v4_0_GROUP_UL_DL,
- true,
- IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR,
- { 6, 2, 12, 24, IPA_EE_Q6 } },
+ { 9, 0, 8, 16, IPA_EE_UC } },
[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
@@ -1196,13 +1190,13 @@
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- {7, 0, 8, 16, IPA_EE_UC } },
+ { 7, 9, 8, 16, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_TEST4_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 8, 10, 8, 16, IPA_EE_AP } },
+ {8, 10, 8, 16, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_WLAN1_CONS] = {
@@ -1210,7 +1204,7 @@
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
- { 18, 2, 6, 9, IPA_EE_UC } },
+ { 18, 3, 6, 9, IPA_EE_UC } },
[IPA_4_0][IPA_CLIENT_WLAN2_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
@@ -1258,7 +1252,7 @@
true,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
- { 22, 3, 17, 17, IPA_EE_UC } },
+ { 22, 1, 17, 17, IPA_EE_UC } },
[IPA_4_0][IPA_CLIENT_Q6_LAN_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
@@ -1284,25 +1278,25 @@
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
- { 12, 2, 5, 5, IPA_EE_AP } },
+ { 11, 6, 9, 9, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_TEST1_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { 12, 2, 5, 5, IPA_EE_AP } },
+ QMB_MASTER_SELECT_PCIE,
+ { 11, 6, 9, 9, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_TEST2_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
- { 18, 2, 6, 9, IPA_EE_UC } },
+ { 12, 2, 5, 5, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_TEST3_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { 20, 13, 9, 9, IPA_EE_AP } },
+ QMB_MASTER_SELECT_PCIE,
+ { 19, 12, 9, 9, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_TEST4_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
@@ -1318,12 +1312,6 @@
{ 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_4_0_MHI */
- [IPA_4_0_MHI][IPA_CLIENT_USB_PROD] = {
- false, IPA_EP_NOT_ALLOCATED,
- true,
- IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { -1, -1, -1, -1, -1 } },
[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD] = {
true, IPA_v4_0_MHI_GROUP_DDR,
true,
@@ -1342,12 +1330,6 @@
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_PCIE,
{ 1, 0, 8, 16, IPA_EE_AP } },
- [IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_PROD] = {
- true, IPA_v4_0_MHI_GROUP_DDR,
- true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR,
- { 6, 2, 12, 24, IPA_EE_Q6 } },
[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
@@ -1404,18 +1386,7 @@
QMB_MASTER_SELECT_DDR,
{ 8, 10, 8, 16, IPA_EE_AP } },
- [IPA_4_0_MHI][IPA_CLIENT_USB_CONS] = {
- false, IPA_EP_NOT_ALLOCATED,
- false,
- IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { -1, -1, -1, -1, -1 } },
- [IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS] = {
- false, IPA_EP_NOT_ALLOCATED,
- false,
- IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { -1, -1, -1, -1, -1 } },
+
[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS] = {
true, IPA_v4_0_MHI_GROUP_DDR,
false,
@@ -1470,25 +1441,25 @@
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
- { 12, 2, 5, 5, IPA_EE_AP } },
+ { 11, 6, 9, 9, IPA_EE_AP } },
[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { 12, 2, 5, 5, IPA_EE_AP } },
+ QMB_MASTER_SELECT_PCIE,
+ { 11, 6, 9, 9, IPA_EE_AP } },
[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_PCIE,
- { 18, 11, 6, 9, IPA_EE_AP } },
+ QMB_MASTER_SELECT_DDR,
+ { 12, 2, 5, 5, IPA_EE_AP } },
[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR,
- { 20, 13, 9, 9, IPA_EE_AP } },
+ QMB_MASTER_SELECT_PCIE,
+ { 19, 12, 9, 9, IPA_EE_AP } },
[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS] = {
true, IPA_v4_0_GROUP_UL_DL,
false,
@@ -3993,6 +3964,7 @@
}
kfree(tag_desc);
tag_desc = NULL;
+ ipa3_ctx->tag_process_before_gating = false;
IPADBG("waiting for TAG response\n");
res = wait_for_completion_timeout(&comp->comp, timeout);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 4920942..48e7d7c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -2129,15 +2129,13 @@
return;
}
- valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
- IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
- valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
- IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+ valmask->val = (1 << IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT) &&
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK;
+ valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK;
- valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
- IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
- valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
- IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+ valmask->val |= ((0 << IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT) &&
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+ valmask->mask |= IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK;
}
u32 ipahal_aggr_get_max_byte_limit(void)
@@ -2188,7 +2186,7 @@
return;
}
IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
- valmask->mask = bmsk << shft;
+ valmask->mask = bmsk;
}
void ipahal_get_fltrt_hash_flush_valmask(
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 5b0834a..512dddd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1740,6 +1740,7 @@
IPAWANERR("Failed to allocate memory.\n");
return -ENOMEM;
}
+ extend_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0';
len = sizeof(wan_msg->upstream_ifname) >
sizeof(extend_ioctl_data.u.if_name) ?
sizeof(extend_ioctl_data.u.if_name) :
@@ -2711,8 +2712,13 @@
if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
/* clean up cached QMI msg/handlers */
ipa3_qmi_service_exit();
- /*hold a proxy vote for the modem*/
- ipa3_proxy_clk_vote();
+ /*
+ * hold a proxy vote for the modem.
+ * for IPA 4.0 offline charge is not needed and proxy vote
+ * is already held.
+ */
+ if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
+ ipa3_proxy_clk_vote();
ipa3_reset_freeze_vote();
IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
break;
@@ -3112,7 +3118,8 @@
IPAWANERR("can't get ipa3_get_wlan_stats\n");
kfree(sap_stats);
return rc;
- } else if (reset) {
+ } else if (data == NULL) {
+ IPAWANDBG("only reset wlan stats\n");
kfree(sap_stats);
return 0;
}
@@ -3183,6 +3190,7 @@
kfree(resp);
return rc;
} else if (data == NULL) {
+ IPAWANDBG("only reset modem stats\n");
kfree(req);
kfree(resp);
return 0;
@@ -3377,11 +3385,8 @@
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
- struct wan_ioctl_query_tether_stats tether_stats;
int rc = 0;
- memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
-
/* prevent string buffer overflows */
data->upstreamIface[IFNAMSIZ-1] = '\0';
@@ -3402,7 +3407,7 @@
} else {
IPAWANERR(" reset modem-backhaul stats\n");
rc = rmnet_ipa3_query_tethering_stats_modem(
- &tether_stats, true);
+ NULL, true);
if (rc) {
IPAWANERR("reset MODEM stats failed\n");
return rc;
@@ -4021,6 +4026,10 @@
ipa3_qmi_init();
/* Register for Modem SSR */
+ /* SSR is not supported yet on IPA 4.0 */
+ if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_0)
+ return platform_driver_register(&rmnet_ipa_driver);
+
rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
SUBSYS_MODEM,
&ipa3_ssr_notifier);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 246f32e..929242a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -253,7 +253,7 @@
(struct wan_ioctl_set_data_quota *)param);
if (rc != 0) {
IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
- if (retval == -ENODEV)
+ if (rc == -ENODEV)
retval = -ENODEV;
else
retval = -EFAULT;
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index 036902a..bcbcd87 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,12 +49,14 @@
/**
* struct ipa_ut_dbgfs_test_write_work_ctx - work_queue context
- * @dbgfs: work_struct for the write_work
- * @file: file to be writen to
+ * @dbgfs_Work: work_struct for the write_work
+ * @meta_type: See enum ipa_ut_meta_test_type
+ * @user_data: user data usually used to point to suite or test object
*/
struct ipa_ut_dbgfs_test_write_work_ctx {
struct work_struct dbgfs_work;
- struct file *file;
+ long meta_type;
+ void *user_data;
};
static ssize_t ipa_ut_dbgfs_enable_read(struct file *file,
@@ -219,7 +221,6 @@
{
struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx;
struct ipa_ut_suite *suite;
- struct file *file;
int i;
enum ipa_hw_type ipa_ver;
int rc = 0;
@@ -232,14 +233,9 @@
IPA_UT_DBG("Entry\n");
mutex_lock(&ipa_ut_ctx->lock);
- file = write_work_ctx->file;
- if (file == NULL) {
- rc = -EFAULT;
- goto unlock_mutex;
- }
- suite = file->f_inode->i_private;
+ suite = (struct ipa_ut_suite *)(write_work_ctx->user_data);
ipa_assert_on(!suite);
- meta_type = (long)(file->private_data);
+ meta_type = write_work_ctx->meta_type;
IPA_UT_DBG("Meta test type %ld\n", meta_type);
_IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
@@ -354,7 +350,7 @@
}
/*
- * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a for a meta test
+ * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a meta test
* @params: write fops
*
* Run all tests in a suite using a work queue so it does not race with
@@ -373,7 +369,8 @@
return -ENOMEM;
}
- write_work_ctx->file = file;
+ write_work_ctx->user_data = file->f_inode->i_private;
+ write_work_ctx->meta_type = (long)(file->private_data);
INIT_WORK(&write_work_ctx->dbgfs_work,
ipa_ut_dbgfs_meta_test_write_work_func);
@@ -515,7 +512,6 @@
struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx;
struct ipa_ut_test *test;
struct ipa_ut_suite *suite;
- struct file *file;
bool tst_fail = false;
int rc = 0;
enum ipa_hw_type ipa_ver;
@@ -526,12 +522,7 @@
IPA_UT_DBG("Entry\n");
mutex_lock(&ipa_ut_ctx->lock);
- file = write_work_ctx->file;
- if (file == NULL) {
- rc = -EFAULT;
- goto unlock_mutex;
- }
- test = file->f_inode->i_private;
+ test = (struct ipa_ut_test *)(write_work_ctx->user_data);
ipa_assert_on(!test);
_IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
@@ -633,7 +624,8 @@
return -ENOMEM;
}
- write_work_ctx->file = file;
+ write_work_ctx->user_data = file->f_inode->i_private;
+ write_work_ctx->meta_type = (long)(file->private_data);
INIT_WORK(&write_work_ctx->dbgfs_work,
ipa_ut_dbgfs_test_write_work_func);
@@ -979,6 +971,7 @@
IS_ERR(ipa_ut_ctx->test_dbgfs_root)) {
IPA_UT_ERR("failed to create test debugfs dir\n");
ret = -EFAULT;
+ destroy_workqueue(ipa_ut_ctx->wq);
goto unlock_mutex;
}
@@ -988,6 +981,7 @@
if (!dfile_enable || IS_ERR(dfile_enable)) {
IPA_UT_ERR("failed to create enable debugfs file\n");
ret = -EFAULT;
+ destroy_workqueue(ipa_ut_ctx->wq);
goto fail_clean_dbgfs;
}
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index bc4df04..7a93e0e 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,20 @@
EXTCON_NONE,
};
+static int msm_ext_disp_find_index(struct extcon_dev *edev,
+ enum msm_ext_disp_type id)
+{
+ int i;
+
+ /* Find the index of the extcon cable in edev->supported_cable */
+ for (i = 0; i < edev->max_supported; i++) {
+ if (edev->supported_cable[i] == id)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp)
{
int ret = 0;
@@ -145,7 +159,8 @@
enum msm_ext_disp_cable_state new_state)
{
int ret = 0;
- int state;
+ int state, index;
+ enum msm_ext_disp_cable_state current_state;
if (!ext_disp->ops) {
pr_err("codec not registered, skip notification\n");
@@ -154,13 +169,27 @@
}
state = ext_disp->audio_sdev.state;
- ret = extcon_set_state_sync(&ext_disp->audio_sdev,
- ext_disp->current_disp, !!new_state);
- pr_debug("Audio state %s %d\n",
- ext_disp->audio_sdev.state == state ?
- "is same" : "switched to",
- ext_disp->audio_sdev.state);
+ index = msm_ext_disp_find_index(&ext_disp->audio_sdev, type);
+ if (index < 0 || index >= ext_disp->audio_sdev.max_supported) {
+ pr_err("invalid index\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (state & BIT(index))
+ current_state = EXT_DISPLAY_CABLE_CONNECT;
+ else
+ current_state = EXT_DISPLAY_CABLE_DISCONNECT;
+
+ if (current_state == new_state) {
+ ret = -EEXIST;
+ pr_debug("same state\n");
+ } else {
+ ret = extcon_set_state_sync(&ext_disp->audio_sdev,
+ ext_disp->current_disp, !!new_state);
+ pr_debug("state changed to %d\n", new_state);
+ }
end:
return ret;
}
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index bec16dd..5d094d2 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -46,7 +46,7 @@
* @bus_bw: Client handle to the bus bandwidth request.
* @bus_mas_id: Master Endpoint ID for bus BW request.
* @bus_slv_id: Slave Endpoint ID for bus BW request.
- * @ab_ib_lock: Lock to protect the bus ab & ib values, list.
+ * @geni_dev_lock: Lock to protect the bus ab & ib values, list.
* @ab_list_head: Sorted resource list based on average bus BW.
* @ib_list_head: Sorted resource list based on instantaneous bus BW.
* @cur_ab: Current Bus Average BW request value.
@@ -67,7 +67,7 @@
struct msm_bus_client_handle *bus_bw;
u32 bus_mas_id;
u32 bus_slv_id;
- struct mutex ab_ib_lock;
+ struct mutex geni_dev_lock;
struct list_head ab_list_head;
struct list_head ib_list_head;
unsigned long cur_ab;
@@ -609,7 +609,7 @@
if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
return -EINVAL;
- mutex_lock(&geni_se_dev->ab_ib_lock);
+ mutex_lock(&geni_se_dev->geni_dev_lock);
list_del_init(&rsc->ab_list);
geni_se_dev->cur_ab -= rsc->ab;
@@ -630,7 +630,7 @@
"%s: %lu:%lu (%lu:%lu) %d\n", __func__,
geni_se_dev->cur_ab, geni_se_dev->cur_ib,
rsc->ab, rsc->ib, bus_bw_update);
- mutex_unlock(&geni_se_dev->ab_ib_lock);
+ mutex_unlock(&geni_se_dev->geni_dev_lock);
return ret;
}
@@ -704,7 +704,7 @@
bool bus_bw_update = false;
int ret = 0;
- mutex_lock(&geni_se_dev->ab_ib_lock);
+ mutex_lock(&geni_se_dev->geni_dev_lock);
list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
geni_se_dev->cur_ab += rsc->ab;
@@ -728,7 +728,7 @@
"%s: %lu:%lu (%lu:%lu) %d\n", __func__,
geni_se_dev->cur_ab, geni_se_dev->cur_ib,
rsc->ab, rsc->ib, bus_bw_update);
- mutex_unlock(&geni_se_dev->ab_ib_lock);
+ mutex_unlock(&geni_se_dev->geni_dev_lock);
return ret;
}
@@ -878,24 +878,29 @@
struct geni_se_device *geni_se_dev;
int i;
unsigned long prev_freq = 0;
+ int ret = 0;
if (unlikely(!rsc || !rsc->wrapper_dev || !rsc->se_clk || !tbl))
return -EINVAL;
- *tbl = NULL;
geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
if (unlikely(!geni_se_dev))
return -EPROBE_DEFER;
+ mutex_lock(&geni_se_dev->geni_dev_lock);
+ *tbl = NULL;
if (geni_se_dev->clk_perf_tbl) {
*tbl = geni_se_dev->clk_perf_tbl;
- return geni_se_dev->num_clk_levels;
+ ret = geni_se_dev->num_clk_levels;
+ goto exit_se_clk_tbl_get;
}
geni_se_dev->clk_perf_tbl = kzalloc(sizeof(*geni_se_dev->clk_perf_tbl) *
MAX_CLK_PERF_LEVEL, GFP_KERNEL);
- if (!geni_se_dev->clk_perf_tbl)
- return -ENOMEM;
+ if (!geni_se_dev->clk_perf_tbl) {
+ ret = -ENOMEM;
+ goto exit_se_clk_tbl_get;
+ }
for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
geni_se_dev->clk_perf_tbl[i] = clk_round_rate(rsc->se_clk,
@@ -908,7 +913,10 @@
}
geni_se_dev->num_clk_levels = i;
*tbl = geni_se_dev->clk_perf_tbl;
- return geni_se_dev->num_clk_levels;
+ ret = geni_se_dev->num_clk_levels;
+exit_se_clk_tbl_get:
+ mutex_unlock(&geni_se_dev->geni_dev_lock);
+ return ret;
}
EXPORT_SYMBOL(geni_se_clk_tbl_get);
@@ -1437,7 +1445,7 @@
mutex_init(&geni_se_dev->iommu_lock);
INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
- mutex_init(&geni_se_dev->ab_ib_lock);
+ mutex_init(&geni_se_dev->geni_dev_lock);
geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
dev_name(geni_se_dev->dev), 0);
if (!geni_se_dev->log_ctx)
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 95e3782..dec698f 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3139,7 +3139,8 @@
}
dev = &ctx->usb_bam_pdev->dev;
- if (dev && dev->parent && !device_property_present(dev->parent,
+ if (dev && dev->parent && device_property_present(dev->parent, "iommus")
+ && !device_property_present(dev->parent,
"qcom,smmu-s1-bypass")) {
pr_info("%s: setting SPS_BAM_SMMU_EN flag with (%s)\n",
__func__, dev_name(dev));
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 99120f4..a2a35c6 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -170,6 +170,7 @@
FG_SRAM_SYS_TERM_CURR,
FG_SRAM_CHG_TERM_CURR,
FG_SRAM_CHG_TERM_BASE_CURR,
+ FG_SRAM_CUTOFF_CURR,
FG_SRAM_DELTA_MSOC_THR,
FG_SRAM_DELTA_BSOC_THR,
FG_SRAM_RECHARGE_SOC_THR,
@@ -262,6 +263,7 @@
int chg_term_curr_ma;
int chg_term_base_curr_ma;
int sys_term_curr_ma;
+ int cutoff_curr_ma;
int delta_soc_thr;
int recharge_soc_thr;
int recharge_volt_thr_mv;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 2c62218..0894f37 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -35,6 +35,8 @@
#define ESR_PULSE_THRESH_OFFSET 3
#define SLOPE_LIMIT_WORD 3
#define SLOPE_LIMIT_OFFSET 0
+#define CUTOFF_CURR_WORD 4
+#define CUTOFF_CURR_OFFSET 0
#define CUTOFF_VOLT_WORD 5
#define CUTOFF_VOLT_OFFSET 0
#define SYS_TERM_CURR_WORD 6
@@ -208,6 +210,8 @@
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
+ PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 3,
+ 1000000, 122070, 0, fg_encode_current, NULL),
PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, 1,
2048, 100, 0, fg_encode_default, NULL),
PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, 1,
@@ -284,6 +288,8 @@
PARAM(CHG_TERM_BASE_CURR, CHG_TERM_CURR_v2_WORD,
CHG_TERM_BASE_CURR_v2_OFFSET, 1, 1024, 1000, 0,
fg_encode_current, NULL),
+ PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 3,
+ 1000000, 122070, 0, fg_encode_current, NULL),
PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
1, 2048, 100, 0, fg_encode_default, NULL),
PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
@@ -3974,6 +3980,16 @@
return rc;
}
+ fg_encode(chip->sp, FG_SRAM_CUTOFF_CURR, chip->dt.cutoff_curr_ma,
+ buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CUTOFF_CURR].addr_word,
+ chip->sp[FG_SRAM_CUTOFF_CURR].addr_byte, buf,
+ chip->sp[FG_SRAM_CUTOFF_CURR].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing cutoff_curr, rc=%d\n", rc);
+ return rc;
+ }
+
if (!(chip->wa_flags & PMI8998_V1_REV_WA)) {
fg_encode(chip->sp, FG_SRAM_CHG_TERM_BASE_CURR,
chip->dt.chg_term_base_curr_ma, buf);
@@ -4697,6 +4713,7 @@
#define DEFAULT_CHG_TERM_CURR_MA 100
#define DEFAULT_CHG_TERM_BASE_CURR_MA 75
#define DEFAULT_SYS_TERM_CURR_MA -125
+#define DEFAULT_CUTOFF_CURR_MA 500
#define DEFAULT_DELTA_SOC_THR 1
#define DEFAULT_RECHARGE_SOC_THR 95
#define DEFAULT_BATT_TEMP_COLD 0
@@ -4860,6 +4877,12 @@
else
chip->dt.chg_term_base_curr_ma = temp;
+ rc = of_property_read_u32(node, "qcom,fg-cutoff-current", &temp);
+ if (rc < 0)
+ chip->dt.cutoff_curr_ma = DEFAULT_CUTOFF_CURR_MA;
+ else
+ chip->dt.cutoff_curr_ma = temp;
+
rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp);
if (rc < 0)
chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e7d13ae..8e367c5 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -350,6 +350,16 @@
To compile this driver as a module, choose M here: the module
will be called pwm-rcar.
+config PWM_QTI_LPG
+ tristate "Qualcomm Technologies, Inc. LPG driver"
+ depends on MFD_SPMI_PMIC && OF
+ help
+ This driver supports the LPG (Light Pulse Generator) module found in
+ Qualcomm Technologies, Inc. PMIC chips. Each LPG channel can be
+ configured to operate in PWM mode to output a fixed amplitude with
+ variable duty cycle or in LUT (Look up table) mode to output PWM
+ signal with a modulated amplitude.
+
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 24c1baf..9453eb0 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -33,6 +33,7 @@
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_QPNP) += pwm-qpnp.o
obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
+obj-$(CONFIG_PWM_QTI_LPG) += pwm-qti-lpg.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c
new file mode 100644
index 0000000..328f4b6
--- /dev/null
+++ b/drivers/pwm/pwm-qti-lpg.c
@@ -0,0 +1,570 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define REG_SIZE_PER_LPG 0x100
+
+#define REG_LPG_PWM_SIZE_CLK 0x41
+#define REG_LPG_PWM_FREQ_PREDIV_CLK 0x42
+#define REG_LPG_PWM_TYPE_CONFIG 0x43
+#define REG_LPG_PWM_VALUE_LSB 0x44
+#define REG_LPG_PWM_VALUE_MSB 0x45
+#define REG_LPG_ENABLE_CONTROL 0x46
+#define REG_LPG_PWM_SYNC 0x47
+
+/* REG_LPG_PWM_SIZE_CLK */
+#define LPG_PWM_SIZE_MASK BIT(4)
+#define LPG_PWM_SIZE_SHIFT 4
+#define LPG_PWM_CLK_FREQ_SEL_MASK GENMASK(1, 0)
+
+/* REG_LPG_PWM_FREQ_PREDIV_CLK */
+#define LPG_PWM_FREQ_PREDIV_MASK GENMASK(6, 5)
+#define LPG_PWM_FREQ_PREDIV_SHIFT 5
+#define LPG_PWM_FREQ_EXPONENT_MASK GENMASK(2, 0)
+
+/* REG_LPG_PWM_TYPE_CONFIG */
+#define LPG_PWM_EN_GLITCH_REMOVAL_MASK BIT(5)
+
+/* REG_LPG_PWM_VALUE_LSB */
+#define LPG_PWM_VALUE_LSB_MASK GENMASK(7, 0)
+
+/* REG_LPG_PWM_VALUE_MSB */
+#define LPG_PWM_VALUE_MSB_MASK BIT(0)
+
+/* REG_LPG_ENABLE_CONTROL */
+#define LPG_EN_LPG_OUT_BIT BIT(7)
+#define LPG_PWM_SRC_SELECT_MASK BIT(2)
+#define LPG_PWM_SRC_SELECT_SHIFT 2
+#define LPG_EN_RAMP_GEN_MASK BIT(1)
+#define LPG_EN_RAMP_GEN_SHIFT 1
+
+/* REG_LPG_PWM_SYNC */
+#define LPG_PWM_VALUE_SYNC BIT(0)
+
+#define NUM_PWM_SIZE 2
+#define NUM_PWM_CLK 3
+#define NUM_CLK_PREDIV 4
+#define NUM_PWM_EXP 8
+
+enum {
+ LUT_PATTERN = 0,
+ PWM_OUTPUT,
+};
+
+static const int pwm_size[NUM_PWM_SIZE] = {6, 9};
+static const int clk_freq_hz[NUM_PWM_CLK] = {1024, 32768, 19200000};
+static const int clk_prediv[NUM_CLK_PREDIV] = {1, 3, 5, 6};
+static const int pwm_exponent[NUM_PWM_EXP] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+struct lpg_pwm_config {
+ u32 pwm_size;
+ u32 pwm_clk;
+ u32 prediv;
+ u32 clk_exp;
+ u16 pwm_value;
+ u32 best_period_ns;
+};
+
+struct qpnp_lpg_channel {
+ struct qpnp_lpg_chip *chip;
+ struct lpg_pwm_config pwm_config;
+ u32 lpg_idx;
+ u32 reg_base;
+ u8 src_sel;
+ int current_period_ns;
+ int current_duty_ns;
+};
+
+struct qpnp_lpg_chip {
+ struct pwm_chip pwm_chip;
+ struct regmap *regmap;
+ struct device *dev;
+ struct qpnp_lpg_channel *lpgs;
+ struct mutex bus_lock;
+ u32 num_lpgs;
+};
+
+static int qpnp_lpg_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
+{
+ int rc;
+
+ mutex_lock(&lpg->chip->bus_lock);
+ rc = regmap_write(lpg->chip->regmap, lpg->reg_base + addr, val);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+ lpg->reg_base + addr, val, rc);
+ mutex_unlock(&lpg->chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_lpg_masked_write(struct qpnp_lpg_channel *lpg,
+ u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ mutex_lock(&lpg->chip->bus_lock);
+ rc = regmap_update_bits(lpg->chip->regmap, lpg->reg_base + addr,
+ mask, val);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+ lpg->reg_base + addr, val, mask, rc);
+ mutex_unlock(&lpg->chip->bus_lock);
+
+ return rc;
+}
+
+static struct qpnp_lpg_channel *pwm_dev_to_qpnp_lpg(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm) {
+
+ struct qpnp_lpg_chip *chip = container_of(pwm_chip,
+ struct qpnp_lpg_chip, pwm_chip);
+ u32 hw_idx = pwm->hwpwm;
+
+ if (hw_idx >= chip->num_lpgs) {
+ dev_err(chip->dev, "hw index %d out of range [0-%d]\n",
+ hw_idx, chip->num_lpgs - 1);
+ return NULL;
+ }
+
+ return &chip->lpgs[hw_idx];
+}
+
+static int __find_index_in_array(int member, const int array[], int length)
+{
+ int i;
+
+ for (i = 0; i < length; i++) {
+ if (member == array[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int qpnp_lpg_set_pwm_config(struct qpnp_lpg_channel *lpg)
+{
+ int rc;
+ u8 val, mask;
+ int pwm_size_idx, pwm_clk_idx, prediv_idx, clk_exp_idx;
+
+ pwm_size_idx = __find_index_in_array(lpg->pwm_config.pwm_size,
+ pwm_size, ARRAY_SIZE(pwm_size));
+ pwm_clk_idx = __find_index_in_array(lpg->pwm_config.pwm_clk,
+ clk_freq_hz, ARRAY_SIZE(clk_freq_hz));
+ prediv_idx = __find_index_in_array(lpg->pwm_config.prediv,
+ clk_prediv, ARRAY_SIZE(clk_prediv));
+ clk_exp_idx = __find_index_in_array(lpg->pwm_config.clk_exp,
+ pwm_exponent, ARRAY_SIZE(pwm_exponent));
+
+ if (pwm_size_idx < 0 || pwm_clk_idx < 0
+ || prediv_idx < 0 || clk_exp_idx < 0)
+ return -EINVAL;
+
+	/* pwm_clk_idx is 1 less than the register field value */
+ pwm_clk_idx += 1;
+ val = pwm_size_idx << LPG_PWM_SIZE_SHIFT | pwm_clk_idx;
+ mask = LPG_PWM_SIZE_MASK | LPG_PWM_CLK_FREQ_SEL_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_SIZE_CLK, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_SIZE_CLK failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = prediv_idx << LPG_PWM_FREQ_PREDIV_SHIFT | clk_exp_idx;
+ mask = LPG_PWM_FREQ_PREDIV_MASK | LPG_PWM_FREQ_EXPONENT_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_FREQ_PREDIV_CLK, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_FREQ_PREDIV_CLK failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = lpg->pwm_config.pwm_value & LPG_PWM_VALUE_LSB_MASK;
+ rc = qpnp_lpg_write(lpg, REG_LPG_PWM_VALUE_LSB, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_VALUE_LSB failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = lpg->pwm_config.pwm_value >> 8;
+ mask = LPG_PWM_VALUE_MSB_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_VALUE_MSB, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_VALUE_MSB failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = LPG_PWM_VALUE_SYNC;
+ rc = qpnp_lpg_write(lpg, REG_LPG_PWM_SYNC, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_SYNC failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void __qpnp_lpg_calc_pwm_period(int period_ns,
+ struct lpg_pwm_config *pwm_config)
+{
+ struct lpg_pwm_config configs[NUM_PWM_SIZE];
+ int i, j, m, n;
+ int tmp1, tmp2;
+ int clk_period_ns = 0, pwm_clk_period_ns;
+ int clk_delta_ns = INT_MAX, min_clk_delta_ns = INT_MAX;
+ int pwm_period_delta = INT_MAX, min_pwm_period_delta = INT_MAX;
+ int pwm_size_step;
+
+ /*
+ * (2^pwm_size) * (2^pwm_exp) * prediv * NSEC_PER_SEC
+ * pwm_period = ---------------------------------------------------
+ * clk_freq_hz
+ *
+ * Searching the closest settings for the requested PWM period.
+ */
+ for (n = 0; n < ARRAY_SIZE(pwm_size); n++) {
+ pwm_clk_period_ns = period_ns >> pwm_size[n];
+ for (i = ARRAY_SIZE(clk_freq_hz) - 1; i >= 0; i--) {
+ for (j = 0; j < ARRAY_SIZE(clk_prediv); j++) {
+ for (m = 0; m < ARRAY_SIZE(pwm_exponent); m++) {
+ tmp1 = 1 << pwm_exponent[m];
+ tmp1 *= clk_prediv[j];
+ tmp2 = NSEC_PER_SEC / clk_freq_hz[i];
+
+ clk_period_ns = tmp1 * tmp2;
+
+ clk_delta_ns = abs(pwm_clk_period_ns
+ - clk_period_ns);
+					/*
+					 * Find the closest setting for the
+					 * PWM frequency predivider value
+					 */
+ if (clk_delta_ns < min_clk_delta_ns) {
+ min_clk_delta_ns
+ = clk_delta_ns;
+ configs[n].pwm_clk
+ = clk_freq_hz[i];
+ configs[n].prediv
+ = clk_prediv[j];
+ configs[n].clk_exp
+ = pwm_exponent[m];
+ configs[n].pwm_size
+ = pwm_size[n];
+ configs[n].best_period_ns
+ = clk_period_ns;
+ }
+ }
+ }
+ }
+
+ configs[n].best_period_ns *= 1 << pwm_size[n];
+ /* Find the closest setting for PWM period */
+ if (min_clk_delta_ns < INT_MAX >> pwm_size[n])
+ pwm_period_delta = min_clk_delta_ns << pwm_size[n];
+ else
+ pwm_period_delta = INT_MAX;
+ if (pwm_period_delta < min_pwm_period_delta) {
+ min_pwm_period_delta = pwm_period_delta;
+ memcpy(pwm_config, &configs[n],
+ sizeof(struct lpg_pwm_config));
+ }
+ }
+
+ /* Larger PWM size can achieve better resolution for PWM duty */
+ for (n = ARRAY_SIZE(pwm_size) - 1; n > 0; n--) {
+ if (pwm_config->pwm_size >= pwm_size[n])
+ break;
+ pwm_size_step = pwm_size[n] - pwm_config->pwm_size;
+ if (pwm_config->clk_exp >= pwm_size_step) {
+ pwm_config->pwm_size = pwm_size[n];
+ pwm_config->clk_exp -= pwm_size_step;
+ }
+ }
+ pr_debug("PWM setting for period_ns %d: pwm_clk = %dHZ, prediv = %d, exponent = %d, pwm_size = %d\n",
+ period_ns, pwm_config->pwm_clk, pwm_config->prediv,
+ pwm_config->clk_exp, pwm_config->pwm_size);
+ pr_debug("Actual period: %dns\n", pwm_config->best_period_ns);
+}
+
+static void __qpnp_lpg_calc_pwm_duty(int period_ns, int duty_ns,
+ struct lpg_pwm_config *pwm_config)
+{
+ u16 pwm_value, max_pwm_value;
+
+ if ((1 << pwm_config->pwm_size) > (INT_MAX / duty_ns))
+ pwm_value = duty_ns / (period_ns >> pwm_config->pwm_size);
+ else
+ pwm_value = (duty_ns << pwm_config->pwm_size) / period_ns;
+
+ max_pwm_value = (1 << pwm_config->pwm_size) - 1;
+ if (pwm_value > max_pwm_value)
+ pwm_value = max_pwm_value;
+ pwm_config->pwm_value = pwm_value;
+}
+
+static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ struct qpnp_lpg_channel *lpg;
+ int rc = 0;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ if (duty_ns > period_ns) {
+ dev_err(pwm_chip->dev, "Duty %dns is larger than period %dns\n",
+ duty_ns, period_ns);
+ return -EINVAL;
+ }
+
+ if (period_ns != lpg->current_period_ns)
+ __qpnp_lpg_calc_pwm_period(period_ns, &lpg->pwm_config);
+
+ if (period_ns != lpg->current_period_ns ||
+ duty_ns != lpg->current_duty_ns)
+ __qpnp_lpg_calc_pwm_duty(period_ns, duty_ns, &lpg->pwm_config);
+
+ rc = qpnp_lpg_set_pwm_config(lpg);
+ if (rc < 0)
+ dev_err(pwm_chip->dev, "Config PWM failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+
+ return rc;
+}
+
+static int qpnp_lpg_pwm_enable(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ struct qpnp_lpg_channel *lpg;
+ int rc = 0;
+ u8 mask, val;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ mask = LPG_PWM_SRC_SELECT_MASK | LPG_EN_LPG_OUT_BIT;
+ val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT | LPG_EN_LPG_OUT_BIT;
+
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_ENABLE_CONTROL, mask, val);
+ if (rc < 0)
+ dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+
+ return rc;
+}
+
+static void qpnp_lpg_pwm_disable(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ struct qpnp_lpg_channel *lpg;
+ int rc;
+ u8 mask, val;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return;
+ }
+
+ mask = LPG_PWM_SRC_SELECT_MASK | LPG_EN_LPG_OUT_BIT;
+ val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT;
+
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_ENABLE_CONTROL, mask, val);
+ if (rc < 0)
+ dev_err(pwm_chip->dev, "Disable PWM output failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void qpnp_lpg_pwm_dbg_show(struct pwm_chip *pwm_chip, struct seq_file *s)
+{
+ struct qpnp_lpg_channel *lpg;
+ struct lpg_pwm_config *cfg;
+ struct pwm_device *pwm;
+ int i;
+
+ for (i = 0; i < pwm_chip->npwm; i++) {
+ pwm = &pwm_chip->pwms[i];
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return;
+ }
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+ seq_printf(s, "LPG %d is requested by %s\n",
+ lpg->lpg_idx + 1, pwm->label);
+ } else {
+ seq_printf(s, "LPG %d is free\n",
+ lpg->lpg_idx + 1);
+ continue;
+ }
+
+ if (pwm_is_enabled(pwm)) {
+ seq_puts(s, " enabled\n");
+ } else {
+ seq_puts(s, " disabled\n");
+ continue;
+ }
+
+ cfg = &lpg->pwm_config;
+ seq_printf(s, " clk = %dHz\n", cfg->pwm_clk);
+ seq_printf(s, " pwm_size = %d\n", cfg->pwm_size);
+ seq_printf(s, " prediv = %d\n", cfg->prediv);
+ seq_printf(s, " exponent = %d\n", cfg->clk_exp);
+ seq_printf(s, " pwm_value = %d\n", cfg->pwm_value);
+ seq_printf(s, " Requested period: %dns, best period = %dns\n",
+ pwm_get_period(pwm), cfg->best_period_ns);
+ }
+}
+#endif
+
+static const struct pwm_ops qpnp_lpg_pwm_ops = {
+ .config = qpnp_lpg_pwm_config,
+ .enable = qpnp_lpg_pwm_enable,
+ .disable = qpnp_lpg_pwm_disable,
+#ifdef CONFIG_DEBUG_FS
+ .dbg_show = qpnp_lpg_pwm_dbg_show,
+#endif
+ .owner = THIS_MODULE,
+};
+
+static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
+{
+ int rc = 0, i;
+ u64 base, length;
+ const __be32 *addr;
+
+ addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(chip->dev, "Getting address failed\n");
+ return -EINVAL;
+ }
+ base = be32_to_cpu(addr[0]);
+ length = be32_to_cpu(addr[1]);
+
+ chip->num_lpgs = length / REG_SIZE_PER_LPG;
+ chip->lpgs = devm_kcalloc(chip->dev, chip->num_lpgs,
+ sizeof(*chip->lpgs), GFP_KERNEL);
+ if (!chip->lpgs)
+ return -ENOMEM;
+
+ for (i = 0; i < chip->num_lpgs; i++) {
+ chip->lpgs[i].chip = chip;
+ chip->lpgs[i].lpg_idx = i;
+ chip->lpgs[i].reg_base = base + i * REG_SIZE_PER_LPG;
+ chip->lpgs[i].src_sel = PWM_OUTPUT;
+ }
+
+ return rc;
+}
+
+static int qpnp_lpg_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct qpnp_lpg_chip *chip;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ dev_err(chip->dev, "Getting regmap failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_lpg_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Devicetree properties parsing failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ dev_set_drvdata(chip->dev, chip);
+
+ mutex_init(&chip->bus_lock);
+ chip->pwm_chip.dev = chip->dev;
+ chip->pwm_chip.base = -1;
+ chip->pwm_chip.npwm = chip->num_lpgs;
+ chip->pwm_chip.ops = &qpnp_lpg_pwm_ops;
+
+ rc = pwmchip_add(&chip->pwm_chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Add pwmchip failed, rc=%d\n", rc);
+ mutex_destroy(&chip->bus_lock);
+ }
+
+ return rc;
+}
+
+static int qpnp_lpg_remove(struct platform_device *pdev)
+{
+ struct qpnp_lpg_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ rc = pwmchip_remove(&chip->pwm_chip);
+ if (rc < 0)
+ dev_err(chip->dev, "Remove pwmchip failed, rc=%d\n", rc);
+
+ mutex_destroy(&chip->bus_lock);
+ dev_set_drvdata(chip->dev, NULL);
+
+ return rc;
+}
+
+static const struct of_device_id qpnp_lpg_of_match[] = {
+ { .compatible = "qcom,pwm-lpg",},
+ { },
+};
+
+static struct platform_driver qpnp_lpg_driver = {
+ .driver = {
+ .name = "qcom,pwm-lpg",
+ .of_match_table = qpnp_lpg_of_match,
+ },
+ .probe = qpnp_lpg_probe,
+ .remove = qpnp_lpg_remove,
+};
+module_platform_driver(qpnp_lpg_driver);
+
+MODULE_DESCRIPTION("QTI LPG driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("pwm:pwm-lpg");
diff --git a/drivers/regulator/cpr-regulator.c b/drivers/regulator/cpr-regulator.c
index 9c47e82..cabcf7f 100644
--- a/drivers/regulator/cpr-regulator.c
+++ b/drivers/regulator/cpr-regulator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,8 +38,6 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/cpr-regulator.h>
-#include <linux/msm_thermal.h>
-#include <linux/msm_tsens.h>
#include <soc/qcom/scm.h>
/* Register Offsets for RB-CPR and Bit Definitions */
@@ -287,7 +285,6 @@
int corner;
int ceiling_max;
struct dentry *debugfs;
- struct device *dev;
/* eFuse parameters */
phys_addr_t efuse_addr;
@@ -319,14 +316,6 @@
/* mem-acc regulator */
struct regulator *mem_acc_vreg;
- /* thermal monitor */
- int tsens_id;
- int cpr_disable_temp_threshold;
- int cpr_enable_temp_threshold;
- bool cpr_disable_on_temperature;
- bool cpr_thermal_disable;
- struct threshold_info tsens_threshold_config;
-
/* CPR parameters */
u32 num_fuse_corners;
u64 cpr_fuse_bits;
@@ -565,8 +554,7 @@
static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
{
- if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable ||
- cpr_vreg->cpr_thermal_disable)
+ if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable)
return false;
else
return true;
@@ -5102,145 +5090,6 @@
return rc;
}
-static int cpr_disable_on_temp(struct cpr_regulator *cpr_vreg, bool disable)
-{
- int rc = 0;
-
- mutex_lock(&cpr_vreg->cpr_mutex);
-
- if (cpr_vreg->cpr_fuse_disable ||
- (cpr_vreg->cpr_thermal_disable == disable))
- goto out;
-
- cpr_vreg->cpr_thermal_disable = disable;
-
- if (cpr_vreg->enable && cpr_vreg->corner) {
- if (disable) {
- cpr_debug(cpr_vreg, "Disabling CPR - below temperature threshold [%d]\n",
- cpr_vreg->cpr_disable_temp_threshold);
- /* disable CPR and force open-loop */
- cpr_ctl_disable(cpr_vreg);
- rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
- cpr_vreg->corner, false);
- if (rc < 0)
- cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
- rc);
- } else {
- /* enable CPR */
- cpr_debug(cpr_vreg, "Enabling CPR - above temperature thresold [%d]\n",
- cpr_vreg->cpr_enable_temp_threshold);
- rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
- cpr_vreg->corner, true);
- if (rc < 0)
- cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
- rc);
- }
- }
-out:
- mutex_unlock(&cpr_vreg->cpr_mutex);
- return rc;
-}
-
-static void tsens_threshold_notify(struct therm_threshold *tsens_cb_data)
-{
- struct threshold_info *info = tsens_cb_data->parent;
- struct cpr_regulator *cpr_vreg = container_of(info,
- struct cpr_regulator, tsens_threshold_config);
- int rc = 0;
-
- cpr_debug(cpr_vreg, "Triggered tsens-notification trip_type=%d for thermal_zone_id=%d\n",
- tsens_cb_data->trip_triggered, tsens_cb_data->sensor_id);
-
- switch (tsens_cb_data->trip_triggered) {
- case THERMAL_TRIP_CONFIGURABLE_HI:
- rc = cpr_disable_on_temp(cpr_vreg, false);
- if (rc < 0)
- cpr_err(cpr_vreg, "Failed to enable CPR, rc=%d\n", rc);
- break;
- case THERMAL_TRIP_CONFIGURABLE_LOW:
- rc = cpr_disable_on_temp(cpr_vreg, true);
- if (rc < 0)
- cpr_err(cpr_vreg, "Failed to disable CPR, rc=%d\n", rc);
- break;
- default:
- cpr_debug(cpr_vreg, "trip-type %d not supported\n",
- tsens_cb_data->trip_triggered);
- break;
- }
-
- if (tsens_cb_data->cur_state != tsens_cb_data->trip_triggered) {
- rc = sensor_mgr_set_threshold(tsens_cb_data->sensor_id,
- tsens_cb_data->threshold);
- if (rc < 0)
- cpr_err(cpr_vreg,
- "Failed to set temp. threshold, rc=%d\n", rc);
- else
- tsens_cb_data->cur_state =
- tsens_cb_data->trip_triggered;
- }
-}
-
-static int cpr_check_tsens(struct cpr_regulator *cpr_vreg)
-{
- int rc = 0;
- struct tsens_device tsens_dev;
- unsigned long temp = 0;
- bool disable;
-
- if (tsens_is_ready() > 0) {
- tsens_dev.sensor_num = cpr_vreg->tsens_id;
- rc = tsens_get_temp(&tsens_dev, &temp);
- if (rc < 0) {
- cpr_err(cpr_vreg, "Faled to read tsens, rc=%d\n", rc);
- return rc;
- }
-
- disable = (int) temp <= cpr_vreg->cpr_disable_temp_threshold;
- rc = cpr_disable_on_temp(cpr_vreg, disable);
- if (rc)
- cpr_err(cpr_vreg, "Failed to %s CPR, rc=%d\n",
- disable ? "disable" : "enable", rc);
- }
-
- return rc;
-}
-
-static int cpr_thermal_init(struct cpr_regulator *cpr_vreg)
-{
- int rc;
- struct device_node *of_node = cpr_vreg->dev->of_node;
-
- if (!of_find_property(of_node, "qcom,cpr-thermal-sensor-id", NULL))
- return 0;
-
- CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-thermal-sensor-id",
- &cpr_vreg->tsens_id, rc);
- if (rc < 0)
- return rc;
-
- CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-disable-temp-threshold",
- &cpr_vreg->cpr_disable_temp_threshold, rc);
- if (rc < 0)
- return rc;
-
- CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-enable-temp-threshold",
- &cpr_vreg->cpr_enable_temp_threshold, rc);
- if (rc < 0)
- return rc;
-
- if (cpr_vreg->cpr_disable_temp_threshold >=
- cpr_vreg->cpr_enable_temp_threshold) {
- cpr_err(cpr_vreg, "Invalid temperature threshold cpr_disable_temp[%d] >= cpr_enable_temp[%d]\n",
- cpr_vreg->cpr_disable_temp_threshold,
- cpr_vreg->cpr_enable_temp_threshold);
- return -EINVAL;
- }
-
- cpr_vreg->cpr_disable_on_temperature = true;
-
- return 0;
-}
-
static int cpr_init_cpr(struct platform_device *pdev,
struct cpr_regulator *cpr_vreg)
{
@@ -6067,7 +5916,13 @@
return -EINVAL;
}
- init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
+ GFP_KERNEL);
+ if (!cpr_vreg)
+ return -ENOMEM;
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+ &cpr_vreg->rdesc);
if (!init_data) {
dev_err(dev, "regulator init data is missing\n");
return -EINVAL;
@@ -6078,14 +5933,6 @@
|= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
}
- cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
- GFP_KERNEL);
- if (!cpr_vreg) {
- dev_err(dev, "Can't allocate cpr_regulator memory\n");
- return -ENOMEM;
- }
-
- cpr_vreg->dev = &pdev->dev;
cpr_vreg->rdesc.name = init_data->constraints.name;
if (cpr_vreg->rdesc.name == NULL) {
dev_err(dev, "regulator-name missing\n");
@@ -6182,12 +6029,6 @@
return rc;
}
- rc = cpr_thermal_init(cpr_vreg);
- if (rc) {
- cpr_err(cpr_vreg, "Thermal intialization failed rc=%d\n", rc);
- return rc;
- }
-
if (of_property_read_bool(pdev->dev.of_node,
"qcom,disable-closed-loop-in-pc")) {
rc = cpr_init_pm_notification(cpr_vreg);
@@ -6247,17 +6088,6 @@
platform_set_drvdata(pdev, cpr_vreg);
cpr_debugfs_init(cpr_vreg);
- if (cpr_vreg->cpr_disable_on_temperature) {
- rc = cpr_check_tsens(cpr_vreg);
- if (rc < 0) {
- cpr_err(cpr_vreg, "Unable to config CPR on tsens, rc=%d\n",
- rc);
- cpr_apc_exit(cpr_vreg);
- cpr_debugfs_remove(cpr_vreg);
- return rc;
- }
- }
-
/* Register panic notification call back */
cpr_vreg->panic_notifier.notifier_call = cpr_panic_callback;
atomic_notifier_chain_register(&panic_notifier_list,
@@ -6293,10 +6123,6 @@
if (cpr_vreg->cpu_notifier.notifier_call)
unregister_hotcpu_notifier(&cpr_vreg->cpu_notifier);
- if (cpr_vreg->cpr_disable_on_temperature)
- sensor_mgr_remove_threshold(
- &cpr_vreg->tsens_threshold_config);
-
atomic_notifier_chain_unregister(&panic_notifier_list,
&cpr_vreg->panic_notifier);
@@ -6325,56 +6151,6 @@
.resume = cpr_regulator_resume,
};
-static int initialize_tsens_monitor(struct cpr_regulator *cpr_vreg)
-{
- int rc;
-
- rc = cpr_check_tsens(cpr_vreg);
- if (rc < 0) {
- cpr_err(cpr_vreg, "Unable to check tsens, rc=%d\n", rc);
- return rc;
- }
-
- rc = sensor_mgr_init_threshold(&cpr_vreg->tsens_threshold_config,
- cpr_vreg->tsens_id,
- cpr_vreg->cpr_enable_temp_threshold, /* high */
- cpr_vreg->cpr_disable_temp_threshold, /* low */
- tsens_threshold_notify);
- if (rc < 0) {
- cpr_err(cpr_vreg, "Failed to init tsens monitor, rc=%d\n", rc);
- return rc;
- }
-
- rc = sensor_mgr_convert_id_and_set_threshold(
- &cpr_vreg->tsens_threshold_config);
- if (rc < 0)
- cpr_err(cpr_vreg, "Failed to set tsens threshold, rc=%d\n",
- rc);
-
- return rc;
-}
-
-int __init cpr_regulator_late_init(void)
-{
- int rc;
- struct cpr_regulator *cpr_vreg;
-
- mutex_lock(&cpr_regulator_list_mutex);
-
- list_for_each_entry(cpr_vreg, &cpr_regulator_list, list) {
- if (cpr_vreg->cpr_disable_on_temperature) {
- rc = initialize_tsens_monitor(cpr_vreg);
- if (rc)
- cpr_err(cpr_vreg, "Failed to initialize temperature monitor, rc=%d\n",
- rc);
- }
- }
-
- mutex_unlock(&cpr_regulator_list_mutex);
- return 0;
-}
-late_initcall(cpr_regulator_late_init);
-
/**
* cpr_regulator_init() - register cpr-regulator driver
*
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index f457eea..88c5697 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2840,8 +2840,11 @@
return true;
if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE &&
- labibb->mode == QPNP_LABIBB_LCD_MODE)
+ labibb->mode == QPNP_LABIBB_LCD_MODE) {
+ if (labibb->ttw_en)
+ return false;
return true;
+ }
return false;
}
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index db4e7bb..1b283b2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -83,8 +83,6 @@
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
select PHY_QCOM_UFS
- select EXTCON
- select EXTCON_GPIO
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 31cf232..c3b2ca8 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -774,3 +774,5 @@
The driver will help route diag traffic from modem side over the QDSS
sub-system to USB on APSS side. The driver acts as a bridge between the
MHI and USB interface. If unsure, say N.
+
+source "drivers/soc/qcom/wcnss/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f34b714..0255761 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -75,6 +75,7 @@
obj-y += subsystem_notif.o
obj-y += subsystem_restart.o
obj-y += ramdump.o
+ obj-y += microdump_collector.o
endif
obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
@@ -95,3 +96,4 @@
obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
+obj-$(CONFIG_WCNSS_CORE) += wcnss/
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index d8cc2c4..59897ea 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -376,7 +376,7 @@
static struct channel_ctx *ch_name_to_ch_ctx_create(
struct glink_core_xprt_ctx *xprt_ctx,
- const char *name);
+ const char *name, bool local);
static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
uint32_t riid, void *cookie);
@@ -1836,13 +1836,14 @@
* it is not found and get reference of context.
* @xprt_ctx: Transport to search for a matching channel.
* @name: Name of the desired channel.
+ * @local: If called from local open or not
*
* Return: The channel corresponding to @name, NULL if a matching channel was
* not found AND a new channel could not be created.
*/
static struct channel_ctx *ch_name_to_ch_ctx_create(
struct glink_core_xprt_ctx *xprt_ctx,
- const char *name)
+ const char *name, bool local)
{
struct channel_ctx *entry;
struct channel_ctx *ctx;
@@ -1886,10 +1887,23 @@
list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
port_list_node)
if (!strcmp(entry->name, name) && !entry->pending_delete) {
+ rwref_get(&entry->ch_state_lhb2);
+ /* port already exists */
+ if (entry->local_open_state != GLINK_CHANNEL_CLOSED
+ && local) {
+ /* not ready to be re-opened */
+ GLINK_INFO_CH_XPRT(entry, xprt_ctx,
+ "%s: Ch not ready. State: %u\n",
+ __func__, entry->local_open_state);
+ rwref_put(&entry->ch_state_lhb2);
+ entry = NULL;
+ } else if (local) {
+ entry->local_open_state =
+ GLINK_CHANNEL_OPENING;
+ }
spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
flags);
kfree(ctx);
- rwref_get(&entry->ch_state_lhb2);
rwref_write_put(&xprt_ctx->xprt_state_lhb0);
return entry;
}
@@ -1919,6 +1933,8 @@
ctx->transport_ptr = xprt_ctx;
rwref_get(&ctx->ch_state_lhb2);
+ if (local)
+ ctx->local_open_state = GLINK_CHANNEL_OPENING;
list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
@@ -2604,23 +2620,13 @@
* look for an existing port structure which can occur in
* reopen and remote-open-first cases
*/
- ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+ ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name, true);
if (ctx == NULL) {
GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
cfg->transport, cfg->edge, __func__);
return ERR_PTR(-ENOMEM);
}
- /* port already exists */
- if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
- /* not ready to be re-opened */
- GLINK_INFO_CH_XPRT(ctx, transport_ptr,
- "%s: Channel not ready to be re-opened. State: %u\n",
- __func__, ctx->local_open_state);
- rwref_put(&ctx->ch_state_lhb2);
- return ERR_PTR(-EBUSY);
- }
-
/* initialize port structure */
ctx->user_priv = cfg->priv;
ctx->rx_intent_req_timeout_jiffies =
@@ -2651,7 +2657,6 @@
ctx->local_xprt_req = best_id;
ctx->no_migrate = cfg->transport &&
!(cfg->options & GLINK_OPT_INITIAL_XPORT);
- ctx->local_open_state = GLINK_CHANNEL_OPENING;
GLINK_INFO_PERF_CH(ctx,
"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
__func__);
@@ -4912,7 +4917,7 @@
bool do_migrate;
glink_core_migration_edge_lock(if_ptr->glink_core_priv);
- ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
+ ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name, false);
if (ctx == NULL) {
GLINK_ERR_XPRT(if_ptr->glink_core_priv,
"%s: invalid rcid %u received, name '%s'\n",
@@ -5015,6 +5020,7 @@
struct channel_ctx *ctx;
bool is_ch_fully_closed;
struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+ unsigned long flags;
ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
if (!ctx) {
@@ -5032,11 +5038,13 @@
rwref_put(&ctx->ch_state_lhb2);
return;
}
+ spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ ctx->pending_delete = true;
+ spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
- ctx->pending_delete = true;
if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
if (is_ch_fully_closed) {
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index dd436da..239f2c1 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#define GLINK_SSR_EVENT_INIT ~0
#define NUM_LOG_PAGES 3
+#define GLINK_SSR_PRIORITY 1
#define GLINK_SSR_LOG(x...) do { \
if (glink_ssr_log_ctx) \
ipc_log_string(glink_ssr_log_ctx, x); \
@@ -596,25 +597,6 @@
strlcpy(do_cleanup_data->name, ss_info->edge,
do_cleanup_data->name_len + 1);
- ret = glink_queue_rx_intent(handle, do_cleanup_data,
- sizeof(struct cleanup_done_msg));
- if (ret) {
- GLINK_SSR_ERR(
- "%s %s: %s, ret[%d], resp. remaining[%d]\n",
- "<SSR>", __func__,
- "queue_rx_intent failed", ret,
- atomic_read(&responses_remaining));
- kfree(do_cleanup_data);
-
- if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
- panic("%s: Could not queue intent for RPM!\n",
- __func__);
- atomic_dec(&responses_remaining);
- kref_put(&ss_leaf_entry->cb_data->cb_kref,
- cb_data_release);
- continue;
- }
-
if (strcmp(ss_leaf_entry->ssr_name, "rpm"))
ret = glink_tx(handle, do_cleanup_data,
do_cleanup_data,
@@ -640,6 +622,24 @@
cb_data_release);
continue;
}
+ ret = glink_queue_rx_intent(handle, do_cleanup_data,
+ sizeof(struct cleanup_done_msg));
+ if (ret) {
+ GLINK_SSR_ERR(
+ "%s %s: %s, ret[%d], resp. remaining[%d]\n",
+ "<SSR>", __func__,
+ "queue_rx_intent failed", ret,
+ atomic_read(&responses_remaining));
+ kfree(do_cleanup_data);
+
+ if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+ panic("%s: Could not queue intent for RPM!\n",
+ __func__);
+ atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
+ continue;
+ }
sequence_number++;
kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
}
@@ -946,6 +946,7 @@
nb->subsystem = subsys_name;
nb->nb.notifier_call = glink_ssr_restart_notifier_cb;
+ nb->nb.priority = GLINK_SSR_PRIORITY;
handle = subsys_notif_register_notifier(nb->subsystem, &nb->nb);
if (IS_ERR_OR_NULL(handle)) {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index a28644d..e3a50e3 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2262,8 +2262,6 @@
set_bit(ICNSS_FW_READY, &penv->state);
- icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_READY, NULL);
-
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
icnss_hw_power_off(penv);
@@ -2356,29 +2354,6 @@
return 0;
}
-static int icnss_call_driver_remove(struct icnss_priv *priv)
-{
- icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state);
-
- clear_bit(ICNSS_FW_READY, &priv->state);
-
- if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
- return 0;
-
- if (!priv->ops || !priv->ops->remove)
- return 0;
-
- set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
- penv->ops->remove(&priv->pdev->dev);
-
- clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
- clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
-
- icnss_hw_power_off(penv);
-
- return 0;
-}
-
static int icnss_fw_crashed(struct icnss_priv *priv,
struct icnss_event_pd_service_down_data *event_data)
{
@@ -2418,10 +2393,7 @@
if (priv->force_err_fatal)
ICNSS_ASSERT(0);
- if (event_data->crashed)
- icnss_fw_crashed(priv, event_data);
- else
- icnss_call_driver_remove(priv);
+ icnss_fw_crashed(priv, event_data);
out:
kfree(data);
@@ -3104,6 +3076,12 @@
if (!dev)
return -ENODEV;
+ if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+ icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
+ penv->state);
+ return -EINVAL;
+ }
+
icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
ret = wlfw_ini_send_sync_msg(fw_log_mode);
@@ -3197,6 +3175,12 @@
if (!dev)
return -ENODEV;
+ if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+ icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
+ penv->state);
+ return -EINVAL;
+ }
+
icnss_pr_dbg("Mode: %d, config: %p, host_version: %s\n",
mode, config, host_version);
diff --git a/drivers/soc/qcom/microdump_collector.c b/drivers/soc/qcom/microdump_collector.c
new file mode 100644
index 0000000..47f3336
--- /dev/null
+++ b/drivers/soc/qcom/microdump_collector.c
@@ -0,0 +1,159 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smem.h>
+
+/*
+ * This program collects the data from SMEM regions whenever the modem crashes
+ * and stores it in /dev/ramdump_microdump_modem so as to expose it to
+ * user space.
+ */
+
+struct microdump_data {
+ struct ramdump_device *microdump_dev;
+ void *microdump_modem_notify_handler;
+ struct notifier_block microdump_modem_ssr_nb;
+};
+
+static struct microdump_data *drv;
+
+static int microdump_modem_notifier_nb(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ int ret = 0;
+ unsigned int size_reason = 0, size_data = 0;
+ char *crash_reason = NULL;
+ char *crash_data = NULL;
+ unsigned int smem_id = 611;
+ struct ramdump_segment segment[2];
+
+ if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
+
+ memset(segment, 0, sizeof(segment));
+
+ crash_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size_reason
+ , 0, SMEM_ANY_HOST_FLAG);
+ if (IS_ERR_OR_NULL(crash_reason)) {
+ pr_err("%s: Error in getting SMEM_reason pointer\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ segment[0].v_address = crash_reason;
+ segment[0].size = size_reason;
+
+ crash_data = smem_get_entry(smem_id, &size_data, SMEM_MODEM, 0);
+ if (IS_ERR_OR_NULL(crash_data)) {
+ pr_err("%s: Error in getting SMEM_data pointer\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ segment[1].v_address = crash_data;
+ segment[1].size = size_data;
+
+ ret = do_ramdump(drv->microdump_dev, segment, 2);
+ if (ret)
+ pr_err("%s: do_ramdump() failed\n", __func__);
+ }
+
+ return ret;
+}
+
+static int microdump_modem_ssr_register_notifier(struct microdump_data *drv)
+{
+ int ret = 0;
+
+ drv->microdump_modem_ssr_nb.notifier_call = microdump_modem_notifier_nb;
+
+ drv->microdump_modem_notify_handler =
+ subsys_notif_register_notifier("modem",
+ &drv->microdump_modem_ssr_nb);
+
+ if (IS_ERR(drv->microdump_modem_notify_handler)) {
+ pr_err("Modem register notifier failed: %ld\n",
+ PTR_ERR(drv->microdump_modem_notify_handler));
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void microdump_modem_ssr_unregister_notifier(struct microdump_data *drv)
+{
+ subsys_notif_unregister_notifier(drv->microdump_modem_notify_handler,
+ &drv->microdump_modem_ssr_nb);
+ drv->microdump_modem_notify_handler = NULL;
+}
+
+/*
+ * microdump_init() - Registers kernel module for microdump collector
+ *
+ * Creates device file /dev/ramdump_microdump_modem and registers handler for
+ * modem SSR events.
+ *
+ * Returns 0 on success and negative error code in case of errors
+ */
+static int __init microdump_init(void)
+{
+ int ret = -ENOMEM;
+
+ drv = kzalloc(sizeof(struct microdump_data), GFP_KERNEL);
+ if (!drv)
+ goto out;
+
+ drv->microdump_dev = create_ramdump_device("microdump_modem", NULL);
+ if (!drv->microdump_dev) {
+ pr_err("%s: Unable to create a microdump_modem ramdump device\n"
+ , __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ ret = microdump_modem_ssr_register_notifier(drv);
+ if (ret) {
+ destroy_ramdump_device(drv->microdump_dev);
+ goto out_kfree;
+ }
+ return ret;
+out_kfree:
+ pr_err("%s: Failed to register microdump collector\n", __func__);
+ kfree(drv);
+ drv = NULL;
+out:
+ return ret;
+}
+
+static void __exit microdump_exit(void)
+{
+ if (!drv)
+ return;
+
+ if (!IS_ERR(drv->microdump_modem_notify_handler))
+ microdump_modem_ssr_unregister_notifier(drv);
+
+ if (drv->microdump_dev)
+ destroy_ramdump_device(drv->microdump_dev);
+
+ kfree(drv);
+}
+
+module_init(microdump_init);
+module_exit(microdump_exit);
+
+MODULE_DESCRIPTION("Microdump Collector");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
index 95c127d..974f74e 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -243,7 +243,7 @@
(M_BKE_GC_GC_BMSK >> \
(M_BKE_GC_GC_SHFT + 1))
-static int bimc_div(int64_t *a, uint32_t b)
+static int bimc_div(uint64_t *a, uint32_t b)
{
if ((*a > 0) && (*a < b)) {
*a = 0;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 5a110bb..c00749c 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -550,9 +550,9 @@
struct tcs_cmd *cmdlist_sleep = NULL;
struct rpmh_client *cur_mbox = NULL;
struct list_head *cur_bcm_clist = NULL;
- int *n_active = NULL;
- int *n_wake = NULL;
- int *n_sleep = NULL;
+ int n_active[VCD_MAX_CNT];
+ int n_wake[VCD_MAX_CNT];
+ int n_sleep[VCD_MAX_CNT];
int cnt_vcd = 0;
int cnt_active = 0;
int cnt_wake = 0;
@@ -573,8 +573,15 @@
cur_mbox = cur_rsc->rscdev->mbox;
cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
+ cmdlist_active = cur_rsc->rscdev->cmdlist_active;
+ cmdlist_wake = cur_rsc->rscdev->cmdlist_wake;
+ cmdlist_sleep = cur_rsc->rscdev->cmdlist_sleep;
for (i = 0; i < VCD_MAX_CNT; i++) {
+ n_active[i] = 0;
+ n_wake[i] = 0;
+ n_sleep[i] = 0;
+
if (list_empty(&cur_bcm_clist[i]))
continue;
list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
@@ -600,27 +607,6 @@
if (!cnt_active)
goto exit_msm_bus_commit_data;
- n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
- if (!n_active)
- return -ENOMEM;
-
- n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
- if (!n_wake)
- return -ENOMEM;
-
- n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
- if (!n_sleep)
- return -ENOMEM;
-
- if (cnt_active)
- cmdlist_active = kcalloc(cnt_active, sizeof(struct tcs_cmd),
- GFP_KERNEL);
- if (cnt_sleep && cnt_wake) {
- cmdlist_wake = kcalloc(cnt_wake, sizeof(struct tcs_cmd),
- GFP_KERNEL);
- cmdlist_sleep = kcalloc(cnt_sleep, sizeof(struct tcs_cmd),
- GFP_KERNEL);
- }
bcm_cnt = tcs_cmd_list_gen(n_active, n_wake, n_sleep, cmdlist_active,
cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
@@ -654,8 +640,6 @@
if (ret)
MSM_BUS_ERR("%s: error sending wake sets: %d\n",
__func__, ret);
- kfree(n_wake);
- kfree(cmdlist_wake);
}
if (cnt_sleep) {
ret = rpmh_write_batch(cur_mbox, RPMH_SLEEP_STATE,
@@ -663,14 +647,8 @@
if (ret)
MSM_BUS_ERR("%s: error sending sleep sets: %d\n",
__func__, ret);
- kfree(n_sleep);
- kfree(cmdlist_sleep);
}
- kfree(cmdlist_active);
- kfree(n_active);
-
-
list_for_each_entry_safe(node, node_tmp, clist, link) {
if (unlikely(node->node_info->defer_qos))
msm_bus_dev_init_qos(&node->dev, NULL);
@@ -1168,6 +1146,41 @@
return ret;
}
+static int msm_bus_postcon_setup(struct device *bus_dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+ struct msm_bus_rsc_device_type *rscdev;
+
+ bus_node = to_msm_bus_node(bus_dev);
+ if (!bus_node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ return -ENODEV;
+ }
+
+ if (bus_node->node_info->is_rsc_dev) {
+ rscdev = bus_node->rscdev;
+ rscdev->cmdlist_active = devm_kcalloc(bus_dev,
+ rscdev->num_bcm_devs,
+ sizeof(struct tcs_cmd), GFP_KERNEL);
+ if (!rscdev->cmdlist_active)
+ return -ENOMEM;
+
+ rscdev->cmdlist_wake = devm_kcalloc(bus_dev,
+ rscdev->num_bcm_devs,
+ sizeof(struct tcs_cmd), GFP_KERNEL);
+ if (!rscdev->cmdlist_wake)
+ return -ENOMEM;
+
+ rscdev->cmdlist_sleep = devm_kcalloc(bus_dev,
+ rscdev->num_bcm_devs,
+ sizeof(struct tcs_cmd), GFP_KERNEL);
+ if (!rscdev->cmdlist_sleep)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int msm_bus_init_clk(struct device *bus_dev,
struct msm_bus_node_device_type *pdata)
{
@@ -1641,6 +1654,7 @@
goto exit_setup_dev_conn;
}
rsc_node = to_msm_bus_node(bus_node->node_info->rsc_devs[j]);
+ rsc_node->rscdev->num_bcm_devs++;
}
exit_setup_dev_conn:
@@ -1771,6 +1785,13 @@
goto exit_device_probe;
}
+ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+ msm_bus_postcon_setup);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error post connection setup", __func__);
+ goto exit_device_probe;
+ }
+
/*
* Setup the QoS for the nodes, don't check the error codes as we
* defer QoS programming to the first transaction in cases of failure
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index 8929959..b023f72 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,10 @@
int req_state;
uint32_t acv[NUM_CTX];
uint32_t query_acv[NUM_CTX];
+ struct tcs_cmd *cmdlist_active;
+ struct tcs_cmd *cmdlist_wake;
+ struct tcs_cmd *cmdlist_sleep;
+ int num_bcm_devs;
};
struct msm_bus_bcm_device_type {
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 0efd287..3b6c0bd 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
#include <soc/qcom/ramdump.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/smem.h>
#include <linux/uaccess.h>
#include <asm/setup.h>
@@ -55,10 +56,9 @@
#endif
#define PIL_NUM_DESC 10
-#define NUM_OF_ENCRYPTED_KEY 3
#define MAX_LEN 96
static void __iomem *pil_info_base;
-static void __iomem *pil_minidump_base;
+static struct md_global_toc *g_md_toc;
/**
* proxy_timeout - Override for proxy vote timeouts
@@ -81,18 +81,6 @@
};
/**
- * struct boot_minidump_smem_region - Representation of SMEM TOC
- * @region_name: Name of modem segment to be dumped
- * @region_base_address: Where segment start from
- * @region_size: Size of segment to be dumped
- */
-struct boot_minidump_smem_region {
- char region_name[16];
- u64 region_base_address;
- u64 region_size;
-};
-
-/**
* struct pil_seg - memory map representing one segment
* @next: points to next seg mentor NULL if last segment
* @paddr: physical start address of segment
@@ -146,8 +134,6 @@
phys_addr_t region_end;
void *region;
struct pil_image_info __iomem *info;
- struct md_ssr_ss_info __iomem *minidump;
- int minidump_id;
int id;
int unvoted_flag;
size_t region_size;
@@ -155,24 +141,27 @@
static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
{
- struct boot_minidump_smem_region __iomem *region_info;
+ struct md_ss_region __iomem *region_info;
struct ramdump_segment *ramdump_segs, *s;
struct pil_priv *priv = desc->priv;
- void __iomem *subsys_smem_base;
+ void __iomem *subsys_segtable_base;
+ u64 ss_region_ptr = 0;
void __iomem *offset;
int ss_mdump_seg_cnt;
+ int ss_valid_seg_cnt;
int ret, i;
- memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
- offset = offset + sizeof(priv->minidump->md_ss_smem_regions_baseptr);
- /* There are 3 encryption keys which also need to be dumped */
- ss_mdump_seg_cnt = readb_relaxed(offset) +
- NUM_OF_ENCRYPTED_KEY;
-
- subsys_smem_base = ioremap(__raw_readl(priv->minidump),
- ss_mdump_seg_cnt * sizeof(*region_info));
- region_info =
- (struct boot_minidump_smem_region __iomem *)subsys_smem_base;
+ ss_region_ptr = desc->minidump->md_ss_smem_regions_baseptr;
+ if (!ramdump_dev)
+ return -ENODEV;
+ ss_mdump_seg_cnt = desc->minidump->ss_region_count;
+ subsys_segtable_base =
+ ioremap((unsigned long)ss_region_ptr,
+ ss_mdump_seg_cnt * sizeof(struct md_ss_region));
+ region_info = (struct md_ss_region __iomem *)subsys_segtable_base;
+ if (!region_info)
+ return -EINVAL;
+ pr_info("Minidump : Segments in minidump 0x%x\n", ss_mdump_seg_cnt);
ramdump_segs = kcalloc(ss_mdump_seg_cnt,
sizeof(*ramdump_segs), GFP_KERNEL);
if (!ramdump_segs)
@@ -183,23 +172,30 @@
(priv->region_end - priv->region_start));
s = ramdump_segs;
+ ss_valid_seg_cnt = ss_mdump_seg_cnt;
for (i = 0; i < ss_mdump_seg_cnt; i++) {
memcpy(&offset, &region_info, sizeof(region_info));
- memcpy(&s->name, &region_info, sizeof(region_info));
- offset = offset + sizeof(region_info->region_name);
- s->address = __raw_readl(offset);
- offset = offset + sizeof(region_info->region_base_address);
- s->size = __raw_readl(offset);
+ offset = offset + sizeof(region_info->name) +
+ sizeof(region_info->seq_num);
+ if (__raw_readl(offset) == MD_REGION_VALID) {
+ memcpy(&s->name, &region_info, sizeof(region_info));
+ offset = offset + sizeof(region_info->md_valid);
+ s->address = __raw_readl(offset);
+ offset = offset +
+ sizeof(region_info->region_base_address);
+ s->size = __raw_readl(offset);
+ pr_info("Minidump : Dumping segment %s with address 0x%lx and size 0x%x\n",
+ s->name, s->address, (unsigned int)s->size);
+ } else
+ ss_valid_seg_cnt--;
s++;
region_info++;
}
- ret = do_minidump(ramdump_dev, ramdump_segs, ss_mdump_seg_cnt);
+ ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
kfree(ramdump_segs);
if (ret)
- pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
+ pil_err(desc, "%s: Minidump collection failed for subsys %s rc:%d\n",
__func__, desc->name, ret);
- writel_relaxed(0, &priv->minidump->md_ss_smem_regions_baseptr);
- writeb_relaxed(1, &priv->minidump->md_ss_ssr_cause);
if (desc->subsys_vmid > 0)
ret = pil_assign_mem_to_subsys(desc, priv->region_start,
@@ -215,16 +211,45 @@
* Calls the ramdump API with a list of segments generated from the addresses
* that the descriptor corresponds to.
*/
-int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+int pil_do_ramdump(struct pil_desc *desc,
+ void *ramdump_dev, void *minidump_dev)
{
+ struct ramdump_segment *ramdump_segs, *s;
struct pil_priv *priv = desc->priv;
struct pil_seg *seg;
int count = 0, ret;
- struct ramdump_segment *ramdump_segs, *s;
- if (priv->minidump && (__raw_readl(priv->minidump) > 0))
- return pil_do_minidump(desc, ramdump_dev);
-
+ if (desc->minidump) {
+ pr_info("Minidump : md_ss_toc->md_ss_toc_init is 0x%x\n",
+ (unsigned int)desc->minidump->md_ss_toc_init);
+ pr_info("Minidump : md_ss_toc->md_ss_enable_status is 0x%x\n",
+ (unsigned int)desc->minidump->md_ss_enable_status);
+ pr_info("Minidump : md_ss_toc->encryption_status is 0x%x\n",
+ (unsigned int)desc->minidump->encryption_status);
+ pr_info("Minidump : md_ss_toc->ss_region_count is 0x%x\n",
+ (unsigned int)desc->minidump->ss_region_count);
+ pr_info("Minidump : md_ss_toc->md_ss_smem_regions_baseptr is 0x%x\n",
+ (unsigned int)
+ desc->minidump->md_ss_smem_regions_baseptr);
+ /**
+ * Collect minidump if SS ToC is valid and segment table
+ * is initialized in memory and encryption status is set.
+ */
+ if ((desc->minidump->md_ss_smem_regions_baseptr != 0) &&
+ (desc->minidump->md_ss_toc_init == true) &&
+ (desc->minidump->md_ss_enable_status ==
+ MD_SS_ENABLED)) {
+ if (desc->minidump->encryption_status ==
+ MD_SS_ENCR_DONE) {
+ pr_info("Minidump : Dumping for %s\n",
+ desc->name);
+ return pil_do_minidump(desc, minidump_dev);
+ }
+ pr_info("Minidump : aborted for %s\n", desc->name);
+ return -EINVAL;
+ }
+ }
+ pr_debug("Continuing with full SSR dump for %s\n", desc->name);
list_for_each_entry(seg, &priv->segs, list)
count++;
@@ -1127,7 +1152,8 @@
{
struct pil_priv *priv;
void __iomem *addr;
- int ret, ss_imem_offset_mdump;
+ void *ss_toc_addr;
+ int ret;
char buf[sizeof(priv->info->name)];
struct device_node *ofnode = desc->dev->of_node;
@@ -1153,19 +1179,15 @@
__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
}
if (of_property_read_u32(ofnode, "qcom,minidump-id",
- &priv->minidump_id))
- pr_debug("minidump-id not found for %s\n", desc->name);
+ &desc->minidump_id))
+ pr_err("minidump-id not found for %s\n", desc->name);
else {
- ss_imem_offset_mdump =
- sizeof(struct md_ssr_ss_info) * priv->minidump_id;
- if (pil_minidump_base) {
- /* Add 0x4 to get start of struct md_ssr_ss_info base
- * from struct md_ssr_toc for any subsystem,
- * struct md_ssr_ss_info is actually the pointer
- * of ToC in smem for any subsystem.
- */
- addr = pil_minidump_base + ss_imem_offset_mdump + 0x4;
- priv->minidump = (struct md_ssr_ss_info __iomem *)addr;
+ if (g_md_toc && g_md_toc->md_toc_init == true) {
+ ss_toc_addr = &g_md_toc->md_ss_toc[desc->minidump_id];
+ pr_debug("Minidump : ss_toc_addr is %pa and desc->minidump_id is %d\n",
+ &ss_toc_addr, desc->minidump_id);
+ memcpy(&desc->minidump, &ss_toc_addr,
+ sizeof(ss_toc_addr));
}
}
@@ -1254,6 +1276,7 @@
struct device_node *np;
struct resource res;
int i;
+ unsigned int size;
np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
if (!np) {
@@ -1276,20 +1299,14 @@
for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
writel_relaxed(0, pil_info_base + (i * sizeof(u32)));
- np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-minidump");
- if (!np) {
- pr_warn("pil: failed to find qcom,msm-imem-minidump node\n");
- goto out;
- } else {
- pil_minidump_base = of_iomap(np, 0);
- if (!pil_minidump_base) {
- pr_err("unable to map pil minidump imem offset\n");
- goto out;
- }
+ /* Get Global minidump ToC*/
+ g_md_toc = smem_get_entry(SBL_MINIDUMP_SMEM_ID, &size, 0,
+ SMEM_ANY_HOST_FLAG);
+ pr_debug("Minidump: g_md_toc is %pa\n", &g_md_toc);
+ if (PTR_ERR(g_md_toc) == -EPROBE_DEFER) {
+ pr_err("SMEM is not initialized.\n");
+ return -EPROBE_DEFER;
}
- for (i = 0; i < sizeof(struct md_ssr_toc)/sizeof(u32); i++)
- writel_relaxed(0, pil_minidump_base + (i * sizeof(u32)));
- writel_relaxed(1, pil_minidump_base);
out:
return register_pm_notifier(&pil_pm_notifier);
}
@@ -1300,8 +1317,6 @@
unregister_pm_notifier(&pil_pm_notifier);
if (pil_info_base)
iounmap(pil_info_base);
- if (pil_minidump_base)
- iounmap(pil_minidump_base);
}
module_exit(msm_pil_exit);
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 27ed336..78c00fe 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/mailbox_client.h>
#include <linux/mailbox/qmp.h>
+#include "minidump_private.h"
struct device;
struct module;
@@ -63,6 +64,8 @@
bool signal_aop;
struct mbox_client cl;
struct mbox_chan *mbox;
+ struct md_ss_toc *minidump;
+ int minidump_id;
};
/**
@@ -77,34 +80,6 @@
__le32 size;
} __attribute__((__packed__));
-#define MAX_NUM_OF_SS 3
-
-/**
- * struct md_ssr_ss_info - Info in imem about smem ToC
- * @md_ss_smem_regions_baseptr: Start physical address of SMEM TOC
- * @md_ss_num_of_regions: number of segments that need to be dumped
- * @md_ss_encryption_status: status of encryption of segments
- * @md_ss_ssr_cause: ssr cause enum
- */
-struct md_ssr_ss_info {
- u32 md_ss_smem_regions_baseptr;
- u8 md_ss_num_of_regions;
- u8 md_ss_encryption_status;
- u8 md_ss_ssr_cause;
- u8 reserved;
-};
-
-/**
- * struct md_ssr_toc - Wrapper of struct md_ssr_ss_info
- * @md_ssr_toc_init: flag to indicate to MSS SW about imem init done
- * @md_ssr_ss: Instance of struct md_ssr_ss_info for a subsystem
- */
-struct md_ssr_toc /* Shared IMEM ToC struct */
-{
- u32 md_ssr_toc_init;
- struct md_ssr_ss_info md_ssr_ss[MAX_NUM_OF_SS];
-};
-
/**
* struct pil_reset_ops - PIL operations
* @init_image: prepare an image for authentication
@@ -137,7 +112,8 @@
extern void pil_free_memory(struct pil_desc *desc);
extern void pil_desc_release(struct pil_desc *desc);
extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
-extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev,
+ void *minidump_dev);
extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
size_t size);
extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
@@ -157,7 +133,8 @@
{
return 0;
}
-static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+static inline int pil_do_ramdump(struct pil_desc *desc,
+ void *ramdump_dev, void *minidump_dev)
{
return 0;
}
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index e0f912d..bc47a95 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -35,6 +35,7 @@
/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB 0x010
#define QDSP6SS_DBG_CFG 0x018
+#define QDSP6SS_NMI_CFG 0x40
/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE 0x180
@@ -366,10 +367,10 @@
ret);
}
- pil_mss_assert_resets(drv);
+ pil_mss_restart_reg(drv, true);
/* Wait 6 32kHz sleep cycles for reset */
udelay(200);
- ret = pil_mss_deassert_resets(drv);
+ ret = pil_mss_restart_reg(drv, false);
if (drv->is_booted) {
pil_mss_disable_clks(drv);
@@ -411,7 +412,7 @@
/* In case of any failure where reclaiming MBA and DP memory
* could not happen, free the memory here
*/
- if (drv->q6->mba_dp_virt) {
+ if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
@@ -556,7 +557,7 @@
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
phys_addr_t start_addr = pil_get_entry_addr(pil);
- u32 debug_val;
+ u32 debug_val = 0;
int ret;
trace_pil_func(__func__);
@@ -575,8 +576,10 @@
if (ret)
goto err_clks;
- /* Save state of modem debug register before full reset */
- debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+ if (!pil->minidump || !pil->modem_ssr) {
+ /* Save state of modem debug register before full reset */
+ debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+ }
/* Assert reset to subsystem */
pil_mss_assert_resets(drv);
@@ -586,9 +589,12 @@
if (ret)
goto err_restart;
- writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
- if (modem_dbg_cfg)
- writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
+ if (!pil->minidump || !pil->modem_ssr) {
+ writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
+ if (modem_dbg_cfg)
+ writel_relaxed(modem_dbg_cfg,
+ drv->reg_base + QDSP6SS_DBG_CFG);
+ }
/* Program Image Address */
if (drv->self_auth) {
@@ -648,7 +654,7 @@
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
struct modem_data *md = dev_get_drvdata(pil->dev);
- const struct firmware *fw, *dp_fw = NULL;
+ const struct firmware *fw = NULL, *dp_fw = NULL;
char fw_name_legacy[10] = "mba.b00";
char fw_name[10] = "mba.mbn";
char *dp_name = "msadp";
@@ -660,6 +666,8 @@
struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
trace_pil_func(__func__);
+ if (drv->mba_dp_virt && md->mba_mem_dev_fixed)
+ goto mss_reset;
fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
ret = request_firmware(&fw, fw_name_p, pil->dev);
if (ret) {
@@ -749,17 +757,19 @@
goto err_mba_data;
}
}
+ if (dp_fw)
+ release_firmware(dp_fw);
+ release_firmware(fw);
+ dp_fw = NULL;
+ fw = NULL;
+mss_reset:
ret = pil_mss_reset(pil);
if (ret) {
dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
goto err_mss_reset;
}
- if (dp_fw)
- release_firmware(dp_fw);
- release_firmware(fw);
-
return 0;
err_mss_reset:
@@ -772,11 +782,66 @@
err_invalid_fw:
if (dp_fw)
release_firmware(dp_fw);
- release_firmware(fw);
+ if (fw)
+ release_firmware(fw);
drv->mba_dp_virt = NULL;
return ret;
}
+int pil_mss_debug_reset(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ int ret;
+
+ if (!pil->minidump)
+ return 0;
+ /*
+ * Bring subsystem out of reset and enable required
+ * regulators and clocks.
+ */
+ ret = pil_mss_enable_clks(drv);
+ if (ret)
+ return ret;
+
+ if (pil->minidump) {
+ writel_relaxed(0x1, drv->reg_base + QDSP6SS_NMI_CFG);
+ /* Let write complete before proceeding */
+ mb();
+ udelay(2);
+ }
+ /* Assert reset to subsystem */
+ pil_mss_restart_reg(drv, true);
+ /* Wait 6 32kHz sleep cycles for reset */
+ udelay(200);
+ ret = pil_mss_restart_reg(drv, false);
+ if (ret)
+ goto err_restart;
+ /* Let write complete before proceeding */
+ mb();
+ udelay(200);
+ ret = pil_q6v5_reset(pil);
+ /*
+ * Need to Wait for timeout for debug reset sequence to
+ * complete before returning
+ */
+ pr_info("Minidump: waiting encryption to complete\n");
+ msleep(10000);
+ if (pil->minidump) {
+ writel_relaxed(0x2, drv->reg_base + QDSP6SS_NMI_CFG);
+ /* Let write complete before proceeding */
+ mb();
+ udelay(200);
+ }
+ if (ret)
+ goto err_restart;
+ return 0;
+err_restart:
+ pil_mss_disable_clks(drv);
+ if (drv->ahb_clk_vote)
+ clk_disable_unprepare(drv->ahb_clk);
+ return ret;
+}
+
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
size_t size, phys_addr_t region_start,
void *region)
@@ -851,10 +916,12 @@
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+ if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
drv->attrs_dma);
- drv->q6->mba_dp_virt = NULL;
+ drv->q6->mba_dp_virt = NULL;
+ }
}
return ret;
@@ -921,7 +988,7 @@
}
if (drv->q6) {
- if (drv->q6->mba_dp_virt) {
+ if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
/* Reclaim MBA and DP (if allocated) memory. */
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil,
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index 0f1e75b..a302a14 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
struct subsys_device *subsys;
struct subsys_desc subsys_desc;
void *ramdump_dev;
+ void *minidump_dev;
bool crash_shutdown;
u32 pas_id;
bool ignore_errors;
@@ -46,4 +47,5 @@
int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
int pil_mss_assert_resets(struct q6v5_data *drv);
int pil_mss_deassert_resets(struct q6v5_data *drv);
+int pil_mss_debug_reset(struct pil_desc *pil);
#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 3fdacf2..ac322f8 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -163,11 +163,21 @@
if (ret)
return ret;
+ ret = pil_mss_debug_reset(&drv->q6->desc);
+ if (ret)
+ return ret;
+
+ pil_mss_remove_proxy_votes(&drv->q6->desc);
+ ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+ if (ret)
+ return ret;
+
ret = pil_mss_reset_load_mba(&drv->q6->desc);
if (ret)
return ret;
- ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+ ret = pil_do_ramdump(&drv->q6->desc,
+ drv->ramdump_dev, drv->minidump_dev);
if (ret < 0)
pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
@@ -242,9 +252,18 @@
ret = -ENOMEM;
goto err_ramdump;
}
+ drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+ if (!drv->minidump_dev) {
+ pr_err("%s: Unable to create a modem minidump device.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_minidump;
+ }
return 0;
+err_minidump:
+ destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
subsys_unregister(drv->subsys);
err_subsys:
@@ -420,6 +439,7 @@
subsys_unregister(drv->subsys);
destroy_ramdump_device(drv->ramdump_dev);
+ destroy_ramdump_device(drv->minidump_dev);
pil_desc_release(&drv->q6->desc);
return 0;
}
diff --git a/drivers/soc/qcom/qmp-debugfs-client.c b/drivers/soc/qcom/qmp-debugfs-client.c
index 578e7f0..d7a473e 100644
--- a/drivers/soc/qcom/qmp-debugfs-client.c
+++ b/drivers/soc/qcom/qmp-debugfs-client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,37 +20,55 @@
#include <linux/platform_device.h>
#include <linux/mailbox/qmp.h>
#include <linux/uaccess.h>
+#include <linux/mailbox_controller.h>
#define MAX_MSG_SIZE 96 /* Imposed by the remote*/
+struct qmp_debugfs_data {
+ struct qmp_pkt pkt;
+ char buf[MAX_MSG_SIZE + 1];
+};
+
+static struct qmp_debugfs_data data_pkt[MBOX_TX_QUEUE_LEN];
static struct mbox_chan *chan;
static struct mbox_client *cl;
+static DEFINE_MUTEX(qmp_debugfs_mutex);
+
static ssize_t aop_msg_write(struct file *file, const char __user *userstr,
size_t len, loff_t *pos)
{
- char buf[MAX_MSG_SIZE + 1] = {0};
- struct qmp_pkt pkt;
+ static int count;
int rc;
if (!len || (len > MAX_MSG_SIZE))
return len;
- rc = copy_from_user(buf, userstr, len);
+ mutex_lock(&qmp_debugfs_mutex);
+
+ if (count >= MBOX_TX_QUEUE_LEN)
+ count = 0;
+
+ memset(&(data_pkt[count]), 0, sizeof(data_pkt[count]));
+ rc = copy_from_user(data_pkt[count].buf, userstr, len);
if (rc) {
pr_err("%s copy from user failed, rc=%d\n", __func__, rc);
+ mutex_unlock(&qmp_debugfs_mutex);
return len;
}
/*
* Controller expects a 4 byte aligned buffer
*/
- pkt.size = (len + 0x3) & ~0x3;
- pkt.data = buf;
+ data_pkt[count].pkt.size = (len + 0x3) & ~0x3;
+ data_pkt[count].pkt.data = data_pkt[count].buf;
- if (mbox_send_message(chan, &pkt) < 0)
+ if (mbox_send_message(chan, &(data_pkt[count].pkt)) < 0)
pr_err("Failed to send qmp request\n");
+ else
+ count++;
+ mutex_unlock(&qmp_debugfs_mutex);
return len;
}
@@ -68,7 +86,7 @@
cl->dev = &pdev->dev;
cl->tx_block = true;
- cl->tx_tout = 100;
+ cl->tx_tout = 1000;
cl->knows_txdone = false;
chan = mbox_request_channel(cl, 0);
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 492b68c..fec6f17 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -185,9 +185,8 @@
case SCM_ENOMEM:
return -ENOMEM;
case SCM_EBUSY:
- return SCM_EBUSY;
case SCM_V2_EBUSY:
- return SCM_V2_EBUSY;
+ return -EBUSY;
}
return -EINVAL;
}
@@ -338,13 +337,13 @@
do {
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
resp_buf, resp_len, cmd, len);
- if (ret == SCM_EBUSY)
+ if (ret == -EBUSY)
msleep(SCM_EBUSY_WAIT_MS);
if (retry_count == 33)
pr_warn("scm: secure world has been busy for 1 second!\n");
- } while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+ } while (ret == -EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
- if (ret == SCM_EBUSY)
+ if (ret == -EBUSY)
pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
return ret;
@@ -666,7 +665,7 @@
if (unlikely(!is_scm_armv8()))
return -ENODEV;
- ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
+ ret = allocate_extra_arg_buffer(desc, GFP_NOIO);
if (ret)
return ret;
@@ -799,7 +798,7 @@
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
resp_len, cmd, len);
- if (unlikely(ret == SCM_EBUSY))
+ if (unlikely(ret == -EBUSY))
ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
resp_buf, resp_len, cmd, PAGE_ALIGN(len));
kfree(cmd);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 82dea32..685b384 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -591,6 +591,11 @@
[349] = {MSM_CPU_SDM632, "SDM632"},
[350] = {MSM_CPU_SDA632, "SDA632"},
+ /* MSM8937 ID */
+ [294] = {MSM_CPU_8937, "MSM8937"},
+ [295] = {MSM_CPU_8937, "APQ8937"},
+
+
/* Uninitialized IDs are not known to run Linux.
* MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
* considered as unknown CPU.
@@ -652,6 +657,55 @@
return "UNKNOWN SOC TYPE";
}
+const char * __init arch_read_machine_name(void)
+{
+ static char msm_machine_name[256] = "Qualcomm Technologies, Inc. ";
+ static bool string_generated;
+ u32 len = 0;
+ const char *name;
+
+ if (string_generated)
+ return msm_machine_name;
+
+ len = strlen(msm_machine_name);
+ name = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "qcom,msm-name", NULL);
+ if (name)
+ len += snprintf(msm_machine_name + len,
+ sizeof(msm_machine_name) - len,
+ "%s", name);
+ else
+ goto no_prop_path;
+
+ name = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "qcom,pmic-name", NULL);
+ if (name) {
+ len += snprintf(msm_machine_name + len,
+ sizeof(msm_machine_name) - len,
+ "%s", " ");
+ len += snprintf(msm_machine_name + len,
+ sizeof(msm_machine_name) - len,
+ "%s", name);
+ } else
+ goto no_prop_path;
+
+ name = of_flat_dt_get_machine_name();
+ if (name) {
+ len += snprintf(msm_machine_name + len,
+ sizeof(msm_machine_name) - len,
+ "%s", " ");
+ len += snprintf(msm_machine_name + len,
+ sizeof(msm_machine_name) - len,
+ "%s", name);
+ } else
+ goto no_prop_path;
+
+ string_generated = true;
+ return msm_machine_name;
+no_prop_path:
+ return of_flat_dt_get_machine_name();
+}
+
uint32_t socinfo_get_raw_id(void)
{
return socinfo ?
@@ -1469,6 +1523,10 @@
dummy_socinfo.id = 293;
strlcpy(dummy_socinfo.build_id, "msm8953 - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_msm8937()) {
+ dummy_socinfo.id = 294;
+ strlcpy(dummy_socinfo.build_id, "msm8937 - ",
+ sizeof(dummy_socinfo.build_id));
} else if (early_machine_is_sdm450()) {
dummy_socinfo.id = 338;
strlcpy(dummy_socinfo.build_id, "sdm450 - ",
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 92b6423..e221e6b 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -905,7 +905,7 @@
if (!enable)
return 0;
- return pil_do_ramdump(&d->desc, d->ramdump_dev);
+ return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL);
}
static void subsys_free_memory(const struct subsys_desc *subsys)
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index f5e76e0..6d58d6b 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -691,7 +691,7 @@
wdog_dd->user_pet_complete = true;
wdog_dd->user_pet_enabled = false;
wake_up_process(wdog_dd->watchdog_task);
- init_timer(&wdog_dd->pet_timer);
+ init_timer_deferrable(&wdog_dd->pet_timer);
wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
wdog_dd->pet_timer.function = pet_task_wakeup;
wdog_dd->pet_timer.expires = jiffies + delay_time;
diff --git a/drivers/soc/qcom/wcnss/Kconfig b/drivers/soc/qcom/wcnss/Kconfig
new file mode 100644
index 0000000..5d8d010
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/Kconfig
@@ -0,0 +1,39 @@
+config WCNSS_CORE
+ tristate "Qualcomm Technologies Inc. WCNSS CORE driver"
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select WEXT_CORE
+ select WEXT_SPY
+ help
+ This module adds support for the WLAN connectivity subsystem.
+ This module is responsible for communicating WLAN on/off.
+ Core driver for the Qualcomm Technologies Inc. WCNSS triple play
+ connectivity subsystem, Enable WCNSS core platform driver
+ for WLAN.
+
+config WCNSS_CORE_PRONTO
+ tristate "Qualcomm Technologies Inc. WCNSS Pronto Support"
+ depends on WCNSS_CORE
+ help
+ Pronto Support for the Qualcomm Technologies Inc. WCNSS triple
+ play connectivity subsystem, Enable WCNSS core platform driver
+ for WLAN. This module adds support for the WLAN connectivity subsystem.
+ This module is responsible for communicating WLAN on/off.
+
+config WCNSS_REGISTER_DUMP_ON_BITE
+ bool "Enable/disable WCNSS register dump when there is a WCNSS bite"
+ depends on WCNSS_CORE_PRONTO
+ help
+ When Apps receives a WDOG bite from WCNSS, collecting a register dump
+ of WCNSS is helpful to root cause the failure. WCNSS may not be
+ properly clocked in some WCNSS bite cases, and that may cause unclocked
+ register access failures. So this feature is to enable/disable the
+ register dump on WCNSS WDOG bite.
+
+config CNSS_CRYPTO
+ tristate "Enable CNSS crypto support"
+ help
+ Add crypto support for the WLAN driver module.
+ This feature enable wlan driver to use the crypto APIs exported
+ from cnss platform driver. This crypto APIs used to generate cipher
+ key and add support for the WLAN driver module security protocol.
diff --git a/drivers/soc/qcom/wcnss/Makefile b/drivers/soc/qcom/wcnss/Makefile
new file mode 100644
index 0000000..072fef8
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/Makefile
@@ -0,0 +1,6 @@
+
+# Makefile for WCNSS triple-play driver
+
+wcnsscore-objs += wcnss_wlan.o wcnss_vreg.o
+
+obj-$(CONFIG_WCNSS_CORE) += wcnsscore.o
diff --git a/drivers/soc/qcom/wcnss/wcnss_vreg.c b/drivers/soc/qcom/wcnss/wcnss_vreg.c
new file mode 100644
index 0000000..5ce2e82
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/wcnss_vreg.c
@@ -0,0 +1,842 @@
+/* Copyright (c) 2011-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/wcnss_wlan.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+static void __iomem *msm_wcnss_base;
+static LIST_HEAD(power_on_lock_list);
+static DEFINE_MUTEX(list_lock);
+static DEFINE_SEMAPHORE(wcnss_power_on_lock);
+static int auto_detect;
+static int is_power_on;
+
+#define RIVA_PMU_OFFSET 0x28
+
+#define RIVA_SPARE_OFFSET 0x0b4
+#define PRONTO_SPARE_OFFSET 0x1088
+#define NVBIN_DLND_BIT BIT(25)
+
+#define PRONTO_IRIS_REG_READ_OFFSET 0x1134
+#define PRONTO_IRIS_REG_CHIP_ID 0x04
+/* IRIS card chip ID's */
+#define WCN3660 0x0200
+#define WCN3660A 0x0300
+#define WCN3660B 0x0400
+#define WCN3620 0x5111
+#define WCN3620A 0x5112
+#define WCN3610 0x9101
+#define WCN3610V1 0x9110
+
+#define WCNSS_PMU_CFG_IRIS_XO_CFG BIT(3)
+#define WCNSS_PMU_CFG_IRIS_XO_EN BIT(4)
+#define WCNSS_PMU_CFG_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_CFG_IRIS_RESET BIT(7)
+#define WCNSS_PMU_CFG_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_CFG_IRIS_XO_READ BIT(9)
+#define WCNSS_PMU_CFG_IRIS_XO_READ_STS BIT(10)
+
+#define WCNSS_PMU_CFG_IRIS_XO_MODE 0x6
+#define WCNSS_PMU_CFG_IRIS_XO_MODE_48 (3 << 1)
+
+#define VREG_NULL_CONFIG 0x0000
+#define VREG_GET_REGULATOR_MASK 0x0001
+#define VREG_SET_VOLTAGE_MASK 0x0002
+#define VREG_OPTIMUM_MODE_MASK 0x0004
+#define VREG_ENABLE_MASK 0x0008
+#define VDD_PA "qcom,iris-vddpa"
+
+#define WCNSS_INVALID_IRIS_REG 0xbaadbaad
+
+struct vregs_info {
+ const char * const name;
+ const char * const curr;
+ const char * const volt;
+ int state;
+ bool required;
+ struct regulator *regulator;
+};
+
+/* IRIS regulators for Pronto hardware */
+static struct vregs_info iris_vregs[] = {
+ {"qcom,iris-vddxo", "qcom,iris-vddxo-current",
+ "qcom,iris-vddxo-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,iris-vddrfa", "qcom,iris-vddrfa-current",
+ "qcom,iris-vddrfa-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,iris-vddpa", "qcom,iris-vddpa-current",
+ "qcom,iris-vddpa-voltage-level", VREG_NULL_CONFIG, false, NULL},
+ {"qcom,iris-vdddig", "qcom,iris-vdddig-current",
+ "qcom,iris-vdddig-voltage-level", VREG_NULL_CONFIG, true, NULL},
+};
+
+/* WCNSS regulators for Pronto hardware */
+static struct vregs_info pronto_vregs[] = {
+ {"qcom,pronto-vddmx", "qcom,pronto-vddmx-current",
+ "qcom,vddmx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,pronto-vddcx", "qcom,pronto-vddcx-current",
+ "qcom,vddcx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+ {"qcom,pronto-vddpx", "qcom,pronto-vddpx-current",
+ "qcom,vddpx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+};
+
+struct host_driver {
+ char name[20];
+ struct list_head list;
+};
+
+enum {
+ IRIS_3660, /* also 3660A and 3680 */
+ IRIS_3620,
+ IRIS_3610
+};
+
+int xo_auto_detect(u32 reg)
+{
+ reg >>= 30;
+
+ switch (reg) {
+ case IRIS_3660:
+ return WCNSS_XO_48MHZ;
+
+ case IRIS_3620:
+ return WCNSS_XO_19MHZ;
+
+ case IRIS_3610:
+ return WCNSS_XO_19MHZ;
+
+ default:
+ return WCNSS_XO_INVALID;
+ }
+}
+
+int wcnss_get_iris_name(char *iris_name)
+{
+ struct wcnss_wlan_config *cfg = NULL;
+ int iris_id;
+
+ cfg = wcnss_get_wlan_config();
+
+ if (cfg) {
+ iris_id = cfg->iris_id;
+ iris_id = iris_id >> 16;
+ } else {
+ return 1;
+ }
+
+ switch (iris_id) {
+ case WCN3660:
+ memcpy(iris_name, "WCN3660", sizeof("WCN3660"));
+ break;
+ case WCN3660A:
+ memcpy(iris_name, "WCN3660A", sizeof("WCN3660A"));
+ break;
+ case WCN3660B:
+ memcpy(iris_name, "WCN3660B", sizeof("WCN3660B"));
+ break;
+ case WCN3620:
+ memcpy(iris_name, "WCN3620", sizeof("WCN3620"));
+ break;
+ case WCN3620A:
+ memcpy(iris_name, "WCN3620A", sizeof("WCN3620A"));
+ break;
+ case WCN3610:
+ memcpy(iris_name, "WCN3610", sizeof("WCN3610"));
+ break;
+ case WCN3610V1:
+ memcpy(iris_name, "WCN3610V1", sizeof("WCN3610V1"));
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wcnss_get_iris_name);
+
+int validate_iris_chip_id(u32 reg)
+{
+ int iris_id;
+
+ iris_id = reg >> 16;
+
+ switch (iris_id) {
+ case WCN3660:
+ case WCN3660A:
+ case WCN3660B:
+ case WCN3620:
+ case WCN3620A:
+ case WCN3610:
+ case WCN3610V1:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static void wcnss_free_regulator(void)
+{
+ int vreg_i;
+
+ /* Free pronto voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < PRONTO_REGULATORS; vreg_i++) {
+ if (pronto_vregs[vreg_i].state) {
+ regulator_put(pronto_vregs[vreg_i].regulator);
+ pronto_vregs[vreg_i].state = VREG_NULL_CONFIG;
+ }
+ }
+
+ /* Free IRIS voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < IRIS_REGULATORS; vreg_i++) {
+ if (iris_vregs[vreg_i].state) {
+ regulator_put(iris_vregs[vreg_i].regulator);
+ iris_vregs[vreg_i].state = VREG_NULL_CONFIG;
+ }
+ }
+}
+
+static int
+wcnss_dt_parse_vreg_level(struct device *dev, int index,
+ const char *current_vreg_name, const char *vreg_name,
+ struct vregs_level *vlevel)
+{
+ int ret = 0;
+ /* array used to store nominal, low and high voltage values */
+ u32 voltage_levels[3], current_vreg;
+
+ ret = of_property_read_u32_array(dev->of_node, vreg_name,
+ voltage_levels,
+ ARRAY_SIZE(voltage_levels));
+ if (ret) {
+ dev_err(dev, "error reading %s property\n", vreg_name);
+ return ret;
+ }
+
+ vlevel[index].nominal_min = voltage_levels[0];
+ vlevel[index].low_power_min = voltage_levels[1];
+ vlevel[index].max_voltage = voltage_levels[2];
+
+ ret = of_property_read_u32(dev->of_node, current_vreg_name,
+ &current_vreg);
+ if (ret) {
+ dev_err(dev, "error reading %s property\n", current_vreg_name);
+ return ret;
+ }
+
+ vlevel[index].uA_load = current_vreg;
+
+ return ret;
+}
+
+int
+wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+ struct device *dev)
+{
+ int rc, vreg_i;
+
+ /* Parse pronto voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < PRONTO_REGULATORS; vreg_i++) {
+ pronto_vregs[vreg_i].regulator =
+ regulator_get(dev, pronto_vregs[vreg_i].name);
+ if (IS_ERR(pronto_vregs[vreg_i].regulator)) {
+ if (pronto_vregs[vreg_i].required) {
+ rc = PTR_ERR(pronto_vregs[vreg_i].regulator);
+ dev_err(dev, "regulator get of %s failed (%d)\n",
+ pronto_vregs[vreg_i].name, rc);
+ goto wcnss_vreg_get_err;
+ } else {
+ dev_dbg(dev, "Skip optional regulator configuration: %s\n",
+ pronto_vregs[vreg_i].name);
+ continue;
+ }
+ }
+
+ pronto_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
+ rc = wcnss_dt_parse_vreg_level(dev, vreg_i,
+ pronto_vregs[vreg_i].curr,
+ pronto_vregs[vreg_i].volt,
+ wlan_config->pronto_vlevel);
+ if (rc) {
+ dev_err(dev, "error reading voltage-level property\n");
+ goto wcnss_vreg_get_err;
+ }
+ }
+
+ /* Parse iris voltage regulators from device node */
+ for (vreg_i = 0; vreg_i < IRIS_REGULATORS; vreg_i++) {
+ iris_vregs[vreg_i].regulator =
+ regulator_get(dev, iris_vregs[vreg_i].name);
+ if (IS_ERR(iris_vregs[vreg_i].regulator)) {
+ if (iris_vregs[vreg_i].required) {
+ rc = PTR_ERR(iris_vregs[vreg_i].regulator);
+ dev_err(dev, "regulator get of %s failed (%d)\n",
+ iris_vregs[vreg_i].name, rc);
+ goto wcnss_vreg_get_err;
+ } else {
+ dev_dbg(dev, "Skip optional regulator configuration: %s\n",
+ iris_vregs[vreg_i].name);
+ continue;
+ }
+ }
+
+ iris_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
+ rc = wcnss_dt_parse_vreg_level(dev, vreg_i,
+ iris_vregs[vreg_i].curr,
+ iris_vregs[vreg_i].volt,
+ wlan_config->iris_vlevel);
+ if (rc) {
+ dev_err(dev, "error reading voltage-level property\n");
+ goto wcnss_vreg_get_err;
+ }
+ }
+
+ return 0;
+
+wcnss_vreg_get_err:
+ wcnss_free_regulator();
+ return rc;
+}
+
+void wcnss_iris_reset(u32 reg, void __iomem *pmu_conf_reg)
+{
+ /* Reset IRIS */
+ reg |= WCNSS_PMU_CFG_IRIS_RESET;
+ writel_relaxed(reg, pmu_conf_reg);
+
+ /* Wait for PMU_CFG.iris_reg_reset_sts */
+ while (readl_relaxed(pmu_conf_reg) &
+ WCNSS_PMU_CFG_IRIS_RESET_STS)
+ cpu_relax();
+
+ /* Reset iris reset bit */
+ reg &= ~WCNSS_PMU_CFG_IRIS_RESET;
+ writel_relaxed(reg, pmu_conf_reg);
+}
+
+static int
+configure_iris_xo(struct device *dev,
+ struct wcnss_wlan_config *cfg,
+ int on, int *iris_xo_set)
+{
+ u32 reg = 0, i = 0;
+ u32 iris_reg = WCNSS_INVALID_IRIS_REG;
+ int rc = 0;
+ int pmu_offset = 0;
+ int spare_offset = 0;
+ void __iomem *pmu_conf_reg;
+ void __iomem *spare_reg;
+ void __iomem *iris_read_reg;
+ struct clk *clk;
+ struct clk *clk_rf = NULL;
+ bool use_48mhz_xo;
+
+ use_48mhz_xo = cfg->use_48mhz_xo;
+
+ if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+ pmu_offset = PRONTO_PMU_OFFSET;
+ spare_offset = PRONTO_SPARE_OFFSET;
+
+ clk = clk_get(dev, "xo");
+ if (IS_ERR(clk)) {
+ pr_err("Couldn't get xo clock\n");
+ return PTR_ERR(clk);
+ }
+
+ } else {
+ pmu_offset = RIVA_PMU_OFFSET;
+ spare_offset = RIVA_SPARE_OFFSET;
+
+ clk = clk_get(dev, "cxo");
+ if (IS_ERR(clk)) {
+ pr_err("Couldn't get cxo clock\n");
+ return PTR_ERR(clk);
+ }
+ }
+
+ if (on) {
+ msm_wcnss_base = cfg->msm_wcnss_base;
+ if (!msm_wcnss_base) {
+ pr_err("ioremap wcnss physical failed\n");
+ goto fail;
+ }
+
+ /* Enable IRIS XO */
+ rc = clk_prepare_enable(clk);
+ if (rc) {
+ pr_err("clk enable failed\n");
+ goto fail;
+ }
+
+ /* NV bit is set to indicate that platform driver is capable
+ * of doing NV download.
+ */
+ pr_debug("wcnss: Indicate NV bin download\n");
+ spare_reg = msm_wcnss_base + spare_offset;
+ reg = readl_relaxed(spare_reg);
+ reg |= NVBIN_DLND_BIT;
+ writel_relaxed(reg, spare_reg);
+
+ pmu_conf_reg = msm_wcnss_base + pmu_offset;
+ writel_relaxed(0, pmu_conf_reg);
+ reg = readl_relaxed(pmu_conf_reg);
+ reg |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+ WCNSS_PMU_CFG_IRIS_XO_EN;
+ writel_relaxed(reg, pmu_conf_reg);
+
+ if (wcnss_xo_auto_detect_enabled()) {
+ iris_read_reg = msm_wcnss_base +
+ PRONTO_IRIS_REG_READ_OFFSET;
+ iris_reg = readl_relaxed(iris_read_reg);
+ }
+
+ wcnss_iris_reset(reg, pmu_conf_reg);
+
+ if (iris_reg != WCNSS_INVALID_IRIS_REG) {
+ iris_reg &= 0xffff;
+ iris_reg |= PRONTO_IRIS_REG_CHIP_ID;
+ writel_relaxed(iris_reg, iris_read_reg);
+ do {
+ /* Iris read */
+ reg = readl_relaxed(pmu_conf_reg);
+ reg |= WCNSS_PMU_CFG_IRIS_XO_READ;
+ writel_relaxed(reg, pmu_conf_reg);
+
+ /* Wait for PMU_CFG.iris_reg_read_sts */
+ while (readl_relaxed(pmu_conf_reg) &
+ WCNSS_PMU_CFG_IRIS_XO_READ_STS)
+ cpu_relax();
+
+ iris_reg = readl_relaxed(iris_read_reg);
+ pr_info("wcnss: IRIS Reg: %08x\n", iris_reg);
+
+ if (validate_iris_chip_id(iris_reg) && i >= 4) {
+ pr_info("wcnss: IRIS Card absent/invalid\n");
+ auto_detect = WCNSS_XO_INVALID;
+ /* Reset iris read bit */
+ reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+ /* Clear XO_MODE[b2:b1] bits.
+ * Clear implies 19.2 MHz TCXO
+ */
+ reg &= ~(WCNSS_PMU_CFG_IRIS_XO_MODE);
+ goto xo_configure;
+ } else if (!validate_iris_chip_id(iris_reg)) {
+ pr_debug("wcnss: IRIS Card is present\n");
+ break;
+ }
+ reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+ writel_relaxed(reg, pmu_conf_reg);
+ wcnss_iris_reset(reg, pmu_conf_reg);
+ } while (i++ < 5);
+ auto_detect = xo_auto_detect(iris_reg);
+
+ /* Reset iris read bit */
+ reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+
+ } else if (wcnss_xo_auto_detect_enabled()) {
+ /* Default to 48 MHZ */
+ auto_detect = WCNSS_XO_48MHZ;
+ } else {
+ auto_detect = WCNSS_XO_INVALID;
+ }
+
+ cfg->iris_id = iris_reg;
+
+ /* Clear XO_MODE[b2:b1] bits. Clear implies 19.2 MHz TCXO */
+ reg &= ~(WCNSS_PMU_CFG_IRIS_XO_MODE);
+
+ if ((use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) ||
+ auto_detect == WCNSS_XO_48MHZ) {
+ reg |= WCNSS_PMU_CFG_IRIS_XO_MODE_48;
+
+ if (iris_xo_set)
+ *iris_xo_set = WCNSS_XO_48MHZ;
+ }
+
+xo_configure:
+ writel_relaxed(reg, pmu_conf_reg);
+
+ wcnss_iris_reset(reg, pmu_conf_reg);
+
+ /* Start IRIS XO configuration */
+ reg |= WCNSS_PMU_CFG_IRIS_XO_CFG;
+ writel_relaxed(reg, pmu_conf_reg);
+
+ /* Wait for XO configuration to finish */
+ while (readl_relaxed(pmu_conf_reg) &
+ WCNSS_PMU_CFG_IRIS_XO_CFG_STS)
+ cpu_relax();
+
+ /* Stop IRIS XO configuration */
+ reg &= ~(WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+ WCNSS_PMU_CFG_IRIS_XO_CFG);
+ writel_relaxed(reg, pmu_conf_reg);
+ clk_disable_unprepare(clk);
+
+ if ((!use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) ||
+ auto_detect == WCNSS_XO_19MHZ) {
+ clk_rf = clk_get(dev, "rf_clk");
+ if (IS_ERR(clk_rf)) {
+ pr_err("Couldn't get rf_clk\n");
+ goto fail;
+ }
+
+ rc = clk_prepare_enable(clk_rf);
+ if (rc) {
+ pr_err("clk_rf enable failed\n");
+ goto fail;
+ }
+ if (iris_xo_set)
+ *iris_xo_set = WCNSS_XO_19MHZ;
+ }
+
+ } else if ((!use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) ||
+ auto_detect == WCNSS_XO_19MHZ) {
+ clk_rf = clk_get(dev, "rf_clk");
+ if (IS_ERR(clk_rf)) {
+ pr_err("Couldn't get rf_clk\n");
+ goto fail;
+ }
+ clk_disable_unprepare(clk_rf);
+ }
+
+ /* Add some delay for XO to settle */
+ msleep(20);
+
+fail:
+ clk_put(clk);
+
+ if (clk_rf)
+ clk_put(clk_rf);
+
+ return rc;
+}
+
+/* Helper routine to turn off all WCNSS & IRIS vregs */
+static void wcnss_vregs_off(struct vregs_info regulators[], uint size,
+ struct vregs_level *voltage_level)
+{
+ int i, rc = 0;
+ struct wcnss_wlan_config *cfg;
+
+ cfg = wcnss_get_wlan_config();
+
+ if (!cfg) {
+ pr_err("Failed to get WLAN configuration\n");
+ return;
+ }
+
+ /* Regulators need to be turned off in the reverse order */
+ for (i = (size - 1); i >= 0; i--) {
+ if (regulators[i].state == VREG_NULL_CONFIG)
+ continue;
+
+ /* Remove PWM mode */
+ if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
+ rc = regulator_set_load(regulators[i].regulator, 0);
+ if (rc < 0) {
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ }
+ }
+
+ /* Set voltage to lowest level */
+ if (regulators[i].state & VREG_SET_VOLTAGE_MASK) {
+ if (cfg->is_pronto_vadc) {
+ if (cfg->vbatt < WCNSS_VBATT_THRESHOLD &&
+ !memcmp(regulators[i].name,
+ VDD_PA, sizeof(VDD_PA))) {
+ voltage_level[i].max_voltage =
+ WCNSS_VBATT_LOW;
+ }
+ }
+
+ rc = regulator_set_voltage(regulators[i].regulator,
+ voltage_level[i].
+ low_power_min,
+ voltage_level[i].
+ max_voltage);
+
+ if (rc)
+ pr_err("regulator_set_voltage(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ }
+
+ /* Disable regulator */
+ if (regulators[i].state & VREG_ENABLE_MASK) {
+ rc = regulator_disable(regulators[i].regulator);
+ if (rc < 0)
+ pr_err("vreg %s disable failed (%d)\n",
+ regulators[i].name, rc);
+ }
+
+ /* Free the regulator source */
+ if (regulators[i].state & VREG_GET_REGULATOR_MASK)
+ regulator_put(regulators[i].regulator);
+
+ regulators[i].state = VREG_NULL_CONFIG;
+ }
+}
+
+/* Common helper routine to turn on all WCNSS & IRIS vregs */
+static int wcnss_vregs_on(struct device *dev,
+ struct vregs_info regulators[], uint size,
+ struct vregs_level *voltage_level)
+{
+ int i, rc = 0, reg_cnt;
+ struct wcnss_wlan_config *cfg;
+
+ cfg = wcnss_get_wlan_config();
+
+ if (!cfg) {
+ pr_err("Failed to get WLAN configuration\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (regulators[i].state == VREG_NULL_CONFIG)
+ continue;
+
+ reg_cnt = regulator_count_voltages(regulators[i].regulator);
+ /* Set voltage to nominal. Exclude switches e.g. LVS */
+ if ((voltage_level[i].nominal_min ||
+ voltage_level[i].max_voltage) && (reg_cnt > 0)) {
+ if (cfg->is_pronto_vadc) {
+ if (cfg->vbatt < WCNSS_VBATT_THRESHOLD &&
+ !memcmp(regulators[i].name,
+ VDD_PA, sizeof(VDD_PA))) {
+ voltage_level[i].nominal_min =
+ WCNSS_VBATT_INITIAL;
+ voltage_level[i].max_voltage =
+ WCNSS_VBATT_LOW;
+ }
+ }
+
+ rc = regulator_set_voltage(regulators[i].regulator,
+ voltage_level[i].nominal_min,
+ voltage_level[i].
+ max_voltage);
+
+ if (rc) {
+ pr_err("regulator_set_voltage(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ goto fail;
+ }
+ regulators[i].state |= VREG_SET_VOLTAGE_MASK;
+ }
+
+ /* Vote for PWM/PFM mode if needed */
+ if (voltage_level[i].uA_load && (reg_cnt > 0)) {
+ rc = regulator_set_load(regulators[i].regulator,
+ voltage_level[i].uA_load);
+ if (rc < 0) {
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ goto fail;
+ }
+ regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
+ }
+
+ /* Enable the regulator */
+ rc = regulator_enable(regulators[i].regulator);
+ if (rc) {
+ pr_err("vreg %s enable failed (%d)\n",
+ regulators[i].name, rc);
+ goto fail;
+ }
+ regulators[i].state |= VREG_ENABLE_MASK;
+ }
+
+ return rc;
+
+fail:
+ wcnss_vregs_off(regulators, size, voltage_level);
+ return rc;
+}
+
+/* Turn off the IRIS regulators for the given hardware type. */
+static void wcnss_iris_vregs_off(enum wcnss_hw_type hw_type,
+				 struct wcnss_wlan_config *cfg)
+{
+	/* Only Pronto hardware is supported here. */
+	if (hw_type == WCNSS_PRONTO_HW)
+		wcnss_vregs_off(iris_vregs, ARRAY_SIZE(iris_vregs),
+				cfg->iris_vlevel);
+	else
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+}
+
+/* Turn on the IRIS regulators; returns 0 on success, -1 for unknown hw. */
+static int wcnss_iris_vregs_on(struct device *dev,
+			       enum wcnss_hw_type hw_type,
+			       struct wcnss_wlan_config *cfg)
+{
+	int rc = -1;
+
+	if (hw_type == WCNSS_PRONTO_HW)
+		rc = wcnss_vregs_on(dev, iris_vregs, ARRAY_SIZE(iris_vregs),
+				    cfg->iris_vlevel);
+	else
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+
+	return rc;
+}
+
+/* Turn off the WCNSS core (Pronto) regulators for the given hardware type. */
+static void wcnss_core_vregs_off(enum wcnss_hw_type hw_type,
+				 struct wcnss_wlan_config *cfg)
+{
+	if (hw_type == WCNSS_PRONTO_HW)
+		wcnss_vregs_off(pronto_vregs, ARRAY_SIZE(pronto_vregs),
+				cfg->pronto_vlevel);
+	else
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+}
+
+/* Turn on the WCNSS core regulators; returns 0 on success, -1 for unknown hw. */
+static int wcnss_core_vregs_on(struct device *dev,
+			       enum wcnss_hw_type hw_type,
+			       struct wcnss_wlan_config *cfg)
+{
+	int rc = -1;
+
+	if (hw_type == WCNSS_PRONTO_HW)
+		rc = wcnss_vregs_on(dev, pronto_vregs,
+				    ARRAY_SIZE(pronto_vregs),
+				    cfg->pronto_vlevel);
+	else
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+
+	return rc;
+}
+
+int wcnss_wlan_power(struct device *dev,
+		     struct wcnss_wlan_config *cfg,
+		     enum wcnss_opcode on, int *iris_xo_set)
+{
+	int rc = 0;
+	enum wcnss_hw_type hw_type = wcnss_hardware_type();
+
+	/* Serialize power transitions; this semaphore is also held
+	 * across SSR via wcnss_req_power_on_lock().
+	 */
+	down(&wcnss_power_on_lock);
+	if (on) {
+		/* RIVA regulator settings */
+		rc = wcnss_core_vregs_on(dev, hw_type,
+					 cfg);
+		if (rc)
+			goto fail_wcnss_on;
+
+		/* IRIS regulator settings */
+		rc = wcnss_iris_vregs_on(dev, hw_type,
+					 cfg);
+		if (rc)
+			goto fail_iris_on;
+
+		/* Configure IRIS XO */
+		rc = configure_iris_xo(dev, cfg,
+				       WCNSS_WLAN_SWITCH_ON, iris_xo_set);
+		if (rc)
+			goto fail_iris_xo;
+
+		is_power_on = true;
+
+	} else if (is_power_on) {
+		/* Power down in the reverse order of power-up */
+		is_power_on = false;
+		configure_iris_xo(dev, cfg,
+				  WCNSS_WLAN_SWITCH_OFF, NULL);
+		wcnss_iris_vregs_off(hw_type, cfg);
+		wcnss_core_vregs_off(hw_type, cfg);
+	}
+
+	up(&wcnss_power_on_lock);
+	return rc;
+
+	/* Unwind labels: each stage tears down only what earlier
+	 * stages brought up.
+	 */
+fail_iris_xo:
+	wcnss_iris_vregs_off(hw_type, cfg);
+
+fail_iris_on:
+	wcnss_core_vregs_off(hw_type, cfg);
+
+fail_wcnss_on:
+	up(&wcnss_power_on_lock);
+	return rc;
+}
+EXPORT_SYMBOL(wcnss_wlan_power);
+
+/*
+ * During SSR WCNSS should not be 'powered on' until all the host drivers
+ * finish their shutdown routines. Host drivers use below APIs to
+ * synchronize power-on. WCNSS will not be 'powered on' until all the
+ * requests(to lock power-on) are freed.
+ */
+int wcnss_req_power_on_lock(char *driver_name)
+{
+	struct host_driver *entry;
+
+	if (!driver_name)
+		return -EINVAL;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -EINVAL;
+	strlcpy(entry->name, driver_name, sizeof(entry->name));
+
+	mutex_lock(&list_lock);
+	/* Take the power-on semaphore when the first request arrives */
+	if (list_empty(&power_on_lock_list))
+		down(&wcnss_power_on_lock);
+	list_add(&entry->list, &power_on_lock_list);
+	mutex_unlock(&list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_req_power_on_lock);
+
+/* Drop a previously requested power-on lock for @driver_name.
+ * Returns 0 if the entry was found and removed, -1 otherwise.
+ */
+int wcnss_free_power_on_lock(char *driver_name)
+{
+	int ret = -1;
+	struct host_driver *node;
+
+	/* Guard strcmp() below against a NULL name */
+	if (!driver_name)
+		return ret;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &power_on_lock_list, list) {
+		if (!strcmp(node->name, driver_name)) {
+			list_del(&node->list);
+			kfree(node);
+			ret = 0;
+			break;
+		}
+	}
+	/* Unlock only when an entry was actually removed and it was the
+	 * last one; unconditionally calling up() on an already-empty list
+	 * (free without a matching request, or a double free) would
+	 * over-increment the semaphore and defeat the serialization.
+	 */
+	if (!ret && list_empty(&power_on_lock_list))
+		up(&wcnss_power_on_lock);
+	mutex_unlock(&list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(wcnss_free_power_on_lock);
diff --git a/drivers/soc/qcom/wcnss/wcnss_wlan.c b/drivers/soc/qcom/wcnss/wcnss_wlan.c
new file mode 100644
index 0000000..db3974b
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/wcnss_wlan.c
@@ -0,0 +1,3588 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/wcnss_wlan.h>
+#include <linux/platform_data/qcom_wcnss_device.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/gpio.h>
+#include <linux/pm_wakeup.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_qos.h>
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <soc/qcom/socinfo.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#include <soc/qcom/smd.h>
+
+#define DEVICE "wcnss_wlan"
+#define CTRL_DEVICE "wcnss_ctrl"
+#define VERSION "1.01"
+#define WCNSS_PIL_DEVICE "wcnss"
+
+#define WCNSS_PINCTRL_STATE_DEFAULT "wcnss_default"
+#define WCNSS_PINCTRL_STATE_SLEEP "wcnss_sleep"
+#define WCNSS_PINCTRL_GPIO_STATE_DEFAULT "wcnss_gpio_default"
+
+#define WCNSS_DISABLE_PC_LATENCY 100
+#define WCNSS_ENABLE_PC_LATENCY PM_QOS_DEFAULT_VALUE
+#define WCNSS_PM_QOS_TIMEOUT 15000
+#define IS_CAL_DATA_PRESENT 0
+#define WAIT_FOR_CBC_IND 2
+#define WCNSS_DUAL_BAND_CAPABILITY_OFFSET BIT(8)
+
+/* module params */
+#define WCNSS_CONFIG_UNSPECIFIED (-1)
+#define UINT32_MAX (0xFFFFFFFFU)
+
+#define SUBSYS_NOTIF_MIN_INDEX 0
+#define SUBSYS_NOTIF_MAX_INDEX 9
+char *wcnss_subsys_notif_type[] = {
+ "SUBSYS_BEFORE_SHUTDOWN",
+ "SUBSYS_AFTER_SHUTDOWN",
+ "SUBSYS_BEFORE_POWERUP",
+ "SUBSYS_AFTER_POWERUP",
+ "SUBSYS_RAMDUMP_NOTIFICATION",
+ "SUBSYS_POWERUP_FAILURE",
+ "SUBSYS_PROXY_VOTE",
+ "SUBSYS_PROXY_UNVOTE",
+ "SUBSYS_SOC_RESET",
+ "SUBSYS_NOTIF_TYPE_COUNT"
+};
+
+static int has_48mhz_xo = WCNSS_CONFIG_UNSPECIFIED;
+module_param(has_48mhz_xo, int, 0644);
+MODULE_PARM_DESC(has_48mhz_xo, "Is an external 48 MHz XO present");
+
+static int has_calibrated_data = WCNSS_CONFIG_UNSPECIFIED;
+module_param(has_calibrated_data, int, 0644);
+MODULE_PARM_DESC(has_calibrated_data, "whether calibrated data file available");
+
+static int has_autodetect_xo = WCNSS_CONFIG_UNSPECIFIED;
+module_param(has_autodetect_xo, int, 0644);
+MODULE_PARM_DESC(has_autodetect_xo, "Perform auto detect to configure IRIS XO");
+
+static int do_not_cancel_vote = WCNSS_CONFIG_UNSPECIFIED;
+module_param(do_not_cancel_vote, int, 0644);
+MODULE_PARM_DESC(do_not_cancel_vote, "Do not cancel votes for wcnss");
+
+static DEFINE_SPINLOCK(reg_spinlock);
+
+#define RIVA_SPARE_OFFSET 0x0b4
+#define RIVA_SUSPEND_BIT BIT(24)
+
+#define CCU_RIVA_INVALID_ADDR_OFFSET 0x100
+#define CCU_RIVA_LAST_ADDR0_OFFSET 0x104
+#define CCU_RIVA_LAST_ADDR1_OFFSET 0x108
+#define CCU_RIVA_LAST_ADDR2_OFFSET 0x10c
+
+#define PRONTO_PMU_SPARE_OFFSET 0x1088
+#define PMU_A2XB_CFG_HSPLIT_RESP_LIMIT_OFFSET 0x117C
+
+#define PRONTO_PMU_COM_GDSCR_OFFSET 0x0024
+#define PRONTO_PMU_COM_GDSCR_SW_COLLAPSE BIT(0)
+#define PRONTO_PMU_COM_GDSCR_HW_CTRL BIT(1)
+
+#define PRONTO_PMU_WLAN_BCR_OFFSET 0x0050
+#define PRONTO_PMU_WLAN_BCR_BLK_ARES BIT(0)
+
+#define PRONTO_PMU_WLAN_GDSCR_OFFSET 0x0054
+#define PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE BIT(0)
+
+#define PRONTO_PMU_WDOG_CTL 0x0068
+
+#define PRONTO_PMU_CBCR_OFFSET 0x0008
+#define PRONTO_PMU_CBCR_CLK_EN BIT(0)
+
+#define PRONTO_PMU_COM_CPU_CBCR_OFFSET 0x0030
+#define PRONTO_PMU_COM_AHB_CBCR_OFFSET 0x0034
+
+#define PRONTO_PMU_WLAN_AHB_CBCR_OFFSET 0x0074
+#define PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN BIT(0)
+#define PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF BIT(31)
+
+#define PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET 0x0120
+#define PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN BIT(1)
+
+#define PRONTO_PMU_CFG_OFFSET 0x1004
+#define PRONTO_PMU_COM_CSR_OFFSET 0x1040
+#define PRONTO_PMU_SOFT_RESET_OFFSET 0x104C
+
+#define PRONTO_QFUSE_DUAL_BAND_OFFSET 0x0018
+
+#define A2XB_CFG_OFFSET 0x00
+#define A2XB_INT_SRC_OFFSET 0x0c
+#define A2XB_TSTBUS_CTRL_OFFSET 0x14
+#define A2XB_TSTBUS_OFFSET 0x18
+#define A2XB_ERR_INFO_OFFSET 0x1c
+#define A2XB_FIFO_FILL_OFFSET 0x07
+#define A2XB_READ_FIFO_FILL_MASK 0x3F
+#define A2XB_CMD_FIFO_FILL_MASK 0x0F
+#define A2XB_WRITE_FIFO_FILL_MASK 0x1F
+#define A2XB_FIFO_EMPTY 0x2
+#define A2XB_FIFO_COUNTER 0xA
+
+#define WCNSS_TSTBUS_CTRL_EN BIT(0)
+#define WCNSS_TSTBUS_CTRL_AXIM (0x02 << 1)
+#define WCNSS_TSTBUS_CTRL_CMDFIFO (0x03 << 1)
+#define WCNSS_TSTBUS_CTRL_WRFIFO (0x04 << 1)
+#define WCNSS_TSTBUS_CTRL_RDFIFO (0x05 << 1)
+#define WCNSS_TSTBUS_CTRL_CTRL (0x07 << 1)
+#define WCNSS_TSTBUS_CTRL_AXIM_CFG0 (0x00 << 8)
+#define WCNSS_TSTBUS_CTRL_AXIM_CFG1 (0x01 << 8)
+#define WCNSS_TSTBUS_CTRL_CTRL_CFG0 (0x00 << 28)
+#define WCNSS_TSTBUS_CTRL_CTRL_CFG1 (0x01 << 28)
+
+#define CCU_PRONTO_INVALID_ADDR_OFFSET 0x08
+#define CCU_PRONTO_LAST_ADDR0_OFFSET 0x0c
+#define CCU_PRONTO_LAST_ADDR1_OFFSET 0x10
+#define CCU_PRONTO_LAST_ADDR2_OFFSET 0x14
+
+#define CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET 0x28
+#define CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET 0xcc
+#define CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET 0xd0
+#define CCU_PRONTO_A2AB_ERR_ADDR_OFFSET 0x18
+
+#define PRONTO_SAW2_SPM_STS_OFFSET 0x0c
+#define PRONTO_SAW2_SPM_CTL 0x30
+#define PRONTO_SAW2_SAW2_VERSION 0xFD0
+#define PRONTO_SAW2_MAJOR_VER_OFFSET 0x1C
+
+#define PRONTO_PLL_STATUS_OFFSET 0x1c
+#define PRONTO_PLL_MODE_OFFSET 0x1c0
+
+#define MCU_APB2PHY_STATUS_OFFSET 0xec
+#define MCU_CBR_CCAHB_ERR_OFFSET 0x380
+#define MCU_CBR_CAHB_ERR_OFFSET 0x384
+#define MCU_CBR_CCAHB_TIMEOUT_OFFSET 0x388
+#define MCU_CBR_CAHB_TIMEOUT_OFFSET 0x38c
+#define MCU_DBR_CDAHB_ERR_OFFSET 0x390
+#define MCU_DBR_DAHB_ERR_OFFSET 0x394
+#define MCU_DBR_CDAHB_TIMEOUT_OFFSET 0x398
+#define MCU_DBR_DAHB_TIMEOUT_OFFSET 0x39c
+#define MCU_FDBR_CDAHB_ERR_OFFSET 0x3a0
+#define MCU_FDBR_FDAHB_ERR_OFFSET 0x3a4
+#define MCU_FDBR_CDAHB_TIMEOUT_OFFSET 0x3a8
+#define MCU_FDBR_FDAHB_TIMEOUT_OFFSET 0x3ac
+#define PRONTO_PMU_CCPU_BOOT_REMAP_OFFSET 0x2004
+
+#define WCNSS_DEF_WLAN_RX_BUFF_COUNT 1024
+
+#define WCNSS_CTRL_CHANNEL "WCNSS_CTRL"
+#define WCNSS_MAX_FRAME_SIZE (4 * 1024)
+#define WCNSS_VERSION_LEN 30
+#define WCNSS_MAX_BUILD_VER_LEN 256
+#define WCNSS_MAX_CMD_LEN (128)
+#define WCNSS_MIN_CMD_LEN (3)
+
+/* control messages from userspace */
+#define WCNSS_USR_CTRL_MSG_START 0x00000000
+#define WCNSS_USR_HAS_CAL_DATA (WCNSS_USR_CTRL_MSG_START + 2)
+#define WCNSS_USR_WLAN_MAC_ADDR (WCNSS_USR_CTRL_MSG_START + 3)
+
+#define MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define SHOW_MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x\n"
+#define WCNSS_USER_MAC_ADDR_LENGTH 18
+
+/* message types */
+#define WCNSS_CTRL_MSG_START 0x01000000
+#define WCNSS_VERSION_REQ (WCNSS_CTRL_MSG_START + 0)
+#define WCNSS_VERSION_RSP (WCNSS_CTRL_MSG_START + 1)
+#define WCNSS_NVBIN_DNLD_REQ (WCNSS_CTRL_MSG_START + 2)
+#define WCNSS_NVBIN_DNLD_RSP (WCNSS_CTRL_MSG_START + 3)
+#define WCNSS_CALDATA_UPLD_REQ (WCNSS_CTRL_MSG_START + 4)
+#define WCNSS_CALDATA_UPLD_RSP (WCNSS_CTRL_MSG_START + 5)
+#define WCNSS_CALDATA_DNLD_REQ (WCNSS_CTRL_MSG_START + 6)
+#define WCNSS_CALDATA_DNLD_RSP (WCNSS_CTRL_MSG_START + 7)
+#define WCNSS_VBATT_LEVEL_IND (WCNSS_CTRL_MSG_START + 8)
+#define WCNSS_BUILD_VER_REQ (WCNSS_CTRL_MSG_START + 9)
+#define WCNSS_BUILD_VER_RSP (WCNSS_CTRL_MSG_START + 10)
+#define WCNSS_PM_CONFIG_REQ (WCNSS_CTRL_MSG_START + 11)
+#define WCNSS_CBC_COMPLETE_IND (WCNSS_CTRL_MSG_START + 12)
+
+/* max 20mhz channel count */
+#define WCNSS_MAX_CH_NUM 45
+#define WCNSS_MAX_PIL_RETRY 2
+
+#define VALID_VERSION(version) \
+ ((strcmp(version, "INVALID")) ? 1 : 0)
+
+/* NOTE(review): the minor version is compared independently of the major,
+ * so e.g. firmware 2.0 would be reported as not caldata-capable — confirm
+ * this is intended before relying on it for future firmware versions.
+ */
+#define FW_CALDATA_CAPABLE() \
+	((penv->fw_major >= 1) && (penv->fw_minor >= 5) ? 1 : 0)
+
+static int wcnss_pinctrl_set_state(bool active);
+
+struct smd_msg_hdr {
+ unsigned int msg_type;
+ unsigned int msg_len;
+};
+
+struct wcnss_version {
+ struct smd_msg_hdr hdr;
+ unsigned char major;
+ unsigned char minor;
+ unsigned char version;
+ unsigned char revision;
+};
+
+struct wcnss_pmic_dump {
+ char reg_name[10];
+ u16 reg_addr;
+};
+
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+ void *ss_handle);
+
+static struct notifier_block wnb = {
+ .notifier_call = wcnss_notif_cb,
+};
+
+#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+
+/* On SMD channel 4K of maximum data can be transferred, including message
+ * header, so NV fragment size as next multiple of 1Kb is 3Kb.
+ */
+#define NV_FRAGMENT_SIZE 3072
+#define MAX_CALIBRATED_DATA_SIZE (64 * 1024)
+#define LAST_FRAGMENT BIT(0)
+#define MESSAGE_TO_FOLLOW BIT(1)
+#define CAN_RECEIVE_CALDATA BIT(15)
+#define WCNSS_RESP_SUCCESS 1
+#define WCNSS_RESP_FAIL 0
+
+/* Macro to find the total number fragments of the NV bin Image */
+#define TOTALFRAGMENTS(x) ((((x) % NV_FRAGMENT_SIZE) == 0) ? \
+ ((x) / NV_FRAGMENT_SIZE) : (((x) / NV_FRAGMENT_SIZE) + 1))
+
+struct nvbin_dnld_req_params {
+ /* Fragment sequence number of the NV bin Image. NV Bin Image
+ * might not fit into one message due to size limitation of
+ * the SMD channel FIFO so entire NV blob is chopped into
+ * multiple fragments starting with seqeunce number 0. The
+ * last fragment is indicated by marking is_last_fragment field
+ * to 1. At receiving side, NV blobs would be concatenated
+ * together without any padding bytes in between.
+ */
+ unsigned short frag_number;
+
+ /* bit 0: When set to 1 it indicates that no more fragments will
+ * be sent.
+ * bit 1: When set, a new message will be followed by this message
+ * bit 2- bit 14: Reserved
+ * bit 15: when set, it indicates that the sender is capable of
+ * receiving Calibrated data.
+ */
+ unsigned short msg_flags;
+
+ /* NV Image size (number of bytes) */
+ unsigned int nvbin_buffer_size;
+
+ /* Following the 'nvbin_buffer_size', there should be
+ * nvbin_buffer_size bytes of NV bin Image i.e.
+ * uint8[nvbin_buffer_size].
+ */
+};
+
+struct nvbin_dnld_req_msg {
+ /* Note: The length specified in nvbin_dnld_req_msg messages
+ * should be hdr.msg_len = sizeof(nvbin_dnld_req_msg) +
+ * nvbin_buffer_size.
+ */
+ struct smd_msg_hdr hdr;
+ struct nvbin_dnld_req_params dnld_req_params;
+};
+
+struct cal_data_params {
+ /* The total size of the calibrated data, including all the
+ * fragments.
+ */
+ unsigned int total_size;
+ unsigned short frag_number;
+ /* bit 0: When set to 1 it indicates that no more fragments will
+ * be sent.
+ * bit 1: When set, a new message will be followed by this message
+ * bit 2- bit 15: Reserved
+ */
+ unsigned short msg_flags;
+ /* fragment size
+ */
+ unsigned int frag_size;
+ /* Following the frag_size, frag_size of fragmented
+ * data will be followed.
+ */
+};
+
+struct cal_data_msg {
+ /* The length specified in cal_data_msg should be
+ * hdr.msg_len = sizeof(cal_data_msg) + frag_size
+ */
+ struct smd_msg_hdr hdr;
+ struct cal_data_params cal_params;
+};
+
+struct vbatt_level {
+ u32 curr_volt;
+ u32 threshold;
+};
+
+struct vbatt_message {
+ struct smd_msg_hdr hdr;
+ struct vbatt_level vbatt;
+};
+
+static struct {
+ struct platform_device *pdev;
+ void *pil;
+ struct resource *mmio_res;
+ struct resource *tx_irq_res;
+ struct resource *rx_irq_res;
+ struct resource *gpios_5wire;
+ const struct dev_pm_ops *pm_ops;
+ int triggered;
+ int smd_channel_ready;
+ u32 wlan_rx_buff_count;
+ int is_vsys_adc_channel;
+ int is_a2xb_split_reg;
+ smd_channel_t *smd_ch;
+ unsigned char wcnss_version[WCNSS_VERSION_LEN];
+ unsigned char fw_major;
+ unsigned char fw_minor;
+ unsigned int serial_number;
+ int thermal_mitigation;
+ enum wcnss_hw_type wcnss_hw_type;
+ void (*tm_notify)(struct device *, int);
+ struct wcnss_wlan_config wlan_config;
+ struct delayed_work wcnss_work;
+ struct delayed_work vbatt_work;
+ struct work_struct wcnssctrl_version_work;
+ struct work_struct wcnss_pm_config_work;
+ struct work_struct wcnssctrl_nvbin_dnld_work;
+ struct work_struct wcnssctrl_rx_work;
+ struct work_struct wcnss_vadc_work;
+ struct wakeup_source wcnss_wake_lock;
+ void __iomem *msm_wcnss_base;
+ void __iomem *riva_ccu_base;
+ void __iomem *pronto_a2xb_base;
+ void __iomem *pronto_ccpu_base;
+ void __iomem *pronto_saw2_base;
+ void __iomem *pronto_pll_base;
+ void __iomem *pronto_mcu_base;
+ void __iomem *pronto_qfuse;
+ void __iomem *wlan_tx_status;
+ void __iomem *wlan_tx_phy_aborts;
+ void __iomem *wlan_brdg_err_source;
+ void __iomem *alarms_txctl;
+ void __iomem *alarms_tactl;
+ void __iomem *fiq_reg;
+ int nv_downloaded;
+ int is_cbc_done;
+ unsigned char *fw_cal_data;
+ unsigned char *user_cal_data;
+ int fw_cal_rcvd;
+ int fw_cal_exp_frag;
+ int fw_cal_available;
+ int user_cal_read;
+ int user_cal_available;
+ u32 user_cal_rcvd;
+ u32 user_cal_exp_size;
+ int iris_xo_mode_set;
+ int fw_vbatt_state;
+ char wlan_nv_mac_addr[WLAN_MAC_ADDR_SIZE];
+ int ctrl_device_opened;
+ /* dev node lock */
+ struct mutex dev_lock;
+ /* dev control lock */
+ struct mutex ctrl_lock;
+ wait_queue_head_t read_wait;
+ struct qpnp_adc_tm_btm_param vbat_monitor_params;
+ struct qpnp_adc_tm_chip *adc_tm_dev;
+ struct qpnp_vadc_chip *vadc_dev;
+ /* battery monitor lock */
+ struct mutex vbat_monitor_mutex;
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[WCNSS_MAX_CH_NUM];
+ void *wcnss_notif_hdle;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *wcnss_5wire_active;
+ struct pinctrl_state *wcnss_5wire_suspend;
+ struct pinctrl_state *wcnss_gpio_active;
+ int gpios[WCNSS_WLAN_MAX_GPIO];
+ int use_pinctrl;
+ u8 is_shutdown;
+ struct pm_qos_request wcnss_pm_qos_request;
+ int pc_disabled;
+ struct delayed_work wcnss_pm_qos_del_req;
+ /* power manager QOS lock */
+ struct mutex pm_qos_mutex;
+ struct clk *snoc_wcnss;
+ unsigned int snoc_wcnss_clock_freq;
+ bool is_dual_band_disabled;
+ dev_t dev_ctrl, dev_node;
+ struct class *node_class;
+ struct cdev ctrl_dev, node_dev;
+} *penv = NULL;
+
+/* sysfs store: parse "xx:xx:xx:xx:xx:xx" and cache it as the NV MAC. */
+static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int index;
+	int mac_addr[WLAN_MAC_ADDR_SIZE];
+
+	if (!penv)
+		return -ENODEV;
+
+	/* WCNSS_USER_MAC_ADDR_LENGTH (18) = 17-char MAC string + 1 */
+	if (strlen(buf) != WCNSS_USER_MAC_ADDR_LENGTH) {
+		dev_err(dev, "%s: Invalid MAC addr length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, MAC_ADDRESS_STR, &mac_addr[0], &mac_addr[1],
+		   &mac_addr[2], &mac_addr[3], &mac_addr[4],
+		   &mac_addr[5]) != WLAN_MAC_ADDR_SIZE) {
+		pr_err("%s: Failed to Copy MAC\n", __func__);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): copying the first byte of each int relies on a
+	 * little-endian CPU; a value assignment would be endian-safe —
+	 * confirm target endianness before changing.
+	 */
+	for (index = 0; index < WLAN_MAC_ADDR_SIZE; index++) {
+		memcpy(&penv->wlan_nv_mac_addr[index],
+		       (char *)&mac_addr[index], sizeof(char));
+	}
+
+	pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+		penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
+		penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
+		penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
+
+	return count;
+}
+
+/* sysfs show: print the cached NV MAC address. */
+static ssize_t wcnss_wlan_macaddr_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	if (!penv)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, SHOW_MAC_ADDRESS_STR,
+			 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
+			 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
+			 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
+}
+
+/* root-only read/write attribute: /sys/.../wcnss_mac_addr */
+static DEVICE_ATTR(wcnss_mac_addr, 0600, wcnss_wlan_macaddr_show,
+		   wcnss_wlan_macaddr_store);
+
+/* sysfs show: current thermal mitigation level.
+ * NOTE(review): thermal_mitigation is a signed int but printed with %u;
+ * a negative value would render as a large unsigned — confirm range.
+ */
+static ssize_t wcnss_thermal_mitigation_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	if (!penv)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", penv->thermal_mitigation);
+}
+
+/* sysfs store: set the thermal mitigation level and notify the
+ * registered listener, if any.
+ */
+static ssize_t wcnss_thermal_mitigation_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int value;
+
+	if (!penv)
+		return -ENODEV;
+
+	/* kstrtoint() returns 0 on success (not a conversion count like
+	 * sscanf); the previous "!= 1" test rejected every valid input.
+	 */
+	if (kstrtoint(buf, 10, &value))
+		return -EINVAL;
+	penv->thermal_mitigation = value;
+	if (penv->tm_notify)
+		penv->tm_notify(dev, value);
+	return count;
+}
+
+static DEVICE_ATTR(thermal_mitigation, 0600, wcnss_thermal_mitigation_show,
+		   wcnss_thermal_mitigation_store);
+
+/* sysfs show: WCNSS firmware version string (read-only). */
+static ssize_t wcnss_version_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	if (!penv)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%s", penv->wcnss_version);
+}
+
+static DEVICE_ATTR(wcnss_version, 0400, wcnss_version_show, NULL);
+
+/* wcnss_reset_fiq() is invoked when host drivers fails to
+ * communicate with WCNSS over SMD; so logging these registers
+ * helps to know WCNSS failure reason
+ */
+void wcnss_riva_log_debug_regs(void)
+{
+	void __iomem *ccu_reg;
+	u32 reg = 0;
+
+	/* NOTE(review): penv and riva_ccu_base are dereferenced without a
+	 * NULL check — callers presumably only invoke this after probe
+	 * mapped the CCU; confirm.
+	 */
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_INVALID_ADDR_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR0_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR1_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR2_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
+}
+EXPORT_SYMBOL(wcnss_riva_log_debug_regs);
+
+/* Poll an A2XB testbus FIFO fill level a bounded number of times and
+ * report whether it appears stalled (fill level never drains).
+ */
+void wcnss_pronto_is_a2xb_bus_stall(void *tst_addr, u32 fifo_mask, char *type)
+{
+	u32 iter, reg, fill, prev_fill = 0;
+
+	reg = readl_relaxed(tst_addr);
+	fill = (reg >> A2XB_FIFO_FILL_OFFSET) & fifo_mask;
+	for (iter = 1; (iter < A2XB_FIFO_COUNTER) && fill; iter++) {
+		prev_fill = fill;
+		reg = readl_relaxed(tst_addr);
+		fill = (reg >> A2XB_FIFO_FILL_OFFSET) & fifo_mask;
+		/* Fill level dropped: the FIFO is draining, not stuck */
+		if (fill < prev_fill)
+			break;
+	}
+
+	if (iter == A2XB_FIFO_COUNTER) {
+		pr_err("%s data FIFO testbus possibly stalled reg%08x\n",
+		       type, reg);
+	} else {
+		pr_err("%s data FIFO tstbus not stalled reg%08x\n",
+		       type, reg);
+	}
+}
+
+/* Read the dual-band capability qfuse and cache the result in
+ * penv->is_dual_band_disabled. Returns 0 on success or a negative errno.
+ */
+int wcnss_get_dual_band_capability_info(struct platform_device *pdev)
+{
+	u32 reg = 0;
+	struct resource *res;
+
+	res = platform_get_resource_byname(
+		pdev, IORESOURCE_MEM, "pronto_qfuse");
+	if (!res)
+		return -EINVAL;
+
+	penv->pronto_qfuse = devm_ioremap_resource(&pdev->dev, res);
+	/* Propagate the actual mapping error instead of masking it
+	 * as -ENOMEM (devm_ioremap_resource encodes it in the pointer).
+	 */
+	if (IS_ERR(penv->pronto_qfuse))
+		return PTR_ERR(penv->pronto_qfuse);
+
+	reg = readl_relaxed(penv->pronto_qfuse +
+			    PRONTO_QFUSE_DUAL_BAND_OFFSET);
+	penv->is_dual_band_disabled =
+		!!(reg & WCNSS_DUAL_BAND_CAPABILITY_OFFSET);
+
+	return 0;
+}
+
+/* Log pronto debug registers during SSR Timeout CB */
+void wcnss_pronto_log_debug_regs(void)
+{
+	void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr;
+	u32 reg = 0, reg2 = 0, reg3 = 0, reg4 = 0;
+
+	/* PMU registers are readable regardless of GDSC state */
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SPARE_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_SPARE %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CPU_CBCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_CPU_CBCR %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_AHB_CBCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_AHB_CBCR %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CFG_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_CFG %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CSR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_CSR %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SOFT_RESET_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_SOFT_RESET %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WDOG_CTL;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_WDOG_CTL %08x\n", reg);
+
+	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_SAW2_SPM_STS %08x\n", reg);
+
+	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_CTL;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_SAW2_SPM_CTL %08x\n", reg);
+
+	if (penv->is_a2xb_split_reg) {
+		reg_addr = penv->msm_wcnss_base +
+			   PMU_A2XB_CFG_HSPLIT_RESP_LIMIT_OFFSET;
+		reg = readl_relaxed(reg_addr);
+		pr_err("PMU_A2XB_CFG_HSPLIT_RESP_LIMIT %08x\n", reg);
+	}
+
+	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SAW2_VERSION;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_SAW2_SAW2_VERSION %08x\n", reg);
+	/* NOTE(review): the shifted major version below is never used —
+	 * reg is overwritten by the next read. Possibly a leftover from a
+	 * removed version check; confirm before deleting.
+	 */
+	reg >>= PRONTO_SAW2_MAJOR_VER_OFFSET;
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CCPU_BOOT_REMAP_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_CCPU_BOOT_REMAP %08x\n", reg);
+
+	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PLL_STATUS %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
+	reg4 = readl_relaxed(reg_addr);
+	pr_err("PMU_CPU_CMD_RCGR %08x\n", reg4);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_GDSCR %08x\n", reg);
+	/* Bit 31 is the GDSC power status; bail out if common SS is off */
+	reg >>= 31;
+
+	if (!reg) {
+		pr_err("Cannot log, Pronto common SS is power collapsed\n");
+		return;
+	}
+	/* Force the common GDSC on and enable its clock so the A2XB/CCU
+	 * registers below are accessible.
+	 */
+	reg &= ~(PRONTO_PMU_COM_GDSCR_SW_COLLAPSE
+		 | PRONTO_PMU_COM_GDSCR_HW_CTRL);
+	writel_relaxed(reg, reg_addr);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CBCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	reg |= PRONTO_PMU_CBCR_CLK_EN;
+	writel_relaxed(reg, reg_addr);
+
+	reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("A2XB_CFG_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("A2XB_INT_SRC_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("A2XB_ERR_INFO_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_INVALID_ADDR %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_LAST_ADDR0 %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_LAST_ADDR1 %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_LAST_ADDR2 %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_A2AB_ERR_ADDR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_A2AB_ERR_ADDR_OFFSET %08x\n", reg);
+
+	tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
+	tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
+
+	/* read data FIFO */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_RDFIFO;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	if (!(reg & A2XB_FIFO_EMPTY)) {
+		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
+					       A2XB_READ_FIFO_FILL_MASK,
+					       "Read");
+	} else {
+		pr_err("Read data FIFO testbus %08x\n", reg);
+	}
+	/* command FIFO */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CMDFIFO;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	if (!(reg & A2XB_FIFO_EMPTY)) {
+		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
+					       A2XB_CMD_FIFO_FILL_MASK, "Cmd");
+	} else {
+		pr_err("Command FIFO testbus %08x\n", reg);
+	}
+
+	/* write data FIFO */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_WRFIFO;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	if (!(reg & A2XB_FIFO_EMPTY)) {
+		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
+					       A2XB_WRITE_FIFO_FILL_MASK,
+					       "Write");
+	} else {
+		pr_err("Write data FIFO testbus %08x\n", reg);
+	}
+
+	/* AXIM SEL CFG0 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM |
+	      WCNSS_TSTBUS_CTRL_AXIM_CFG0;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("AXIM SEL CFG0 testbus %08x\n", reg);
+
+	/* AXIM SEL CFG1 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM |
+	      WCNSS_TSTBUS_CTRL_AXIM_CFG1;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("AXIM SEL CFG1 testbus %08x\n", reg);
+
+	/* CTRL SEL CFG0 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL |
+	      WCNSS_TSTBUS_CTRL_CTRL_CFG0;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("CTRL SEL CFG0 testbus %08x\n", reg);
+
+	/* CTRL SEL CFG1 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL |
+	      WCNSS_TSTBUS_CTRL_CTRL_CFG1;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("CTRL SEL CFG1 testbus %08x\n", reg);
+
+	/* Snapshot the WLAN-domain gating registers before deciding
+	 * whether the WLAN subsystem registers can be dumped at all.
+	 */
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_BCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_GDSCR_OFFSET;
+	reg2 = readl_relaxed(reg_addr);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_AHB_CBCR_OFFSET;
+	reg3 = readl_relaxed(reg_addr);
+	pr_err("PMU_WLAN_AHB_CBCR %08x\n", reg3);
+
+	msleep(50);
+
+	/* Skip the WLAN-domain dump if the block is in reset, its GDSC is
+	 * collapsed, or its AHB clock is off — reads would hang or lie.
+	 */
+	if ((reg & PRONTO_PMU_WLAN_BCR_BLK_ARES) ||
+	    (reg2 & PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE) ||
+	    (!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) ||
+	    (reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF) ||
+	    (!(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN))) {
+		pr_err("Cannot log, wlan domain is power collapsed\n");
+		return;
+	}
+
+	reg = readl_relaxed(penv->wlan_tx_phy_aborts);
+	pr_err("WLAN_TX_PHY_ABORTS %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_APB2PHY_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_APB2PHY_STATUS %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CCAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CCAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_CDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_DAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_CDAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_DAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_CDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_FDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_CDAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_FDAHB_TIMEOUT %08x\n", reg);
+
+	reg = readl_relaxed(penv->wlan_brdg_err_source);
+	pr_err("WLAN_BRDG_ERR_SOURCE %08x\n", reg);
+
+	reg = readl_relaxed(penv->wlan_tx_status);
+	pr_err("WLAN_TXP_STATUS %08x\n", reg);
+
+	reg = readl_relaxed(penv->alarms_txctl);
+	pr_err("ALARMS_TXCTL %08x\n", reg);
+
+	reg = readl_relaxed(penv->alarms_tactl);
+	pr_err("ALARMS_TACTL %08x\n", reg);
+}
+EXPORT_SYMBOL(wcnss_pronto_log_debug_regs);
+
+#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
+
+/*
+ * wcnss_gpio_set_state() - switch the 5-wire pins between pinctrl-owned
+ * and manually requested GPIO mode for register bit-banging.
+ *
+ * is_enable == false: free every valid, previously requested GPIO.
+ * is_enable == true:  apply the "gpio default" pinctrl state, then request
+ * DATA2..DATA0 as inputs and SET/CLK as outputs driven low.
+ *
+ * Returns 0 on success, -EINVAL on any pinctrl or gpio_request failure
+ * (the underlying error code is logged but not propagated).
+ */
+static int wcnss_gpio_set_state(bool is_enable)
+{
+	struct pinctrl_state *pin_state;
+	int ret;
+	int i;
+
+	if (!is_enable) {
+		for (i = 0; i < WCNSS_WLAN_MAX_GPIO; i++) {
+			if (gpio_is_valid(penv->gpios[i]))
+				gpio_free(penv->gpios[i]);
+		}
+
+		return 0;
+	}
+
+	pin_state = penv->wcnss_gpio_active;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		ret = pinctrl_select_state(penv->pinctrl, pin_state);
+		if (ret < 0) {
+			pr_err("%s: can not set gpio pins err: %d\n",
+			       __func__, ret);
+			goto pinctrl_set_err;
+		}
+
+	} else {
+		pr_err("%s: invalid gpio pinstate err: %lu\n",
+		       __func__, PTR_ERR(pin_state));
+		goto pinctrl_set_err;
+	}
+
+	/* NOTE(review): both loops assume the WCNSS_WLAN_* enum values are
+	 * consecutive (DATA2 < DATA1 < DATA0 < SET < CLK) — confirm against
+	 * the enum declaration.
+	 */
+	for (i = WCNSS_WLAN_DATA2; i <= WCNSS_WLAN_DATA0; i++) {
+		ret = gpio_request_one(penv->gpios[i],
+				       GPIOF_DIR_IN, NULL);
+		if (ret) {
+			pr_err("%s: request failed for gpio:%d\n",
+			       __func__, penv->gpios[i]);
+			/* step back so the unwind loop frees only GPIOs
+			 * that were actually requested
+			 */
+			i--;
+			goto gpio_req_err;
+		}
+	}
+
+	for (i = WCNSS_WLAN_SET; i <= WCNSS_WLAN_CLK; i++) {
+		ret = gpio_request_one(penv->gpios[i],
+				       GPIOF_OUT_INIT_LOW, NULL);
+		if (ret) {
+			pr_err("%s: request failed for gpio:%d\n",
+			       __func__, penv->gpios[i]);
+			i--;
+			goto gpio_req_err;
+		}
+	}
+
+	return 0;
+
+gpio_req_err:
+	for (; i >= WCNSS_WLAN_DATA2; --i)
+		gpio_free(penv->gpios[i]);
+
+pinctrl_set_err:
+	return -EINVAL;
+}
+
+/*
+ * wcnss_rf_read_reg() - bit-bang a register read from the IRIS RF chip
+ * over the 5-wire (SET/CLK/DATA0-2) interface.
+ *
+ * Sends the read command plus 15-bit register address three bits per
+ * clock, waits the required turnaround cycles, then clocks in the 16-bit
+ * register value. Restores the pins to the normal 5-wire pinctrl state
+ * before returning.
+ *
+ * Returns the register value, or 0 if GPIO setup fails (note: 0 is
+ * indistinguishable from a genuine zero read).
+ */
+static u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+	int count = 0;
+	u32 rf_cmd_and_addr = 0;
+	u32 rf_data_received = 0;
+	u32 rf_bit = 0;
+
+	if (wcnss_gpio_set_state(true))
+		return 0;
+
+	/* Reset the signal if it is already being used. */
+	gpio_set_value(penv->gpios[WCNSS_WLAN_SET], 0);
+	gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+	/* We start with cmd_set high penv->gpio_base + WCNSS_WLAN_SET = 1. */
+	gpio_set_value(penv->gpios[WCNSS_WLAN_SET], 1);
+
+	gpio_direction_output(penv->gpios[WCNSS_WLAN_DATA0], 1);
+	gpio_direction_output(penv->gpios[WCNSS_WLAN_DATA1], 1);
+	gpio_direction_output(penv->gpios[WCNSS_WLAN_DATA2], 1);
+
+	gpio_set_value(penv->gpios[WCNSS_WLAN_DATA0], 0);
+	gpio_set_value(penv->gpios[WCNSS_WLAN_DATA1], 0);
+	gpio_set_value(penv->gpios[WCNSS_WLAN_DATA2], 0);
+
+	/* Prepare command and RF register address that need to sent out. */
+	rf_cmd_and_addr  = (((WLAN_RF_READ_REG_CMD) |
+			   (rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+			   WLAN_RF_READ_CMD_MASK);
+	/* Send 15 bit RF register address */
+	for (count = 0; count < WLAN_RF_PREPARE_CMD_DATA; count++) {
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+		/* place the next three LSBs on DATA0..DATA2 */
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_DATA0],
+			       rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_DATA1], rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_DATA2], rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		/* Send the data out penv->gpio_base + WCNSS_WLAN_CLK = 1 */
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 1);
+	}
+
+	/* Pull down the clock signal */
+	gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+	/* Configure data pins to input IO pins */
+	gpio_direction_input(penv->gpios[WCNSS_WLAN_DATA0]);
+	gpio_direction_input(penv->gpios[WCNSS_WLAN_DATA1]);
+	gpio_direction_input(penv->gpios[WCNSS_WLAN_DATA2]);
+
+	/* turnaround clock cycles before the chip drives the data lines */
+	for (count = 0; count < WLAN_RF_CLK_WAIT_CYCLE; count++) {
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+	}
+
+	rf_bit = 0;
+	/* Read 16 bit RF register value */
+	for (count = 0; count < WLAN_RF_READ_DATA; count++) {
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+		rf_bit = gpio_get_value(penv->gpios[WCNSS_WLAN_DATA0]);
+		rf_data_received |= (rf_bit << (count * WLAN_RF_DATA_LEN
+					+ WLAN_RF_DATA0_SHIFT));
+
+		/* the final cycle carries only one valid bit (16 = 5*3 + 1) */
+		if (count != 5) {
+			rf_bit = gpio_get_value(penv->gpios[WCNSS_WLAN_DATA1]);
+			rf_data_received |= (rf_bit << (count * WLAN_RF_DATA_LEN
+						+ WLAN_RF_DATA1_SHIFT));
+
+			rf_bit = gpio_get_value(penv->gpios[WCNSS_WLAN_DATA2]);
+			rf_data_received |= (rf_bit << (count * WLAN_RF_DATA_LEN
+						+ WLAN_RF_DATA2_SHIFT));
+		}
+	}
+
+	gpio_set_value(penv->gpios[WCNSS_WLAN_SET], 0);
+	/* release the bit-bang GPIOs and restore normal 5-wire pinctrl */
+	wcnss_gpio_set_state(false);
+	wcnss_pinctrl_set_state(true);
+
+	return rf_data_received;
+}
+
+/*
+ * wcnss_log_iris_regs() - dump a fixed set of IRIS RF registers to the
+ * kernel log via the bit-banged 5-wire read path.
+ */
+static void wcnss_log_iris_regs(void)
+{
+	int i;
+	u32 reg_val;
+	/* static const: the register list is immutable; no need to
+	 * rebuild it on the stack on every call.
+	 */
+	static const u32 regs_array[] = {
+		0x04, 0x05, 0x11, 0x1e, 0x40, 0x48,
+		0x49, 0x4b, 0x00, 0x01, 0x4d};
+
+	pr_info("%s: IRIS Registers [address] : value\n", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(regs_array); i++) {
+		reg_val = wcnss_rf_read_reg(regs_array[i]);
+
+		pr_info("[0x%08x] : 0x%08x\n", regs_array[i], reg_val);
+	}
+}
+
+/*
+ * wcnss_get_mux_control() - route the GC debug bus mux to TOP by setting
+ * WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP in the PMU config register.
+ *
+ * Returns 1 when the mux was switched, 0 if the driver is not probed yet.
+ */
+int wcnss_get_mux_control(void)
+{
+	void __iomem *pmu_conf_reg;
+	u32 reg = 0;
+
+	if (!penv)
+		return 0;
+
+	pmu_conf_reg = penv->msm_wcnss_base + PRONTO_PMU_OFFSET;
+	reg = readl_relaxed(pmu_conf_reg);
+	reg |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP;
+	writel_relaxed(reg, pmu_conf_reg);
+	return 1;
+}
+
+/*
+ * wcnss_log_debug_regs_on_bite() - on a WDOG bite, verify the WCNSS debug
+ * clock is actually ticking before touching PMU registers, then dump the
+ * pronto debug registers and the IRIS registers.
+ *
+ * Fixes vs. previous version: clk_get() references were leaked on every
+ * path (no clk_put), including the early error returns, and a NULL pdev
+ * from wcnss_get_platform_device() would have been dereferenced.
+ */
+void wcnss_log_debug_regs_on_bite(void)
+{
+	struct platform_device *pdev = wcnss_get_platform_device();
+	struct clk *measure;
+	struct clk *wcnss_debug_mux;
+	unsigned long clk_rate;
+
+	if (!pdev || wcnss_hardware_type() != WCNSS_PRONTO_HW)
+		return;
+
+	measure = clk_get(&pdev->dev, "measure");
+	wcnss_debug_mux = clk_get(&pdev->dev, "wcnss_debug");
+
+	if (!IS_ERR(measure) && !IS_ERR(wcnss_debug_mux)) {
+		if (clk_set_parent(measure, wcnss_debug_mux)) {
+			pr_err("Setting measure clk parent failed\n");
+			goto put_clocks;
+		}
+
+		if (clk_prepare_enable(measure)) {
+			pr_err("measure clk enable failed\n");
+			goto put_clocks;
+		}
+
+		clk_rate = clk_get_rate(measure);
+		pr_debug("wcnss: clock frequency is: %luHz\n", clk_rate);
+
+		if (clk_rate) {
+			wcnss_pronto_log_debug_regs();
+			if (wcnss_get_mux_control())
+				wcnss_log_iris_regs();
+		} else {
+			/* clock dead: PMU reads would hang; IRIS regs are
+			 * still reachable over the bit-banged interface
+			 */
+			pr_err("clock frequency is zero, cannot access PMU or other registers\n");
+			wcnss_log_iris_regs();
+		}
+
+		clk_disable_unprepare(measure);
+	}
+
+put_clocks:
+	/* balance the clk_get() references on all paths */
+	if (!IS_ERR(measure))
+		clk_put(measure);
+	if (!IS_ERR(wcnss_debug_mux))
+		clk_put(wcnss_debug_mux);
+}
+#endif
+
+/*
+ * wcnss_reset_fiq() - reset WCNSS by raising the FIQ interrupt.
+ *
+ * @clk_chk_en: when true, verify the debug clock is alive before logging
+ * registers (safe path); when false, dump pronto and IRIS registers
+ * unconditionally.
+ *
+ * The FIQ write is skipped while the device is shut down to avoid racing
+ * the power-up sequence. Riva hardware only gets a register dump.
+ */
+void wcnss_reset_fiq(bool clk_chk_en)
+{
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+		if (clk_chk_en) {
+			wcnss_log_debug_regs_on_bite();
+		} else {
+			wcnss_pronto_log_debug_regs();
+			if (wcnss_get_mux_control())
+				wcnss_log_iris_regs();
+		}
+		if (!wcnss_device_is_shutdown()) {
+			/* Insert memory barrier before writing fiq register */
+			wmb();
+			__raw_writel(1 << 16, penv->fiq_reg);
+		} else {
+			pr_info("%s: Block FIQ during power up sequence\n",
+				__func__);
+		}
+	} else {
+		wcnss_riva_log_debug_regs();
+	}
+}
+EXPORT_SYMBOL(wcnss_reset_fiq);
+
+/*
+ * wcnss_create_sysfs() - expose the thermal_mitigation, wcnss_version and
+ * wcnss_mac_addr attributes on @dev.
+ *
+ * Returns 0 on success or the device_create_file() error; on failure any
+ * attribute created so far is removed again.
+ */
+static int wcnss_create_sysfs(struct device *dev)
+{
+	int err;
+
+	if (!dev)
+		return -ENODEV;
+
+	err = device_create_file(dev, &dev_attr_thermal_mitigation);
+	if (err)
+		return err;
+
+	err = device_create_file(dev, &dev_attr_wcnss_version);
+	if (err) {
+		device_remove_file(dev, &dev_attr_thermal_mitigation);
+		return err;
+	}
+
+	err = device_create_file(dev, &dev_attr_wcnss_mac_addr);
+	if (err) {
+		device_remove_file(dev, &dev_attr_wcnss_version);
+		device_remove_file(dev, &dev_attr_thermal_mitigation);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Tear down the sysfs attributes created by wcnss_create_sysfs(). */
+static void wcnss_remove_sysfs(struct device *dev)
+{
+	if (!dev)
+		return;
+
+	device_remove_file(dev, &dev_attr_thermal_mitigation);
+	device_remove_file(dev, &dev_attr_wcnss_version);
+	device_remove_file(dev, &dev_attr_wcnss_mac_addr);
+}
+
+/* Register this driver's CPU-DMA-latency PM QoS request (default value). */
+static void wcnss_pm_qos_add_request(void)
+{
+	pr_info("%s: add request\n", __func__);
+	pm_qos_add_request(&penv->wcnss_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+}
+
+/* Drop the PM QoS request added by wcnss_pm_qos_add_request(). */
+static void wcnss_pm_qos_remove_request(void)
+{
+	pr_info("%s: remove request\n", __func__);
+	pm_qos_remove_request(&penv->wcnss_pm_qos_request);
+}
+
+/* Update the active PM QoS request to latency @val (microseconds). */
+void wcnss_pm_qos_update_request(int val)
+{
+	pr_info("%s: update request %d\n", __func__, val);
+	pm_qos_update_request(&penv->wcnss_pm_qos_request, val);
+}
+
+/*
+ * wcnss_disable_pc_remove_req() - re-allow CPU power collapse.
+ *
+ * Undoes wcnss_disable_pc_add_req(): restores the permissive latency,
+ * removes the QoS request and releases the suspend wakeup source.
+ * Idempotent — pc_disabled gates the teardown under pm_qos_mutex.
+ */
+void wcnss_disable_pc_remove_req(void)
+{
+	mutex_lock(&penv->pm_qos_mutex);
+	if (penv->pc_disabled) {
+		penv->pc_disabled = 0;
+		wcnss_pm_qos_update_request(WCNSS_ENABLE_PC_LATENCY);
+		wcnss_pm_qos_remove_request();
+		wcnss_allow_suspend();
+	}
+	mutex_unlock(&penv->pm_qos_mutex);
+}
+
+/*
+ * wcnss_disable_pc_add_req() - prevent CPU power collapse while WCNSS
+ * needs low latency: add a QoS request, hold a wakeup source, then clamp
+ * the latency. Idempotent — pc_disabled gates setup under pm_qos_mutex.
+ */
+void wcnss_disable_pc_add_req(void)
+{
+	mutex_lock(&penv->pm_qos_mutex);
+	if (!penv->pc_disabled) {
+		wcnss_pm_qos_add_request();
+		wcnss_prevent_suspend();
+		wcnss_pm_qos_update_request(WCNSS_DISABLE_PC_LATENCY);
+		penv->pc_disabled = 1;
+	}
+	mutex_unlock(&penv->pm_qos_mutex);
+}
+
+/*
+ * wcnss_smd_notify_event() - SMD channel event callback (atomic context).
+ *
+ * DATA:  defer the actual read to wcnssctrl_rx_work.
+ * OPEN:  kick off version query, PM config download and (if a VADC device
+ *        is present on pronto) the battery-voltage report; re-arm the
+ *        deferred pm_qos removal.
+ * CLOSE: invalidate NV-download and cold-boot-cal state.
+ */
+static void wcnss_smd_notify_event(void *data, unsigned int event)
+{
+	int len = 0;
+
+	if (penv != data) {
+		pr_err("wcnss: invalid env pointer in smd callback\n");
+		return;
+	}
+	switch (event) {
+	case SMD_EVENT_DATA:
+		len = smd_read_avail(penv->smd_ch);
+		if (len < 0) {
+			pr_err("wcnss: failed to read from smd %d\n", len);
+			return;
+		}
+		schedule_work(&penv->wcnssctrl_rx_work);
+		break;
+
+	case SMD_EVENT_OPEN:
+		pr_debug("wcnss: opening WCNSS SMD channel :%s",
+			 WCNSS_CTRL_CHANNEL);
+		schedule_work(&penv->wcnssctrl_version_work);
+		schedule_work(&penv->wcnss_pm_config_work);
+		cancel_delayed_work(&penv->wcnss_pm_qos_del_req);
+		schedule_delayed_work(&penv->wcnss_pm_qos_del_req, 0);
+		if (penv->wlan_config.is_pronto_vadc && (penv->vadc_dev))
+			schedule_work(&penv->wcnss_vadc_work);
+		break;
+
+	case SMD_EVENT_CLOSE:
+		pr_debug("wcnss: closing WCNSS SMD channel :%s",
+			 WCNSS_CTRL_CHANNEL);
+		penv->nv_downloaded = 0;
+		penv->is_cbc_done = 0;
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * wcnss_pinctrl_set_state() - select the 5-wire pinctrl state.
+ *
+ * @active: true for the default (active) state, false for sleep.
+ *
+ * Returns 0 on success, a negative errno from pinctrl_select_state(), or
+ * the PTR_ERR of an invalid cached pin state.
+ */
+static int
+wcnss_pinctrl_set_state(bool active)
+{
+	struct pinctrl_state *pin_state;
+	int ret;
+
+	pr_debug("%s: Set GPIO state : %d\n", __func__, active);
+
+	pin_state = active ? penv->wcnss_5wire_active
+			   : penv->wcnss_5wire_suspend;
+
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		ret = pinctrl_select_state(penv->pinctrl, pin_state);
+		if (ret < 0) {
+			pr_err("%s: can not set %s pins\n", __func__,
+			       active ? WCNSS_PINCTRL_STATE_DEFAULT
+				      : WCNSS_PINCTRL_STATE_SLEEP);
+			return ret;
+		}
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+		       active ? WCNSS_PINCTRL_STATE_DEFAULT
+			      : WCNSS_PINCTRL_STATE_SLEEP);
+		return PTR_ERR(pin_state);
+	}
+
+	return 0;
+}
+
+/*
+ * wcnss_pinctrl_init() - fetch the pinctrl handle, the active/sleep 5-wire
+ * states, the optional gpio-mode state, and resolve the GPIO numbers from
+ * the device tree.
+ *
+ * The gpio-mode state and the individual GPIO numbers are optional (only
+ * needed for the register-dump-on-bite bit-bang path), so failures there
+ * just warn. Returns 0 on success or the pinctrl PTR_ERR.
+ */
+static int
+wcnss_pinctrl_init(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	int i;
+
+	/* Get pinctrl if target uses pinctrl */
+	penv->pinctrl = devm_pinctrl_get(&pdev->dev);
+
+	if (IS_ERR_OR_NULL(penv->pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return PTR_ERR(penv->pinctrl);
+	}
+
+	penv->wcnss_5wire_active
+		= pinctrl_lookup_state(penv->pinctrl,
+				       WCNSS_PINCTRL_STATE_DEFAULT);
+
+	if (IS_ERR_OR_NULL(penv->wcnss_5wire_active)) {
+		pr_err("%s: can not get default pinstate\n", __func__);
+		return PTR_ERR(penv->wcnss_5wire_active);
+	}
+
+	penv->wcnss_5wire_suspend
+		= pinctrl_lookup_state(penv->pinctrl,
+				       WCNSS_PINCTRL_STATE_SLEEP);
+
+	if (IS_ERR_OR_NULL(penv->wcnss_5wire_suspend)) {
+		pr_warn("%s: can not get sleep pinstate\n", __func__);
+		return PTR_ERR(penv->wcnss_5wire_suspend);
+	}
+
+	penv->wcnss_gpio_active = pinctrl_lookup_state(penv->pinctrl,
+					WCNSS_PINCTRL_GPIO_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(penv->wcnss_gpio_active))
+		pr_warn("%s: can not get gpio default pinstate\n", __func__);
+
+	/* of_get_gpio() returns a negative errno on failure, stored as-is */
+	for (i = 0; i < WCNSS_WLAN_MAX_GPIO; i++) {
+		penv->gpios[i] = of_get_gpio(node, i);
+		if (penv->gpios[i] < 0)
+			pr_warn("%s: Fail to get 5wire gpio: %d\n",
+				__func__, i);
+	}
+
+	return 0;
+}
+
+/*
+ * wcnss_pronto_gpios_config() - configure the 5-wire lines for pronto.
+ *
+ * Prefers pinctrl; only when pinctrl setup fails does it fall back to
+ * requesting/freeing the raw GPIOs listed in the device tree.
+ *
+ * @enable: request (true) or free (false) the GPIOs in the fallback path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int
+wcnss_pronto_gpios_config(struct platform_device *pdev, bool enable)
+{
+	int rc = 0;
+	int i, j;
+	int WCNSS_WLAN_NUM_GPIOS = 5;
+
+	/* Use Pinctrl to configure 5 wire GPIOs */
+	rc = wcnss_pinctrl_init(pdev);
+	if (rc) {
+		pr_err("%s: failed to get pin resources\n", __func__);
+		penv->pinctrl = NULL;
+		goto gpio_probe;
+	} else {
+		rc = wcnss_pinctrl_set_state(true);
+		if (rc)
+			pr_err("%s: failed to set pin state\n",
+			       __func__);
+		penv->use_pinctrl = true;
+		return rc;
+	}
+
+gpio_probe:
+	for (i = 0; i < WCNSS_WLAN_NUM_GPIOS; i++) {
+		int gpio = of_get_gpio(pdev->dev.of_node, i);
+
+		if (enable) {
+			rc = gpio_request(gpio, "wcnss_wlan");
+			if (rc) {
+				pr_err("WCNSS gpio_request %d err %d\n",
+				       gpio, rc);
+				goto fail;
+			}
+		} else {
+			gpio_free(gpio);
+		}
+	}
+	return rc;
+
+fail:
+	/* Roll back only the GPIOs that were successfully requested.
+	 * (Previously this loop iterated j but freed of_get_gpio(..., i),
+	 * i.e. it freed the failing GPIO repeatedly and never released the
+	 * ones actually held.)
+	 */
+	for (j = i - 1; j >= 0; j--) {
+		int gpio = of_get_gpio(pdev->dev.of_node, j);
+
+		gpio_free(gpio);
+	}
+	return rc;
+}
+
+/*
+ * wcnss_gpios_config() - legacy (riva) path: request or free the
+ * contiguous GPIO range described by @gpios_5wire.
+ *
+ * On a request failure, GPIOs already taken are freed in reverse order.
+ * Returns 0 on success or the gpio_request() error.
+ */
+static int
+wcnss_gpios_config(struct resource *gpios_5wire, bool enable)
+{
+	int i, j;
+	int rc = 0;
+
+	for (i = gpios_5wire->start; i <= gpios_5wire->end; i++) {
+		if (enable) {
+			rc = gpio_request(i, gpios_5wire->name);
+			if (rc) {
+				pr_err("WCNSS gpio_request %d err %d\n", i, rc);
+				goto fail;
+			}
+		} else {
+			gpio_free(i);
+		}
+	}
+
+	return rc;
+
+fail:
+	for (j = i - 1; j >= gpios_5wire->start; j--)
+		gpio_free(j);
+	return rc;
+}
+
+/* WLAN_CTRL SMD platform-device probe: mark the data channel as ready. */
+static int
+wcnss_wlan_ctrl_probe(struct platform_device *pdev)
+{
+	if (!penv || !penv->triggered)
+		return -ENODEV;
+
+	penv->smd_channel_ready = 1;
+
+	pr_info("%s: SMD ctrl channel up\n", __func__);
+	return 0;
+}
+
+/* WLAN_CTRL SMD platform-device remove: mark the data channel as down. */
+static int
+wcnss_wlan_ctrl_remove(struct platform_device *pdev)
+{
+	if (penv)
+		penv->smd_channel_ready = 0;
+
+	pr_info("%s: SMD ctrl channel down\n", __func__);
+
+	return 0;
+}
+
+/* platform driver bound to the WLAN_CTRL SMD channel device */
+static struct platform_driver wcnss_wlan_ctrl_driver = {
+	.driver = {
+		.name	= "WLAN_CTRL",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= wcnss_wlan_ctrl_probe,
+	.remove	= wcnss_wlan_ctrl_remove,
+};
+
+/* WCNSS_CTRL remove: close the SMD command channel if it was opened. */
+static int
+wcnss_ctrl_remove(struct platform_device *pdev)
+{
+	if (penv && penv->smd_ch)
+		smd_close(penv->smd_ch);
+
+	return 0;
+}
+
+/*
+ * wcnss_ctrl_probe() - open the WCNSS_CTRL SMD command channel.
+ *
+ * Read interrupts are disabled because incoming data is drained from the
+ * deferred rx work scheduled by wcnss_smd_notify_event(), not from IRQ
+ * context. Returns 0 on success, -ENODEV otherwise.
+ */
+static int
+wcnss_ctrl_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (!penv || !penv->triggered)
+		return -ENODEV;
+
+	ret = smd_named_open_on_edge(WCNSS_CTRL_CHANNEL, SMD_APPS_WCNSS,
+				     &penv->smd_ch, penv,
+				     wcnss_smd_notify_event);
+	if (ret < 0) {
+		pr_err("wcnss: cannot open the smd command channel %s: %d\n",
+		       WCNSS_CTRL_CHANNEL, ret);
+		return -ENODEV;
+	}
+	smd_disable_read_intr(penv->smd_ch);
+
+	return 0;
+}
+
+/* platform driver bound to the WCNSS_CTRL SMD channel device */
+static struct platform_driver wcnss_ctrl_driver = {
+	.driver = {
+		.name	= "WCNSS_CTRL",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= wcnss_ctrl_probe,
+	.remove	= wcnss_ctrl_remove,
+};
+
+/* Return the wcnss device, or NULL until probe and SMD are both up. */
+struct device *wcnss_wlan_get_device(void)
+{
+	if (!penv || !penv->pdev || !penv->smd_channel_ready)
+		return NULL;
+
+	return &penv->pdev->dev;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_device);
+
+/* Thin exported wrapper around get_monotonic_boottime() for WLAN users. */
+void wcnss_get_monotonic_boottime(struct timespec *ts)
+{
+	get_monotonic_boottime(ts);
+}
+EXPORT_SYMBOL(wcnss_get_monotonic_boottime);
+
+/* Return the wcnss platform device, or NULL before probe. */
+struct platform_device *wcnss_get_platform_device(void)
+{
+	if (!penv || !penv->pdev)
+		return NULL;
+
+	return penv->pdev;
+}
+EXPORT_SYMBOL(wcnss_get_platform_device);
+
+/* Return the parsed WLAN configuration, or NULL before probe. */
+struct wcnss_wlan_config *wcnss_get_wlan_config(void)
+{
+	if (!penv || !penv->pdev)
+		return NULL;
+
+	return &penv->wlan_config;
+}
+EXPORT_SYMBOL(wcnss_get_wlan_config);
+
+/* Nonzero when the probed hardware is pronto v3; 0 otherwise/unprobed. */
+int wcnss_is_hw_pronto_ver3(void)
+{
+	if (!penv || !penv->pdev)
+		return 0;
+
+	return penv->wlan_config.is_pronto_v3;
+}
+EXPORT_SYMBOL(wcnss_is_hw_pronto_ver3);
+
+/* 1 when probed, NV downloaded, and not shut down; 0 otherwise. */
+int wcnss_device_ready(void)
+{
+	if (!penv || !penv->pdev || !penv->nv_downloaded)
+		return 0;
+
+	return wcnss_device_is_shutdown() ? 0 : 1;
+}
+EXPORT_SYMBOL(wcnss_device_ready);
+
+/* true when cold boot calibration finished and the device is alive. */
+bool wcnss_cbc_complete(void)
+{
+	if (!penv || !penv->pdev || !penv->is_cbc_done)
+		return false;
+
+	return !wcnss_device_is_shutdown();
+}
+EXPORT_SYMBOL(wcnss_cbc_complete);
+
+/* 1 while the device is in shutdown (e.g. during SSR); 0 otherwise. */
+int wcnss_device_is_shutdown(void)
+{
+	return (penv && penv->is_shutdown) ? 1 : 0;
+}
+EXPORT_SYMBOL(wcnss_device_is_shutdown);
+
+/* MMIO resource for the wcnss device; NULL for foreign devs or pre-SMD. */
+struct resource *wcnss_wlan_get_memory_map(struct device *dev)
+{
+	if (!penv || !dev || dev != &penv->pdev->dev ||
+	    !penv->smd_channel_ready)
+		return NULL;
+
+	return penv->mmio_res;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_memory_map);
+
+/* DXE TX IRQ number, or WCNSS_WLAN_IRQ_INVALID when unavailable. */
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev)
+{
+	if (!penv || !dev || dev != &penv->pdev->dev ||
+	    !penv->tx_irq_res || !penv->smd_channel_ready)
+		return WCNSS_WLAN_IRQ_INVALID;
+
+	return penv->tx_irq_res->start;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_tx_irq);
+
+/* DXE RX IRQ number, or WCNSS_WLAN_IRQ_INVALID when unavailable. */
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev)
+{
+	if (!penv || !dev || dev != &penv->pdev->dev ||
+	    !penv->rx_irq_res || !penv->smd_channel_ready)
+		return WCNSS_WLAN_IRQ_INVALID;
+
+	return penv->rx_irq_res->start;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_rx_irq);
+
+/* Register the WLAN driver's suspend/resume callbacks with wcnss. */
+void wcnss_wlan_register_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops)
+		penv->pm_ops = pm_ops;
+}
+EXPORT_SYMBOL(wcnss_wlan_register_pm_ops);
+
+/*
+ * Unregister the WLAN driver's PM callbacks. Sanity-checks that the ops
+ * being removed match what was registered (mismatch is logged, not fatal).
+ */
+void wcnss_wlan_unregister_pm_ops(struct device *dev,
+				  const struct dev_pm_ops *pm_ops)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops) {
+		if (!penv->pm_ops) {
+			pr_err("%s: pm_ops is already unregistered.\n",
+			       __func__);
+			return;
+		}
+
+		if (pm_ops->suspend != penv->pm_ops->suspend ||
+		    pm_ops->resume != penv->pm_ops->resume)
+			pr_err("PM APIs dont match with registered APIs\n");
+		penv->pm_ops = NULL;
+	}
+}
+EXPORT_SYMBOL(wcnss_wlan_unregister_pm_ops);
+
+/* Register a callback invoked when a thermal mitigation level is set. */
+void wcnss_register_thermal_mitigation(struct device *dev,
+				       void (*tm_notify)(struct device *, int))
+{
+	if (penv && dev && tm_notify)
+		penv->tm_notify = tm_notify;
+}
+EXPORT_SYMBOL(wcnss_register_thermal_mitigation);
+
+/* Unregister the thermal-mitigation callback (mismatch logged only). */
+void wcnss_unregister_thermal_mitigation(
+	void (*tm_notify)(struct device *, int))
+{
+	if (penv && tm_notify) {
+		if (tm_notify != penv->tm_notify)
+			pr_err("tm_notify doesn't match registered\n");
+		penv->tm_notify = NULL;
+	}
+}
+EXPORT_SYMBOL(wcnss_unregister_thermal_mitigation);
+
+/* Fetch, cache and return the SoC serial number; 0 before probe. */
+unsigned int wcnss_get_serial_number(void)
+{
+	if (!penv)
+		return 0;
+
+	penv->serial_number = socinfo_get_serial_number();
+	pr_info("%s: Device serial number: %u\n",
+		__func__, penv->serial_number);
+	return penv->serial_number;
+}
+EXPORT_SYMBOL(wcnss_get_serial_number);
+
+/*
+ * Copy the NV-provisioned WLAN MAC address into @mac_addr.
+ * Returns 0 on success, -ENODEV before probe.
+ */
+int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE])
+{
+	if (!penv)
+		return -ENODEV;
+
+	memcpy(mac_addr, penv->wlan_nv_mac_addr, WLAN_MAC_ADDR_SIZE);
+	pr_debug("%s: Get MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+		 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
+		 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
+		 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_get_wlan_mac_address);
+
+/* Module parameter gating wcnss_suspend_notify()/wcnss_resume_notify(). */
+static int enable_wcnss_suspend_notify;
+
+/* param setter: store the int, then log once when the feature turns on. */
+static int enable_wcnss_suspend_notify_set(const char *val,
+					   struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_int(val, kp);
+	if (ret)
+		return ret;
+
+	if (enable_wcnss_suspend_notify)
+		pr_debug("Suspend notification activated for wcnss\n");
+
+	return 0;
+}
+module_param_call(enable_wcnss_suspend_notify, enable_wcnss_suspend_notify_set,
+		  param_get_int, &enable_wcnss_suspend_notify, 0644);
+
+/* 1 when XO auto-detect is available on this platform, else 0. */
+int wcnss_xo_auto_detect_enabled(void)
+{
+	return has_autodetect_xo == 1;
+}
+
+/*
+ * Record the IRIS XO mode selected during power-up.
+ * NOTE(review): no NULL check on penv here, unlike the sibling accessors —
+ * confirm callers only run after probe.
+ */
+void wcnss_set_iris_xo_mode(int iris_xo_mode_set)
+{
+	penv->iris_xo_mode_set = iris_xo_mode_set;
+}
+EXPORT_SYMBOL(wcnss_set_iris_xo_mode);
+
+/* Return the recorded IRIS XO mode, or -ENODEV before SMD is up. */
+int wcnss_wlan_iris_xo_mode(void)
+{
+	if (!penv || !penv->pdev || !penv->smd_channel_ready)
+		return -ENODEV;
+
+	return penv->iris_xo_mode_set;
+}
+EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode);
+
+/* Whether dual-band operation is fused off; -EINVAL before probe. */
+int wcnss_wlan_dual_band_disabled(void)
+{
+	if (!penv || !penv->pdev)
+		return -EINVAL;
+
+	return penv->is_dual_band_disabled;
+}
+EXPORT_SYMBOL(wcnss_wlan_dual_band_disabled);
+
+/*
+ * wcnss_suspend_notify() - tell riva firmware the apps processor is
+ * suspending by setting RIVA_SUSPEND_BIT in the PMU spare register.
+ * No-op unless the enable_wcnss_suspend_notify module param is set;
+ * pronto hardware does not use this mechanism.
+ *
+ * Fix: "®_spinlock" was a mojibake of "&reg_spinlock" (HTML-entity
+ * corruption of "&reg") and did not compile.
+ */
+void wcnss_suspend_notify(void)
+{
+	void __iomem *pmu_spare_reg;
+	u32 reg = 0;
+	unsigned long flags;
+
+	if (!enable_wcnss_suspend_notify)
+		return;
+
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW)
+		return;
+
+	/* For Riva */
+	pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET;
+	spin_lock_irqsave(&reg_spinlock, flags);
+	reg = readl_relaxed(pmu_spare_reg);
+	reg |= RIVA_SUSPEND_BIT;
+	writel_relaxed(reg, pmu_spare_reg);
+	spin_unlock_irqrestore(&reg_spinlock, flags);
+}
+EXPORT_SYMBOL(wcnss_suspend_notify);
+
+/*
+ * wcnss_resume_notify() - clear RIVA_SUSPEND_BIT in the PMU spare
+ * register on resume; mirror of wcnss_suspend_notify().
+ *
+ * Fix: both "®_spinlock" occurrences restored to "&reg_spinlock"
+ * (HTML-entity mojibake of "&reg").
+ */
+void wcnss_resume_notify(void)
+{
+	void __iomem *pmu_spare_reg;
+	u32 reg = 0;
+	unsigned long flags;
+
+	if (!enable_wcnss_suspend_notify)
+		return;
+
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW)
+		return;
+
+	/* For Riva */
+	pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET;
+
+	spin_lock_irqsave(&reg_spinlock, flags);
+	reg = readl_relaxed(pmu_spare_reg);
+	reg &= ~RIVA_SUSPEND_BIT;
+	writel_relaxed(reg, pmu_spare_reg);
+	spin_unlock_irqrestore(&reg_spinlock, flags);
+}
+EXPORT_SYMBOL(wcnss_resume_notify);
+
+/* PM suspend hook: delegate to the registered WLAN driver's suspend. */
+static int wcnss_wlan_suspend(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->suspend)
+		return penv->pm_ops->suspend(dev);
+	return 0;
+}
+
+/* PM resume hook: delegate to the registered WLAN driver's resume. */
+static int wcnss_wlan_resume(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->resume)
+		return penv->pm_ops->resume(dev);
+	return 0;
+}
+
+/* Hold the wcnss wakeup source to keep the system out of suspend. */
+void wcnss_prevent_suspend(void)
+{
+	if (penv)
+		__pm_stay_awake(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_prevent_suspend);
+
+/* Release the wcnss wakeup source taken by wcnss_prevent_suspend(). */
+void wcnss_allow_suspend(void)
+{
+	if (penv)
+		__pm_relax(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_allow_suspend);
+
+/* Detected hardware type (riva/pronto), or -ENODEV before probe. */
+int wcnss_hardware_type(void)
+{
+	return penv ? penv->wcnss_hw_type : -ENODEV;
+}
+EXPORT_SYMBOL(wcnss_hardware_type);
+
+/* Whether firmware calibration data has been collected; -ENODEV early. */
+int fw_cal_data_available(void)
+{
+	return penv ? penv->fw_cal_available : -ENODEV;
+}
+
+/* Configured WLAN RX buffer count, falling back to the default. */
+u32 wcnss_get_wlan_rx_buff_count(void)
+{
+	return penv ? penv->wlan_rx_buff_count : WCNSS_DEF_WLAN_RX_BUFF_COUNT;
+}
+EXPORT_SYMBOL(wcnss_get_wlan_rx_buff_count);
+
+/*
+ * Cache the caller's unsafe-channel list (at most WCNSS_MAX_CH_NUM
+ * entries). Returns 0 on success, -ENODEV on bad input or before probe.
+ */
+int wcnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+	if (!penv || !unsafe_ch_list || ch_count > WCNSS_MAX_CH_NUM)
+		return -ENODEV;
+
+	memcpy(penv->unsafe_ch_list, unsafe_ch_list,
+	       ch_count * sizeof(u16));
+	penv->unsafe_ch_count = ch_count;
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_set_wlan_unsafe_channel);
+
+/*
+ * Copy the cached unsafe-channel list into @unsafe_ch_list and report the
+ * count via @ch_count. Returns -ENODEV before probe or when @buffer_size
+ * (in bytes) is too small for the stored list.
+ */
+int wcnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 buffer_size,
+				  u16 *ch_count)
+{
+	if (!penv)
+		return -ENODEV;
+
+	if (buffer_size < penv->unsafe_ch_count * sizeof(u16))
+		return -ENODEV;
+
+	memcpy(unsafe_ch_list, penv->unsafe_ch_list,
+	       penv->unsafe_ch_count * sizeof(u16));
+	*ch_count = penv->unsafe_ch_count;
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_get_wlan_unsafe_channel);
+
+/*
+ * wcnss_smd_tx() - send @len bytes on the WCNSS_CTRL SMD channel.
+ *
+ * Returns the number of bytes written (>= len) on success, -ENOSPC when
+ * the channel FIFO cannot hold the frame, -ENODEV on a short write.
+ */
+static int wcnss_smd_tx(void *data, int len)
+{
+	int ret = 0;
+
+	ret = smd_write_avail(penv->smd_ch);
+	if (ret < len) {
+		pr_err("wcnss: no space available for smd frame\n");
+		return -ENOSPC;
+	}
+	ret = smd_write(penv->smd_ch, data, len);
+	if (ret < len) {
+		pr_err("wcnss: failed to write Command %d", len);
+		ret = -ENODEV;
+	}
+	return ret;
+}
+
+/*
+ * wcnss_get_battery_volt() - read the battery voltage from the VBAT_SNS
+ * VADC channel into *@result_uv.
+ *
+ * NOTE(review): despite the parameter name, the value stored is
+ * adc_result.physical as reported by qpnp_vadc_read — confirm its unit
+ * (the log line calls it mvolts).
+ *
+ * Returns 0 on success, -1 when no VADC device is bound, or the
+ * qpnp_vadc_read() error.
+ */
+static int wcnss_get_battery_volt(int *result_uv)
+{
+	int rc = -1;
+	struct qpnp_vadc_result adc_result;
+
+	if (!penv->vadc_dev) {
+		pr_err("wcnss: not setting up vadc\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_read(penv->vadc_dev, VBAT_SNS, &adc_result);
+	if (rc) {
+		pr_err("error reading adc channel = %d, rc = %d\n",
+		       VBAT_SNS, rc);
+		return rc;
+	}
+
+	pr_info("Battery mvolts phy=%lld meas=0x%llx\n", adc_result.physical,
+		adc_result.measurement);
+	*result_uv = (int)adc_result.physical;
+
+	return 0;
+}
+
+/*
+ * wcnss_notify_vbat() - ADC threshold-monitor callback.
+ *
+ * Flips the monitor between the low and high threshold (with
+ * WCNSS_VBATT_GUARD hysteresis around WCNSS_VBATT_THRESHOLD), re-arms the
+ * measurement, and schedules vbatt_work to push the new state to
+ * firmware. All parameter updates happen under vbat_monitor_mutex; the
+ * pending vbatt_work is cancelled first so it cannot observe a
+ * half-updated state.
+ */
+static void wcnss_notify_vbat(enum qpnp_tm_state state, void *ctx)
+{
+	int rc = 0;
+
+	mutex_lock(&penv->vbat_monitor_mutex);
+	cancel_delayed_work_sync(&penv->vbatt_work);
+
+	if (state == ADC_TM_LOW_STATE) {
+		pr_debug("wcnss: low voltage notification triggered\n");
+		penv->vbat_monitor_params.state_request =
+			ADC_TM_HIGH_THR_ENABLE;
+		penv->vbat_monitor_params.high_thr = WCNSS_VBATT_THRESHOLD +
+			WCNSS_VBATT_GUARD;
+		penv->vbat_monitor_params.low_thr = 0;
+	} else if (state == ADC_TM_HIGH_STATE) {
+		penv->vbat_monitor_params.state_request =
+			ADC_TM_LOW_THR_ENABLE;
+		penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD -
+			WCNSS_VBATT_GUARD;
+		penv->vbat_monitor_params.high_thr = 0;
+		pr_debug("wcnss: high voltage notification triggered\n");
+	} else {
+		pr_debug("wcnss: unknown voltage notification state: %d\n",
+			 state);
+		mutex_unlock(&penv->vbat_monitor_mutex);
+		return;
+	}
+	pr_debug("wcnss: set low thr to %d and high to %d\n",
+		 penv->vbat_monitor_params.low_thr,
+		 penv->vbat_monitor_params.high_thr);
+
+	rc = qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
+					 &penv->vbat_monitor_params);
+
+	if (rc)
+		pr_err("%s: tm setup failed: %d\n", __func__, rc);
+	else
+		schedule_delayed_work(&penv->vbatt_work,
+				      msecs_to_jiffies(2000));
+
+	mutex_unlock(&penv->vbat_monitor_mutex);
+}
+
+/*
+ * wcnss_setup_vbat_monitoring() - arm the initial battery-voltage
+ * threshold measurement (both low and high set to WCNSS_VBATT_THRESHOLD)
+ * on either the VSYS or VBAT_SNS channel, with wcnss_notify_vbat() as the
+ * crossing callback.
+ *
+ * Returns 0 on success, -1 when no ADC TM device is bound, or the
+ * qpnp_adc_tm_channel_measure() error.
+ */
+static int wcnss_setup_vbat_monitoring(void)
+{
+	int rc = -1;
+
+	if (!penv->adc_tm_dev) {
+		pr_err("wcnss: not setting up vbatt\n");
+		return rc;
+	}
+	penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD;
+	penv->vbat_monitor_params.high_thr = WCNSS_VBATT_THRESHOLD;
+	penv->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+
+	if (penv->is_vsys_adc_channel)
+		penv->vbat_monitor_params.channel = VSYS;
+	else
+		penv->vbat_monitor_params.channel = VBAT_SNS;
+
+	penv->vbat_monitor_params.btm_ctx = (void *)penv;
+	penv->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
+	penv->vbat_monitor_params.threshold_notification = &wcnss_notify_vbat;
+	pr_debug("wcnss: set low thr to %d and high to %d\n",
+		 penv->vbat_monitor_params.low_thr,
+		 penv->vbat_monitor_params.high_thr);
+
+	rc = qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
+					 &penv->vbat_monitor_params);
+	if (rc)
+		pr_err("%s: tm setup failed: %d\n", __func__, rc);
+
+	return rc;
+}
+
+/*
+ * Work handler: send the current measured battery voltage to firmware in
+ * a WCNSS_VBATT_LEVEL_IND message. The voltage snapshot is taken under
+ * vbat_monitor_mutex.
+ */
+static void wcnss_send_vbatt_indication(struct work_struct *work)
+{
+	struct vbatt_message vbatt_msg;
+	int ret = 0;
+
+	vbatt_msg.hdr.msg_type = WCNSS_VBATT_LEVEL_IND;
+	vbatt_msg.hdr.msg_len = sizeof(struct vbatt_message);
+	vbatt_msg.vbatt.threshold = WCNSS_VBATT_THRESHOLD;
+
+	mutex_lock(&penv->vbat_monitor_mutex);
+	vbatt_msg.vbatt.curr_volt = penv->wlan_config.vbatt;
+	mutex_unlock(&penv->vbat_monitor_mutex);
+	pr_debug("wcnss: send curr_volt: %d to FW\n",
+		 vbatt_msg.vbatt.curr_volt);
+
+	ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len);
+	if (ret < 0)
+		pr_err("wcnss: smd tx failed\n");
+}
+
+/*
+ * Work handler: translate the monitor state into a HIGH/LOW battery
+ * indication for firmware, but only on an actual state change (or on the
+ * first report after WCNSS_CONFIG_UNSPECIFIED). A nonzero low_thr means
+ * the monitor is armed for a low crossing, i.e. voltage is currently
+ * high, and vice versa.
+ */
+static void wcnss_update_vbatt(struct work_struct *work)
+{
+	struct vbatt_message vbatt_msg;
+	int ret = 0;
+
+	vbatt_msg.hdr.msg_type = WCNSS_VBATT_LEVEL_IND;
+	vbatt_msg.hdr.msg_len = sizeof(struct vbatt_message);
+	vbatt_msg.vbatt.threshold = WCNSS_VBATT_THRESHOLD;
+
+	mutex_lock(&penv->vbat_monitor_mutex);
+	if (penv->vbat_monitor_params.low_thr &&
+	    (penv->fw_vbatt_state == WCNSS_VBATT_LOW ||
+	     penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)) {
+		vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_HIGH;
+		penv->fw_vbatt_state = WCNSS_VBATT_HIGH;
+		pr_debug("wcnss: send HIGH BATT to FW\n");
+	} else if (!penv->vbat_monitor_params.low_thr &&
+		   (penv->fw_vbatt_state == WCNSS_VBATT_HIGH ||
+		    penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)){
+		vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_LOW;
+		penv->fw_vbatt_state = WCNSS_VBATT_LOW;
+		pr_debug("wcnss: send LOW BATT to FW\n");
+	} else {
+		/* no state change: nothing to report */
+		mutex_unlock(&penv->vbat_monitor_mutex);
+		return;
+	}
+	mutex_unlock(&penv->vbat_monitor_mutex);
+	ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len);
+	if (ret < 0)
+		pr_err("wcnss: smd tx failed\n");
+}
+
+/*
+ * wcnss_fw_status() - read the single status byte firmware appends after
+ * certain messages on the SMD channel.
+ *
+ * Returns the status byte, or 0xFF when nothing is available or the read
+ * fails (0xFF therefore doubles as the error sentinel).
+ */
+static unsigned char wcnss_fw_status(void)
+{
+	int len = 0;
+	int rc = 0;
+
+	unsigned char fw_status = 0xFF;
+
+	len = smd_read_avail(penv->smd_ch);
+	if (len < 1) {
+		pr_err("%s: invalid firmware status", __func__);
+		return fw_status;
+	}
+
+	rc = smd_read(penv->smd_ch, &fw_status, 1);
+	if (rc < 0) {
+		pr_err("%s: incomplete data read from smd\n", __func__);
+		return fw_status;
+	}
+	return fw_status;
+}
+
+/*
+ * wcnss_send_cal_rsp() - acknowledge a calibration-data upload with a
+ * WCNSS_CALDATA_UPLD_RSP message carrying @fw_status as its one-byte
+ * payload. Allocation or SMD failures are logged and otherwise ignored.
+ */
+static void wcnss_send_cal_rsp(unsigned char fw_status)
+{
+	struct smd_msg_hdr *rsphdr;
+	unsigned char *msg = NULL;
+	int rc;
+
+	msg = kmalloc((sizeof(*rsphdr) + 1), GFP_KERNEL);
+	if (!msg)
+		return;
+
+	rsphdr = (struct smd_msg_hdr *)msg;
+	rsphdr->msg_type = WCNSS_CALDATA_UPLD_RSP;
+	rsphdr->msg_len = sizeof(struct smd_msg_hdr) + 1;
+	memcpy(msg + sizeof(struct smd_msg_hdr), &fw_status, 1);
+
+	rc = wcnss_smd_tx(msg, rsphdr->msg_len);
+	if (rc < 0)
+		pr_err("wcnss: smd tx failed\n");
+
+	kfree(msg);
+}
+
+/*
+ * extract_cal_data() - collect one fragment of calibration data that
+ * firmware uploads over SMD (@len bytes available after the message
+ * header has already been consumed).
+ *
+ * Fragment 0 (re)allocates the assembly buffer sized from the header's
+ * total_size; subsequent fragments are appended until LAST_FRAGMENT, at
+ * which point fw_cal_available is set and readers blocked on read_wait
+ * are woken. When a complete set already exists (SSR re-upload) the data
+ * is drained and discarded. Every exit path sends a
+ * WCNSS_CALDATA_UPLD_RSP with success/fail status. Buffer state is
+ * protected by dev_lock.
+ *
+ * Fixes: "Invalid frgament" typo and missing newlines in two pr_err
+ * strings.
+ */
+void extract_cal_data(int len)
+{
+	int rc;
+	struct cal_data_params calhdr;
+	unsigned char fw_status = WCNSS_RESP_FAIL;
+
+	if (len < sizeof(struct cal_data_params)) {
+		pr_err("wcnss: incomplete cal header length\n");
+		return;
+	}
+
+	mutex_lock(&penv->dev_lock);
+	rc = smd_read(penv->smd_ch, (unsigned char *)&calhdr,
+		      sizeof(struct cal_data_params));
+	if (rc < sizeof(struct cal_data_params)) {
+		pr_err("wcnss: incomplete cal header read from smd\n");
+		mutex_unlock(&penv->dev_lock);
+		return;
+	}
+
+	if (penv->fw_cal_exp_frag != calhdr.frag_number) {
+		pr_err("wcnss: Invalid fragment\n");
+		goto unlock_exit;
+	}
+
+	if (calhdr.frag_size > WCNSS_MAX_FRAME_SIZE) {
+		pr_err("wcnss: Invalid fragment size\n");
+		goto unlock_exit;
+	}
+
+	if (penv->fw_cal_available) {
+		/* ignore cal upload from SSR */
+		smd_read(penv->smd_ch, NULL, calhdr.frag_size);
+		penv->fw_cal_exp_frag++;
+		if (calhdr.msg_flags & LAST_FRAGMENT) {
+			penv->fw_cal_exp_frag = 0;
+			goto unlock_exit;
+		}
+		mutex_unlock(&penv->dev_lock);
+		return;
+	}
+
+	if (calhdr.frag_number == 0) {
+		if (calhdr.total_size > MAX_CALIBRATED_DATA_SIZE) {
+			pr_err("wcnss: Invalid cal data size %d",
+			       calhdr.total_size);
+			goto unlock_exit;
+		}
+		kfree(penv->fw_cal_data);
+		penv->fw_cal_rcvd = 0;
+		penv->fw_cal_data = kmalloc(calhdr.total_size,
+					    GFP_KERNEL);
+		if (!penv->fw_cal_data) {
+			/* drain the fragment so the channel stays in sync */
+			smd_read(penv->smd_ch, NULL, calhdr.frag_size);
+			goto unlock_exit;
+		}
+	}
+
+	if (penv->fw_cal_rcvd + calhdr.frag_size >
+	    MAX_CALIBRATED_DATA_SIZE) {
+		pr_err("calibrated data size is more than expected %d",
+		       penv->fw_cal_rcvd + calhdr.frag_size);
+		penv->fw_cal_exp_frag = 0;
+		penv->fw_cal_rcvd = 0;
+		smd_read(penv->smd_ch, NULL, calhdr.frag_size);
+		goto unlock_exit;
+	}
+
+	rc = smd_read(penv->smd_ch, penv->fw_cal_data + penv->fw_cal_rcvd,
+		      calhdr.frag_size);
+	if (rc < calhdr.frag_size)
+		goto unlock_exit;
+
+	penv->fw_cal_exp_frag++;
+	penv->fw_cal_rcvd += calhdr.frag_size;
+
+	if (calhdr.msg_flags & LAST_FRAGMENT) {
+		penv->fw_cal_exp_frag = 0;
+		penv->fw_cal_available = true;
+		pr_info("wcnss: cal data collection completed\n");
+	}
+	mutex_unlock(&penv->dev_lock);
+	wake_up(&penv->read_wait);
+
+	if (penv->fw_cal_available) {
+		fw_status = WCNSS_RESP_SUCCESS;
+		wcnss_send_cal_rsp(fw_status);
+	}
+	return;
+
+unlock_exit:
+	mutex_unlock(&penv->dev_lock);
+	wcnss_send_cal_rsp(fw_status);
+}
+
+/*
+ * wcnssctrl_rx_handler() - work handler draining one message from the WCNSS
+ * SMD control channel and dispatching it by message type.
+ *
+ * The header is read first; the remainder of the payload is then consumed
+ * per-case.  Oversized frames are read into the void (smd_read with a NULL
+ * buffer) so the channel stays aligned on message boundaries.  The read
+ * order below is protocol-critical — do not reorder the smd_read() calls.
+ */
+static void wcnssctrl_rx_handler(struct work_struct *worker)
+{
+	int len = 0;
+	int rc = 0;
+	unsigned char buf[sizeof(struct wcnss_version)];
+	unsigned char build[WCNSS_MAX_BUILD_VER_LEN + 1];
+	struct smd_msg_hdr *phdr;
+	struct smd_msg_hdr smd_msg;
+	struct wcnss_version *pversion;
+	int hw_type;
+	unsigned char fw_status = 0;
+
+	len = smd_read_avail(penv->smd_ch);
+	if (len > WCNSS_MAX_FRAME_SIZE) {
+		pr_err("wcnss: frame larger than the allowed size\n");
+		smd_read(penv->smd_ch, NULL, len);
+		return;
+	}
+	if (len < sizeof(struct smd_msg_hdr)) {
+		pr_err("wcnss: incomplete header available len = %d\n", len);
+		return;
+	}
+
+	rc = smd_read(penv->smd_ch, buf, sizeof(struct smd_msg_hdr));
+	if (rc < sizeof(struct smd_msg_hdr)) {
+		pr_err("wcnss: incomplete header read from smd\n");
+		return;
+	}
+	/* len now counts only the payload that follows the header */
+	len -= sizeof(struct smd_msg_hdr);
+
+	phdr = (struct smd_msg_hdr *)buf;
+
+	switch (phdr->msg_type) {
+	case WCNSS_VERSION_RSP:
+		/* payload must exactly fill the rest of wcnss_version in buf */
+		if (len != sizeof(struct wcnss_version)
+				- sizeof(struct smd_msg_hdr)) {
+			pr_err("wcnss: invalid version data from wcnss %d\n",
+			       len);
+			return;
+		}
+		rc = smd_read(penv->smd_ch, buf + sizeof(struct smd_msg_hdr),
+			      len);
+		if (rc < len) {
+			pr_err("wcnss: incomplete data read from smd\n");
+			return;
+		}
+		pversion = (struct wcnss_version *)buf;
+		penv->fw_major = pversion->major;
+		penv->fw_minor = pversion->minor;
+		snprintf(penv->wcnss_version, WCNSS_VERSION_LEN,
+			 "%02x%02x%02x%02x", pversion->major, pversion->minor,
+			 pversion->version, pversion->revision);
+		pr_info("wcnss: version %s\n", penv->wcnss_version);
+		/* schedule work to download nvbin to ccpu */
+		hw_type = wcnss_hardware_type();
+		switch (hw_type) {
+		case WCNSS_RIVA_HW:
+			/* supported only if riva major >= 1 and minor >= 4 */
+			if ((pversion->major >= 1) && (pversion->minor >= 4)) {
+				pr_info("wcnss: schedule dnld work for riva\n");
+				schedule_work(&penv->wcnssctrl_nvbin_dnld_work);
+			}
+			break;
+
+		case WCNSS_PRONTO_HW:
+			/* pronto additionally asks for the firmware build string */
+			smd_msg.msg_type = WCNSS_BUILD_VER_REQ;
+			smd_msg.msg_len = sizeof(smd_msg);
+			rc = wcnss_smd_tx(&smd_msg, smd_msg.msg_len);
+			if (rc < 0)
+				pr_err("wcnss: smd tx failed: %s\n", __func__);
+
+			/* supported only if pronto major >= 1 and minor >= 4 */
+			if ((pversion->major >= 1) && (pversion->minor >= 4)) {
+				pr_info("wcnss: schedule dnld work for pronto\n");
+				schedule_work(&penv->wcnssctrl_nvbin_dnld_work);
+			}
+			break;
+
+		default:
+			pr_info("wcnss: unknown hw type (%d), will not schedule dnld work\n",
+				hw_type);
+			break;
+		}
+		break;
+
+	case WCNSS_BUILD_VER_RSP:
+		if (len > WCNSS_MAX_BUILD_VER_LEN) {
+			pr_err("wcnss: invalid build version data from wcnss %d\n",
+			       len);
+			return;
+		}
+		rc = smd_read(penv->smd_ch, build, len);
+		if (rc < len) {
+			pr_err("wcnss: incomplete data read from smd\n");
+			return;
+		}
+		/* safe: len <= WCNSS_MAX_BUILD_VER_LEN and build has +1 slot */
+		build[len] = 0;
+		pr_info("wcnss: build version %s\n", build);
+		break;
+
+	case WCNSS_NVBIN_DNLD_RSP:
+		penv->nv_downloaded = true;
+		fw_status = wcnss_fw_status();
+		pr_debug("wcnss: received WCNSS_NVBIN_DNLD_RSP from ccpu %u\n",
+			 fw_status);
+		if (fw_status != WAIT_FOR_CBC_IND)
+			penv->is_cbc_done = 1;
+		wcnss_setup_vbat_monitoring();
+		break;
+
+	case WCNSS_CALDATA_DNLD_RSP:
+		penv->nv_downloaded = true;
+		fw_status = wcnss_fw_status();
+		pr_debug("wcnss: received WCNSS_CALDATA_DNLD_RSP from ccpu %u\n",
+			 fw_status);
+		break;
+	case WCNSS_CBC_COMPLETE_IND:
+		penv->is_cbc_done = 1;
+		pr_debug("wcnss: received WCNSS_CBC_COMPLETE_IND from FW\n");
+		break;
+
+	case WCNSS_CALDATA_UPLD_REQ:
+		/* firmware pushes calibration data; payload parsed elsewhere */
+		extract_cal_data(len);
+		break;
+
+	default:
+		pr_err("wcnss: invalid message type %d\n", phdr->msg_type);
+	}
+}
+
+/* Work handler: send a header-only WCNSS_VERSION_REQ message to firmware. */
+static void wcnss_send_version_req(struct work_struct *worker)
+{
+	struct smd_msg_hdr hdr = {
+		.msg_type = WCNSS_VERSION_REQ,
+		.msg_len = sizeof(hdr),
+	};
+
+	if (wcnss_smd_tx(&hdr, hdr.msg_len) < 0)
+		pr_err("wcnss: smd tx failed\n");
+}
+
+/*
+ * wcnss_send_pm_config() - forward the "qcom,wcnss-pm" DT u32 array to the
+ * firmware as a WCNSS_PM_CONFIG_REQ message.  Silently does nothing when
+ * the property is absent.
+ */
+static void wcnss_send_pm_config(struct work_struct *worker)
+{
+	struct smd_msg_hdr *hdr;
+	unsigned char *msg = NULL;
+	int rc, prop_len;
+	u32 *payload;
+
+	/* prop_len is returned in bytes here */
+	if (!of_find_property(penv->pdev->dev.of_node,
+			      "qcom,wcnss-pm", &prop_len))
+		return;
+
+	msg = kmalloc((sizeof(struct smd_msg_hdr) + prop_len), GFP_KERNEL);
+	if (!msg)
+		return;
+
+	payload = (u32 *)(msg + sizeof(struct smd_msg_hdr));
+
+	/* convert byte length to element count for the u32-array read */
+	prop_len /= sizeof(int);
+
+	rc = of_property_read_u32_array(penv->pdev->dev.of_node,
+					"qcom,wcnss-pm", payload, prop_len);
+	if (rc < 0) {
+		pr_err("wcnss: property read failed\n");
+		kfree(msg);
+		return;
+	}
+
+	/*
+	 * NOTE(review): this unconditionally prints six payload words;
+	 * assumes the DT property always carries >= 6 u32s — TODO confirm
+	 * against the board DTS, otherwise this reads past the buffer.
+	 */
+	pr_debug("%s:size=%d: <%d, %d, %d, %d, %d %d>\n", __func__,
+		 prop_len, *payload, *(payload + 1), *(payload + 2),
+		 *(payload + 3), *(payload + 4), *(payload + 5));
+
+	hdr = (struct smd_msg_hdr *)msg;
+	hdr->msg_type = WCNSS_PM_CONFIG_REQ;
+	hdr->msg_len = sizeof(struct smd_msg_hdr) + (prop_len * sizeof(int));
+
+	rc = wcnss_smd_tx(msg, hdr->msg_len);
+	if (rc < 0)
+		pr_err("wcnss: smd tx failed\n");
+
+	kfree(msg);
+}
+
+/* Delayed-work handler: drop the pm_qos vote so power collapse can resume. */
+static void wcnss_pm_qos_enable_pc(struct work_struct *worker)
+{
+	wcnss_disable_pc_remove_req();
+}
+
+/* Serializes NV download (taken for read) against system suspend (write). */
+static DECLARE_RWSEM(wcnss_pm_sem);
+
+/*
+ * wcnss_nvbin_dnld() - fetch the NV blob via request_firmware() and push it
+ * to the firmware in NV_FRAGMENT_SIZE chunks over SMD.
+ *
+ * Holds wcnss_pm_sem for read so the download cannot race with suspend.
+ * Each fragment is retried up to 3 times on -ENOSPC (SMD FIFO full).
+ */
+static void wcnss_nvbin_dnld(void)
+{
+	int ret = 0;
+	struct nvbin_dnld_req_msg *dnld_req_msg;
+	unsigned short total_fragments = 0;
+	unsigned short count = 0;
+	unsigned short retry_count = 0;
+	unsigned short cur_frag_size = 0;
+	unsigned char *outbuffer = NULL;
+	const void *nv_blob_addr = NULL;
+	unsigned int nv_blob_size = 0;
+	const struct firmware *nv = NULL;
+	struct device *dev = &penv->pdev->dev;
+
+	down_read(&wcnss_pm_sem);
+
+	ret = request_firmware(&nv, NVBIN_FILE, dev);
+
+	if (ret || !nv || !nv->data || !nv->size) {
+		pr_err("wcnss: %s: request_firmware failed for %s (ret = %d)\n",
+		       __func__, NVBIN_FILE, ret);
+		goto out;
+	}
+
+	/*
+	 * Fix: nv->size is unsigned, so a truncated file of <= 4 bytes made
+	 * "nv->size - 4" below wrap to a huge blob size.  Reject it here.
+	 */
+	if (nv->size <= 4) {
+		pr_err("wcnss: %s: invalid nv file size %zu for %s\n",
+		       __func__, nv->size, NVBIN_FILE);
+		goto err_free_nv;
+	}
+
+	/* First 4 bytes in nv blob is validity bitmap.
+	 * We cannot validate nv, so skip those 4 bytes.
+	 */
+	nv_blob_addr = nv->data + 4;
+	nv_blob_size = nv->size - 4;
+
+	total_fragments = TOTALFRAGMENTS(nv_blob_size);
+
+	pr_info("wcnss: NV bin size: %d, total_fragments: %d\n",
+		nv_blob_size, total_fragments);
+
+	/* get buffer for nv bin dnld req message */
+	outbuffer = kmalloc((sizeof(struct nvbin_dnld_req_msg) +
+			     NV_FRAGMENT_SIZE), GFP_KERNEL);
+	if (!outbuffer)
+		goto err_free_nv;
+
+	dnld_req_msg = (struct nvbin_dnld_req_msg *)outbuffer;
+
+	dnld_req_msg->hdr.msg_type = WCNSS_NVBIN_DNLD_REQ;
+	dnld_req_msg->dnld_req_params.msg_flags = 0;
+
+	for (count = 0; count < total_fragments; count++) {
+		dnld_req_msg->dnld_req_params.frag_number = count;
+
+		if (count == (total_fragments - 1)) {
+			/* last fragment, take care of boundary condition */
+			cur_frag_size = nv_blob_size % NV_FRAGMENT_SIZE;
+			if (!cur_frag_size)
+				cur_frag_size = NV_FRAGMENT_SIZE;
+
+			dnld_req_msg->dnld_req_params.msg_flags |=
+				LAST_FRAGMENT;
+			dnld_req_msg->dnld_req_params.msg_flags |=
+				CAN_RECEIVE_CALDATA;
+		} else {
+			cur_frag_size = NV_FRAGMENT_SIZE;
+			dnld_req_msg->dnld_req_params.msg_flags &=
+				~LAST_FRAGMENT;
+		}
+
+		dnld_req_msg->dnld_req_params.nvbin_buffer_size =
+			cur_frag_size;
+
+		dnld_req_msg->hdr.msg_len =
+			sizeof(struct nvbin_dnld_req_msg) + cur_frag_size;
+
+		/* copy NV fragment */
+		memcpy((outbuffer + sizeof(struct nvbin_dnld_req_msg)),
+		       (nv_blob_addr + count * NV_FRAGMENT_SIZE),
+		       cur_frag_size);
+
+		ret = wcnss_smd_tx(outbuffer, dnld_req_msg->hdr.msg_len);
+
+		retry_count = 0;
+		while ((ret == -ENOSPC) && (retry_count <= 3)) {
+			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
+				 __func__);
+			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
+				 count, dnld_req_msg->hdr.msg_len,
+				 total_fragments, retry_count);
+
+			/* wait and try again */
+			msleep(20);
+			retry_count++;
+			ret = wcnss_smd_tx(outbuffer,
+					   dnld_req_msg->hdr.msg_len);
+		}
+
+		if (ret < 0) {
+			pr_err("wcnss: %s: smd tx failed\n", __func__);
+			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
+			       count, dnld_req_msg->hdr.msg_len,
+			       total_fragments, retry_count);
+			goto err_dnld;
+		}
+	}
+
+err_dnld:
+	/* free buffer */
+	kfree(outbuffer);
+
+err_free_nv:
+	/* release firmware */
+	release_firmware(nv);
+
+out:
+	up_read(&wcnss_pm_sem);
+}
+
+/*
+ * wcnss_caldata_dnld() - push calibration data to firmware in
+ * NV_FRAGMENT_SIZE chunks as WCNSS_CALDATA_DNLD_REQ messages.
+ * @cal_data:      buffer holding the full calibration blob
+ * @cal_data_size: blob length in bytes
+ * @msg_to_follow: sets MESSAGE_TO_FOLLOW on the last fragment so firmware
+ *                 knows another message (the NV download) comes next.
+ *
+ * Mirrors the fragment loop in wcnss_nvbin_dnld(): 3 retries on -ENOSPC,
+ * abort on any other tx error.
+ */
+static void wcnss_caldata_dnld(const void *cal_data,
+			       unsigned int cal_data_size, bool msg_to_follow)
+{
+	int ret = 0;
+	struct cal_data_msg *cal_msg;
+	unsigned short total_fragments = 0;
+	unsigned short count = 0;
+	unsigned short retry_count = 0;
+	unsigned short cur_frag_size = 0;
+	unsigned char *outbuffer = NULL;
+
+	total_fragments = TOTALFRAGMENTS(cal_data_size);
+
+	/* one reusable buffer: header + the largest possible fragment */
+	outbuffer = kmalloc((sizeof(struct cal_data_msg) +
+			     NV_FRAGMENT_SIZE), GFP_KERNEL);
+	if (!outbuffer)
+		return;
+
+	cal_msg = (struct cal_data_msg *)outbuffer;
+
+	cal_msg->hdr.msg_type = WCNSS_CALDATA_DNLD_REQ;
+	cal_msg->cal_params.msg_flags = 0;
+
+	for (count = 0; count < total_fragments; count++) {
+		cal_msg->cal_params.frag_number = count;
+
+		if (count == (total_fragments - 1)) {
+			/* last fragment may be short; 0 means exactly full */
+			cur_frag_size = cal_data_size % NV_FRAGMENT_SIZE;
+			if (!cur_frag_size)
+				cur_frag_size = NV_FRAGMENT_SIZE;
+
+			cal_msg->cal_params.msg_flags
+			    |= LAST_FRAGMENT;
+			if (msg_to_follow)
+				cal_msg->cal_params.msg_flags |=
+					MESSAGE_TO_FOLLOW;
+		} else {
+			cur_frag_size = NV_FRAGMENT_SIZE;
+			cal_msg->cal_params.msg_flags &=
+				~LAST_FRAGMENT;
+		}
+
+		cal_msg->cal_params.total_size = cal_data_size;
+		cal_msg->cal_params.frag_size =
+			cur_frag_size;
+
+		cal_msg->hdr.msg_len =
+			sizeof(struct cal_data_msg) + cur_frag_size;
+
+		memcpy((outbuffer + sizeof(struct cal_data_msg)),
+		       (cal_data + count * NV_FRAGMENT_SIZE),
+		       cur_frag_size);
+
+		ret = wcnss_smd_tx(outbuffer, cal_msg->hdr.msg_len);
+
+		retry_count = 0;
+		while ((ret == -ENOSPC) && (retry_count <= 3)) {
+			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
+				 __func__);
+			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
+				 count, cal_msg->hdr.msg_len,
+				 total_fragments, retry_count);
+
+			/* wait and try again */
+			msleep(20);
+			retry_count++;
+			ret = wcnss_smd_tx(outbuffer,
+					   cal_msg->hdr.msg_len);
+		}
+
+		if (ret < 0) {
+			pr_err("wcnss: %s: smd tx failed\n", __func__);
+			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
+			       count, cal_msg->hdr.msg_len,
+			       total_fragments, retry_count);
+			goto err_dnld;
+		}
+	}
+
+err_dnld:
+	/* free buffer */
+	kfree(outbuffer);
+}
+
+/*
+ * wcnss_nvbin_dnld_main() - work handler driving the full download sequence:
+ * optional calibration data first (firmware-provided or user-provided),
+ * then the NV blob.
+ *
+ * When the firmware supports cal data but none is available yet, polls for
+ * user cal for up to ~2.5 s (5 x 500 ms) before giving up and proceeding.
+ */
+static void wcnss_nvbin_dnld_main(struct work_struct *worker)
+{
+	int retry = 0;
+
+	if (!FW_CALDATA_CAPABLE())
+		goto nv_download;
+
+	/* no fw cal, userspace has not flagged cal data, none uploaded yet */
+	if (!penv->fw_cal_available && IS_CAL_DATA_PRESENT
+	    != has_calibrated_data && !penv->user_cal_available) {
+		while (!penv->user_cal_available && retry++ < 5)
+			msleep(500);
+	}
+	/* firmware cal takes precedence over user cal */
+	if (penv->fw_cal_available) {
+		pr_info_ratelimited("wcnss: cal download, using fw cal");
+		wcnss_caldata_dnld(penv->fw_cal_data, penv->fw_cal_rcvd, true);
+
+	} else if (penv->user_cal_available) {
+		pr_info_ratelimited("wcnss: cal download, using user cal");
+		wcnss_caldata_dnld(penv->user_cal_data,
+				   penv->user_cal_rcvd, true);
+	}
+
+nv_download:
+	pr_info_ratelimited("wcnss: NV download");
+	wcnss_nvbin_dnld();
+}
+
+/*
+ * PM notifier: take wcnss_pm_sem for write across suspend so the NV
+ * download path (which takes it for read) cannot run mid-suspend.
+ */
+static int wcnss_pm_notify(struct notifier_block *b,
+			   unsigned long event, void *p)
+{
+	if (event == PM_SUSPEND_PREPARE)
+		down_write(&wcnss_pm_sem);
+	else if (event == PM_POST_SUSPEND)
+		up_write(&wcnss_pm_sem);
+
+	return NOTIFY_DONE;
+}
+
+/* PM notifier hooking wcnss_pm_notify above; presumably registered via
+ * register_pm_notifier() — registration site not in view.
+ */
+static struct notifier_block wcnss_pm_notifier = {
+	.notifier_call = wcnss_pm_notify,
+};
+
+/* Allow exactly one concurrent opener of the wcnss_ctrl device node. */
+static int wcnss_ctrl_open(struct inode *inode, struct file *file)
+{
+	if (!penv || penv->ctrl_device_opened)
+		return -EFAULT;
+
+	penv->ctrl_device_opened = 1;
+	return 0;
+}
+
+/*
+ * process_usr_ctrl_cmd() - handle one command written to the wcnss_ctrl node.
+ * @buf: command buffer; buf[0..1] is a big-endian command id, payload at [2].
+ * @len: command length (bounded by the caller between WCNSS_MIN_CMD_LEN and
+ *       WCNSS_MAX_CMD_LEN; assumed >= 8 for the MAC command — TODO confirm).
+ */
+void process_usr_ctrl_cmd(u8 *buf, size_t len)
+{
+	u16 cmd = buf[0] << 8 | buf[1];
+
+	switch (cmd) {
+	case WCNSS_USR_HAS_CAL_DATA:
+		if (buf[2] > 1) {
+			/* Fix: reject the invalid flag; the old code logged
+			 * the error but stored the bogus value anyway.
+			 */
+			pr_err("%s: Invalid data for cal %d\n", __func__,
+			       buf[2]);
+			break;
+		}
+		has_calibrated_data = buf[2];
+		break;
+	case WCNSS_USR_WLAN_MAC_ADDR:
+		memcpy(&penv->wlan_nv_mac_addr, &buf[2],
+		       sizeof(penv->wlan_nv_mac_addr));
+		pr_debug("%s: MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+			 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
+			 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
+			 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
+		break;
+	default:
+		pr_err("%s: Invalid command %d\n", __func__, cmd);
+		break;
+	}
+}
+
+/*
+ * wcnss_ctrl_write() - write() handler for the wcnss_ctrl device.
+ * Copies a bounded command from userspace and dispatches it.
+ *
+ * Returns @count on success, -EFAULT on bad state, bad length, or a
+ * faulting user buffer.
+ */
+static ssize_t wcnss_ctrl_write(struct file *fp, const char __user
+			*user_buffer, size_t count, loff_t *position)
+{
+	int rc = 0;
+	u8 buf[WCNSS_MAX_CMD_LEN];
+
+	if (!penv || !penv->ctrl_device_opened || WCNSS_MAX_CMD_LEN < count ||
+	    count < WCNSS_MIN_CMD_LEN)
+		return -EFAULT;
+
+	mutex_lock(&penv->ctrl_lock);
+	rc = copy_from_user(buf, user_buffer, count);
+	if (rc == 0)
+		process_usr_ctrl_cmd(buf, count);
+
+	mutex_unlock(&penv->ctrl_lock);
+
+	/* Fix: follow write(2) semantics.  The old code returned the raw
+	 * copy_from_user() residue: 0 on success (userspace would loop) and
+	 * a positive "bytes not copied" count on fault (misread as bytes
+	 * written).
+	 */
+	return rc ? -EFAULT : count;
+}
+
+/* File operations for the wcnss_ctrl character device (commands only). */
+static const struct file_operations wcnss_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = wcnss_ctrl_open,
+	.write = wcnss_ctrl_write,
+};
+
+/*
+ * wcnss_trigger_config() - one-shot bring-up of the WCNSS subsystem: parse
+ * DT/platform configuration, configure GPIOs, map all register regions,
+ * set up work items and wakeup source, then boot the firmware via PIL
+ * (with retries).  Runs at most once per boot (penv->triggered).
+ *
+ * Returns 0 on success or a negative errno; on failure penv is torn down
+ * and cleared, so a later probe must start over.
+ */
+static int
+wcnss_trigger_config(struct platform_device *pdev)
+{
+	int ret = 0;
+	int rc;
+	struct qcom_wcnss_opts *pdata;
+	struct resource *res;
+	int is_pronto_vadc;
+	int is_pronto_v3;
+	int pil_retry = 0;
+	struct device_node *node = (&pdev->dev)->of_node;
+	int has_pronto_hw = of_property_read_bool(node, "qcom,has-pronto-hw");
+
+	is_pronto_vadc = of_property_read_bool(node, "qcom,is-pronto-vadc");
+	is_pronto_v3 = of_property_read_bool(node, "qcom,is-pronto-v3");
+
+	penv->is_vsys_adc_channel =
+		of_property_read_bool(node, "qcom,has-vsys-adc-channel");
+	penv->is_a2xb_split_reg =
+		of_property_read_bool(node, "qcom,has-a2xb-split-reg");
+
+	if (of_property_read_u32(node, "qcom,wlan-rx-buff-count",
+				 &penv->wlan_rx_buff_count)) {
+		penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT;
+	}
+
+	rc = wcnss_parse_voltage_regulator(&penv->wlan_config, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to parse voltage regulators\n");
+		goto fail;
+	}
+
+	/* make sure we are only triggered once */
+	if (penv->triggered)
+		return 0;
+	penv->triggered = 1;
+
+	/* initialize the WCNSS device configuration */
+	pdata = pdev->dev.platform_data;
+	if (has_48mhz_xo == WCNSS_CONFIG_UNSPECIFIED) {
+		if (has_pronto_hw) {
+			has_48mhz_xo =
+			of_property_read_bool(node, "qcom,has-48mhz-xo");
+		} else {
+			/* NOTE(review): assumes riva boards always supply
+			 * platform_data; pdata is dereferenced unchecked here
+			 * — TODO confirm.
+			 */
+			has_48mhz_xo = pdata->has_48mhz_xo;
+		}
+	}
+	penv->wcnss_hw_type = (has_pronto_hw) ? WCNSS_PRONTO_HW : WCNSS_RIVA_HW;
+	penv->wlan_config.use_48mhz_xo = has_48mhz_xo;
+	penv->wlan_config.is_pronto_vadc = is_pronto_vadc;
+	penv->wlan_config.is_pronto_v3 = is_pronto_v3;
+
+	if (has_autodetect_xo == WCNSS_CONFIG_UNSPECIFIED && has_pronto_hw) {
+		has_autodetect_xo =
+			of_property_read_bool(node, "qcom,has-autodetect-xo");
+	}
+
+	penv->thermal_mitigation = 0;
+	strlcpy(penv->wcnss_version, "INVALID", WCNSS_VERSION_LEN);
+
+	/* Configure 5 wire GPIOs */
+	if (!has_pronto_hw) {
+		penv->gpios_5wire = platform_get_resource_byname(pdev,
+					IORESOURCE_IO, "wcnss_gpios_5wire");
+
+		/* allocate 5-wire GPIO resources */
+		if (!penv->gpios_5wire) {
+			dev_err(&pdev->dev, "insufficient IO resources\n");
+			ret = -ENOENT;
+			goto fail_gpio_res;
+		}
+		ret = wcnss_gpios_config(penv->gpios_5wire, true);
+	} else {
+		ret = wcnss_pronto_gpios_config(pdev, true);
+	}
+
+	if (ret) {
+		dev_err(&pdev->dev, "WCNSS gpios config failed.\n");
+		goto fail_gpio_res;
+	}
+
+	/* allocate resources */
+	penv->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      "wcnss_mmio");
+	penv->tx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlantx_irq");
+	penv->rx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlanrx_irq");
+
+	if (!(penv->mmio_res && penv->tx_irq_res && penv->rx_irq_res)) {
+		dev_err(&pdev->dev, "insufficient resources\n");
+		ret = -ENOENT;
+		goto fail_res;
+	}
+	INIT_WORK(&penv->wcnssctrl_rx_work, wcnssctrl_rx_handler);
+	INIT_WORK(&penv->wcnssctrl_version_work, wcnss_send_version_req);
+	INIT_WORK(&penv->wcnss_pm_config_work, wcnss_send_pm_config);
+	INIT_WORK(&penv->wcnssctrl_nvbin_dnld_work, wcnss_nvbin_dnld_main);
+	INIT_DELAYED_WORK(&penv->wcnss_pm_qos_del_req, wcnss_pm_qos_enable_pc);
+
+	wakeup_source_init(&penv->wcnss_wake_lock, "wcnss");
+	/* Add pm_qos request to disable power collapse for DDR */
+	wcnss_disable_pc_add_req();
+
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_phy_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_phy_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->msm_wcnss_base =
+			devm_ioremap_resource(&pdev->dev, res);
+	} else {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "riva_phy_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource riva_phy_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->msm_wcnss_base =
+			devm_ioremap_resource(&pdev->dev, res);
+	}
+
+	/* Fix: devm_ioremap_resource() returns ERR_PTR(), never NULL; the
+	 * old "!ptr" checks here and below could never fire and real mapping
+	 * failures crashed later on the first register access.
+	 */
+	if (IS_ERR(penv->msm_wcnss_base)) {
+		ret = PTR_ERR(penv->msm_wcnss_base);
+		pr_err("%s: ioremap wcnss physical failed\n", __func__);
+		goto fail_ioremap;
+	}
+
+	penv->wlan_config.msm_wcnss_base = penv->msm_wcnss_base;
+	if (wcnss_hardware_type() == WCNSS_RIVA_HW) {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "riva_ccu_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource riva_ccu_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->riva_ccu_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->riva_ccu_base)) {
+			ret = PTR_ERR(penv->riva_ccu_base);
+			pr_err("%s: ioremap riva ccu physical failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+	} else {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_a2xb_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_a2xb_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->pronto_a2xb_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->pronto_a2xb_base)) {
+			ret = PTR_ERR(penv->pronto_a2xb_base);
+			pr_err("%s: ioremap pronto a2xb physical failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_ccpu_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_ccpu_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->pronto_ccpu_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->pronto_ccpu_base)) {
+			ret = PTR_ERR(penv->pronto_ccpu_base);
+			pr_err("%s: ioremap pronto ccpu physical failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+
+		/* for reset FIQ */
+		res = platform_get_resource_byname(penv->pdev,
+						   IORESOURCE_MEM, "wcnss_fiq");
+		if (!res) {
+			dev_err(&pdev->dev, "insufficient irq mem resources\n");
+			ret = -ENOENT;
+			goto fail_ioremap;
+		}
+		penv->fiq_reg = ioremap_nocache(res->start, resource_size(res));
+		if (!penv->fiq_reg) {
+			pr_err("wcnss: %s: ioremap_nocache() failed fiq_reg addr:%pr\n",
+			       __func__, &res->start);
+			ret = -ENOMEM;
+			goto fail_ioremap;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_saw2_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_saw2_base failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->pronto_saw2_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->pronto_saw2_base)) {
+			pr_err("%s: ioremap wcnss physical(saw2) failed\n",
+			       __func__);
+			ret = PTR_ERR(penv->pronto_saw2_base);
+			goto fail_ioremap2;
+		}
+
+		/* pll registers live at a fixed offset inside the mapped
+		 * wcnss region; the old NULL check on this computed pointer
+		 * was dead code and has been dropped.
+		 */
+		penv->pronto_pll_base =
+			penv->msm_wcnss_base + PRONTO_PLL_MODE_OFFSET;
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "wlan_tx_phy_aborts");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource wlan_tx_phy_aborts failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->wlan_tx_phy_aborts =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->wlan_tx_phy_aborts)) {
+			ret = PTR_ERR(penv->wlan_tx_phy_aborts);
+			pr_err("%s: ioremap wlan TX PHY failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "wlan_brdg_err_source");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource wlan_brdg_err_source failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->wlan_brdg_err_source =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->wlan_brdg_err_source)) {
+			ret = PTR_ERR(penv->wlan_brdg_err_source);
+			pr_err("%s: ioremap wlan BRDG ERR failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "wlan_tx_status");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource wlan_tx_status failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->wlan_tx_status =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->wlan_tx_status)) {
+			ret = PTR_ERR(penv->wlan_tx_status);
+			pr_err("%s: ioremap wlan TX STATUS failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "alarms_txctl");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource alarms_txctl failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->alarms_txctl =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->alarms_txctl)) {
+			ret = PTR_ERR(penv->alarms_txctl);
+			pr_err("%s: ioremap alarms TXCTL failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "alarms_tactl");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource alarms_tactl failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->alarms_tactl =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->alarms_tactl)) {
+			ret = PTR_ERR(penv->alarms_tactl);
+			pr_err("%s: ioremap alarms TACTL failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_mcu_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_mcu_base failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->pronto_mcu_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (IS_ERR(penv->pronto_mcu_base)) {
+			ret = PTR_ERR(penv->pronto_mcu_base);
+			pr_err("%s: ioremap pronto mcu physical failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+
+		if (of_property_read_bool(node,
+					  "qcom,is-dual-band-disabled")) {
+			ret = wcnss_get_dual_band_capability_info(pdev);
+			if (ret) {
+				pr_err("%s: failed to get dual band info\n",
+				       __func__);
+				goto fail_ioremap2;
+			}
+		}
+	}
+
+	/* optional helpers: missing ADC / clock support is non-fatal */
+	penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
+	if (IS_ERR(penv->adc_tm_dev)) {
+		pr_err("%s: adc get failed\n", __func__);
+		penv->adc_tm_dev = NULL;
+	} else {
+		INIT_DELAYED_WORK(&penv->vbatt_work, wcnss_update_vbatt);
+		penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
+	}
+
+	penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss");
+	if (IS_ERR(penv->snoc_wcnss)) {
+		pr_err("%s: couldn't get snoc_wcnss\n", __func__);
+		penv->snoc_wcnss = NULL;
+	} else {
+		if (of_property_read_u32(pdev->dev.of_node,
+					 "qcom,snoc-wcnss-clock-freq",
+					 &penv->snoc_wcnss_clock_freq)) {
+			pr_debug("%s: wcnss snoc clock frequency is not defined\n",
+				 __func__);
+			devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss);
+			penv->snoc_wcnss = NULL;
+		}
+	}
+
+	if (penv->wlan_config.is_pronto_vadc) {
+		penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss");
+
+		if (IS_ERR(penv->vadc_dev)) {
+			pr_debug("%s: vadc get failed\n", __func__);
+			penv->vadc_dev = NULL;
+		} else {
+			rc = wcnss_get_battery_volt(&penv->wlan_config.vbatt);
+			INIT_WORK(&penv->wcnss_vadc_work,
+				  wcnss_send_vbatt_indication);
+
+			if (rc < 0)
+				pr_err("Failed to get battery voltage with error= %d\n",
+				       rc);
+		}
+	}
+
+	do {
+		/* trigger initialization of the WCNSS */
+		penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
+		if (IS_ERR(penv->pil)) {
+			dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+			ret = PTR_ERR(penv->pil);
+			wcnss_disable_pc_add_req();
+			wcnss_pronto_log_debug_regs();
+		}
+	} while (pil_retry++ < WCNSS_MAX_PIL_RETRY && IS_ERR(penv->pil));
+
+	if (IS_ERR(penv->pil)) {
+		wcnss_reset_fiq(false);
+		if (penv->wcnss_notif_hdle)
+			subsys_notif_unregister_notifier(penv->wcnss_notif_hdle,
+							 &wnb);
+		penv->pil = NULL;
+		goto fail_ioremap2;
+	}
+	/* Remove pm_qos request */
+	wcnss_disable_pc_remove_req();
+
+	return 0;
+
+fail_ioremap2:
+	if (penv->fiq_reg)
+		iounmap(penv->fiq_reg);
+fail_ioremap:
+	wakeup_source_trash(&penv->wcnss_wake_lock);
+fail_res:
+	if (!has_pronto_hw)
+		wcnss_gpios_config(penv->gpios_5wire, false);
+	else if (penv->use_pinctrl)
+		wcnss_pinctrl_set_state(false);
+	else
+		wcnss_pronto_gpios_config(pdev, false);
+fail_gpio_res:
+	wcnss_disable_pc_remove_req();
+fail:
+	if (penv->wcnss_notif_hdle)
+		subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
+	penv = NULL;
+	return ret;
+}
+
+/*
+ * wcnss_snoc_vote() - vote the wcnss SNOC clock on behalf of the WLAN
+ * driver.
+ * @clk_chk_en: true to set the configured rate and enable the clock,
+ *              false to drop the vote.
+ */
+void wcnss_snoc_vote(bool clk_chk_en)
+{
+	int err;
+
+	if (!penv->snoc_wcnss) {
+		pr_err("%s: couldn't get clk snoc_wcnss\n", __func__);
+		return;
+	}
+
+	if (!clk_chk_en) {
+		clk_disable_unprepare(penv->snoc_wcnss);
+		return;
+	}
+
+	err = clk_set_rate(penv->snoc_wcnss, penv->snoc_wcnss_clock_freq);
+	if (err) {
+		pr_err("%s: snoc_wcnss_clk-clk_set_rate failed =%d\n",
+		       __func__, err);
+		return;
+	}
+
+	if (clk_prepare_enable(penv->snoc_wcnss))
+		pr_err("%s: snoc_wcnss clk enable failed\n", __func__);
+}
+EXPORT_SYMBOL(wcnss_snoc_vote);
+
+/*
+ * Exported shim: the proprietary WLAN driver cannot call
+ * cancel_work_sync() directly, so it goes through this wrapper.
+ */
+void wcnss_flush_work(struct work_struct *work)
+{
+	if (work)
+		cancel_work_sync(work);
+}
+EXPORT_SYMBOL(wcnss_flush_work);
+
+/* wlan prop driver cannot invoke show_stack
+ * function directly, so to invoke this function it
+ * call wcnss_dump_stack function
+ */
+void wcnss_dump_stack(struct task_struct *task)
+{
+	/* NULL stack pointer: let show_stack() use the task's current stack */
+	show_stack(task, NULL);
+}
+EXPORT_SYMBOL(wcnss_dump_stack);
+
+/*
+ * Exported shim: the proprietary WLAN driver cannot call
+ * cancel_delayed_work_sync() directly, so it goes through this wrapper.
+ */
+void wcnss_flush_delayed_work(struct delayed_work *dwork)
+{
+	if (dwork)
+		cancel_delayed_work_sync(dwork);
+}
+EXPORT_SYMBOL(wcnss_flush_delayed_work);
+
+/* wlan prop driver cannot invoke INIT_WORK function
+ * directly, so to invoke this function call
+ * wcnss_init_work function.
+ */
+void wcnss_init_work(struct work_struct *work, void *callbackptr)
+{
+	/* callbackptr is cast by INIT_WORK to a work_func_t; caller must
+	 * pass a function of that signature.
+	 */
+	if (work && callbackptr)
+		INIT_WORK(work, callbackptr);
+}
+EXPORT_SYMBOL(wcnss_init_work);
+
+/* wlan prop driver cannot invoke INIT_DELAYED_WORK
+ * function directly, so to invoke this function
+ * call wcnss_init_delayed_work function.
+ */
+void wcnss_init_delayed_work(struct delayed_work *dwork, void *callbackptr)
+{
+	/* callbackptr must be a work_func_t-compatible function pointer */
+	if (dwork && callbackptr)
+		INIT_DELAYED_WORK(dwork, callbackptr);
+}
+EXPORT_SYMBOL(wcnss_init_delayed_work);
+
+/*
+ * open() handler for the wcnss device node.  First open from userspace
+ * triggers the one-shot subsystem bring-up.
+ */
+static int wcnss_node_open(struct inode *inode, struct file *file)
+{
+	if (!penv)
+		return -EFAULT;
+
+	if (penv->triggered)
+		return 0;
+
+	pr_info(DEVICE " triggered by userspace\n");
+	if (wcnss_trigger_config(penv->pdev))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * wcnss_wlan_read() - read() handler streaming firmware calibration data
+ * to userspace.  Blocks until more cal data than already consumed is
+ * available (or the full set is flagged available); returns 0 (EOF) once
+ * everything has been read.
+ */
+static ssize_t wcnss_wlan_read(struct file *fp, char __user
+			*buffer, size_t count, loff_t *position)
+{
+	int rc = 0;
+
+	if (!penv)
+		return -EFAULT;
+
+	rc = wait_event_interruptible(penv->read_wait, penv->fw_cal_rcvd
+			> penv->user_cal_read || penv->fw_cal_available);
+
+	if (rc < 0)
+		return rc;
+
+	mutex_lock(&penv->dev_lock);
+
+	/* everything received has been consumed: signal EOF */
+	if (penv->fw_cal_available && penv->fw_cal_rcvd
+			== penv->user_cal_read) {
+		rc = 0;
+		goto exit;
+	}
+
+	if (count > penv->fw_cal_rcvd - penv->user_cal_read)
+		count = penv->fw_cal_rcvd - penv->user_cal_read;
+
+	/* Fix: copy_to_user() returns the number of bytes NOT copied; the
+	 * old code returned that positive residue to userspace, which would
+	 * be misread as a byte count.  Report -EFAULT instead.
+	 */
+	if (copy_to_user(buffer, penv->fw_cal_data +
+			 penv->user_cal_read, count)) {
+		rc = -EFAULT;
+		goto exit;
+	}
+	penv->user_cal_read += count;
+	rc = count;
+
+exit:
+	mutex_unlock(&penv->dev_lock);
+	return rc;
+}
+
+/* first (valid) write to this device should be 4 bytes cal file size */
+static ssize_t wcnss_wlan_write(struct file *fp, const char __user
+			*user_buffer, size_t count, loff_t *position)
+{
+	int rc = 0;
+	char *cal_data = NULL;
+
+	if (!penv || penv->user_cal_available)
+		return -EFAULT;
+
+	if (!penv->user_cal_rcvd && count >= 4 && !penv->user_cal_exp_size) {
+		mutex_lock(&penv->dev_lock);
+		rc = copy_from_user((void *)&penv->user_cal_exp_size,
+				    user_buffer, 4);
+		/* Fix: the copy result was previously ignored; a faulting
+		 * user buffer left user_cal_exp_size holding stale kernel
+		 * data that only the range check below might catch.
+		 */
+		if (rc) {
+			penv->user_cal_exp_size = 0;
+			mutex_unlock(&penv->dev_lock);
+			return -EFAULT;
+		}
+		if (!penv->user_cal_exp_size ||
+		    penv->user_cal_exp_size > MAX_CALIBRATED_DATA_SIZE) {
+			pr_err(DEVICE " invalid size to write %d\n",
+			       penv->user_cal_exp_size);
+			penv->user_cal_exp_size = 0;
+			mutex_unlock(&penv->dev_lock);
+			return -EFAULT;
+		}
+		mutex_unlock(&penv->dev_lock);
+		return count;
+	} else if (!penv->user_cal_rcvd && count < 4) {
+		return -EFAULT;
+	}
+
+	mutex_lock(&penv->dev_lock);
+	/* guard both unsigned overflow and exceeding the announced size */
+	if ((UINT32_MAX - count < penv->user_cal_rcvd) ||
+	    (penv->user_cal_exp_size < count + penv->user_cal_rcvd)) {
+		pr_err(DEVICE " invalid size to write %zu\n", count +
+		       penv->user_cal_rcvd);
+		mutex_unlock(&penv->dev_lock);
+		return -ENOMEM;
+	}
+
+	cal_data = kmalloc(count, GFP_KERNEL);
+	if (!cal_data) {
+		mutex_unlock(&penv->dev_lock);
+		return -ENOMEM;
+	}
+
+	rc = copy_from_user(cal_data, user_buffer, count);
+	if (!rc) {
+		memcpy(penv->user_cal_data + penv->user_cal_rcvd,
+		       cal_data, count);
+		penv->user_cal_rcvd += count;
+		rc = count;
+	} else {
+		/* Fix: don't return the positive copy_from_user() residue */
+		rc = -EFAULT;
+	}
+
+	kfree(cal_data);
+	if (penv->user_cal_rcvd == penv->user_cal_exp_size) {
+		penv->user_cal_available = true;
+		pr_info_ratelimited("wcnss: user cal written");
+	}
+	mutex_unlock(&penv->dev_lock);
+
+	return rc;
+}
+
+/* release() handler: nothing to tear down per-open. */
+static int wcnss_node_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/*
+ * wcnss_notif_cb() - subsystem-restart notifier for WCNSS.  Handles proxy
+ * voting around power switch, shutdown/powerup bookkeeping and crash-time
+ * register logging.
+ */
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+			  void *ss_handle)
+{
+	struct platform_device *pdev = wcnss_get_platform_device();
+	struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
+	struct notif_data *data = (struct notif_data *)ss_handle;
+	int ret, xo_mode;
+
+	/* Fix: the original condition "!(code >= MIN) && (code <= MAX)"
+	 * rejected nothing above MAX, so out-of-range codes reached the
+	 * wcnss_subsys_notif_type[code] lookup below out of bounds.
+	 */
+	if (code < SUBSYS_NOTIF_MIN_INDEX || code > SUBSYS_NOTIF_MAX_INDEX) {
+		pr_debug("%s: Invaild subsystem notification code: %lu\n",
+			 __func__, code);
+		return NOTIFY_DONE;
+	}
+
+	pr_debug("%s: wcnss notification event: %lu : %s\n",
+		 __func__, code, wcnss_subsys_notif_type[code]);
+
+	if (code == SUBSYS_PROXY_VOTE) {
+		if (pdev && pwlanconfig) {
+			ret = wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					       WCNSS_WLAN_SWITCH_ON, &xo_mode);
+			wcnss_set_iris_xo_mode(xo_mode);
+			if (ret)
+				pr_err("Failed to execute wcnss_wlan_power\n");
+		}
+	} else if (code == SUBSYS_PROXY_UNVOTE) {
+		if (pdev && pwlanconfig) {
+			/* Temporary workaround as some pronto images have an
+			 * issue of sending an interrupt that it is capable of
+			 * voting for it's resources too early.
+			 */
+			msleep(20);
+			wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					 WCNSS_WLAN_SWITCH_OFF, NULL);
+		}
+	} else if ((code == SUBSYS_BEFORE_SHUTDOWN && data && data->crashed) ||
+		   code == SUBSYS_SOC_RESET) {
+		wcnss_disable_pc_add_req();
+		schedule_delayed_work(&penv->wcnss_pm_qos_del_req,
+				      msecs_to_jiffies(WCNSS_PM_QOS_TIMEOUT));
+		penv->is_shutdown = 1;
+		wcnss_log_debug_regs_on_bite();
+	} else if (code == SUBSYS_POWERUP_FAILURE) {
+		if (pdev && pwlanconfig)
+			wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					 WCNSS_WLAN_SWITCH_OFF, NULL);
+		wcnss_pronto_log_debug_regs();
+		wcnss_disable_pc_remove_req();
+	} else if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		wcnss_disable_pc_add_req();
+		schedule_delayed_work(&penv->wcnss_pm_qos_del_req,
+				      msecs_to_jiffies(WCNSS_PM_QOS_TIMEOUT));
+		penv->is_shutdown = 1;
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		penv->is_shutdown = 0;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* File operations for the wcnss device node (cal data read/write path). */
+static const struct file_operations wcnss_node_fops = {
+	.owner = THIS_MODULE,
+	.open = wcnss_node_open,
+	.read = wcnss_wlan_read,
+	.write = wcnss_wlan_write,
+	.release = wcnss_node_release,
+};
+
+/*
+ * wcnss_cdev_register() - create the wcnss_ctrl and wcnss character
+ * devices (region + class + device node + cdev for each), unwinding in
+ * reverse order on any failure.
+ *
+ * Fixes in this revision: class_create() and device_create() return
+ * ERR_PTR() — never NULL — so the old NULL checks could not detect
+ * failure; cdev_add() returns a negative errno, not specifically -1.
+ *
+ * Returns 0 on success, -ENOMEM on any failure (interface preserved).
+ */
+static int wcnss_cdev_register(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	ret = alloc_chrdev_region(&penv->dev_ctrl, 0, 1, CTRL_DEVICE);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "CTRL Device Registration failed\n");
+		goto alloc_region_ctrl;
+	}
+	ret = alloc_chrdev_region(&penv->dev_node, 0, 1, DEVICE);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "NODE Device Registration failed\n");
+		goto alloc_region_node;
+	}
+
+	penv->node_class = class_create(THIS_MODULE, "wcnss");
+	if (IS_ERR(penv->node_class)) {
+		dev_err(&pdev->dev, "NODE Device Class Creation failed\n");
+		goto class_create_node;
+	}
+
+	if (IS_ERR(device_create(penv->node_class, NULL, penv->dev_ctrl, NULL,
+				 CTRL_DEVICE))) {
+		dev_err(&pdev->dev, "CTRL Device Creation failed\n");
+		goto device_create_ctrl;
+	}
+
+	if (IS_ERR(device_create(penv->node_class, NULL, penv->dev_node, NULL,
+				 DEVICE))) {
+		dev_err(&pdev->dev, "NODE Device Creation failed\n");
+		goto device_create_node;
+	}
+
+	cdev_init(&penv->ctrl_dev, &wcnss_ctrl_fops);
+	cdev_init(&penv->node_dev, &wcnss_node_fops);
+
+	if (cdev_add(&penv->ctrl_dev, penv->dev_ctrl, 1) < 0) {
+		dev_err(&pdev->dev, "CTRL Device addition failed\n");
+		goto cdev_add_ctrl;
+	}
+	if (cdev_add(&penv->node_dev, penv->dev_node, 1) < 0) {
+		dev_err(&pdev->dev, "NODE Device addition failed\n");
+		goto cdev_add_node;
+	}
+
+	return 0;
+
+cdev_add_node:
+	cdev_del(&penv->ctrl_dev);
+cdev_add_ctrl:
+	device_destroy(penv->node_class, penv->dev_node);
+device_create_node:
+	device_destroy(penv->node_class, penv->dev_ctrl);
+device_create_ctrl:
+	class_destroy(penv->node_class);
+class_create_node:
+	unregister_chrdev_region(penv->dev_node, 1);
+alloc_region_node:
+	unregister_chrdev_region(penv->dev_ctrl, 1);
+alloc_region_ctrl:
+	return -ENOMEM;
+}
+
+static void wcnss_cdev_unregister(struct platform_device *pdev)
+{
+ dev_err(&pdev->dev, "Unregistering cdev devices\n");
+ cdev_del(&penv->ctrl_dev);
+ cdev_del(&penv->node_dev);
+ device_destroy(penv->node_class, penv->dev_ctrl);
+ device_destroy(penv->node_class, penv->dev_node);
+ class_destroy(penv->node_class);
+ unregister_chrdev_region(penv->dev_ctrl, 1);
+ unregister_chrdev_region(penv->dev_node, 1);
+}
+
+static int
+wcnss_wlan_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ /* verify we haven't been called more than once */
+ if (penv) {
+ dev_err(&pdev->dev, "cannot handle multiple devices.\n");
+ return -ENODEV;
+ }
+
+ /* create an environment to track the device */
+ penv = devm_kzalloc(&pdev->dev, sizeof(*penv), GFP_KERNEL);
+ if (!penv)
+ return -ENOMEM;
+
+ penv->pdev = pdev;
+
+ penv->user_cal_data =
+ devm_kzalloc(&pdev->dev, MAX_CALIBRATED_DATA_SIZE, GFP_KERNEL);
+ if (!penv->user_cal_data) {
+ dev_err(&pdev->dev, "Failed to alloc memory for cal data.\n");
+ return -ENOMEM;
+ }
+
+ /* register sysfs entries */
+ ret = wcnss_create_sysfs(&pdev->dev);
+ if (ret) {
+ penv = NULL;
+ return -ENOENT;
+ }
+
+ /* register wcnss event notification */
+ penv->wcnss_notif_hdle = subsys_notif_register_notifier("wcnss", &wnb);
+ if (IS_ERR(penv->wcnss_notif_hdle)) {
+ pr_err("wcnss: register event notification failed!\n");
+ return PTR_ERR(penv->wcnss_notif_hdle);
+ }
+
+ mutex_init(&penv->dev_lock);
+ mutex_init(&penv->ctrl_lock);
+ mutex_init(&penv->vbat_monitor_mutex);
+ mutex_init(&penv->pm_qos_mutex);
+ init_waitqueue_head(&penv->read_wait);
+
+ penv->user_cal_rcvd = 0;
+ penv->user_cal_read = 0;
+ penv->user_cal_exp_size = 0;
+ penv->user_cal_available = false;
+
+ /* Since we were built into the kernel we'll be called as part
+ * of kernel initialization. We don't know if userspace
+ * applications are available to service PIL at this time
+ * (they probably are not), so we simply create a device node
+ * here. When userspace is available it should touch the
+ * device so that we know that WCNSS configuration can take
+ * place
+ */
+ pr_info(DEVICE " probed in built-in mode\n");
+
+ return wcnss_cdev_register(pdev);
+}
+
+static int
+wcnss_wlan_remove(struct platform_device *pdev)
+{
+ if (penv->wcnss_notif_hdle)
+ subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
+ wcnss_cdev_unregister(pdev);
+ wcnss_remove_sysfs(&pdev->dev);
+ penv = NULL;
+ return 0;
+}
+
+static const struct dev_pm_ops wcnss_wlan_pm_ops = {
+ .suspend = wcnss_wlan_suspend,
+ .resume = wcnss_wlan_resume,
+};
+
+#ifdef CONFIG_WCNSS_CORE_PRONTO
+static const struct of_device_id msm_wcnss_pronto_match[] = {
+ {.compatible = "qcom,wcnss_wlan"},
+ {}
+};
+#endif
+
+static struct platform_driver wcnss_wlan_driver = {
+ .driver = {
+ .name = DEVICE,
+ .owner = THIS_MODULE,
+ .pm = &wcnss_wlan_pm_ops,
+#ifdef CONFIG_WCNSS_CORE_PRONTO
+ .of_match_table = msm_wcnss_pronto_match,
+#endif
+ },
+ .probe = wcnss_wlan_probe,
+ .remove = wcnss_wlan_remove,
+};
+
+static int __init wcnss_wlan_init(void)
+{
+ platform_driver_register(&wcnss_wlan_driver);
+ platform_driver_register(&wcnss_wlan_ctrl_driver);
+ platform_driver_register(&wcnss_ctrl_driver);
+ register_pm_notifier(&wcnss_pm_notifier);
+
+ return 0;
+}
+
+static void __exit wcnss_wlan_exit(void)
+{
+ if (penv) {
+ if (penv->pil)
+ subsystem_put(penv->pil);
+ penv = NULL;
+ }
+
+ unregister_pm_notifier(&wcnss_pm_notifier);
+ platform_driver_unregister(&wcnss_ctrl_driver);
+ platform_driver_unregister(&wcnss_wlan_ctrl_driver);
+ platform_driver_unregister(&wcnss_wlan_driver);
+}
+
+module_init(wcnss_wlan_init);
+module_exit(wcnss_wlan_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DEVICE "Driver");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 58a9308..a074763 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -28,6 +28,7 @@
#define SPI_NUM_CHIPSELECT (4)
#define SPI_XFER_TIMEOUT_MS (250)
+#define SPI_AUTO_SUSPEND_DELAY (250)
/* SPI SE specific registers */
#define SE_SPI_CPHA (0x224)
#define SE_SPI_LOOPBACK (0x22C)
@@ -153,6 +154,7 @@
int num_rx_eot;
int num_xfers;
void *ipc;
+ bool shared_se;
};
static struct spi_master *get_spi_master(struct device *dev)
@@ -647,11 +649,11 @@
&mas->gsi[mas->num_xfers].desc_cb;
mas->gsi[mas->num_xfers].tx_cookie =
dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
- if (mas->num_rx_eot)
+ if (cmd & SPI_RX_ONLY)
mas->gsi[mas->num_xfers].rx_cookie =
dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
dma_async_issue_pending(mas->tx);
- if (mas->num_rx_eot)
+ if (cmd & SPI_RX_ONLY)
dma_async_issue_pending(mas->rx);
mas->num_xfers++;
return ret;
@@ -726,7 +728,6 @@
memset(mas->gsi, 0,
(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
geni_se_select_mode(mas->base, GSI_DMA);
- dmaengine_resume(mas->tx);
ret = spi_geni_map_buf(mas, spi_msg);
} else {
dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
@@ -743,10 +744,8 @@
mas->cur_speed_hz = 0;
mas->cur_word_len = 0;
- if (mas->cur_xfer_mode == GSI_DMA) {
- dmaengine_pause(mas->tx);
+ if (mas->cur_xfer_mode == GSI_DMA)
spi_geni_unmap_buf(mas, spi_msg);
- }
return 0;
}
@@ -760,9 +759,22 @@
/* Adjust the AB/IB based on the max speed of the slave.*/
rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
rsc->ab = max_speed * DEFAULT_BUS_WIDTH;
+ if (mas->shared_se) {
+ struct se_geni_rsc *rsc;
+ int ret = 0;
+
+ rsc = &mas->spi_rsc;
+ ret = pinctrl_select_state(rsc->geni_pinctrl,
+ rsc->geni_gpio_active);
+ if (ret)
+ GENI_SE_ERR(mas->ipc, false, NULL,
+ "%s: Error %d pinctrl_select_state\n", __func__, ret);
+ }
+
ret = pm_runtime_get_sync(mas->dev);
if (ret < 0) {
- dev_err(mas->dev, "Error enabling SE resources\n");
+ dev_err(mas->dev, "%s:Error enabling SE resources %d\n",
+ __func__, ret);
pm_runtime_put_noidle(mas->dev);
goto exit_prepare_transfer_hardware;
} else {
@@ -854,6 +866,9 @@
"%s:Major:%d Minor:%d step:%dos%d\n",
__func__, major, minor, step, mas->oversampling);
}
+ mas->shared_se =
+ (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
+ FIFO_IF_DISABLE);
}
exit_prepare_transfer_hardware:
return ret;
@@ -863,7 +878,20 @@
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
- pm_runtime_put_sync(mas->dev);
+ if (mas->shared_se) {
+ struct se_geni_rsc *rsc;
+ int ret = 0;
+
+ rsc = &mas->spi_rsc;
+ ret = pinctrl_select_state(rsc->geni_pinctrl,
+ rsc->geni_gpio_sleep);
+ if (ret)
+ GENI_SE_ERR(mas->ipc, false, NULL,
+ "%s: Error %d pinctrl_select_state\n", __func__, ret);
+ }
+
+ pm_runtime_mark_last_busy(mas->dev);
+ pm_runtime_put_autosuspend(mas->dev);
return 0;
}
@@ -1336,6 +1364,9 @@
init_completion(&geni_mas->xfer_done);
init_completion(&geni_mas->tx_cb);
init_completion(&geni_mas->rx_cb);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = spi_register_master(spi);
if (ret) {
@@ -1369,7 +1400,14 @@
struct spi_master *spi = get_spi_master(dev);
struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
- ret = se_geni_resources_off(&geni_mas->spi_rsc);
+ if (geni_mas->shared_se) {
+ ret = se_geni_clks_off(&geni_mas->spi_rsc);
+ if (ret)
+ GENI_SE_ERR(geni_mas->ipc, false, NULL,
+ "%s: Error %d turning off clocks\n", __func__, ret);
+ } else {
+ ret = se_geni_resources_off(&geni_mas->spi_rsc);
+ }
return ret;
}
@@ -1379,7 +1417,14 @@
struct spi_master *spi = get_spi_master(dev);
struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
- ret = se_geni_resources_on(&geni_mas->spi_rsc);
+ if (geni_mas->shared_se) {
+ ret = se_geni_clks_on(&geni_mas->spi_rsc);
+ if (ret)
+ GENI_SE_ERR(geni_mas->ipc, false, NULL,
+ "%s: Error %d turning on clocks\n", __func__, ret);
+ } else {
+ ret = se_geni_resources_on(&geni_mas->spi_rsc);
+ }
return ret;
}
@@ -1390,9 +1435,29 @@
static int spi_geni_suspend(struct device *dev)
{
- if (!pm_runtime_status_suspended(dev))
- return -EBUSY;
- return 0;
+ int ret = 0;
+
+ if (!pm_runtime_status_suspended(dev)) {
+ struct spi_master *spi = get_spi_master(dev);
+ struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+ if (list_empty(&spi->queue) && !spi->cur_msg) {
+ GENI_SE_ERR(geni_mas->ipc, true, dev,
+ "%s: Force suspend", __func__);
+ ret = spi_geni_runtime_suspend(dev);
+ if (ret) {
+ GENI_SE_ERR(geni_mas->ipc, true, dev,
+ "Force suspend Failed:%d", ret);
+ } else {
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ }
+ } else {
+ ret = -EBUSY;
+ }
+ }
+ return ret;
}
#else
static int spi_geni_runtime_suspend(struct device *dev)
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 41f1a19..83b46d4 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -561,13 +561,15 @@
task_lock(selected);
send_sig(SIGKILL, selected, 0);
- if (selected->mm)
+ if (selected->mm) {
task_set_lmk_waiting(selected);
- if (oom_reaper)
- mark_lmk_victim(selected);
+ if (!test_bit(MMF_OOM_SKIP, &selected->mm->flags) &&
+ oom_reaper) {
+ mark_lmk_victim(selected);
+ wake_oom_reaper(selected);
+ }
+ }
task_unlock(selected);
- if (oom_reaper)
- wake_oom_reaper(selected);
trace_lowmemory_kill(selected, cache_size, cache_limit, free);
lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n"
"to free %ldkB on behalf of '%s' (%d) because\n"
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f25bade..9e96f8a 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -330,7 +330,7 @@
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long clipped_freq, floor_freq;
+ unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
struct cpufreq_cooling_device *cpufreq_dev;
if (event != CPUFREQ_ADJUST)
@@ -338,31 +338,30 @@
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ if (!cpumask_intersects(&cpufreq_dev->allowed_cpus,
+ policy->related_cpus))
continue;
-
- /*
- * policy->max is the maximum allowed frequency defined by user
- * and clipped_freq is the maximum that thermal constraints
- * allow.
- *
- * If clipped_freq is lower than policy->max, then we need to
- * readjust policy->max.
- *
- * But, if clipped_freq is greater than policy->max, we don't
- * need to do anything.
- *
- * Similarly, if policy minimum set by the user is less than
- * the floor_frequency, then adjust the policy->min.
- */
- clipped_freq = cpufreq_dev->clipped_freq;
- floor_freq = cpufreq_dev->floor_freq;
-
- if (policy->max > clipped_freq || policy->min < floor_freq)
- cpufreq_verify_within_limits(policy, floor_freq,
- clipped_freq);
- break;
+ if (cpufreq_dev->clipped_freq < clipped_freq)
+ clipped_freq = cpufreq_dev->clipped_freq;
+ if (cpufreq_dev->floor_freq > floor_freq)
+ floor_freq = cpufreq_dev->floor_freq;
}
+ /*
+ * policy->max is the maximum allowed frequency defined by user
+ * and clipped_freq is the maximum that thermal constraints
+ * allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But, if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ *
+ * Similarly, if policy minimum set by the user is less than
+ * the floor_frequency, then adjust the policy->min.
+ */
+ if (policy->max > clipped_freq || policy->min < floor_freq)
+ cpufreq_verify_within_limits(policy, floor_freq, clipped_freq);
mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
diff --git a/drivers/thermal/qcom/bcl_peripheral.c b/drivers/thermal/qcom/bcl_peripheral.c
index 75e553f..3dccff5 100644
--- a/drivers/thermal/qcom/bcl_peripheral.c
+++ b/drivers/thermal/qcom/bcl_peripheral.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -748,10 +748,10 @@
}
bcl_get_devicetree_data(pdev);
+ bcl_configure_lmh_peripheral();
bcl_probe_ibat(pdev);
bcl_probe_vbat(pdev);
bcl_probe_soc(pdev);
- bcl_configure_lmh_peripheral();
dev_set_drvdata(&pdev->dev, bcl_perph);
ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 80c3f91..f8a9a2f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -2591,11 +2591,9 @@
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
- mutex_lock(&tz->lock);
thermal_zone_device_reset(tz);
- mod_delayed_work(system_freezable_power_efficient_wq,
- &tz->poll_queue, 0);
- mutex_unlock(&tz->lock);
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
}
break;
default:
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index d79b95764..185a9e2 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -894,7 +894,6 @@
msm_geni_serial_prep_dma_tx(uport);
}
- IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
return;
check_flow_ctrl:
geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
@@ -963,7 +962,18 @@
SE_GENI_M_IRQ_CLEAR);
}
geni_write_reg_nolog(M_CMD_CANCEL_EN, uport, SE_GENI_M_IRQ_CLEAR);
- IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
+ /*
+ * If we end up having to cancel an on-going Tx for non-console usecase
+ * then it means there was some unsent data in the Tx FIFO, consequently
+ * it means that there is a vote imbalance as we put in a vote during
+ * start_tx() that is removed only as part of a "done" ISR. To balance
+ * this out, remove the vote put in during start_tx().
+ */
+ if (!uart_console(uport)) {
+ IPC_LOG_MSG(port->ipc_log_misc, "%s:Removing vote\n", __func__);
+ msm_geni_serial_power_off(uport);
+ }
+ IPC_LOG_MSG(port->ipc_log_misc, "%s:\n", __func__);
}
static void msm_geni_serial_stop_tx(struct uart_port *uport)
@@ -2529,6 +2539,8 @@
* doing a stop_rx else we could end up flowing off the peer.
*/
mb();
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s: Manual Flow ON 0x%x\n",
+ __func__, uart_manual_rfr);
}
stop_rx_sequencer(&port->uport);
if ((geni_status & M_GENI_CMD_ACTIVE))
@@ -2610,6 +2622,7 @@
mutex_unlock(&tty_port->mutex);
return -EBUSY;
}
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
mutex_unlock(&tty_port->mutex);
}
return 0;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 465a1c6..e0321a1 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -903,6 +903,16 @@
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * Enable hardware control of sending remote wakeup in HS when
+ * the device is in the L1 state.
+ */
+ if (dwc->revision >= DWC3_REVISION_290A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
return 0;
err2:
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e1dc7c8..b91642a 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -217,6 +217,9 @@
/* Global Debug LTSSM Register */
#define DWC3_GDBGLTSSM_LINKSTATE_MASK (0xF << 22)
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW (1 << 24)
+
/* Global USB2 PHY Configuration Register */
#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
@@ -1036,6 +1039,7 @@
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_290A 0x5533290a
#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
#define DWC3_REVISION_320A 0x5533320a
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index cc239b0..89bf6b7 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1061,6 +1061,8 @@
struct dwc3_trb *trb;
int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
: (req->num_bufs + 2);
+ struct scatterlist *sg;
+ struct sg_table *sgt;
dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
num_trbs * sizeof(struct dwc3_trb),
@@ -1073,6 +1075,19 @@
}
dep->num_trbs = num_trbs;
+ dma_get_sgtable(dwc->sysdev, &req->sgt_trb_xfer_ring, dep->trb_pool,
+ dep->trb_pool_dma, num_trbs * sizeof(struct dwc3_trb));
+
+ sgt = &req->sgt_trb_xfer_ring;
+ dev_dbg(dwc->dev, "%s(): trb_pool:%pK trb_pool_dma:%lx\n",
+ __func__, dep->trb_pool, (unsigned long)dep->trb_pool_dma);
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ dev_dbg(dwc->dev,
+ "%i: page_link:%lx offset:%x length:%x address:%lx\n",
+ i, sg->page_link, sg->offset, sg->length,
+ (unsigned long)sg->dma_address);
+
/* IN direction */
if (dep->direction) {
for (i = 0; i < num_trbs ; i++) {
@@ -1138,11 +1153,13 @@
}
}
- pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
+ dev_dbg(dwc->dev, "%s: Initialized TRB Ring for %s\n",
+ __func__, dep->name);
trb = &dep->trb_pool[0];
if (trb) {
for (i = 0; i < num_trbs; i++) {
- pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
+ dev_dbg(dwc->dev,
+ "TRB %d: ADDR:%lx bpl:%x bph:%x sz:%x ctl:%x\n",
i, (unsigned long)dwc3_trb_dma_offset(dep,
&dep->trb_pool[i]), trb->bpl, trb->bph,
trb->size, trb->ctrl);
@@ -1159,7 +1176,7 @@
* @usb_ep - pointer to usb_ep instance.
*
*/
-static void gsi_free_trbs(struct usb_ep *ep)
+static void gsi_free_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
@@ -1176,6 +1193,7 @@
dep->trb_pool = NULL;
dep->trb_pool_dma = 0;
}
+ sg_free_table(&req->sgt_trb_xfer_ring);
}
/*
* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
@@ -1365,7 +1383,8 @@
break;
case GSI_EP_OP_FREE_TRBS:
dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
- gsi_free_trbs(ep);
+ request = (struct usb_gsi_request *)op_data;
+ gsi_free_trbs(ep, request);
break;
case GSI_EP_OP_CONFIG:
request = (struct usb_gsi_request *)op_data;
@@ -3331,6 +3350,8 @@
* turning on usb gdsc regulator clk is stuck off.
*/
dwc3_msm_config_gdsc(mdwc, 1);
+ clk_prepare_enable(mdwc->iface_clk);
+ clk_prepare_enable(mdwc->core_clk);
clk_prepare_enable(mdwc->cfg_ahb_clk);
/* Configure AHB2PHY for one wait state read/write*/
val = readl_relaxed(mdwc->ahb2phy_base +
@@ -3343,6 +3364,8 @@
mb();
}
clk_disable_unprepare(mdwc->cfg_ahb_clk);
+ clk_disable_unprepare(mdwc->core_clk);
+ clk_disable_unprepare(mdwc->iface_clk);
dwc3_msm_config_gdsc(mdwc, 0);
}
}
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index cbce880..986c97c 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -816,6 +816,16 @@
if (!dwc->gadget_driver)
goto out;
+ /*
+ * Workaround for SNPS STAR: 9001046257 which affects dwc3 core
+ * 3.10a or earlier. LPM Not rejected during control transfer. Device
+ * is programmed to reject LPM when SETUP packet is received and
+ * ACK LPM after completing STATUS stage.
+ */
+ if (dwc->has_lpm_erratum && dwc->revision <= DWC3_REVISION_310A)
+ dwc3_masked_write_readback(dwc->regs, DWC3_DCTL,
+ DWC3_DCTL_LPM_ERRATA_MASK, DWC3_DCTL_LPM_ERRATA(0));
+
trace_dwc3_ctrl_req(ctrl);
len = le16_to_cpu(ctrl->wLength);
@@ -990,6 +1000,11 @@
dbg_print(dep->number, "DONE", status, "STATUS");
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
+
+ if (dwc->has_lpm_erratum && dwc->revision <= DWC3_REVISION_310A)
+ dwc3_masked_write_readback(dwc->regs, DWC3_DCTL,
+ DWC3_DCTL_LPM_ERRATA_MASK,
+ DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold));
}
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a06f9a8..1f75b58 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -64,4 +64,28 @@
base - DWC3_GLOBALS_REGS_START + offset, value);
}
+static inline void dwc3_masked_write_readback(void __iomem *base,
+ u32 offset, const u32 mask, u32 value)
+{
+ u32 write_val, tmp;
+
+ tmp = readl_relaxed(base + offset - DWC3_GLOBALS_REGS_START);
+ tmp &= ~mask; /* retain other bits */
+ write_val = tmp | value;
+
+ writel_relaxed(write_val, base + offset - DWC3_GLOBALS_REGS_START);
+
+ /* Read back to see if value was written */
+ tmp = readl_relaxed(base + offset - DWC3_GLOBALS_REGS_START);
+
+ dwc3_trace(trace_dwc3_masked_write_readback,
+ "addr %p readback val %08x",
+ base - DWC3_GLOBALS_REGS_START + offset, tmp);
+
+ tmp &= mask; /* clear other bits */
+ if (tmp != value)
+ pr_err("%s: write: %x to %x FAILED\n",
+ __func__, value, offset);
+}
+
#endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index d24cefd..88f5fb8 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -47,6 +47,11 @@
TP_ARGS(vaf)
);
+DEFINE_EVENT(dwc3_log_msg, dwc3_masked_write_readback,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 598a67d..2bde573 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,3 +10,5 @@
libcomposite-y += composite.o functions.o configfs.o u_f.o
obj-$(CONFIG_USB_GADGET) += udc/ function/ legacy/
+
+obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
new file mode 100644
index 0000000..a9c073b
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -0,0 +1,378 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb/ulpi.h>
+#include <linux/gpio.h>
+
+#include "ci13xxx_udc.c"
+
+#define MSM_USB_BASE (udc->regs)
+
+#define CI13XXX_MSM_MAX_LOG2_ITC 7
+
+struct ci13xxx_udc_context {
+ int irq;
+ void __iomem *regs;
+ int wake_gpio;
+ int wake_irq;
+ bool wake_irq_state;
+};
+
+static struct ci13xxx_udc_context _udc_ctxt;
+
+static irqreturn_t msm_udc_irq(int irq, void *data)
+{
+ return udc_irq();
+}
+
+static void ci13xxx_msm_suspend(void)
+{
+ struct device *dev = _udc->gadget.dev.parent;
+ dev_dbg(dev, "ci13xxx_msm_suspend\n");
+
+ if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) {
+ enable_irq_wake(_udc_ctxt.wake_irq);
+ enable_irq(_udc_ctxt.wake_irq);
+ _udc_ctxt.wake_irq_state = true;
+ }
+}
+
+static void ci13xxx_msm_resume(void)
+{
+ struct device *dev = _udc->gadget.dev.parent;
+ dev_dbg(dev, "ci13xxx_msm_resume\n");
+
+ if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) {
+ disable_irq_wake(_udc_ctxt.wake_irq);
+ disable_irq_nosync(_udc_ctxt.wake_irq);
+ _udc_ctxt.wake_irq_state = false;
+ }
+}
+
+static void ci13xxx_msm_disconnect(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+
+ if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP))
+ usb_phy_io_write(phy,
+ ULPI_MISC_A_VBUSVLDEXT |
+ ULPI_MISC_A_VBUSVLDEXTSEL,
+ ULPI_CLR(ULPI_MISC_A));
+}
+
+/* Link power management reduces power consumption through
+ * brief hardware suspend/resume cycles.
+ */
+static void ci13xxx_msm_set_l1(struct ci13xxx *udc)
+{
+ int temp;
+ struct device *dev = udc->gadget.dev.parent;
+
+ dev_dbg(dev, "Enable link power management\n");
+
+ /* Enable remote wakeup and L1 for IN EPs */
+ writel_relaxed(0xffff0000, USB_L1_EP_CTRL);
+
+ temp = readl_relaxed(USB_L1_CONFIG);
+ temp |= L1_CONFIG_LPM_EN | L1_CONFIG_REMOTE_WAKEUP |
+ L1_CONFIG_GATE_SYS_CLK | L1_CONFIG_PHY_LPM |
+ L1_CONFIG_PLL;
+ writel_relaxed(temp, USB_L1_CONFIG);
+}
+
+static void ci13xxx_msm_connect(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+
+ if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
+ int temp;
+
+ usb_phy_io_write(phy,
+ ULPI_MISC_A_VBUSVLDEXT |
+ ULPI_MISC_A_VBUSVLDEXTSEL,
+ ULPI_SET(ULPI_MISC_A));
+
+ temp = readl_relaxed(USB_GENCONFIG2);
+ temp |= GENCFG2_SESS_VLD_CTRL_EN;
+ writel_relaxed(temp, USB_GENCONFIG2);
+
+ temp = readl_relaxed(USB_USBCMD);
+ temp |= USBCMD_SESS_VLD_CTRL;
+ writel_relaxed(temp, USB_USBCMD);
+
+ /*
+ * Add memory barrier as it is must to complete
+ * above USB PHY and Link register writes before
+ * moving ahead with USB peripheral mode enumeration,
+ * otherwise USB peripheral mode may not work.
+ */
+ mb();
+ }
+}
+
+static void ci13xxx_msm_reset(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+ struct device *dev = udc->gadget.dev.parent;
+
+ writel_relaxed(0, USB_AHBBURST);
+ writel_relaxed(0x08, USB_AHBMODE);
+
+ if (udc->gadget.l1_supported)
+ ci13xxx_msm_set_l1(udc);
+
+ if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) {
+ int temp;
+
+ dev_dbg(dev, "using secondary hsphy\n");
+ temp = readl_relaxed(USB_PHY_CTRL2);
+ temp |= (1<<16);
+ writel_relaxed(temp, USB_PHY_CTRL2);
+
+ /*
+ * Add memory barrier to make sure above LINK writes are
+ * complete before moving ahead with USB peripheral mode
+ * enumeration.
+ */
+ mb();
+ }
+}
+
+static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
+{
+ struct device *dev = udc->gadget.dev.parent;
+
+ switch (event) {
+ case CI13XXX_CONTROLLER_RESET_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+ ci13xxx_msm_reset();
+ break;
+ case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
+ ci13xxx_msm_disconnect();
+ ci13xxx_msm_resume();
+ break;
+ case CI13XXX_CONTROLLER_CONNECT_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_CONNECT_EVENT received\n");
+ ci13xxx_msm_connect();
+ break;
+ case CI13XXX_CONTROLLER_SUSPEND_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n");
+ ci13xxx_msm_suspend();
+ break;
+ case CI13XXX_CONTROLLER_RESUME_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n");
+ ci13xxx_msm_resume();
+ break;
+
+ default:
+ dev_dbg(dev, "unknown ci13xxx_udc event\n");
+ break;
+ }
+}
+
+static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data)
+{
+ struct ci13xxx *udc = _udc;
+
+ if (udc->transceiver && udc->vbus_active && udc->suspended)
+ usb_phy_set_suspend(udc->transceiver, 0);
+ else if (!udc->suspended)
+ ci13xxx_msm_resume();
+
+ return IRQ_HANDLED;
+}
+
+static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
+ .name = "ci13xxx_msm",
+ .flags = CI13XXX_REGS_SHARED |
+ CI13XXX_REQUIRE_TRANSCEIVER |
+ CI13XXX_PULLUP_ON_VBUS |
+ CI13XXX_ZERO_ITC |
+ CI13XXX_DISABLE_STREAMING |
+ CI13XXX_IS_OTG,
+ .nz_itc = 0,
+ .notify_event = ci13xxx_msm_notify_event,
+};
+
+static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev,
+ struct resource *res)
+{
+ int wake_irq;
+ int ret;
+
+ dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n");
+
+ _udc_ctxt.wake_gpio = res->start;
+ gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME");
+ gpio_direction_input(_udc_ctxt.wake_gpio);
+ wake_irq = gpio_to_irq(_udc_ctxt.wake_gpio);
+ if (wake_irq < 0) {
+ dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n",
+ _udc_ctxt.wake_gpio, wake_irq);
+ ret = request_irq(wake_irq, ci13xxx_msm_resume_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "usb resume", NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n");
+ goto gpio_free;
+ }
+ disable_irq(wake_irq);
+ _udc_ctxt.wake_irq = wake_irq;
+
+ return 0;
+
+gpio_free:
+ gpio_free(_udc_ctxt.wake_gpio);
+ _udc_ctxt.wake_gpio = 0;
+ return ret;
+}
+
+static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");
+
+ if (_udc_ctxt.wake_gpio) {
+ gpio_free(_udc_ctxt.wake_gpio);
+ _udc_ctxt.wake_gpio = 0;
+ }
+}
+
+static int ci13xxx_msm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+ struct ci13xxx_platform_data *pdata = pdev->dev.platform_data;
+ bool is_l1_supported = false;
+
+ dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
+
+ if (pdata) {
+ /* Acceptable values for nz_itc are: 0,1,2,4,8,16,32,64 */
+ if (pdata->log2_itc > CI13XXX_MSM_MAX_LOG2_ITC ||
+ pdata->log2_itc <= 0)
+ ci13xxx_msm_udc_driver.nz_itc = 0;
+ else
+ ci13xxx_msm_udc_driver.nz_itc =
+ 1 << (pdata->log2_itc-1);
+
+ is_l1_supported = pdata->l1_supported;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get platform resource mem\n");
+ return -ENXIO;
+ }
+
+ _udc_ctxt.regs = ioremap(res->start, resource_size(res));
+ if (!_udc_ctxt.regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, _udc_ctxt.regs);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "udc_probe failed\n");
+ goto iounmap;
+ }
+
+ _udc->gadget.l1_supported = is_l1_supported;
+
+ _udc_ctxt.irq = platform_get_irq(pdev, 0);
+ if (_udc_ctxt.irq < 0) {
+ dev_err(&pdev->dev, "IRQ not found\n");
+ ret = -ENXIO;
+ goto udc_remove;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
+ if (res) {
+ ret = ci13xxx_msm_install_wake_gpio(pdev, res);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio irq install failed\n");
+ goto udc_remove;
+ }
+ }
+
+ ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
+ pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto gpio_uninstall;
+ }
+
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+gpio_uninstall:
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
+udc_remove:
+ udc_remove();
+iounmap:
+ iounmap(_udc_ctxt.regs);
+
+ return ret;
+}
+
+int ci13xxx_msm_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ free_irq(_udc_ctxt.irq, pdev);
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
+ udc_remove();
+ iounmap(_udc_ctxt.regs);
+ return 0;
+}
+
+void msm_hw_bam_disable(bool bam_disable)
+{
+ u32 val;
+ struct ci13xxx *udc = _udc;
+
+ if (bam_disable)
+ val = readl_relaxed(USB_GENCONFIG) | GENCONFIG_BAM_DISABLE;
+ else
+ val = readl_relaxed(USB_GENCONFIG) & ~GENCONFIG_BAM_DISABLE;
+
+ writel_relaxed(val, USB_GENCONFIG);
+}
+
+static struct platform_driver ci13xxx_msm_driver = {
+ .probe = ci13xxx_msm_probe,
+ .driver = {
+ .name = "msm_hsusb",
+ },
+ .remove = ci13xxx_msm_remove,
+};
+MODULE_ALIAS("platform:msm_hsusb");
+
+static int __init ci13xxx_msm_init(void)
+{
+ return platform_driver_register(&ci13xxx_msm_driver);
+}
+module_init(ci13xxx_msm_init);
+
+static void __exit ci13xxx_msm_exit(void)
+{
+ platform_driver_unregister(&ci13xxx_msm_driver);
+}
+module_exit(ci13xxx_msm_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
new file mode 100644
index 0000000..b8389e2
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -0,0 +1,3876 @@
+/*
+ * ci13xxx_udc.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Description: MIPS USB IP core family device controller
+ * Currently it only supports IP part number CI13412
+ *
+ * This driver is composed of several blocks:
+ * - HW: hardware interface
+ * - DBG: debug facilities (optional)
+ * - UTIL: utilities
+ * - ISR: interrupts handling
+ * - ENDPT: endpoint operations (Gadget API)
+ * - GADGET: gadget operations (Gadget API)
+ * - BUS: bus glue code, bus abstraction layer
+ *
+ * Compile Options
+ * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
+ * - STALL_IN: non-empty bulk-in pipes cannot be halted
+ * if defined mass storage compliance succeeds but with warnings
+ * => case 4: Hi > Dn
+ * => case 5: Hi > Di
+ * => case 8: Hi <> Do
+ * if undefined usbtest 13 fails
+ * - TRACE: enable function tracing (depends on DEBUG)
+ *
+ * Main Features
+ * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
+ * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
+ * - Normal & LPM support
+ *
+ * USBTEST Report
+ * - OK: 0-12, 13 (STALL_IN defined) & 14
+ * - Not Supported: 15 & 16 (ISO)
+ *
+ * TODO List
+ * - OTG
+ * - Isochronous & Interrupt Traffic
+ * - Handle requests which spawns into several TDs
+ * - GET_STATUS(device) - always reports 0
+ * - Gadget API (majority of optional features)
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ratelimit.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/tracepoint.h>
+#include <mach/usb_trace.h>
+#include "ci13xxx_udc.h"
+
+/* Turns on streaming. overrides CI13XXX_DISABLE_STREAMING */
+static unsigned int streaming;
+module_param(streaming, uint, S_IRUGO | S_IWUSR);
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#define USB_MAX_TIMEOUT 25 /* 25msec timeout */
+#define EP_PRIME_CHECK_DELAY (jiffies + msecs_to_jiffies(1000))
+#define MAX_PRIME_CHECK_RETRY 3 /*Wait for 3sec for EP prime failure */
+
+/* ctrl register bank access */
+static DEFINE_SPINLOCK(udc_lock);
+
+/* control endpoint description */
+/*
+ * Synthetic descriptors for the default control endpoints (ep0 OUT/IN);
+ * wMaxPacketSize is fixed at CTRL_PAYLOAD_MAX.
+ */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_out_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/* UDC descriptor */
+/* Singleton controller context; set up at probe time. */
+static struct ci13xxx *_udc;
+
+/* Interrupt statistics */
+#define ISR_MASK   0x1F
+static struct {
+	u32 test;
+	u32 ui;
+	u32 uei;
+	u32 pci;
+	u32 uri;
+	u32 sli;
+	u32 none;
+	/* ring buffer holding the last ISR_MASK+1 handled status words */
+	struct {
+		u32 cnt;
+		u32 buf[ISR_MASK+1];
+		u32 idx;
+	} hndl;
+} isr_statistics;
+
+/**
+ * ffs_nr: find first (least significant) bit set
+ * @x: the word to search
+ *
+ * This function returns bit number (instead of position).
+ * Returns 32 when no bit is set (x == 0).
+ */
+static int ffs_nr(u32 x)
+{
+	int n = ffs(x);
+
+	return n ? n-1 : 32;
+}
+
+/*
+ * Debug record for a request seen during an EBI (external bus
+ * interface) error -- presumably filled by an error handler elsewhere
+ * in this file; TODO confirm usage.
+ */
+struct ci13xxx_ebi_err_entry {
+	u32 *usb_req_buf;
+	u32 usb_req_length;
+	u32 ep_info;
+	struct ci13xxx_ebi_err_entry *next;
+};
+
+/* Snapshot of EBI error registers plus the list of affected requests. */
+struct ci13xxx_ebi_err_data {
+	u32 ebi_err_addr;
+	u32 apkt0;
+	u32 apkt1;
+	struct ci13xxx_ebi_err_entry *ebi_err_entry;
+};
+static struct ci13xxx_ebi_err_data *ebi_err_data;
+
+/******************************************************************************
+ * HW block
+ *****************************************************************************/
+/* register bank descriptor */
+/* Filled in by hw_device_init(); fields are invalid before that. */
+static struct {
+	unsigned lpm; /* is LPM? */
+	void __iomem *abs; /* bus map offset */
+	void __iomem *cap; /* bus map offset + CAP offset + CAP data */
+	size_t size; /* bank size */
+} hw_bank;
+
+/* MSM specific */
+#define ABS_AHBBURST (0x0090UL)
+#define ABS_AHBMODE (0x0098UL)
+/* UDC register map */
+#define ABS_CAPLENGTH (0x100UL)
+#define ABS_HCCPARAMS (0x108UL)
+#define ABS_DCCPARAMS (0x124UL)
+#define ABS_TESTMODE (hw_bank.lpm ? 0x0FCUL : 0x138UL)
+/* offset to CAPLENTGH (addr + data) */
+#define CAP_USBCMD (0x000UL)
+#define CAP_USBSTS (0x004UL)
+#define CAP_USBINTR (0x008UL)
+#define CAP_DEVICEADDR (0x014UL)
+#define CAP_ENDPTLISTADDR (0x018UL)
+#define CAP_PORTSC (0x044UL)
+#define CAP_DEVLC (0x084UL)
+#define CAP_ENDPTPIPEID (0x0BCUL)
+#define CAP_USBMODE (hw_bank.lpm ? 0x0C8UL : 0x068UL)
+#define CAP_ENDPTSETUPSTAT (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
+#define CAP_ENDPTPRIME (hw_bank.lpm ? 0x0DCUL : 0x070UL)
+#define CAP_ENDPTFLUSH (hw_bank.lpm ? 0x0E0UL : 0x074UL)
+#define CAP_ENDPTSTAT (hw_bank.lpm ? 0x0E4UL : 0x078UL)
+#define CAP_ENDPTCOMPLETE (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
+#define CAP_ENDPTCTRL (hw_bank.lpm ? 0x0ECUL : 0x080UL)
+#define CAP_LAST (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
+
+#define REMOTE_WAKEUP_DELAY msecs_to_jiffies(200)
+
+/* maximum number of enpoints: valid only after hw_device_reset() */
+static unsigned hw_ep_max;
+static void dbg_usb_op_fail(u8 addr, const char *name,
+ const struct ci13xxx_ep *mep);
+/**
+ * hw_ep_bit: calculates the bit number
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns bit number: RX endpoints occupy bits 0..15,
+ * TX endpoints bits 16..31 (matching the ENDPT* register layout).
+ */
+static inline int hw_ep_bit(int num, int dir)
+{
+	return num + (dir ? 16 : 0);
+}
+
+/**
+ * ep_to_bit: translates a logical endpoint index into a register bit
+ * @n: logical endpoint index (TX half starts at hw_ep_max/2)
+ *
+ * TX endpoints always map to bits 16..31 regardless of how many
+ * endpoints the controller implements, hence the fill offset.
+ */
+static int ep_to_bit(int n)
+{
+	int fill = 16 - hw_ep_max / 2;
+
+	if (n >= hw_ep_max / 2)
+		n += fill;
+
+	return n;
+}
+
+/**
+ * hw_aread: reads from register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_aread(u32 addr, u32 mask)
+{
+	return ioread32(addr + hw_bank.abs) & mask;
+}
+
+/**
+ * hw_awrite: writes to register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * Non-atomic read-modify-write; callers are expected to serialise
+ * access (udc lock) -- TODO confirm.
+ */
+static void hw_awrite(u32 addr, u32 mask, u32 data)
+{
+	iowrite32(hw_aread(addr, ~mask) | (data & mask),
+		  addr + hw_bank.abs);
+}
+
+/**
+ * hw_cread: reads from register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_cread(u32 addr, u32 mask)
+{
+	return ioread32(addr + hw_bank.cap) & mask;
+}
+
+/**
+ * hw_cwrite: writes to register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * Non-atomic read-modify-write; callers are expected to serialise
+ * access (udc lock) -- TODO confirm.
+ */
+static void hw_cwrite(u32 addr, u32 mask, u32 data)
+{
+	iowrite32(hw_cread(addr, ~mask) | (data & mask),
+		  addr + hw_bank.cap);
+}
+
+/**
+ * hw_ctest_and_clear: tests & clears register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data.
+ * Writing the read value back clears the bits, so this only makes
+ * sense on write-1-to-clear status registers (USBSTS, ENDPTSETUPSTAT,
+ * ENDPTCOMPLETE).
+ */
+static u32 hw_ctest_and_clear(u32 addr, u32 mask)
+{
+	u32 reg = hw_cread(addr, mask);
+
+	iowrite32(reg, addr + hw_bank.cap);
+	return reg;
+}
+
+/**
+ * hw_ctest_and_write: tests & writes register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * This function returns the previous bitfield value, shifted down to
+ * bit 0.
+ */
+static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
+{
+	u32 reg = hw_cread(addr, ~0);
+
+	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
+	return (reg & mask) >> ffs_nr(mask);
+}
+
+/**
+ * hw_device_init: initializes the register bank descriptor
+ * @base: ioremapped controller base address
+ *
+ * Locates the operational (CAP) register bank via CAPLENGTH, caches the
+ * LPM capability and bank size in hw_bank, and derives hw_ep_max from
+ * DCCPARAMS.
+ *
+ * This function returns an error code (-ENODEV if the endpoint count
+ * is zero or exceeds ENDPT_MAX).
+ */
+static int hw_device_init(void __iomem *base)
+{
+	u32 reg;
+
+	/* bank is a module variable */
+	hw_bank.abs = base;
+
+	hw_bank.cap = hw_bank.abs;
+	hw_bank.cap += ABS_CAPLENGTH;
+	hw_bank.cap += ioread8(hw_bank.cap);
+
+	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
+	hw_bank.lpm  = reg;
+	hw_bank.size = hw_bank.cap - hw_bank.abs;
+	hw_bank.size += CAP_LAST;
+	hw_bank.size /= sizeof(u32);
+
+	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
+	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
+
+	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+		return -ENODEV;
+
+	/* setup lock mode ? */
+
+	/* ENDPTSETUPSTAT is '0' by default */
+
+	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */
+
+	return 0;
+}
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @udc: the controller descriptor
+ *
+ * Flushes and stops the controller, issues a core reset, notifies the
+ * glue layer, then programs device mode and the interrupt threshold.
+ *
+ * This function returns an error code
+ */
+static int hw_device_reset(struct ci13xxx *udc)
+{
+	int delay_count = 25; /* 250 usec */
+
+	/* should flush & stop before reset */
+	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
+	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+
+	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
+	while (delay_count-- && hw_cread(CAP_USBCMD, USBCMD_RST))
+		udelay(10);
+	if (delay_count < 0)
+		pr_err("USB controller reset failed\n");
+
+	if (udc->udc_driver->notify_event)
+		udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_RESET_EVENT);
+
+	/* USBMODE should be configured step by step */
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
+	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
+
+	/*
+	 * ITC (Interrupt Threshold Control) field is to set the maximum
+	 * rate at which the device controller will issue interrupts.
+	 * The maximum interrupt interval measured in micro frames.
+	 * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is
+	 * 8 micro frames. If CPU can handle interrupts at faster rate, ITC
+	 * can be set to lesser value to gain performance.
+	 */
+	if (udc->udc_driver->nz_itc)
+		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK,
+			USBCMD_ITC(udc->udc_driver->nz_itc));
+	else if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
+		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
+
+	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
+		pr_err("cannot enter in device mode");
+		pr_err("lpm = %i", hw_bank.lpm);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * hw_device_state: enables/disables interrupts & starts/stops device (execute
+ *                  without interruption)
+ * @dma: 0 => disable, !0 => enable and set dma engine
+ *
+ * When non-zero, @dma is the physical address of the endpoint queue-head
+ * list and is written to ENDPTLISTADDR before Run/Stop is set.
+ *
+ * This function returns an error code
+ */
+static int hw_device_state(u32 dma)
+{
+	struct ci13xxx *udc = _udc;
+
+	if (dma) {
+		if (streaming || !(udc->udc_driver->flags &
+				CI13XXX_DISABLE_STREAMING))
+			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, 0);
+		else
+			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
+
+		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
+
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_CONNECT_EVENT);
+
+		/* interrupt, error, port change, reset, sleep/suspend */
+		hw_cwrite(CAP_USBINTR, ~0,
+			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
+	} else {
+		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+		hw_cwrite(CAP_USBINTR, ~0, 0);
+	}
+	return 0;
+}
+
+/**
+ * debug_ept_flush_info: dumps controller registers and endpoint state
+ * @ep_num: endpoint number
+ * @dir: endpoint direction
+ *
+ * Called from hw_ep_flush() on a flush timeout; rate-limited register
+ * dump followed by the endpoint's queue-head/request state.
+ */
+static void debug_ept_flush_info(int ep_num, int dir)
+{
+	struct ci13xxx *udc = _udc;
+	struct ci13xxx_ep *mep;
+
+	if (dir)
+		mep = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
+	else
+		mep = &udc->ci13xxx_ep[ep_num];
+
+	pr_err_ratelimited("USB Registers\n");
+	pr_err_ratelimited("USBCMD:%x\n", hw_cread(CAP_USBCMD, ~0));
+	pr_err_ratelimited("USBSTS:%x\n", hw_cread(CAP_USBSTS, ~0));
+	pr_err_ratelimited("ENDPTLISTADDR:%x\n",
+			hw_cread(CAP_ENDPTLISTADDR, ~0));
+	pr_err_ratelimited("PORTSC:%x\n", hw_cread(CAP_PORTSC, ~0));
+	pr_err_ratelimited("USBMODE:%x\n", hw_cread(CAP_USBMODE, ~0));
+	pr_err_ratelimited("ENDPTSTAT:%x\n", hw_cread(CAP_ENDPTSTAT, ~0));
+
+	dbg_usb_op_fail(0xFF, "FLUSHF", mep);
+}
+/**
+ * hw_ep_flush: flush endpoint fifo (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * Busy-waits up to USB_MAX_TIMEOUT ms for the flush to complete.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_flush(int num, int dir)
+{
+	ktime_t start, diff;
+	int n = hw_ep_bit(num, dir);
+	struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
+
+	/* Flush ep0 even when queue is empty */
+	if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
+		return 0;
+
+	start = ktime_get();
+	do {
+		/* flush any pending transfer */
+		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
+		while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) {
+			cpu_relax();
+			diff = ktime_sub(ktime_get(), start);
+			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
+				printk_ratelimited(KERN_ERR
+					"%s: Failed to flush ep#%d %s\n",
+					__func__, num,
+					dir ? "IN" : "OUT");
+				debug_ept_flush_info(num, dir);
+				/*
+				 * Latch the failure: all later flushes
+				 * are skipped until skip_flush is
+				 * cleared -- presumably on controller
+				 * reset; TODO confirm.
+				 */
+				_udc->skip_flush = true;
+				return 0;
+			}
+		}
+	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
+
+	return 0;
+}
+
+/**
+ * hw_ep_disable: disables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * Clears only the enable bit for the given direction; the opposite
+ * direction of the same endpoint is left untouched.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_disable(int num, int dir)
+{
+	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
+		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+	return 0;
+}
+
+/**
+ * hw_ep_enable: enables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @type: endpoint type
+ *
+ * Programs the transfer type, clears any stall, resets the data toggle
+ * and sets the enable bit for the given direction in one write.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_enable(int num, int dir, int type)
+{
+	u32 mask, data;
+
+	if (dir) {
+		mask  = ENDPTCTRL_TXT;  /* type    */
+		data  = type << ffs_nr(mask);
+
+		mask |= ENDPTCTRL_TXS;  /* unstall */
+		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
+		data |= ENDPTCTRL_TXR;
+		mask |= ENDPTCTRL_TXE;  /* enable  */
+		data |= ENDPTCTRL_TXE;
+	} else {
+		mask  = ENDPTCTRL_RXT;  /* type    */
+		data  = type << ffs_nr(mask);
+
+		mask |= ENDPTCTRL_RXS;  /* unstall */
+		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
+		data |= ENDPTCTRL_RXR;
+		mask |= ENDPTCTRL_RXE;  /* enable  */
+		data |= ENDPTCTRL_RXE;
+	}
+	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+
+	/* make sure endpoint is enabled before returning */
+	mb();
+
+	return 0;
+}
+
+/**
+ * hw_ep_get_halt: return endpoint halt status
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns 1 if endpoint halted (stall bit set in the
+ * direction's ENDPTCTRL field).
+ */
+static int hw_ep_get_halt(int num, int dir)
+{
+	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+
+	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
+}
+
+/**
+ * hw_test_and_clear_setup_status: test & clear setup status (execute without
+ *                                 interruption)
+ * @n: endpoint number
+ *
+ * The status bit is cleared by writing 1 back (W1C semantics).
+ *
+ * This function returns setup status
+ */
+static int hw_test_and_clear_setup_status(int n)
+{
+	n = ep_to_bit(n);
+	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
+}
+
+/**
+ * hw_ep_prime: primes endpoint (execute without interruption)
+ * @num:     endpoint number
+ * @dir:     endpoint direction
+ * @is_ctrl: true if control endpoint
+ *
+ * For a control OUT endpoint, ENDPTSETUPSTAT is checked both before and
+ * after priming so a setup packet arriving mid-prime is not lost; the
+ * caller retries on -EAGAIN.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_prime(int num, int dir, int is_ctrl)
+{
+	int n = hw_ep_bit(num, dir);
+
+	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+
+	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	/* status should be tested according with manual but it doesn't work */
+	return 0;
+}
+
+/**
+ * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
+ *                 without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @value: true => stall, false => unstall
+ *
+ * Retries until the halt bit reads back with the requested value;
+ * returns early without stalling if a setup packet is pending on the
+ * endpoint.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_set_halt(int num, int dir, int value)
+{
+	u32 addr, mask_xs, mask_xr;
+
+	if (value != 0 && value != 1)
+		return -EINVAL;
+
+	do {
+		if (hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+			return 0;
+
+		addr = CAP_ENDPTCTRL + num * sizeof(u32);
+		mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+		mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+
+		/* data toggle - reserved for EP0 but it's in ESS */
+		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
+
+	} while (value != hw_ep_get_halt(num, dir));
+
+	return 0;
+}
+
+/**
+ * hw_intr_clear: disables interrupt & clears interrupt status (execute without
+ *                interruption)
+ * @n: interrupt bit
+ *
+ * Disables the interrupt in USBINTR, then clears its status in USBSTS
+ * by writing 1 (W1C).
+ *
+ * This function returns an error code
+ */
+static int hw_intr_clear(int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	hw_cwrite(CAP_USBINTR, BIT(n), 0);
+	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));
+	return 0;
+}
+
+/**
+ * hw_intr_force: enables interrupt & forces interrupt status (execute without
+ *                interruption)
+ * @n: interrupt bit
+ *
+ * TESTMODE.FORCE is set temporarily so that writing the USBSTS bit
+ * latches (rather than clears) the status -- test use only.
+ *
+ * This function returns an error code
+ */
+static int hw_intr_force(int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
+	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
+	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
+	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
+	return 0;
+}
+
+/**
+ * hw_port_is_high_speed: test if port is high speed
+ *
+ * Reads DEVLC.PSPD on LPM-capable cores, PORTSC.HSP otherwise.
+ *
+ * This function returns true if high speed port
+ */
+static int hw_port_is_high_speed(void)
+{
+	return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
+		hw_cread(CAP_PORTSC, PORTSC_HSP);
+}
+
+/**
+ * hw_port_test_get: reads port test mode value
+ *
+ * This function returns port test mode value (the PTC field of PORTSC)
+ */
+static u8 hw_port_test_get(void)
+{
+	return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
+}
+
+/**
+ * hw_port_test_set: writes port test mode (execute without interruption)
+ * @mode: new value (0..7, the width of the PORTSC.PTC field)
+ *
+ * This function returns an error code
+ */
+static int hw_port_test_set(u8 mode)
+{
+	const u8 TEST_MODE_MAX = 7;
+
+	if (mode > TEST_MODE_MAX)
+		return -EINVAL;
+
+	hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
+	return 0;
+}
+
+/**
+ * hw_read_intr_enable: returns interrupt enable register
+ *
+ * This function returns register data (USBINTR)
+ */
+static u32 hw_read_intr_enable(void)
+{
+	return hw_cread(CAP_USBINTR, ~0);
+}
+
+/**
+ * hw_read_intr_status: returns interrupt status register
+ *
+ * This function returns register data (USBSTS)
+ */
+static u32 hw_read_intr_status(void)
+{
+	return hw_cread(CAP_USBSTS, ~0);
+}
+
+/**
+ * hw_register_read: reads all device registers (execute without interruption)
+ * @buf:  destination buffer
+ * @size: buffer size, in 32-bit words
+ *
+ * This function returns number of registers read (clamped to the bank
+ * size cached in hw_bank)
+ */
+static size_t hw_register_read(u32 *buf, size_t size)
+{
+	unsigned i;
+
+	if (size > hw_bank.size)
+		size = hw_bank.size;
+
+	for (i = 0; i < size; i++)
+		buf[i] = hw_aread(i * sizeof(u32), ~0);
+
+	return size;
+}
+
+/**
+ * hw_register_write: writes to register
+ * @addr: register address (byte offset; truncated down to a 32-bit
+ *        boundary by the align step below)
+ * @data: register value
+ *
+ * This function returns an error code
+ */
+static int hw_register_write(u16 addr, u32 data)
+{
+	/* align */
+	addr /= sizeof(u32);
+
+	if (addr >= hw_bank.size)
+		return -EINVAL;
+
+	/* align */
+	addr *= sizeof(u32);
+
+	hw_awrite(addr, ~0, data);
+	return 0;
+}
+
+/**
+ * hw_test_and_clear_complete: test & clear complete status (execute without
+ *                             interruption)
+ * @n: endpoint number
+ *
+ * The bit in ENDPTCOMPLETE is cleared by writing 1 back (W1C).
+ *
+ * This function returns complete status
+ */
+static int hw_test_and_clear_complete(int n)
+{
+	n = ep_to_bit(n);
+	return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
+}
+
+/**
+ * hw_test_and_clear_intr_active: test & clear active interrupts (execute
+ *                                without interruption)
+ *
+ * Only interrupts that are both pending (USBSTS) and enabled (USBINTR)
+ * are returned and cleared.
+ *
+ * This function returns active interrutps
+ */
+static u32 hw_test_and_clear_intr_active(void)
+{
+	u32 reg = hw_read_intr_status() & hw_read_intr_enable();
+
+	hw_cwrite(CAP_USBSTS, ~0, reg);
+	return reg;
+}
+
+/**
+ * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
+ *                                interruption)
+ *
+ * Returns the previous USBCMD.SUTW value while writing 0 to it.
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_clear_setup_guard(void)
+{
+	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
+}
+
+/**
+ * hw_test_and_set_setup_guard: test & set setup guard (execute without
+ *                              interruption)
+ *
+ * Returns the previous USBCMD.SUTW value while writing 1 to it.
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_set_setup_guard(void)
+{
+	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
+}
+
+/**
+ * hw_usb_set_address: configures USB address (execute without interruption)
+ * @value: new USB address
+ *
+ * USBADRA is set alongside the address, which presumably defers the
+ * change until the control transfer completes (hardware "advance"
+ * semantics) -- see the controller manual.
+ *
+ * This function returns an error code
+ */
+static int hw_usb_set_address(u8 value)
+{
+	/* advance */
+	hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
+		  value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
+	return 0;
+}
+
+/**
+ * hw_usb_reset: restart device after a bus reset (execute without
+ *               interruption)
+ *
+ * Clears the device address, flushes all endpoints, and clears the
+ * setup/complete status registers by writing their own content back
+ * (W1C), then waits for ENDPTPRIME to drain.
+ *
+ * This function returns an error code
+ */
+static int hw_usb_reset(void)
+{
+	int delay_count = 10; /* 100 usec delay */
+
+	hw_usb_set_address(0);
+
+	/* ESS flushes only at end?!? */
+	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);   /* flush all EPs */
+
+	/* clear setup token semaphores */
+	hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0);   /* writes its content */
+
+	/* clear complete status */
+	hw_cwrite(CAP_ENDPTCOMPLETE,  0, 0);   /* writes its content */
+
+	/* wait until all bits cleared */
+	while (delay_count-- && hw_cread(CAP_ENDPTPRIME, ~0))
+		udelay(10);
+	if (delay_count < 0)
+		pr_err("ENDPTPRIME is not cleared during bus reset\n");
+
+	/* reset all endpoints ? */
+
+	/* reset internal status and wait for further instructions
+	   no need to verify the port reset status (ESS does it) */
+
+	return 0;
+}
+
+/******************************************************************************
+ * DBG block
+ *****************************************************************************/
+/**
+ * show_device: prints information about device capabilities and status
+ *
+ * sysfs read handler; returns 0 (empty read) on invalid arguments.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget *gadget = &udc->gadget;
+	int n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "speed             = %d\n",
+		       gadget->speed);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed         = %d\n",
+		       gadget->max_speed);
+	/* TODO: Scheduled for removal in 3.8. */
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed      = %d\n",
+		       gadget_is_dualspeed(gadget));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg            = %d\n",
+		       gadget->is_otg);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral   = %d\n",
+		       gadget->is_a_peripheral);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable      = %d\n",
+		       gadget->b_hnp_enable);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support     = %d\n",
+		       gadget->a_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
+		       gadget->a_alt_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "name              = %s\n",
+		       (gadget->name ? gadget->name : ""));
+
+	return n;
+}
+static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
+
+/**
+ * show_driver: prints information about attached gadget (if any)
+ *
+ * sysfs read handler; returns 0 (empty read) on invalid arguments.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget_driver *driver = udc->driver;
+	int n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	if (driver == NULL)
+		return scnprintf(buf, PAGE_SIZE,
+				 "There is no gadget attached!\n");
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "function  = %s\n",
+		       (driver->function ? driver->function : ""));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+		       driver->max_speed);
+
+	return n;
+}
+static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
+
+/* Maximum event message length */
+#define DBG_DATA_MSG   64UL
+
+/* Maximum event messages */
+/* Must stay a power of two: dbg_inc()/dbg_dec() wrap with a mask. */
+#define DBG_DATA_MAX   128UL
+
+/* Event buffer descriptor */
+static struct {
+	char     (buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
+	unsigned idx;   /* index */
+	unsigned tty;   /* print to console? */
+	rwlock_t lck;   /* lock */
+} dbg_data = {
+	.idx = 0,
+	.tty = 0,
+	.lck = __RW_LOCK_UNLOCKED(lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index
+ *
+ * Wraps via mask; relies on DBG_DATA_MAX being a power of two.
+ */
+static void dbg_dec(unsigned *idx)
+{
+	*idx = (*idx - 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index
+ *
+ * Wraps via mask; relies on DBG_DATA_MAX being a power of two.
+ */
+static void dbg_inc(unsigned *idx)
+{
+	*idx = (*idx + 1) & (DBG_DATA_MAX-1);
+}
+
+
+/* Per-direction endpoint debug filters: bit N enables logging for ep N. */
+static unsigned int ep_addr_txdbg_mask;
+module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR);
+static unsigned int ep_addr_rxdbg_mask;
+module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR);
+
+/**
+ * allow_dbg_print: decides whether events for @addr should be logged
+ * @addr: endpoint address; 0xff marks a bus-wide event (always logged)
+ */
+static int allow_dbg_print(u8 addr)
+{
+	int dir, num;
+
+	/* allow bus wide events */
+	if (addr == 0xff)
+		return 1;
+
+	dir = addr & USB_ENDPOINT_DIR_MASK ? TX : RX;
+	num = addr & ~USB_ENDPOINT_DIR_MASK;
+	num = 1 << num;
+
+	if ((dir == TX) && (num & ep_addr_txdbg_mask))
+		return 1;
+	if ((dir == RX) && (num & ep_addr_rxdbg_mask))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dbg_print: prints the common part of the event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ * @extra:  extra information
+ *
+ * Records the event in the dbg_data ring (under dbg_data.lck with IRQs
+ * disabled) and optionally mirrors it to the console when dbg_data.tty
+ * is set.
+ */
+static void dbg_print(u8 addr, const char *name, int status, const char *extra)
+{
+	struct timeval tval;
+	unsigned int stamp;
+	unsigned long flags;
+
+	if (!allow_dbg_print(addr))
+		return;
+
+	write_lock_irqsave(&dbg_data.lck, flags);
+
+	do_gettimeofday(&tval);
+	stamp = tval.tv_sec & 0xFFFF;	/* 2^32 = 4294967296. Limit to 4096s */
+	stamp = stamp * 1000000 + tval.tv_usec;
+
+	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
+		  "%04X\t? %02X %-7.7s %4i ?\t%s\n",
+		  stamp, addr, name, status, extra);
+
+	dbg_inc(&dbg_data.idx);
+
+	write_unlock_irqrestore(&dbg_data.lck, flags);
+
+	if (dbg_data.tty != 0)
+		pr_notice("%04X\t? %02X %-7.7s %4i ?\t%s\n",
+			  stamp, addr, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @addr:   endpoint address
+ * @token:  transfer descriptor token (total bytes + status fields)
+ * @status: status
+ */
+static void dbg_done(u8 addr, const u32 token, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	scnprintf(msg, sizeof(msg), "%d %02X",
+		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
+		  (int)(token & TD_STATUS)      >> ffs_nr(TD_STATUS));
+	dbg_print(addr, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @addr:   endpoint address
+ * @name:   event name (NULL is silently ignored)
+ * @status: status
+ */
+static void dbg_event(u8 addr, const char *name, int status)
+{
+	if (name != NULL)
+		dbg_print(addr, name, status, "");
+}
+
+/**
+ * dbg_queue: prints a QUEUE event
+ * @addr:   endpoint address
+ * @req:    USB request (NULL is silently ignored)
+ * @status: status
+ */
+static void dbg_queue(u8 addr, const struct usb_request *req, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%d %d", !req->no_interrupt, req->length);
+		dbg_print(addr, "QUEUE", status, msg);
+	}
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @addr: endpoint address
+ * @req:  setup request (NULL is silently ignored)
+ */
+static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%02X %02X %04X %04X %d", req->bRequestType,
+			  req->bRequest, le16_to_cpu(req->wValue),
+			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+		dbg_print(addr, "SETUP", 0, msg);
+	}
+}
+
+/**
+ * dbg_usb_op_fail: prints USB Operation FAIL event
+ * @addr: endpoint address
+ * @name: failed operation name (e.g. "FLUSHF")
+ * @mep:  endpoint structure (NULL is silently ignored)
+ *
+ * Dumps the endpoint's queue head and every queued request/TD.
+ */
+static void dbg_usb_op_fail(u8 addr, const char *name,
+				const struct ci13xxx_ep *mep)
+{
+	char msg[DBG_DATA_MSG];
+	struct ci13xxx_req *req;
+	struct list_head *ptr = NULL;
+
+	if (mep != NULL) {
+		scnprintf(msg, sizeof(msg),
+			"%s Fail EP%d%s QH:%08X",
+			name, mep->num,
+			mep->dir ? "IN" : "OUT", mep->qh.ptr->cap);
+		dbg_print(addr, name, 0, msg);
+		scnprintf(msg, sizeof(msg),
+			"cap:%08X %08X %08X\n",
+			mep->qh.ptr->curr, mep->qh.ptr->td.next,
+			mep->qh.ptr->td.token);
+		dbg_print(addr, "QHEAD", 0, msg);
+
+		list_for_each(ptr, &mep->qh.queue) {
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+			scnprintf(msg, sizeof(msg),
+				"%08X:%08X:%08X\n",
+				req->dma, req->ptr->next,
+				req->ptr->token);
+			dbg_print(addr, "REQ", 0, msg);
+			scnprintf(msg, sizeof(msg), "%08X:%d\n",
+				req->ptr->page[0],
+				req->req.status);
+			dbg_print(addr, "REQPAGE", 0, msg);
+		}
+	}
+}
+
+/**
+ * show_events: displays the event buffer
+ *
+ * Walks the ring backwards from the newest entry to find how many
+ * messages fit in PAGE_SIZE, then prints them oldest-first.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_events(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	read_lock_irqsave(&dbg_data.lck, flags);
+
+	i = dbg_data.idx;
+	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
+		n += strlen(dbg_data.buf[i]);
+		if (n >= PAGE_SIZE) {
+			n -= strlen(dbg_data.buf[i]);
+			break;
+		}
+	}
+	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
+		j += scnprintf(buf + j, PAGE_SIZE - j,
+			       "%s", dbg_data.buf[i]);
+
+	read_unlock_irqrestore(&dbg_data.lck, flags);
+
+	return n;
+}
+
+/**
+ * store_events: configure if events are going to be also printed to console
+ *
+ * Accepts "0" or "1"; always consumes the whole write (returns count)
+ * even on bad input.
+ *
+ * NOTE(review): dbg_trace uses %d for the size_t 'count' -- should be
+ * %zu; left untouched here.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_events(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned tty;
+
+	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
+		dev_err(dev, "<1|0>: enable|disable console log\n");
+		goto done;
+	}
+
+	dbg_data.tty = tty;
+	dev_info(dev, "tty = %u", dbg_data.tty);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
+
+/**
+ * show_inters: interrupt status, enable status and historic
+ *
+ * Prints current USBSTS/USBINTR, per-source counters, then decodes the
+ * ring of recently handled interrupt words; all under the udc lock.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 intr;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "status = %08x\n", hw_read_intr_status());
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "enable = %08x\n", hw_read_intr_enable());
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
+		       isr_statistics.test);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? ui  = %d\n",
+		       isr_statistics.ui);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
+		       isr_statistics.uei);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
+		       isr_statistics.pci);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
+		       isr_statistics.uri);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
+		       isr_statistics.sli);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
+		       isr_statistics.none);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
+		       isr_statistics.hndl.cnt);
+
+	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
+		i   &= ISR_MASK;
+		intr = isr_statistics.hndl.buf[i];
+
+		if (USBi_UI  & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
+		intr &= ~USBi_UI;
+		if (USBi_UEI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
+		intr &= ~USBi_UEI;
+		if (USBi_PCI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
+		intr &= ~USBi_PCI;
+		if (USBi_URI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
+		intr &= ~USBi_URI;
+		if (USBi_SLI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
+		intr &= ~USBi_SLI;
+		if (intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
+		if (isr_statistics.hndl.buf[i])
+			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	}
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+
+/**
+ * store_inters: enable & force or disable an individual interrupt
+ * (to be used for test purposes only)
+ *
+ * Input format: "<1|0> <bit>" - force (1) or clear (0) interrupt <bit>.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned en, bit;
+
+	/* count is a size_t, so print it with %zu (was mismatched %d) */
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
+		dev_err(dev, "<1|0> <bit>: enable|disable interrupt\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (en) {
+		/* forcing a valid interrupt counts as a test event */
+		if (hw_intr_force(bit))
+			dev_err(dev, "invalid bit number\n");
+		else
+			isr_statistics.test++;
+	} else {
+		if (hw_intr_clear(bit))
+			dev_err(dev, "invalid bit number\n");
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+done:
+	return count;
+}
+static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);
+
+/**
+ * show_port_test: reads port test mode
+ *
+ * Prints the currently programmed USB port test mode.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_port_test(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	/* read the test mode from hardware under the controller lock */
+	spin_lock_irqsave(udc->lock, flags);
+	mode = hw_port_test_get();
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
+}
+
+/**
+ * store_port_test: writes port test mode
+ *
+ * Input format: "<mode>" - test mode to program into the port.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_port_test(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	/* count is a size_t, so print it with %zu (was mismatched %d) */
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &mode) != 1) {
+		dev_err(dev, "<mode>: set port test mode\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_port_test_set(mode))
+		dev_err(dev, "invalid mode\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+done:
+	return count;
+}
+static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
+		   show_port_test, store_port_test);
+
+/**
+ * show_qheads: DMA contents of all queue heads
+ *
+ * Endpoints are laid out as two halves of ci13xxx_ep[]: RX endpoints in
+ * [0, hw_ep_max/2) and the matching TX endpoints offset by hw_ep_max/2.
+ * Each queue head is dumped as raw 32-bit words.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max/2; i++) {
+		/* RX/TX pair for logical endpoint i */
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "EP=%02i: RX=%08X TX=%08X\n",
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
+		/* dump each QH word-by-word, RX and TX side by side */
+		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       " %04X:    %08X    %08X\n", j,
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
+		}
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
+
+/**
+ * show_registers: dumps all registers
+ *
+ * Snapshots up to DUMP_ENTRIES 32-bit registers into a temporary buffer
+ * under the lock, then formats them outside the lock.
+ *
+ * Check "device.h" for details
+ */
+#define DUMP_ENTRIES	512
+static ssize_t show_registers(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 *dump;
+	unsigned i, k, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	/* scratch buffer so register reads stay short and atomic */
+	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
+	if (!dump) {
+		dev_err(dev, "%s: out of memory\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	k = hw_register_read(dump, DUMP_ENTRIES);
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* NOTE(review): full dump may exceed PAGE_SIZE; scnprintf truncates
+	 * safely but later registers would then be silently dropped */
+	for (i = 0; i < k; i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "reg[0x%04X] = 0x%08X\n",
+			       i * (unsigned)sizeof(u32), dump[i]);
+	}
+	kfree(dump);
+
+	return n;
+}
+
+/**
+ * store_registers: writes value to register address
+ *
+ * Input format: "<addr> <data>" - write <data> to register <addr>
+ * (parsed with %li, so hex/octal prefixes are accepted).
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_registers(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long addr, data, flags;
+
+	/* count is a size_t, so print it with %zu (was mismatched %d) */
+	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
+		dev_err(dev, "<addr> <data>: write data to register address\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_register_write(addr, data))
+		dev_err(dev, "invalid address range\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+done:
+	return count;
+}
+static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
+		   show_registers, store_registers);
+
+/**
+ * show_requests: DMA contents of all requests currently queued (all endpts)
+ *
+ * Endpoints [0, hw_ep_max/2) are RX, the rest TX; the logical EP number is
+ * therefore i modulo hw_ep_max/2.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	struct list_head   *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max; i++)
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+
+			/* parenthesized: "i % hw_ep_max/2" parsed as
+			 * (i % hw_ep_max)/2 and printed wrong EP numbers */
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "EP=%02i: TD=%08X %s\n",
+				       i % (hw_ep_max/2), (u32)req->dma,
+				       ((i < hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
+				n += scnprintf(buf + n, PAGE_SIZE - n,
+					       " %04X:    %08X\n", j,
+					       *((u32 *)req->ptr + j));
+		}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
+
+/* EP# and Direction: debug hook that re-primes an endpoint with the head
+ * request of its queue and busy-waits until the controller accepts it.
+ * Input format: "<ep_num> <dir>" (dir: 0 = OUT/RX, non-zero = IN/TX). */
+static ssize_t prime_ept(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct ci13xxx_ep *mEp;
+	unsigned int ep_num, dir;
+	int n;
+	struct ci13xxx_req *mReq = NULL;
+
+	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
+		dev_err(dev, "<ep_num> <dir>: prime the ep");
+		goto done;
+	}
+
+	/* NOTE(review): ep_num is not range-checked against hw_ep_max/2;
+	 * an out-of-range value indexes past ci13xxx_ep[] - confirm this
+	 * root-only debug file is intentionally unchecked */
+	if (dir)
+		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
+	else
+		mEp = &udc->ci13xxx_ep[ep_num];
+
+	n = hw_ep_bit(mEp->num, mEp->dir);
+	/* point the QH at the first queued TD and clear its status bits */
+	mReq =  list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
+	mEp->qh.ptr->td.next = mReq->dma;
+	mEp->qh.ptr->td.token &= ~TD_STATUS;
+
+	/* ensure QH updates reach memory before the prime is triggered */
+	wmb();
+
+	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+		cpu_relax();
+
+	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s\n", __func__,
+			hw_cread(CAP_ENDPTPRIME, ~0),
+			hw_cread(CAP_ENDPTSTAT, ~0),
+			mEp->num, mEp->dir ? "IN" : "OUT");
+done:
+	return count;
+
+}
+static DEVICE_ATTR(prime, S_IWUSR, NULL, prime_ept);
+
+/* EP# and Direction: debug hook that logs an endpoint's prime/stat state,
+ * dTD-update/prime failure counters, its queue head, and every queued TD.
+ * Input format: "<ep_num> <dir>" (dir: 0 = OUT/RX, non-zero = IN/TX). */
+static ssize_t print_dtds(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct ci13xxx_ep *mEp;
+	unsigned int ep_num, dir;
+	int n;
+	struct list_head   *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+
+	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
+		dev_err(dev, "<ep_num> <dir>: to print dtds");
+		goto done;
+	}
+
+	/* NOTE(review): ep_num is not range-checked against hw_ep_max/2;
+	 * same caveat as prime_ept */
+	if (dir)
+		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
+	else
+		mEp = &udc->ci13xxx_ep[ep_num];
+
+	n = hw_ep_bit(mEp->num, mEp->dir);
+	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s"
+			"dTD_update_fail_count: %lu "
+			"mEp->dTD_update_fail_count: %lu"
+			"mEp->prime_fail_count: %lu\n", __func__,
+			hw_cread(CAP_ENDPTPRIME, ~0),
+			hw_cread(CAP_ENDPTSTAT, ~0),
+			mEp->num, mEp->dir ? "IN" : "OUT",
+			udc->dTD_update_fail_count,
+			mEp->dTD_update_fail_count,
+			mEp->prime_fail_count);
+
+	pr_info("QH: cap:%08x cur:%08x next:%08x token:%08x\n",
+			mEp->qh.ptr->cap, mEp->qh.ptr->curr,
+			mEp->qh.ptr->td.next, mEp->qh.ptr->td.token);
+
+	/* walk every request queued on this endpoint and dump its TD */
+	list_for_each(ptr, &mEp->qh.queue) {
+		req = list_entry(ptr, struct ci13xxx_req, queue);
+
+		pr_info("\treq:%08x next:%08x token:%08x page0:%08x status:%d\n",
+				req->dma, req->ptr->next, req->ptr->token,
+				req->ptr->page[0], req->req.status);
+	}
+done:
+	return count;
+
+}
+static DEVICE_ATTR(dtds, S_IWUSR, NULL, print_dtds);
+
+/* ci13xxx_wakeup: initiate USB remote wakeup (usb_gadget_ops hook).
+ * Returns 0 on success, -EOPNOTSUPP if the host did not enable remote
+ * wakeup, -EINVAL if the port is not actually suspended. */
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	trace();
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!udc->remote_wakeup) {
+		ret = -EOPNOTSUPP;
+		dbg_trace("remote wakeup feature is not enabled\n");
+		goto out;
+	}
+	/* drop the lock: notify_event and the PHY resume below may sleep */
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	udc->udc_driver->notify_event(udc,
+		CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT);
+
+	if (udc->transceiver)
+		usb_phy_set_suspend(udc->transceiver, 0);
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+		ret = -EINVAL;
+		dbg_trace("port is not suspended\n");
+		goto out;
+	}
+	/* signal Force Port Resume to wake the link */
+	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+	spin_unlock_irqrestore(udc->lock, flags);
+	return ret;
+}
+
+/* usb_do_remote_wakeup: deferred-work wrapper around ci13xxx_wakeup.
+ * Re-checks the wakeup preconditions under the lock because the work
+ * item cannot be cancelled from the interrupt handler. */
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+	bool do_wake;
+
+	/*
+	 * This work can not be canceled from interrupt handler. Check
+	 * if wakeup conditions are still met.
+	 */
+	spin_lock_irqsave(udc->lock, flags);
+	do_wake = udc->suspended && udc->remote_wakeup;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (do_wake)
+		ci13xxx_wakeup(&udc->gadget);
+}
+
+/* sysfs "wakeup" store handler: any write attempts a remote wakeup.
+ * The return value of ci13xxx_wakeup is intentionally ignored; the
+ * write always "succeeds" from userspace's point of view. */
+static ssize_t usb_remote_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+
+	ci13xxx_wakeup(&udc->gadget);
+
+	return count;
+}
+/* NULL (not 0) for the absent show handler, matching sparse expectations */
+static DEVICE_ATTR(wakeup, S_IWUSR, NULL, usb_remote_wakeup);
+
+/**
+ * dbg_create_files: initializes the attribute interface
+ * @dev: device
+ *
+ * Creates the debug sysfs attributes in order; on failure, unwinds
+ * exactly the attributes that were already created (the previous
+ * unwind chain removed the attribute that had just failed and never
+ * removed "requests" at all).
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_create_files(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev == NULL)
+		return -EINVAL;
+	retval = device_create_file(dev, &dev_attr_device);
+	if (retval)
+		goto done;
+	retval = device_create_file(dev, &dev_attr_driver);
+	if (retval)
+		goto rm_device;
+	retval = device_create_file(dev, &dev_attr_events);
+	if (retval)
+		goto rm_driver;
+	retval = device_create_file(dev, &dev_attr_inters);
+	if (retval)
+		goto rm_events;
+	retval = device_create_file(dev, &dev_attr_port_test);
+	if (retval)
+		goto rm_inters;
+	retval = device_create_file(dev, &dev_attr_qheads);
+	if (retval)
+		goto rm_port_test;
+	retval = device_create_file(dev, &dev_attr_registers);
+	if (retval)
+		goto rm_qheads;
+	retval = device_create_file(dev, &dev_attr_requests);
+	if (retval)
+		goto rm_registers;
+	retval = device_create_file(dev, &dev_attr_wakeup);
+	if (retval)
+		goto rm_requests;
+	retval = device_create_file(dev, &dev_attr_prime);
+	if (retval)
+		goto rm_wakeup;
+	retval = device_create_file(dev, &dev_attr_dtds);
+	if (retval)
+		goto rm_prime;
+
+	return 0;
+
+	/* unwind in reverse creation order; each label removes only
+	 * attributes that were successfully created before the failure */
+rm_prime:
+	device_remove_file(dev, &dev_attr_prime);
+rm_wakeup:
+	device_remove_file(dev, &dev_attr_wakeup);
+rm_requests:
+	device_remove_file(dev, &dev_attr_requests);
+rm_registers:
+	device_remove_file(dev, &dev_attr_registers);
+rm_qheads:
+	device_remove_file(dev, &dev_attr_qheads);
+rm_port_test:
+	device_remove_file(dev, &dev_attr_port_test);
+rm_inters:
+	device_remove_file(dev, &dev_attr_inters);
+rm_events:
+	device_remove_file(dev, &dev_attr_events);
+rm_driver:
+	device_remove_file(dev, &dev_attr_driver);
+rm_device:
+	device_remove_file(dev, &dev_attr_device);
+done:
+	return retval;
+}
+
+/**
+ * dbg_remove_files: destroys the attribute interface
+ * @dev: device
+ *
+ * Removes every attribute created by dbg_create_files, including
+ * "prime" and "dtds" which the previous version never removed.
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_remove_files(struct device *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+	device_remove_file(dev, &dev_attr_dtds);
+	device_remove_file(dev, &dev_attr_prime);
+	device_remove_file(dev, &dev_attr_wakeup);
+	device_remove_file(dev, &dev_attr_requests);
+	device_remove_file(dev, &dev_attr_registers);
+	device_remove_file(dev, &dev_attr_qheads);
+	device_remove_file(dev, &dev_attr_port_test);
+	device_remove_file(dev, &dev_attr_inters);
+	device_remove_file(dev, &dev_attr_events);
+	device_remove_file(dev, &dev_attr_driver);
+	device_remove_file(dev, &dev_attr_device);
+	return 0;
+}
+
+/* dump_usb_info: EBI-error callback; snapshots every queued request
+ * (buffer pointer, length, EP number/direction) into a linked list of
+ * ci13xxx_ebi_err_entry records for post-mortem analysis. Runs at most
+ * once per boot (guarded by the static 'count'). Called in a context
+ * where sleeping is not allowed, hence GFP_ATOMIC. */
+static void dump_usb_info(void *ignore, unsigned int ebi_addr,
+	unsigned int ebi_apacket0, unsigned int ebi_apacket1)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+	struct list_head *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+	struct ci13xxx_ep *mEp;
+	unsigned i;
+	struct ci13xxx_ebi_err_entry *temp_dump;
+	static int count;
+	u32 epdir = 0;
+
+	/* only capture the first EBI error; later ones are ignored */
+	if (count)
+		return;
+	count++;
+
+	pr_info("%s: USB EBI error detected\n", __func__);
+
+	/* NOTE(review): ebi_err_data and the entry chain below are never
+	 * freed here - presumably consumed/kept for crash analysis; the
+	 * final temp_dump->next is allocated but its fields stay unset */
+	ebi_err_data = kmalloc(sizeof(struct ci13xxx_ebi_err_data),
+				GFP_ATOMIC);
+	if (!ebi_err_data) {
+		pr_err("%s: memory alloc failed for ebi_err_data\n", __func__);
+		return;
+	}
+
+	ebi_err_data->ebi_err_entry = kmalloc(
+					sizeof(struct ci13xxx_ebi_err_entry),
+					GFP_ATOMIC);
+	if (!ebi_err_data->ebi_err_entry) {
+		kfree(ebi_err_data);
+		pr_err("%s: memory alloc failed for ebi_err_entry\n", __func__);
+		return;
+	}
+
+	ebi_err_data->ebi_err_addr = ebi_addr;
+	ebi_err_data->apkt0 = ebi_apacket0;
+	ebi_err_data->apkt1 = ebi_apacket1;
+
+	temp_dump = ebi_err_data->ebi_err_entry;
+	pr_info("\n DUMPING USB Requests Information\n");
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max; i++) {
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue) {
+			mEp = &udc->ci13xxx_ep[i];
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+
+			/* record buffer, length and EP#|dir (dir in bit 15) */
+			temp_dump->usb_req_buf = req->req.buf;
+			temp_dump->usb_req_length = req->req.length;
+			epdir = mEp->dir;
+			temp_dump->ep_info = mEp->num | (epdir << 15);
+
+			temp_dump->next = kmalloc(
+					sizeof(struct ci13xxx_ebi_err_entry),
+					GFP_ATOMIC);
+			if (!temp_dump->next) {
+				pr_err("%s: memory alloc failed\n", __func__);
+				spin_unlock_irqrestore(udc->lock, flags);
+				return;
+			}
+			temp_dump = temp_dump->next;
+		}
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+}
+
+/******************************************************************************
+ * UTIL block
+ *****************************************************************************/
+/**
+ * _usb_addr: calculates endpoint address from direction & number
+ * @ep: endpoint
+ *
+ * TX endpoints carry the IN direction bit (USB_ENDPOINT_DIR_MASK).
+ */
+static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+{
+	u8 addr = ep->num;
+
+	if (ep->dir == TX)
+		addr |= USB_ENDPOINT_DIR_MASK;
+	return addr;
+}
+
+/* ep_prime_timer_func: periodic watchdog for a pending endpoint prime.
+ * While the controller still shows the prime bit set and the head TD is
+ * still active, re-arms itself up to MAX_PRIME_CHECK_RETRY times; after
+ * that it dumps the QH/TD state and counts a prime failure. */
+static void ep_prime_timer_func(unsigned long data)
+{
+	struct ci13xxx_ep *mep = (struct ci13xxx_ep *)data;
+	struct ci13xxx_req *req;
+	struct list_head *ptr = NULL;
+	int n = hw_ep_bit(mep->num, mep->dir);
+	unsigned long flags;
+
+
+	spin_lock_irqsave(mep->lock, flags);
+
+	/* no point watching the prime while the bus is down or suspended */
+	if (_udc && (!_udc->vbus_active || _udc->suspended)) {
+		pr_debug("ep%d%s prime timer when vbus_active=%d,suspend=%d\n",
+			mep->num, mep->dir ? "IN" : "OUT",
+			_udc->vbus_active, _udc->suspended);
+		goto out;
+	}
+
+	/* prime already consumed by the controller: nothing to watch */
+	if (!hw_cread(CAP_ENDPTPRIME, BIT(n)))
+		goto out;
+
+	if (list_empty(&mep->qh.queue))
+		goto out;
+
+	req = list_entry(mep->qh.queue.next, struct ci13xxx_req, queue);
+
+	/* barrier before inspecting the DMA-visible TD token */
+	mb();
+	if (!(TD_STATUS_ACTIVE & req->ptr->token))
+		goto out;
+
+	mep->prime_timer_count++;
+	if (mep->prime_timer_count == MAX_PRIME_CHECK_RETRY) {
+		/* give up: log QH and all queued TDs, count the failure */
+		mep->prime_timer_count = 0;
+		pr_info("ep%d dir:%s QH:cap:%08x cur:%08x next:%08x tkn:%08x\n",
+				mep->num, mep->dir ? "IN" : "OUT",
+				mep->qh.ptr->cap, mep->qh.ptr->curr,
+				mep->qh.ptr->td.next, mep->qh.ptr->td.token);
+		list_for_each(ptr, &mep->qh.queue) {
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+			pr_info("\treq:%08xnext:%08xtkn:%08xpage0:%08xsts:%d\n",
+					req->dma, req->ptr->next,
+					req->ptr->token, req->ptr->page[0],
+					req->req.status);
+		}
+		dbg_usb_op_fail(0xFF, "PRIMEF", mep);
+		mep->prime_fail_count++;
+	} else {
+		mod_timer(&mep->prime_timer, EP_PRIME_CHECK_DELAY);
+	}
+
+	spin_unlock_irqrestore(mep->lock, flags);
+	return;
+
+out:
+	mep->prime_timer_count = 0;
+	spin_unlock_irqrestore(mep->lock, flags);
+
+}
+
+/**
+ * _hardware_enqueue: configures a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request to queue
+ *
+ * Maps the request for DMA, builds its TD (plus an extra zero-length TD
+ * when a ZLP is needed), links it to the endpoint's TD chain or queue
+ * head, and primes the endpoint. Caller must hold the controller lock.
+ *
+ * This function returns an error code
+ */
+static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	unsigned i;
+	int ret = 0;
+	unsigned length = mReq->req.length;
+	struct ci13xxx *udc = _udc;
+
+	trace("%p, %p", mEp, mReq);
+
+	/* don't queue twice */
+	if (mReq->req.status == -EALREADY)
+		return -EALREADY;
+
+	/* -EALREADY marks the request as owned by hardware */
+	mReq->req.status = -EALREADY;
+	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
+		mReq->req.dma = \
+			dma_map_single(mEp->device, mReq->req.buf,
+				       length, mEp->dir ? DMA_TO_DEVICE :
+				       DMA_FROM_DEVICE);
+		if (mReq->req.dma == 0)
+			return -ENOMEM;
+
+		mReq->map = 1;
+	}
+
+	/* a max-packet-multiple transfer with req.zero set needs an extra
+	 * zero-length TD so the host sees a short packet */
+	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
+		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+					   &mReq->zdma);
+		if (mReq->zptr == NULL) {
+			if (mReq->map) {
+				dma_unmap_single(mEp->device, mReq->req.dma,
+					length, mEp->dir ? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+				mReq->req.dma = DMA_ADDR_INVALID;
+				mReq->map     = 0;
+			}
+			return -ENOMEM;
+		}
+		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
+		mReq->zptr->next    = TD_TERMINATE;
+		mReq->zptr->token   = TD_STATUS_ACTIVE;
+		if (!mReq->req.no_interrupt)
+			mReq->zptr->token   |= TD_IOC;
+	}
+	/*
+	 * TD configuration
+	 * TODO - handle requests which spawns into several TDs
+	 */
+	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
+	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
+	mReq->ptr->token   &= TD_TOTAL_BYTES;
+	mReq->ptr->token   |= TD_STATUS_ACTIVE;
+	if (mReq->zptr) {
+		mReq->ptr->next    = mReq->zdma;
+	} else {
+		mReq->ptr->next    = TD_TERMINATE;
+		if (!mReq->req.no_interrupt)
+			mReq->ptr->token  |= TD_IOC;
+	}
+
+	/* MSM Specific: updating the request as required for
+	 * SPS mode. Enable MSM DMA engine acording
+	 * to the UDC private data in the request.
+	 */
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
+		if (mReq->req.udc_priv & MSM_SPS_MODE) {
+			mReq->ptr->token = TD_STATUS_ACTIVE;
+			if (mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER)
+				mReq->ptr->next = TD_TERMINATE;
+			else
+				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
+			if (!mReq->req.no_interrupt)
+				mReq->ptr->token |= MSM_ETD_IOC;
+		}
+		mReq->req.dma = 0;
+	}
+
+	/* fill the 5 page pointers; pages 1..4 are 4K-aligned successors */
+	mReq->ptr->page[0]  = mReq->req.dma;
+	for (i = 1; i < 5; i++)
+		mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
+							~TD_RESERVED_MASK;
+	wmb();
+
+	/* Remote Wakeup: a queue while suspended triggers wakeup (if the
+	 * host enabled it) and defers the actual prime to rw_work */
+	if (udc->suspended) {
+		if (!udc->remote_wakeup) {
+			mReq->req.status = -EAGAIN;
+			dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
+				__func__, mEp->num);
+			return -EAGAIN;
+		}
+		usb_phy_set_suspend(udc->transceiver, 0);
+		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
+	/* endpoint already has queued TDs: append to the chain and use the
+	 * ATDTW tripwire dance to safely check whether HW is still active */
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReqPrev;
+		int n = hw_ep_bit(mEp->num, mEp->dir);
+		int tmp_stat;
+		ktime_t start, diff;
+
+		mReqPrev = list_entry(mEp->qh.queue.prev,
+				struct ci13xxx_req, queue);
+		if (mReqPrev->zptr)
+			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
+		else
+			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
+		wmb();
+		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+			goto done;
+		start = ktime_get();
+		do {
+			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
+			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+			diff = ktime_sub(ktime_get(), start);
+			/* poll for max. 100ms */
+			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
+				if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
+					break;
+				printk_ratelimited(KERN_ERR
+				"%s:queue failed ep#%d %s\n",
+				__func__, mEp->num, mEp->dir ? "IN" : "OUT");
+				return -EAGAIN;
+			}
+		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
+		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
+		if (tmp_stat)
+			goto done;
+	}
+
+	/* QH configuration: if the head TD is still active the controller
+	 * will pick up the chain; just point the QH at it and prime */
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReq = \
+			list_entry(mEp->qh.queue.next,
+				   struct ci13xxx_req, queue);
+
+		if (TD_STATUS_ACTIVE & mReq->ptr->token) {
+			mEp->qh.ptr->td.next   = mReq->dma;
+			mEp->qh.ptr->td.token &= ~TD_STATUS;
+			goto prime;
+		}
+	}
+
+	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
+		if (mReq->req.udc_priv & MSM_SPS_MODE) {
+			mEp->qh.ptr->td.next   |= MSM_ETD_TYPE;
+			i = hw_cread(CAP_ENDPTPIPEID +
+						 mEp->num * sizeof(u32), ~0);
+			/* Read current value of this EPs pipe id */
+			i = (mEp->dir == TX) ?
+				((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) :
+					(i & MSM_PIPE_ID_MASK);
+			/* If requested pipe id is different from current,
+			   then write it */
+			if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) {
+				if (mEp->dir == TX)
+					hw_cwrite(
+						CAP_ENDPTPIPEID +
+							mEp->num * sizeof(u32),
+						MSM_PIPE_ID_MASK <<
+							MSM_TX_PIPE_ID_OFS,
+						(mReq->req.udc_priv &
+							MSM_PIPE_ID_MASK)
+							<< MSM_TX_PIPE_ID_OFS);
+				else
+					hw_cwrite(
+						CAP_ENDPTPIPEID +
+							mEp->num * sizeof(u32),
+						MSM_PIPE_ID_MASK,
+						mReq->req.udc_priv &
+							MSM_PIPE_ID_MASK);
+			}
+		}
+	}
+
+	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
+	mEp->qh.ptr->cap |=  QH_ZLT;
+
+prime:
+	wmb();   /* synchronize before ep prime */
+
+	ret = hw_ep_prime(mEp->num, mEp->dir,
+			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
+	if (!ret)
+		/* start the prime watchdog (see ep_prime_timer_func) */
+		mod_timer(&mEp->prime_timer, EP_PRIME_CHECK_DELAY);
+done:
+	return ret;
+}
+
+/**
+ * _hardware_dequeue: handles a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request to retire
+ *
+ * Returns the number of bytes actually transferred, -EINVAL if the
+ * request was never queued, or -EBUSY if hardware still owns the TD.
+ */
+static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	trace("%p, %p", mEp, mReq);
+
+	if (mReq->req.status != -EALREADY)
+		return -EINVAL;
+
+	/* clean speculative fetches on req->ptr->token */
+	mb();
+
+	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
+		return -EBUSY;
+
+	/* finite SPS-mode transfers are completed elsewhere, never here */
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
+		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
+			(mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER))
+			return -EBUSY;
+	if (mReq->zptr) {
+		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
+			return -EBUSY;
+
+		/* The controller may access this dTD one more time.
+		 * Defer freeing this to next zero length dTD completion.
+		 * It is safe to assume that controller will no longer
+		 * access the previous dTD after next dTD completion.
+		 */
+		if (mEp->last_zptr)
+			dma_pool_free(mEp->td_pool, mEp->last_zptr,
+					mEp->last_zdma);
+		mEp->last_zptr = mReq->zptr;
+		mEp->last_zdma = mReq->zdma;
+
+		mReq->zptr = NULL;
+	}
+
+	/* note: immediately overwritten from the TD token below */
+	mReq->req.status = 0;
+
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = DMA_ADDR_INVALID;
+		mReq->map     = 0;
+	}
+
+	/* collapse any TD error bit (halted/data/transaction) into -1 */
+	mReq->req.status = mReq->ptr->token & TD_STATUS;
+	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+
+	/* actual = requested length minus the TD's residual byte count */
+	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
+	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
+	mReq->req.actual   = mReq->req.length - mReq->req.actual;
+	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+
+	return mReq->req.actual;
+}
+
+/**
+ * restore_original_req: Restore original req's attributes
+ * @mReq: Request
+ *
+ * Puts back the caller's original buffer/length (saved in mReq->multi)
+ * and clears the multi-TD bookkeeping. Call this before completing a
+ * large request (>16K) that was split internally.
+ */
+static void restore_original_req(struct ci13xxx_req *mReq)
+{
+	struct usb_request *req = &mReq->req;
+
+	req->buf = mReq->multi.buf;
+	req->length = mReq->multi.len;
+	if (!req->status)
+		req->actual = mReq->multi.actual;
+
+	mReq->multi.buf = NULL;
+	mReq->multi.len = 0;
+	mReq->multi.actual = 0;
+}
+
+/**
+ * _ep_nuke: dequeues all endpoint requests
+ * @mEp: endpoint
+ *
+ * Flushes the endpoint in hardware, then pops every queued request,
+ * unmapping its DMA buffer, marking it -ESHUTDOWN and calling its
+ * completion (with the lock temporarily dropped).
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _ep_nuke(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_ep *mEpTemp = mEp;
+	unsigned val;
+
+	trace("%p", mEp);
+
+	if (mEp == NULL)
+		return -EINVAL;
+
+	/* stop the prime watchdog for this endpoint */
+	del_timer(&mEp->prime_timer);
+	mEp->prime_timer_count = 0;
+
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	while (!list_empty(&mEp->qh.queue)) {
+
+		/* pop oldest request */
+		struct ci13xxx_req *mReq = \
+			list_entry(mEp->qh.queue.next,
+				   struct ci13xxx_req, queue);
+		list_del_init(&mReq->queue);
+
+		/* MSM Specific: Clear end point specific register */
+		if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
+			if (mReq->req.udc_priv & MSM_SPS_MODE) {
+				val = hw_cread(CAP_ENDPTPIPEID +
+					mEp->num * sizeof(u32),
+					~0);
+
+				if (val != MSM_EP_PIPE_ID_RESET_VAL)
+					hw_cwrite(
+						CAP_ENDPTPIPEID +
+						 mEp->num * sizeof(u32),
+						~0, MSM_EP_PIPE_ID_RESET_VAL);
+			}
+		}
+		mReq->req.status = -ESHUTDOWN;
+
+		if (mReq->map) {
+			dma_unmap_single(mEp->device, mReq->req.dma,
+				mReq->req.length,
+				mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			mReq->req.dma = DMA_ADDR_INVALID;
+			mReq->map     = 0;
+		}
+
+		/* a split large request must present original buf/len to
+		 * its completion callback */
+		if (mEp->multi_req) {
+			restore_original_req(mReq);
+			mEp->multi_req = false;
+		}
+
+		if (mReq->req.complete != NULL) {
+			/* completions may queue/sleep: drop the lock; control
+			 * data stages always complete on ep0in */
+			spin_unlock(mEp->lock);
+			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+				mReq->req.length)
+				mEpTemp = &_udc->ep0in;
+			mReq->req.complete(&mEpTemp->ep, &mReq->req);
+			if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+				mReq->req.complete = NULL;
+			spin_lock(mEp->lock);
+		}
+	}
+	return 0;
+}
+
+/**
+ * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+ * @gadget: gadget
+ *
+ * Resets gadget/OTG state, notifies the function driver of disconnect,
+ * then nukes both control endpoints. Called without the lock held.
+ *
+ * This function returns an error code
+ */
+static int _gadget_stop_activity(struct usb_gadget *gadget)
+{
+	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	trace("%p", gadget);
+
+	if (gadget == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->remote_wakeup = 0;
+	udc->suspended = 0;
+	udc->configured = 0;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* clear OTG/HNP/SRP negotiation state */
+	gadget->b_hnp_enable = 0;
+	gadget->a_hnp_support = 0;
+	gadget->host_request = 0;
+	gadget->otg_srp_reqd = 0;
+
+	/* NOTE(review): udc->driver is dereferenced without a NULL check -
+	 * confirm callers guarantee a bound function driver here */
+	udc->driver->disconnect(gadget);
+
+	spin_lock_irqsave(udc->lock, flags);
+	_ep_nuke(&udc->ep0out);
+	_ep_nuke(&udc->ep0in);
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* release the deferred zero-length TD kept by _hardware_dequeue */
+	if (udc->ep0in.last_zptr) {
+		dma_pool_free(udc->ep0in.td_pool, udc->ep0in.last_zptr,
+				udc->ep0in.last_zdma);
+		udc->ep0in.last_zptr = NULL;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * ISR block
+ *****************************************************************************/
+/**
+ * isr_reset_handler: USB reset interrupt handler
+ * @udc: UDC device
+ *
+ * This function resets USB engine after a bus reset occurred
+ */
+static void isr_reset_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	int retval;
+
+	trace("%p", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	dbg_event(0xFF, "BUS RST", 0);
+
+	/* drop the lock: resume/disconnect callbacks below may sleep */
+	spin_unlock(udc->lock);
+
+	/* a reset while suspended implies an implicit resume first */
+	if (udc->suspended) {
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_RESUME_EVENT);
+		if (udc->transceiver)
+			usb_phy_set_suspend(udc->transceiver, 0);
+		udc->driver->resume(&udc->gadget);
+		udc->suspended = 0;
+	}
+
+	/*stop charging upon reset */
+	if (udc->transceiver)
+		usb_phy_set_power(udc->transceiver, 100);
+
+	retval = _gadget_stop_activity(&udc->gadget);
+	if (retval)
+		goto done;
+
+	_udc->skip_flush = false;
+	retval = hw_usb_reset();
+	if (retval)
+		goto done;
+
+	spin_lock(udc->lock);
+
+	/* NOTE(review): the error paths above jump here WITHOUT re-taking
+	 * udc->lock, yet the function is annotated __acquires(udc->lock) -
+	 * confirm the caller tolerates returning unlocked on error */
+ done:
+	if (retval)
+		err("error: %i", retval);
+}
+
+/**
+ * isr_resume_handler: USB PCI (port change) interrupt handler
+ * @udc: UDC device
+ *
+ * Latches the negotiated speed and, if the bus was suspended, wakes the
+ * PHY and forwards resume to the function driver (lock dropped around
+ * the potentially sleeping callbacks).
+ */
+static void isr_resume_handler(struct ci13xxx *udc)
+{
+	udc->gadget.speed = hw_port_is_high_speed() ?
+		USB_SPEED_HIGH : USB_SPEED_FULL;
+	if (udc->suspended) {
+		spin_unlock(udc->lock);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			  CI13XXX_CONTROLLER_RESUME_EVENT);
+		if (udc->transceiver)
+			usb_phy_set_suspend(udc->transceiver, 0);
+		udc->driver->resume(&udc->gadget);
+		spin_lock(udc->lock);
+		udc->suspended = 0;
+	}
+}
+
+/**
+ * isr_suspend_handler: USB SLI (suspend) interrupt handler
+ * @udc: UDC device
+ *
+ * Forwards bus suspend to the function driver and suspends the PHY,
+ * but only when a session is active and not already suspended.
+ */
+static void isr_suspend_handler(struct ci13xxx *udc)
+{
+	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+		udc->vbus_active) {
+		if (udc->suspended == 0) {
+			/* callbacks may sleep: drop the lock around them */
+			spin_unlock(udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_SUSPEND_EVENT);
+			if (udc->transceiver)
+				usb_phy_set_suspend(udc->transceiver, 1);
+			spin_lock(udc->lock);
+			udc->suspended = 1;
+		}
+	}
+}
+
+/**
+ * isr_get_status_complete: get_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Only logs failures; the status buffer itself needs no cleanup here.
+ *
+ * Caller must release lock
+ */
+static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	if (req->status)
+		err("GET_STATUS failed");
+}
+
+/**
+ * isr_get_status_response: get_status request response
+ * @udc: udc struct
+ * @setup: setup request packet
+ *
+ * Builds and queues the GET_STATUS data stage on ep0in: device status
+ * (or the OTG host-request flag), or an endpoint's halt state.
+ *
+ * This function returns an error code
+ */
+static int isr_get_status_response(struct ci13xxx *udc,
+				   struct usb_ctrlrequest *setup)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_ep *mEp = &udc->ep0in;
+	struct usb_request *req = udc->status;
+	int dir, num, retval;
+
+	trace("%p, %p", mEp, setup);
+
+	if (mEp == NULL || setup == NULL)
+		return -EINVAL;
+
+	req->complete = isr_get_status_complete;
+	req->length   = 2;
+	req->buf      = udc->status_buf;
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* OTG status selector returns a 1-byte host-request flag */
+		if (setup->wIndex == OTG_STATUS_SELECTOR) {
+			*((u8 *)req->buf) = _udc->gadget.host_request <<
+						HOST_REQUEST_FLAG;
+			req->length = 1;
+		} else {
+			/* Assume that device is bus powered for now. */
+			*((u16 *)req->buf) = _udc->remote_wakeup << 1;
+		}
+		/* TODO: D1 - Remote Wakeup; D0 - Self Powered */
+		retval = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK) \
+		   == USB_RECIP_ENDPOINT) {
+		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
+			TX : RX;
+		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
+	}
+	/* else do nothing; reserved for future use */
+
+	/* usb_ep_queue may take the same lock: drop it across the call */
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, req, GFP_ATOMIC);
+	spin_lock(mEp->lock);
+	return retval;
+}
+
+/**
+ * isr_setup_status_complete: setup_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Caller must release lock. Put the port in test mode if test mode
+ * feature is selected.
+ */
+static void
+isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx *udc = req->context;
+	unsigned long flags;
+
+	trace("%p, %p", ep, req);
+
+	/* SET_FEATURE(TEST_MODE) takes effect after the status stage */
+	spin_lock_irqsave(udc->lock, flags);
+	if (udc->test_mode)
+		hw_port_test_set(udc->test_mode);
+	spin_unlock_irqrestore(udc->lock, flags);
+}
+
+/**
+ * isr_setup_status_phase: queues the status phase of a setup transation
+ * @udc: udc struct
+ *
+ * Queues a zero-length packet on the ep0 direction opposite to the data
+ * stage (status stage always flows the other way).
+ *
+ * This function returns an error code
+ */
+static int isr_setup_status_phase(struct ci13xxx *udc)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+ int retval;
+ struct ci13xxx_ep *mEp;
+
+ trace("%p", udc);
+
+ /* Status stage direction is the inverse of the data stage. */
+ mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
+ udc->status->context = udc;
+ udc->status->complete = isr_setup_status_complete;
+ udc->status->length = 0;
+
+ spin_unlock(mEp->lock);
+ retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
+ spin_lock(mEp->lock);
+
+ return retval;
+}
+
+/**
+ * isr_tr_complete_low: transaction complete low level handler
+ * @mEp: endpoint
+ *
+ * Walks the endpoint queue, retiring every request the hardware has
+ * finished, continuing split-up large (multi) requests, and invoking
+ * gadget completion callbacks with the lock dropped.
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+ struct ci13xxx_req *mReq, *mReqTemp;
+ struct ci13xxx_ep *mEpTemp = mEp;
+ int uninitialized_var(retval);
+ int req_dequeue = 1;
+ struct ci13xxx *udc = _udc;
+
+ trace("%p", mEp);
+
+ if (list_empty(&mEp->qh.queue))
+ return 0;
+
+ /* Transfer completed, so the priming watchdog is no longer needed. */
+ del_timer(&mEp->prime_timer);
+ mEp->prime_timer_count = 0;
+ list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+ queue) {
+dequeue:
+ retval = _hardware_dequeue(mEp, mReq);
+ if (retval < 0) {
+ /*
+ * FIXME: don't know exact delay
+ * required for HW to update dTD status
+ * bits. This is a temporary workaround till
+ * HW designers come back on this.
+ */
+ if (retval == -EBUSY && req_dequeue &&
+ (mEp->dir == 0 || mEp->num == 0)) {
+ /* Retry once after a short delay: the dTD
+ * status bits may not be visible yet. */
+ req_dequeue = 0;
+ udc->dTD_update_fail_count++;
+ mEp->dTD_update_fail_count++;
+ udelay(10);
+ goto dequeue;
+ }
+ break;
+ }
+ req_dequeue = 0;
+
+ if (mEp->multi_req) { /* Large request in progress */
+ unsigned remain_len;
+
+ mReq->multi.actual += mReq->req.actual;
+ remain_len = mReq->multi.len - mReq->multi.actual;
+ /* Finished, errored, or short packet: hand the
+ * original request back; otherwise queue next chunk. */
+ if (mReq->req.status || !remain_len ||
+ (mReq->req.actual != mReq->req.length)) {
+ restore_original_req(mReq);
+ mEp->multi_req = false;
+ } else {
+ mReq->req.buf = mReq->multi.buf +
+ mReq->multi.actual;
+ mReq->req.length = min_t(unsigned, remain_len,
+ (4 * CI13XXX_PAGE_SIZE));
+
+ mReq->req.status = -EINPROGRESS;
+ mReq->req.actual = 0;
+ list_del_init(&mReq->queue);
+ retval = _hardware_enqueue(mEp, mReq);
+ if (retval) {
+ err("Large req failed in middle");
+ mReq->req.status = retval;
+ restore_original_req(mReq);
+ mEp->multi_req = false;
+ goto done;
+ } else {
+ list_add_tail(&mReq->queue,
+ &mEp->qh.queue);
+ return 0;
+ }
+ }
+ }
+ list_del_init(&mReq->queue);
+done:
+
+ dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+
+ if (mReq->req.complete != NULL) {
+ /* Completion callbacks run unlocked; control transfers
+ * with data always complete on ep0-in. */
+ spin_unlock(mEp->lock);
+ if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+ mReq->req.length)
+ mEpTemp = &_udc->ep0in;
+ mReq->req.complete(&mEpTemp->ep, &mReq->req);
+ spin_lock(mEp->lock);
+ }
+ }
+
+ /* -EBUSY just means the head dTD is still active: not an error. */
+ if (retval == -EBUSY)
+ retval = 0;
+ if (retval < 0)
+ dbg_event(_usb_addr(mEp), "DONE", retval);
+
+ return retval;
+}
+
+/**
+ * isr_tr_complete_handler: transaction complete interrupt handler
+ * @udc: UDC descriptor
+ *
+ * This function handles traffic events: it retires completed transfers on
+ * every configured endpoint and then decodes/services any pending setup
+ * packet on ep0, delegating non-standard requests to the gadget driver.
+ */
+static void isr_tr_complete_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+ unsigned i;
+ u8 tmode = 0;
+
+ trace("%p", udc);
+
+ if (udc == NULL) {
+ err("EINVAL");
+ return;
+ }
+
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ /* NOTE(review): local `err` shadows the err() logging macro
+ * name; macro invocations below still expand correctly. */
+ int type, num, dir, err = -EINVAL;
+ struct usb_ctrlrequest req;
+
+ if (mEp->desc == NULL)
+ continue; /* not configured */
+
+ if (hw_test_and_clear_complete(i)) {
+ err = isr_tr_complete_low(mEp);
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+ if (err > 0) /* needs status phase */
+ err = isr_setup_status_phase(udc);
+ if (err < 0) {
+ dbg_event(_usb_addr(mEp),
+ "ERROR", err);
+ spin_unlock(udc->lock);
+ if (usb_ep_set_halt(&mEp->ep))
+ err("error: ep_set_halt");
+ spin_lock(udc->lock);
+ }
+ }
+ }
+
+ /* Everything past this point is setup-packet handling. */
+ if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+ !hw_test_and_clear_setup_status(i))
+ continue;
+
+ if (i != 0) {
+ warn("ctrl traffic received at endpoint");
+ continue;
+ }
+
+ /*
+ * Flush data and handshake transactions of previous
+ * setup packet.
+ */
+ _ep_nuke(&udc->ep0out);
+ _ep_nuke(&udc->ep0in);
+
+ /* read_setup_packet: guard loop re-reads if a new setup
+ * packet arrives while we are copying this one out. */
+ do {
+ hw_test_and_set_setup_guard();
+ memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+ /* Ensure buffer is read before acknowledging to h/w */
+ mb();
+ } while (!hw_test_and_clear_setup_guard());
+
+ type = req.bRequestType;
+
+ udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
+
+ dbg_setup(_usb_addr(mEp), &req);
+
+ /* Standard chapter-9 requests handled here; anything else
+ * (or any malformed variant) falls through to `delegate`. */
+ switch (req.bRequest) {
+ case USB_REQ_CLEAR_FEATURE:
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ dir = num & USB_ENDPOINT_DIR_MASK;
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (dir) /* TX */
+ num += hw_ep_max/2;
+ /* A wedged endpoint ignores CLEAR_HALT. */
+ if (!udc->ci13xxx_ep[num].wedge) {
+ spin_unlock(udc->lock);
+ err = usb_ep_clear_halt(
+ &udc->ci13xxx_ep[num].ep);
+ spin_lock(udc->lock);
+ if (err)
+ break;
+ }
+ err = isr_setup_status_phase(udc);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+ le16_to_cpu(req.wValue) ==
+ USB_DEVICE_REMOTE_WAKEUP) {
+ if (req.wLength != 0)
+ break;
+ udc->remote_wakeup = 0;
+ err = isr_setup_status_phase(udc);
+ } else {
+ goto delegate;
+ }
+ break;
+ case USB_REQ_GET_STATUS:
+ if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
+ type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+ type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+ goto delegate;
+ if (le16_to_cpu(req.wValue) != 0)
+ break;
+ err = isr_get_status_response(udc, &req);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 0 ||
+ le16_to_cpu(req.wIndex) != 0)
+ break;
+ err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
+ if (err)
+ break;
+ err = isr_setup_status_phase(udc);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ /* Record configured state, then still delegate so the
+ * gadget driver sees the request. */
+ if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
+ udc->configured = !!req.wValue;
+ goto delegate;
+ case USB_REQ_SET_FEATURE:
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ dir = num & USB_ENDPOINT_DIR_MASK;
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (dir) /* TX */
+ num += hw_ep_max/2;
+
+ spin_unlock(udc->lock);
+ err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+ spin_lock(udc->lock);
+ if (!err)
+ isr_setup_status_phase(udc);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+ if (req.wLength != 0)
+ break;
+ switch (le16_to_cpu(req.wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ udc->remote_wakeup = 1;
+ err = isr_setup_status_phase(udc);
+ break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ udc->gadget.b_hnp_enable = 1;
+ err = isr_setup_status_phase(udc);
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ udc->gadget.a_hnp_support = 1;
+ err = isr_setup_status_phase(udc);
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ break;
+ case USB_DEVICE_TEST_MODE:
+ /* Test mode selector is in the high byte
+ * of wIndex; it is armed here but only
+ * entered after the status stage (see
+ * isr_setup_status_complete). */
+ tmode = le16_to_cpu(req.wIndex) >> 8;
+ switch (tmode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ udc->test_mode = tmode;
+ err = isr_setup_status_phase(
+ udc);
+ break;
+ case TEST_OTG_SRP_REQD:
+ udc->gadget.otg_srp_reqd = 1;
+ err = isr_setup_status_phase(
+ udc);
+ break;
+ case TEST_OTG_HNP_REQD:
+ udc->gadget.host_request = 1;
+ err = isr_setup_status_phase(
+ udc);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ } else {
+ goto delegate;
+ }
+ break;
+ default:
+delegate:
+ if (req.wLength == 0) /* no data phase */
+ udc->ep0_dir = TX;
+
+ /* Gadget driver's setup() may sleep; drop the lock. */
+ spin_unlock(udc->lock);
+ err = udc->driver->setup(&udc->gadget, &req);
+ spin_lock(udc->lock);
+ break;
+ }
+
+ /* Protocol stall on any failure per USB 2.0 ch. 9. */
+ if (err < 0) {
+ dbg_event(_usb_addr(mEp), "ERROR", err);
+
+ spin_unlock(udc->lock);
+ if (usb_ep_set_halt(&mEp->ep))
+ err("error: ep_set_halt");
+ spin_lock(udc->lock);
+ }
+ }
+}
+
+/******************************************************************************
+ * ENDPT block
+ *****************************************************************************/
+/**
+ * ep_enable: configure endpoint, making it usable
+ *
+ * Programs the endpoint queue head (cap, max packet, mult for ISO) and
+ * enables the endpoint in hardware (except ep0, which is always enabled).
+ *
+ * Check usb_ep_enable() at "usb_gadget.h" for details
+ */
+static int ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ int retval = 0;
+ unsigned long flags;
+ unsigned mult = 0;
+
+ trace("ep = %p, desc = %p", ep, desc);
+
+ if (ep == NULL || desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ /* only internal SW should enable ctrl endpts */
+
+ mEp->desc = desc;
+
+ if (!list_empty(&mEp->qh.queue))
+ warn("enabling a non-empty endpoint!");
+
+ mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
+ mEp->num = usb_endpoint_num(desc);
+ mEp->type = usb_endpoint_type(desc);
+
+ mEp->ep.maxpacket = usb_endpoint_maxp(desc);
+
+ dbg_event(_usb_addr(mEp), "ENABLE", 0);
+
+ mEp->qh.ptr->cap = 0;
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+ /* Interrupt on setup for control endpoints. */
+ mEp->qh.ptr->cap |= QH_IOS;
+ } else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
+ /* ISO: program transactions-per-microframe (mult) field. */
+ mEp->qh.ptr->cap &= ~QH_MULT;
+ mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
+ mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
+ } else {
+ /* Bulk/interrupt: zero-length-packet termination. */
+ mEp->qh.ptr->cap |= QH_ZLT;
+ }
+
+ mEp->qh.ptr->cap |=
+ (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+ mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
+
+ /* complete all the updates to ept->head before enabling endpoint*/
+ mb();
+
+ /*
+ * Enable endpoints in the HW other than ep0 as ep0
+ * is always enabled
+ */
+ if (mEp->num)
+ retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_disable: endpoint is no longer usable
+ *
+ * Nukes pending requests and disables the hardware endpoint; control
+ * endpoints are handled in both directions via the do/while loop.
+ *
+ * Check usb_ep_disable() at "usb_gadget.h" for details
+ */
+static int ep_disable(struct usb_ep *ep)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ trace("%p", ep);
+
+ if (ep == NULL)
+ return -EINVAL;
+ else if (mEp->desc == NULL)
+ return -EBUSY;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ /* only internal SW should disable ctrl endpts */
+
+ /* For ctrl endpoints this loop runs twice (both directions);
+ * for all others it runs once. */
+ direction = mEp->dir;
+ do {
+ dbg_event(_usb_addr(mEp), "DISABLE", 0);
+
+ retval |= _ep_nuke(mEp);
+ retval |= hw_ep_disable(mEp->num, mEp->dir);
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+ } while (mEp->dir != direction);
+
+ /* Release the cached ZLP transfer descriptor, if any. */
+ if (mEp->last_zptr) {
+ dma_pool_free(mEp->td_pool, mEp->last_zptr,
+ mEp->last_zdma);
+ mEp->last_zptr = NULL;
+ }
+
+ mEp->desc = NULL;
+ mEp->ep.desc = NULL;
+ mEp->ep.maxpacket = USHRT_MAX;
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_alloc_request: allocate a request object to use with this endpoint
+ *
+ * Allocates the wrapper struct plus its DMA transfer descriptor from the
+ * endpoint's td pool; both are freed together in ep_free_request().
+ *
+ * Check usb_ep_alloc_request() at "usb_gadget.h" for details
+ */
+static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = NULL;
+
+ trace("%p, %i", ep, gfp_flags);
+
+ if (ep == NULL) {
+ err("EINVAL");
+ return NULL;
+ }
+
+ mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+ if (mReq != NULL) {
+ INIT_LIST_HEAD(&mReq->queue);
+ mReq->req.dma = DMA_ADDR_INVALID;
+
+ mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
+ &mReq->dma);
+ if (mReq->ptr == NULL) {
+ /* dTD allocation failed: undo the kzalloc. */
+ kfree(mReq);
+ mReq = NULL;
+ }
+ }
+
+ dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
+
+ return (mReq == NULL) ? NULL : &mReq->req;
+}
+
+/**
+ * ep_free_request: frees a request object
+ *
+ * Counterpart of ep_alloc_request(): returns the dTD to the pool and
+ * frees the wrapper. Refuses to free a request still on a queue.
+ *
+ * Check usb_ep_free_request() at "usb_gadget.h" for details
+ */
+static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+ unsigned long flags;
+
+ trace("%p, %p", ep, req);
+
+ if (ep == NULL || req == NULL) {
+ err("EINVAL");
+ return;
+ } else if (!list_empty(&mReq->queue)) {
+ err("EBUSY");
+ return;
+ }
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ if (mReq->ptr)
+ dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
+ kfree(mReq);
+
+ dbg_event(_usb_addr(mEp), "FREE", 0);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * ep_queue: queues (submits) an I/O request to an endpoint
+ *
+ * Validates state (softconnect, configuration), handles the ep0
+ * redirection for control transfers with data, splits oversized bulk-OUT
+ * requests into 4-page chunks ("multi" requests), then hands the request
+ * to the hardware.
+ *
+ * Check usb_ep_queue()* at usb_gadget.h" for details
+ */
+static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t __maybe_unused gfp_flags)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+ int retval = 0;
+ unsigned long flags;
+ struct ci13xxx *udc = _udc;
+
+ trace("%p, %p, %X", ep, req, gfp_flags);
+
+ spin_lock_irqsave(mEp->lock, flags);
+ if (ep == NULL || req == NULL || mEp->desc == NULL) {
+ retval = -EINVAL;
+ goto done;
+ }
+
+ if (!udc->softconnect) {
+ retval = -ENODEV;
+ goto done;
+ }
+
+ /* Non-control traffic is only legal once SET_CONFIGURATION seen. */
+ if (!udc->configured && mEp->type !=
+ USB_ENDPOINT_XFER_CONTROL) {
+ trace("usb is not configured"
+ "ept #%d, ept name#%s\n",
+ mEp->num, mEp->ep.name);
+ retval = -ESHUTDOWN;
+ goto done;
+ }
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+ /* Control transfers with data always use the physical ep0
+ * matching the current data-stage direction. */
+ if (req->length)
+ mEp = (_udc->ep0_dir == RX) ?
+ &_udc->ep0out : &_udc->ep0in;
+ if (!list_empty(&mEp->qh.queue)) {
+ _ep_nuke(mEp);
+ retval = -EOVERFLOW;
+ warn("endpoint ctrl %X nuked", _usb_addr(mEp));
+ }
+ }
+
+ /* first nuke then test link, e.g. previous status has not sent */
+ if (!list_empty(&mReq->queue)) {
+ retval = -EBUSY;
+ err("request already in queue");
+ goto done;
+ }
+ if (mEp->multi_req) {
+ retval = -EAGAIN;
+ err("Large request is in progress. come again");
+ goto done;
+ }
+
+ /* Requests larger than 4 pages are only accepted on an idle
+ * bulk-OUT endpoint, and are transferred chunk by chunk. */
+ if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
+ if (!list_empty(&mEp->qh.queue)) {
+ retval = -EAGAIN;
+ err("Queue is busy. Large req is not allowed");
+ goto done;
+ }
+ if ((mEp->type != USB_ENDPOINT_XFER_BULK) ||
+ (mEp->dir != RX)) {
+ retval = -EINVAL;
+ err("Larger req is supported only for Bulk OUT");
+ goto done;
+ }
+ mEp->multi_req = true;
+ mReq->multi.len = req->length;
+ mReq->multi.buf = req->buf;
+ req->length = (4 * CI13XXX_PAGE_SIZE);
+ }
+
+ dbg_queue(_usb_addr(mEp), req, retval);
+
+ /* push request */
+ mReq->req.status = -EINPROGRESS;
+ mReq->req.actual = 0;
+
+ retval = _hardware_enqueue(mEp, mReq);
+
+ /* -EALREADY means hardware already owns it; treat as success. */
+ if (retval == -EALREADY) {
+ dbg_event(_usb_addr(mEp), "QUEUE", retval);
+ retval = 0;
+ }
+ if (!retval)
+ list_add_tail(&mReq->queue, &mEp->qh.queue);
+ else if (mEp->multi_req)
+ mEp->multi_req = false;
+
+ done:
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
+ *
+ * Flushes the hardware endpoint, unmaps any DMA mapping this driver made,
+ * restores a split-up large request, and completes the request with
+ * -ECONNRESET (callback invoked with the lock dropped).
+ *
+ * Check usb_ep_dequeue() at "usb_gadget.h" for details
+ */
+static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ struct ci13xxx_ep *mEpTemp = mEp;
+ struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+ unsigned long flags;
+
+ trace("%p, %p", ep, req);
+
+ spin_lock_irqsave(mEp->lock, flags);
+ /*
+ * Only ep0 IN is exposed to composite. When a req is dequeued
+ * on ep0, check both ep0 IN and ep0 OUT queues.
+ */
+ if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
+ mEp->desc == NULL || list_empty(&mReq->queue) ||
+ (list_empty(&mEp->qh.queue) && ((mEp->type !=
+ USB_ENDPOINT_XFER_CONTROL) ||
+ list_empty(&_udc->ep0out.qh.queue)))) {
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return -EINVAL;
+ }
+
+ dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
+
+ if ((mEp->type == USB_ENDPOINT_XFER_CONTROL)) {
+ /* Control: flush both physical ep0 directions. */
+ hw_ep_flush(_udc->ep0out.num, RX);
+ hw_ep_flush(_udc->ep0in.num, TX);
+ } else {
+ hw_ep_flush(mEp->num, mEp->dir);
+ }
+
+ /* pop request */
+ list_del_init(&mReq->queue);
+ if (mReq->map) {
+ dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+ mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mReq->req.dma = DMA_ADDR_INVALID;
+ mReq->map = 0;
+ }
+ req->status = -ECONNRESET;
+ if (mEp->multi_req) {
+ restore_original_req(mReq);
+ mEp->multi_req = false;
+ }
+
+ if (mReq->req.complete != NULL) {
+ spin_unlock(mEp->lock);
+ if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+ mReq->req.length)
+ mEpTemp = &_udc->ep0in;
+ mReq->req.complete(&mEpTemp->ep, &mReq->req);
+ /* Clear the callback so a late completion on ep0 cannot
+ * invoke it a second time. */
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mReq->req.complete = NULL;
+ spin_lock(mEp->lock);
+ }
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return 0;
+}
+
+/* True if the request is an MSM SPS (smart peripheral subsystem) mode
+ * request, identified via the vendor bits in udc_priv. */
+static int is_sps_req(struct ci13xxx_req *mReq)
+{
+ return (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID &&
+ mReq->req.udc_priv & MSM_SPS_MODE);
+}
+
+/**
+ * ep_set_halt: sets the endpoint halt feature
+ *
+ * Sets/clears the hardware halt; for control endpoints both directions
+ * are handled. Clearing halt also clears the wedge flag.
+ *
+ * Check usb_ep_set_halt() at "usb_gadget.h" for details
+ */
+static int ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ trace("%p, %i", ep, value);
+
+ if (ep == NULL || mEp->desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+#ifndef STALL_IN
+ /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+ if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
+ !list_empty(&mEp->qh.queue) &&
+ !is_sps_req(list_entry(mEp->qh.queue.next, struct ci13xxx_req,
+ queue))){
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return -EAGAIN;
+ }
+#endif
+
+ /* Ctrl endpoints: loop covers both directions; others: one pass. */
+ direction = mEp->dir;
+ do {
+ dbg_event(_usb_addr(mEp), "HALT", value);
+ retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
+
+ if (!value)
+ mEp->wedge = 0;
+
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+ } while (mEp->dir != direction);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+ return retval;
+}
+
+/**
+ * ep_set_wedge: sets the halt feature and ignores clear requests
+ *
+ * Marks the endpoint wedged (so host CLEAR_HALT is ignored, see
+ * isr_tr_complete_handler) and then halts it.
+ *
+ * Check usb_ep_set_wedge() at "usb_gadget.h" for details
+ */
+static int ep_set_wedge(struct usb_ep *ep)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ unsigned long flags;
+
+ trace("%p", ep);
+
+ if (ep == NULL || mEp->desc == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ dbg_event(_usb_addr(mEp), "WEDGE", 0);
+ mEp->wedge = 1;
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+
+ return usb_ep_set_halt(ep);
+}
+
+/**
+ * ep_fifo_flush: flushes contents of a fifo
+ *
+ * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
+ */
+static void ep_fifo_flush(struct usb_ep *ep)
+{
+ struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+ unsigned long flags;
+
+ trace("%p", ep);
+
+ if (ep == NULL) {
+ /* NOTE(review): mEp is container_of(NULL) here, so
+ * _usb_addr(mEp) dereferences a bogus pointer — verify
+ * _usb_addr is safe on this path or drop the argument. */
+ err("%02X: -EINVAL", _usb_addr(mEp));
+ return;
+ }
+
+ spin_lock_irqsave(mEp->lock, flags);
+
+ dbg_event(_usb_addr(mEp), "FFLUSH", 0);
+ /*
+ * _ep_nuke() takes care of flushing the endpoint.
+ * some function drivers expect udc to retire all
+ * pending requests upon flushing an endpoint. There
+ * is no harm in doing it.
+ */
+ _ep_nuke(mEp);
+
+ spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * Endpoint-specific part of the API to the USB controller hardware
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_ep_ops usb_ep_ops = {
+ .enable = ep_enable,
+ .disable = ep_disable,
+ .alloc_request = ep_alloc_request,
+ .free_request = ep_free_request,
+ .queue = ep_queue,
+ .dequeue = ep_dequeue,
+ .set_halt = ep_set_halt,
+ .set_wedge = ep_set_wedge,
+ .fifo_flush = ep_fifo_flush,
+};
+
+/******************************************************************************
+ * GADGET block
+ *****************************************************************************/
+/* VBUS session notification: on connect, reset and (if soft-connected)
+ * start the controller; on disconnect, stop it and drop the PM reference. */
+static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+ unsigned long flags;
+ int gadget_ready = 0;
+
+ if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(udc->lock, flags);
+ udc->vbus_active = is_active;
+ if (udc->driver)
+ gadget_ready = 1;
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ /* Hardware is only touched once a gadget driver is bound. */
+ if (gadget_ready) {
+ if (is_active) {
+ pm_runtime_get_sync(&_gadget->dev);
+ hw_device_reset(udc);
+ if (udc->softconnect)
+ hw_device_state(udc->ep0out.qh.dma);
+ } else {
+ hw_device_state(0);
+ _gadget_stop_activity(&udc->gadget);
+ if (udc->udc_driver->notify_event)
+ udc->udc_driver->notify_event(udc,
+ CI13XXX_CONTROLLER_DISCONNECT_EVENT);
+ pm_runtime_put_sync(&_gadget->dev);
+ }
+ }
+
+ return 0;
+}
+
+/* Forward the gadget's current-draw request to the transceiver (PHY),
+ * if one is present. */
+static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+ struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+
+ if (udc->transceiver)
+ return usb_phy_set_power(udc->transceiver, mA);
+ return -ENOTSUPP;
+}
+
+/* Soft-connect/disconnect (D+ pullup). Takes effect immediately unless
+ * the controller waits for VBUS or no gadget driver is bound. */
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(udc->lock, flags);
+ udc->softconnect = is_active;
+ if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
+ !udc->vbus_active) || !udc->driver) {
+ /* State recorded; hardware will be programmed later
+ * (on VBUS / driver bind). */
+ spin_unlock_irqrestore(udc->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ if (is_active)
+ hw_device_state(udc->ep0out.qh.dma);
+ else
+ hw_device_state(0);
+
+ return 0;
+}
+
+/* Forward declarations: start/stop are referenced by the ops table below
+ * but defined after it. */
+static int ci13xxx_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int ci13xxx_stop(struct usb_gadget_driver *driver);
+
+/**
+ * Device operations part of the API to the USB controller hardware,
+ * which don't involve endpoints (or i/o)
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_gadget_ops usb_gadget_ops = {
+ .vbus_session = ci13xxx_vbus_session,
+ .wakeup = ci13xxx_wakeup,
+ .vbus_draw = ci13xxx_vbus_draw,
+ .pullup = ci13xxx_pullup,
+ .start = ci13xxx_start,
+ .stop = ci13xxx_stop,
+};
+
+/**
+ * ci13xxx_start: register a gadget driver
+ * @driver: the driver being registered
+ * @bind: the driver's bind callback
+ *
+ * Allocates the qh/td DMA pools, initializes every endpoint structure,
+ * enables both ep0 directions, allocates the shared status request, binds
+ * the gadget driver and, when VBUS/softconnect allow, starts the
+ * controller.
+ *
+ * Check ci13xxx_start() at <linux/usb/gadget.h> for details.
+ * Interrupts are enabled here.
+ */
+static int ci13xxx_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *))
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long flags;
+ int i, j;
+ int retval = -ENOMEM;
+ bool put = false;
+
+ trace("%p", driver);
+
+ if (driver == NULL ||
+ bind == NULL ||
+ driver->setup == NULL ||
+ driver->disconnect == NULL)
+ return -EINVAL;
+ else if (udc == NULL)
+ return -ENODEV;
+ else if (udc->driver != NULL)
+ return -EBUSY;
+
+ /* alloc resources */
+ udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
+ sizeof(struct ci13xxx_qh),
+ 64, CI13XXX_PAGE_SIZE);
+ if (udc->qh_pool == NULL)
+ return -ENOMEM;
+
+ udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
+ sizeof(struct ci13xxx_td),
+ 64, CI13XXX_PAGE_SIZE);
+ if (udc->td_pool == NULL) {
+ dma_pool_destroy(udc->qh_pool);
+ udc->qh_pool = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(udc->lock, flags);
+
+ info("hw_ep_max = %d", hw_ep_max);
+
+ udc->gadget.dev.driver = NULL;
+
+ /* Endpoint layout: OUT endpoints occupy indices [0, hw_ep_max/2),
+ * IN endpoints occupy [hw_ep_max/2, hw_ep_max). */
+ retval = 0;
+ for (i = 0; i < hw_ep_max/2; i++) {
+ for (j = RX; j <= TX; j++) {
+ int k = i + j * hw_ep_max/2;
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
+
+ scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+ (j == TX) ? "in" : "out");
+
+ mEp->lock = udc->lock;
+ mEp->device = &udc->gadget.dev;
+ mEp->td_pool = udc->td_pool;
+
+ mEp->ep.name = mEp->name;
+ mEp->ep.ops = &usb_ep_ops;
+ mEp->ep.maxpacket =
+ k ? USHRT_MAX : CTRL_PAYLOAD_MAX;
+
+ INIT_LIST_HEAD(&mEp->qh.queue);
+ /* dma_pool_alloc(GFP_KERNEL) may sleep: drop lock. */
+ spin_unlock_irqrestore(udc->lock, flags);
+ mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+ &mEp->qh.dma);
+ spin_lock_irqsave(udc->lock, flags);
+ if (mEp->qh.ptr == NULL)
+ retval = -ENOMEM;
+ else
+ memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+ /* skip ep0 out and in endpoints */
+ if (i == 0)
+ continue;
+
+ list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+ }
+ }
+ if (retval)
+ goto done;
+ spin_unlock_irqrestore(udc->lock, flags);
+ udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
+ retval = usb_ep_enable(&udc->ep0out.ep);
+ if (retval)
+ return retval;
+
+ udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
+ retval = usb_ep_enable(&udc->ep0in.ep);
+ if (retval)
+ return retval;
+ /* Shared request used for the ep0 status stage / GET_STATUS. */
+ udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_KERNEL);
+ if (!udc->status)
+ return -ENOMEM;
+ udc->status_buf = kzalloc(2, GFP_KERNEL); /* for GET_STATUS */
+ if (!udc->status_buf) {
+ usb_ep_free_request(&udc->ep0in.ep, udc->status);
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(udc->lock, flags);
+
+ udc->gadget.ep0 = &udc->ep0in.ep;
+ /* bind gadget */
+ driver->driver.bus = NULL;
+ udc->gadget.dev.driver = &driver->driver;
+ udc->softconnect = 1;
+
+ spin_unlock_irqrestore(udc->lock, flags);
+ pm_runtime_get_sync(&udc->gadget.dev);
+ retval = bind(&udc->gadget); /* MAY SLEEP */
+ spin_lock_irqsave(udc->lock, flags);
+
+ if (retval) {
+ udc->gadget.dev.driver = NULL;
+ goto done;
+ }
+
+ udc->driver = driver;
+ if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
+ if (udc->vbus_active) {
+ if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
+ hw_device_reset(udc);
+ } else {
+ /* No VBUS yet: defer start, release PM ref. */
+ put = true;
+ goto done;
+ }
+ }
+
+ if (!udc->softconnect) {
+ put = true;
+ goto done;
+ }
+
+ retval = hw_device_state(udc->ep0out.qh.dma);
+
+ done:
+ spin_unlock_irqrestore(udc->lock, flags);
+ if (retval || put)
+ pm_runtime_put_sync(&udc->gadget.dev);
+
+ if (udc->udc_driver->notify_event)
+ udc->udc_driver->notify_event(udc,
+ CI13XXX_CONTROLLER_UDC_STARTED_EVENT);
+
+ return retval;
+}
+
+/**
+ * ci13xxx_stop: unregister a gadget driver
+ *
+ * Reverses ci13xxx_start(): stops the controller, unbinds the gadget
+ * driver, frees the status request/buffer, the per-endpoint queue heads
+ * and the DMA pools.
+ *
+ * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
+ */
+static int ci13xxx_stop(struct usb_gadget_driver *driver)
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long i, flags;
+
+ trace("%p", driver);
+
+ if (driver == NULL ||
+ driver->unbind == NULL ||
+ driver->setup == NULL ||
+ driver->disconnect == NULL ||
+ driver != udc->driver)
+ return -EINVAL;
+
+ spin_lock_irqsave(udc->lock, flags);
+
+ if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
+ udc->vbus_active) {
+ hw_device_state(0);
+ /* _gadget_stop_activity() may invoke callbacks: unlock. */
+ spin_unlock_irqrestore(udc->lock, flags);
+ _gadget_stop_activity(&udc->gadget);
+ spin_lock_irqsave(udc->lock, flags);
+ pm_runtime_put(&udc->gadget.dev);
+ }
+
+ /* unbind gadget */
+ spin_unlock_irqrestore(udc->lock, flags);
+ driver->unbind(&udc->gadget); /* MAY SLEEP */
+ spin_lock_irqsave(udc->lock, flags);
+
+ usb_ep_free_request(&udc->ep0in.ep, udc->status);
+ kfree(udc->status_buf);
+
+ udc->gadget.dev.driver = NULL;
+
+ /* free resources */
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+ if (!list_empty(&mEp->ep.ep_list))
+ list_del_init(&mEp->ep.ep_list);
+
+ if (mEp->qh.ptr != NULL)
+ dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
+ }
+
+ udc->gadget.ep0 = NULL;
+ udc->driver = NULL;
+
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ /* Pools destroyed outside the lock (dma_pool_destroy may sleep). */
+ if (udc->td_pool != NULL) {
+ dma_pool_destroy(udc->td_pool);
+ udc->td_pool = NULL;
+ }
+ if (udc->qh_pool != NULL) {
+ dma_pool_destroy(udc->qh_pool);
+ udc->qh_pool = NULL;
+ }
+
+ return 0;
+}
+
+/******************************************************************************
+ * BUS block
+ *****************************************************************************/
+/**
+ * udc_irq: global interrupt handler
+ *
+ * Reads and clears the active interrupt sources, updates the ISR
+ * statistics ring, and dispatches to the reset/resume/transfer/suspend
+ * handlers in fixed priority order.
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * It locks access to registers
+ */
+static irqreturn_t udc_irq(void)
+{
+ struct ci13xxx *udc = _udc;
+ irqreturn_t retval;
+ u32 intr;
+
+ trace();
+
+ if (udc == NULL) {
+ err("ENODEV");
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(udc->lock);
+
+ /* With shared registers, the IRQ may belong to host mode. */
+ if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
+ if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
+ USBMODE_CM_DEVICE) {
+ spin_unlock(udc->lock);
+ return IRQ_NONE;
+ }
+ }
+ intr = hw_test_and_clear_intr_active();
+ if (intr) {
+ isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
+ isr_statistics.hndl.idx &= ISR_MASK;
+ isr_statistics.hndl.cnt++;
+
+ /* order defines priority - do NOT change it */
+ if (USBi_URI & intr) {
+ isr_statistics.uri++;
+ isr_reset_handler(udc);
+ }
+ if (USBi_PCI & intr) {
+ isr_statistics.pci++;
+ isr_resume_handler(udc);
+ }
+ if (USBi_UEI & intr)
+ isr_statistics.uei++;
+ if (USBi_UI & intr) {
+ isr_statistics.ui++;
+ isr_tr_complete_handler(udc);
+ }
+ if (USBi_SLI & intr) {
+ isr_suspend_handler(udc);
+ isr_statistics.sli++;
+ }
+ retval = IRQ_HANDLED;
+ } else {
+ isr_statistics.none++;
+ retval = IRQ_NONE;
+ }
+ spin_unlock(udc->lock);
+
+ return retval;
+}
+
+/**
+ * udc_release: driver release function
+ * @dev: device
+ *
+ * Currently does nothing (required by the device model so that
+ * device_unregister() has a release callback).
+ */
+static void udc_release(struct device *dev)
+{
+ trace("%p", dev);
+
+ if (dev == NULL)
+ err("EINVAL");
+}
+
+/**
+ * udc_probe: parent probe must call this to initialize UDC
+ * @dev: parent device
+ * @regs: registers base address
+ * @name: driver name
+ *
+ * Allocates and initializes the ci13xxx instance, optionally acquires a
+ * transceiver, initializes the hardware, registers the gadget device,
+ * debugfs files, and the UDC, then publishes the instance via the global
+ * _udc. Unwinds everything on failure via the goto chain.
+ *
+ * This function returns an error code
+ * No interrupts active, the IRQ has not been requested yet
+ * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
+ */
+static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
+ void __iomem *regs)
+{
+ struct ci13xxx *udc;
+ struct ci13xxx_platform_data *pdata;
+ int retval = 0, i;
+
+ trace("%p, %p, %p", dev, regs, driver->name);
+
+ if (dev == NULL || regs == NULL || driver == NULL ||
+ driver->name == NULL)
+ return -EINVAL;
+
+ udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
+ if (udc == NULL)
+ return -ENOMEM;
+
+ udc->lock = &udc_lock;
+ udc->regs = regs;
+ udc->udc_driver = driver;
+
+ udc->gadget.ops = &usb_gadget_ops;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ if (udc->udc_driver->flags & CI13XXX_IS_OTG)
+ udc->gadget.is_otg = 1;
+ else
+ udc->gadget.is_otg = 0;
+ udc->gadget.name = driver->name;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ udc->gadget.ep0 = NULL;
+
+ pdata = dev->platform_data;
+ if (pdata)
+ udc->gadget.usb_core_id = pdata->usb_core_id;
+
+ /* Child "gadget" device inherits the parent's DMA masks. */
+ dev_set_name(&udc->gadget.dev, "gadget");
+ udc->gadget.dev.dma_mask = dev->dma_mask;
+ udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
+ udc->gadget.dev.parent = dev;
+ udc->gadget.dev.release = udc_release;
+
+ if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+ udc->transceiver = usb_get_transceiver();
+ if (udc->transceiver == NULL) {
+ retval = -ENODEV;
+ goto free_udc;
+ }
+ }
+
+ INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
+
+ retval = hw_device_init(regs);
+ if (retval < 0)
+ goto put_transceiver;
+
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ INIT_LIST_HEAD(&mEp->ep.ep_list);
+ setup_timer(&mEp->prime_timer, ep_prime_timer_func,
+ (unsigned long) mEp);
+ }
+
+ if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
+ retval = hw_device_reset(udc);
+ if (retval)
+ goto put_transceiver;
+ }
+
+ retval = device_register(&udc->gadget.dev);
+ if (retval) {
+ /* device_register failure requires put_device, not kfree. */
+ put_device(&udc->gadget.dev);
+ goto put_transceiver;
+ }
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ retval = dbg_create_files(&udc->gadget.dev);
+#endif
+ if (retval)
+ goto unreg_device;
+
+ if (udc->transceiver) {
+ retval = otg_set_peripheral(udc->transceiver->otg,
+ &udc->gadget);
+ if (retval)
+ goto remove_dbg;
+ }
+
+ retval = usb_add_gadget_udc(dev, &udc->gadget);
+ if (retval)
+ goto remove_trans;
+
+ pm_runtime_no_callbacks(&udc->gadget.dev);
+ pm_runtime_enable(&udc->gadget.dev);
+
+ if (register_trace_usb_daytona_invalid_access(dump_usb_info, NULL))
+ pr_err("Registering trace failed\n");
+
+ _udc = udc;
+ return retval;
+
+remove_trans:
+ if (udc->transceiver) {
+ otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+ usb_put_transceiver(udc->transceiver);
+ }
+
+ err("error = %i", retval);
+remove_dbg:
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ dbg_remove_files(&udc->gadget.dev);
+#endif
+unreg_device:
+ device_unregister(&udc->gadget.dev);
+put_transceiver:
+ if (udc->transceiver)
+ usb_put_transceiver(udc->transceiver);
+free_udc:
+ kfree(udc);
+ _udc = NULL;
+ return retval;
+}
+
+/**
+ * udc_remove: parent remove must call this to remove UDC
+ *
+ * Mirrors udc_probe(): unregisters the tracepoint, the UDC, the
+ * transceiver binding, debugfs files and the gadget device, then frees
+ * the instance and clears the global _udc.
+ *
+ * No interrupts active, the IRQ has been released
+ */
+static void udc_remove(void)
+{
+ struct ci13xxx *udc = _udc;
+ int retval;
+
+ if (udc == NULL) {
+ err("EINVAL");
+ return;
+ }
+ retval = unregister_trace_usb_daytona_invalid_access(dump_usb_info,
+ NULL);
+ if (retval)
+ pr_err("Unregistering trace failed\n");
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ if (udc->transceiver) {
+ otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+ usb_put_transceiver(udc->transceiver);
+ }
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ dbg_remove_files(&udc->gadget.dev);
+#endif
+ device_unregister(&udc->gadget.dev);
+
+ kfree(udc);
+ _udc = NULL;
+}
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
new file mode 100644
index 0000000..f90ea86
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -0,0 +1,274 @@
+/*
+ * ci13xxx_udc.h - structures, registers, and macros MIPS USB IP core
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description: MIPS USB IP core family device controller
+ * Structures, registers and logging macros
+ */
+
+#ifndef _CI13XXX_h_
+#define _CI13XXX_h_
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
+#define ENDPT_MAX (32)
+#define CTRL_PAYLOAD_MAX (64)
+#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
+#define TX (1) /* similar to USB_DIR_IN but can be used as an index */
+
+/* UDC private data:
+ * 16MSb - Vendor ID | 16 LSb Vendor private data
+ */
+#define CI13XX_REQ_VENDOR_ID(id) (id & 0xFFFF0000UL)
+
+#define MSM_ETD_TYPE BIT(1)
+#define MSM_EP_PIPE_ID_RESET_VAL 0x1F001F
+
+/******************************************************************************
+ * STRUCTURES
+ *****************************************************************************/
+/* DMA layout of transfer descriptors */
+struct ci13xxx_td {
+ /* 0 */
+ u32 next;
+#define TD_TERMINATE BIT(0)
+#define TD_ADDR_MASK (0xFFFFFFEUL << 5)
+ /* 1 */
+ u32 token;
+#define TD_STATUS (0x00FFUL << 0)
+#define TD_STATUS_TR_ERR BIT(3)
+#define TD_STATUS_DT_ERR BIT(5)
+#define TD_STATUS_HALTED BIT(6)
+#define TD_STATUS_ACTIVE BIT(7)
+#define TD_MULTO (0x0003UL << 10)
+#define TD_IOC BIT(15)
+#define TD_TOTAL_BYTES (0x7FFFUL << 16)
+ /* 2 */
+ u32 page[5];
+#define TD_CURR_OFFSET (0x0FFFUL << 0)
+#define TD_FRAME_NUM (0x07FFUL << 0)
+#define TD_RESERVED_MASK (0x0FFFUL << 0)
+} __attribute__ ((packed, aligned(4)));
+
+/* DMA layout of queue heads */
+struct ci13xxx_qh {
+ /* 0 */
+ u32 cap;
+#define QH_IOS BIT(15)
+#define QH_MAX_PKT (0x07FFUL << 16)
+#define QH_ZLT BIT(29)
+#define QH_MULT (0x0003UL << 30)
+#define QH_MULT_SHIFT 11
+ /* 1 */
+ u32 curr;
+ /* 2 - 8 */
+ struct ci13xxx_td td;
+ /* 9 */
+ u32 RESERVED;
+ struct usb_ctrlrequest setup;
+} __attribute__ ((packed, aligned(4)));
+
+/* cache of larger request's original attributes */
+struct ci13xxx_multi_req {
+ unsigned len;
+ unsigned actual;
+ void *buf;
+};
+
+/* Extension of usb_request */
+struct ci13xxx_req {
+ struct usb_request req;
+ unsigned map;
+ struct list_head queue;
+ struct ci13xxx_td *ptr;
+ dma_addr_t dma;
+ struct ci13xxx_td *zptr;
+ dma_addr_t zdma;
+ struct ci13xxx_multi_req multi;
+};
+
+/* Extension of usb_ep */
+struct ci13xxx_ep {
+ struct usb_ep ep;
+ const struct usb_endpoint_descriptor *desc;
+ u8 dir;
+ u8 num;
+ u8 type;
+ char name[16];
+ struct {
+ struct list_head queue;
+ struct ci13xxx_qh *ptr;
+ dma_addr_t dma;
+ } qh;
+ int wedge;
+
+ /* global resources */
+ spinlock_t *lock;
+ struct device *device;
+ struct dma_pool *td_pool;
+ struct ci13xxx_td *last_zptr;
+ dma_addr_t last_zdma;
+ unsigned long dTD_update_fail_count;
+ unsigned long prime_fail_count;
+ int prime_timer_count;
+ struct timer_list prime_timer;
+
+ bool multi_req;
+};
+
+struct ci13xxx;
+struct ci13xxx_udc_driver {
+ const char *name;
+ unsigned long flags;
+ unsigned int nz_itc;
+#define CI13XXX_REGS_SHARED BIT(0)
+#define CI13XXX_REQUIRE_TRANSCEIVER BIT(1)
+#define CI13XXX_PULLUP_ON_VBUS BIT(2)
+#define CI13XXX_DISABLE_STREAMING BIT(3)
+#define CI13XXX_ZERO_ITC BIT(4)
+#define CI13XXX_IS_OTG BIT(5)
+
+#define CI13XXX_CONTROLLER_RESET_EVENT 0
+#define CI13XXX_CONTROLLER_CONNECT_EVENT 1
+#define CI13XXX_CONTROLLER_SUSPEND_EVENT 2
+#define CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT 3
+#define CI13XXX_CONTROLLER_RESUME_EVENT 4
+#define CI13XXX_CONTROLLER_DISCONNECT_EVENT 5
+#define CI13XXX_CONTROLLER_UDC_STARTED_EVENT 6
+
+ void (*notify_event) (struct ci13xxx *udc, unsigned event);
+};
+
+/* CI13XXX UDC descriptor & global resources */
+struct ci13xxx {
+ spinlock_t *lock; /* ctrl register bank access */
+ void __iomem *regs; /* registers address space */
+
+ struct dma_pool *qh_pool; /* DMA pool for queue heads */
+ struct dma_pool *td_pool; /* DMA pool for transfer descs */
+ struct usb_request *status; /* ep0 status request */
+ void *status_buf;/* GET_STATUS buffer */
+
+ struct usb_gadget gadget; /* USB slave device */
+ struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+ u32 ep0_dir; /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in ci13xxx_ep[hw_ep_max / 2]
+ u8 remote_wakeup; /* Is remote wakeup feature
+ enabled by the host? */
+ u8 suspended; /* suspended by the host */
+ u8 configured; /* is device configured */
+ u8 test_mode; /* the selected test mode */
+
+ struct delayed_work rw_work; /* remote wakeup delayed work */
+ struct usb_gadget_driver *driver; /* 3rd party gadget driver */
+ struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
+ int vbus_active; /* is VBUS active */
+ int softconnect; /* is pull-up enable allowed */
+ unsigned long dTD_update_fail_count;
+ struct usb_phy *transceiver; /* Transceiver struct */
+ bool skip_flush; /* skip flushing remaining EP
+ upon flush timeout for the
+ first EP. */
+};
+
+/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register size */
+#define REG_BITS (32)
+
+/* HCCPARAMS */
+#define HCCPARAMS_LEN BIT(17)
+
+/* DCCPARAMS */
+#define DCCPARAMS_DEN (0x1F << 0)
+#define DCCPARAMS_DC BIT(7)
+
+/* TESTMODE */
+#define TESTMODE_FORCE BIT(0)
+
+/* USBCMD */
+#define USBCMD_RS BIT(0)
+#define USBCMD_RST BIT(1)
+#define USBCMD_SUTW BIT(13)
+#define USBCMD_ATDTW BIT(14)
+
+/* USBSTS & USBINTR */
+#define USBi_UI BIT(0)
+#define USBi_UEI BIT(1)
+#define USBi_PCI BIT(2)
+#define USBi_URI BIT(6)
+#define USBi_SLI BIT(8)
+
+/* DEVICEADDR */
+#define DEVICEADDR_USBADRA BIT(24)
+#define DEVICEADDR_USBADR (0x7FUL << 25)
+
+/* PORTSC */
+#define PORTSC_FPR BIT(6)
+#define PORTSC_SUSP BIT(7)
+#define PORTSC_HSP BIT(9)
+#define PORTSC_PTC (0x0FUL << 16)
+
+/* DEVLC */
+#define DEVLC_PSPD (0x03UL << 25)
+#define DEVLC_PSPD_HS (0x02UL << 25)
+
+/* USBMODE */
+#define USBMODE_CM (0x03UL << 0)
+#define USBMODE_CM_IDLE (0x00UL << 0)
+#define USBMODE_CM_DEVICE (0x02UL << 0)
+#define USBMODE_CM_HOST (0x03UL << 0)
+#define USBMODE_SLOM BIT(3)
+#define USBMODE_SDIS BIT(4)
+#define USBCMD_ITC(n) (n << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
+#define USBCMD_ITC_MASK (0xFF << 16)
+
+/* ENDPTCTRL */
+#define ENDPTCTRL_RXS BIT(0)
+#define ENDPTCTRL_RXT (0x03UL << 2)
+#define ENDPTCTRL_RXR BIT(6) /* reserved for port 0 */
+#define ENDPTCTRL_RXE BIT(7)
+#define ENDPTCTRL_TXS BIT(16)
+#define ENDPTCTRL_TXT (0x03UL << 18)
+#define ENDPTCTRL_TXR BIT(22) /* reserved for port 0 */
+#define ENDPTCTRL_TXE BIT(23)
+
+/******************************************************************************
+ * LOGGING
+ *****************************************************************************/
+#define ci13xxx_printk(level, format, args...) \
+do { \
+ if (_udc == NULL) \
+ printk(level "[%s] " format "\n", __func__, ## args); \
+ else \
+ dev_printk(level, _udc->gadget.dev.parent, \
+ "[%s] " format "\n", __func__, ## args); \
+} while (0)
+
+#ifndef err
+#define err(format, args...) ci13xxx_printk(KERN_ERR, format, ## args)
+#endif
+
+#define warn(format, args...) ci13xxx_printk(KERN_WARNING, format, ## args)
+#define info(format, args...) ci13xxx_printk(KERN_INFO, format, ## args)
+
+#ifdef TRACE
+#define trace(format, args...) ci13xxx_printk(KERN_DEBUG, format, ## args)
+#define dbg_trace(format, args...) dev_dbg(dev, format, ##args)
+#else
+#define trace(format, args...) do {} while (0)
+#define dbg_trace(format, args...) do {} while (0)
+#endif
+
+#endif /* _CI13XXX_h_ */
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 9f7a29a..05a66b2 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -158,6 +158,13 @@
.bInterval = 4, /* poll 1 per millisecond */
};
+static struct usb_ss_ep_comp_descriptor ss_as_in_comp_desc = {
+ .bLength = sizeof(ss_as_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ .wBytesPerInterval = cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+};
+
/* Standard ISO IN Endpoint Descriptor for highspeed */
static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -198,6 +205,26 @@
NULL,
};
+static struct usb_descriptor_header *ss_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&ss_as_in_comp_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
static struct usb_descriptor_header *fs_audio_desc[] = {
(struct usb_descriptor_header *)&ac_interface_desc,
(struct usb_descriptor_header *)&ac_header_desc,
@@ -673,6 +700,7 @@
f->fs_descriptors = fs_audio_desc;
f->hs_descriptors = hs_audio_desc;
+ f->ss_descriptors = ss_audio_desc;
for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index dc368c7..4bdfadf 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -300,15 +300,17 @@
gsi_channel_info.gevntcount_hi_addr;
in_params->dir = GSI_CHAN_DIR_FROM_GSI;
in_params->xfer_ring_len = gsi_channel_info.xfer_ring_len;
- in_params->xfer_ring_base_addr = gsi_channel_info.xfer_ring_base_addr;
in_params->xfer_scratch.last_trb_addr_iova =
gsi_channel_info.last_trb_addr;
- in_params->xfer_ring_base_addr = in_params->xfer_ring_base_addr_iova =
+ in_params->xfer_ring_base_addr_iova =
gsi_channel_info.xfer_ring_base_addr;
in_params->data_buff_base_len = d_port->in_request.buf_len *
d_port->in_request.num_bufs;
- in_params->data_buff_base_addr = in_params->data_buff_base_addr_iova =
- d_port->in_request.dma;
+ in_params->data_buff_base_addr_iova = d_port->in_request.dma;
+ in_params->sgt_xfer_rings = &d_port->in_request.sgt_trb_xfer_ring;
+ in_params->sgt_data_buff = &d_port->in_request.sgt_data_buff;
+ log_event_dbg("%s(): IN: sgt_xfer_rings:%pK sgt_data_buff:%pK\n",
+ __func__, in_params->sgt_xfer_rings, in_params->sgt_data_buff);
in_params->xfer_scratch.const_buffer_size =
gsi_channel_info.const_buffer_size;
in_params->xfer_scratch.depcmd_low_addr =
@@ -340,14 +342,19 @@
out_params->dir = GSI_CHAN_DIR_TO_GSI;
out_params->xfer_ring_len =
gsi_channel_info.xfer_ring_len;
- out_params->xfer_ring_base_addr =
- out_params->xfer_ring_base_addr_iova =
+ out_params->xfer_ring_base_addr_iova =
gsi_channel_info.xfer_ring_base_addr;
out_params->data_buff_base_len = d_port->out_request.buf_len *
d_port->out_request.num_bufs;
- out_params->data_buff_base_addr =
- out_params->data_buff_base_addr_iova =
+ out_params->data_buff_base_addr_iova =
d_port->out_request.dma;
+ out_params->sgt_xfer_rings =
+ &d_port->out_request.sgt_trb_xfer_ring;
+ out_params->sgt_data_buff = &d_port->out_request.sgt_data_buff;
+ log_event_dbg("%s(): OUT: sgt_xfer_rings:%pK sgt_data_buff:%pK\n",
+ __func__, out_params->sgt_xfer_rings,
+ out_params->sgt_data_buff);
+
out_params->xfer_scratch.last_trb_addr_iova =
gsi_channel_info.last_trb_addr;
out_params->xfer_scratch.const_buffer_size =
@@ -501,10 +508,12 @@
gsi->d_port.in_channel_handle = -EINVAL;
gsi->d_port.out_channel_handle = -EINVAL;
- usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_FREE_TRBS);
+ usb_gsi_ep_op(gsi->d_port.in_ep, &gsi->d_port.in_request,
+ GSI_EP_OP_FREE_TRBS);
if (gsi->d_port.out_ep)
- usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_FREE_TRBS);
+ usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+ GSI_EP_OP_FREE_TRBS);
/* free buffers allocated with each TRB */
gsi_free_trb_buffer(gsi);
@@ -1949,6 +1958,11 @@
ret = -ENOMEM;
goto fail1;
}
+
+ dma_get_sgtable(dev->parent,
+ &gsi->d_port.in_request.sgt_data_buff,
+ gsi->d_port.in_request.buf_base_addr,
+ gsi->d_port.in_request.dma, len_in);
}
if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
@@ -1968,6 +1982,11 @@
ret = -ENOMEM;
goto fail;
}
+
+ dma_get_sgtable(dev->parent,
+ &gsi->d_port.out_request.sgt_data_buff,
+ gsi->d_port.out_request.buf_base_addr,
+ gsi->d_port.out_request.dma, len_out);
}
log_event_dbg("finished allocating trb's buffer\n");
@@ -1998,6 +2017,7 @@
gsi->d_port.out_request.buf_base_addr,
gsi->d_port.out_request.dma);
gsi->d_port.out_request.buf_base_addr = NULL;
+ sg_free_table(&gsi->d_port.out_request.sgt_data_buff);
}
if (gsi->d_port.in_ep &&
@@ -2008,6 +2028,7 @@
gsi->d_port.in_request.buf_base_addr,
gsi->d_port.in_request.dma);
gsi->d_port.in_request.buf_base_addr = NULL;
+ sg_free_table(&gsi->d_port.in_request.sgt_data_buff);
}
}
@@ -3069,7 +3090,7 @@
ipa_chnl_params->xfer_ring_len);
len += scnprintf(buf + len, PAGE_SIZE - len,
"%25s %10x\n", "IN TRB Base Addr: ", (unsigned int)
- ipa_chnl_params->xfer_ring_base_addr);
+ ipa_chnl_params->xfer_ring_base_addr_iova);
len += scnprintf(buf + len, PAGE_SIZE - len,
"%25s %10x\n", "GEVENTCNTLO IN Addr: ",
ipa_chnl_params->gevntcount_low_addr);
@@ -3103,7 +3124,7 @@
ipa_chnl_params->xfer_ring_len);
len += scnprintf(buf + len, PAGE_SIZE - len,
"%25s %10x\n", "OUT TRB Base Addr: ", (unsigned int)
- ipa_chnl_params->xfer_ring_base_addr);
+ ipa_chnl_params->xfer_ring_base_addr_iova);
len += scnprintf(buf + len, PAGE_SIZE - len,
"%25s %10x\n", "GEVENTCNTLO OUT Addr: ",
ipa_chnl_params->gevntcount_low_addr);
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index a0fecb2..6ae2693 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -183,15 +183,28 @@
}
/*----------------------------------------------------------------------*/
-static void qdss_ctrl_write_complete(struct usb_ep *ep,
+static void qdss_write_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct f_qdss *qdss = ep->driver_data;
struct qdss_request *d_req = req->context;
+ struct usb_ep *in;
+ struct list_head *list_pool;
+ enum qdss_state state;
unsigned long flags;
pr_debug("qdss_ctrl_write_complete\n");
+ if (qdss->debug_inface_enabled) {
+ in = qdss->port.ctrl_in;
+ list_pool = &qdss->ctrl_write_pool;
+ state = USB_QDSS_CTRL_WRITE_DONE;
+ } else {
+ in = qdss->port.data;
+ list_pool = &qdss->data_write_pool;
+ state = USB_QDSS_DATA_WRITE_DONE;
+ }
+
if (!req->status) {
/* send zlp */
if ((req->length >= ep->maxpacket) &&
@@ -199,13 +212,13 @@
req->length = 0;
d_req->actual = req->actual;
d_req->status = req->status;
- if (!usb_ep_queue(qdss->port.ctrl_in, req, GFP_ATOMIC))
+ if (!usb_ep_queue(in, req, GFP_ATOMIC))
return;
}
}
spin_lock_irqsave(&qdss->lock, flags);
- list_add_tail(&req->list, &qdss->ctrl_write_pool);
+ list_add_tail(&req->list, list_pool);
if (req->length != 0) {
d_req->actual = req->actual;
d_req->status = req->status;
@@ -213,8 +226,7 @@
spin_unlock_irqrestore(&qdss->lock, flags);
if (qdss->ch.notify)
- qdss->ch.notify(qdss->ch.priv, USB_QDSS_CTRL_WRITE_DONE, d_req,
- NULL);
+ qdss->ch.notify(qdss->ch.priv, state, d_req, NULL);
}
static void qdss_ctrl_read_complete(struct usb_ep *ep,
@@ -252,6 +264,12 @@
return;
}
+ list_for_each_safe(act, tmp, &qdss->data_write_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(qdss->port.data, req);
+ }
+
list_for_each_safe(act, tmp, &qdss->ctrl_write_pool) {
req = list_entry(act, struct usb_request, list);
list_del(&req->list);
@@ -271,23 +289,41 @@
{
struct f_qdss *qdss = ch->priv_usb;
struct usb_request *req;
+ struct usb_ep *in;
+ struct list_head *list_pool;
int i;
pr_debug("usb_qdss_alloc_req\n");
- if (no_write_buf <= 0 || no_read_buf <= 0 || !qdss) {
+ if (!qdss) {
+ pr_err("usb_qdss_alloc_req: channel %s closed\n", ch->name);
+ return -ENODEV;
+ }
+
+ if ((qdss->debug_inface_enabled &&
+ (no_write_buf <= 0 || no_read_buf <= 0)) ||
+ (!qdss->debug_inface_enabled &&
+ (no_write_buf <= 0 || no_read_buf))) {
pr_err("usb_qdss_alloc_req: missing params\n");
return -ENODEV;
}
+ if (qdss->debug_inface_enabled) {
+ in = qdss->port.ctrl_in;
+ list_pool = &qdss->ctrl_write_pool;
+ } else {
+ in = qdss->port.data;
+ list_pool = &qdss->data_write_pool;
+ }
+
for (i = 0; i < no_write_buf; i++) {
- req = usb_ep_alloc_request(qdss->port.ctrl_in, GFP_ATOMIC);
+ req = usb_ep_alloc_request(in, GFP_ATOMIC);
if (!req) {
pr_err("usb_qdss_alloc_req: ctrl_in allocation err\n");
goto fail;
}
- req->complete = qdss_ctrl_write_complete;
- list_add_tail(&req->list, &qdss->ctrl_write_pool);
+ req->complete = qdss_write_complete;
+ list_add_tail(&req->list, list_pool);
}
for (i = 0; i < no_read_buf; i++) {
@@ -378,6 +414,10 @@
qdss_ctrl_intf_desc.iInterface = id;
}
+ /* for non-accelerated path keep tx fifo size 1k */
+ if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+ qdss_data_ep_comp_desc.bMaxBurst = 0;
+
ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
&qdss_data_ep_comp_desc);
if (!ep) {
@@ -490,21 +530,20 @@
qdss = container_of(work, struct f_qdss, disconnect_w);
pr_debug("usb_qdss_disconnect_work\n");
- /*
- * Uninitialized init data i.e. ep specific operation.
- * Notify qdss to cancel all active transfers.
- */
- if (qdss->ch.app_conn) {
+
+ /* Notify qdss to cancel all active transfers */
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv,
+ USB_QDSS_DISCONNECT,
+ NULL,
+ NULL);
+
+ /* Uninitialized init data i.e. ep specific operation */
+ if (qdss->ch.app_conn && !strcmp(qdss->ch.name, USB_QDSS_CH_MSM)) {
status = uninit_data(qdss->port.data);
if (status)
pr_err("%s: uninit_data error\n", __func__);
- if (qdss->ch.notify)
- qdss->ch.notify(qdss->ch.priv,
- USB_QDSS_DISCONNECT,
- NULL,
- NULL);
-
status = set_qdss_data_connection(qdss, 0);
if (status)
pr_err("qdss_disconnect error");
@@ -561,15 +600,16 @@
}
pr_debug("usb_qdss_connect_work\n");
+
+ if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+ goto notify;
+
status = set_qdss_data_connection(qdss, 1);
if (status) {
pr_err("set_qdss_data_connection error(%d)", status);
return;
}
- if (qdss->ch.notify)
- qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
- NULL, &qdss->ch);
spin_lock_irqsave(&qdss->lock, flags);
req = qdss->endless_req;
spin_unlock_irqrestore(&qdss->lock, flags);
@@ -577,8 +617,15 @@
return;
status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);
- if (status)
+ if (status) {
pr_err("%s: usb_ep_queue error (%d)\n", __func__, status);
+ return;
+ }
+
+notify:
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
+ NULL, &qdss->ch);
}
static int qdss_set_alt(struct usb_function *f, unsigned int intf,
@@ -718,6 +765,7 @@
spin_lock_init(&qdss->lock);
INIT_LIST_HEAD(&qdss->ctrl_read_pool);
INIT_LIST_HEAD(&qdss->ctrl_write_pool);
+ INIT_LIST_HEAD(&qdss->data_write_pool);
INIT_WORK(&qdss->connect_w, usb_qdss_connect_work);
INIT_WORK(&qdss->disconnect_w, usb_qdss_disconnect_work);
@@ -813,6 +861,50 @@
}
EXPORT_SYMBOL(usb_qdss_ctrl_write);
+int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+	pr_debug("usb_qdss_write\n");
+
+ if (!qdss)
+ return -ENODEV;
+
+ spin_lock_irqsave(&qdss->lock, flags);
+
+ if (qdss->usb_connected == 0) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&qdss->data_write_pool)) {
+		pr_err("error: usb_qdss_write list is empty\n");
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&qdss->data_write_pool, struct usb_request,
+ list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+ if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, &qdss->data_write_pool);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ pr_err("qdss usb_ep_queue failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_qdss_write);
+
struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv,
void (*notify)(void *priv, unsigned int event,
struct qdss_request *d_req, struct usb_qdss_ch *))
@@ -870,7 +962,9 @@
pr_debug("usb_qdss_close\n");
spin_lock_irqsave(&qdss_lock, flags);
- if (!qdss || !qdss->usb_connected) {
+ ch->priv_usb = NULL;
+ if (!qdss || !qdss->usb_connected ||
+ !strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
ch->app_conn = 0;
spin_unlock_irqrestore(&qdss_lock, flags);
return;
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index 72edb90..57c76f8 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -59,6 +59,10 @@
struct usb_qdss_ch ch;
struct list_head ctrl_read_pool;
struct list_head ctrl_write_pool;
+
+ /* for mdm channel SW path */
+ struct list_head data_write_pool;
+
struct work_struct connect_w;
struct work_struct disconnect_w;
spinlock_t lock;
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index cb00ada..9625248 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -31,13 +31,43 @@
struct gserial port;
u8 data_id;
u8 port_num;
+ spinlock_t lock;
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+
+ u8 online;
+ u8 pending;
+ struct usb_cdc_line_coding port_line_coding;
+
+ /* SetControlLineState request */
+ u16 port_handshake_bits;
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+
+ /* SerialState notification */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
};
+static inline struct f_gser *port_to_gser(struct gserial *p)
+{
+ return container_of(p, struct f_gser, port);
+}
+
static inline struct f_gser *func_to_gser(struct usb_function *f)
{
return container_of(f, struct f_gser, port.func);
}
+#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
/*-------------------------------------------------------------------------*/
/* interface descriptor: */
@@ -46,15 +76,55 @@
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
- .bNumEndpoints = 2,
+ .bNumEndpoints = 3,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
/* .iInterface = DYNAMIC */
};
+static struct usb_cdc_header_desc gser_header_desc = {
+ .bLength = sizeof(gser_header_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+gser_call_mgmt_descriptor = {
+ .bLength = sizeof(gser_call_mgmt_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+ .bmCapabilities = 0,
+ /* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor gser_descriptor = {
+ .bLength = sizeof(gser_descriptor),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+ .bmCapabilities = USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc gser_union_desc = {
+ .bLength = sizeof(gser_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
/* full speed support: */
+static struct usb_endpoint_descriptor gser_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
static struct usb_endpoint_descriptor gser_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -71,12 +141,25 @@
static struct usb_descriptor_header *gser_fs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_fs_notify_desc,
(struct usb_descriptor_header *) &gser_fs_in_desc,
(struct usb_descriptor_header *) &gser_fs_out_desc,
NULL,
};
/* high speed support: */
+static struct usb_endpoint_descriptor gser_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
static struct usb_endpoint_descriptor gser_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
@@ -94,6 +177,11 @@
static struct usb_descriptor_header *gser_hs_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_hs_notify_desc,
(struct usb_descriptor_header *) &gser_hs_in_desc,
(struct usb_descriptor_header *) &gser_hs_out_desc,
NULL,
@@ -118,8 +206,33 @@
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};
+static struct usb_endpoint_descriptor gser_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+ .bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_ss_ep_comp_descriptor gser_ss_notify_comp_desc = {
+ .bLength = sizeof(gser_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GS_NOTIFY_MAXPACKET),
+};
+
static struct usb_descriptor_header *gser_ss_function[] = {
(struct usb_descriptor_header *) &gser_interface_desc,
+ (struct usb_descriptor_header *) &gser_header_desc,
+ (struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &gser_descriptor,
+ (struct usb_descriptor_header *) &gser_union_desc,
+ (struct usb_descriptor_header *) &gser_ss_notify_desc,
+ (struct usb_descriptor_header *) &gser_ss_notify_comp_desc,
(struct usb_descriptor_header *) &gser_ss_in_desc,
(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
(struct usb_descriptor_header *) &gser_ss_out_desc,
@@ -130,7 +243,7 @@
/* string descriptors: */
static struct usb_string gser_string_defs[] = {
- [0].s = "Generic Serial",
+ [0].s = "DUN over Serial",
{ } /* end of list */
};
@@ -145,13 +258,131 @@
};
/*-------------------------------------------------------------------------*/
+static void gser_complete_set_line_coding(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gser *gser = ep->driver_data;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ if (req->status != 0) {
+ dev_dbg(&cdev->gadget->dev, "gser ttyGS%d completion, err %d\n",
+ gser->port_num, req->status);
+ return;
+ }
+
+ /* normal completion */
+ if (req->actual != sizeof(gser->port_line_coding)) {
+ dev_dbg(&cdev->gadget->dev, "gser ttyGS%d short resp, len %d\n",
+ gser->port_num, req->actual);
+ usb_ep_set_halt(ep);
+ } else {
+ struct usb_cdc_line_coding *value = req->buf;
+
+ gser->port_line_coding = *value;
+ }
+}
+
+static int
+gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_gser *gser = func_to_gser(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ /* SET_LINE_CODING ... just read and save what the host sends */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_LINE_CODING:
+ if (w_length != sizeof(struct usb_cdc_line_coding))
+ goto invalid;
+
+ value = w_length;
+ cdev->gadget->ep0->driver_data = gser;
+ req->complete = gser_complete_set_line_coding;
+ break;
+
+ /* GET_LINE_CODING ... return what host sent, or initial value */
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_GET_LINE_CODING:
+ value = min_t(unsigned int, w_length,
+ sizeof(struct usb_cdc_line_coding));
+ memcpy(req->buf, &gser->port_line_coding, value);
+ break;
+
+ /* SET_CONTROL_LINE_STATE ... save what the host sent */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+ value = 0;
+ gser->port_handshake_bits = w_value;
+ pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d RST:%d\n",
+ __func__, w_value & ACM_CTRL_DTR ? 1 : 0,
+ w_value & ACM_CTRL_RTS ? 1 : 0);
+
+ if (gser->port.notify_modem)
+ gser->port.notify_modem(&gser->port, 0, w_value);
+
+ break;
+
+ default:
+invalid:
+ dev_dbg(&cdev->gadget->dev,
+ "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ dev_dbg(&cdev->gadget->dev,
+ "gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+ gser->port_num, ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "gser response on ttyGS%d, err %d\n",
+ gser->port_num, value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_gser *gser = func_to_gser(f);
struct usb_composite_dev *cdev = f->config->cdev;
+ int rc = 0;
/* we know alt == 0, so this is an activation or a reset */
+ if (gser->notify->driver_data) {
+ dev_dbg(&cdev->gadget->dev,
+ "reset generic ctl ttyGS%d\n", gser->port_num);
+ usb_ep_disable(gser->notify);
+ }
+
+ if (!gser->notify->desc) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->notify)) {
+ gser->notify->desc = NULL;
+ return -EINVAL;
+ }
+ }
+
+ rc = usb_ep_enable(gser->notify);
+ if (rc) {
+ ERROR(cdev, "can't enable %s, result %d\n",
+ gser->notify->name, rc);
+ return rc;
+ }
+ gser->notify->driver_data = gser;
if (gser->port.in->enabled) {
dev_dbg(&cdev->gadget->dev,
@@ -169,7 +400,9 @@
}
}
gserial_connect(&gser->port, gser->port_num);
- return 0;
+ gser->online = 1;
+
+ return rc;
}
static void gser_disable(struct usb_function *f)
@@ -180,6 +413,178 @@
dev_dbg(&cdev->gadget->dev,
"generic ttyGS%d deactivated\n", gser->port_num);
gserial_disconnect(&gser->port);
+ usb_ep_disable(gser->notify);
+ gser->notify->driver_data = NULL;
+ gser->online = 0;
+}
+
+static int gser_notify(struct f_gser *gser, u8 type, u16 value,
+ void *data, unsigned int length)
+{
+ struct usb_ep *ep = gser->notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned int len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ req = gser->notify_req;
+ gser->notify_req = NULL;
+ gser->pending = false;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(gser->data_id);
+ notify->wLength = cpu_to_le16(length);
+ memcpy(buf, data, length);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
+ gser->port_num, status);
+ gser->notify_req = req;
+ }
+
+ return status;
+}
+
+static int gser_notify_serial_state(struct f_gser *gser)
+{
+ int status;
+ unsigned long flags;
+ struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+ spin_lock_irqsave(&gser->lock, flags);
+ if (gser->notify_req) {
+ DBG(cdev, "gser ttyGS%d serial state %04x\n",
+ gser->port_num, gser->serial_state);
+ status = gser_notify(gser, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &gser->serial_state,
+ sizeof(gser->serial_state));
+ } else {
+ gser->pending = true;
+ status = 0;
+ }
+
+ spin_unlock_irqrestore(&gser->lock, flags);
+ return status;
+}
+
+static void gser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_gser *gser = req->context;
+ u8 doit = false;
+ unsigned long flags;
+
+ /* on this call path we do NOT hold the port spinlock,
+ * which is why ACM needs its own spinlock
+ */
+
+ spin_lock_irqsave(&gser->lock, flags);
+ if (req->status != -ESHUTDOWN)
+ doit = gser->pending;
+
+ gser->notify_req = req;
+ spin_unlock_irqrestore(&gser->lock, flags);
+
+ if (doit && gser->online)
+ gser_notify_serial_state(gser);
+}
+
+static void gser_connect(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ gser_notify_serial_state(gser);
+}
+
+static unsigned int gser_get_dtr(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ if (gser->port_handshake_bits & ACM_CTRL_DTR)
+ return 1;
+ else
+ return 0;
+}
+
+static unsigned int gser_get_rts(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ if (gser->port_handshake_bits & ACM_CTRL_RTS)
+ return 1;
+ else
+ return 0;
+}
+
+static unsigned int gser_send_carrier_detect(struct gserial *port,
+ unsigned int yes)
+{
+ u16 state;
+ struct f_gser *gser = port_to_gser(port);
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_DCD;
+ if (yes)
+ state |= ACM_CTRL_DCD;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+static unsigned int gser_send_ring_indicator(struct gserial *port,
+ unsigned int yes)
+{
+ u16 state;
+ struct f_gser *gser = port_to_gser(port);
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_RI;
+ if (yes)
+ state |= ACM_CTRL_RI;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+static void gser_disconnect(struct gserial *port)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ gser_notify_serial_state(gser);
+}
+
+static int gser_send_break(struct gserial *port, int duration)
+{
+ u16 state;
+ struct f_gser *gser = port_to_gser(port);
+
+ state = gser->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ gser->serial_state = state;
+ return gser_notify_serial_state(gser);
+}
+
+static int gser_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+ struct f_gser *gser = port_to_gser(port);
+
+ gser->serial_state = ctrl_bits;
+
+ return gser_notify_serial_state(gser);
}
/*-------------------------------------------------------------------------*/
@@ -225,6 +630,21 @@
goto fail;
gser->port.out = ep;
+ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ gser->notify = ep;
+ ep->driver_data = cdev; /* claim */
+ /* allocate notification */
+ gser->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!gser->notify_req)
+ goto fail;
+
+ gser->notify_req->complete = gser_notify_complete;
+ gser->notify_req->context = gser;
+
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -235,6 +655,15 @@
gser_ss_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress;
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ gser_hs_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+ }
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ gser_ss_notify_desc.bEndpointAddress =
+ gser_fs_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
gser_ss_function, NULL);
if (status)
@@ -247,6 +676,18 @@
return 0;
fail:
+ if (gser->notify_req)
+ gs_free_req(gser->notify, gser->notify_req);
+
+ /* we might as well release our claims on endpoints */
+ if (gser->notify)
+ gser->notify->driver_data = NULL;
+ /* we might as well release our claims on endpoints */
+ if (gser->port.out)
+ gser->port.out->driver_data = NULL;
+ if (gser->port.in)
+ gser->port.in->driver_data = NULL;
+
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
@@ -327,7 +768,10 @@
static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct f_gser *gser = func_to_gser(f);
+
usb_free_all_descriptors(f);
+ gs_free_req(gser->notify, gser->notify_req);
}
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
@@ -342,6 +786,7 @@
opts = container_of(fi, struct f_serial_opts, func_inst);
+ spin_lock_init(&gser->lock);
gser->port_num = opts->port_num;
gser->port.func.name = "gser";
@@ -351,6 +796,15 @@
gser->port.func.set_alt = gser_set_alt;
gser->port.func.disable = gser_disable;
gser->port.func.free_func = gser_free;
+ gser->port.func.setup = gser_setup;
+ gser->port.connect = gser_connect;
+ gser->port.get_dtr = gser_get_dtr;
+ gser->port.get_rts = gser_get_rts;
+ gser->port.send_carrier_detect = gser_send_carrier_detect;
+ gser->port.send_ring_indicator = gser_send_ring_indicator;
+ gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
+ gser->port.disconnect = gser_disconnect;
+ gser->port.send_break = gser_send_break;
return &gser->port.func;
}
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index cb1ecfa..54220a5 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -382,15 +382,20 @@
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
- int status;
+ int status = 0;
spin_lock(&dev->req_lock);
- status = prealloc(&dev->tx_reqs, link->in_ep, n);
- if (status < 0)
- goto fail;
- status = prealloc(&dev->rx_reqs, link->out_ep, n);
- if (status < 0)
- goto fail;
+ if (link->in_ep) {
+ status = prealloc(&dev->tx_reqs, link->in_ep, n);
+ if (status < 0)
+ goto fail;
+ }
+
+ if (link->out_ep) {
+ status = prealloc(&dev->rx_reqs, link->out_ep, n);
+ if (status < 0)
+ goto fail;
+ }
goto done;
fail:
DBG(dev, "can't alloc requests\n");
@@ -838,16 +843,24 @@
* their own pace; the network stack can handle old packets.
* For the moment we leave this here, since it works.
*/
- in = link->in_ep->desc;
- out = link->out_ep->desc;
- usb_ep_disable(link->in_ep);
- usb_ep_disable(link->out_ep);
- if (netif_carrier_ok(net)) {
- DBG(dev, "host still using in/out endpoints\n");
- link->in_ep->desc = in;
- link->out_ep->desc = out;
- usb_ep_enable(link->in_ep);
- usb_ep_enable(link->out_ep);
+ if (link->in_ep) {
+ in = link->in_ep->desc;
+ usb_ep_disable(link->in_ep);
+ if (netif_carrier_ok(net)) {
+ DBG(dev, "host still using in endpoints\n");
+ link->in_ep->desc = in;
+ usb_ep_enable(link->in_ep);
+ }
+ }
+
+ if (link->out_ep) {
+ out = link->out_ep->desc;
+ usb_ep_disable(link->out_ep);
+ if (netif_carrier_ok(net)) {
+ DBG(dev, "host still using out endpoints\n");
+ link->out_ep->desc = out;
+ usb_ep_enable(link->out_ep);
+ }
}
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -1201,20 +1214,24 @@
if (!dev)
return ERR_PTR(-EINVAL);
- link->in_ep->driver_data = dev;
- result = usb_ep_enable(link->in_ep);
- if (result != 0) {
- DBG(dev, "enable %s --> %d\n",
- link->in_ep->name, result);
- goto fail0;
+ if (link->in_ep) {
+ link->in_ep->driver_data = dev;
+ result = usb_ep_enable(link->in_ep);
+ if (result != 0) {
+ DBG(dev, "enable %s --> %d\n",
+ link->in_ep->name, result);
+ goto fail0;
+ }
}
- link->out_ep->driver_data = dev;
- result = usb_ep_enable(link->out_ep);
- if (result != 0) {
- DBG(dev, "enable %s --> %d\n",
- link->out_ep->name, result);
- goto fail1;
+ if (link->out_ep) {
+ link->out_ep->driver_data = dev;
+ result = usb_ep_enable(link->out_ep);
+ if (result != 0) {
+ DBG(dev, "enable %s --> %d\n",
+ link->out_ep->name, result);
+ goto fail1;
+ }
}
if (result == 0)
@@ -1252,9 +1269,11 @@
/* on error, disable any endpoints */
} else {
- (void) usb_ep_disable(link->out_ep);
+ if (link->out_ep)
+ (void) usb_ep_disable(link->out_ep);
fail1:
- (void) usb_ep_disable(link->in_ep);
+ if (link->in_ep)
+ (void) usb_ep_disable(link->in_ep);
}
fail0:
/* caller is responsible for cleanup on error */
@@ -1295,41 +1314,45 @@
* of all pending i/o. then free the request objects
* and forget about the endpoints.
*/
- usb_ep_disable(link->in_ep);
- spin_lock(&dev->req_lock);
- while (!list_empty(&dev->tx_reqs)) {
- req = container_of(dev->tx_reqs.next,
- struct usb_request, list);
- list_del(&req->list);
-
- spin_unlock(&dev->req_lock);
- if (link->multi_pkt_xfer)
- kfree(req->buf);
- usb_ep_free_request(link->in_ep, req);
+ if (link->in_ep) {
+ usb_ep_disable(link->in_ep);
spin_lock(&dev->req_lock);
- }
- spin_unlock(&dev->req_lock);
- link->in_ep->desc = NULL;
+ while (!list_empty(&dev->tx_reqs)) {
+ req = container_of(dev->tx_reqs.next,
+ struct usb_request, list);
+ list_del(&req->list);
- usb_ep_disable(link->out_ep);
- spin_lock(&dev->req_lock);
- while (!list_empty(&dev->rx_reqs)) {
- req = container_of(dev->rx_reqs.next,
- struct usb_request, list);
- list_del(&req->list);
-
+ spin_unlock(&dev->req_lock);
+ if (link->multi_pkt_xfer)
+ kfree(req->buf);
+ usb_ep_free_request(link->in_ep, req);
+ spin_lock(&dev->req_lock);
+ }
spin_unlock(&dev->req_lock);
- usb_ep_free_request(link->out_ep, req);
- spin_lock(&dev->req_lock);
+ link->in_ep->desc = NULL;
}
- spin_unlock(&dev->req_lock);
- spin_lock(&dev->rx_frames.lock);
- while ((skb = __skb_dequeue(&dev->rx_frames)))
- dev_kfree_skb_any(skb);
- spin_unlock(&dev->rx_frames.lock);
+ if (link->out_ep) {
+ usb_ep_disable(link->out_ep);
+ spin_lock(&dev->req_lock);
+ while (!list_empty(&dev->rx_reqs)) {
+ req = container_of(dev->rx_reqs.next,
+ struct usb_request, list);
+ list_del(&req->list);
- link->out_ep->desc = NULL;
+ spin_unlock(&dev->req_lock);
+ usb_ep_free_request(link->out_ep, req);
+ spin_lock(&dev->req_lock);
+ }
+ spin_unlock(&dev->req_lock);
+
+ spin_lock(&dev->rx_frames.lock);
+ while ((skb = __skb_dequeue(&dev->rx_frames)))
+ dev_kfree_skb_any(skb);
+ spin_unlock(&dev->rx_frames.lock);
+
+ link->out_ep->desc = NULL;
+ }
/* finish forgetting about this USB link episode */
dev->header_len = 0;
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index c20210c..f367dc5 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -45,11 +45,23 @@
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+ u16 serial_state;
+
+ /* control signal callbacks*/
+ unsigned int (*get_dtr)(struct gserial *p);
+ unsigned int (*get_rts)(struct gserial *p);
/* notification callbacks */
void (*connect)(struct gserial *p);
void (*disconnect)(struct gserial *p);
int (*send_break)(struct gserial *p, int duration);
+ unsigned int (*send_carrier_detect)(struct gserial *p,
+ unsigned int yes);
+ unsigned int (*send_ring_indicator)(struct gserial *p,
+ unsigned int yes);
+ int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits);
+ /* notification changes to modem */
+ void (*notify_modem)(void *gser, u8 portno, int ctrl_bits);
};
/* utilities to allocate/free request and buffer */
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 658b8da..f472b2b 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -389,6 +389,22 @@
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_CI13XXX_MSM
+ tristate "MIPS USB CI13xxx for MSM"
+ depends on ARCH_MSM
+ select USB_MSM_OTG
+ help
+ MSM SoC has chipidea USB controller. This driver uses
+ ci13xxx_udc core.
+ This driver depends on OTG driver for PHY initialization,
+ clock management, powering up VBUS, and power management.
+ This driver is not supported on boards like trout which
+ has an external PHY.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "ci13xxx_msm" and force all
+ gadget drivers to also be dynamically linked.
+
#
# LAST -- dummy/emulated controller
#
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 0ce6929..a1dedf0 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -193,7 +193,7 @@
{
const struct of_device_id *match;
const struct hc_driver *driver;
- struct device *sysdev;
+ struct device *sysdev, *phydev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
@@ -220,6 +220,9 @@
* 3. xhci_plat is grandchild of a pci device (dwc3-pci)
*/
sysdev = &pdev->dev;
+ phydev = &pdev->dev;
+ if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+ phydev = sysdev->parent;
/*
* If sysdev->parent->parent is available and part of IOMMU group
* (indicating possible usage of SMMU enablement), then use
@@ -327,7 +330,7 @@
if (device_property_read_u32(&pdev->dev, "usb-core-id", &xhci->core_id))
xhci->core_id = -EINVAL;
- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+ hcd->usb_phy = devm_usb_get_phy_by_phandle(phydev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 17e8edb..4e223f5 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -253,4 +253,15 @@
the high-speed PHY which is usually paired with either the ChipIdea or
Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the
PHY with a dedicated register I/O memory region.
+
+config USB_MSM_OTG
+ tristate "Qualcomm Technologies, Inc. on-chip USB OTG controller support"
+ depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
+ select USB_PHY
+ help
+ Enable this to support the USB OTG transceiver on Qualcomm chips. It
+ handles PHY initialization, clock management, and workarounds
+ required after resetting the hardware and power management.
+ This driver is required even for peripheral only or host only
+ mode configurations.
endmenu
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 285659d..7e9ffa0 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -31,3 +31,4 @@
obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o
obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o phy-msm-qusb-v2.o
obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o
+obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index cce17e0..cc1a0ea 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
#include <linux/regulator/machine.h>
#include <linux/usb/phy.h>
#include <linux/reset.h>
+#include <linux/nvmem-consumer.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
@@ -146,8 +147,65 @@
u8 tune[5];
struct hrtimer timer;
+ int soc_min_rev;
};
+#ifdef CONFIG_NVMEM
+/* Parse qfprom data for deciding on errata work-arounds */
+static long qfprom_read(struct device *dev, const char *name)
+{
+ struct nvmem_cell *cell;
+ ssize_t len = 0;
+ u32 *buf, val = 0;
+ long err = 0;
+
+ cell = nvmem_cell_get(dev, name);
+ if (IS_ERR(cell)) {
+ err = PTR_ERR(cell);
+ dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
+ /* If entry does not exist, then that is not an error */
+ if (err == -ENOENT)
+ err = 0;
+ return err;
+ }
+
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf) || !len) {
+ dev_err(dev, "Failed reading nvmem cell, err: %u, bytes fetched: %zd\n",
+ *buf, len);
+ if (!IS_ERR(buf)) {
+ kfree(buf);
+ err = -EINVAL;
+ } else {
+ err = PTR_ERR(buf);
+ }
+ } else {
+ val = *buf;
+ kfree(buf);
+ }
+
+ nvmem_cell_put(cell);
+ return err ? err : (long) val;
+}
+
+/* Reads the SoC version */
+static int qusb_phy_get_socrev(struct device *dev, struct qusb_phy *qphy)
+{
+ qphy->soc_min_rev = qfprom_read(dev, "minor_rev");
+ if (qphy->soc_min_rev < 0)
+ dev_err(dev, "failed getting soc_min_rev, err : %d\n",
+ qphy->soc_min_rev);
+
+ return qphy->soc_min_rev;
+};
+#else
+/* Reads the SoC version */
+static int qusb_phy_get_socrev(struct device *dev, struct qusb_phy *qphy)
+{
+ return 0;
+}
+#endif
+
static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
{
dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
@@ -1125,6 +1183,11 @@
return PTR_ERR(qphy->vdda18);
}
+ ret = qusb_phy_get_socrev(&pdev->dev, qphy);
+ if (ret == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
+ return ret;
+ }
qphy->pinctrl = devm_pinctrl_get(dev);
if (IS_ERR(qphy->pinctrl)) {
ret = PTR_ERR(qphy->pinctrl);
@@ -1159,7 +1222,14 @@
qphy->phy.type = USB_PHY_TYPE_USB2;
qphy->phy.notify_connect = qusb_phy_notify_connect;
qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
- qphy->phy.disable_chirp = qusb_phy_disable_chirp;
+
+ /*
+ * qusb_phy_disable_chirp is not required if soc version is
+ * mentioned and is not base version.
+ */
+ if (qphy->soc_min_rev == 0)
+ qphy->phy.disable_chirp = qusb_phy_disable_chirp;
+
qphy->phy.start_port_reset = qusb_phy_enable_ext_pulldown;
ret = usb_add_phy_dev(&qphy->phy);
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
new file mode 100644
index 0000000..c5cdddc
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -0,0 +1,5473 @@
+/* Copyright (c) 2009-2018, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/irqchip/msm-mpm-irq.h>
+#include <linux/pm_wakeup.h>
+#include <linux/reset.h>
+#include <linux/extcon.h>
+#include <soc/qcom/scm.h>
+
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/qpnp/qpnp-adc.h>
+
+#include <linux/msm-bus.h>
+
+/**
+ * Requested USB votes for BUS bandwidth
+ *
+ * USB_NO_PERF_VOTE BUS Vote for inactive USB session or disconnect
+ * USB_MAX_PERF_VOTE Maximum BUS bandwidth vote
+ * USB_MIN_PERF_VOTE Minimum BUS bandwidth vote (for some hw same as NO_PERF)
+ *
+ */
+enum usb_bus_vote {
+ USB_NO_PERF_VOTE = 0,
+ USB_MAX_PERF_VOTE,
+ USB_MIN_PERF_VOTE,
+};
+
+/**
+ * Supported USB modes
+ *
+ * USB_PERIPHERAL Only peripheral mode is supported.
+ * USB_HOST Only host mode is supported.
+ * USB_OTG OTG mode is supported.
+ *
+ */
+enum usb_mode_type {
+ USB_NONE = 0,
+ USB_PERIPHERAL,
+ USB_HOST,
+ USB_OTG,
+};
+
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
+ * only configuration.
+ * OTG_PHY_CONTROL Id/VBUS notifications come from USB PHY.
+ * OTG_PMIC_CONTROL Id/VBUS notifications comes from PMIC hardware.
+ * OTG_USER_CONTROL Id/VBUS notifications come from user via sysfs.
+ *
+ */
+enum otg_control_type {
+ OTG_NO_CONTROL = 0,
+ OTG_PHY_CONTROL,
+ OTG_PMIC_CONTROL,
+ OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY Unsupported PHY
+ * CI_PHY Chipidea PHY
+ * SNPS_PICO_PHY Synopsys Pico PHY
+ * SNPS_FEMTO_PHY Synopsys Femto PHY
+ * QUSB_ULPI_PHY
+ *
+ */
+enum msm_usb_phy_type {
+ INVALID_PHY = 0,
+ CI_PHY, /* not supported */
+ SNPS_PICO_PHY,
+ SNPS_FEMTO_PHY,
+ QUSB_ULPI_PHY,
+};
+
+#define IDEV_CHG_MAX 1500
+#define IUNIT 100
+#define IDEV_HVDCP_CHG_MAX 1800
+
+/**
+ * Used different VDDCX voltage values
+ */
+enum usb_vdd_value {
+ VDD_NONE = 0,
+ VDD_MIN,
+ VDD_MAX,
+ VDD_VAL_MAX,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ * for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
+ * "do not overwrite default value at this address".
+ * @vbus_power: VBUS power on/off routine. It should return result
+ * as success(zero value) or failure(non-zero value).
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ * @default_mode: Default operational mode. Applicable only if
+ * OTG switch is controlled by user.
+ * @pmic_id_irq: IRQ number assigned for PMIC USB ID line.
+ * @mpm_otgsessvld_int: MPM wakeup pin assigned for OTG SESSVLD
+ * interrupt. Used when .otg_control == OTG_PHY_CONTROL.
+ * @mpm_dpshv_int: MPM wakeup pin assigned for DP SHV interrupt.
+ * Used during host bus suspend.
+ * @mpm_dmshv_int: MPM wakeup pin assigned for DM SHV interrupt.
+ * Used during host bus suspend.
+ * @disable_reset_on_disconnect: perform USB PHY and LINK reset
+ * on USB cable disconnection.
+ * @pnoc_errata_fix: workaround needed for PNOC hardware bug that
+ * affects USB performance.
+ * @enable_lpm_on_suspend: Enable the USB core to go into Low
+ * Power Mode, when USB bus is suspended but cable
+ * is connected.
+ * @core_clk_always_on_workaround: Don't disable core_clk when
+ * USB enters LPM.
+ * @delay_lpm_on_disconnect: Use a delay before entering LPM
+ * upon USB cable disconnection.
+ * @enable_sec_phy: Use second HSPHY with USB2 core
+ * @bus_scale_table: parameters for bus bandwidth requirements
+ * @log2_itc: value of 2^(log2_itc-1) will be used as the
+ * interrupt threshold (ITC), when log2_itc is
+ * between 1 to 7.
+ * @l1_supported: enable link power management support.
+ * @dpdm_pulldown_added: Indicates whether pull down resistors are
+ * connected on data lines or not.
+ * @vddmin_gpio: dedicated GPIO in the platform that is used to
+ * pull up the D+ line in case of bus suspend with
+ * phy retention.
+ * @enable_ahb2ahb_bypass: Indicates whether enable AHB2AHB BYPASS
+ * mode with controller in device mode.
+ * @bool disable_retention_with_vdd_min: Indicates whether to enable
+ allowing VDDmin without putting PHY into retention.
+ * @bool enable_phy_id_pullup: Indicates whether phy id pullup is
+ enabled or not.
+ * @usb_id_gpio: Gpio used for USB ID detection.
+ * @hub_reset_gpio: Gpio used for hub reset.
+ * @switch_sel_gpio: Gpio used for controlling switch that
routes D+/D- from the USB HUB to the USB jack type B
+ for peripheral mode.
+ * @bool phy_dvdd_always_on: PHY DVDD is supplied by always on PMIC LDO.
+ * @bool emulation: Indicates whether we are running on emulation platform.
+ * @bool enable_streaming: Indicates whether streaming to be enabled by default.
+ * @bool enable_axi_prefetch: Indicates whether AXI Prefetch interface is used
+ for improving data performance.
+ * @bool enable_sdp_typec_current_limit: Indicates whether type-c current for
+ sdp charger to be limited.
+ * @usbeth_reset_gpio: Gpio used for external usb-to-eth reset.
+ */
+struct msm_otg_platform_data {
+ int *phy_init_seq;
+ int phy_init_sz;
+ int (*vbus_power)(bool on);
+ unsigned int power_budget;
+ enum usb_mode_type mode;
+ enum otg_control_type otg_control;
+ enum usb_mode_type default_mode;
+ enum msm_usb_phy_type phy_type;
+ int pmic_id_irq;
+ unsigned int mpm_otgsessvld_int;
+ unsigned int mpm_dpshv_int;
+ unsigned int mpm_dmshv_int;
+ bool disable_reset_on_disconnect;
+ bool pnoc_errata_fix;
+ bool enable_lpm_on_dev_suspend;
+ bool core_clk_always_on_workaround;
+ bool delay_lpm_on_disconnect;
+ bool dp_manual_pullup;
+ bool enable_sec_phy;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ int log2_itc;
+ bool l1_supported;
+ bool dpdm_pulldown_added;
+ int vddmin_gpio;
+ bool enable_ahb2ahb_bypass;
+ bool disable_retention_with_vdd_min;
+ bool enable_phy_id_pullup;
+ int usb_id_gpio;
+ int hub_reset_gpio;
+ int usbeth_reset_gpio;
+ int switch_sel_gpio;
+ bool phy_dvdd_always_on;
+ bool emulation;
+ bool enable_streaming;
+ bool enable_axi_prefetch;
+ bool enable_sdp_typec_current_limit;
+ bool vbus_low_as_hostmode;
+};
+
+#define USB_CHG_BLOCK_ULPI 1
+
+#define USB_REQUEST_5V 1
+#define USB_REQUEST_9V 2
+/**
+ * struct msm_usb_chg_info - MSM USB charger block details.
+ * @chg_block_type: The type of charger block. QSCRATCH/ULPI.
+ * @page_offset: USB charger register base may not be aligned to
+ * PAGE_SIZE. The kernel driver aligns the base
+ * address and uses it for memory mapping. This
+ * page_offset is used by user space to calculate
+ * the correct charger register base address.
+ * @length: The length of the charger register address space.
+ */
+struct msm_usb_chg_info {
+ uint32_t chg_block_type;
+ __kernel_off_t page_offset;
+ size_t length;
+};
+
+/* Get the MSM USB charger block information */
+#define MSM_USB_EXT_CHG_INFO _IOW('M', 0, struct msm_usb_chg_info)
+
+/* Vote against USB hardware low power mode */
+#define MSM_USB_EXT_CHG_BLOCK_LPM _IOW('M', 1, int)
+
+/* To tell kernel about voltage being voted */
+#define MSM_USB_EXT_CHG_VOLTAGE_INFO _IOW('M', 2, int)
+
+/* To tell kernel about voltage request result */
+#define MSM_USB_EXT_CHG_RESULT _IOW('M', 3, int)
+
+/* To tell kernel whether charger connected is external charger or not */
+#define MSM_USB_EXT_CHG_TYPE _IOW('M', 4, int)
+
+#define MSM_USB_BASE (motg->regs)
+#define MSM_USB_PHY_CSR_BASE (motg->phy_csr_regs)
+
+#define DRIVER_NAME "msm_otg"
+
+#define CHG_RECHECK_DELAY (jiffies + msecs_to_jiffies(2000))
+#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
+#define USB_PHY_3P3_VOL_MIN 3050000 /* uV */
+#define USB_PHY_3P3_VOL_MAX 3300000 /* uV */
+#define USB_PHY_3P3_HPM_LOAD 50000 /* uA */
+#define USB_PHY_3P3_LPM_LOAD 4000 /* uA */
+
+#define USB_PHY_1P8_VOL_MIN 1800000 /* uV */
+#define USB_PHY_1P8_VOL_MAX 1800000 /* uV */
+#define USB_PHY_1P8_HPM_LOAD 50000 /* uA */
+#define USB_PHY_1P8_LPM_LOAD 4000 /* uA */
+
+#define USB_PHY_VDD_DIG_VOL_NONE 0 /*uV */
+#define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */
+#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
+
+#define USB_SUSPEND_DELAY_TIME (500 * HZ/1000) /* 500 msec */
+
+#define USB_DEFAULT_SYSTEM_CLOCK 80000000 /* 80 MHz */
+
+#define PM_QOS_SAMPLE_SEC 2
+#define PM_QOS_THRESHOLD 400
+
+#define MICRO_5V 5000000
+#define MICRO_9V 9000000
+
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+
+enum msm_otg_phy_reg_mode {
+ USB_PHY_REG_OFF,
+ USB_PHY_REG_ON,
+ USB_PHY_REG_LPM_ON,
+ USB_PHY_REG_LPM_OFF,
+ USB_PHY_REG_3P3_ON,
+ USB_PHY_REG_3P3_OFF,
+};
+
+static char *override_phy_init;
+module_param(override_phy_init, charp, 0644);
+MODULE_PARM_DESC(override_phy_init,
+ "Override HSUSB PHY Init Settings");
+
+unsigned int lpm_disconnect_thresh = 1000;
+module_param(lpm_disconnect_thresh, uint, 0644);
+MODULE_PARM_DESC(lpm_disconnect_thresh,
+ "Delay before entering LPM on USB disconnect");
+
+static bool floated_charger_enable;
+module_param(floated_charger_enable, bool, 0644);
+MODULE_PARM_DESC(floated_charger_enable,
+ "Whether to enable floated charger");
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg_log = 1;
+module_param(enable_dbg_log, uint, 0644);
+MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events");
+
+/* Max current to be drawn for HVDCP charger */
+static int hvdcp_max_current = IDEV_HVDCP_CHG_MAX;
+module_param(hvdcp_max_current, int, 0644);
+MODULE_PARM_DESC(hvdcp_max_current, "max current drawn for HVDCP charger");
+
+/* Max current to be drawn for DCP charger */
+static int dcp_max_current = IDEV_CHG_MAX;
+module_param(dcp_max_current, int, 0644);
+MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
+
+static DECLARE_COMPLETION(pmic_vbus_init);
+static struct msm_otg *the_msm_otg;
+static bool debug_bus_voting_enabled;
+
+static struct regulator *hsusb_3p3;
+static struct regulator *hsusb_1p8;
+static struct regulator *hsusb_vdd;
+static struct regulator *vbus_otg;
+static struct power_supply *psy;
+
+static int vdd_val[VDD_VAL_MAX];
+static u32 bus_freqs[USB_NOC_NUM_VOTE][USB_NUM_BUS_CLOCKS] /*bimc,snoc,pcnoc*/;
+static char bus_clkname[USB_NUM_BUS_CLOCKS][20] = {"bimc_clk", "snoc_clk",
+ "pcnoc_clk"};
+static bool bus_clk_rate_set;
+
+static void dbg_inc(unsigned int *idx)
+{
+ *idx = (*idx + 1) & (DEBUG_MAX_MSG-1);
+}
+
+static void
+msm_otg_dbg_log_event(struct usb_phy *phy, char *event, int d1, int d2)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ unsigned long flags;
+ unsigned long long t;
+ unsigned long nanosec;
+
+ if (!enable_dbg_log)
+ return;
+
+ write_lock_irqsave(&motg->dbg_lock, flags);
+ t = cpu_clock(smp_processor_id());
+ nanosec = do_div(t, 1000000000)/1000;
+ scnprintf(motg->buf[motg->dbg_idx], DEBUG_MSG_LEN,
+ "[%5lu.%06lu]: %s :%d:%d",
+ (unsigned long)t, nanosec, event, d1, d2);
+
+ motg->dbg_idx++;
+ motg->dbg_idx = motg->dbg_idx % DEBUG_MAX_MSG;
+ write_unlock_irqrestore(&motg->dbg_lock, flags);
+}
+
+/*
+ * msm_hsusb_ldo_init - acquire and configure the HSUSB analog supplies.
+ * @motg: driver context
+ * @init: non-zero to get the 3p3/1p8 regulators and request their voltage
+ *        ranges; zero to drop the voltage requests again (deinit).
+ *
+ * Note the unusual control flow: when @init is zero, execution falls
+ * straight past the if-block into the put_1p8/put_3p3_lpm labels with
+ * rc == 0, so the same code serves both the init error-unwind and the
+ * deinit path.  Returns 0 on success or a negative errno.
+ */
+static int msm_hsusb_ldo_init(struct msm_otg *motg, int init)
+{
+	int rc = 0;
+
+	if (init) {
+		hsusb_3p3 = devm_regulator_get(motg->phy.dev, "HSUSB_3p3");
+		if (IS_ERR(hsusb_3p3)) {
+			dev_err(motg->phy.dev, "unable to get hsusb 3p3\n");
+			return PTR_ERR(hsusb_3p3);
+		}
+
+		rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN,
+				USB_PHY_3P3_VOL_MAX);
+		if (rc) {
+			dev_err(motg->phy.dev, "unable to set voltage level for hsusb 3p3\n"
+									);
+			return rc;
+		}
+		hsusb_1p8 = devm_regulator_get(motg->phy.dev, "HSUSB_1p8");
+		if (IS_ERR(hsusb_1p8)) {
+			dev_err(motg->phy.dev, "unable to get hsusb 1p8\n");
+			rc = PTR_ERR(hsusb_1p8);
+			goto put_3p3_lpm;
+		}
+		rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN,
+				USB_PHY_1P8_VOL_MAX);
+		if (rc) {
+			dev_err(motg->phy.dev, "unable to set voltage level for hsusb 1p8\n"
+									);
+			goto put_1p8;
+		}
+
+		return 0;
+	}
+
+	/* reached by fall-through when !init, or by goto on init failure */
+put_1p8:
+	regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX);
+put_3p3_lpm:
+	regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX);
+	return rc;
+}
+
+/*
+ * Select the VDDCX corner for the USB PHY rail: vdd_val[1] when @high is
+ * non-zero, vdd_val[0] otherwise, with vdd_val[VDD_MAX] as the ceiling.
+ */
+static int msm_hsusb_config_vddcx(int high)
+{
+	struct msm_otg *motg = the_msm_otg;
+	int floor_vol = vdd_val[high ? 1 : 0];
+	int ceil_vol = vdd_val[VDD_MAX];
+	int rc;
+
+	rc = regulator_set_voltage(hsusb_vdd, floor_vol, ceil_vol);
+	if (rc) {
+		pr_err("%s: unable to set the voltage for regulator HSUSB_VDDCX\n",
+				__func__);
+		return rc;
+	}
+
+	pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, floor_vol, ceil_vol);
+	msm_otg_dbg_log_event(&motg->phy, "CONFIG VDDCX", floor_vol, ceil_vol);
+
+	return rc;
+}
+
+/*
+ * msm_hsusb_ldo_enable - move the HSUSB 1p8/3p3 supplies between states.
+ * @motg: driver context
+ * @mode: USB_PHY_REG_ON/OFF enable or disable both LDOs (the *_3P3_ON/OFF
+ *        variants handle only hsusb_3p3); USB_PHY_REG_LPM_ON/OFF switch
+ *        the already-enabled LDOs between low- and high-power load levels.
+ *
+ * The ON and OFF cases deliberately fall through to their 3P3-only
+ * counterparts so the 3p3 handling is shared.  On any failure, work done
+ * earlier in the same case is rolled back.  Returns 0 on success or a
+ * negative errno.
+ */
+static int msm_hsusb_ldo_enable(struct msm_otg *motg,
+					enum msm_otg_phy_reg_mode mode)
+{
+	int ret = 0;
+
+	if (IS_ERR(hsusb_1p8)) {
+		pr_err("%s: HSUSB_1p8 is not initialized\n", __func__);
+		return -ENODEV;
+	}
+
+	if (IS_ERR(hsusb_3p3)) {
+		pr_err("%s: HSUSB_3p3 is not initialized\n", __func__);
+		return -ENODEV;
+	}
+
+	switch (mode) {
+	case USB_PHY_REG_ON:
+		ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
+		if (ret < 0) {
+			pr_err("%s: Unable to set HPM of the regulator HSUSB_1p8\n",
+				__func__);
+			return ret;
+		}
+
+		ret = regulator_enable(hsusb_1p8);
+		if (ret) {
+			dev_err(motg->phy.dev, "%s: unable to enable the hsusb 1p8\n",
+				__func__);
+			regulator_set_load(hsusb_1p8, 0);
+			return ret;
+		}
+
+		/* fall through */
+	case USB_PHY_REG_3P3_ON:
+		ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD);
+		if (ret < 0) {
+			pr_err("%s: Unable to set HPM of the regulator HSUSB_3p3\n",
+				__func__);
+			if (mode == USB_PHY_REG_ON) {
+				regulator_set_load(hsusb_1p8, 0);
+				regulator_disable(hsusb_1p8);
+			}
+			return ret;
+		}
+
+		ret = regulator_enable(hsusb_3p3);
+		if (ret) {
+			dev_err(motg->phy.dev, "%s: unable to enable the hsusb 3p3\n",
+				__func__);
+			regulator_set_load(hsusb_3p3, 0);
+			if (mode == USB_PHY_REG_ON) {
+				regulator_set_load(hsusb_1p8, 0);
+				regulator_disable(hsusb_1p8);
+			}
+			return ret;
+		}
+
+		break;
+
+	case USB_PHY_REG_OFF:
+		ret = regulator_disable(hsusb_1p8);
+		if (ret) {
+			dev_err(motg->phy.dev, "%s: unable to disable the hsusb 1p8\n",
+				__func__);
+			return ret;
+		}
+
+		ret = regulator_set_load(hsusb_1p8, 0);
+		if (ret < 0)
+			pr_err("%s: Unable to set LPM of the regulator HSUSB_1p8\n",
+				__func__);
+
+		/* fall through */
+	case USB_PHY_REG_3P3_OFF:
+		ret = regulator_disable(hsusb_3p3);
+		if (ret) {
+			dev_err(motg->phy.dev, "%s: unable to disable the hsusb 3p3\n",
+				 __func__);
+			return ret;
+		}
+		ret = regulator_set_load(hsusb_3p3, 0);
+		if (ret < 0)
+			pr_err("%s: Unable to set LPM of the regulator HSUSB_3p3\n",
+				__func__);
+
+		break;
+
+	case USB_PHY_REG_LPM_ON:
+		ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_LPM_LOAD);
+		if (ret < 0) {
+			pr_err("%s: Unable to set LPM of the regulator: HSUSB_1p8\n",
+				__func__);
+			return ret;
+		}
+
+		ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_LPM_LOAD);
+		if (ret < 0) {
+			pr_err("%s: Unable to set LPM of the regulator: HSUSB_3p3\n",
+				__func__);
+			/*
+			 * Restore 1p8 to its previous high-power load.  The
+			 * old code passed the USB_PHY_REG_ON enum constant
+			 * where regulator_set_load() expects a microamp
+			 * load, effectively clearing the load request.
+			 */
+			regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
+			return ret;
+		}
+
+		break;
+
+	case USB_PHY_REG_LPM_OFF:
+		ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
+		if (ret < 0) {
+			pr_err("%s: Unable to set HPM of the regulator: HSUSB_1p8\n",
+				__func__);
+			return ret;
+		}
+
+		ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD);
+		if (ret < 0) {
+			pr_err("%s: Unable to set HPM of the regulator: HSUSB_3p3\n",
+				__func__);
+			/* roll 1p8 back to the low-power load (see above) */
+			regulator_set_load(hsusb_1p8, USB_PHY_1P8_LPM_LOAD);
+			return ret;
+		}
+
+		break;
+
+	default:
+		pr_err("%s: Unsupported mode (%d).", __func__, mode);
+		return -ENOTSUPP;
+	}
+
+	pr_debug("%s: USB reg mode (%d) (OFF/HPM/LPM)\n", __func__, mode);
+	msm_otg_dbg_log_event(&motg->phy, "USB REG MODE", mode, ret);
+	return ret < 0 ? ret : 0;
+}
+
+/*
+ * ulpi_read - read a PHY register through the ULPI viewport.
+ * Returns the register value; 0 when the access is skipped (emulation
+ * platforms, or vendor-specific registers >0x3F on QUSB ULPI PHYs);
+ * -ETIMEDOUT if the viewport RUN bit never clears.
+ */
+static int ulpi_read(struct usb_phy *phy, u32 reg)
+{
+	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+	int cnt = 0;
+
+	if (motg->pdata->emulation)
+		return 0;
+
+	if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) {
+		pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n",
+			__func__, reg);
+		return 0;
+	}
+
+	/* initiate read operation */
+	writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion: RUN clears when the PHY access finishes */
+	while (cnt < ULPI_IO_TIMEOUT_USEC) {
+		if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
+			break;
+		udelay(1);
+		cnt++;
+	}
+
+	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
+		dev_err(phy->dev, "ulpi_read: timeout %08x\n",
+			readl_relaxed(USB_ULPI_VIEWPORT));
+		dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n",
+			readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD));
+		return -ETIMEDOUT;
+	}
+	return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
+}
+
+/*
+ * ulpi_write - write @val to PHY register @reg through the ULPI viewport.
+ * Returns 0 on success (or when skipped for emulation / unsupported
+ * vendor registers, mirroring ulpi_read); -ETIMEDOUT if the viewport
+ * RUN bit never clears.
+ */
+static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+{
+	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+	int cnt = 0;
+
+	if (motg->pdata->emulation)
+		return 0;
+
+	if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) {
+		pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n",
+			__func__, reg);
+		return 0;
+	}
+
+	/* initiate write operation */
+	writel_relaxed(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion: RUN clears when the PHY access finishes */
+	while (cnt < ULPI_IO_TIMEOUT_USEC) {
+		if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
+			break;
+		udelay(1);
+		cnt++;
+	}
+
+	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
+		dev_err(phy->dev, "ulpi_write: timeout\n");
+		dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n",
+			readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD));
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* ULPI register accessors exposed through the generic usb_phy interface */
+static struct usb_phy_io_ops msm_otg_io_ops = {
+	.read = ulpi_read,
+	.write = ulpi_write,
+};
+
+/*
+ * ulpi_init - program the PHY override registers with a {value, address}
+ * pair sequence terminated by a negative value.  The sequence comes from
+ * the override_phy_init module parameter when set (parsed via
+ * get_options), otherwise from platform data; no-op when neither exists.
+ */
+static void ulpi_init(struct msm_otg *motg)
+{
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	int aseq[10];
+	int *seq = NULL;
+
+	if (override_phy_init) {
+		pr_debug("%s(): HUSB PHY Init:%s\n", __func__,
+				override_phy_init);
+		get_options(override_phy_init, ARRAY_SIZE(aseq), aseq);
+		/* aseq[0] holds the parsed count; values start at aseq[1] */
+		seq = &aseq[1];
+	} else {
+		seq = pdata->phy_init_seq;
+	}
+
+	if (!seq)
+		return;
+
+	while (seq[0] >= 0) {
+		if (override_phy_init)
+			pr_debug("ulpi: write 0x%02x to 0x%02x\n",
+					seq[0], seq[1]);
+
+		dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n",
+				seq[0], seq[1]);
+		msm_otg_dbg_log_event(&motg->phy, "ULPI WRITE", seq[0], seq[1]);
+		ulpi_write(&motg->phy, seq[0], seq[1]);
+		seq += 2;
+	}
+}
+
+/*
+ * msm_otg_phy_clk_reset - pulse the PHY POR reset line with the sleep and
+ * PHY CSR clocks gated, honouring the databook's 10us assert width and
+ * 75us post-reset stabilization time.  No-op (returns 0) when the
+ * platform has no phy_reset_clk.
+ *
+ * NOTE(review): the clk_prepare_enable() return values on the re-enable
+ * path are ignored here — confirm that is acceptable for these clocks.
+ */
+static int msm_otg_phy_clk_reset(struct msm_otg *motg)
+{
+	int ret;
+
+	if (!motg->phy_reset_clk)
+		return 0;
+
+	if (motg->sleep_clk)
+		clk_disable_unprepare(motg->sleep_clk);
+	if (motg->phy_csr_clk)
+		clk_disable_unprepare(motg->phy_csr_clk);
+
+	ret = reset_control_assert(motg->phy_reset);
+	if (ret) {
+		pr_err("phy_reset_clk assert failed %d\n", ret);
+		return ret;
+	}
+	/*
+	 * As per databook, 10 usec delay is required between
+	 * PHY POR assert and de-assert.
+	 */
+	usleep_range(10, 15);
+	ret = reset_control_deassert(motg->phy_reset);
+	if (ret) {
+		pr_err("phy_reset_clk de-assert failed %d\n", ret);
+		return ret;
+	}
+	/*
+	 * As per databook, it takes 75 usec for PHY to stabilize
+	 * after the reset.
+	 */
+	usleep_range(80, 100);
+
+	if (motg->phy_csr_clk)
+		clk_prepare_enable(motg->phy_csr_clk);
+	if (motg->sleep_clk)
+		clk_prepare_enable(motg->sleep_clk);
+
+	return 0;
+}
+
+/*
+ * msm_otg_link_clk_reset - assert or deassert the USB core block reset.
+ * On assert the core clocks are gated first; on deassert they are
+ * re-enabled ~200ns after the reset is released.
+ *
+ * NOTE(review): in the deassert branch, ret from reset_control_deassert()
+ * is overwritten by the clk_prepare_enable() calls, so the final
+ * "hs_clk deassert failed" message actually reports a pclk enable
+ * failure — confirm whether that matches the intent.
+ */
+static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
+{
+	int ret;
+
+	if (assert) {
+		/* Using asynchronous block reset to the hardware */
+		dev_dbg(motg->phy.dev, "block_reset ASSERT\n");
+		clk_disable_unprepare(motg->pclk);
+		clk_disable_unprepare(motg->core_clk);
+		ret = reset_control_assert(motg->core_reset);
+		if (ret)
+			dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
+	} else {
+		dev_dbg(motg->phy.dev, "block_reset DEASSERT\n");
+		ret = reset_control_deassert(motg->core_reset);
+		ndelay(200);
+		ret = clk_prepare_enable(motg->core_clk);
+		WARN(ret, "USB core_clk enable failed\n");
+		ret = clk_prepare_enable(motg->pclk);
+		WARN(ret, "USB pclk enable failed\n");
+		if (ret)
+			dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
+	}
+	return ret;
+}
+
+/*
+ * msm_otg_phy_reset - perform the combined link + PHY async block reset.
+ * Disables AHB2AHB bypass if it was left enabled (it must be off during
+ * async reset), asserts the link reset, pulses the PHY reset, releases
+ * the link reset, then restores the secondary-PHY select bit and forces
+ * the port back to ULPI mode in PORTSC.  Returns 0 on success.
+ */
+static int msm_otg_phy_reset(struct msm_otg *motg)
+{
+	u32 val;
+	int ret;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	/*
+	 * AHB2AHB Bypass mode shouldn't be enable before doing
+	 * async clock reset. If it is enable, disable the same.
+	 */
+	val = readl_relaxed(USB_AHBMODE);
+	if (val & AHB2AHB_BYPASS) {
+		pr_err("%s(): AHB2AHB_BYPASS SET: AHBMODE:%x\n",
+				__func__, val);
+		val &= ~AHB2AHB_BYPASS_BIT_MASK;
+		writel_relaxed(val | AHB2AHB_BYPASS_CLEAR, USB_AHBMODE);
+		pr_err("%s(): AHBMODE: %x\n", __func__,
+				readl_relaxed(USB_AHBMODE));
+	}
+
+	ret = msm_otg_link_clk_reset(motg, 1);
+	if (ret)
+		return ret;
+
+	msm_otg_phy_clk_reset(motg);
+
+	/* wait for 1ms delay as suggested in HPG. */
+	usleep_range(1000, 1200);
+
+	ret = msm_otg_link_clk_reset(motg, 0);
+	if (ret)
+		return ret;
+
+	/* bit 16 selects the secondary PHY when platform data asks for it */
+	if (pdata && pdata->enable_sec_phy)
+		writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+								USB_PHY_CTRL2);
+	val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
+	writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
+
+	dev_info(motg->phy.dev, "phy_reset: success\n");
+	msm_otg_dbg_log_event(&motg->phy, "PHY RESET SUCCESS",
+			motg->inputs, motg->phy.otg->state);
+	return 0;
+}
+
+#define LINK_RESET_TIMEOUT_USEC	(250 * 1000)
+/*
+ * msm_otg_link_reset - reset the USB link controller via USBCMD.RESET and
+ * poll up to 250ms for the bit to self-clear, then re-select the ULPI PHY
+ * and reprogram AHB burst/mode.  Returns -ETIMEDOUT if RESET never clears.
+ */
+static int msm_otg_link_reset(struct msm_otg *motg)
+{
+	int cnt = 0;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	writel_relaxed(USBCMD_RESET, USB_USBCMD);
+	while (cnt < LINK_RESET_TIMEOUT_USEC) {
+		if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET))
+			break;
+		udelay(1);
+		cnt++;
+	}
+	if (cnt >= LINK_RESET_TIMEOUT_USEC)
+		return -ETIMEDOUT;
+
+	/* select ULPI phy */
+	writel_relaxed(0x80000000, USB_PORTSC);
+	writel_relaxed(0x0, USB_AHBBURST);
+	writel_relaxed(0x08, USB_AHBMODE);
+
+	/* bit 16 selects the secondary PHY when platform data asks for it */
+	if (pdata && pdata->enable_sec_phy)
+		writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+								USB_PHY_CTRL2);
+	return 0;
+}
+
+#define QUSB2PHY_PORT_POWERDOWN		0xB4
+#define QUSB2PHY_PORT_UTMI_CTRL2	0xC4
+
+/*
+ * msm_usb_phy_reset - PHY-type specific power-on-reset sequence.
+ *
+ * SNPS_PICO_PHY:  toggle PHY_POR through the PHY control register with a
+ *                 10us assert width.
+ * QUSB_ULPI_PHY:  pulse the PHY reset line, then reprogram the CSR
+ *                 power-down/UTMI registers and replay the DT tuning
+ *                 sequence ({offset, value} pairs, negative-terminated).
+ * SNPS_FEMTO_PHY: pulse phy_por_reset; SIDDQ is cleared while the reset
+ *                 is asserted so the PHY leaves reset powered up.
+ * Unknown PHY types are a no-op.  All paths end with an mb() so the
+ * register writes complete before the caller proceeds.
+ */
+static void msm_usb_phy_reset(struct msm_otg *motg)
+{
+	u32 val;
+	int ret, *seq;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_PICO_PHY:
+		/* Assert USB PHY_PON */
+		val =  readl_relaxed(motg->usb_phy_ctrl_reg);
+		val &= ~PHY_POR_BIT_MASK;
+		val |= PHY_POR_ASSERT;
+		writel_relaxed(val, motg->usb_phy_ctrl_reg);
+
+		/* wait for minimum 10 microseconds as
+		 * suggested in HPG.
+		 */
+		usleep_range(10, 15);
+
+		/* Deassert USB PHY_PON */
+		val =  readl_relaxed(motg->usb_phy_ctrl_reg);
+		val &= ~PHY_POR_BIT_MASK;
+		val |= PHY_POR_DEASSERT;
+		writel_relaxed(val, motg->usb_phy_ctrl_reg);
+		break;
+	case QUSB_ULPI_PHY:
+		ret = reset_control_assert(motg->phy_reset);
+		if (ret) {
+			pr_err("phy_reset_clk assert failed %d\n", ret);
+			break;
+		}
+
+		/* need to delay 10us for PHY to reset */
+		usleep_range(10, 20);
+
+		ret = reset_control_deassert(motg->phy_reset);
+		if (ret) {
+			pr_err("phy_reset_clk de-assert failed %d\n", ret);
+			break;
+		}
+
+		/* Ensure that RESET operation is completed. */
+		mb();
+
+		writel_relaxed(0x23,
+				motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN);
+		writel_relaxed(0x0,
+				motg->phy_csr_regs + QUSB2PHY_PORT_UTMI_CTRL2);
+
+		/* Program tuning parameters for PHY */
+		seq = motg->pdata->phy_init_seq;
+		if (seq) {
+			while (seq[0] >= 0) {
+				writel_relaxed(seq[1],
+						motg->phy_csr_regs + seq[0]);
+				seq += 2;
+			}
+		}
+
+		/* ensure above writes are completed before re-enabling PHY */
+		wmb();
+		writel_relaxed(0x22,
+				motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN);
+		break;
+	case SNPS_FEMTO_PHY:
+		if (!motg->phy_por_clk) {
+			pr_err("phy_por_clk missing\n");
+			break;
+		}
+		ret = reset_control_assert(motg->phy_por_reset);
+		if (ret) {
+			pr_err("phy_por_clk assert failed %d\n", ret);
+			break;
+		}
+		/*
+		 * The Femto PHY is POR reset in the following scenarios.
+		 *
+		 * 1. After overriding the parameter registers.
+		 * 2. Low power mode exit from PHY retention.
+		 *
+		 * Ensure that SIDDQ is cleared before bringing the PHY
+		 * out of reset.
+		 *
+		 */
+
+		val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0);
+		val &= ~SIDDQ;
+		writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0);
+
+		/*
+		 * As per databook, 10 usec delay is required between
+		 * PHY POR assert and de-assert.
+		 */
+		usleep_range(10, 20);
+		ret = reset_control_deassert(motg->phy_por_reset);
+		if (ret) {
+			pr_err("phy_por_clk de-assert failed %d\n", ret);
+			break;
+		}
+		/*
+		 * As per databook, it takes 75 usec for PHY to stabilize
+		 * after the reset.
+		 */
+		usleep_range(80, 100);
+		break;
+	default:
+		break;
+	}
+	/* Ensure that RESET operation is completed. */
+	mb();
+}
+
+/*
+ * msm_otg_reset - full controller and PHY reset.
+ * Resets the PHY and link, replays the ULPI override sequence, restores
+ * OTGSC/ULPI interrupt enables for the configured otg_control method,
+ * re-enables AXI prefetch if requested, and disables the USB BAM (block
+ * reset clears its registers).  When disable_reset_on_disconnect is set,
+ * only the first call — or an error recovery flagged by
+ * motg->err_event_seen — actually touches the hardware.
+ * Returns 0 on success or the error from the phy/link reset step.
+ */
+static int msm_otg_reset(struct usb_phy *phy)
+{
+	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	int ret;
+	u32 val = 0;
+	u32 ulpi_val = 0;
+
+	msm_otg_dbg_log_event(&motg->phy, "USB RESET", phy->otg->state,
+			get_pm_runtime_counter(phy->dev));
+	/*
+	 * USB PHY and Link reset also reset the USB BAM.
+	 * Thus perform reset operation only once to avoid
+	 * USB BAM reset on other cases e.g. USB cable disconnections.
+	 * If hardware reported error then it must be reset for recovery.
+	 */
+	if (motg->err_event_seen)
+		dev_info(phy->dev, "performing USB h/w reset for recovery\n");
+	else if (pdata->disable_reset_on_disconnect && motg->reset_counter)
+		return 0;
+
+	motg->reset_counter++;
+
+	/* keep both IRQs quiet while the hardware is being reset */
+	disable_irq(motg->irq);
+	if (motg->phy_irq)
+		disable_irq(motg->phy_irq);
+
+	ret = msm_otg_phy_reset(motg);
+	if (ret) {
+		dev_err(phy->dev, "phy_reset failed\n");
+		if (motg->phy_irq)
+			enable_irq(motg->phy_irq);
+
+		enable_irq(motg->irq);
+		return ret;
+	}
+
+	if (motg->phy_irq)
+		enable_irq(motg->phy_irq);
+
+	enable_irq(motg->irq);
+	ret = msm_otg_link_reset(motg);
+	if (ret) {
+		dev_err(phy->dev, "link reset failed\n");
+		return ret;
+	}
+
+	msleep(100);
+
+	/* Reset USB PHY after performing USB Link RESET */
+	msm_usb_phy_reset(motg);
+
+	/* Program USB PHY Override registers. */
+	ulpi_init(motg);
+
+	/*
+	 * It is required to reset USB PHY after programming
+	 * the USB PHY Override registers to get the new
+	 * values into effect.
+	 */
+	msm_usb_phy_reset(motg);
+
+	if (pdata->otg_control == OTG_PHY_CONTROL) {
+		val = readl_relaxed(USB_OTGSC);
+		if (pdata->mode == USB_OTG) {
+			ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
+			val |= OTGSC_IDIE | OTGSC_BSVIE;
+		} else if (pdata->mode == USB_PERIPHERAL) {
+			ulpi_val = ULPI_INT_SESS_VALID;
+			val |= OTGSC_BSVIE;
+		}
+		writel_relaxed(val, USB_OTGSC);
+		ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE);
+		ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL);
+	} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
+		ulpi_write(phy, OTG_COMP_DISABLE,
+			ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+		if (motg->phy_irq)
+			writeb_relaxed(USB_PHY_ID_MASK,
+				USB2_PHY_USB_PHY_INTERRUPT_MASK1);
+	}
+
+	if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)
+		writel_relaxed(readl_relaxed(USB_OTGSC) & ~(OTGSC_IDPU),
+				USB_OTGSC);
+
+	msm_otg_dbg_log_event(&motg->phy, "USB RESET DONE", phy->otg->state,
+			get_pm_runtime_counter(phy->dev));
+
+	if (pdata->enable_axi_prefetch)
+		writel_relaxed(readl_relaxed(USB_HS_APF_CTRL) | (APF_CTRL_EN),
+							USB_HS_APF_CTRL);
+
+	/*
+	 * Disable USB BAM as block reset resets USB BAM registers.
+	 */
+	msm_usb_bam_enable(CI_CTRL, false);
+
+	return 0;
+}
+
+/*
+ * Wake the OTG state machine.  When the controller is in LPM, flag that a
+ * resume is needed; when the system is PM-suspended (and we are not in
+ * B_SUSPEND) defer the event to pm_resume, otherwise queue sm_work unless
+ * an event is already pending.
+ */
+static void msm_otg_kick_sm_work(struct msm_otg *motg)
+{
+	if (atomic_read(&motg->in_lpm))
+		motg->resume_pending = true;
+
+	/* For device mode, resume now. Let pm_resume handle other cases */
+	if (atomic_read(&motg->pm_suspended) &&
+			motg->phy.otg->state != OTG_STATE_B_SUSPEND) {
+		motg->sm_work_pending = true;
+		return;
+	}
+
+	/* process event only if previous one is not pending */
+	if (!motg->sm_work_pending)
+		queue_work(motg->otg_wq, &motg->sm_work);
+}
+
+/*
+ * UDC calls usb_phy_set_suspend() to notify during bus suspend/resume.
+ * Update relevant state-machine inputs and queue sm_work.
+ * LPM enter/exit doesn't happen directly from this routine.
+ *
+ * @suspend: non-zero sets A_BUS_SUSPEND in motg->inputs (bus suspend),
+ *           zero clears it (host resume or remote wakeup).
+ * No-op unless the ALLOW_LPM_ON_DEV_SUSPEND capability is set.
+ * Always returns 0.
+ */
+
+static int msm_otg_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+
+	pr_debug("%s(%d) in %s state\n", __func__, suspend,
+				usb_otg_state_string(phy->otg->state));
+	msm_otg_dbg_log_event(phy, "SET SUSPEND", suspend, phy->otg->state);
+
+	if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
+		return 0;
+
+	if (suspend) {
+		/* called in suspend interrupt context */
+		pr_debug("peripheral bus suspend\n");
+		msm_otg_dbg_log_event(phy, "PERIPHERAL BUS SUSPEND",
+				motg->inputs, phy->otg->state);
+
+		set_bit(A_BUS_SUSPEND, &motg->inputs);
+	} else {
+		/* host resume or remote-wakeup */
+		pr_debug("peripheral bus resume\n");
+		msm_otg_dbg_log_event(phy, "PERIPHERAL BUS RESUME",
+				motg->inputs, phy->otg->state);
+
+		clear_bit(A_BUS_SUSPEND, &motg->inputs);
+	}
+	/* use kick_sm_work to handle race with pm_resume */
+	msm_otg_kick_sm_work(motg);
+
+	return 0;
+}
+
+/*
+ * msm_otg_bus_freq_set - program the bimc/snoc/pcnoc clock rates for the
+ * given NOC vote mode from the bus_freqs table.  Zero-rate entries are
+ * skipped.  Sets bus_clk_rate_set on success so the clks_enable/disable
+ * helpers know the rates are valid.  Returns 0 or the clk_set_rate error.
+ */
+static int msm_otg_bus_freq_set(struct msm_otg *motg, enum usb_noc_mode mode)
+{
+	int i, ret;
+	long rate;
+
+	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+		rate = bus_freqs[mode][i];
+		if (!rate) {
+			pr_debug("%s rate not available\n", bus_clkname[i]);
+			continue;
+		}
+
+		ret = clk_set_rate(motg->bus_clks[i], rate);
+		if (ret) {
+			pr_err("%s set rate failed: %d\n", bus_clkname[i], ret);
+			return ret;
+		}
+		pr_debug("%s set to %lu Hz\n", bus_clkname[i],
+			 clk_get_rate(motg->bus_clks[i]));
+		msm_otg_dbg_log_event(&motg->phy, "OTG BUS FREQ SET", i, rate);
+	}
+
+	bus_clk_rate_set = true;
+
+	return 0;
+}
+
+/*
+ * msm_otg_bus_freq_get - parse the "qcom,bus-clk-rate" DT property into
+ * bus_freqs[] and acquire the corresponding bus clocks.  SVS-mode targets
+ * must provide a second set of rates (for the perf_mode sysfs node), so
+ * the expected element count doubles.  Clocks whose rate is zero are left
+ * NULL and skipped by the other bus helpers.
+ * Returns 0, -EINVAL on a missing/mis-sized property, or a clk-get error.
+ */
+static int msm_otg_bus_freq_get(struct msm_otg *motg)
+{
+	struct device *dev = motg->phy.dev;
+	struct device_node *np = dev->of_node;
+	int len = 0, i, count = USB_NUM_BUS_CLOCKS;
+
+	if (!np)
+		return -EINVAL;
+
+	/* SVS requires extra set of frequencies for perf_mode sysfs node */
+	if (motg->default_noc_mode == USB_NOC_SVS_VOTE)
+		count *= 2;
+
+	len = of_property_count_elems_of_size(np, "qcom,bus-clk-rate",
+							sizeof(len));
+	if (!len || (len != count)) {
+		pr_err("Invalid bus rate:%d %u\n", len, motg->default_noc_mode);
+		return -EINVAL;
+	}
+	of_property_read_u32_array(np, "qcom,bus-clk-rate", bus_freqs[0],
+								count);
+	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+		if (bus_freqs[0][i] == 0) {
+			motg->bus_clks[i] = NULL;
+			pr_debug("%s not available\n", bus_clkname[i]);
+			continue;
+		}
+
+		motg->bus_clks[i] = devm_clk_get(dev, bus_clkname[i]);
+		if (IS_ERR(motg->bus_clks[i])) {
+			pr_err("%s get failed\n", bus_clkname[i]);
+			return PTR_ERR(motg->bus_clks[i]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * msm_otg_bus_clks_enable - enable all non-NULL bus clocks.  No-op until
+ * the rates have been programmed (bus_clk_rate_set) or when already
+ * enabled.  On a partial failure, the clocks enabled so far are rolled
+ * back and bus_clks_enabled stays false.
+ */
+static void msm_otg_bus_clks_enable(struct msm_otg *motg)
+{
+	int i;
+	int ret;
+
+	if (!bus_clk_rate_set || motg->bus_clks_enabled)
+		return;
+
+	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+		if (motg->bus_clks[i] == NULL)
+			continue;
+		ret = clk_prepare_enable(motg->bus_clks[i]);
+		if (ret) {
+			pr_err("%s enable rate failed: %d\n", bus_clkname[i],
+				ret);
+			goto err_clk_en;
+		}
+	}
+	motg->bus_clks_enabled = true;
+	return;
+err_clk_en:
+	/* unwind only the clocks that were successfully enabled */
+	for (--i; i >= 0; --i) {
+		if (motg->bus_clks[i] != NULL)
+			clk_disable_unprepare(motg->bus_clks[i]);
+	}
+}
+
+/* Undo msm_otg_bus_clks_enable(); no-op unless the clocks are currently on. */
+static void msm_otg_bus_clks_disable(struct msm_otg *motg)
+{
+	int idx;
+
+	if (!bus_clk_rate_set || !motg->bus_clks_enabled)
+		return;
+
+	for (idx = 0; idx < USB_NUM_BUS_CLOCKS; idx++) {
+		struct clk *bus_clk = motg->bus_clks[idx];
+
+		if (bus_clk != NULL)
+			clk_disable_unprepare(bus_clk);
+	}
+	motg->bus_clks_enabled = false;
+}
+
+/*
+ * msm_otg_bus_vote - apply a bus-bandwidth vote through the msm_bus
+ * scaling client and gate the NOC clocks to match: bus clocks run only
+ * for USB_MAX_PERF_VOTE.  Votes beyond the bus-scale table are clamped
+ * to USB_NO_PERF_VOTE.
+ */
+static void msm_otg_bus_vote(struct msm_otg *motg, enum usb_bus_vote vote)
+{
+	int ret;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	msm_otg_dbg_log_event(&motg->phy, "BUS VOTE", vote,
+			motg->phy.otg->state);
+	/* Check if target allows min_vote to be same as no_vote */
+	if (pdata->bus_scale_table &&
+	    vote >= pdata->bus_scale_table->num_usecases)
+		vote = USB_NO_PERF_VOTE;
+
+	if (motg->bus_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+			motg->bus_perf_client, vote);
+		if (ret)
+			dev_err(motg->phy.dev, "%s: Failed to vote (%d)\n"
+				   "for bus bw %d\n", __func__, vote, ret);
+	}
+
+	if (vote == USB_MAX_PERF_VOTE)
+		msm_otg_bus_clks_enable(motg);
+	else
+		msm_otg_bus_clks_disable(motg);
+}
+
+/*
+ * msm_otg_enable_phy_hv_int - arm the PHY's high-voltage wakeup
+ * interrupts before entering low power: BSV/ID interrupts when VBUS/ID
+ * detection is PHY-based (OTG_PHY_CONTROL or a dedicated phy_irq), and
+ * DP/DM line-state clamps during host/device bus suspend.  Register
+ * layout differs per PHY type; unknown types are a no-op.
+ */
+static void msm_otg_enable_phy_hv_int(struct msm_otg *motg)
+{
+	bool bsv_id_hv_int = false;
+	bool dp_dm_hv_int = false;
+	u32 val;
+
+	if (motg->pdata->otg_control == OTG_PHY_CONTROL ||
+				motg->phy_irq)
+		bsv_id_hv_int = true;
+	if (motg->host_bus_suspend || motg->device_bus_suspend)
+		dp_dm_hv_int = true;
+
+	if (!bsv_id_hv_int && !dp_dm_hv_int)
+		return;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_PICO_PHY:
+		val = readl_relaxed(motg->usb_phy_ctrl_reg);
+		if (bsv_id_hv_int)
+			val |= (PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
+		if (dp_dm_hv_int)
+			val |= PHY_CLAMP_DPDMSE_EN;
+		writel_relaxed(val, motg->usb_phy_ctrl_reg);
+		break;
+	case SNPS_FEMTO_PHY:
+		if (bsv_id_hv_int) {
+			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1);
+			val |= ID_HV_CLAMP_EN_N;
+			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1);
+		}
+
+		if (dp_dm_hv_int) {
+			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3);
+			val |= CLAMP_MPM_DPSE_DMSE_EN_N;
+			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3);
+		}
+		break;
+	default:
+		break;
+	}
+	pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n",
+			__func__, bsv_id_hv_int, dp_dm_hv_int);
+	msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR ENABLED",
+			bsv_id_hv_int, dp_dm_hv_int);
+}
+
+/*
+ * msm_otg_disable_phy_hv_int - mirror image of
+ * msm_otg_enable_phy_hv_int(): clear the same BSV/ID and DP/DM
+ * high-voltage interrupt/clamp enables on LPM exit, using the identical
+ * selection conditions so exactly the bits that were set get cleared.
+ */
+static void msm_otg_disable_phy_hv_int(struct msm_otg *motg)
+{
+	bool bsv_id_hv_int = false;
+	bool dp_dm_hv_int = false;
+	u32 val;
+
+	if (motg->pdata->otg_control == OTG_PHY_CONTROL ||
+				motg->phy_irq)
+		bsv_id_hv_int = true;
+	if (motg->host_bus_suspend || motg->device_bus_suspend)
+		dp_dm_hv_int = true;
+
+	if (!bsv_id_hv_int && !dp_dm_hv_int)
+		return;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_PICO_PHY:
+		val = readl_relaxed(motg->usb_phy_ctrl_reg);
+		if (bsv_id_hv_int)
+			val &= ~(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
+		if (dp_dm_hv_int)
+			val &= ~PHY_CLAMP_DPDMSE_EN;
+		writel_relaxed(val, motg->usb_phy_ctrl_reg);
+		break;
+	case SNPS_FEMTO_PHY:
+		if (bsv_id_hv_int) {
+			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1);
+			val &= ~ID_HV_CLAMP_EN_N;
+			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1);
+		}
+
+		if (dp_dm_hv_int) {
+			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3);
+			val &= ~CLAMP_MPM_DPSE_DMSE_EN_N;
+			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3);
+		}
+		break;
+	default:
+		break;
+	}
+	pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n",
+			__func__, bsv_id_hv_int, dp_dm_hv_int);
+	msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR DISABLED",
+			bsv_id_hv_int, dp_dm_hv_int);
+}
+
+/*
+ * msm_otg_enter_phy_retention - put the PHY in its low-power retention
+ * state: clear PHY_RETEN on Pico PHYs, set SIDDQ on Femto PHYs (which
+ * implement retention through SIDDQ).  Unknown types are a no-op.
+ */
+static void msm_otg_enter_phy_retention(struct msm_otg *motg)
+{
+	u32 val;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_PICO_PHY:
+		val = readl_relaxed(motg->usb_phy_ctrl_reg);
+		val &= ~PHY_RETEN;
+		writel_relaxed(val, motg->usb_phy_ctrl_reg);
+		break;
+	case SNPS_FEMTO_PHY:
+		/* Retention is supported via SIDDQ */
+		val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0);
+		val |= SIDDQ;
+		writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0);
+		break;
+	default:
+		break;
+	}
+	pr_debug("USB PHY is in retention\n");
+	msm_otg_dbg_log_event(&motg->phy, "USB PHY ENTER RETENTION",
+			motg->pdata->phy_type, 0);
+}
+
+/*
+ * msm_otg_exit_phy_retention - bring the PHY out of retention: set
+ * PHY_RETEN on Pico PHYs; Femto PHYs need a full USB block reset
+ * (msm_otg_reset) to leave retention.  Unknown types are a no-op.
+ */
+static void msm_otg_exit_phy_retention(struct msm_otg *motg)
+{
+	int val;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_PICO_PHY:
+		val = readl_relaxed(motg->usb_phy_ctrl_reg);
+		val |= PHY_RETEN;
+		writel_relaxed(val, motg->usb_phy_ctrl_reg);
+		break;
+	case SNPS_FEMTO_PHY:
+		/*
+		 * It is required to do USB block reset to bring Femto PHY out
+		 * of retention.
+		 */
+		msm_otg_reset(&motg->phy);
+		break;
+	default:
+		break;
+	}
+	pr_debug("USB PHY is exited from retention\n");
+	msm_otg_dbg_log_event(&motg->phy, "USB PHY EXIT RETENTION",
+			motg->pdata->phy_type, 0);
+}
+
+static void msm_id_status_w(struct work_struct *w);
+/*
+ * msm_otg_phy_irq_handler - PHY ID interrupt.  When the controller is in
+ * LPM, only record the pending interrupt and kick the state machine so
+ * resume handles it; otherwise run the ID-status work synchronously.
+ */
+static irqreturn_t msm_otg_phy_irq_handler(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+
+	msm_otg_dbg_log_event(&motg->phy, "PHY ID IRQ",
+			atomic_read(&motg->in_lpm), motg->phy.otg->state);
+	if (atomic_read(&motg->in_lpm)) {
+		pr_debug("PHY ID IRQ in LPM\n");
+		motg->phy_irq_pending = true;
+		msm_otg_kick_sm_work(motg);
+	} else {
+		pr_debug("PHY ID IRQ outside LPM\n");
+		msm_id_status_w(&motg->id_status_work.work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+#define PHY_SUSPEND_TIMEOUT_USEC (5 * 1000)
+#define PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC 100
+#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
+
+#define PHY_SUSPEND_RETRIES_MAX 3
+
+static void msm_otg_set_vbus_state(int online);
+static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode);
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_otg_suspend(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ struct usb_bus *bus = phy->otg->host;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ int cnt;
+ bool host_bus_suspend, device_bus_suspend, dcp, prop_charger;
+ bool floated_charger, sm_work_busy;
+ u32 cmd_val;
+ u32 portsc, config2;
+ u32 func_ctrl;
+ int phcd_retry_cnt = 0, ret;
+ unsigned int phy_suspend_timeout;
+
+ cnt = 0;
+ msm_otg_dbg_log_event(phy, "LPM ENTER START",
+ motg->inputs, phy->otg->state);
+
+ if (atomic_read(&motg->in_lpm))
+ return 0;
+
+ cancel_delayed_work_sync(&motg->perf_vote_work);
+
+ disable_irq(motg->irq);
+ if (motg->phy_irq)
+ disable_irq(motg->phy_irq);
+lpm_start:
+ host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs);
+ device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) &&
+ test_bit(A_BUS_SUSPEND, &motg->inputs) &&
+ motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
+
+ if (host_bus_suspend)
+ msm_otg_perf_vote_update(motg, false);
+ /*
+ * Allow putting PHY into SIDDQ with wall charger connected in
+ * case of external charger detection.
+ */
+ dcp = (motg->chg_type == USB_DCP_CHARGER) && !motg->is_ext_chg_dcp;
+ prop_charger = motg->chg_type == USB_NONCOMPLIANT_CHARGER;
+ floated_charger = motg->chg_type == USB_FLOATED_CHARGER;
+
+ /* !BSV, but its handling is in progress by otg sm_work */
+ sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) &&
+ phy->otg->state == OTG_STATE_B_PERIPHERAL;
+
+ /* Perform block reset to recover from UDC error events on disconnect */
+ if (motg->err_event_seen)
+ msm_otg_reset(phy);
+
+ /* Enable line state difference wakeup fix for only device and host
+ * bus suspend scenarios. Otherwise PHY can not be suspended when
+ * a charger that pulls DP/DM high is connected.
+ */
+ config2 = readl_relaxed(USB_GENCONFIG_2);
+ if (device_bus_suspend)
+ config2 |= GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN;
+ else
+ config2 &= ~GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN;
+ writel_relaxed(config2, USB_GENCONFIG_2);
+
+ /*
+ * Abort suspend when,
+ * 1. charging detection in progress due to cable plug-in
+ * 2. host mode activation in progress due to Micro-A cable insertion
+ * 3. !BSV, but its handling is in progress by otg sm_work
+ * Don't abort suspend in case of dcp detected by PMIC
+ */
+
+ if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
+ !dcp && !motg->is_ext_chg_dcp && !prop_charger &&
+ !floated_charger) || sm_work_busy) {
+ msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED",
+ motg->inputs, motg->chg_type);
+ enable_irq(motg->irq);
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+ return -EBUSY;
+ }
+
+ if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) {
+ /* put the controller in non-driving mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+ ulpi_write(phy, ULPI_IFC_CTRL_AUTORESUME,
+ ULPI_CLR(ULPI_IFC_CTRL));
+ }
+
+ /*
+ * PHY suspend sequence as mentioned in the databook.
+ *
+ * Device bus suspend: The controller may abort PHY suspend if
+ * there is an incoming reset or resume from the host. If PHCD
+ * is not set within 100 usec. Abort the LPM sequence.
+ *
+ * Host bus suspend: If the peripheral is attached, PHY is already
+ * put into suspend along with the peripheral bus suspend. poll for
+ * PHCD upto 5 msec. If the peripheral is not attached i.e entering
+ * LPM with Micro-A cable, set the PHCD and poll for it for 5 msec.
+ *
+ * No cable connected: Set the PHCD to suspend the PHY. Poll for PHCD
+ * upto 5 msec.
+ *
+ * The controller aborts PHY suspend only in device bus suspend case.
+ * In other cases, it is observed that PHCD may not get set within
+ * the timeout. If so, set the PHCD again and poll for it before
+ * reset recovery.
+ */
+
+phcd_retry:
+ if (device_bus_suspend)
+ phy_suspend_timeout = PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC;
+ else
+ phy_suspend_timeout = PHY_SUSPEND_TIMEOUT_USEC;
+
+ cnt = 0;
+ portsc = readl_relaxed(USB_PORTSC);
+ if (!(portsc & PORTSC_PHCD)) {
+ writel_relaxed(portsc | PORTSC_PHCD,
+ USB_PORTSC);
+ while (cnt < phy_suspend_timeout) {
+ if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
+ break;
+ udelay(1);
+ cnt++;
+ }
+ }
+
+ if (cnt >= phy_suspend_timeout) {
+ if (phcd_retry_cnt > PHY_SUSPEND_RETRIES_MAX) {
+ msm_otg_dbg_log_event(phy, "PHY SUSPEND FAILED",
+ phcd_retry_cnt, phy->otg->state);
+ dev_err(phy->dev, "PHY suspend failed\n");
+ ret = -EBUSY;
+ goto phy_suspend_fail;
+ }
+
+ if (device_bus_suspend) {
+ dev_dbg(phy->dev, "PHY suspend aborted\n");
+ ret = -EBUSY;
+ goto phy_suspend_fail;
+ } else {
+ if (phcd_retry_cnt++ < PHY_SUSPEND_RETRIES_MAX) {
+ dev_dbg(phy->dev, "PHY suspend retry\n");
+ goto phcd_retry;
+ } else {
+ dev_err(phy->dev, "reset attempt during PHY suspend\n");
+ phcd_retry_cnt++;
+ motg->reset_counter = 0;
+ msm_otg_reset(phy);
+ goto lpm_start;
+ }
+ }
+ }
+
+ /*
+ * PHY has capability to generate interrupt asynchronously in low
+ * power mode (LPM). This interrupt is level triggered. So USB IRQ
+ * line must be disabled till async interrupt enable bit is cleared
+ * in USBCMD register. Assert STP (ULPI interface STOP signal) to
+ * block data communication from PHY.
+ *
+ * PHY retention mode is disallowed while entering to LPM with wall
+ * charger connected. But PHY is put into suspend mode. Hence
+ * enable asynchronous interrupt to detect charger disconnection when
+ * PMIC notifications are unavailable.
+ */
+ cmd_val = readl_relaxed(USB_USBCMD);
+ if (host_bus_suspend || device_bus_suspend ||
+ (motg->pdata->otg_control == OTG_PHY_CONTROL))
+ cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL;
+ else
+ cmd_val |= ULPI_STP_CTRL;
+ writel_relaxed(cmd_val, USB_USBCMD);
+
+ /*
+ * BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP.
+ * PHY retention and collapse can not happen with VDP_SRC enabled.
+ */
+
+
+ /*
+ * We come here in 3 scenarios.
+ *
+ * (1) No cable connected (out of session):
+ * - BSV/ID HV interrupts are enabled for PHY based detection.
+ * - PHY is put in retention.
+ * - If allowed (PMIC based detection), PHY is power collapsed.
+ * - DVDD (CX/MX) minimization and XO shutdown are allowed.
+ * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user.
+ * (2) USB wall charger:
+ * - BSV/ID HV interrupts are enabled for PHY based detection.
+ * - For BC1.2 compliant charger, retention is not allowed to
+ * keep VDP_SRC on. XO shutdown is allowed.
+ * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user.
+ * (3) Device/Host Bus suspend (if LPM is enabled):
+ * - BSV/ID HV interrupts are enabled for PHY based detection.
+ * - D+/D- MPM pin are configured to wakeup from line state
+ * change through PHY HV interrupts. PHY HV interrupts are
+ * also enabled. If MPM pins are not available, retention and
+ * XO is not allowed.
+ * - PHY is put into retention only if a gpio is used to keep
+ * the D+ pull-up. ALLOW_BUS_SUSPEND_WITHOUT_REWORK capability
+ * is set means, PHY can enable D+ pull-up or D+/D- pull-down
+ * without any re-work and PHY should not be put into retention.
+ * - DVDD (CX/MX) minimization and XO shutdown is allowed if
+ * ALLOW_BUS_SUSPEND_WITHOUT_REWORK is set (PHY DVDD is supplied
+ * via PMIC LDO) or board level re-work is present.
+ * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user
+ * or USB link asynchronous interrupt for line state change.
+ *
+ */
+ motg->host_bus_suspend = host_bus_suspend;
+ motg->device_bus_suspend = device_bus_suspend;
+
+ if (motg->caps & ALLOW_PHY_RETENTION && !device_bus_suspend && !dcp &&
+ (!host_bus_suspend || (motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
+ ((motg->caps & ALLOW_HOST_PHY_RETENTION)
+ && (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS))))) {
+ msm_otg_enable_phy_hv_int(motg);
+ if ((!host_bus_suspend || !(motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK)) &&
+ !(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ msm_otg_enter_phy_retention(motg);
+ motg->lpm_flags |= PHY_RETENTIONED;
+ }
+ } else if (device_bus_suspend && !dcp &&
+ (pdata->mpm_dpshv_int || pdata->mpm_dmshv_int)) {
+ /* DP DM HV interrupts are used for bus resume from XO off */
+ msm_otg_enable_phy_hv_int(motg);
+ if (motg->caps & ALLOW_PHY_RETENTION && pdata->vddmin_gpio) {
+
+ /*
+ * This is HW WA needed when PHY_CLAMP_DPDMSE_EN is
+ * enabled and we put the phy in retention mode.
+ * Without this WA, the async_irq will be fired right
+ * after suspending without any bus resume.
+ */
+ config2 = readl_relaxed(USB_GENCONFIG_2);
+ config2 &= ~GENCONFIG_2_DPSE_DMSE_HV_INTR_EN;
+ writel_relaxed(config2, USB_GENCONFIG_2);
+
+ msm_otg_enter_phy_retention(motg);
+ motg->lpm_flags |= PHY_RETENTIONED;
+ gpio_direction_output(pdata->vddmin_gpio, 1);
+ }
+ }
+
+ /* Ensure that above operation is completed before turning off clocks */
+ mb();
+ /* Consider clocks on workaround flag only in case of bus suspend */
+ if (!(phy->otg->state == OTG_STATE_B_PERIPHERAL &&
+ test_bit(A_BUS_SUSPEND, &motg->inputs)) ||
+ !motg->pdata->core_clk_always_on_workaround) {
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->core_clk);
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+ motg->lpm_flags |= CLOCKS_DOWN;
+ }
+
+ /* usb phy no more require TCXO clock, hence vote for TCXO disable */
+ if (!host_bus_suspend || (motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
+ ((motg->caps & ALLOW_HOST_PHY_RETENTION) &&
+ (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS)))) {
+ if (motg->xo_clk) {
+ clk_disable_unprepare(motg->xo_clk);
+ motg->lpm_flags |= XO_SHUTDOWN;
+ }
+ }
+
+ if (motg->caps & ALLOW_PHY_POWER_COLLAPSE &&
+ !host_bus_suspend && !dcp && !device_bus_suspend) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
+ motg->lpm_flags |= PHY_PWR_COLLAPSED;
+ } else if (motg->caps & ALLOW_PHY_REGULATORS_LPM &&
+ !host_bus_suspend && !device_bus_suspend && !dcp) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_ON);
+ motg->lpm_flags |= PHY_REGULATORS_LPM;
+ }
+
+ if (motg->lpm_flags & PHY_RETENTIONED ||
+ (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ regulator_disable(hsusb_vdd);
+ msm_hsusb_config_vddcx(0);
+ }
+
+ if (device_may_wakeup(phy->dev)) {
+ if (host_bus_suspend || device_bus_suspend) {
+ enable_irq_wake(motg->async_irq);
+ enable_irq_wake(motg->irq);
+ }
+
+ if (motg->phy_irq)
+ enable_irq_wake(motg->phy_irq);
+ if (motg->pdata->pmic_id_irq)
+ enable_irq_wake(motg->pdata->pmic_id_irq);
+ if (motg->ext_id_irq)
+ enable_irq_wake(motg->ext_id_irq);
+ if (pdata->otg_control == OTG_PHY_CONTROL &&
+ pdata->mpm_otgsessvld_int)
+ msm_mpm_set_pin_wake(pdata->mpm_otgsessvld_int, 1);
+ if ((host_bus_suspend || device_bus_suspend) &&
+ pdata->mpm_dpshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dpshv_int, 1);
+ if ((host_bus_suspend || device_bus_suspend) &&
+ pdata->mpm_dmshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dmshv_int, 1);
+ }
+ if (bus)
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
+
+ msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
+
+ atomic_set(&motg->in_lpm, 1);
+
+ /* Enable ASYNC IRQ during LPM */
+ enable_irq(motg->async_irq);
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+
+ enable_irq(motg->irq);
+ pm_relax(&motg->pdev->dev);
+
+ dev_dbg(phy->dev, "LPM caps = %lu flags = %lu\n",
+ motg->caps, motg->lpm_flags);
+ dev_info(phy->dev, "USB in low power mode\n");
+ msm_otg_dbg_log_event(phy, "LPM ENTER DONE",
+ motg->caps, motg->lpm_flags);
+
+ if (motg->err_event_seen) {
+ motg->err_event_seen = false;
+ if (motg->vbus_state != test_bit(B_SESS_VLD, &motg->inputs))
+ msm_otg_set_vbus_state(motg->vbus_state);
+ if (motg->id_state != test_bit(ID, &motg->inputs))
+ msm_id_status_w(&motg->id_status_work.work);
+ }
+
+ return 0;
+
+phy_suspend_fail:
+ enable_irq(motg->irq);
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+ return ret;
+}
+
+static int msm_otg_resume(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ struct usb_bus *bus = phy->otg->host;
+ struct usb_hcd *hcd = bus_to_hcd(phy->otg->host);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ int cnt = 0;
+ unsigned int temp;
+ unsigned int ret;
+ u32 func_ctrl;
+
+ msm_otg_dbg_log_event(phy, "LPM EXIT START", motg->inputs,
+ phy->otg->state);
+ if (!atomic_read(&motg->in_lpm)) {
+ msm_otg_dbg_log_event(phy, "USB NOT IN LPM",
+ atomic_read(&motg->in_lpm), phy->otg->state);
+ return 0;
+ }
+
+ disable_irq(motg->irq);
+ pm_stay_awake(&motg->pdev->dev);
+
+ /*
+ * If we are resuming from the device bus suspend, restore
+ * the max performance bus vote. Otherwise put a minimum
+ * bus vote to satisfy the requirement for enabling clocks.
+ */
+
+ if (motg->device_bus_suspend && debug_bus_voting_enabled)
+ msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
+ else
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+
+ /* Vote for TCXO when waking up the phy */
+ if (motg->lpm_flags & XO_SHUTDOWN) {
+ if (motg->xo_clk)
+ clk_prepare_enable(motg->xo_clk);
+ motg->lpm_flags &= ~XO_SHUTDOWN;
+ }
+
+ if (motg->lpm_flags & CLOCKS_DOWN) {
+ if (motg->phy_csr_clk) {
+ ret = clk_prepare_enable(motg->phy_csr_clk);
+ WARN(ret, "USB phy_csr_clk enable failed\n");
+ }
+ ret = clk_prepare_enable(motg->core_clk);
+ WARN(ret, "USB core_clk enable failed\n");
+ ret = clk_prepare_enable(motg->pclk);
+ WARN(ret, "USB pclk enable failed\n");
+ motg->lpm_flags &= ~CLOCKS_DOWN;
+ }
+
+ if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON);
+ motg->lpm_flags &= ~PHY_PWR_COLLAPSED;
+ } else if (motg->lpm_flags & PHY_REGULATORS_LPM) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_OFF);
+ motg->lpm_flags &= ~PHY_REGULATORS_LPM;
+ }
+
+ if (motg->lpm_flags & PHY_RETENTIONED ||
+ (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ msm_hsusb_config_vddcx(1);
+ ret = regulator_enable(hsusb_vdd);
+ WARN(ret, "hsusb_vdd LDO enable failed\n");
+ msm_otg_disable_phy_hv_int(motg);
+ msm_otg_exit_phy_retention(motg);
+ motg->lpm_flags &= ~PHY_RETENTIONED;
+ if (pdata->vddmin_gpio && motg->device_bus_suspend)
+ gpio_direction_input(pdata->vddmin_gpio);
+ } else if (motg->device_bus_suspend) {
+ msm_otg_disable_phy_hv_int(motg);
+ }
+
+ temp = readl_relaxed(USB_USBCMD);
+ temp &= ~ASYNC_INTR_CTRL;
+ temp &= ~ULPI_STP_CTRL;
+ writel_relaxed(temp, USB_USBCMD);
+
+ /*
+ * PHY comes out of low power mode (LPM) in case of wakeup
+ * from asynchronous interrupt.
+ */
+ if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
+ goto skip_phy_resume;
+
+ writel_relaxed(readl_relaxed(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
+
+ while (cnt < PHY_RESUME_TIMEOUT_USEC) {
+ if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
+ break;
+ udelay(1);
+ cnt++;
+ }
+
+ if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
+ /*
+ * This is a fatal error. Reset the link and
+ * PHY. USB state can not be restored. Re-insertion
+ * of USB cable is the only way to get USB working.
+ */
+ dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n"
+ );
+ msm_otg_reset(phy);
+ }
+
+skip_phy_resume:
+ if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) {
+ /* put the controller in normal mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+ }
+
+ if (device_may_wakeup(phy->dev)) {
+ if (motg->host_bus_suspend || motg->device_bus_suspend) {
+ disable_irq_wake(motg->async_irq);
+ disable_irq_wake(motg->irq);
+ }
+
+ if (motg->phy_irq)
+ disable_irq_wake(motg->phy_irq);
+ if (motg->pdata->pmic_id_irq)
+ disable_irq_wake(motg->pdata->pmic_id_irq);
+ if (motg->ext_id_irq)
+ disable_irq_wake(motg->ext_id_irq);
+ if (pdata->otg_control == OTG_PHY_CONTROL &&
+ pdata->mpm_otgsessvld_int)
+ msm_mpm_set_pin_wake(pdata->mpm_otgsessvld_int, 0);
+ if ((motg->host_bus_suspend || motg->device_bus_suspend) &&
+ pdata->mpm_dpshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dpshv_int, 0);
+ if ((motg->host_bus_suspend || motg->device_bus_suspend) &&
+ pdata->mpm_dmshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dmshv_int, 0);
+ }
+ if (bus)
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
+
+ atomic_set(&motg->in_lpm, 0);
+
+ if (motg->async_int) {
+ /* Match the disable_irq call from ISR */
+ enable_irq(motg->async_int);
+ motg->async_int = 0;
+ }
+ enable_irq(motg->irq);
+
+ /* Enable ASYNC_IRQ only during LPM */
+ disable_irq(motg->async_irq);
+
+ if (motg->phy_irq_pending) {
+ motg->phy_irq_pending = false;
+ msm_id_status_w(&motg->id_status_work.work);
+ }
+
+ if (motg->host_bus_suspend) {
+ usb_hcd_resume_root_hub(hcd);
+ schedule_delayed_work(&motg->perf_vote_work,
+ msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+ }
+
+ dev_info(phy->dev, "USB exited from low power mode\n");
+ msm_otg_dbg_log_event(phy, "LPM EXIT DONE",
+ motg->caps, motg->lpm_flags);
+
+ return 0;
+}
+#endif
+
+static void msm_otg_notify_host_mode(struct msm_otg *motg, bool host_mode)
+{
+ if (!psy) {
+ pr_err("No USB power supply registered!\n");
+ return;
+ }
+
+ motg->host_mode = host_mode;
+ power_supply_changed(psy);
+}
+
+static int msm_otg_notify_chg_type(struct msm_otg *motg)
+{
+ static int charger_type;
+ union power_supply_propval pval = {0};
+
+ /*
+ * TODO
+ * Unify OTG driver charger types and power supply charger types
+ */
+ if (charger_type == motg->chg_type)
+ return 0;
+
+ if (motg->chg_type == USB_SDP_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB;
+ else if (motg->chg_type == USB_CDP_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB_CDP;
+ else if (motg->chg_type == USB_DCP_CHARGER ||
+ motg->chg_type == USB_NONCOMPLIANT_CHARGER ||
+ motg->chg_type == USB_FLOATED_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB_DCP;
+ else
+ charger_type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+ if (!psy) {
+ pr_err("No USB power supply registered!\n");
+ return -EINVAL;
+ }
+
+ pr_debug("setting usb power supply type %d\n", charger_type);
+ msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE",
+ motg->chg_type, charger_type);
+ pval.intval = charger_type;
+ power_supply_set_property(psy, POWER_SUPPLY_PROP_TYPE, &pval);
+ return 0;
+}
+
+static int msm_otg_notify_power_supply(struct msm_otg *motg, unsigned int mA)
+{
+ union power_supply_propval pval = {0};
+ bool enable;
+ int limit;
+
+ if (!psy) {
+ dev_dbg(motg->phy.dev, "no usb power supply registered\n");
+ goto psy_error;
+ }
+
+ if (motg->cur_power == 0 && mA > 2) {
+ /* Enable charging */
+ enable = true;
+ limit = 1000 * mA;
+ } else if (motg->cur_power >= 0 && (mA == 0 || mA == 2)) {
+ /* Disable charging */
+ enable = false;
+ /* Set max current limit in uA */
+ limit = 1000 * mA;
+ } else {
+ enable = true;
+ /* Current has changed (100/2 --> 500) */
+ limit = 1000 * mA;
+ }
+
+ pval.intval = enable;
+ if (power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &pval))
+ goto psy_error;
+
+ pval.intval = limit;
+ if (power_supply_set_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval))
+ goto psy_error;
+
+ power_supply_changed(psy);
+ return 0;
+
+psy_error:
+ dev_dbg(motg->phy.dev, "power supply error when setting property\n");
+ return -ENXIO;
+}
+
+static void msm_otg_set_online_status(struct msm_otg *motg)
+{
+ union power_supply_propval pval = {0};
+
+ if (!psy) {
+ dev_dbg(motg->phy.dev, "no usb power supply registered\n");
+ return;
+ }
+
+ /* Set power supply online status to false */
+ pval.intval = false;
+ if (power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &pval))
+ dev_dbg(motg->phy.dev, "error setting power supply property\n");
+}
+
+static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA)
+{
+ struct usb_gadget *g = motg->phy.otg->gadget;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
+ if (g && g->is_a_peripheral)
+ return;
+
+ dev_dbg(motg->phy.dev, "Requested curr from USB = %u, max-type-c:%u\n",
+ mA, motg->typec_current_max);
+ /* Save bc1.2 max_curr if type-c charger later moves to diff mode */
+ motg->bc1p2_current_max = mA;
+
+ /*
+ * Limit type-c charger current to 500 for SDP charger to avoid more
+ * current drawn than 500 with Hosts that don't support type C due to
+ * non compliant type-c to standard A cables.
+ */
+ if (pdata->enable_sdp_typec_current_limit &&
+ (motg->chg_type == USB_SDP_CHARGER) &&
+ motg->typec_current_max > 500)
+ motg->typec_current_max = 500;
+
+ /* Override mA if type-c charger used (use hvdcp/bc1.2 if it is 500) */
+ if (motg->typec_current_max > 500 && mA < motg->typec_current_max)
+ mA = motg->typec_current_max;
+
+ if (msm_otg_notify_chg_type(motg))
+ dev_err(motg->phy.dev,
+ "Failed notifying %d charger type to PMIC\n",
+ motg->chg_type);
+
+ /*
+ * This condition will be true when usb cable is disconnected
+ * during bootup before enumeration. Check charger type also
+ * to avoid clearing online flag in case of valid charger.
+ */
+ if (motg->online && motg->cur_power == 0 && mA == 0 &&
+ (motg->chg_type == USB_INVALID_CHARGER))
+ msm_otg_set_online_status(motg);
+
+ if (motg->cur_power == mA)
+ return;
+
+ dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
+ msm_otg_dbg_log_event(&motg->phy, "AVAIL CURR FROM USB",
+ mA, motg->chg_type);
+
+ msm_otg_notify_power_supply(motg, mA);
+
+ motg->cur_power = mA;
+}
+
+static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+
+ /*
+ * Gadget driver uses set_power method to notify about the
+ * available current based on suspend/configured states.
+ *
+ * IDEV_CHG can be drawn irrespective of suspend/un-configured
+ * states when CDP/ACA is connected.
+ */
+ if (motg->chg_type == USB_SDP_CHARGER)
+ msm_otg_notify_charger(motg, mA);
+
+ return 0;
+}
+
+static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on);
+
+static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode)
+{
+ static bool curr_perf_mode;
+ int ret, latency = motg->pm_qos_latency;
+ long clk_rate;
+
+ if (curr_perf_mode == perf_mode)
+ return;
+
+ if (perf_mode) {
+ if (latency)
+ pm_qos_update_request(&motg->pm_qos_req_dma, latency);
+ msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
+ clk_rate = motg->core_clk_rate;
+ } else {
+ if (latency)
+ pm_qos_update_request(&motg->pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+ clk_rate = motg->core_clk_svs_rate;
+ }
+
+ if (clk_rate) {
+ ret = clk_set_rate(motg->core_clk, clk_rate);
+ if (ret)
+ dev_err(motg->phy.dev, "sys_clk set_rate fail:%d %ld\n",
+ ret, clk_rate);
+ }
+ curr_perf_mode = perf_mode;
+ pr_debug("%s: latency updated to: %d, core_freq to: %ld\n", __func__,
+ latency, clk_rate);
+}
+
+static void msm_otg_perf_vote_work(struct work_struct *w)
+{
+ struct msm_otg *motg = container_of(w, struct msm_otg,
+ perf_vote_work.work);
+ unsigned int curr_sample_int_count;
+ bool in_perf_mode = false;
+
+ curr_sample_int_count = motg->usb_irq_count;
+ motg->usb_irq_count = 0;
+
+ if (curr_sample_int_count >= PM_QOS_THRESHOLD)
+ in_perf_mode = true;
+
+ msm_otg_perf_vote_update(motg, in_perf_mode);
+ pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%u\n",
+ __func__, in_perf_mode, curr_sample_int_count);
+
+ schedule_delayed_work(&motg->perf_vote_work,
+ msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+}
+
+static void msm_otg_start_host(struct usb_otg *otg, int on)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ struct usb_hcd *hcd;
+ u32 val;
+
+ if (!otg->host)
+ return;
+
+ hcd = bus_to_hcd(otg->host);
+
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost GET",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ if (on) {
+ dev_dbg(otg->usb_phy->dev, "host on\n");
+ msm_otg_dbg_log_event(&motg->phy, "HOST ON",
+ motg->inputs, otg->state);
+ msm_hsusb_vbus_power(motg, 1);
+ msm_otg_reset(&motg->phy);
+
+ if (pdata->otg_control == OTG_PHY_CONTROL)
+ ulpi_write(otg->usb_phy, OTG_COMP_DISABLE,
+ ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+
+ if (pdata->enable_axi_prefetch) {
+ val = readl_relaxed(USB_HS_APF_CTRL);
+ val &= ~APF_CTRL_EN;
+ writel_relaxed(val, USB_HS_APF_CTRL);
+ }
+ usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+#ifdef CONFIG_SMP
+ motg->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+ motg->pm_qos_req_dma.irq = motg->irq;
+#endif
+ pm_qos_add_request(&motg->pm_qos_req_dma,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ /* start in perf mode for better performance initially */
+ msm_otg_perf_vote_update(motg, true);
+ schedule_delayed_work(&motg->perf_vote_work,
+ msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+ } else {
+ dev_dbg(otg->usb_phy->dev, "host off\n");
+ msm_otg_dbg_log_event(&motg->phy, "HOST OFF",
+ motg->inputs, otg->state);
+ msm_hsusb_vbus_power(motg, 0);
+
+ cancel_delayed_work_sync(&motg->perf_vote_work);
+ msm_otg_perf_vote_update(motg, false);
+ pm_qos_remove_request(&motg->pm_qos_req_dma);
+
+ pm_runtime_disable(&hcd->self.root_hub->dev);
+ pm_runtime_barrier(&hcd->self.root_hub->dev);
+ usb_remove_hcd(hcd);
+ msm_otg_reset(&motg->phy);
+
+ if (pdata->enable_axi_prefetch)
+ writel_relaxed(readl_relaxed(USB_HS_APF_CTRL)
+ | (APF_CTRL_EN), USB_HS_APF_CTRL);
+
+ /* HCD core reset all bits of PORTSC. select ULPI phy */
+ writel_relaxed(0x80000000, USB_PORTSC);
+
+ if (pdata->otg_control == OTG_PHY_CONTROL)
+ ulpi_write(otg->usb_phy, OTG_COMP_DISABLE,
+ ULPI_CLR(ULPI_PWR_CLK_MNG_REG));
+ }
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost PUT",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+
+ pm_runtime_mark_last_busy(otg->usb_phy->dev);
+ pm_runtime_put_autosuspend(otg->usb_phy->dev);
+}
+
+static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on)
+{
+ int ret;
+ static bool vbus_is_on;
+
+ msm_otg_dbg_log_event(&motg->phy, "VBUS POWER", on, vbus_is_on);
+ if (vbus_is_on == on)
+ return;
+
+ if (motg->pdata->vbus_power) {
+ ret = motg->pdata->vbus_power(on);
+ if (!ret)
+ vbus_is_on = on;
+ return;
+ }
+
+ if (!vbus_otg) {
+ pr_err("vbus_otg is NULL.");
+ return;
+ }
+
+ /*
+ * if entering host mode tell the charger to not draw any current
+ * from usb before turning on the boost.
+ * if exiting host mode disable the boost before enabling to draw
+ * current from the source.
+ */
+ if (on) {
+ msm_otg_notify_host_mode(motg, on);
+ ret = regulator_enable(vbus_otg);
+ if (ret) {
+ pr_err("unable to enable vbus_otg\n");
+ return;
+ }
+ vbus_is_on = true;
+ } else {
+ ret = regulator_disable(vbus_otg);
+ if (ret) {
+ pr_err("unable to disable vbus_otg\n");
+ return;
+ }
+ msm_otg_notify_host_mode(motg, on);
+ vbus_is_on = false;
+ }
+}
+
+static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+ struct usb_hcd *hcd;
+
+ /*
+ * Fail host registration if this board can support
+ * only peripheral configuration.
+ */
+ if (motg->pdata->mode == USB_PERIPHERAL) {
+ dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
+ return -ENODEV;
+ }
+
+ if (!motg->pdata->vbus_power && host) {
+ vbus_otg = devm_regulator_get(motg->phy.dev, "vbus_otg");
+ if (IS_ERR(vbus_otg)) {
+ msm_otg_dbg_log_event(&motg->phy,
+ "UNABLE TO GET VBUS_OTG",
+ otg->state, 0);
+ pr_err("Unable to get vbus_otg\n");
+ return PTR_ERR(vbus_otg);
+ }
+ }
+
+ if (!host) {
+ if (otg->state == OTG_STATE_A_HOST) {
+ msm_otg_start_host(otg, 0);
+ otg->host = NULL;
+ otg->state = OTG_STATE_UNDEFINED;
+ queue_work(motg->otg_wq, &motg->sm_work);
+ } else {
+ otg->host = NULL;
+ }
+
+ return 0;
+ }
+
+ hcd = bus_to_hcd(host);
+ hcd->power_budget = motg->pdata->power_budget;
+
+ otg->host = host;
+ dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
+ msm_otg_dbg_log_event(&motg->phy, "HOST DRIVER REGISTERED",
+ hcd->power_budget, motg->pdata->mode);
+
+ /*
+ * Kick the state machine work, if peripheral is not supported
+ * or peripheral is already registered with us.
+ */
+ if (motg->pdata->mode == USB_HOST || otg->gadget)
+ queue_work(motg->otg_wq, &motg->sm_work);
+
+ return 0;
+}
+
+static void msm_otg_start_peripheral(struct usb_otg *otg, int on)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ struct pinctrl_state *set_state;
+ int ret;
+
+ if (!otg->gadget)
+ return;
+
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri GET",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ if (on) {
+ dev_dbg(otg->usb_phy->dev, "gadget on\n");
+ msm_otg_dbg_log_event(&motg->phy, "GADGET ON",
+ motg->inputs, otg->state);
+
+ /* Configure BUS performance parameters for MAX bandwidth */
+ if (debug_bus_voting_enabled)
+ msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
+ /* bump up usb core_clk to default */
+ clk_set_rate(motg->core_clk, motg->core_clk_rate);
+
+ usb_gadget_vbus_connect(otg->gadget);
+
+ /*
+ * Request VDD min gpio, if need to support VDD
+ * minimization during peripheral bus suspend.
+ */
+ if (pdata->vddmin_gpio) {
+ if (motg->phy_pinctrl) {
+ set_state =
+ pinctrl_lookup_state(motg->phy_pinctrl,
+ "hsusb_active");
+ if (IS_ERR(set_state)) {
+ pr_err("cannot get phy pinctrl active state\n");
+ } else {
+ pinctrl_select_state(motg->phy_pinctrl,
+ set_state);
+ }
+ }
+
+ ret = gpio_request(pdata->vddmin_gpio,
+ "MSM_OTG_VDD_MIN_GPIO");
+ if (ret < 0) {
+ dev_err(otg->usb_phy->dev, "gpio req failed for vdd min:%d\n",
+ ret);
+ pdata->vddmin_gpio = 0;
+ }
+ }
+ } else {
+ dev_dbg(otg->usb_phy->dev, "gadget off\n");
+ msm_otg_dbg_log_event(&motg->phy, "GADGET OFF",
+ motg->inputs, otg->state);
+ usb_gadget_vbus_disconnect(otg->gadget);
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+ /* Configure BUS performance parameters to default */
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+
+ if (pdata->vddmin_gpio) {
+ gpio_free(pdata->vddmin_gpio);
+ if (motg->phy_pinctrl) {
+ set_state =
+ pinctrl_lookup_state(motg->phy_pinctrl,
+ "hsusb_sleep");
+ if (IS_ERR(set_state))
+ pr_err("cannot get phy pinctrl sleep state\n");
+ else
+ pinctrl_select_state(motg->phy_pinctrl,
+ set_state);
+ }
+ }
+ }
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri PUT",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_mark_last_busy(otg->usb_phy->dev);
+ pm_runtime_put_autosuspend(otg->usb_phy->dev);
+}
+
+static int msm_otg_set_peripheral(struct usb_otg *otg,
+ struct usb_gadget *gadget)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+
+ /*
+ * Fail peripheral registration if this board can support
+ * only host configuration.
+ */
+ if (motg->pdata->mode == USB_HOST) {
+ dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
+ return -ENODEV;
+ }
+
+ if (!gadget) {
+ if (otg->state == OTG_STATE_B_PERIPHERAL) {
+ msm_otg_dbg_log_event(&motg->phy,
+ "PM RUNTIME: PERIPHERAL GET1",
+ get_pm_runtime_counter(otg->usb_phy->dev), 0);
+ msm_otg_start_peripheral(otg, 0);
+ otg->gadget = NULL;
+ otg->state = OTG_STATE_UNDEFINED;
+ queue_work(motg->otg_wq, &motg->sm_work);
+ } else {
+ otg->gadget = NULL;
+ }
+
+ return 0;
+ }
+ otg->gadget = gadget;
+ dev_dbg(otg->usb_phy->dev, "peripheral driver registered w/ tranceiver\n");
+ msm_otg_dbg_log_event(&motg->phy, "PERIPHERAL DRIVER REGISTERED",
+ otg->state, motg->pdata->mode);
+
+ /*
+ * Kick the state machine work, if host is not supported
+ * or host is already registered with us.
+ */
+ if (motg->pdata->mode == USB_PERIPHERAL || otg->host)
+ queue_work(motg->otg_wq, &motg->sm_work);
+
+ return 0;
+}
+
+static bool msm_otg_read_pmic_id_state(struct msm_otg *motg)
+{
+ unsigned long flags;
+ bool id;
+ int ret;
+
+ if (!motg->pdata->pmic_id_irq)
+ return -ENODEV;
+
+ local_irq_save(flags);
+ ret = irq_get_irqchip_state(motg->pdata->pmic_id_irq,
+ IRQCHIP_STATE_LINE_LEVEL, &id);
+ local_irq_restore(flags);
+
+ /*
+ * If we can not read ID line state for some reason, treat
+ * it as float. This would prevent MHL discovery and kicking
+ * host mode unnecessarily.
+ */
+ if (ret < 0)
+ return true;
+
+ return !!id;
+}
+
+static bool msm_otg_read_phy_id_state(struct msm_otg *motg)
+{
+ u8 val;
+
+ /*
+ * clear the pending/outstanding interrupts and
+ * read the ID status from the SRC_STATUS register.
+ */
+ writeb_relaxed(USB_PHY_ID_MASK, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1);
+
+ writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD);
+ /*
+ * Databook says 200 usec delay is required for
+ * clearing the interrupts.
+ */
+ udelay(200);
+ writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD);
+
+ val = readb_relaxed(USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS);
+ if (val & USB_PHY_IDDIG_1_0)
+ return false; /* ID is grounded */
+ else
+ return true;
+}
+
+static void msm_otg_chg_check_timer_func(unsigned long data)
+{
+ struct msm_otg *motg = (struct msm_otg *) data;
+ struct usb_otg *otg = motg->phy.otg;
+
+ if (atomic_read(&motg->in_lpm) ||
+ !test_bit(B_SESS_VLD, &motg->inputs) ||
+ otg->state != OTG_STATE_B_PERIPHERAL ||
+ otg->gadget->speed != USB_SPEED_UNKNOWN) {
+ dev_dbg(otg->usb_phy->dev, "Nothing to do in chg_check_timer\n");
+ return;
+ }
+
+ if ((readl_relaxed(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) {
+ dev_dbg(otg->usb_phy->dev, "DCP is detected as SDP\n");
+ msm_otg_dbg_log_event(&motg->phy, "DCP IS DETECTED AS SDP",
+ otg->state, 0);
+ set_bit(B_FALSE_SDP, &motg->inputs);
+ queue_work(motg->otg_wq, &motg->sm_work);
+ }
+}
+
+static bool msm_chg_check_secondary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+ bool ret = false;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ chg_det = ulpi_read(phy, 0x87);
+ ret = chg_det & 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static void msm_chg_enable_secondary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ /*
+ * Configure DM as current source, DP as current sink
+ * and enable battery charging comparators.
+ */
+ ulpi_write(phy, 0x8, 0x85);
+ ulpi_write(phy, 0x2, 0x85);
+ ulpi_write(phy, 0x1, 0x85);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool msm_chg_check_primary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+ bool ret = false;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ chg_det = ulpi_read(phy, 0x87);
+ ret = chg_det & 1;
+ /* Turn off VDP_SRC */
+ ulpi_write(phy, 0x3, 0x86);
+ msleep(20);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static void msm_chg_enable_primary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ /*
+ * Configure DP as current source, DM as current sink
+ * and enable battery charging comparators.
+ */
+ ulpi_write(phy, 0x2, 0x85);
+ ulpi_write(phy, 0x1, 0x85);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool msm_chg_check_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 line_state;
+ bool ret = false;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ line_state = ulpi_read(phy, 0x87);
+ ret = line_state & 2;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static void msm_chg_disable_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ ulpi_write(phy, 0x10, 0x86);
+ break;
+ case SNPS_FEMTO_PHY:
+ ulpi_write(phy, 0x10, 0x86);
+ /*
+ * Disable the Rdm_down after
+ * the DCD is completed.
+ */
+ ulpi_write(phy, 0x04, 0x0C);
+ break;
+ default:
+ break;
+ }
+}
+
+static void msm_chg_enable_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ /* Data contact detection enable */
+ ulpi_write(phy, 0x10, 0x85);
+ break;
+ case SNPS_FEMTO_PHY:
+ /*
+ * Idp_src and Rdm_down are de-coupled
+ * on Femto PHY. If Idp_src alone is
+ * enabled, DCD timeout is observed with
+ * wall charger. But a genuine DCD timeout
+ * may be incorrectly interpreted. Also
+ * BC1.2 compliance testers expect Rdm_down
+ * to be enabled during DCD. Enable Rdm_down
+ * explicitly before enabling the DCD.
+ */
+ ulpi_write(phy, 0x04, 0x0B);
+ ulpi_write(phy, 0x10, 0x85);
+ break;
+ default:
+ break;
+ }
+}
+
+static void msm_chg_block_on(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 func_ctrl;
+
+ /* put the controller in non-driving mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ /* disable DP and DM pull down resistors */
+ ulpi_write(phy, 0x6, 0xC);
+ /* Clear charger detecting control bits */
+ ulpi_write(phy, 0x1F, 0x86);
+ /* Clear alt interrupt latch and enable bits */
+ ulpi_write(phy, 0x1F, 0x92);
+ ulpi_write(phy, 0x1F, 0x95);
+ udelay(100);
+ break;
+ default:
+ break;
+ }
+}
+
+static void msm_chg_block_off(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 func_ctrl;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ /* Clear charger detecting control bits */
+ ulpi_write(phy, 0x3F, 0x86);
+ /* Clear alt interrupt latch and enable bits */
+ ulpi_write(phy, 0x1F, 0x92);
+ ulpi_write(phy, 0x1F, 0x95);
+ /* re-enable DP and DM pull down resistors */
+ ulpi_write(phy, 0x6, 0xB);
+ break;
+ default:
+ break;
+ }
+
+ /* put the controller in normal mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+}
+
+static const char *chg_to_string(enum usb_chg_type chg_type)
+{
+ switch (chg_type) {
+ case USB_SDP_CHARGER: return "USB_SDP_CHARGER";
+ case USB_DCP_CHARGER: return "USB_DCP_CHARGER";
+ case USB_CDP_CHARGER: return "USB_CDP_CHARGER";
+ case USB_NONCOMPLIANT_CHARGER: return "USB_NONCOMPLIANT_CHARGER";
+ case USB_FLOATED_CHARGER: return "USB_FLOATED_CHARGER";
+ default: return "INVALID_CHARGER";
+ }
+}
+
+/* Charger-detection timing, in jiffies (BC1.2-style DCD/primary/secondary) */
+#define MSM_CHG_DCD_TIMEOUT		(750 * HZ/1000) /* 750 msec */
+#define MSM_CHG_DCD_POLL_TIME		(50 * HZ/1000) /* 50 msec */
+#define MSM_CHG_PRIMARY_DET_TIME	(50 * HZ/1000) /* TVDPSRC_ON */
+#define MSM_CHG_SECONDARY_DET_TIME	(50 * HZ/1000) /* TVDMSRC_ON */
+/*
+ * msm_chg_detect_work() - self-rescheduling charger-detection state machine.
+ *
+ * Steps through motg->chg_state: enable the detection block and DCD,
+ * poll DCD until done or 750 ms timeout, then run primary detection
+ * (classifies SDP / floated / non-compliant) and, when needed, secondary
+ * detection (DCP vs CDP).  On USB_CHG_STATE_DETECTED it notifies the
+ * power-supply owner, turns the detection block off, drops the runtime-PM
+ * reference taken by sm_work, and queues sm_work.  Otherwise it re-queues
+ * itself on motg->otg_wq after @delay.
+ */
+static void msm_chg_detect_work(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
+	struct usb_phy *phy = &motg->phy;
+	bool is_dcd = false, tmout, vout;
+	static bool dcd;
+	u32 line_state, dm_vlgc;
+	unsigned long delay;
+
+	dev_dbg(phy->dev, "chg detection work\n");
+	msm_otg_dbg_log_event(phy, "CHG DETECTION WORK",
+			motg->chg_state, get_pm_runtime_counter(phy->dev));
+
+	switch (motg->chg_state) {
+	case USB_CHG_STATE_UNDEFINED:
+	case USB_CHG_STATE_IN_PROGRESS:
+		msm_chg_block_on(motg);
+		msm_chg_enable_dcd(motg);
+		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+		motg->dcd_time = 0;
+		delay = MSM_CHG_DCD_POLL_TIME;
+		break;
+	case USB_CHG_STATE_WAIT_FOR_DCD:
+		is_dcd = msm_chg_check_dcd(motg);
+		motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
+		tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
+		if (is_dcd || tmout) {
+			/* remember whether DCD actually completed (vs timed
+			 * out) -- used below to flag floated chargers */
+			if (is_dcd)
+				dcd = true;
+			else
+				dcd = false;
+			msm_chg_disable_dcd(motg);
+			msm_chg_enable_primary_det(motg);
+			delay = MSM_CHG_PRIMARY_DET_TIME;
+			motg->chg_state = USB_CHG_STATE_DCD_DONE;
+		} else {
+			delay = MSM_CHG_DCD_POLL_TIME;
+		}
+		break;
+	case USB_CHG_STATE_DCD_DONE:
+		vout = msm_chg_check_primary_det(motg);
+		line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
+		dm_vlgc = line_state & PORTSC_LS_DM;
+		if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
+			if (line_state) { /* DP > VLGC */
+				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+				motg->chg_state = USB_CHG_STATE_DETECTED;
+				delay = 0;
+			} else {
+				msm_chg_enable_secondary_det(motg);
+				delay = MSM_CHG_SECONDARY_DET_TIME;
+				motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+			}
+		} else { /* DM < VDAT_REF || DM > VLGC */
+			if (line_state) /* DP > VLGC or/and DM > VLGC */
+				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+			else if (!dcd && floated_charger_enable)
+				motg->chg_type = USB_FLOATED_CHARGER;
+			else
+				motg->chg_type = USB_SDP_CHARGER;
+
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			delay = 0;
+			goto state_detected;
+		}
+		break;
+	case USB_CHG_STATE_PRIMARY_DONE:
+		vout = msm_chg_check_secondary_det(motg);
+		if (vout)
+			motg->chg_type = USB_DCP_CHARGER;
+		else
+			motg->chg_type = USB_CDP_CHARGER;
+		motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
+		/* fall through */
+	case USB_CHG_STATE_SECONDARY_DONE:
+		motg->chg_state = USB_CHG_STATE_DETECTED;
+		/* fall through */
+	case USB_CHG_STATE_DETECTED:
+state_detected:
+		/*
+		 * Notify the charger type to power supply
+		 * owner as soon as we determine the charger.
+		 */
+		if (motg->chg_type == USB_DCP_CHARGER && motg->ext_chg_opened) {
+			init_completion(&motg->ext_chg_wait);
+			motg->ext_chg_active = DEFAULT;
+		}
+		msm_otg_notify_chg_type(motg);
+		msm_chg_block_off(motg);
+
+		/* Enable VDP_SRC in case of DCP charger */
+		if (motg->chg_type == USB_DCP_CHARGER)
+			ulpi_write(phy, 0x2, 0x85);
+
+		dev_dbg(phy->dev, "chg_type = %s\n",
+			chg_to_string(motg->chg_type));
+		msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE",
+			motg->chg_type, get_pm_runtime_counter(phy->dev));
+		/* to match _get from sm_work before starting chg_det_work */
+		pm_runtime_mark_last_busy(phy->dev);
+		pm_runtime_put_autosuspend(phy->dev);
+
+		queue_work(motg->otg_wq, &motg->sm_work);
+		return;
+	default:
+		return;
+	}
+
+	msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay);
+	queue_delayed_work(motg->otg_wq, &motg->chg_work, delay);
+}
+
+#define VBUS_INIT_TIMEOUT	msecs_to_jiffies(5000)
+
+/*
+ * We support OTG, Peripheral only and Host only configurations. In case
+ * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
+ * via Id pin status or user request (debugfs). Id/BSV interrupts are not
+ * enabled when switch is controlled by user and default mode is supplied
+ * by board file, which can be changed by userspace later.
+ */
+/*
+ * msm_otg_init_sm() - seed the ID / B_SESS_VLD input bits before the
+ * state machine first runs.
+ *
+ * The source of truth depends on pdata->mode and pdata->otg_control:
+ * user control reads the board-supplied default mode, PHY control reads
+ * OTGSC, PMIC control reads the PMIC/GPIO/PHY ID line and then waits up
+ * to VBUS_INIT_TIMEOUT for the PMIC driver to report initial VBUS.  On
+ * timeout, B_SESS_VLD is cleared and pmic_vbus_init is force-completed
+ * so later waiters do not block.  Finally derives motg->id_state from
+ * the ID input bit (unless already grounded).
+ */
+static void msm_otg_init_sm(struct msm_otg *motg)
+{
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	u32 otgsc = readl_relaxed(USB_OTGSC);
+	int ret;
+
+	switch (pdata->mode) {
+	case USB_OTG:
+		if (pdata->otg_control == OTG_USER_CONTROL) {
+			if (pdata->default_mode == USB_HOST) {
+				clear_bit(ID, &motg->inputs);
+			} else if (pdata->default_mode == USB_PERIPHERAL) {
+				set_bit(ID, &motg->inputs);
+				set_bit(B_SESS_VLD, &motg->inputs);
+			} else {
+				set_bit(ID, &motg->inputs);
+				clear_bit(B_SESS_VLD, &motg->inputs);
+			}
+		} else if (pdata->otg_control == OTG_PHY_CONTROL) {
+			if (otgsc & OTGSC_ID)
+				set_bit(ID, &motg->inputs);
+			else
+				clear_bit(ID, &motg->inputs);
+			if (otgsc & OTGSC_BSV)
+				set_bit(B_SESS_VLD, &motg->inputs);
+			else
+				clear_bit(B_SESS_VLD, &motg->inputs);
+		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
+			/* Prefer PMIC IRQ, then external ID GPIO, then PHY */
+			if (pdata->pmic_id_irq) {
+				if (msm_otg_read_pmic_id_state(motg))
+					set_bit(ID, &motg->inputs);
+				else
+					clear_bit(ID, &motg->inputs);
+			} else if (motg->ext_id_irq) {
+				if (gpio_get_value(pdata->usb_id_gpio))
+					set_bit(ID, &motg->inputs);
+				else
+					clear_bit(ID, &motg->inputs);
+			} else if (motg->phy_irq) {
+				if (msm_otg_read_phy_id_state(motg))
+					set_bit(ID, &motg->inputs);
+				else
+					clear_bit(ID, &motg->inputs);
+			}
+			/*
+			 * VBUS initial state is reported after PMIC
+			 * driver initialization. Wait for it.
+			 */
+			ret = wait_for_completion_timeout(&pmic_vbus_init,
+							  VBUS_INIT_TIMEOUT);
+			if (!ret) {
+				dev_dbg(motg->phy.dev, "%s: timeout waiting for PMIC VBUS\n",
+					__func__);
+				msm_otg_dbg_log_event(&motg->phy,
+						"PMIC VBUS WAIT TMOUT", motg->inputs,
+						motg->phy.otg->state);
+				clear_bit(B_SESS_VLD, &motg->inputs);
+				pmic_vbus_init.done = 1;
+			}
+		}
+		break;
+	case USB_HOST:
+		clear_bit(ID, &motg->inputs);
+		break;
+	case USB_PERIPHERAL:
+		set_bit(ID, &motg->inputs);
+		if (pdata->otg_control == OTG_PHY_CONTROL) {
+			if (otgsc & OTGSC_BSV)
+				set_bit(B_SESS_VLD, &motg->inputs);
+			else
+				clear_bit(B_SESS_VLD, &motg->inputs);
+		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
+			/*
+			 * VBUS initial state is reported after PMIC
+			 * driver initialization. Wait for it.
+			 */
+			ret = wait_for_completion_timeout(&pmic_vbus_init,
+							  VBUS_INIT_TIMEOUT);
+			if (!ret) {
+				dev_dbg(motg->phy.dev, "%s: timeout waiting for PMIC VBUS\n",
+					__func__);
+				msm_otg_dbg_log_event(&motg->phy,
+						"PMIC VBUS WAIT TMOUT", motg->inputs,
+						motg->phy.otg->state);
+				clear_bit(B_SESS_VLD, &motg->inputs);
+				pmic_vbus_init.done = 1;
+			}
+		} else if (pdata->otg_control == OTG_USER_CONTROL) {
+			set_bit(ID, &motg->inputs);
+			set_bit(B_SESS_VLD, &motg->inputs);
+		}
+		break;
+	default:
+		break;
+	}
+	msm_otg_dbg_log_event(&motg->phy, "SM INIT", pdata->mode, motg->inputs);
+	if (motg->id_state != USB_ID_GROUND)
+		motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT :
+							USB_ID_GROUND;
+}
+
+/*
+ * msm_otg_wait_for_ext_chg_done() - block until an external charger
+ * detection client finishes.
+ *
+ * While motg->ext_chg_active == ACTIVE, waits on motg->ext_chg_wait in
+ * 3-second slices, looping as long as the client stays ACTIVE (so a
+ * single timeout only logs an error).  Afterwards, if the external
+ * charger interface was ever opened, undoes the manual D+ pull-up
+ * configuration (when ENABLE_DP_MANUAL_PULLUP is set), clears the
+ * charging control register and restores the DP/DM pull-downs via ULPI.
+ */
+static void msm_otg_wait_for_ext_chg_done(struct msm_otg *motg)
+{
+	struct usb_phy *phy = &motg->phy;
+	unsigned long t;
+
+	/*
+	 * Defer next cable connect event till external charger
+	 * detection is completed.
+	 */
+
+	if (motg->ext_chg_active == ACTIVE) {
+
+do_wait:
+		pr_debug("before msm_otg ext chg wait\n");
+		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: WAIT", 0, 0);
+
+		t = wait_for_completion_timeout(&motg->ext_chg_wait,
+				msecs_to_jiffies(3000));
+		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: DONE", t, 0);
+
+		if (!t)
+			pr_err("msm_otg ext chg wait timeout\n");
+		else if (motg->ext_chg_active == ACTIVE)
+			goto do_wait;
+		else
+			pr_debug("msm_otg ext chg wait done\n");
+	}
+
+	if (motg->ext_chg_opened) {
+		if (phy->flags & ENABLE_DP_MANUAL_PULLUP) {
+			ulpi_write(phy, ULPI_MISC_A_VBUSVLDEXT |
+					ULPI_MISC_A_VBUSVLDEXTSEL,
+					ULPI_CLR(ULPI_MISC_A));
+		}
+		/* clear charging register bits */
+		ulpi_write(phy, 0x3F, 0x86);
+		/* re-enable DP and DM pull-down resistors*/
+		ulpi_write(phy, 0x6, 0xB);
+	}
+}
+
+/*
+ * msm_otg_sm_work() - main OTG state-machine worker.
+ *
+ * Driven by ID/BSV inputs in motg->inputs and the current otg->state.
+ * Transitions: UNDEFINED -> B_IDLE (after block reset + input seeding);
+ * B_IDLE -> A_HOST on ID ground, or charger detection / peripheral start
+ * on B_SESS_VLD; B_PERIPHERAL <-> B_SUSPEND on bus suspend/resume;
+ * disconnect paths fall back to B_IDLE and re-queue the worker.
+ *
+ * NOTE(review): the runtime-PM get/put pairing here is deliberate and
+ * asymmetric across states (e.g. the _get taken on cable connect in
+ * B_IDLE is released in the disconnect path or by chg_work) -- do not
+ * reorder these calls without tracing every matching counterpart.
+ */
+static void msm_otg_sm_work(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
+	struct usb_otg *otg = motg->phy.otg;
+	struct device *dev = otg->usb_phy->dev;
+	bool work = 0, dcp;
+	int ret;
+
+	pr_debug("%s work\n", usb_otg_state_string(otg->state));
+	msm_otg_dbg_log_event(&motg->phy, "SM WORK:",
+			otg->state, motg->inputs);
+
+	/* Just resume h/w if reqd, pm_count is handled based on state/inputs */
+	if (motg->resume_pending) {
+		pm_runtime_get_sync(otg->usb_phy->dev);
+		if (atomic_read(&motg->in_lpm)) {
+			dev_err(dev, "SM WORK: USB is in LPM\n");
+			msm_otg_dbg_log_event(&motg->phy,
+					"SM WORK: USB IS IN LPM",
+					otg->state, motg->inputs);
+			msm_otg_resume(motg);
+		}
+		motg->resume_pending = false;
+		pm_runtime_put_noidle(otg->usb_phy->dev);
+	}
+
+	switch (otg->state) {
+	case OTG_STATE_UNDEFINED:
+		pm_runtime_get_sync(otg->usb_phy->dev);
+		msm_otg_reset(otg->usb_phy);
+		/* Add child device only after block reset */
+		ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL,
+					&motg->pdev->dev);
+		if (ret)
+			dev_dbg(&motg->pdev->dev, "failed to add BAM core\n");
+
+		msm_otg_init_sm(motg);
+		otg->state = OTG_STATE_B_IDLE;
+		/* nothing connected: release PM ref and idle in B_IDLE */
+		if (!test_bit(B_SESS_VLD, &motg->inputs) &&
+				test_bit(ID, &motg->inputs)) {
+			msm_otg_dbg_log_event(&motg->phy,
+				"PM RUNTIME: UNDEF PUT",
+				get_pm_runtime_counter(otg->usb_phy->dev), 0);
+			pm_runtime_put_sync(otg->usb_phy->dev);
+			break;
+		}
+		pm_runtime_put(otg->usb_phy->dev);
+		/* FALL THROUGH */
+	case OTG_STATE_B_IDLE:
+		if (!test_bit(ID, &motg->inputs) && otg->host) {
+			pr_debug("!id\n");
+			msm_otg_dbg_log_event(&motg->phy, "!ID",
+					motg->inputs, otg->state);
+
+			msm_otg_start_host(otg, 1);
+			otg->state = OTG_STATE_A_HOST;
+		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
+			pr_debug("b_sess_vld\n");
+			msm_otg_dbg_log_event(&motg->phy, "B_SESS_VLD",
+					motg->inputs, otg->state);
+			switch (motg->chg_state) {
+			case USB_CHG_STATE_UNDEFINED:
+				/* put at the end of chg_det or disconnect */
+				pm_runtime_get_sync(otg->usb_phy->dev);
+				msm_otg_dbg_log_event(&motg->phy, "PM CHG GET",
+						get_pm_runtime_counter(dev), 0);
+				motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+				msm_chg_detect_work(&motg->chg_work.work);
+				break;
+			case USB_CHG_STATE_DETECTED:
+				switch (motg->chg_type) {
+				case USB_DCP_CHARGER:
+					/* fall through */
+				case USB_NONCOMPLIANT_CHARGER:
+					msm_otg_notify_charger(motg,
+							dcp_max_current);
+					if (!motg->is_ext_chg_dcp)
+						otg->state =
+							OTG_STATE_B_CHARGER;
+					break;
+				case USB_FLOATED_CHARGER:
+					msm_otg_notify_charger(motg,
+							IDEV_CHG_MAX);
+					otg->state = OTG_STATE_B_CHARGER;
+					break;
+				case USB_CDP_CHARGER:
+					msm_otg_notify_charger(motg,
+							IDEV_CHG_MAX);
+					/* fall through */
+				case USB_SDP_CHARGER:
+					pm_runtime_get_sync(otg->usb_phy->dev);
+					msm_otg_start_peripheral(otg, 1);
+					otg->state =
+						OTG_STATE_B_PERIPHERAL;
+					mod_timer(&motg->chg_check_timer,
+							CHG_RECHECK_DELAY);
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		} else {
+			/* disconnect: cancel charger detection and undo
+			 * its state; re-balance the runtime-PM count */
+			pr_debug("chg_work cancel");
+			msm_otg_dbg_log_event(&motg->phy, "CHG_WORK CANCEL",
+					motg->inputs, otg->state);
+			del_timer_sync(&motg->chg_check_timer);
+			clear_bit(B_FALSE_SDP, &motg->inputs);
+			cancel_delayed_work_sync(&motg->chg_work);
+			/*
+			 * Find out whether chg_w couldn't start or finished.
+			 * In both the cases, runtime ref_count vote is missing
+			 */
+			if (motg->chg_state == USB_CHG_STATE_UNDEFINED ||
+			    motg->chg_state == USB_CHG_STATE_DETECTED) {
+				msm_otg_dbg_log_event(&motg->phy, "RT !CHG GET",
+				get_pm_runtime_counter(otg->usb_phy->dev), 0);
+				pm_runtime_get_sync(dev);
+			}
+
+			dcp = (motg->chg_type == USB_DCP_CHARGER);
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_otg_notify_charger(motg, 0);
+			if (dcp) {
+				if (motg->ext_chg_active == DEFAULT)
+					motg->ext_chg_active = INACTIVE;
+				msm_otg_wait_for_ext_chg_done(motg);
+				/* Turn off VDP_SRC */
+				ulpi_write(otg->usb_phy, 0x2, 0x86);
+			}
+			msm_chg_block_off(motg);
+			msm_otg_dbg_log_event(&motg->phy, "RT: CHG A PUT",
+				get_pm_runtime_counter(otg->usb_phy->dev), 0);
+			/* Delay used only if autosuspend enabled */
+			pm_runtime_mark_last_busy(dev);
+			pm_runtime_put_autosuspend(dev);
+		}
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (test_bit(B_SESS_VLD, &motg->inputs) &&
+				test_bit(B_FALSE_SDP, &motg->inputs)) {
+			/* enumeration never happened: reclassify as DCP */
+			pr_debug("B_FALSE_SDP\n");
+			msm_otg_start_peripheral(otg, 0);
+			motg->chg_type = USB_DCP_CHARGER;
+			clear_bit(B_FALSE_SDP, &motg->inputs);
+			otg->state = OTG_STATE_B_IDLE;
+			msm_otg_dbg_log_event(&motg->phy, "B_FALSE_SDP PUT",
+				get_pm_runtime_counter(dev), motg->inputs);
+			pm_runtime_put_sync(dev);
+			/* schedule work to update charging current */
+			work = 1;
+		} else if (!test_bit(B_SESS_VLD, &motg->inputs)) {
+			msm_otg_start_peripheral(otg, 0);
+			msm_otg_dbg_log_event(&motg->phy, "RT PM: B_PERI A PUT",
+				get_pm_runtime_counter(dev), 0);
+			/* _put for _get done on cable connect in B_IDLE */
+			pm_runtime_put_noidle(dev);
+			/* Schedule work to finish cable disconnect processing*/
+			otg->state = OTG_STATE_B_IDLE;
+			work = 1;
+		} else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) {
+			pr_debug("a_bus_suspend\n");
+			msm_otg_dbg_log_event(&motg->phy,
+				"BUS_SUSPEND: PM RT PUT",
+				get_pm_runtime_counter(dev), 0);
+			otg->state = OTG_STATE_B_SUSPEND;
+			/* _get on connect in B_IDLE or host resume in B_SUSP */
+			pm_runtime_mark_last_busy(dev);
+			pm_runtime_put_autosuspend(dev);
+		}
+		break;
+	case OTG_STATE_B_SUSPEND:
+		if (!test_bit(B_SESS_VLD, &motg->inputs)) {
+			msm_otg_start_peripheral(otg, 0);
+			otg->state = OTG_STATE_B_IDLE;
+			/* Schedule work to finish cable disconnect processing*/
+			work = 1;
+		} else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) {
+			pr_debug("!a_bus_suspend\n");
+			otg->state = OTG_STATE_B_PERIPHERAL;
+			msm_otg_dbg_log_event(&motg->phy,
+				"BUS_RESUME: PM RT GET",
+				get_pm_runtime_counter(dev), 0);
+			pm_runtime_get_sync(dev);
+		}
+		break;
+
+	case OTG_STATE_B_CHARGER:
+		if (test_bit(B_SESS_VLD, &motg->inputs)) {
+			pr_debug("BSV set again\n");
+			msm_otg_dbg_log_event(&motg->phy, "BSV SET AGAIN",
+					motg->inputs, otg->state);
+		} else if (!test_bit(B_SESS_VLD, &motg->inputs)) {
+			otg->state = OTG_STATE_B_IDLE;
+			work = 1;
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		if (test_bit(ID, &motg->inputs)) {
+			msm_otg_start_host(otg, 0);
+			otg->state = OTG_STATE_B_IDLE;
+			work = 1;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (work)
+		queue_work(motg->otg_wq, &motg->sm_work);
+}
+
+/*
+ * msm_otg_irq() - OTG interrupt handler.
+ *
+ * In LPM the registers are inaccessible: the first interrupt is latched
+ * in motg->async_int, the IRQ line is disabled and the state machine is
+ * kicked to resume; later interrupts while async_int is set are ignored.
+ * Otherwise decodes OTGSC ID-change / B-session-valid-change events into
+ * the ID / B_SESS_VLD input bits, queues sm_work when an input changed,
+ * and acknowledges the interrupt by writing OTGSC back.
+ */
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+	struct usb_otg *otg = motg->phy.otg;
+	u32 otgsc = 0;
+	bool work = 0;
+
+	if (atomic_read(&motg->in_lpm)) {
+		pr_debug("OTG IRQ: %d in LPM\n", irq);
+		msm_otg_dbg_log_event(&motg->phy, "OTG IRQ IS IN LPM",
+				irq, otg->state);
+		/*Ignore interrupt if one interrupt already seen in LPM*/
+		if (motg->async_int)
+			return IRQ_HANDLED;
+
+		disable_irq_nosync(irq);
+		motg->async_int = irq;
+		msm_otg_kick_sm_work(motg);
+
+		return IRQ_HANDLED;
+	}
+	motg->usb_irq_count++;
+
+	otgsc = readl_relaxed(USB_OTGSC);
+	if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
+		return IRQ_NONE;
+
+	if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
+		if (otgsc & OTGSC_ID) {
+			dev_dbg(otg->usb_phy->dev, "ID set\n");
+			msm_otg_dbg_log_event(&motg->phy, "ID SET",
+					motg->inputs, otg->state);
+			set_bit(ID, &motg->inputs);
+		} else {
+			dev_dbg(otg->usb_phy->dev, "ID clear\n");
+			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
+					motg->inputs, otg->state);
+			clear_bit(ID, &motg->inputs);
+		}
+		work = 1;
+	} else if ((otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
+		if (otgsc & OTGSC_BSV) {
+			dev_dbg(otg->usb_phy->dev, "BSV set\n");
+			msm_otg_dbg_log_event(&motg->phy, "BSV SET",
+					motg->inputs, otg->state);
+			set_bit(B_SESS_VLD, &motg->inputs);
+		} else {
+			dev_dbg(otg->usb_phy->dev, "BSV clear\n");
+			msm_otg_dbg_log_event(&motg->phy, "BSV CLEAR",
+					motg->inputs, otg->state);
+			clear_bit(B_SESS_VLD, &motg->inputs);
+			clear_bit(A_BUS_SUSPEND, &motg->inputs);
+		}
+		work = 1;
+	}
+	if (work)
+		queue_work(motg->otg_wq, &motg->sm_work);
+
+	/* write-one-to-clear: ack the status bits we just handled */
+	writel_relaxed(otgsc, USB_OTGSC);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_otg_set_vbus_state() - PMIC callback reporting VBUS presence.
+ * @online: non-zero when VBUS is present.
+ *
+ * Updates B_SESS_VLD in motg->inputs.  The very first invocation (the
+ * static @init flag is false) completes the pmic_vbus_init completion
+ * that msm_otg_init_sm() waits on, instead of kicking the state machine.
+ * Redundant notifications (bit already in the reported state) after init
+ * are ignored.  Handles external-DCP bookkeeping and the optional
+ * "vbus low means host mode" board quirk before kicking sm_work.
+ */
+static void msm_otg_set_vbus_state(int online)
+{
+	struct msm_otg *motg = the_msm_otg;
+	static bool init;
+
+	motg->vbus_state = online;
+
+	if (motg->err_event_seen)
+		return;
+
+	if (online) {
+		pr_debug("PMIC: BSV set\n");
+		msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV SET",
+				init, motg->inputs);
+		if (test_and_set_bit(B_SESS_VLD, &motg->inputs) && init)
+			return;
+	} else {
+		pr_debug("PMIC: BSV clear\n");
+		msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV CLEAR",
+				init, motg->inputs);
+		motg->is_ext_chg_dcp = false;
+		if (!test_and_clear_bit(B_SESS_VLD, &motg->inputs) && init)
+			return;
+	}
+
+	/* do not queue state m/c work if id is grounded */
+	if (!test_bit(ID, &motg->inputs) &&
+			!motg->pdata->vbus_low_as_hostmode) {
+		/*
+		 * state machine work waits for initial VBUS
+		 * completion in UNDEFINED state. Process
+		 * the initial VBUS event in ID_GND state.
+		 */
+		if (init)
+			return;
+	}
+
+	if (!init) {
+		init = true;
+		if (pmic_vbus_init.done &&
+				test_bit(B_SESS_VLD, &motg->inputs)) {
+			/* init wait already timed out; treat as live event */
+			pr_debug("PMIC: BSV came late\n");
+			msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV CAME LATE",
+					init, motg->inputs);
+			goto out;
+		}
+
+		if (motg->pdata->vbus_low_as_hostmode &&
+			!test_bit(B_SESS_VLD, &motg->inputs)) {
+			motg->id_state = USB_ID_GROUND;
+			clear_bit(ID, &motg->inputs);
+		}
+		complete(&pmic_vbus_init);
+		pr_debug("PMIC: BSV init complete\n");
+		msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV INIT COMPLETE",
+				init, motg->inputs);
+		return;
+	}
+
+out:
+	if (motg->is_ext_chg_dcp) {
+		if (test_bit(B_SESS_VLD, &motg->inputs)) {
+			msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+		} else {
+			motg->is_ext_chg_dcp = false;
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_otg_notify_charger(motg, 0);
+		}
+		return;
+	}
+
+	msm_otg_dbg_log_event(&motg->phy, "CHECK VBUS EVENT DURING SUSPEND",
+			atomic_read(&motg->pm_suspended),
+			motg->sm_work_pending);
+
+	/* Move to host mode on vbus low if required */
+	if (motg->pdata->vbus_low_as_hostmode) {
+		if (!test_bit(B_SESS_VLD, &motg->inputs))
+			clear_bit(ID, &motg->inputs);
+		else
+			set_bit(ID, &motg->inputs);
+	}
+	msm_otg_kick_sm_work(motg);
+}
+
+/*
+ * msm_id_status_w() - debounced worker that samples the ID line.
+ *
+ * Reads the ID state from PMIC IRQ, external GPIO or PHY (in that
+ * priority order), mirrors it into the ID input bit, optionally drives
+ * the board's switch-select GPIO, and kicks the state machine when the
+ * bit actually changed and the SM has left UNDEFINED.
+ */
+static void msm_id_status_w(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg,
+						id_status_work.work);
+	int work = 0;
+
+	dev_dbg(motg->phy.dev, "ID status_w\n");
+
+	if (motg->pdata->pmic_id_irq)
+		motg->id_state = msm_otg_read_pmic_id_state(motg);
+	else if (motg->ext_id_irq)
+		motg->id_state = gpio_get_value(motg->pdata->usb_id_gpio);
+	else if (motg->phy_irq)
+		motg->id_state = msm_otg_read_phy_id_state(motg);
+
+	if (motg->err_event_seen)
+		return;
+
+	if (motg->id_state) {
+		/* ID floating: device/peripheral role */
+		if (gpio_is_valid(motg->pdata->switch_sel_gpio))
+			gpio_direction_input(motg->pdata->switch_sel_gpio);
+		if (!test_and_set_bit(ID, &motg->inputs)) {
+			pr_debug("ID set\n");
+			msm_otg_dbg_log_event(&motg->phy, "ID SET",
+					motg->inputs, motg->phy.otg->state);
+			work = 1;
+		}
+	} else {
+		/* ID grounded: host role */
+		if (gpio_is_valid(motg->pdata->switch_sel_gpio))
+			gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
+		if (test_and_clear_bit(ID, &motg->inputs)) {
+			pr_debug("ID clear\n");
+			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
+					motg->inputs, motg->phy.otg->state);
+			work = 1;
+		}
+	}
+
+	if (work && (motg->phy.otg->state != OTG_STATE_UNDEFINED)) {
+		msm_otg_dbg_log_event(&motg->phy,
+				"CHECK ID EVENT DURING SUSPEND",
+				atomic_read(&motg->pm_suspended),
+				motg->sm_work_pending);
+		msm_otg_kick_sm_work(motg);
+	}
+}
+
+#define MSM_ID_STATUS_DELAY	5 /* 5msec */
+/*
+ * msm_id_irq() - ID-pin interrupt; defers to msm_id_status_w() after a
+ * short debounce delay so the line can settle.
+ */
+static irqreturn_t msm_id_irq(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+
+	/*schedule delayed work for 5msec for ID line state to settle*/
+	queue_delayed_work(motg->otg_wq, &motg->id_status_work,
+			msecs_to_jiffies(MSM_ID_STATUS_DELAY));
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_otg_pm_notify() - system-suspend notifier.
+ *
+ * On PM_POST_SUSPEND, clears the pm_suspended flag and flushes any
+ * sm_work that was deferred while the system was suspended.  Always
+ * returns NOTIFY_OK.
+ */
+int msm_otg_pm_notify(struct notifier_block *notify_block,
+					unsigned long mode, void *unused)
+{
+	struct msm_otg *motg = container_of(
+		notify_block, struct msm_otg, pm_notify);
+
+	dev_dbg(motg->phy.dev, "OTG PM notify:%lx, sm_pending:%u\n", mode,
+					motg->sm_work_pending);
+	msm_otg_dbg_log_event(&motg->phy, "PM NOTIFY",
+			mode, motg->sm_work_pending);
+
+	switch (mode) {
+	case PM_POST_SUSPEND:
+		/* OTG sm_work can be armed now */
+		atomic_set(&motg->pm_suspended, 0);
+
+		/* Handle any deferred wakeup events from USB during suspend */
+		if (motg->sm_work_pending) {
+			motg->sm_work_pending = false;
+			queue_work(motg->otg_wq, &motg->sm_work);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/* debugfs "mode": show the current role derived from the OTG state */
+static int msm_otg_mode_show(struct seq_file *s, void *unused)
+{
+	struct msm_otg *motg = s->private;
+	struct usb_otg *otg = motg->phy.otg;
+
+	switch (otg->state) {
+	case OTG_STATE_A_HOST:
+		seq_puts(s, "host\n");
+		break;
+	case OTG_STATE_B_IDLE:
+	case OTG_STATE_B_PERIPHERAL:
+	case OTG_STATE_B_SUSPEND:
+		seq_puts(s, "peripheral\n");
+		break;
+	default:
+		seq_puts(s, "none\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_otg_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_otg_mode_show, inode->i_private);
+}
+
+/*
+ * debugfs "mode" write: accept "host", "peripheral" or "none" and force
+ * the corresponding ID / B_SESS_VLD inputs, but only from OTG states
+ * where the requested transition makes sense; otherwise the write is
+ * silently accepted without effect (status stays == count).  Queues
+ * sm_work to act on the new inputs.
+ */
+static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct msm_otg *motg = s->private;
+	char buf[16];
+	struct usb_phy *phy = &motg->phy;
+	int status = count;
+	enum usb_mode_type req_mode;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
+		status = -EFAULT;
+		goto out;
+	}
+
+	if (!strncmp(buf, "host", 4)) {
+		req_mode = USB_HOST;
+	} else if (!strncmp(buf, "peripheral", 10)) {
+		req_mode = USB_PERIPHERAL;
+	} else if (!strncmp(buf, "none", 4)) {
+		req_mode = USB_NONE;
+	} else {
+		status = -EINVAL;
+		goto out;
+	}
+
+	switch (req_mode) {
+	case USB_NONE:
+		switch (phy->otg->state) {
+		case OTG_STATE_A_HOST:
+		case OTG_STATE_B_PERIPHERAL:
+		case OTG_STATE_B_SUSPEND:
+			set_bit(ID, &motg->inputs);
+			clear_bit(B_SESS_VLD, &motg->inputs);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	case USB_PERIPHERAL:
+		switch (phy->otg->state) {
+		case OTG_STATE_B_IDLE:
+		case OTG_STATE_A_HOST:
+			set_bit(ID, &motg->inputs);
+			set_bit(B_SESS_VLD, &motg->inputs);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	case USB_HOST:
+		switch (phy->otg->state) {
+		case OTG_STATE_B_IDLE:
+		case OTG_STATE_B_PERIPHERAL:
+		case OTG_STATE_B_SUSPEND:
+			clear_bit(ID, &motg->inputs);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	default:
+		goto out;
+	}
+
+	motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT :
+							USB_ID_GROUND;
+	queue_work(motg->otg_wq, &motg->sm_work);
+out:
+	return status;
+}
+
+const struct file_operations msm_otg_mode_fops = {
+	.open = msm_otg_mode_open,
+	.read = seq_read,
+	.write = msm_otg_mode_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* debugfs "otg_state": print the current OTG FSM state name */
+static int msm_otg_show_otg_state(struct seq_file *s, void *unused)
+{
+	struct msm_otg *motg = s->private;
+	struct usb_phy *phy = &motg->phy;
+
+	seq_printf(s, "%s\n", usb_otg_state_string(phy->otg->state));
+	return 0;
+}
+
+static int msm_otg_otg_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_otg_show_otg_state, inode->i_private);
+}
+
+const struct file_operations msm_otg_state_fops = {
+	.open = msm_otg_otg_state_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* debugfs "chg_type": print the detected charger type */
+static int msm_otg_show_chg_type(struct seq_file *s, void *unused)
+{
+	struct msm_otg *motg = s->private;
+
+	seq_printf(s, "%s\n", chg_to_string(motg->chg_type));
+	return 0;
+}
+
+static int msm_otg_chg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_otg_show_chg_type, inode->i_private);
+}
+
+const struct file_operations msm_otg_chg_fops = {
+	.open = msm_otg_chg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* debugfs "bus_voting": show whether debug bus voting is enabled */
+static int msm_otg_bus_show(struct seq_file *s, void *unused)
+{
+	if (debug_bus_voting_enabled)
+		seq_puts(s, "enabled\n");
+	else
+		seq_puts(s, "disabled\n");
+
+	return 0;
+}
+
+static int msm_otg_bus_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_otg_bus_show, inode->i_private);
+}
+
+/*
+ * debugfs "bus_voting" write: "enable" arms bus voting (the state
+ * machine does the actual vote); anything else disables it and drops
+ * the vote to the minimum performance level immediately.
+ */
+static ssize_t msm_otg_bus_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char buf[8];
+	struct seq_file *s = file->private_data;
+	struct msm_otg *motg = s->private;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (!strncmp(buf, "enable", 6)) {
+		/* Do not vote here. Let OTG statemachine decide when to vote */
+		debug_bus_voting_enabled = true;
+	} else {
+		debug_bus_voting_enabled = false;
+		msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+	}
+
+	return count;
+}
+
+/*
+ * debugfs "dbg_buff": dump the circular event-log buffer in order,
+ * starting from the oldest slot (current index), skipping empty slots.
+ * Holds dbg_lock for reading so writers do not race the dump.
+ */
+static int msm_otg_dbg_buff_show(struct seq_file *s, void *unused)
+{
+	struct msm_otg *motg = s->private;
+	unsigned long flags;
+	unsigned int i;
+
+	read_lock_irqsave(&motg->dbg_lock, flags);
+
+	i = motg->dbg_idx;
+	if (strnlen(motg->buf[i], DEBUG_MSG_LEN))
+		seq_printf(s, "%s\n", motg->buf[i]);
+	for (dbg_inc(&i); i != motg->dbg_idx;  dbg_inc(&i)) {
+		if (!strnlen(motg->buf[i], DEBUG_MSG_LEN))
+			continue;
+		seq_printf(s, "%s\n", motg->buf[i]);
+	}
+	read_unlock_irqrestore(&motg->dbg_lock, flags);
+
+	return 0;
+}
+
+static int msm_otg_dbg_buff_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_otg_dbg_buff_show, inode->i_private);
+}
+
+const struct file_operations msm_otg_dbg_buff_fops = {
+	.open = msm_otg_dbg_buff_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * DP/DM pull-down regulator ops: "enabling" the regulator turns on the
+ * 3.3 V PHY LDO and records that the pull-downs are removed
+ * (motg->rm_pulldown); disabling reverses it.  Both are idempotent --
+ * they only act on a state change.
+ */
+static int msm_otg_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct msm_otg *motg = rdev_get_drvdata(rdev);
+
+	if (!motg->rm_pulldown) {
+		ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_ON);
+		if (!ret) {
+			motg->rm_pulldown = true;
+			msm_otg_dbg_log_event(&motg->phy, "RM Pulldown",
+					motg->rm_pulldown, 0);
+		}
+	}
+
+	return ret;
+}
+
+static int msm_otg_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct msm_otg *motg = rdev_get_drvdata(rdev);
+
+	if (motg->rm_pulldown) {
+		ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_OFF);
+		if (!ret) {
+			motg->rm_pulldown = false;
+			msm_otg_dbg_log_event(&motg->phy, "RM Pulldown",
+					motg->rm_pulldown, 0);
+		}
+	}
+
+	return ret;
+}
+
+static int msm_otg_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct msm_otg *motg = rdev_get_drvdata(rdev);
+
+	return motg->rm_pulldown;
+}
+
+static struct regulator_ops msm_otg_dpdm_regulator_ops = {
+	.enable		= msm_otg_dpdm_regulator_enable,
+	.disable	= msm_otg_dpdm_regulator_disable,
+	.is_enabled	= msm_otg_dpdm_regulator_is_enabled,
+};
+
+/*
+ * usb_phy_regulator_init() - register the DP/DM pull-down control as a
+ * voltage regulator named after the device-tree node, so consumers can
+ * toggle the pull-downs via the regulator framework.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or the error
+ * from devm_regulator_register().  All allocations are devm-managed.
+ */
+static int usb_phy_regulator_init(struct msm_otg *motg)
+{
+	struct device *dev = motg->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	motg->dpdm_rdesc.owner = THIS_MODULE;
+	motg->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	motg->dpdm_rdesc.ops = &msm_otg_dpdm_regulator_ops;
+	motg->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = motg;
+	cfg.of_node = dev->of_node;
+
+	motg->dpdm_rdev = devm_regulator_register(dev, &motg->dpdm_rdesc, &cfg);
+	if (IS_ERR(motg->dpdm_rdev))
+		return PTR_ERR(motg->dpdm_rdev);
+
+	return 0;
+}
+
+/* debugfs "bus_voting" file operations */
+const struct file_operations msm_otg_bus_fops = {
+	.open = msm_otg_bus_open,
+	.read = seq_read,
+	.write = msm_otg_bus_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* Root of the driver's debugfs tree ("/sys/kernel/debug/msm_otg") */
+static struct dentry *msm_otg_dbg_root;
+
+/*
+ * msm_otg_debugfs_init() - create the msm_otg debugfs directory and its
+ * files: "mode" (only under user-controlled OTG/peripheral config),
+ * "chg_type", "bus_voting", "otg_state" and "dbg_buff".
+ *
+ * Return: 0 on success, -ENODEV if any creation fails (the whole tree
+ * is removed on failure so the probe path has nothing to clean up).
+ */
+static int msm_otg_debugfs_init(struct msm_otg *motg)
+{
+	struct dentry *msm_otg_dentry;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
+
+	if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
+		return -ENODEV;
+
+	if ((pdata->mode == USB_OTG || pdata->mode == USB_PERIPHERAL) &&
+		pdata->otg_control == OTG_USER_CONTROL) {
+
+		msm_otg_dentry = debugfs_create_file("mode", 0644,
+			msm_otg_dbg_root, motg, &msm_otg_mode_fops);
+
+		if (!msm_otg_dentry) {
+			debugfs_remove(msm_otg_dbg_root);
+			msm_otg_dbg_root = NULL;
+			return -ENODEV;
+		}
+	}
+
+	msm_otg_dentry = debugfs_create_file("chg_type", 0444, msm_otg_dbg_root,
+						motg, &msm_otg_chg_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+
+	msm_otg_dentry = debugfs_create_file("bus_voting", 0644,
+		msm_otg_dbg_root, motg, &msm_otg_bus_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+
+	msm_otg_dentry = debugfs_create_file("otg_state", 0444,
+		msm_otg_dbg_root, motg, &msm_otg_state_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+
+	msm_otg_dentry = debugfs_create_file("dbg_buff", 0444,
+		msm_otg_dbg_root, motg, &msm_otg_dbg_buff_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* Remove the entire msm_otg debugfs tree (safe on a NULL root) */
+static void msm_otg_debugfs_cleanup(void)
+{
+	debugfs_remove_recursive(msm_otg_dbg_root);
+}
+
+/*
+ * sysfs "perf_mode" store: "enable" raises the core clock to its
+ * nominal rate and votes for the nominal NOC level; anything else drops
+ * to SVS.  Logs but does not propagate clk_set_rate() failures -- the
+ * write always returns @count.
+ */
+static ssize_t
+set_msm_otg_perf_mode(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct msm_otg *motg = the_msm_otg;
+	int ret;
+	long clk_rate;
+
+	pr_debug("%s: enable:%d\n", __func__, !strncasecmp(buf, "enable", 6));
+
+	if (!strncasecmp(buf, "enable", 6)) {
+		clk_rate = motg->core_clk_nominal_rate;
+		msm_otg_bus_freq_set(motg, USB_NOC_NOM_VOTE);
+	} else {
+		clk_rate = motg->core_clk_svs_rate;
+		msm_otg_bus_freq_set(motg, USB_NOC_SVS_VOTE);
+	}
+
+	if (clk_rate) {
+		pr_debug("Set usb sys_clk rate:%ld\n", clk_rate);
+		ret = clk_set_rate(motg->core_clk, clk_rate);
+		if (ret)
+			pr_err("sys_clk set_rate fail:%d %ld\n", ret, clk_rate);
+		msm_otg_dbg_log_event(&motg->phy, "OTG PERF SET",
+							clk_rate, ret);
+	} else {
+		pr_err("usb sys_clk rate is undefined\n");
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(perf_mode, 0200, NULL, set_msm_otg_perf_mode);
+
+/* SCM command identifiers for the PNOC errata workaround below */
+#define MSM_OTG_CMD_ID		0x09
+#define MSM_OTG_DEVICE_ID	0x04
+#define MSM_OTG_VMID_IDX	0xFF
+#define MSM_OTG_MEM_TYPE	0x02
+/* Packed payload for the SCM VMIDMT-update call */
+struct msm_otg_scm_cmd_buf {
+	unsigned int device_id;
+	unsigned int vmid_idx;
+	unsigned int mem_type;
+} __attribute__ ((__packed__));
+
+/*
+ * msm_otg_pnoc_errata_fix() - apply a board-specific PNOC hardware
+ * workaround via a secure-monitor (SCM) call that updates the VMIDMT.
+ * No-op unless pdata->pnoc_errata_fix is set; failure is only logged.
+ */
+static void msm_otg_pnoc_errata_fix(struct msm_otg *motg)
+{
+	int ret;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	struct msm_otg_scm_cmd_buf cmd_buf;
+
+	if (!pdata->pnoc_errata_fix)
+		return;
+
+	dev_dbg(motg->phy.dev, "applying fix for pnoc h/w issue\n");
+
+	cmd_buf.device_id = MSM_OTG_DEVICE_ID;
+	cmd_buf.vmid_idx = MSM_OTG_VMID_IDX;
+	cmd_buf.mem_type = MSM_OTG_MEM_TYPE;
+
+	ret = scm_call(SCM_SVC_MP, MSM_OTG_CMD_ID, &cmd_buf,
+				sizeof(cmd_buf), NULL, 0);
+
+	if (ret)
+		dev_err(motg->phy.dev, "scm command failed to update VMIDMT\n");
+}
+
+static u64 msm_otg_dma_mask = DMA_BIT_MASK(32);
+/*
+ * msm_otg_add_pdev() - create and register a child platform device
+ * (@name) that inherits the parent's resources and a 32-bit DMA mask.
+ * For the "msm_hsusb" gadget child, a ci13xxx_platform_data block is
+ * populated from the OTG platform data and attached.
+ *
+ * Return: the new device, or ERR_PTR(-errno); the half-constructed
+ * device is released with platform_device_put() on any failure.
+ */
+static struct platform_device *msm_otg_add_pdev(
+		struct platform_device *ofdev, const char *name)
+{
+	struct platform_device *pdev;
+	const struct resource *res = ofdev->resource;
+	unsigned int num = ofdev->num_resources;
+	int retval;
+	struct ci13xxx_platform_data ci_pdata;
+	struct msm_otg_platform_data *otg_pdata;
+	struct msm_otg *motg;
+
+	pdev = platform_device_alloc(name, -1);
+	if (!pdev) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &msm_otg_dma_mask;
+	pdev->dev.parent = &ofdev->dev;
+
+	if (num) {
+		retval = platform_device_add_resources(pdev, res, num);
+		if (retval)
+			goto error;
+	}
+
+	if (!strcmp(name, "msm_hsusb")) {
+		otg_pdata =
+			(struct msm_otg_platform_data *)
+				ofdev->dev.platform_data;
+		motg = platform_get_drvdata(ofdev);
+		ci_pdata.log2_itc = otg_pdata->log2_itc;
+		ci_pdata.usb_core_id = 0;
+		ci_pdata.l1_supported = otg_pdata->l1_supported;
+		ci_pdata.enable_ahb2ahb_bypass =
+				otg_pdata->enable_ahb2ahb_bypass;
+		ci_pdata.enable_streaming = otg_pdata->enable_streaming;
+		ci_pdata.enable_axi_prefetch = otg_pdata->enable_axi_prefetch;
+		retval = platform_device_add_data(pdev, &ci_pdata,
+				sizeof(ci_pdata));
+		if (retval)
+			goto error;
+	}
+
+	retval = platform_device_add(pdev);
+	if (retval)
+		goto error;
+
+	return pdev;
+
+error:
+	platform_device_put(pdev);
+	return ERR_PTR(retval);
+}
+
+/*
+ * msm_otg_setup_devices() - register (init=true) or unregister
+ * (init=false) the gadget ("msm_hsusb") and/or host ("msm_hsusb_host")
+ * child devices according to @mode.  In OTG mode both children are
+ * created; if host creation fails the already-registered gadget is
+ * rolled back.  The children are tracked in function-static pointers,
+ * so only one instance of this driver may use this path.
+ *
+ * Return: 0 on success or a negative errno from child creation.
+ */
+static int msm_otg_setup_devices(struct platform_device *ofdev,
+		enum usb_mode_type mode, bool init)
+{
+	const char *gadget_name = "msm_hsusb";
+	const char *host_name = "msm_hsusb_host";
+	static struct platform_device *gadget_pdev;
+	static struct platform_device *host_pdev;
+	int retval = 0;
+
+	if (!init) {
+		if (gadget_pdev) {
+			platform_device_unregister(gadget_pdev);
+			device_remove_file(&gadget_pdev->dev,
+					&dev_attr_perf_mode);
+		}
+		if (host_pdev)
+			platform_device_unregister(host_pdev);
+		return 0;
+	}
+
+	switch (mode) {
+	case USB_OTG:
+		/* fall through */
+	case USB_PERIPHERAL:
+		gadget_pdev = msm_otg_add_pdev(ofdev, gadget_name);
+		if (IS_ERR(gadget_pdev)) {
+			retval = PTR_ERR(gadget_pdev);
+			break;
+		}
+		if (device_create_file(&gadget_pdev->dev, &dev_attr_perf_mode))
+			dev_err(&gadget_pdev->dev, "perf_mode file failed\n");
+		if (mode == USB_PERIPHERAL)
+			break;
+		/* fall through */
+	case USB_HOST:
+		host_pdev = msm_otg_add_pdev(ofdev, host_name);
+		if (IS_ERR(host_pdev)) {
+			retval = PTR_ERR(host_pdev);
+			if (mode == USB_OTG) {
+				platform_device_unregister(gadget_pdev);
+				device_remove_file(&gadget_pdev->dev,
+						&dev_attr_perf_mode);
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
+	return retval;
+}
+
+/*
+ * msm_otg_ext_chg_open - open() handler for the usb_ext_chg char device
+ *
+ * Marks the external-charger interface as opened and stashes the single
+ * global msm_otg instance in file->private_data for later fops calls.
+ * Always succeeds.
+ */
+static int msm_otg_ext_chg_open(struct inode *inode, struct file *file)
+{
+	struct msm_otg *motg = the_msm_otg;
+
+	pr_debug("msm_otg ext chg open\n");
+	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: OPEN",
+			motg->inputs, motg->phy.otg->state);
+
+	motg->ext_chg_opened = true;
+	file->private_data = (void *)motg;
+	return 0;
+}
+
+/*
+ * msm_otg_ext_chg_ioctl - ioctl handler for the usb_ext_chg char device
+ *
+ * Supports:
+ *   MSM_USB_EXT_CHG_INFO         - report the ULPI register page (offset
+ *                                  within the page and PAGE_SIZE length)
+ *                                  for a subsequent mmap().
+ *   MSM_USB_EXT_CHG_BLOCK_LPM    - userspace blocks (val != 0) or
+ *                                  unblocks (val == 0) low power mode
+ *                                  via pm_runtime get/put.
+ *   MSM_USB_EXT_CHG_VOLTAGE_INFO - log a 5V/9V voltage request.
+ *   MSM_USB_EXT_CHG_RESULT       - log the voltage request outcome.
+ *   MSM_USB_EXT_CHG_TYPE         - log whether the charger is external.
+ *
+ * Returns 0, -EFAULT on copy/get_user failure, -ENODEV when an LPM
+ * block is requested but the charger is not DCP, or -EINVAL for an
+ * unknown command.
+ */
+static long
+msm_otg_ext_chg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct msm_otg *motg = file->private_data;
+	struct msm_usb_chg_info info = {0};
+	int ret = 0, val;
+
+	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: IOCTL", cmd, 0);
+	switch (cmd) {
+	case MSM_USB_EXT_CHG_INFO:
+		info.chg_block_type = USB_CHG_BLOCK_ULPI;
+		info.page_offset = motg->io_res->start & ~PAGE_MASK;
+		/* mmap() works on PAGE granularity */
+		info.length = PAGE_SIZE;
+
+		/* NOTE(review): the "\n\n" in these pr_err strings looks
+		 * unintentional (double blank line in the log).
+		 */
+		if (copy_to_user((void __user *)arg, &info, sizeof(info))) {
+			pr_err("%s: copy to user failed\n\n", __func__);
+			ret = -EFAULT;
+		}
+		break;
+	case MSM_USB_EXT_CHG_BLOCK_LPM:
+		if (get_user(val, (int __user *)arg)) {
+			pr_err("%s: get_user failed\n\n", __func__);
+			ret = -EFAULT;
+			break;
+		}
+		pr_debug("%s: LPM block request %d\n", __func__, val);
+		msm_otg_dbg_log_event(&motg->phy, "LPM BLOCK REQ", val, 0);
+		if (val) { /* block LPM */
+			if (motg->chg_type == USB_DCP_CHARGER) {
+				motg->ext_chg_active = ACTIVE;
+				msm_otg_dbg_log_event(&motg->phy,
+				      "PM RUNTIME: EXT_CHG GET",
+				      get_pm_runtime_counter(motg->phy.dev), 0);
+				/* Hold a PM reference so the controller
+				 * cannot enter LPM while the external
+				 * charger owns the ULPI block.
+				 */
+				pm_runtime_get_sync(motg->phy.dev);
+			} else {
+				/* Only DCP chargers may block LPM. */
+				motg->ext_chg_active = INACTIVE;
+				complete(&motg->ext_chg_wait);
+				ret = -ENODEV;
+			}
+		} else {
+			motg->ext_chg_active = INACTIVE;
+			complete(&motg->ext_chg_wait);
+			/*
+			 * If usb cable is disconnected and then userspace
+			 * calls ioctl to unblock low power mode, make sure
+			 * otg_sm work for usb disconnect is processed first
+			 * followed by decrementing the PM usage counters.
+			 */
+			flush_work(&motg->sm_work);
+			msm_otg_dbg_log_event(&motg->phy,
+				"PM RUNTIME: EXT_CHG PUT",
+				get_pm_runtime_counter(motg->phy.dev), 0);
+			pm_runtime_put_sync(motg->phy.dev);
+		}
+		break;
+	case MSM_USB_EXT_CHG_VOLTAGE_INFO:
+		if (get_user(val, (int __user *)arg)) {
+			pr_err("%s: get_user failed\n\n", __func__);
+			ret = -EFAULT;
+			break;
+		}
+		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
+
+		/* Logging only; no voltage vote is actually placed here. */
+		if (val == USB_REQUEST_5V)
+			pr_debug("%s:voting 5V voltage request\n", __func__);
+		else if (val == USB_REQUEST_9V)
+			pr_debug("%s:voting 9V voltage request\n", __func__);
+		break;
+	case MSM_USB_EXT_CHG_RESULT:
+		if (get_user(val, (int __user *)arg)) {
+			pr_err("%s: get_user failed\n\n", __func__);
+			ret = -EFAULT;
+			break;
+		}
+		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
+
+		/* val == 0 means the request succeeded. */
+		if (!val)
+			pr_debug("%s:voltage request successful\n", __func__);
+		else
+			pr_debug("%s:voltage request failed\n", __func__);
+		break;
+	case MSM_USB_EXT_CHG_TYPE:
+		if (get_user(val, (int __user *)arg)) {
+			pr_err("%s: get_user failed\n\n", __func__);
+			ret = -EFAULT;
+			break;
+		}
+		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
+
+		if (val)
+			pr_debug("%s:charger is external charger\n", __func__);
+		else
+			pr_debug("%s:charger is not ext charger\n", __func__);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * msm_otg_ext_chg_mmap - mmap() handler for the usb_ext_chg char device
+ *
+ * Maps the single page of controller registers (motg->io_res) into
+ * userspace, non-cached, so the external charger daemon can access the
+ * ULPI block directly. Rejects any mapping with a non-zero offset or
+ * larger than one page.
+ */
+static int msm_otg_ext_chg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct msm_otg *motg = file->private_data;
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if (vma->vm_pgoff || vsize > PAGE_SIZE)
+		return -EINVAL;
+
+	/* Register space: map the exact physical page, uncached. */
+	vma->vm_pgoff = __phys_to_pfn(motg->io_res->start);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+				vsize, vma->vm_page_prot);
+	if (ret < 0) {
+		pr_err("%s: failed with return val %d\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_otg_ext_chg_release - release() handler for the usb_ext_chg device
+ *
+ * Clears the opened flag set by msm_otg_ext_chg_open(). Always succeeds.
+ */
+static int msm_otg_ext_chg_release(struct inode *inode, struct file *file)
+{
+	struct msm_otg *motg = file->private_data;
+
+	pr_debug("msm_otg ext chg release\n");
+	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: RELEASE",
+			motg->inputs, motg->phy.otg->state);
+
+	motg->ext_chg_opened = false;
+
+	return 0;
+}
+
+/* File operations for the usb_ext_chg character device registered in
+ * msm_otg_setup_ext_chg_cdev().
+ */
+static const struct file_operations msm_otg_ext_chg_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_otg_ext_chg_open,
+	.unlocked_ioctl = msm_otg_ext_chg_ioctl,
+	.mmap = msm_otg_ext_chg_mmap,
+	.release = msm_otg_ext_chg_release,
+};
+
+/*
+ * msm_otg_setup_ext_chg_cdev - register the usb_ext_chg char device
+ * @motg: driver context
+ *
+ * Creates the chrdev region, class, cdev and device node that userspace
+ * uses to drive external charger detection. Only supported when the
+ * primary PHY is used, mode is not host-only, and OTG is PMIC
+ * controlled; otherwise returns -ENODEV.
+ *
+ * Returns 0 on success or a negative errno, unwinding any partially
+ * created objects.
+ */
+static int msm_otg_setup_ext_chg_cdev(struct msm_otg *motg)
+{
+	int ret;
+
+	if (motg->pdata->enable_sec_phy || motg->pdata->mode == USB_HOST ||
+			motg->pdata->otg_control != OTG_PMIC_CONTROL) {
+		pr_debug("usb ext chg is not supported by msm otg\n");
+		return -ENODEV;
+	}
+
+	ret = alloc_chrdev_region(&motg->ext_chg_dev, 0, 1, "usb_ext_chg");
+	if (ret < 0) {
+		pr_err("Fail to allocate usb ext char dev region\n");
+		return ret;
+	}
+	motg->ext_chg_class = class_create(THIS_MODULE, "msm_ext_chg");
+	/*
+	 * class_create() reports failure via ERR_PTR, not via ret; the
+	 * previous check of the stale 'ret' (still 0 from
+	 * alloc_chrdev_region) could never detect a failed class and the
+	 * ERR_PTR class would later be handed to device_create().
+	 */
+	if (IS_ERR(motg->ext_chg_class)) {
+		ret = PTR_ERR(motg->ext_chg_class);
+		pr_err("Fail to create usb ext chg class\n");
+		goto unreg_chrdev;
+	}
+	cdev_init(&motg->ext_chg_cdev, &msm_otg_ext_chg_fops);
+	motg->ext_chg_cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&motg->ext_chg_cdev, motg->ext_chg_dev, 1);
+	if (ret < 0) {
+		pr_err("Fail to add usb ext chg cdev\n");
+		goto destroy_class;
+	}
+	motg->ext_chg_device = device_create(motg->ext_chg_class,
+					NULL, motg->ext_chg_dev, NULL,
+					"usb_ext_chg");
+	if (IS_ERR(motg->ext_chg_device)) {
+		pr_err("Fail to create usb ext chg device\n");
+		ret = PTR_ERR(motg->ext_chg_device);
+		motg->ext_chg_device = NULL;
+		goto del_cdev;
+	}
+
+	init_completion(&motg->ext_chg_wait);
+	pr_debug("msm otg ext chg cdev setup success\n");
+	return 0;
+
+del_cdev:
+	cdev_del(&motg->ext_chg_cdev);
+destroy_class:
+	class_destroy(motg->ext_chg_class);
+unreg_chrdev:
+	unregister_chrdev_region(motg->ext_chg_dev, 1);
+
+	return ret;
+}
+
+/*
+ * dpdm_pulldown_enable_show - sysfs read: report D+/D- pulldown state
+ *
+ * Prints "enabled" or "disabled" based on pdata->dpdm_pulldown_added.
+ * Uses the global the_msm_otg instance, not @dev.
+ */
+static ssize_t dpdm_pulldown_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct msm_otg *motg = the_msm_otg;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", pdata->dpdm_pulldown_added ?
+			"enabled" : "disabled");
+}
+
+/*
+ * dpdm_pulldown_enable_store - sysfs write: set D+/D- pulldown flag
+ *
+ * Accepts (case-insensitively) strings beginning with "enable" or
+ * "disable"; anything else returns -EINVAL. Only the flag is updated
+ * here; the hardware is reprogrammed elsewhere.
+ */
+static ssize_t dpdm_pulldown_enable_store(struct device *dev,
+		struct device_attribute *attr, const char
+		*buf, size_t size)
+{
+	struct msm_otg *motg = the_msm_otg;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	if (!strncasecmp(buf, "enable", 6)) {
+		pdata->dpdm_pulldown_added = true;
+		return size;
+	} else if (!strncasecmp(buf, "disable", 7)) {
+		pdata->dpdm_pulldown_added = false;
+		return size;
+	}
+
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(dpdm_pulldown_enable, 0644,
+ dpdm_pulldown_enable_show, dpdm_pulldown_enable_store);
+
+/*
+ * msm_otg_vbus_notifier - extcon EXTCON_USB (VBUS) change callback
+ *
+ * Records VBUS presence in motg->inputs (B_SESS_VLD) and kicks the OTG
+ * state machine on the driver's ordered workqueue.
+ */
+static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
+				void *ptr)
+{
+	struct msm_otg *motg = container_of(nb, struct msm_otg, vbus_nb);
+
+	if (event)
+		set_bit(B_SESS_VLD, &motg->inputs);
+	else
+		clear_bit(B_SESS_VLD, &motg->inputs);
+
+	queue_work(motg->otg_wq, &motg->sm_work);
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * msm_otg_id_notifier - extcon EXTCON_USB_HOST (ID pin) change callback
+ *
+ * ID is active-low in motg->inputs: a host-cable event (event != 0)
+ * clears the ID bit, removal sets it. Then kicks the OTG state machine.
+ */
+static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
+				void *ptr)
+{
+	struct msm_otg *motg = container_of(nb, struct msm_otg, id_nb);
+
+	if (event)
+		clear_bit(ID, &motg->inputs);
+	else
+		set_bit(ID, &motg->inputs);
+
+	queue_work(motg->otg_wq, &motg->sm_work);
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * msm_otg_extcon_register - hook up extcon VBUS and ID notifications
+ * @motg: driver context
+ *
+ * Phandle 0 of the "extcon" property supplies EXTCON_USB (VBUS) events;
+ * phandle 1, if present, supplies EXTCON_USB_HOST (ID) events. When
+ * only one phandle exists, the same edev is reused for the ID notifier
+ * (the second registration block runs on whatever 'edev' last held).
+ *
+ * Returns 0 when no "extcon" property exists or on success; a negative
+ * errno otherwise, unregistering the VBUS notifier on the error path.
+ */
+static int msm_otg_extcon_register(struct msm_otg *motg)
+{
+	struct device_node *node = motg->pdev->dev.of_node;
+	struct extcon_dev *edev;
+	int ret = 0;
+
+	if (!of_property_read_bool(node, "extcon"))
+		return 0;
+
+	edev = extcon_get_edev_by_phandle(&motg->pdev->dev, 0);
+	/* -ENODEV means "no such phandle" and is tolerated. */
+	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
+		return PTR_ERR(edev);
+
+	if (!IS_ERR(edev)) {
+		motg->extcon_vbus = edev;
+		motg->vbus_nb.notifier_call = msm_otg_vbus_notifier;
+		ret = extcon_register_notifier(edev, EXTCON_USB,
+							&motg->vbus_nb);
+		if (ret < 0) {
+			dev_err(&motg->pdev->dev, "failed to register notifier for USB\n");
+			return ret;
+		}
+	}
+
+	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
+		edev = extcon_get_edev_by_phandle(&motg->pdev->dev, 1);
+		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
+			ret = PTR_ERR(edev);
+			goto err;
+		}
+	}
+
+	if (!IS_ERR(edev)) {
+		motg->extcon_id = edev;
+		motg->id_nb.notifier_call = msm_otg_id_notifier;
+		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+							&motg->id_nb);
+		if (ret < 0) {
+			dev_err(&motg->pdev->dev, "failed to register notifier for USB-HOST\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	if (motg->extcon_vbus)
+		extcon_unregister_notifier(motg->extcon_vbus, EXTCON_USB,
+							&motg->vbus_nb);
+
+	return ret;
+}
+
+/*
+ * msm_otg_dt_to_pdata - build platform data from the device tree node
+ * @pdev: the msm_otg platform device
+ *
+ * Allocates (devm) and fills an msm_otg_platform_data from the node's
+ * qcom,* properties. Missing optional properties leave the zeroed
+ * defaults in place; invalid GPIOs are only logged at debug level.
+ *
+ * Returns the pdata, or NULL on allocation failure.
+ */
+struct msm_otg_platform_data *msm_otg_dt_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_otg_platform_data *pdata;
+	int len = 0;
+	int res_gpio;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	/* Optional ULPI init sequence: 'len' counts u32 elements
+	 * (sizeof(len) == sizeof(u32) here).
+	 */
+	len = of_property_count_elems_of_size(node,
+			"qcom,hsusb-otg-phy-init-seq", sizeof(len));
+	if (len > 0) {
+		pdata->phy_init_seq = devm_kzalloc(&pdev->dev,
+				len * sizeof(len), GFP_KERNEL);
+		if (!pdata->phy_init_seq)
+			return NULL;
+		of_property_read_u32_array(node, "qcom,hsusb-otg-phy-init-seq",
+				pdata->phy_init_seq, len);
+	}
+	of_property_read_u32(node, "qcom,hsusb-otg-power-budget",
+				&pdata->power_budget);
+	of_property_read_u32(node, "qcom,hsusb-otg-mode",
+				&pdata->mode);
+	of_property_read_u32(node, "qcom,hsusb-otg-otg-control",
+				&pdata->otg_control);
+	of_property_read_u32(node, "qcom,hsusb-otg-default-mode",
+				&pdata->default_mode);
+	of_property_read_u32(node, "qcom,hsusb-otg-phy-type",
+				&pdata->phy_type);
+	pdata->disable_reset_on_disconnect = of_property_read_bool(node,
+				"qcom,hsusb-otg-disable-reset");
+	pdata->pnoc_errata_fix = of_property_read_bool(node,
+				"qcom,hsusb-otg-pnoc-errata-fix");
+	pdata->enable_lpm_on_dev_suspend = of_property_read_bool(node,
+				"qcom,hsusb-otg-lpm-on-dev-suspend");
+	pdata->core_clk_always_on_workaround = of_property_read_bool(node,
+				"qcom,hsusb-otg-clk-always-on-workaround");
+	pdata->delay_lpm_on_disconnect = of_property_read_bool(node,
+				"qcom,hsusb-otg-delay-lpm");
+	pdata->dp_manual_pullup = of_property_read_bool(node,
+				"qcom,dp-manual-pullup");
+	pdata->enable_sec_phy = of_property_read_bool(node,
+				"qcom,usb2-enable-hsphy2");
+	of_property_read_u32(node, "qcom,hsusb-log2-itc",
+				&pdata->log2_itc);
+
+	of_property_read_u32(node, "qcom,hsusb-otg-mpm-dpsehv-int",
+				&pdata->mpm_dpshv_int);
+	of_property_read_u32(node, "qcom,hsusb-otg-mpm-dmsehv-int",
+				&pdata->mpm_dmshv_int);
+	/* 0 is used as "no PMIC ID irq" sentinel throughout the driver. */
+	pdata->pmic_id_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+	if (pdata->pmic_id_irq < 0)
+		pdata->pmic_id_irq = 0;
+
+	/* All GPIOs below are optional; invalid values are kept as-is
+	 * and re-checked with gpio_is_valid() at point of use.
+	 */
+	pdata->hub_reset_gpio = of_get_named_gpio(
+			node, "qcom,hub-reset-gpio", 0);
+	if (!gpio_is_valid(pdata->hub_reset_gpio))
+		pr_debug("hub_reset_gpio is not available\n");
+
+	pdata->usbeth_reset_gpio = of_get_named_gpio(
+			node, "qcom,usbeth-reset-gpio", 0);
+	if (!gpio_is_valid(pdata->usbeth_reset_gpio))
+		pr_debug("usbeth_reset_gpio is not available\n");
+
+	pdata->switch_sel_gpio =
+			of_get_named_gpio(node, "qcom,sw-sel-gpio", 0);
+	if (!gpio_is_valid(pdata->switch_sel_gpio))
+		pr_debug("switch_sel_gpio is not available\n");
+
+	pdata->usb_id_gpio =
+			of_get_named_gpio(node, "qcom,usbid-gpio", 0);
+	if (!gpio_is_valid(pdata->usb_id_gpio))
+		pr_debug("usb_id_gpio is not available\n");
+
+	pdata->l1_supported = of_property_read_bool(node,
+				"qcom,hsusb-l1-supported");
+	pdata->enable_ahb2ahb_bypass = of_property_read_bool(node,
+				"qcom,ahb-async-bridge-bypass");
+	pdata->disable_retention_with_vdd_min = of_property_read_bool(node,
+				"qcom,disable-retention-with-vdd-min");
+	pdata->enable_phy_id_pullup = of_property_read_bool(node,
+				"qcom,enable-phy-id-pullup");
+	pdata->phy_dvdd_always_on = of_property_read_bool(node,
+				"qcom,phy-dvdd-always-on");
+
+	/* Unlike the GPIOs above, vddmin is normalized to 0 if invalid. */
+	res_gpio = of_get_named_gpio(node, "qcom,hsusb-otg-vddmin-gpio", 0);
+	if (!gpio_is_valid(res_gpio))
+		res_gpio = 0;
+	pdata->vddmin_gpio = res_gpio;
+
+	pdata->emulation = of_property_read_bool(node,
+				"qcom,emulation");
+
+	pdata->enable_streaming = of_property_read_bool(node,
+				"qcom,boost-sysclk-with-streaming");
+
+	pdata->enable_axi_prefetch = of_property_read_bool(node,
+				"qcom,axi-prefetch-enable");
+
+	pdata->enable_sdp_typec_current_limit = of_property_read_bool(node,
+				"qcom,enable-sdp-typec-current-limit");
+	pdata->vbus_low_as_hostmode = of_property_read_bool(node,
+				"qcom,vbus-low-as-hostmode");
+	return pdata;
+}
+
+static int msm_otg_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int len = 0;
+ u32 tmp[3];
+ struct resource *res;
+ struct msm_otg *motg;
+ struct usb_phy *phy;
+ struct msm_otg_platform_data *pdata;
+ void __iomem *tcsr;
+ int id_irq = 0;
+
+ dev_info(&pdev->dev, "msm_otg probe\n");
+
+ motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
+ if (!motg) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ /*
+ * USB Core is running its protocol engine based on CORE CLK,
+ * CORE CLK must be running at >55Mhz for correct HSUSB
+ * operation and USB core cannot tolerate frequency changes on
+ * CORE CLK. For such USB cores, vote for maximum clk frequency
+ * on pclk source
+ */
+ motg->core_clk = clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(motg->core_clk)) {
+ ret = PTR_ERR(motg->core_clk);
+ motg->core_clk = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get core_clk\n");
+ goto free_motg;
+ }
+
+ motg->core_reset = devm_reset_control_get(&pdev->dev, "core_reset");
+ if (IS_ERR(motg->core_reset)) {
+ dev_err(&pdev->dev, "failed to get core_reset\n");
+ ret = PTR_ERR(motg->core_reset);
+ goto put_core_clk;
+ }
+
+ /*
+ * USB Core CLK can run at max freq if streaming is enabled. Hence,
+ * get Max supported clk frequency for USB Core CLK and request to set
+ * the same. Otherwise set USB Core CLK to defined default value.
+ */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-nominal-sysclk-rate", &ret)) {
+ ret = -EINVAL;
+ goto put_core_clk;
+ } else {
+ motg->core_clk_nominal_rate = clk_round_rate(motg->core_clk,
+ ret);
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-svs-sysclk-rate", &ret)) {
+ dev_dbg(&pdev->dev, "core_clk svs freq not specified\n");
+ } else {
+ motg->core_clk_svs_rate = clk_round_rate(motg->core_clk, ret);
+ }
+
+ motg->default_noc_mode = USB_NOC_NOM_VOTE;
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,default-mode-svs")) {
+ motg->core_clk_rate = motg->core_clk_svs_rate;
+ motg->default_noc_mode = USB_NOC_SVS_VOTE;
+ } else if (of_property_read_bool(pdev->dev.of_node,
+ "qcom,boost-sysclk-with-streaming")) {
+ motg->core_clk_rate = motg->core_clk_nominal_rate;
+ } else {
+ motg->core_clk_rate = clk_round_rate(motg->core_clk,
+ USB_DEFAULT_SYSTEM_CLOCK);
+ }
+
+ if (IS_ERR_VALUE(motg->core_clk_rate)) {
+ dev_err(&pdev->dev, "fail to get core clk max freq.\n");
+ } else {
+ ret = clk_set_rate(motg->core_clk, motg->core_clk_rate);
+ if (ret)
+ dev_err(&pdev->dev, "fail to set core_clk freq:%d\n",
+ ret);
+ }
+
+ motg->pclk = clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(motg->pclk)) {
+ ret = PTR_ERR(motg->pclk);
+ motg->pclk = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get iface_clk\n");
+ goto put_core_clk;
+ }
+
+ motg->xo_clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(motg->xo_clk)) {
+ ret = PTR_ERR(motg->xo_clk);
+ motg->xo_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto put_pclk;
+ }
+
+ /*
+ * On few platforms USB PHY is fed with sleep clk.
+ * Hence don't fail probe.
+ */
+ motg->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+ if (IS_ERR(motg->sleep_clk)) {
+ ret = PTR_ERR(motg->sleep_clk);
+ motg->sleep_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto put_xo_clk;
+ else
+ dev_dbg(&pdev->dev, "failed to get sleep_clk\n");
+ } else {
+ ret = clk_prepare_enable(motg->sleep_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed to vote sleep_clk%d\n",
+ __func__, ret);
+ goto put_xo_clk;
+ }
+ }
+
+ /*
+ * If present, phy_reset_clk is used to reset the PHY, ULPI bridge
+ * and CSR Wrapper. This is a reset only clock.
+ */
+
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_reset_clk") >= 0) {
+ motg->phy_reset_clk = devm_clk_get(&pdev->dev, "phy_reset_clk");
+ if (IS_ERR(motg->phy_reset_clk)) {
+ ret = PTR_ERR(motg->phy_reset_clk);
+ goto disable_sleep_clk;
+ }
+
+ motg->phy_reset = devm_reset_control_get(&pdev->dev,
+ "phy_reset");
+ if (IS_ERR(motg->phy_reset)) {
+ dev_err(&pdev->dev, "failed to get phy_reset\n");
+ ret = PTR_ERR(motg->phy_reset);
+ goto disable_sleep_clk;
+ }
+ }
+
+ /*
+ * If present, phy_por_clk is used to assert/de-assert phy POR
+ * input. This is a reset only clock. phy POR must be asserted
+ * after overriding the parameter registers via CSR wrapper or
+ * ULPI bridge.
+ */
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_por_clk") >= 0) {
+ motg->phy_por_clk = devm_clk_get(&pdev->dev, "phy_por_clk");
+ if (IS_ERR(motg->phy_por_clk)) {
+ ret = PTR_ERR(motg->phy_por_clk);
+ goto disable_sleep_clk;
+ }
+
+ motg->phy_por_reset = devm_reset_control_get(&pdev->dev,
+ "phy_por_reset");
+ if (IS_ERR(motg->phy_por_reset)) {
+ dev_err(&pdev->dev, "failed to get phy_por_reset\n");
+ ret = PTR_ERR(motg->phy_por_reset);
+ goto disable_sleep_clk;
+ }
+ }
+
+ /*
+ * If present, phy_csr_clk is required for accessing PHY
+ * CSR registers via AHB2PHY interface.
+ */
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_csr_clk") >= 0) {
+ motg->phy_csr_clk = devm_clk_get(&pdev->dev, "phy_csr_clk");
+ if (IS_ERR(motg->phy_csr_clk)) {
+ ret = PTR_ERR(motg->phy_csr_clk);
+ goto disable_sleep_clk;
+ } else {
+ ret = clk_prepare_enable(motg->phy_csr_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to enable phy csr clk %d\n", ret);
+ goto disable_sleep_clk;
+ }
+ }
+ }
+
+ of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency",
+ &motg->pm_qos_latency);
+
+ pdata = msm_otg_dt_to_pdata(pdev);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto disable_phy_csr_clk;
+ }
+ pdev->dev.platform_data = pdata;
+
+ pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!pdata->bus_scale_table)
+ dev_dbg(&pdev->dev, "bus scaling is disabled\n");
+
+ if (pdata->phy_type == QUSB_ULPI_PHY) {
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_ref_clk") >= 0) {
+ motg->phy_ref_clk = devm_clk_get(&pdev->dev,
+ "phy_ref_clk");
+ if (IS_ERR(motg->phy_ref_clk)) {
+ ret = PTR_ERR(motg->phy_ref_clk);
+ goto disable_phy_csr_clk;
+ } else {
+ ret = clk_prepare_enable(motg->phy_ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to enable phy ref clk %d\n",
+ ret);
+ goto disable_phy_csr_clk;
+ }
+ }
+ }
+ }
+
+ motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!motg->phy.otg) {
+ ret = -ENOMEM;
+ goto disable_phy_csr_clk;
+ }
+
+ the_msm_otg = motg;
+ motg->pdata = pdata;
+ phy = &motg->phy;
+ phy->dev = &pdev->dev;
+ motg->pdev = pdev;
+ motg->dbg_idx = 0;
+ motg->dbg_lock = __RW_LOCK_UNLOCKED(lck);
+
+ if (motg->pdata->bus_scale_table) {
+ motg->bus_perf_client =
+ msm_bus_scale_register_client(motg->pdata->bus_scale_table);
+ if (!motg->bus_perf_client) {
+ dev_err(motg->phy.dev, "%s: Failed to register BUS\n"
+ "scaling client!!\n", __func__);
+ } else {
+ debug_bus_voting_enabled = true;
+ /* Some platforms require BUS vote to control clocks */
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+ }
+ }
+
+ ret = msm_otg_bus_freq_get(motg);
+ if (ret) {
+ pr_err("failed to get noc clocks: %d\n", ret);
+ } else {
+ ret = msm_otg_bus_freq_set(motg, motg->default_noc_mode);
+ if (ret)
+ pr_err("failed to vote explicit noc rates: %d\n", ret);
+ }
+
+ /* initialize reset counter */
+ motg->reset_counter = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get core iomem resource\n");
+ ret = -ENODEV;
+ goto devote_bus_bw;
+ }
+
+ motg->io_res = res;
+ motg->regs = ioremap(res->start, resource_size(res));
+ if (!motg->regs) {
+ dev_err(&pdev->dev, "core iomem ioremap failed\n");
+ ret = -ENOMEM;
+ goto devote_bus_bw;
+ }
+ dev_info(&pdev->dev, "OTG regs = %pK\n", motg->regs);
+
+ if (pdata->enable_sec_phy) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tcsr");
+ if (!res) {
+ dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
+ } else {
+ tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!tcsr) {
+ dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
+ } else {
+ /* Enable USB2 on secondary HSPHY. */
+ writel_relaxed(0x1, tcsr);
+ /*
+ * Ensure that TCSR write is completed before
+ * USB registers initialization.
+ */
+ mb();
+ }
+ }
+ }
+
+ if (pdata->enable_sec_phy)
+ motg->usb_phy_ctrl_reg = USB_PHY_CTRL2;
+ else
+ motg->usb_phy_ctrl_reg = USB_PHY_CTRL;
+
+ /*
+ * The USB PHY wrapper provides a register interface
+ * through AHB2PHY for performing PHY related operations
+ * like retention, HV interrupts and overriding parameter
+ * registers etc. The registers start at 4 byte boundary
+ * but only the first byte is valid and remaining are not
+ * used. Relaxed versions of readl/writel should be used.
+ *
+ * The link does not have any PHY specific registers.
+ * Hence set motg->usb_phy_ctrl_reg to.
+ */
+ if (motg->pdata->phy_type == SNPS_FEMTO_PHY ||
+ pdata->phy_type == QUSB_ULPI_PHY) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "phy_csr");
+ if (!res) {
+ dev_err(&pdev->dev, "PHY CSR IOMEM missing!\n");
+ ret = -ENODEV;
+ goto free_regs;
+ }
+ motg->phy_csr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(motg->phy_csr_regs)) {
+ ret = PTR_ERR(motg->phy_csr_regs);
+ dev_err(&pdev->dev, "PHY CSR ioremap failed!\n");
+ goto free_regs;
+ }
+ motg->usb_phy_ctrl_reg = 0;
+ }
+
+ motg->irq = platform_get_irq(pdev, 0);
+ if (!motg->irq) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ ret = -ENODEV;
+ goto free_regs;
+ }
+
+ motg->async_irq = platform_get_irq_byname(pdev, "async_irq");
+ if (motg->async_irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq for async_int failed\n");
+ motg->async_irq = 0;
+ goto free_regs;
+ }
+
+ if (motg->xo_clk) {
+ ret = clk_prepare_enable(motg->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s failed to vote for TCXO %d\n",
+ __func__, ret);
+ goto free_xo_handle;
+ }
+ }
+
+
+ clk_prepare_enable(motg->pclk);
+
+ hsusb_vdd = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
+ if (IS_ERR(hsusb_vdd)) {
+ hsusb_vdd = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
+ if (IS_ERR(hsusb_vdd)) {
+ dev_err(motg->phy.dev, "unable to get hsusb vddcx\n");
+ ret = PTR_ERR(hsusb_vdd);
+ goto devote_xo_handle;
+ }
+ }
+
+ len = of_property_count_elems_of_size(pdev->dev.of_node,
+ "qcom,vdd-voltage-level", sizeof(len));
+ if (len > 0) {
+ if (len == sizeof(tmp) / sizeof(len)) {
+ of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,vdd-voltage-level",
+ tmp, len);
+ vdd_val[0] = tmp[0];
+ vdd_val[1] = tmp[1];
+ vdd_val[2] = tmp[2];
+ } else {
+ dev_dbg(&pdev->dev,
+ "Using default hsusb vdd config.\n");
+ goto devote_xo_handle;
+ }
+ } else {
+ goto devote_xo_handle;
+ }
+
+ ret = msm_hsusb_config_vddcx(1);
+ if (ret) {
+ dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
+ goto devote_xo_handle;
+ }
+
+ ret = regulator_enable(hsusb_vdd);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n");
+ goto free_config_vddcx;
+ }
+
+ ret = msm_hsusb_ldo_init(motg, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
+ goto free_hsusb_vdd;
+ }
+
+ /* Get pinctrl if target uses pinctrl */
+ motg->phy_pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(motg->phy_pinctrl)) {
+ if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
+ dev_err(&pdev->dev, "Error encountered while getting pinctrl");
+ ret = PTR_ERR(motg->phy_pinctrl);
+ goto free_ldo_init;
+ }
+ dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
+ motg->phy_pinctrl = NULL;
+ }
+
+ ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON);
+ if (ret) {
+ dev_err(&pdev->dev, "hsusb vreg enable failed\n");
+ goto free_ldo_init;
+ }
+ clk_prepare_enable(motg->core_clk);
+
+ /* Check if USB mem_type change is needed to workaround PNOC hw issue */
+ msm_otg_pnoc_errata_fix(motg);
+
+ writel_relaxed(0, USB_USBINTR);
+ writel_relaxed(0, USB_OTGSC);
+ /* Ensure that above STOREs are completed before enabling interrupts */
+ mb();
+
+ motg->id_state = USB_ID_FLOAT;
+ set_bit(ID, &motg->inputs);
+ INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+ INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
+ INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w);
+ INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work);
+ setup_timer(&motg->chg_check_timer, msm_otg_chg_check_timer_func,
+ (unsigned long) motg);
+ motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
+ if (!motg->otg_wq) {
+ pr_err("%s: Unable to create workqueue otg_wq\n",
+ __func__);
+ goto disable_core_clk;
+ }
+
+ ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
+ "msm_otg", motg);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed\n");
+ goto destroy_wq;
+ }
+
+ motg->phy_irq = platform_get_irq_byname(pdev, "phy_irq");
+ if (motg->phy_irq < 0) {
+ dev_dbg(&pdev->dev, "phy_irq is not present\n");
+ motg->phy_irq = 0;
+ } else {
+
+ /* clear all interrupts before enabling the IRQ */
+ writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR0);
+ writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1);
+
+ writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD);
+ /*
+ * Databook says 200 usec delay is required for
+ * clearing the interrupts.
+ */
+ udelay(200);
+ writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD);
+
+ ret = request_irq(motg->phy_irq, msm_otg_phy_irq_handler,
+ IRQF_TRIGGER_RISING, "msm_otg_phy_irq", motg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_irq request fail %d\n", ret);
+ goto free_irq;
+ }
+ }
+
+ ret = request_irq(motg->async_irq, msm_otg_irq,
+ IRQF_TRIGGER_RISING, "msm_otg", motg);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed (ASYNC INT)\n");
+ goto free_phy_irq;
+ }
+ disable_irq(motg->async_irq);
+
+ if (pdata->otg_control == OTG_PHY_CONTROL && pdata->mpm_otgsessvld_int)
+ msm_mpm_enable_pin(pdata->mpm_otgsessvld_int, 1);
+
+ if (pdata->mpm_dpshv_int)
+ msm_mpm_enable_pin(pdata->mpm_dpshv_int, 1);
+ if (pdata->mpm_dmshv_int)
+ msm_mpm_enable_pin(pdata->mpm_dmshv_int, 1);
+
+ phy->init = msm_otg_reset;
+ phy->set_power = msm_otg_set_power;
+ phy->set_suspend = msm_otg_set_suspend;
+ phy->dbg_event = msm_otg_dbg_log_event;
+
+ phy->io_ops = &msm_otg_io_ops;
+
+ phy->otg->usb_phy = &motg->phy;
+ phy->otg->set_host = msm_otg_set_host;
+ phy->otg->set_peripheral = msm_otg_set_peripheral;
+ if (pdata->dp_manual_pullup)
+ phy->flags |= ENABLE_DP_MANUAL_PULLUP;
+
+ if (pdata->enable_sec_phy)
+ phy->flags |= ENABLE_SECONDARY_PHY;
+
+ ret = usb_add_phy(&motg->phy, USB_PHY_TYPE_USB2);
+ if (ret) {
+ dev_err(&pdev->dev, "usb_add_phy failed\n");
+ goto free_async_irq;
+ }
+
+ ret = usb_phy_regulator_init(motg);
+ if (ret) {
+ dev_err(&pdev->dev, "usb_phy_regulator_init failed\n");
+ goto remove_phy;
+ }
+
+ if (motg->pdata->mode == USB_OTG &&
+ motg->pdata->otg_control == OTG_PMIC_CONTROL &&
+ !motg->phy_irq) {
+
+ if (gpio_is_valid(motg->pdata->usb_id_gpio)) {
+ /* usb_id_gpio request */
+ ret = gpio_request(motg->pdata->usb_id_gpio,
+ "USB_ID_GPIO");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for id\n");
+ motg->pdata->usb_id_gpio = 0;
+ goto remove_phy;
+ }
+
+ /*
+ * The following code implements switch between the HOST
+ * mode to device mode when used different HW components
+ * on the same port: USB HUB and the usb jack type B
+ * for device mode In this case HUB should be gone
+ * only once out of reset at the boot time and after
+ * that always stay on
+ */
+ if (gpio_is_valid(motg->pdata->hub_reset_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->hub_reset_gpio,
+ "qcom,hub-reset-gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for hub reset\n");
+ goto remove_phy;
+ }
+ gpio_direction_output(
+ motg->pdata->hub_reset_gpio, 1);
+ }
+
+ if (gpio_is_valid(motg->pdata->switch_sel_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->switch_sel_gpio,
+ "qcom,sw-sel-gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for switch sel\n");
+ goto remove_phy;
+ }
+ if (gpio_get_value(motg->pdata->usb_id_gpio))
+ gpio_direction_input(
+ motg->pdata->switch_sel_gpio);
+
+ else
+ gpio_direction_output(
+ motg->pdata->switch_sel_gpio,
+ 1);
+ }
+
+ /* usb_id_gpio to irq */
+ id_irq = gpio_to_irq(motg->pdata->usb_id_gpio);
+ motg->ext_id_irq = id_irq;
+ } else if (motg->pdata->pmic_id_irq) {
+ id_irq = motg->pdata->pmic_id_irq;
+ }
+
+ if (id_irq) {
+ ret = request_irq(id_irq,
+ msm_id_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "msm_otg", motg);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed for ID\n");
+ goto remove_phy;
+ }
+ } else {
+ /* PMIC does USB ID detection and notifies through
+ * USB_OTG property of USB powersupply.
+ */
+ dev_dbg(&pdev->dev, "PMIC does ID detection\n");
+ }
+ }
+
+ platform_set_drvdata(pdev, motg);
+ device_init_wakeup(&pdev->dev, 1);
+
+ ret = msm_otg_debugfs_init(motg);
+ if (ret)
+ dev_dbg(&pdev->dev, "mode debugfs file is not available\n");
+
+ if (motg->pdata->otg_control == OTG_PMIC_CONTROL &&
+ (!(motg->pdata->mode == USB_OTG) ||
+ motg->pdata->pmic_id_irq || motg->ext_id_irq ||
+ !motg->phy_irq))
+ motg->caps = ALLOW_PHY_POWER_COLLAPSE | ALLOW_PHY_RETENTION;
+
+ if (motg->pdata->otg_control == OTG_PHY_CONTROL || motg->phy_irq ||
+ motg->pdata->enable_phy_id_pullup)
+ motg->caps = ALLOW_PHY_RETENTION | ALLOW_PHY_REGULATORS_LPM;
+
+ if (motg->pdata->mpm_dpshv_int || motg->pdata->mpm_dmshv_int)
+ motg->caps |= ALLOW_HOST_PHY_RETENTION;
+
+ device_create_file(&pdev->dev, &dev_attr_dpdm_pulldown_enable);
+
+ if (motg->pdata->enable_lpm_on_dev_suspend)
+ motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND;
+
+ if (motg->pdata->disable_retention_with_vdd_min)
+ motg->caps |= ALLOW_VDD_MIN_WITH_RETENTION_DISABLED;
+
+ /*
+ * PHY DVDD is supplied by a always on PMIC LDO (unlike
+ * vddcx/vddmx). PHY can keep D+ pull-up and D+/D-
+ * pull-down during suspend without any additional
+ * hardware re-work.
+ */
+ if (motg->pdata->phy_type == SNPS_FEMTO_PHY)
+ motg->caps |= ALLOW_BUS_SUSPEND_WITHOUT_REWORK;
+
+ pm_stay_awake(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ if (motg->pdata->delay_lpm_on_disconnect) {
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ lpm_disconnect_thresh);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ }
+
+ ret = msm_otg_setup_ext_chg_cdev(motg);
+ if (ret)
+ dev_dbg(&pdev->dev, "fail to setup cdev\n");
+
+ if (pdev->dev.of_node) {
+ ret = msm_otg_setup_devices(pdev, pdata->mode, true);
+ if (ret) {
+ dev_err(&pdev->dev, "devices setup failed\n");
+ goto remove_cdev;
+ }
+ }
+
+ psy = power_supply_get_by_name("usb");
+ if (!psy) {
+ dev_dbg(&pdev->dev, "Could not get usb power_supply\n");
+ ret = -EPROBE_DEFER;
+ goto otg_remove_devices;
+ }
+
+
+ ret = msm_otg_extcon_register(motg);
+ if (ret)
+ goto put_psy;
+
+ if (motg->extcon_vbus) {
+ ret = extcon_get_cable_state_(motg->extcon_vbus, EXTCON_USB);
+ if (ret)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ }
+
+ if (motg->extcon_id) {
+ ret = extcon_get_cable_state_(motg->extcon_id, EXTCON_USB_HOST);
+ if (ret)
+ clear_bit(ID, &motg->inputs);
+ else
+ set_bit(ID, &motg->inputs);
+ }
+
+ if (gpio_is_valid(motg->pdata->hub_reset_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->hub_reset_gpio,
+ "HUB_RESET");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for hub_reset\n");
+ } else {
+ gpio_direction_output(
+ motg->pdata->hub_reset_gpio, 0);
+ /* 5 microsecs reset signaling to usb hub */
+ usleep_range(5, 10);
+ gpio_direction_output(
+ motg->pdata->hub_reset_gpio, 1);
+ }
+ }
+
+ if (gpio_is_valid(motg->pdata->usbeth_reset_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->usbeth_reset_gpio,
+ "ETH_RESET");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for usbeth_reset\n");
+ } else {
+ gpio_direction_output(
+ motg->pdata->usbeth_reset_gpio, 0);
+ /* 100 microsecs reset signaling to usb-to-eth */
+ usleep_range(100, 110);
+ gpio_direction_output(
+ motg->pdata->usbeth_reset_gpio, 1);
+ }
+ }
+
+ motg->pm_notify.notifier_call = msm_otg_pm_notify;
+ register_pm_notifier(&motg->pm_notify);
+ msm_otg_dbg_log_event(phy, "OTG PROBE", motg->caps, motg->lpm_flags);
+
+ return 0;
+
+put_psy:
+ if (psy)
+ power_supply_put(psy);
+otg_remove_devices:
+ if (pdev->dev.of_node)
+ msm_otg_setup_devices(pdev, motg->pdata->mode, false);
+remove_cdev:
+ if (!motg->ext_chg_device) {
+ device_destroy(motg->ext_chg_class, motg->ext_chg_dev);
+ cdev_del(&motg->ext_chg_cdev);
+ class_destroy(motg->ext_chg_class);
+ unregister_chrdev_region(motg->ext_chg_dev, 1);
+ }
+remove_phy:
+ usb_remove_phy(&motg->phy);
+free_async_irq:
+ free_irq(motg->async_irq, motg);
+free_phy_irq:
+ if (motg->phy_irq)
+ free_irq(motg->phy_irq, motg);
+free_irq:
+ free_irq(motg->irq, motg);
+destroy_wq:
+ destroy_workqueue(motg->otg_wq);
+disable_core_clk:
+ clk_disable_unprepare(motg->core_clk);
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
+free_ldo_init:
+ msm_hsusb_ldo_init(motg, 0);
+free_hsusb_vdd:
+ regulator_disable(hsusb_vdd);
+free_config_vddcx:
+ regulator_set_voltage(hsusb_vdd,
+ vdd_val[VDD_NONE],
+ vdd_val[VDD_MAX]);
+devote_xo_handle:
+ clk_disable_unprepare(motg->pclk);
+ if (motg->xo_clk)
+ clk_disable_unprepare(motg->xo_clk);
+free_xo_handle:
+ if (motg->xo_clk) {
+ clk_put(motg->xo_clk);
+ motg->xo_clk = NULL;
+ }
+free_regs:
+ iounmap(motg->regs);
+devote_bus_bw:
+ if (motg->bus_perf_client) {
+ msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
+ msm_bus_scale_unregister_client(motg->bus_perf_client);
+ }
+disable_phy_csr_clk:
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+disable_sleep_clk:
+ if (motg->sleep_clk)
+ clk_disable_unprepare(motg->sleep_clk);
+put_xo_clk:
+ if (motg->xo_clk)
+ clk_put(motg->xo_clk);
+put_pclk:
+ if (motg->pclk)
+ clk_put(motg->pclk);
+put_core_clk:
+ if (motg->core_clk)
+ clk_put(motg->core_clk);
+free_motg:
+ kfree(motg);
+ return ret;
+}
+
+static int msm_otg_remove(struct platform_device *pdev)
+{
+ struct msm_otg *motg = platform_get_drvdata(pdev);
+ struct usb_phy *phy = &motg->phy;
+ int cnt = 0;
+
+ if (phy->otg->host || phy->otg->gadget)
+ return -EBUSY;
+
+ unregister_pm_notifier(&motg->pm_notify);
+
+ extcon_unregister_notifier(motg->extcon_id, EXTCON_USB_HOST,
+ &motg->id_nb);
+ extcon_unregister_notifier(motg->extcon_vbus, EXTCON_USB,
+ &motg->vbus_nb);
+
+ if (!motg->ext_chg_device) {
+ device_destroy(motg->ext_chg_class, motg->ext_chg_dev);
+ cdev_del(&motg->ext_chg_cdev);
+ class_destroy(motg->ext_chg_class);
+ unregister_chrdev_region(motg->ext_chg_dev, 1);
+ }
+
+ if (pdev->dev.of_node)
+ msm_otg_setup_devices(pdev, motg->pdata->mode, false);
+ if (psy)
+ power_supply_put(psy);
+ msm_otg_debugfs_cleanup();
+ cancel_delayed_work_sync(&motg->chg_work);
+ cancel_delayed_work_sync(&motg->id_status_work);
+ cancel_delayed_work_sync(&motg->perf_vote_work);
+ msm_otg_perf_vote_update(motg, false);
+ cancel_work_sync(&motg->sm_work);
+ destroy_workqueue(motg->otg_wq);
+
+ pm_runtime_resume(&pdev->dev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ pm_runtime_disable(&pdev->dev);
+
+ if (motg->phy_irq)
+ free_irq(motg->phy_irq, motg);
+ if (motg->pdata->pmic_id_irq)
+ free_irq(motg->pdata->pmic_id_irq, motg);
+ usb_remove_phy(phy);
+ free_irq(motg->irq, motg);
+
+ if (motg->pdata->mpm_dpshv_int || motg->pdata->mpm_dmshv_int)
+ device_remove_file(&pdev->dev,
+ &dev_attr_dpdm_pulldown_enable);
+ if (motg->pdata->otg_control == OTG_PHY_CONTROL &&
+ motg->pdata->mpm_otgsessvld_int)
+ msm_mpm_enable_pin(motg->pdata->mpm_otgsessvld_int, 0);
+
+ if (motg->pdata->mpm_dpshv_int)
+ msm_mpm_enable_pin(motg->pdata->mpm_dpshv_int, 0);
+ if (motg->pdata->mpm_dmshv_int)
+ msm_mpm_enable_pin(motg->pdata->mpm_dmshv_int, 0);
+
+ /*
+ * Put PHY in low power mode.
+ */
+ ulpi_read(phy, 0x14);
+ ulpi_write(phy, 0x08, 0x09);
+
+ writel_relaxed(readl_relaxed(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+ while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
+ if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
+ break;
+ udelay(1);
+ cnt++;
+ }
+ if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
+ dev_err(phy->dev, "Unable to suspend PHY\n");
+
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->core_clk);
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+ if (motg->xo_clk) {
+ clk_disable_unprepare(motg->xo_clk);
+ clk_put(motg->xo_clk);
+ }
+
+ if (!IS_ERR(motg->sleep_clk))
+ clk_disable_unprepare(motg->sleep_clk);
+
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
+ msm_hsusb_ldo_init(motg, 0);
+ regulator_disable(hsusb_vdd);
+ regulator_set_voltage(hsusb_vdd,
+ vdd_val[VDD_NONE],
+ vdd_val[VDD_MAX]);
+
+ iounmap(motg->regs);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ clk_put(motg->pclk);
+ clk_put(motg->core_clk);
+
+ if (motg->bus_perf_client) {
+ msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
+ msm_bus_scale_unregister_client(motg->bus_perf_client);
+ }
+
+ return 0;
+}
+
+static void msm_otg_shutdown(struct platform_device *pdev)
+{
+ struct msm_otg *motg = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "OTG shutdown\n");
+ msm_hsusb_vbus_power(motg, 0);
+}
+
+#ifdef CONFIG_PM
+static int msm_otg_runtime_idle(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+ struct usb_phy *phy = &motg->phy;
+
+ dev_dbg(dev, "OTG runtime idle\n");
+ msm_otg_dbg_log_event(phy, "RUNTIME IDLE",
+ phy->otg->state, motg->ext_chg_active);
+
+ if (phy->otg->state == OTG_STATE_UNDEFINED)
+ return -EAGAIN;
+
+ if (motg->ext_chg_active == DEFAULT) {
+ dev_dbg(dev, "Deferring LPM\n");
+ /*
+ * Charger detection may happen in user space.
+ * Delay entering LPM by 3 sec. Otherwise we
+ * have to exit LPM when user space begins
+ * charger detection.
+ *
+ * This timer will be canceled when user space
+ * votes against LPM by incrementing PM usage
+ * counter. We enter low power mode when
+ * PM usage counter is decremented.
+ */
+ pm_schedule_suspend(dev, 3000);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int msm_otg_runtime_suspend(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG runtime suspend\n");
+ msm_otg_dbg_log_event(&motg->phy, "RUNTIME SUSPEND",
+ get_pm_runtime_counter(dev), 0);
+ return msm_otg_suspend(motg);
+}
+
+static int msm_otg_runtime_resume(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG runtime resume\n");
+ msm_otg_dbg_log_event(&motg->phy, "RUNTIME RESUME",
+ get_pm_runtime_counter(dev), 0);
+
+ return msm_otg_resume(motg);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_otg_pm_suspend(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG PM suspend\n");
+ msm_otg_dbg_log_event(&motg->phy, "PM SUSPEND START",
+ get_pm_runtime_counter(dev),
+ atomic_read(&motg->pm_suspended));
+
+ /* flush any pending sm_work first */
+ flush_work(&motg->sm_work);
+ if (!atomic_read(&motg->in_lpm)) {
+ dev_err(dev, "Abort PM suspend!! (USB is outside LPM)\n");
+ return -EBUSY;
+ }
+ atomic_set(&motg->pm_suspended, 1);
+
+ return 0;
+}
+
+static int msm_otg_pm_resume(struct device *dev)
+{
+ int ret = 0;
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG PM resume\n");
+ msm_otg_dbg_log_event(&motg->phy, "PM RESUME START",
+ get_pm_runtime_counter(dev), pm_runtime_suspended(dev));
+
+ if (motg->resume_pending || motg->phy_irq_pending) {
+ msm_otg_dbg_log_event(&motg->phy, "PM RESUME BY USB",
+ motg->async_int, motg->resume_pending);
+ /* sm work if pending will start in pm notify to exit LPM */
+ }
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops msm_otg_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
+ SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
+ msm_otg_runtime_idle)
+};
+#endif
+
+static const struct of_device_id msm_otg_dt_match[] = {
+ { .compatible = "qcom,hsusb-otg",
+ },
+ {}
+};
+
+static struct platform_driver msm_otg_driver = {
+ .probe = msm_otg_probe,
+ .remove = msm_otg_remove,
+ .shutdown = msm_otg_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &msm_otg_dev_pm_ops,
+#endif
+ .of_match_table = msm_otg_dt_match,
+ },
+};
+
+module_platform_driver(msm_otg_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5d3b0db..eb44e99 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2328,6 +2328,20 @@
Select this option if display contents should be inherited as set by
the bootloader.
+config FB_MSM
+ tristate "MSM Framebuffer support"
+ depends on FB && ARCH_QCOM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select SYNC
+ select SW_SYNC
+ ---help---
+ The MSM driver implements a frame buffer interface to
+ provide access to the display hardware and provide
+ a way for users to display graphics
+ on connected display panels.
+
config FB_MX3
tristate "MX3 Framebuffer support"
depends on FB && MX3_IPU
@@ -2448,6 +2462,7 @@
source "drivers/video/fbdev/omap/Kconfig"
source "drivers/video/fbdev/omap2/Kconfig"
source "drivers/video/fbdev/mmp/Kconfig"
+source "drivers/video/fbdev/msm/Kconfig"
config FB_SH_MOBILE_MERAM
tristate "SuperH Mobile MERAM read ahead support"
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index ee8c814..c16b198 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -130,6 +130,11 @@
obj-$(CONFIG_FB_OPENCORES) += ocfb.o
obj-$(CONFIG_FB_SM712) += sm712fb.o
+ifeq ($(CONFIG_FB_MSM),y)
+obj-y += msm/
+else
+obj-$(CONFIG_MSM_DBA) += msm/msm_dba/
+endif
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
obj-$(CONFIG_FB_VESA) += vesafb.o
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index f8a3839..2ef33d4 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1085,7 +1085,7 @@
EXPORT_SYMBOL(fb_blank);
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg, struct file *file)
{
struct fb_ops *fb;
struct fb_var_screeninfo var;
@@ -1222,7 +1222,9 @@
if (!lock_fb_info(info))
return -ENODEV;
fb = info->fbops;
- if (fb->fb_ioctl)
+ if (fb->fb_ioctl_v2)
+ ret = fb->fb_ioctl_v2(info, cmd, arg, file);
+ else if (fb->fb_ioctl)
ret = fb->fb_ioctl(info, cmd, arg);
else
ret = -ENOTTY;
@@ -1237,7 +1239,7 @@
if (!info)
return -ENODEV;
- return do_fb_ioctl(info, cmd, arg);
+ return do_fb_ioctl(info, cmd, arg, file);
}
#ifdef CONFIG_COMPAT
@@ -1268,7 +1270,7 @@
};
static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg, struct file *file)
{
struct fb_cmap_user __user *cmap;
struct fb_cmap32 __user *cmap32;
@@ -1291,7 +1293,7 @@
put_user(compat_ptr(data), &cmap->transp))
return -EFAULT;
- err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
+ err = do_fb_ioctl(info, cmd, (unsigned long) cmap, file);
if (!err) {
if (copy_in_user(&cmap32->start,
@@ -1336,7 +1338,7 @@
}
static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg, struct file *file)
{
mm_segment_t old_fs;
struct fb_fix_screeninfo fix;
@@ -1347,7 +1349,7 @@
old_fs = get_fs();
set_fs(KERNEL_DS);
- err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
+ err = do_fb_ioctl(info, cmd, (unsigned long) &fix, file);
set_fs(old_fs);
if (!err)
@@ -1374,20 +1376,22 @@
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
case FBIOBLANK:
- ret = do_fb_ioctl(info, cmd, arg);
+ ret = do_fb_ioctl(info, cmd, arg, file);
break;
case FBIOGET_FSCREENINFO:
- ret = fb_get_fscreeninfo(info, cmd, arg);
+ ret = fb_get_fscreeninfo(info, cmd, arg, file);
break;
case FBIOGETCMAP:
case FBIOPUTCMAP:
- ret = fb_getput_cmap(info, cmd, arg);
+ ret = fb_getput_cmap(info, cmd, arg, file);
break;
default:
- if (fb->fb_compat_ioctl)
+ if (fb->fb_compat_ioctl_v2)
+ ret = fb->fb_compat_ioctl_v2(info, cmd, arg, file);
+ else if (fb->fb_compat_ioctl)
ret = fb->fb_compat_ioctl(info, cmd, arg);
break;
}
diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig
new file mode 100644
index 0000000..60b86e7
--- /dev/null
+++ b/drivers/video/fbdev/msm/Kconfig
@@ -0,0 +1,138 @@
+source "drivers/video/fbdev/msm/msm_dba/Kconfig"
+
+if FB_MSM
+
+config FB_MSM_MDSS_COMMON
+ bool
+
+choice
+ prompt "MDP HW version"
+ default FB_MSM_MDP
+
+config FB_MSM_MDP
+ bool "MDP HW"
+ select FB_MSM_MDP_HW
+ ---help---
+ The Mobile Display Processor (MDP) driver support devices which
+ contain MDP hardware block.
+
+ Support for MSM MDP HW revision 2.2.
+ Say Y here if this is msm7201 variant platform.
+
+config FB_MSM_MDSS
+ bool "MDSS HW"
+ select SYNC
+ select SW_SYNC
+ select FB_MSM_MDSS_COMMON
+ ---help---
+ The Mobile Display Sub System (MDSS) driver supports devices which
+ contain MDSS hardware block.
+
+ The MDSS driver implements frame buffer interface to provide access to
+ the display hardware and provide a way for users to display graphics
+ on connected display panels.
+
+config FB_MSM_MDP_NONE
+ bool "MDP HW None"
+ ---help---
+ This is used for platforms without Mobile Display Sub System (MDSS).
+ mdm platform don't have MDSS hardware block.
+
+ Say Y here if this is mdm platform.
+
+endchoice
+
+config FB_MSM_QPIC
+ bool
+ select FB_MSM_MDSS_COMMON
+
+config FB_MSM_QPIC_ILI_QVGA_PANEL
+ bool "Qpic MIPI ILI QVGA Panel"
+ select FB_MSM_QPIC
+ ---help---
+ Support for MIPI ILI QVGA (240x320) panel ILI TECHNOLOGY 9341
+ with on-chip full display RAM use parallel interface.
+
+config FB_MSM_QPIC_PANEL_DETECT
+ bool "Qpic Panel Detect"
+ select FB_MSM_QPIC_ILI_QVGA_PANEL
+ ---help---
+ Support for Qpic panel auto detect.
+
+config FB_MSM_MDSS_WRITEBACK
+ bool "MDSS Writeback Panel"
+ ---help---
+ The MDSS Writeback Panel provides support for routing the output of
+ MDSS frame buffer driver and MDP processing to memory.
+
+config FB_MSM_MDSS_HDMI_PANEL
+ bool "MDSS HDMI Tx Panel"
+ depends on FB_MSM_MDSS
+ select MSM_EXT_DISPLAY
+ default n
+ ---help---
+ The MDSS HDMI Panel provides support for transmitting TMDS signals of
+ MDSS frame buffer data to connected hdmi compliant TVs, monitors etc.
+
+config FB_MSM_MDSS_HDMI_MHL_SII8334
+ depends on FB_MSM_MDSS_HDMI_PANEL
+ bool 'MHL SII8334 support '
+ default n
+ ---help---
+ Support the HDMI to MHL conversion.
+ MHL (Mobile High-Definition Link) technology
+ uses USB connector to output HDMI content
+
+config FB_MSM_MDSS_MHL3
+ depends on FB_MSM_MDSS_HDMI_PANEL
+ bool "MHL3 SII8620 Support"
+ default n
+ ---help---
+ Support the SiliconImage 8620 MHL Tx transmitter that uses
+ USB connector to output HDMI content. Transmitter is an
+ i2c device acting as an HDMI to MHL bridge. Chip supports
+ MHL 3.0 standard.
+
+config FB_MSM_MDSS_DSI_CTRL_STATUS
+ tristate "DSI controller status check feature"
+ ---help---
+ Check DSI controller status periodically (default period is 5
+ seconds) by sending Bus-Turn-Around (BTA) command. If DSI controller
+ fails to acknowledge the BTA command, it sends PANEL_ALIVE=0 status
+ to HAL layer to reset the controller.
+
+config FB_MSM_MDSS_EDP_PANEL
+ depends on FB_MSM_MDSS
+ bool "MDSS eDP Panel"
+ ---help---
+ The MDSS eDP Panel provides support for eDP host controller driver.
+ Which runs in Video mode only and is responsible for transmitting
+ frame buffer from host SOC to eDP display panel.
+
+config FB_MSM_MDSS_MDP3
+ depends on FB_MSM_MDSS
+ bool "MDP3 display controller"
+ ---help---
+ The MDP3 provides support for an older version display controller.
+ Included in latest display sub-system, known as MDSS.
+
+config FB_MSM_MDSS_XLOG_DEBUG
+ depends on FB_MSM_MDSS
+ bool "Enable MDSS debugging"
+ ---help---
+ The MDSS debugging provides support to enable display debugging
+ features to: Dump MDSS registers during driver errors, panic
+ driver during fatal errors and enable some display-driver logging
+ into an internal buffer (this avoids logging overhead).
+
+config FB_MSM_MDSS_FRC_DEBUG
+ depends on DEBUG_FS && FB_MSM_MDSS
+ bool "Enable Video FRC debugging"
+ default n
+ ---help---
+ The MDSS FRC debugging provides support to enable the deterministic
+ frame rate control (FRC) debugging features to: Collect video frame
+ statistics and check whether its output pattern matches expected
+ cadence.
+
+endif
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
new file mode 100644
index 0000000..e09dcdb
--- /dev/null
+++ b/drivers/video/fbdev/msm/Makefile
@@ -0,0 +1,73 @@
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_FB_MSM_MDSS_MHL3) += mhl3/
+obj-$(CONFIG_MSM_DBA) += msm_dba/
+
+mdss-mdp3-objs = mdp3.o mdp3_layer.o mdp3_dma.o mdp3_ctrl.o dsi_status_v2.o
+mdss-mdp3-objs += mdp3_ppp.o mdp3_ppp_hwio.o mdp3_ppp_data.o
+obj-$(CONFIG_FB_MSM_MDSS_MDP3) += mdss-mdp3.o
+ifeq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
+ccflags-y += -DTARGET_HW_MDSS_MDP3
+endif
+mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o dsi_status_6g.o
+mdss-mdp-objs += mdss_mdp_pp.o mdss_mdp_pp_debug.o mdss_mdp_pp_cache_config.o mdss_sync.o
+mdss-mdp-objs += mdss_mdp_intf_video.o
+mdss-mdp-objs += mdss_mdp_intf_cmd.o
+mdss-mdp-objs += mdss_mdp_intf_writeback.o
+mdss-mdp-objs += mdss_rotator.o
+mdss-mdp-objs += mdss_mdp_overlay.o
+mdss-mdp-objs += mdss_mdp_layer.o
+mdss-mdp-objs += mdss_mdp_splash_logo.o
+mdss-mdp-objs += mdss_mdp_cdm.o
+mdss-mdp-objs += mdss_smmu.o
+mdss-mdp-objs += mdss_mdp_wfd.o
+mdss-mdp-objs += mdss_io_util.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o
+
+mdss-mdp-objs += mdss_mdp_pp_v1_7.o
+mdss-mdp-objs += mdss_mdp_pp_v3.o
+mdss-mdp-objs += mdss_mdp_pp_common.o
+
+ifeq ($(CONFIG_FB_MSM_MDSS),y)
+obj-$(CONFIG_DEBUG_FS) += mdss_debug.o mdss_debug_xlog.o
+endif
+
+ifeq ($(CONFIG_FB_MSM_MDSS_FRC_DEBUG),y)
+obj-$(CONFIG_DEBUG_FS) += mdss_debug_frc.o
+endif
+
+mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o mdss_dsi_cmd.o mdss_dsi_status.o
+mdss-dsi-objs += mdss_dsi_panel.o
+mdss-dsi-objs += msm_mdss_io_8974.o
+mdss-dsi-objs += mdss_dsi_phy.o
+mdss-dsi-objs += mdss_dsi_clk.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_panel.o
+
+ifneq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_util.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_edid.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_cec_core.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_dba_utils.o
+obj-$(CONFIG_FB_MSM_MDSS_EDP_PANEL) += mdss_edp.o
+obj-$(CONFIG_FB_MSM_MDSS_EDP_PANEL) += mdss_edp_aux.o
+
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_panel.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_audio.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
+ccflags-y += -DTARGET_HW_MDSS_HDMI
+endif
+
+obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
+
+mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o
+obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o
+obj-$(CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL) += qpic_panel_ili_qvga.o
+
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o mdss_util.o
+obj-$(CONFIG_COMPAT) += mdss_compat_utils.o
diff --git a/drivers/video/fbdev/msm/dsi_host_v2.c b/drivers/video/fbdev/msm/dsi_host_v2.c
new file mode 100644
index 0000000..33775ec
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_host_v2.c
@@ -0,0 +1,1889 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+#include "mdss_debug.h"
+#include "mdp3.h"
+
+#define DSI_POLL_SLEEP_US 1000
+#define DSI_POLL_TIMEOUT_US 16000
+#define DSI_ESC_CLK_RATE 19200000
+#define DSI_DMA_CMD_TIMEOUT_MS 200
+#define VSYNC_PERIOD 17
+#define DSI_MAX_PKT_SIZE 10
+#define DSI_SHORT_PKT_DATA_SIZE 2
+#define DSI_MAX_BYTES_TO_READ 16
+
+struct dsi_host_v2_private {
+ unsigned char *dsi_base;
+ size_t dsi_reg_size;
+ struct device dis_dev;
+ int clk_count;
+ int dsi_on;
+
+ void (*debug_enable_clk)(int on);
+};
+
+static struct dsi_host_v2_private *dsi_host_private;
+static int msm_dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable);
+
+int msm_dsi_init(void)
+{
+ if (!dsi_host_private) {
+ dsi_host_private = kzalloc(sizeof(struct dsi_host_v2_private),
+ GFP_KERNEL);
+ if (!dsi_host_private)
+ return -ENOMEM;
+
+ }
+
+ return 0;
+}
+
+void msm_dsi_deinit(void)
+{
+ kfree(dsi_host_private);
+ dsi_host_private = NULL;
+}
+
+void msm_dsi_ack_err_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_ACK_ERR_STATUS);
+
+ if (status) {
+ MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, status);
+
+ /* Writing of an extra 0 needed to clear error bits */
+ MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, 0);
+ pr_err("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_timeout_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_TIMEOUT_STATUS);
+ if (status & 0x0111) {
+ MIPI_OUTP(ctrl_base + DSI_TIMEOUT_STATUS, status);
+ pr_err("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_dln0_phy_err(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_DLN0_PHY_ERR);
+
+ if (status & 0x011111) {
+ MIPI_OUTP(ctrl_base + DSI_DLN0_PHY_ERR, status);
+ pr_err("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_fifo_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_FIFO_STATUS);
+
+ if (status & 0x44444489) {
+ MIPI_OUTP(ctrl_base + DSI_FIFO_STATUS, status);
+ pr_err("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_status(unsigned char *ctrl_base)
+{
+ u32 status;
+
+ status = MIPI_INP(ctrl_base + DSI_STATUS);
+
+ if (status & 0x80000000) {
+ MIPI_OUTP(ctrl_base + DSI_STATUS, status);
+ pr_err("%s: status=%x\n", __func__, status);
+ }
+}
+
+void msm_dsi_error(unsigned char *ctrl_base)
+{
+ msm_dsi_ack_err_status(ctrl_base);
+ msm_dsi_timeout_status(ctrl_base);
+ msm_dsi_fifo_status(ctrl_base);
+ msm_dsi_status(ctrl_base);
+ msm_dsi_dln0_phy_err(ctrl_base);
+}
+
+static void msm_dsi_set_irq_mask(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+ u32 intr_ctrl;
+
+ intr_ctrl = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+ intr_ctrl |= mask;
+ MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, intr_ctrl);
+}
+
+static void msm_dsi_clear_irq_mask(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+ u32 intr_ctrl;
+
+ intr_ctrl = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+ intr_ctrl &= ~mask;
+ MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, intr_ctrl);
+}
+
+static void msm_dsi_set_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->irq_lock, flags);
+ if (ctrl->dsi_irq_mask & mask) {
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+ return;
+ }
+ if (ctrl->dsi_irq_mask == 0) {
+ ctrl->mdss_util->enable_irq(ctrl->dsi_hw);
+ pr_debug("%s: IRQ Enable, mask=%x term=%x\n", __func__,
+ (int)ctrl->dsi_irq_mask, (int)mask);
+ }
+
+ msm_dsi_set_irq_mask(ctrl, mask);
+ ctrl->dsi_irq_mask |= mask;
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+static void msm_dsi_clear_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->irq_lock, flags);
+ if (!(ctrl->dsi_irq_mask & mask)) {
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+ return;
+ }
+ ctrl->dsi_irq_mask &= ~mask;
+ if (ctrl->dsi_irq_mask == 0) {
+ ctrl->mdss_util->disable_irq(ctrl->dsi_hw);
+ pr_debug("%s: IRQ Disable, mask=%x term=%x\n", __func__,
+ (int)ctrl->dsi_irq_mask, (int)mask);
+ }
+ msm_dsi_clear_irq_mask(ctrl, mask);
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+irqreturn_t msm_dsi_isr_handler(int irq, void *ptr)
+{
+ u32 isr;
+
+ struct mdss_dsi_ctrl_pdata *ctrl =
+ (struct mdss_dsi_ctrl_pdata *)ptr;
+
+ spin_lock(&ctrl->mdp_lock);
+
+ if (ctrl->dsi_irq_mask == 0) {
+ spin_unlock(&ctrl->mdp_lock);
+ return IRQ_HANDLED;
+ }
+
+ isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+ MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+
+ pr_debug("%s: isr=%x", __func__, isr);
+
+ if (isr & DSI_INTR_ERROR) {
+ pr_err("%s: isr=%x %x", __func__, isr, (int)DSI_INTR_ERROR);
+ msm_dsi_error(dsi_host_private->dsi_base);
+ }
+
+ if (isr & DSI_INTR_VIDEO_DONE)
+ complete(&ctrl->video_comp);
+
+ if (isr & DSI_INTR_CMD_DMA_DONE)
+ complete(&ctrl->dma_comp);
+
+ if (isr & DSI_INTR_BTA_DONE)
+ complete(&ctrl->bta_comp);
+
+ if (isr & DSI_INTR_CMD_MDP_DONE)
+ complete(&ctrl->mdp_comp);
+
+ spin_unlock(&ctrl->mdp_lock);
+
+ return IRQ_HANDLED;
+}
+
+int msm_dsi_irq_init(struct device *dev, int irq_no,
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int ret;
+ u32 isr;
+ struct mdss_hw *dsi_hw;
+
+ msm_dsi_ahb_ctrl(1);
+ isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+ isr &= ~DSI_INTR_ALL_MASK;
+ MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+ msm_dsi_ahb_ctrl(0);
+
+ ret = devm_request_irq(dev, irq_no, msm_dsi_isr_handler,
+ IRQF_DISABLED, "DSI", ctrl);
+ if (ret) {
+ pr_err("msm_dsi_irq_init request_irq() failed!\n");
+ return ret;
+ }
+
+ dsi_hw = kzalloc(sizeof(struct mdss_hw), GFP_KERNEL);
+ if (!dsi_hw)
+ return -ENOMEM;
+
+ ctrl->dsi_hw = dsi_hw;
+
+ dsi_hw->irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+ if (!dsi_hw->irq_info) {
+ kfree(dsi_hw);
+ pr_err("no mem to save irq info: kzalloc fail\n");
+ return -ENOMEM;
+ }
+
+ dsi_hw->hw_ndx = MDSS_HW_DSI0;
+ dsi_hw->irq_info->irq = irq_no;
+ dsi_hw->irq_info->irq_mask = 0;
+ dsi_hw->irq_info->irq_ena = false;
+ dsi_hw->irq_info->irq_buzy = false;
+
+ ctrl->mdss_util->register_irq(ctrl->dsi_hw);
+ ctrl->mdss_util->disable_irq(ctrl->dsi_hw);
+
+ return 0;
+}
+
+static void msm_dsi_get_cmd_engine(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+ u32 dsi_ctrl;
+
+ if (ctrl->panel_mode == DSI_VIDEO_MODE) {
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl | 0x04);
+ }
+}
+
+static void msm_dsi_release_cmd_engine(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+ u32 dsi_ctrl;
+
+ if (ctrl->panel_mode == DSI_VIDEO_MODE) {
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ dsi_ctrl &= ~0x04;
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ }
+}
+
+static int msm_dsi_wait4mdp_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int rc;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ctrl->mdp_lock, flag);
+ reinit_completion(&ctrl->mdp_comp);
+ msm_dsi_set_irq(ctrl, DSI_INTR_CMD_MDP_DONE_MASK);
+ spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+ rc = wait_for_completion_timeout(&ctrl->mdp_comp,
+ msecs_to_jiffies(VSYNC_PERIOD * 4));
+
+ if (rc == 0) {
+ pr_err("DSI wait 4 mdp done time out\n");
+ rc = -ETIME;
+ } else if (!IS_ERR_VALUE(rc)) {
+ rc = 0;
+ }
+
+ msm_dsi_clear_irq(ctrl, DSI_INTR_CMD_MDP_DONE_MASK);
+
+ return rc;
+}
+
+void msm_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int rc;
+ u32 dsi_status;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ if (ctrl->panel_mode == DSI_VIDEO_MODE)
+ return;
+
+ dsi_status = MIPI_INP(ctrl_base + DSI_STATUS);
+ if (dsi_status & 0x04) {
+ pr_debug("dsi command engine is busy\n");
+ rc = msm_dsi_wait4mdp_done(ctrl);
+ if (rc)
+ pr_err("Timed out waiting for mdp done");
+ }
+}
+
+static int msm_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int rc;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ctrl->mdp_lock, flag);
+ reinit_completion(&ctrl->video_comp);
+ msm_dsi_set_irq(ctrl, DSI_INTR_VIDEO_DONE_MASK);
+ spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+ rc = wait_for_completion_timeout(&ctrl->video_comp,
+ msecs_to_jiffies(VSYNC_PERIOD * 4));
+
+ if (rc == 0) {
+ pr_err("DSI wait 4 video done time out\n");
+ rc = -ETIME;
+ } else if (!IS_ERR_VALUE(rc)) {
+ rc = 0;
+ }
+
+ msm_dsi_clear_irq(ctrl, DSI_INTR_VIDEO_DONE_MASK);
+
+ return rc;
+}
+
+static int msm_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int rc = 0;
+ u32 dsi_status;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ if (ctrl->panel_mode == DSI_CMD_MODE)
+ return rc;
+
+ dsi_status = MIPI_INP(ctrl_base + DSI_STATUS);
+ if (dsi_status & 0x08) {
+ pr_debug("dsi command in video mode wait for active region\n");
+ rc = msm_dsi_wait4video_done(ctrl);
+ /* delay 4-5 ms to skip BLLP */
+ if (!rc)
+ usleep_range(4000, 5000);
+ }
+ return rc;
+}
+
+/*
+ * msm_dsi_host_init() - program the DSI host controller from panel info.
+ *
+ * Configures the video-mode or command-mode engine registers (traffic mode,
+ * destination format, power-stop bits, RGB swap, DCS write setup), then the
+ * common registers: lane enables, DMA trigger mode, lane swap, clock
+ * pre/post timing, EOT packet handling, error-interrupt mask and clock
+ * enables, and finally enables the controller (DSI_CTRL bit 0).
+ * Register write order follows the hardware bring-up sequence; the final
+ * wmb() ensures all writes land before callers proceed.
+ */
+void msm_dsi_host_init(struct mdss_panel_data *pdata)
+{
+ u32 dsi_ctrl, data;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mipi_panel_info *pinfo;
+
+ pr_debug("msm_dsi_host_init\n");
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+ pinfo = &pdata->panel_info.mipi;
+
+
+ if (pinfo->mode == DSI_VIDEO_MODE) {
+ /* DSI_VIDEO_MODE_CTRL: pulse mode, low-power stop regions, */
+ /* traffic mode, destination format, virtual channel */
+ data = 0;
+ if (pinfo->pulse_mode_hsa_he)
+ data |= BIT(28);
+ if (pinfo->hfp_power_stop)
+ data |= BIT(24);
+ if (pinfo->hbp_power_stop)
+ data |= BIT(20);
+ if (pinfo->hsa_power_stop)
+ data |= BIT(16);
+ if (pinfo->eof_bllp_power_stop)
+ data |= BIT(15);
+ if (pinfo->bllp_power_stop)
+ data |= BIT(12);
+ data |= ((pinfo->traffic_mode & 0x03) << 8);
+ data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+ data |= (pinfo->vc & 0x03);
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_CTRL, data);
+
+ data = 0;
+ data |= ((pinfo->rgb_swap & 0x07) << 12);
+ if (pinfo->b_sel)
+ data |= BIT(8);
+ if (pinfo->g_sel)
+ data |= BIT(4);
+ if (pinfo->r_sel)
+ data |= BIT(0);
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_DATA_CTRL, data);
+ } else if (pinfo->mode == DSI_CMD_MODE) {
+ /* DSI_COMMAND_MODE_MDP_CTRL: interleave, swap, dst format */
+ data = 0;
+ data |= ((pinfo->interleave_max & 0x0f) << 20);
+ data |= ((pinfo->rgb_swap & 0x07) << 16);
+ if (pinfo->b_sel)
+ data |= BIT(12);
+ if (pinfo->g_sel)
+ data |= BIT(8);
+ if (pinfo->r_sel)
+ data |= BIT(4);
+ data |= (pinfo->dst_format & 0x0f); /* 4 bits */
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_CTRL, data);
+
+ /* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+ data = pinfo->wr_mem_continue & 0x0ff;
+ data <<= 8;
+ data |= (pinfo->wr_mem_start & 0x0ff);
+ if (pinfo->insert_dcs_cmd)
+ data |= BIT(16);
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL,
+ data);
+ } else
+ pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+ dsi_ctrl = BIT(8) | BIT(2); /* clock enable & cmd mode */
+
+ if (pinfo->crc_check)
+ dsi_ctrl |= BIT(24);
+ if (pinfo->ecc_check)
+ dsi_ctrl |= BIT(20);
+ if (pinfo->data_lane3)
+ dsi_ctrl |= BIT(7);
+ if (pinfo->data_lane2)
+ dsi_ctrl |= BIT(6);
+ if (pinfo->data_lane1)
+ dsi_ctrl |= BIT(5);
+ if (pinfo->data_lane0)
+ dsi_ctrl |= BIT(4);
+
+ /* from frame buffer, low power mode */
+ /* DSI_COMMAND_MODE_DMA_CTRL */
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, 0x14000000);
+
+ data = 0;
+ if (pinfo->te_sel)
+ data |= BIT(31);
+ data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+ data |= pinfo->dma_trigger; /* cmd dma trigger */
+ data |= (pinfo->stream & 0x01) << 8;
+ MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, data);
+
+ /* DSI_LAN_SWAP_CTRL */
+ MIPI_OUTP(ctrl_base + DSI_LANE_SWAP_CTRL, ctrl_pdata->dlane_swap);
+
+ /* clock out ctrl */
+ data = pinfo->t_clk_post & 0x3f; /* 6 bits */
+ data <<= 8;
+ data |= pinfo->t_clk_pre & 0x3f; /* 6 bits */
+ /* DSI_CLKOUT_TIMING_CTRL */
+ MIPI_OUTP(ctrl_base + DSI_CLKOUT_TIMING_CTRL, data);
+
+ data = 0;
+ if (pinfo->rx_eot_ignore)
+ data |= BIT(4);
+ if (pinfo->tx_eot_append)
+ data |= BIT(0);
+ MIPI_OUTP(ctrl_base + DSI_EOT_PACKET_CTRL, data);
+
+
+ /* allow only ack-err-status to generate interrupt */
+ /* DSI_ERR_INT_MASK0 */
+ MIPI_OUTP(ctrl_base + DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+ /* turn esc, byte, dsi, pclk, sclk, hclk on */
+ MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+
+ dsi_ctrl |= BIT(0); /* enable dsi */
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+
+ wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * dsi_set_tx_power_mode() - select HS or LP mode for command DMA transfer.
+ * @mode: 0 = high-speed (clear bit 26 of DSI_COMMAND_MODE_DMA_CTRL),
+ *        non-zero = low-power (set bit 26).
+ */
+void dsi_set_tx_power_mode(int mode)
+{
+ u32 data;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ data = MIPI_INP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL);
+
+ if (mode == 0)
+ data &= ~BIT(26);
+ else
+ data |= BIT(26);
+
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data);
+}
+
+/*
+ * msm_dsi_sw_reset() - soft-reset the DSI controller.
+ *
+ * Disables the controller (DSI_CTRL bit 0), makes sure all controller
+ * clocks are on, then pulses DSI_SOFT_RESET.  The wmb() after each write
+ * enforces the required ordering between the register accesses.
+ */
+void msm_dsi_sw_reset(void)
+{
+ u32 dsi_ctrl;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_sw_reset\n");
+
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ dsi_ctrl &= ~0x01;
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ wmb(); /* ensure write is finished before progressing */
+
+ /* turn esc, byte, dsi, pclk, sclk, hclk on */
+ MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+ wmb(); /* ensure write is finished before progressing */
+
+ MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x01);
+ wmb(); /* ensure write is finished before progressing */
+ MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x00);
+ wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_controller_cfg() - enable/disable the DSI controller safely.
+ * @enable: non-zero to set DSI_CTRL bit 0, zero to clear it.
+ *
+ * Before touching DSI_CTRL, polls that command DMA is idle, the HS FIFOs
+ * are empty and the video engine is not busy; on poll timeout it logs the
+ * stuck status and falls back to a software reset.
+ */
+void msm_dsi_controller_cfg(int enable)
+{
+ u32 dsi_ctrl, status;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_controller_cfg\n");
+
+ /* Check for CMD_MODE_DMA_BUSY */
+ if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+ status,
+ ((status & 0x02) == 0),
+ DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) {
+ pr_err("%s: DSI status=%x failed\n", __func__, status);
+ pr_err("%s: Doing sw reset\n", __func__);
+ msm_dsi_sw_reset();
+ }
+
+ /* Check for x_HS_FIFO_EMPTY */
+ if (readl_poll_timeout((ctrl_base + DSI_FIFO_STATUS),
+ status,
+ ((status & 0x11111000) == 0x11111000),
+ DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US))
+ pr_err("%s: FIFO status=%x failed\n", __func__, status);
+
+ /* Check for VIDEO_MODE_ENGINE_BUSY */
+ if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+ status,
+ ((status & 0x08) == 0),
+ DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) {
+ pr_err("%s: DSI status=%x\n", __func__, status);
+ pr_err("%s: Doing sw reset\n", __func__);
+ msm_dsi_sw_reset();
+ }
+
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+ if (enable)
+ dsi_ctrl |= 0x01;
+ else
+ dsi_ctrl &= ~0x01;
+
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_op_mode_config() - switch the controller between video and
+ * command operating modes.
+ * @mode:  DSI_VIDEO_MODE or command mode (any other value).
+ * @pdata: panel data; for MIPI_VIDEO_PANEL the video engine is kept on
+ *         even while command mode is enabled.
+ */
+void msm_dsi_op_mode_config(int mode, struct mdss_panel_data *pdata)
+{
+ u32 dsi_ctrl;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ pr_debug("msm_dsi_op_mode_config\n");
+
+ dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+
+ /* if video mode is on, leave it enabled while clearing cmd/en bits */
+ if (dsi_ctrl & DSI_VIDEO_MODE_EN)
+ dsi_ctrl &= ~(DSI_CMD_MODE_EN|DSI_EN);
+ else
+ dsi_ctrl &= ~(DSI_CMD_MODE_EN|DSI_VIDEO_MODE_EN|DSI_EN);
+
+ if (mode == DSI_VIDEO_MODE) {
+ dsi_ctrl |= (DSI_VIDEO_MODE_EN|DSI_EN);
+ } else {
+ dsi_ctrl |= (DSI_CMD_MODE_EN|DSI_EN);
+ /* For Video mode panel, keep Video and Cmd mode ON */
+ if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+ dsi_ctrl |= DSI_VIDEO_MODE_EN;
+ }
+
+ pr_debug("%s: dsi_ctrl=%x\n", __func__, dsi_ctrl);
+
+ MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+ wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_cmd_dma_tx() - send one DSI command buffer via DMA.
+ * @ctrl: controller data (supplies dma_comp, mdp_lock, IRQ helpers)
+ * @tp:   command buffer; tp->data/tp->len describe the packet.
+ *
+ * Maps the buffer for DMA, arms the CMD_DMA_DONE interrupt, programs the
+ * DMA address/length registers, triggers the transfer and waits up to
+ * DSI_DMA_CMD_TIMEOUT_MS for completion.  The buffer is always unmapped
+ * and the IRQ/engine released before returning.
+ *
+ * Return: 0 on success, -ENOMEM on mapping failure, -ETIME on timeout.
+ */
+int msm_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_buf *tp)
+{
+ int len, rc;
+ unsigned long size, addr;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+ unsigned long flag;
+
+ len = ALIGN(tp->len, 4);
+ size = ALIGN(tp->len, SZ_4K);
+
+ tp->dmap = dma_map_single(&dsi_host_private->dis_dev, tp->data, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dsi_host_private->dis_dev, tp->dmap)) {
+ pr_err("%s: dmap mapp failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ addr = tp->dmap;
+
+ msm_dsi_get_cmd_engine(ctrl);
+
+ /* arm the DMA-done completion under mdp_lock before triggering */
+ spin_lock_irqsave(&ctrl->mdp_lock, flag);
+ reinit_completion(&ctrl->dma_comp);
+ msm_dsi_set_irq(ctrl, DSI_INTR_CMD_DMA_DONE_MASK);
+ spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+ MIPI_OUTP(ctrl_base + DSI_DMA_CMD_OFFSET, addr);
+ MIPI_OUTP(ctrl_base + DSI_DMA_CMD_LENGTH, len);
+ wmb(); /* ensure write is finished before progressing */
+
+ MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
+ wmb(); /* ensure write is finished before progressing */
+
+ rc = wait_for_completion_timeout(&ctrl->dma_comp,
+ msecs_to_jiffies(DSI_DMA_CMD_TIMEOUT_MS));
+ if (rc == 0) {
+ pr_err("DSI command transaction time out\n");
+ rc = -ETIME;
+ } else if (!IS_ERR_VALUE(rc)) {
+ /* positive = jiffies remaining; normalize to 0 for success */
+ rc = 0;
+ }
+
+ dma_unmap_single(&dsi_host_private->dis_dev, tp->dmap, size,
+ DMA_TO_DEVICE);
+ tp->dmap = 0;
+
+ msm_dsi_clear_irq(ctrl, DSI_INTR_CMD_DMA_DONE_MASK);
+
+ msm_dsi_release_cmd_engine(ctrl);
+
+ return rc;
+}
+
+/*
+ * msm_dsi_cmd_dma_rx() - read back response data from the RDBK registers.
+ * @ctrl: controller data (unused here beyond the shared register base)
+ * @rp:   destination buffer; rp->len is advanced by the bytes copied.
+ * @rlen: requested byte count; capped at 16 bytes (4 x 32-bit registers).
+ *
+ * Reads DSI_RDBK_DATA3..0 highest-register-first and converts each word
+ * with ntohl() so the payload lands in wire order.
+ *
+ * Return: the (uncapped) rlen that was requested.
+ */
+int msm_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_buf *rp, int rlen)
+{
+ u32 *lp, data;
+ int i, off, cnt;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+ lp = (u32 *)rp->data;
+ cnt = rlen;
+ cnt += 3;
+ cnt >>= 2; /* round up to whole 32-bit words */
+
+ if (cnt > 4)
+ cnt = 4; /* 4 x 32 bits registers only */
+
+ off = DSI_RDBK_DATA0;
+ off += ((cnt - 1) * 4);
+
+ for (i = 0; i < cnt; i++) {
+ data = (u32)MIPI_INP(ctrl_base + off);
+ *lp++ = ntohl(data); /* to network byte order */
+ pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+ __func__, data, ntohl(data));
+ off -= 4;
+ rp->len += sizeof(*lp);
+ }
+
+ return rlen;
+}
+
+/*
+ * msm_dsi_cmds_tx() - send a list of DSI commands through command DMA.
+ * @ctrl: controller data
+ * @cmds: array of command descriptors
+ * @cnt:  number of descriptors in @cmds
+ *
+ * Commands are packed into ctrl->tx_buf until a descriptor marked "last"
+ * is reached; the accumulated buffer is then DMA'd out (after waiting for
+ * the video engine to leave the active region) and the optional post-command
+ * delay is applied.
+ *
+ * Return: 0 on success, negative errno on the first failure.
+ */
+static int msm_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_cmd_desc *cmds, int cnt)
+{
+ struct dsi_buf *tp;
+ struct dsi_cmd_desc *cm;
+ struct dsi_ctrl_hdr *dchdr;
+ int len;
+ int rc = 0;
+
+ tp = &ctrl->tx_buf;
+ mdss_dsi_buf_init(tp);
+ cm = cmds;
+ len = 0;
+ while (cnt--) {
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve(tp, len);
+ len = mdss_dsi_cmd_dma_add(tp, cm);
+ if (!len) {
+ pr_err("%s: failed to add cmd = 0x%x\n",
+ __func__, cm->payload[0]);
+ rc = -EINVAL;
+ goto dsi_cmds_tx_err;
+ }
+
+ if (dchdr->last) {
+ tp->data = tp->start; /* begin of buf */
+ rc = msm_dsi_wait4video_eng_busy(ctrl);
+ if (rc) {
+ pr_err("%s: wait4video_eng failed\n", __func__);
+ goto dsi_cmds_tx_err;
+
+ }
+
+ rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+ /*
+ * Check the DMA return code, not the stale buffer
+ * length, so a transfer failure is actually reported.
+ */
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: failed to call cmd_dma_tx for cmd = 0x%x\n",
+ __func__, cmds->payload[0]);
+ goto dsi_cmds_tx_err;
+ }
+
+ if (dchdr->wait)
+ usleep_range(dchdr->wait * 1000,
+ dchdr->wait * 1000);
+
+ mdss_dsi_buf_init(tp);
+ len = 0;
+ }
+ cm++;
+ }
+
+dsi_cmds_tx_err:
+ return rc;
+}
+
+/*
+ * msm_dsi_parse_rx_response() - decode the response packet in @rp.
+ *
+ * Dispatches on the data type byte (rp->data[0]): ACK/error reports are
+ * treated as failures, short read responses (1 or 2 bytes) and long read
+ * responses are unpacked in place by the mdss_dsi_*_resp() helpers.
+ *
+ * Return: 0 on a recognized read response, -EINVAL otherwise.
+ */
+static int msm_dsi_parse_rx_response(struct dsi_buf *rp)
+{
+ int rc = 0;
+ unsigned char cmd;
+
+ cmd = rp->data[0];
+ switch (cmd) {
+ case DTYPE_ACK_ERR_RESP:
+ pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
+ rc = -EINVAL;
+ break;
+ case DTYPE_GEN_READ1_RESP:
+ case DTYPE_DCS_READ1_RESP:
+ mdss_dsi_short_read1_resp(rp);
+ break;
+ case DTYPE_GEN_READ2_RESP:
+ case DTYPE_DCS_READ2_RESP:
+ mdss_dsi_short_read2_resp(rp);
+ break;
+ case DTYPE_GEN_LREAD_RESP:
+ case DTYPE_DCS_LREAD_RESP:
+ mdss_dsi_long_read_resp(rp);
+ break;
+ default:
+ rc = -EINVAL;
+ pr_warn("%s: Unknown cmd received\n", __func__);
+ break;
+ }
+
+ return rc;
+}
+
+/* MIPI_DSI_MRPS, Maximum Return Packet Size */
+/* byte 0 is patched at runtime by msm_dsi_set_max_packet_size() */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+/* Pre-built "set maximum return packet size" command descriptor. */
+static struct dsi_cmd_desc pkt_size_cmd = {
+ {DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)},
+ max_pktsize,
+};
+
+/*
+ * msm_dsi_set_max_packet_size() - send the MRPS command to the panel.
+ * @ctrl: controller data
+ * @size: maximum return packet size the panel may use for reads
+ *
+ * Patches the shared max_pktsize payload and transmits pkt_size_cmd via
+ * command DMA.  Return: 0 on success, negative errno on failure.
+ */
+static int msm_dsi_set_max_packet_size(struct mdss_dsi_ctrl_pdata *ctrl,
+ int size)
+{
+ struct dsi_buf *tp;
+ int rc;
+
+ tp = &ctrl->tx_buf;
+ mdss_dsi_buf_init(tp);
+ max_pktsize[0] = size;
+
+ rc = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+ if (!rc) {
+ pr_err("%s: failed to add max_pkt_size\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_dsi_wait4video_eng_busy(ctrl);
+ if (rc) {
+ pr_err("%s: failed to wait4video_eng\n", __func__);
+ return rc;
+ }
+
+ rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: failed to tx max_pkt_size\n", __func__);
+ return rc;
+ }
+ pr_debug("%s: max_pkt_size=%d sent\n", __func__, size);
+ return rc;
+}
+
+/* read data length is less than or equal to 10 bytes*/
+/*
+ * msm_dsi_cmds_rx_1() - single-shot DSI read for short responses.
+ * @ctrl: controller data
+ * @cmds: read command to send
+ * @rlen: expected payload length in bytes
+ *
+ * Sends @cmds once, pulls the response out of the RDBK registers and
+ * parses it.  On any failure rp->len is reset to 0.
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int msm_dsi_cmds_rx_1(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_cmd_desc *cmds, int rlen)
+{
+ int rc;
+ struct dsi_buf *tp, *rp;
+
+ tp = &ctrl->tx_buf;
+ rp = &ctrl->rx_buf;
+ mdss_dsi_buf_init(rp);
+ mdss_dsi_buf_init(tp);
+
+ rc = mdss_dsi_cmd_dma_add(tp, cmds);
+ if (!rc) {
+ pr_err("%s: dsi_cmd_dma_add failed\n", __func__);
+ rc = -EINVAL;
+ goto dsi_cmds_rx_1_error;
+ }
+
+ rc = msm_dsi_wait4video_eng_busy(ctrl);
+ if (rc) {
+ pr_err("%s: wait4video_eng failed\n", __func__);
+ goto dsi_cmds_rx_1_error;
+ }
+
+ rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: msm_dsi_cmd_dma_tx failed\n", __func__);
+ goto dsi_cmds_rx_1_error;
+ }
+
+ /* short packets fit as-is; longer reads include the host header */
+ if (rlen <= DSI_SHORT_PKT_DATA_SIZE) {
+ msm_dsi_cmd_dma_rx(ctrl, rp, rlen);
+ } else {
+ msm_dsi_cmd_dma_rx(ctrl, rp, rlen + DSI_HOST_HDR_SIZE);
+ rp->len = rlen + DSI_HOST_HDR_SIZE;
+ }
+ rc = msm_dsi_parse_rx_response(rp);
+
+dsi_cmds_rx_1_error:
+ if (rc)
+ rp->len = 0;
+
+ return rc;
+}
+
+/* read data length is more than 10 bytes, which requires multiple DSI read*/
+/*
+ * msm_dsi_cmds_rx_2() - segmented DSI read for long responses.
+ * @ctrl: controller data
+ * @cmds: read command to repeat for each segment
+ * @rlen: total expected payload length in bytes
+ *
+ * Repeatedly raises the max-return-packet-size, re-sends the read command
+ * and appends each DSI_MAX_BYTES_TO_READ chunk into ctrl->rx_buf until
+ * @rlen bytes have been gathered, then parses the assembled response.
+ * On any failure rp->len is reset to 0.
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int msm_dsi_cmds_rx_2(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_cmd_desc *cmds, int rlen)
+{
+ int rc;
+ struct dsi_buf *tp, *rp;
+ int pkt_size, data_bytes, total;
+
+ tp = &ctrl->tx_buf;
+ rp = &ctrl->rx_buf;
+ mdss_dsi_buf_init(rp);
+ pkt_size = DSI_MAX_PKT_SIZE;
+ data_bytes = MDSS_DSI_LEN;
+ total = 0;
+
+ while (true) {
+ rc = msm_dsi_set_max_packet_size(ctrl, pkt_size);
+ if (rc)
+ break;
+
+ mdss_dsi_buf_init(tp);
+ rc = mdss_dsi_cmd_dma_add(tp, cmds);
+ if (!rc) {
+ pr_err("%s: dsi_cmd_dma_add failed\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ rc = msm_dsi_wait4video_eng_busy(ctrl);
+ if (rc) {
+ pr_err("%s: wait4video_eng failed\n", __func__);
+ break;
+ }
+
+ rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: msm_dsi_cmd_dma_tx failed\n", __func__);
+ break;
+ }
+
+ msm_dsi_cmd_dma_rx(ctrl, rp, DSI_MAX_BYTES_TO_READ);
+
+ /* advance write cursor past this chunk's payload */
+ rp->data += DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE;
+ total += data_bytes;
+ if (total >= rlen)
+ break;
+
+ data_bytes = DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE;
+ pkt_size += data_bytes;
+ }
+
+ if (!rc) {
+ rp->data = rp->start; /* rewind for parsing */
+ rp->len = rlen + DSI_HOST_HDR_SIZE;
+ rc = msm_dsi_parse_rx_response(rp);
+ }
+
+ if (rc)
+ rp->len = 0;
+
+ return rc;
+}
+
+/*
+ * msm_dsi_cmds_rx() - read @rlen response bytes for command @cmds.
+ *
+ * Chooses the single-shot path for responses that fit in one max-size
+ * packet and the segmented multi-read path otherwise.
+ * Return: 0 on success, negative errno on failure.
+ */
+int msm_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_cmd_desc *cmds, int rlen)
+{
+ int rc;
+
+ if (rlen <= DSI_MAX_PKT_SIZE)
+ rc = msm_dsi_cmds_rx_1(ctrl, cmds, rlen);
+ else
+ rc = msm_dsi_cmds_rx_2(ctrl, cmds, rlen);
+
+ return rc;
+}
+
+/*
+ * msm_dsi_cmdlist_tx() - transmit a queued command request and invoke its
+ * completion callback (if any) with the transfer result.
+ */
+void msm_dsi_cmdlist_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dcs_cmd_req *req)
+{
+ int ret;
+
+ ret = msm_dsi_cmds_tx(ctrl, req->cmds, req->cmds_cnt);
+
+ if (req->cb)
+ req->cb(ret);
+}
+
+/*
+ * msm_dsi_cmdlist_rx() - execute a queued read request.
+ *
+ * Requires req->rbuf; the response bytes accumulated in ctrl->rx_buf are
+ * copied into it and the callback (if any) receives the read result code.
+ */
+void msm_dsi_cmdlist_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dcs_cmd_req *req)
+{
+ struct dsi_buf *rp;
+ int len = 0;
+
+ if (req->rbuf) {
+ rp = &ctrl->rx_buf;
+ len = msm_dsi_cmds_rx(ctrl, req->cmds, req->rlen);
+ memcpy(req->rbuf, rp->data, rp->len);
+ } else {
+ pr_err("%s: No rx buffer provided\n", __func__);
+ }
+
+ if (req->cb)
+ req->cb(len);
+}
+/*
+ * msm_dsi_cmdlist_commit() - pop the next queued command request and run it.
+ * @ctrl:     controller data
+ * @from_mdp: non-zero when called from the MDP kickoff path; only then is
+ *            cmd_mutex taken around the request.
+ *
+ * Refuses to run while the DSI link is off.  Votes for MDP clocks/bus and
+ * DSI clocks for the duration of the transfer, switches to HS mode unless
+ * the request asked for LP, and dispatches to the rx or tx path.
+ *
+ * Return: 0 on dispatch, -EINVAL if DSI is off or no request is queued.
+ */
+int msm_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
+{
+ struct dcs_cmd_req *req;
+ int dsi_on;
+ int ret = -EINVAL;
+
+ mutex_lock(&ctrl->mutex);
+ dsi_on = dsi_host_private->dsi_on;
+ mutex_unlock(&ctrl->mutex);
+ if (!dsi_on) {
+ pr_err("try to send DSI commands while dsi is off\n");
+ return ret;
+ }
+
+ if (from_mdp) /* from mdp kickoff */
+ mutex_lock(&ctrl->cmd_mutex);
+ req = mdss_dsi_cmdlist_get(ctrl, from_mdp);
+
+ if (!req) {
+ /* only release the lock if it was actually taken above */
+ if (from_mdp)
+ mutex_unlock(&ctrl->cmd_mutex);
+ return ret;
+ }
+ /*
+ * mdss interrupt is generated in mdp core clock domain
+ * mdp clock need to be enabled to receive dsi interrupt
+ * also, axi bus bandwidth need since dsi controller will
+ * fetch dcs commands from axi bus
+ */
+ mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P);
+ msm_dsi_clk_ctrl(&ctrl->panel_data, 1);
+
+ if (0 == (req->flags & CMD_REQ_LP_MODE))
+ dsi_set_tx_power_mode(0);
+
+ if (req->flags & CMD_REQ_RX)
+ msm_dsi_cmdlist_rx(ctrl, req);
+ else
+ msm_dsi_cmdlist_tx(ctrl, req);
+
+ if (0 == (req->flags & CMD_REQ_LP_MODE))
+ dsi_set_tx_power_mode(1);
+
+ msm_dsi_clk_ctrl(&ctrl->panel_data, 0);
+ mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+
+ if (from_mdp) /* from mdp kickoff */
+ mutex_unlock(&ctrl->cmd_mutex);
+ return 0;
+}
+
+/*
+ * msm_dsi_cal_clk_rate() - derive DSI clock rates from panel timings.
+ * @pdata:        panel data (timings, bpp, frame rate, lane enables)
+ * @bitclk_rate:  out: per-lane bit clock (u64; can exceed 32 bits)
+ * @dsiclk_rate:  out: dsi clock = byte clock * lanes
+ * @byteclk_rate: out: bit clock / 8
+ * @pclk_rate:    out: pixel clock recovered from byte clock and bpp
+ *
+ * bitclk = htotal * vtotal * fps * bpp / lanes.
+ * Return: 0 on success, -EINVAL when no data lane is enabled.
+ */
+static int msm_dsi_cal_clk_rate(struct mdss_panel_data *pdata,
+ u64 *bitclk_rate,
+ u32 *dsiclk_rate,
+ u32 *byteclk_rate,
+ u32 *pclk_rate)
+{
+ struct mdss_panel_info *pinfo;
+ struct mipi_panel_info *mipi;
+ u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+ int lanes;
+ u64 clk_rate;
+
+ pinfo = &pdata->panel_info;
+ mipi = &pdata->panel_info.mipi;
+
+ hbp = pdata->panel_info.lcdc.h_back_porch;
+ hfp = pdata->panel_info.lcdc.h_front_porch;
+ vbp = pdata->panel_info.lcdc.v_back_porch;
+ vfp = pdata->panel_info.lcdc.v_front_porch;
+ hspw = pdata->panel_info.lcdc.h_pulse_width;
+ vspw = pdata->panel_info.lcdc.v_pulse_width;
+ width = pdata->panel_info.xres;
+ height = pdata->panel_info.yres;
+
+ lanes = 0;
+ if (mipi->data_lane0)
+ lanes++;
+ if (mipi->data_lane1)
+ lanes++;
+ if (mipi->data_lane2)
+ lanes++;
+ if (mipi->data_lane3)
+ lanes++;
+ if (lanes == 0)
+ return -EINVAL;
+
+ *bitclk_rate = (width + hbp + hfp + hspw) * (height + vbp + vfp + vspw);
+ *bitclk_rate *= mipi->frame_rate;
+ *bitclk_rate *= pdata->panel_info.bpp;
+ do_div(*bitclk_rate, lanes);
+ clk_rate = *bitclk_rate;
+
+ do_div(clk_rate, 8U); /* 8 bits per byte-clock tick */
+ *byteclk_rate = (u32) clk_rate;
+ *dsiclk_rate = *byteclk_rate * lanes;
+ *pclk_rate = *byteclk_rate * lanes * 8 / pdata->panel_info.bpp;
+
+ pr_debug("dsiclk_rate=%u, byteclk=%u, pck_=%u\n",
+ *dsiclk_rate, *byteclk_rate, *pclk_rate);
+ return 0;
+}
+
+/*
+ * msm_dsi_on() - full power-on path for the DSI interface.
+ *
+ * Enables panel regulators (unless a dynamic mode switch is pending),
+ * resets and initializes the PHY, computes and applies clock rates, programs
+ * the video-mode timing or command-mode stream registers, resets and
+ * initializes the host, optionally forces the clock lane into HS, and sets
+ * the operating mode.  On regulator failure, already-enabled supplies are
+ * rolled back.
+ *
+ * Return: 0 on success, negative errno on regulator failure.
+ */
+static int msm_dsi_on(struct mdss_panel_data *pdata)
+{
+ int ret = 0, i;
+ u64 clk_rate;
+ struct mdss_panel_info *pinfo;
+ struct mipi_panel_info *mipi;
+ u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+ u32 ystride, bpp, data;
+ u32 dummy_xres, dummy_yres;
+ u64 bitclk_rate = 0; /* fixed: missing semicolon broke the build */
+ u32 byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0;
+ unsigned char *ctrl_base = dsi_host_private->dsi_base;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ pr_debug("msm_dsi_on\n");
+
+ pinfo = &pdata->panel_info;
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ mutex_lock(&ctrl_pdata->mutex);
+
+
+ if (!pdata->panel_info.dynamic_switch_pending) {
+ for (i = 0; !ret && (i < DSI_MAX_PM); i++) {
+ ret = msm_mdss_enable_vreg(
+ ctrl_pdata->power_data[i].vreg_config,
+ ctrl_pdata->power_data[i].num_vreg, 1);
+ if (ret) {
+ pr_err("%s: failed to enable vregs for %s\n",
+ __func__, __mdss_dsi_pm_name(i));
+ goto error_vreg;
+ }
+ }
+ }
+
+ msm_dsi_ahb_ctrl(1);
+ msm_dsi_phy_sw_reset(dsi_host_private->dsi_base);
+ msm_dsi_phy_init(dsi_host_private->dsi_base, pdata);
+
+ msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate,
+ &byteclk_rate, &pclk_rate);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate,
+ byteclk_rate, pclk_rate);
+ msm_dsi_prepare_clocks();
+ msm_dsi_clk_enable();
+
+ clk_rate = pdata->panel_info.clk_rate;
+ clk_rate = min(clk_rate, pdata->panel_info.clk_max);
+
+ hbp = pdata->panel_info.lcdc.h_back_porch;
+ hfp = pdata->panel_info.lcdc.h_front_porch;
+ vbp = pdata->panel_info.lcdc.v_back_porch;
+ vfp = pdata->panel_info.lcdc.v_front_porch;
+ hspw = pdata->panel_info.lcdc.h_pulse_width;
+ vspw = pdata->panel_info.lcdc.v_pulse_width;
+ width = pdata->panel_info.xres;
+ height = pdata->panel_info.yres;
+
+ mipi = &pdata->panel_info.mipi;
+ if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+ /* program active/total video timing windows */
+ dummy_xres = pdata->panel_info.lcdc.xres_pad;
+ dummy_yres = pdata->panel_info.lcdc.yres_pad;
+
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_H,
+ ((hspw + hbp + width + dummy_xres) << 16 |
+ (hspw + hbp)));
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_V,
+ ((vspw + vbp + height + dummy_yres) << 16 |
+ (vspw + vbp)));
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_TOTAL,
+ (vspw + vbp + height + dummy_yres +
+ vfp - 1) << 16 | (hspw + hbp +
+ width + dummy_xres + hfp - 1));
+
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_HSYNC, (hspw << 16));
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC, 0);
+ MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC_VPOS,
+ (vspw << 16));
+
+ } else { /* command mode */
+ if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+ bpp = 3;
+ else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+ bpp = 3;
+ else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+ bpp = 2;
+ else
+ bpp = 3; /* Default format set to RGB888 */
+
+ ystride = width * bpp + 1;
+
+ /* both MDP streams carry DCS long-write pixel data */
+ data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_CTRL,
+ data);
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_CTRL,
+ data);
+
+ data = height << 16 | width;
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_TOTAL,
+ data);
+ MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_TOTAL,
+ data);
+ }
+
+ msm_dsi_sw_reset();
+ msm_dsi_host_init(pdata);
+
+ if (mipi->force_clk_lane_hs) {
+ u32 tmp;
+
+ tmp = MIPI_INP(ctrl_base + DSI_LANE_CTRL);
+ tmp |= (1<<28); /* force clock lane into HS */
+ MIPI_OUTP(ctrl_base + DSI_LANE_CTRL, tmp);
+ wmb(); /* ensure write is finished before progressing */
+ }
+
+ msm_dsi_op_mode_config(mipi->mode, pdata);
+
+ msm_dsi_set_irq(ctrl_pdata, DSI_INTR_ERROR_MASK);
+ dsi_host_private->clk_count = 1;
+ dsi_host_private->dsi_on = 1;
+
+error_vreg:
+ if (ret) {
+ /* roll back supplies enabled before the failure */
+ for (; i >= 0; i--)
+ msm_mdss_enable_vreg(
+ ctrl_pdata->power_data[i].vreg_config,
+ ctrl_pdata->power_data[i].num_vreg, 0);
+ }
+
+ mutex_unlock(&ctrl_pdata->mutex);
+ return ret;
+}
+
+/*
+ * msm_dsi_off() - power-down path for the DSI interface.
+ *
+ * Masks IRQs, disables the controller, drops clock rates, disables and
+ * unprepares clocks, powers off the PHY and AHB, and (unless a dynamic
+ * switch is pending) disables the panel regulators in reverse order.
+ *
+ * Return: 0 on success; the last regulator error is returned but the
+ * shutdown continues regardless.
+ */
+static int msm_dsi_off(struct mdss_panel_data *pdata)
+{
+ int ret = 0, i;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pr_debug("msm_dsi_off\n");
+ mutex_lock(&ctrl_pdata->mutex);
+ msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+ msm_dsi_controller_cfg(0);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0);
+ msm_dsi_clk_disable();
+ msm_dsi_unprepare_clocks();
+ msm_dsi_phy_off(dsi_host_private->dsi_base);
+ msm_dsi_ahb_ctrl(0);
+
+ if (!pdata->panel_info.dynamic_switch_pending) {
+ for (i = DSI_MAX_PM - 1; i >= 0; i--) {
+ ret = msm_mdss_enable_vreg(
+ ctrl_pdata->power_data[i].vreg_config,
+ ctrl_pdata->power_data[i].num_vreg, 0);
+ if (ret)
+ pr_err("%s: failed to disable vregs for %s\n",
+ __func__, __mdss_dsi_pm_name(i));
+ }
+ }
+ dsi_host_private->clk_count = 0;
+ dsi_host_private->dsi_on = 0;
+
+ mutex_unlock(&ctrl_pdata->mutex);
+
+ return ret;
+}
+
+/*
+ * msm_dsi_cont_on() - take over a display left on by the bootloader
+ * (continuous splash).
+ *
+ * Enables regulators, marks the panel powered, asserts panel reset, then
+ * brings up AHB and clocks and unmasks error interrupts without
+ * reprogramming the already-running host.  Regulators are rolled back on
+ * failure.  Return: 0 on success, negative errno otherwise.
+ */
+static int msm_dsi_cont_on(struct mdss_panel_data *pdata)
+{
+ struct mdss_panel_info *pinfo;
+ int ret = 0, i;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+
+
+ pr_debug("%s:\n", __func__);
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pinfo = &pdata->panel_info;
+ mutex_lock(&ctrl_pdata->mutex);
+ for (i = 0; !ret && (i < DSI_MAX_PM); i++) {
+ ret = msm_mdss_enable_vreg(
+ ctrl_pdata->power_data[i].vreg_config,
+ ctrl_pdata->power_data[i].num_vreg, 1);
+ if (ret) {
+ pr_err("%s: failed to enable vregs for %s\n",
+ __func__, __mdss_dsi_pm_name(i));
+ goto error_vreg;
+ }
+ }
+ pinfo->panel_power_state = MDSS_PANEL_POWER_ON;
+ ret = mdss_dsi_panel_reset(pdata, 1);
+ if (ret) {
+ pr_err("%s: Panel reset failed\n", __func__);
+ mutex_unlock(&ctrl_pdata->mutex);
+ return ret;
+ }
+
+ msm_dsi_ahb_ctrl(1);
+ msm_dsi_prepare_clocks();
+ msm_dsi_clk_enable();
+ msm_dsi_set_irq(ctrl_pdata, DSI_INTR_ERROR_MASK);
+ dsi_host_private->clk_count = 1;
+ dsi_host_private->dsi_on = 1;
+
+error_vreg:
+ if (ret) {
+ /* roll back supplies enabled before the failure */
+ for (; i >= 0; i--)
+ msm_mdss_enable_vreg(
+ ctrl_pdata->power_data[i].vreg_config,
+ ctrl_pdata->power_data[i].num_vreg, 0);
+ }
+
+ mutex_unlock(&ctrl_pdata->mutex);
+ return ret;
+}
+
+/*
+ * msm_dsi_read_status() - queue a 1-byte read of the panel status
+ * register(s) configured in ctrl->status_cmds, landing in status_buf.
+ * Return: result of mdss_dsi_cmdlist_put().
+ */
+static int msm_dsi_read_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct dcs_cmd_req cmdreq;
+
+ memset(&cmdreq, 0, sizeof(cmdreq));
+ cmdreq.cmds = ctrl->status_cmds.cmds;
+ cmdreq.cmds_cnt = ctrl->status_cmds.cmd_cnt;
+ cmdreq.flags = CMD_REQ_COMMIT | CMD_REQ_RX;
+ cmdreq.rlen = 1;
+ cmdreq.cb = NULL;
+ cmdreq.rbuf = ctrl->status_buf.data;
+
+ return mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+
+/**
+ * msm_dsi_reg_status_check() - Check dsi panel status through reg read
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check the panel status through reading the
+ * status register from the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+int msm_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int ret = 0;
+
+ if (ctrl_pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: Checking Register status\n", __func__);
+
+ msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 1);
+
+ /* switch to HS for the read when the status cmds are HS-linked */
+ if (ctrl_pdata->status_cmds.link_state == DSI_HS_MODE)
+ dsi_set_tx_power_mode(0);
+
+ ret = msm_dsi_read_status(ctrl_pdata);
+
+ if (ctrl_pdata->status_cmds.link_state == DSI_HS_MODE)
+ dsi_set_tx_power_mode(1);
+
+ /* compare the byte(s) read back against the expected value */
+ if (ret == 0) {
+ if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+ ctrl_pdata->status_value, 0)) {
+ pr_err("%s: Read back value from panel is incorrect\n",
+ __func__);
+ ret = -EINVAL;
+ } else {
+ ret = 1; /* positive = panel healthy */
+ }
+ } else {
+ pr_err("%s: Read status register returned error\n", __func__);
+ }
+
+ msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 0);
+ pr_debug("%s: Read register done with ret: %d\n", __func__, ret);
+
+ return ret;
+}
+
+/**
+ * msm_dsi_bta_status_check() - Check dsi panel status through bta check
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check status of the panel using bta check
+ * for the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+static int msm_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int ret = 0;
+
+ if (ctrl_pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return 0;
+ }
+
+ mutex_lock(&ctrl_pdata->cmd_mutex);
+ msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 1);
+ msm_dsi_cmd_mdp_busy(ctrl_pdata);
+ msm_dsi_set_irq(ctrl_pdata, DSI_INTR_BTA_DONE_MASK);
+ reinit_completion(&ctrl_pdata->bta_comp);
+
+ /* BTA trigger */
+ MIPI_OUTP(dsi_host_private->dsi_base + DSI_CMD_MODE_BTA_SW_TRIGGER,
+ 0x01);
+ wmb(); /* ensure write is finished before progressing */
+ /* 100 ms for the panel to ack the bus turn-around */
+ ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
+ HZ/10);
+ msm_dsi_clear_irq(ctrl_pdata, DSI_INTR_BTA_DONE_MASK);
+ msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 0);
+ mutex_unlock(&ctrl_pdata->cmd_mutex);
+
+ if (ret <= 0)
+ pr_err("%s: DSI BTA error: %i\n", __func__, __LINE__);
+
+ pr_debug("%s: BTA done with ret: %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * msm_dsi_debug_enable_clock() - debugfs clock hook: chain to the previous
+ * handler (saved in msm_dsi_debug_init()) and gate the DSI AHB clock.
+ */
+static void msm_dsi_debug_enable_clock(int on)
+{
+ if (dsi_host_private->debug_enable_clk)
+ dsi_host_private->debug_enable_clk(on);
+
+ if (on)
+ msm_dsi_ahb_ctrl(1);
+ else
+ msm_dsi_ahb_ctrl(0);
+}
+
+/*
+ * msm_dsi_debug_init() - register the DSI register space with mdss debugfs.
+ *
+ * Saves the existing debug clock hook, installs our chained version, and
+ * exposes the "dsi0" register block.  No-op (returns 0) when mdss_res is
+ * not available.  Return: result of mdss_debug_register_base().
+ */
+static int msm_dsi_debug_init(void)
+{
+ int rc;
+
+ if (!mdss_res)
+ return 0;
+
+ dsi_host_private->debug_enable_clk =
+ mdss_res->debug_inf.debug_enable_clock;
+
+ mdss_res->debug_inf.debug_enable_clock = msm_dsi_debug_enable_clock;
+
+
+ rc = mdss_debug_register_base("dsi0",
+ dsi_host_private->dsi_base,
+ dsi_host_private->dsi_reg_size,
+ NULL);
+
+ return rc;
+}
+
+/*
+ * dsi_get_panel_cfg() - fetch the bootloader-provided DSI panel config
+ * string into @panel_cfg (MDSS_MAX_PANEL_LEN buffer).
+ *
+ * Return: copied string length, 0 when no config exists (panel_cfg set to
+ * empty), or a negative PTR_ERR/MDSS_PANEL_INTF_INVALID on error.
+ */
+static int dsi_get_panel_cfg(char *panel_cfg)
+{
+ int rc;
+ struct mdss_panel_cfg *pan_cfg = NULL;
+
+ if (!panel_cfg)
+ return MDSS_PANEL_INTF_INVALID;
+
+ pan_cfg = mdp3_panel_intf_type(MDSS_PANEL_INTF_DSI);
+ if (IS_ERR(pan_cfg)) {
+ panel_cfg[0] = 0;
+ return PTR_ERR(pan_cfg);
+ } else if (!pan_cfg) {
+ panel_cfg[0] = 0;
+ return 0;
+ }
+
+ pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__,
+ pan_cfg->arg_cfg);
+ rc = strlcpy(panel_cfg, pan_cfg->arg_cfg,
+ MDSS_MAX_PANEL_LEN);
+ return rc;
+}
+
+/*
+ * dsi_pref_prim_panel() - resolve the default panel device-tree node via
+ * the "qcom,dsi-pref-prim-pan" phandle on the controller node.
+ * Return: panel node (caller owns the reference) or NULL if absent.
+ */
+static struct device_node *dsi_pref_prim_panel(
+ struct platform_device *pdev)
+{
+ struct device_node *dsi_pan_node = NULL;
+
+ pr_debug("%s:%d: Select primary panel from dt\n",
+ __func__, __LINE__);
+ dsi_pan_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,dsi-pref-prim-pan", 0);
+ if (!dsi_pan_node)
+ pr_err("%s:can't find panel phandle\n", __func__);
+
+ return dsi_pan_node;
+}
+
+/**
+ * dsi_find_panel_of_node(): find device node of dsi panel
+ * @pdev: platform_device of the dsi ctrl node
+ * @panel_cfg: string containing intf specific config data
+ *
+ * Function finds the panel device node using the interface
+ * specific configuration data. This configuration data is
+ * could be derived from the result of bootloader's GCDB
+ * panel detection mechanism. If such config data doesn't
+ * exist then this panel returns the default panel configured
+ * in the device tree.
+ *
+ * returns pointer to panel node on success, NULL on error.
+ */
+static struct device_node *dsi_find_panel_of_node(
+ struct platform_device *pdev, char *panel_cfg)
+{
+ int l;
+ char *panel_name;
+ struct device_node *dsi_pan_node = NULL, *mdss_node = NULL;
+
+ if (!panel_cfg)
+ return NULL;
+
+ l = strlen(panel_cfg);
+ if (!l) {
+ /* no panel cfg chg, parse dt */
+ pr_debug("%s:%d: no cmd line cfg present\n",
+ __func__, __LINE__);
+ dsi_pan_node = dsi_pref_prim_panel(pdev);
+ } else {
+ /* only controller id 0 exists on this target */
+ if (panel_cfg[0] != '0') {
+ pr_err("%s:%d:ctrl id=[%d] not supported\n",
+ __func__, __LINE__, panel_cfg[0]);
+ return NULL;
+ }
+ /*
+ * skip first two chars '<dsi_ctrl_id>' and
+ * ':' to get to the panel name
+ */
+ panel_name = panel_cfg + 2;
+ pr_debug("%s:%d:%s:%s\n", __func__, __LINE__,
+ panel_cfg, panel_name);
+
+ mdss_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,mdss-mdp", 0);
+
+ if (!mdss_node) {
+ pr_err("%s: %d: mdss_node null\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+ dsi_pan_node = of_find_node_by_name(mdss_node,
+ panel_name);
+ if (!dsi_pan_node) {
+ /* named panel missing: fall back to DT default */
+ pr_err("%s: invalid pan node\n",
+ __func__);
+ dsi_pan_node = dsi_pref_prim_panel(pdev);
+ }
+ }
+ return dsi_pan_node;
+}
+
+/*
+ * msm_dsi_clk_ctrl() - reference-counted DSI clock enable/disable.
+ * @pdata:  panel data (used to derive clock rates on first enable)
+ * @enable: non-zero to take a clock reference, zero to drop one.
+ *
+ * On the first reference the AHB clock is enabled, rates are recomputed
+ * and the link clocks are prepared/enabled; on the last release IRQs are
+ * masked and everything is torn down in reverse.  Serialized by
+ * ctrl->mutex.  Return: always 0.
+ */
+static int msm_dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable)
+{
+ /*
+ * bitclk_rate must be u64: msm_dsi_cal_clk_rate() takes a u64 *
+ * and runs do_div() on it; passing a u32 * corrupts the stack.
+ */
+ u64 bitclk_rate = 0;
+ u32 byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ pr_debug("%s:\n", __func__);
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ mutex_lock(&ctrl_pdata->mutex);
+
+ if (enable) {
+ dsi_host_private->clk_count++;
+ if (dsi_host_private->clk_count == 1) {
+ msm_dsi_ahb_ctrl(1);
+ msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate,
+ &byteclk_rate, &pclk_rate);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate,
+ byteclk_rate, pclk_rate);
+ msm_dsi_prepare_clocks();
+ msm_dsi_clk_enable();
+ }
+ } else {
+ dsi_host_private->clk_count--;
+ if (dsi_host_private->clk_count == 0) {
+ msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+ msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0);
+ msm_dsi_clk_disable();
+ msm_dsi_unprepare_clocks();
+ msm_dsi_ahb_ctrl(0);
+ }
+ }
+ mutex_unlock(&ctrl_pdata->mutex);
+ return 0;
+}
+
+/*
+ * msm_dsi_ctrl_init() - one-time setup of the controller software state.
+ *
+ * Initializes completions, locks and the tx/rx/status DMA buffers, wires
+ * up the cmdlist commit hook, and selects the ESD check implementation
+ * (register read or BTA; BTA is the fallback for ESD_MAX).
+ */
+void msm_dsi_ctrl_init(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ init_completion(&ctrl->dma_comp);
+ init_completion(&ctrl->mdp_comp);
+ init_completion(&ctrl->bta_comp);
+ init_completion(&ctrl->video_comp);
+ spin_lock_init(&ctrl->irq_lock);
+ spin_lock_init(&ctrl->mdp_lock);
+ mutex_init(&ctrl->mutex);
+ mutex_init(&ctrl->cmd_mutex);
+ complete(&ctrl->mdp_comp); /* start with mdp marked idle */
+ dsi_buf_alloc(&ctrl->tx_buf, SZ_4K);
+ dsi_buf_alloc(&ctrl->rx_buf, SZ_4K);
+ dsi_buf_alloc(&ctrl->status_buf, SZ_4K);
+ ctrl->cmdlist_commit = msm_dsi_cmdlist_commit;
+ ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode;
+
+ if (ctrl->status_mode == ESD_REG)
+ ctrl->check_status = msm_dsi_reg_status_check;
+ else if (ctrl->status_mode == ESD_BTA)
+ ctrl->check_status = msm_dsi_bta_status_check;
+
+ if (ctrl->status_mode == ESD_MAX) {
+ pr_err("%s: Using default BTA for ESD check\n", __func__);
+ ctrl->check_status = msm_dsi_bta_status_check;
+ }
+}
+
+/*
+ * msm_dsi_parse_lane_swap() - read "qcom,lane-map" from the DT node and
+ * translate it to a DSI_LANE_MAP_* value in *dlane_swap.
+ * Defaults to DSI_LANE_MAP_0123 when the property is absent or unknown.
+ */
+static void msm_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap)
+{
+ const char *data;
+
+ *dlane_swap = DSI_LANE_MAP_0123;
+ data = of_get_property(np, "qcom,lane-map", NULL);
+ if (data) {
+ if (!strcmp(data, "lane_map_3012"))
+ *dlane_swap = DSI_LANE_MAP_3012;
+ else if (!strcmp(data, "lane_map_2301"))
+ *dlane_swap = DSI_LANE_MAP_2301;
+ else if (!strcmp(data, "lane_map_1230"))
+ *dlane_swap = DSI_LANE_MAP_1230;
+ else if (!strcmp(data, "lane_map_0321"))
+ *dlane_swap = DSI_LANE_MAP_0321;
+ else if (!strcmp(data, "lane_map_1032"))
+ *dlane_swap = DSI_LANE_MAP_1032;
+ else if (!strcmp(data, "lane_map_2103"))
+ *dlane_swap = DSI_LANE_MAP_2103;
+ else if (!strcmp(data, "lane_map_3210"))
+ *dlane_swap = DSI_LANE_MAP_3210;
+ }
+}
+
+/*
+ * msm_dsi_probe() - probe routine for the DSI v2 controller platform device.
+ * @pdev: platform device carrying the DSI controller DT node
+ *
+ * Maps the controller registers, locates the panel device node, initializes
+ * panel/controller configuration, supplies and IRQs, and finally registers
+ * the panel device and the DSI interface callbacks.
+ *
+ * Returns 0 on success or a negative errno; partially acquired resources
+ * are released through the goto ladder at the bottom.
+ */
+static int msm_dsi_probe(struct platform_device *pdev)
+{
+	struct dsi_interface intf;
+	char panel_cfg[MDSS_MAX_PANEL_LEN];
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int rc = 0;
+	struct device_node *dsi_pan_node = NULL;
+	bool cmd_cfg_cont_splash = false;
+	struct resource *mdss_dsi_mres;
+	int i;
+
+	pr_debug("%s\n", __func__);
+
+	rc = msm_dsi_init();
+	if (rc)
+		return rc;
+
+	if (!pdev->dev.of_node) {
+		pr_err("%s: Device node is not accessible\n", __func__);
+		rc = -ENODEV;
+		goto error_no_mem;
+	}
+	pdev->id = 0;
+
+	/* Reuse drvdata if a previous (deferred) probe already allocated it. */
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		ctrl_pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct mdss_dsi_ctrl_pdata), GFP_KERNEL);
+		if (!ctrl_pdata) {
+			rc = -ENOMEM;
+			goto error_no_mem;
+		}
+		platform_set_drvdata(pdev, ctrl_pdata);
+	}
+
+	ctrl_pdata->mdss_util = mdss_get_util_intf();
+	/*
+	 * NOTE(review): the value just assigned is ctrl_pdata->mdss_util,
+	 * but the check below tests mdp3_res->mdss_util instead, and the
+	 * direct return bypasses the msm_dsi_deinit() cleanup done by the
+	 * error ladder — confirm whether this is intentional.
+	 */
+	if (mdp3_res->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mdss_dsi_mres) {
+		pr_err("%s:%d unable to get the MDSS reg resources",
+			__func__, __LINE__);
+		rc = -ENOMEM;
+		goto error_io_resource;
+	} else {
+		dsi_host_private->dsi_reg_size = resource_size(mdss_dsi_mres);
+		dsi_host_private->dsi_base = ioremap(mdss_dsi_mres->start,
+			dsi_host_private->dsi_reg_size);
+		if (!dsi_host_private->dsi_base) {
+			pr_err("%s:%d unable to remap dsi resources",
+				__func__, __LINE__);
+			rc = -ENOMEM;
+			goto error_io_resource;
+		}
+	}
+
+	/* The same pointer is reused below for the IRQ resource. */
+	mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mdss_dsi_mres || mdss_dsi_mres->start == 0) {
+		pr_err("%s:%d unable to get the MDSS irq resources",
+			__func__, __LINE__);
+		rc = -ENODEV;
+		goto error_irq_resource;
+	}
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+		goto error_platform_pop;
+	}
+
+	/* DSI panels can be different between controllers */
+	rc = dsi_get_panel_cfg(panel_cfg);
+	if (!rc)
+		/* dsi panel cfg not present */
+		pr_warn("%s:%d:dsi specific cfg not present\n",
+			__func__, __LINE__);
+
+	/* find panel device node */
+	dsi_pan_node = dsi_find_panel_of_node(pdev, panel_cfg);
+	if (!dsi_pan_node) {
+		pr_err("%s: can't find panel node %s\n", __func__,
+			panel_cfg);
+		/*
+		 * NOTE(review): rc still holds dsi_get_panel_cfg()'s
+		 * (non-negative) return value here, so probe may not report
+		 * a failure code on this path — confirm.
+		 */
+		goto error_pan_node;
+	}
+
+	cmd_cfg_cont_splash = mdp3_panel_get_boot_cfg() ? true : false;
+
+	rc = mdss_dsi_panel_init(dsi_pan_node, ctrl_pdata, cmd_cfg_cont_splash);
+	if (rc) {
+		pr_err("%s: dsi panel init failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	rc = dsi_ctrl_config_init(pdev, ctrl_pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to parse mdss dtsi rc=%d\n",
+			__func__, rc);
+		goto error_pan_node;
+	}
+
+	msm_dsi_parse_lane_swap(pdev->dev.of_node, &(ctrl_pdata->dlane_swap));
+
+	/* Bring up regulators/clocks for every power module. */
+	for (i = 0; i < DSI_MAX_PM; i++) {
+		rc = msm_dsi_io_init(pdev, &(ctrl_pdata->power_data[i]));
+		if (rc) {
+			dev_err(&pdev->dev, "%s: failed to init IO for %s\n",
+				__func__, __mdss_dsi_pm_name(i));
+			goto error_io_init;
+		}
+	}
+
+	pr_debug("%s: Dsi Ctrl->0 initialized\n", __func__);
+
+	/* Publish the controller callbacks to the DSI interface layer. */
+	dsi_host_private->dis_dev = pdev->dev;
+	intf.on = msm_dsi_on;
+	intf.off = msm_dsi_off;
+	intf.cont_on = msm_dsi_cont_on;
+	intf.clk_ctrl = msm_dsi_clk_ctrl;
+	intf.op_mode_config = msm_dsi_op_mode_config;
+	intf.index = 0;
+	intf.private = NULL;
+	dsi_register_interface(&intf);
+
+	msm_dsi_debug_init();
+
+	msm_dsi_ctrl_init(ctrl_pdata);
+
+	rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start,
+		ctrl_pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to init irq, rc=%d\n",
+			__func__, rc);
+		goto error_irq_init;
+	}
+
+	rc = dsi_panel_device_register_v2(pdev, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: dsi panel dev reg failed\n", __func__);
+		goto error_device_register;
+	}
+	pr_debug("%s success\n", __func__);
+	return 0;
+/* Unwind in reverse order of acquisition. */
+error_device_register:
+	kfree(ctrl_pdata->dsi_hw->irq_info);
+	kfree(ctrl_pdata->dsi_hw);
+error_irq_init:
+	for (i = DSI_MAX_PM - 1; i >= 0; i--)
+		msm_dsi_io_deinit(pdev, &(ctrl_pdata->power_data[i]));
+error_io_init:
+	dsi_ctrl_config_deinit(pdev, ctrl_pdata);
+error_pan_node:
+	of_node_put(dsi_pan_node);
+error_platform_pop:
+	msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+error_irq_resource:
+	if (dsi_host_private->dsi_base) {
+		iounmap(dsi_host_private->dsi_base);
+		dsi_host_private->dsi_base = NULL;
+	}
+error_io_resource:
+	devm_kfree(&pdev->dev, ctrl_pdata);
+error_no_mem:
+	msm_dsi_deinit();
+
+	return rc;
+}
+
+/*
+ * msm_dsi_remove() - undo msm_dsi_probe() in reverse order of acquisition.
+ * @pdev: platform device being removed
+ *
+ * Returns 0 on success, -ENODEV when no driver data is attached.
+ */
+static int msm_dsi_remove(struct platform_device *pdev)
+{
+	int i;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+
+	if (!ctrl_pdata) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+	for (i = DSI_MAX_PM - 1; i >= 0; i--)
+		msm_dsi_io_deinit(pdev, &(ctrl_pdata->power_data[i]));
+	dsi_ctrl_config_deinit(pdev, ctrl_pdata);
+	iounmap(dsi_host_private->dsi_base);
+	dsi_host_private->dsi_base = NULL;
+	msm_dsi_deinit();
+	devm_kfree(&pdev->dev, ctrl_pdata);
+
+	return 0;
+}
+
+/* Device-tree match table for the DSI v2 controller. */
+static const struct of_device_id msm_dsi_v2_dt_match[] = {
+	{.compatible = "qcom,msm-dsi-v2"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_dsi_v2_dt_match);
+
+static struct platform_driver msm_dsi_v2_driver = {
+	.probe = msm_dsi_probe,
+	.remove = msm_dsi_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "qcom,dsi-panel-v2",
+		.of_match_table = msm_dsi_v2_dt_match,
+	},
+};
+
+/* Thin wrapper so the init path has a single registration point. */
+static int msm_dsi_v2_register_driver(void)
+{
+	return platform_driver_register(&msm_dsi_v2_driver);
+}
+
+/* Module entry: register the DSI v2 platform driver. */
+static int __init msm_dsi_v2_driver_init(void)
+{
+	int ret;
+
+	ret = msm_dsi_v2_register_driver();
+	if (ret) {
+		pr_err("msm_dsi_v2_register_driver() failed!\n");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(msm_dsi_v2_driver_init);
+
+/* Module exit: unregister the DSI v2 platform driver. */
+static void __exit msm_dsi_v2_driver_cleanup(void)
+{
+	platform_driver_unregister(&msm_dsi_v2_driver);
+}
+module_exit(msm_dsi_v2_driver_cleanup);
diff --git a/drivers/video/fbdev/msm/dsi_host_v2.h b/drivers/video/fbdev/msm/dsi_host_v2.h
new file mode 100644
index 0000000..d61bcf9
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_host_v2.h
@@ -0,0 +1,178 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef DSI_HOST_V2_H
+#define DSI_HOST_V2_H
+
+#include <linux/bitops.h>
+
+#define DSI_INTR_ERROR_MASK BIT(25)
+#define DSI_INTR_ERROR BIT(24)
+#define DSI_INTR_BTA_DONE_MASK BIT(21)
+#define DSI_INTR_BTA_DONE BIT(20)
+#define DSI_INTR_VIDEO_DONE_MASK BIT(17)
+#define DSI_INTR_VIDEO_DONE BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK BIT(9)
+#define DSI_INTR_CMD_MDP_DONE BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK BIT(1)
+#define DSI_INTR_CMD_DMA_DONE BIT(0)
+#define DSI_INTR_ALL_MASK 0x2220202
+
+#define DSI_BTA_TERM BIT(1)
+
+#define DSI_CTRL 0x0000
+#define DSI_STATUS 0x0004
+#define DSI_FIFO_STATUS 0x0008
+#define DSI_VIDEO_MODE_CTRL 0x000C
+#define DSI_VIDEO_MODE_DATA_CTRL 0x001C
+#define DSI_VIDEO_MODE_ACTIVE_H 0x0020
+#define DSI_VIDEO_MODE_ACTIVE_V 0x0024
+#define DSI_VIDEO_MODE_TOTAL 0x0028
+#define DSI_VIDEO_MODE_HSYNC 0x002C
+#define DSI_VIDEO_MODE_VSYNC 0x0030
+#define DSI_VIDEO_MODE_VSYNC_VPOS 0x0034
+#define DSI_COMMAND_MODE_DMA_CTRL 0x0038
+#define DSI_COMMAND_MODE_MDP_CTRL 0x003C
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL 0x0040
+#define DSI_DMA_CMD_OFFSET 0x0044
+#define DSI_DMA_CMD_LENGTH 0x0048
+#define DSI_DMA_FIFO_CTRL 0x004C
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL 0x0054
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL 0x0058
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL 0x005C
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL 0x0060
+#define DSI_ACK_ERR_STATUS 0x0064
+#define DSI_RDBK_DATA0 0x0068
+#define DSI_RDBK_DATA1 0x006C
+#define DSI_RDBK_DATA2 0x0070
+#define DSI_RDBK_DATA3 0x0074
+#define DSI_RDBK_DATATYPE0 0x0078
+#define DSI_RDBK_DATATYPE1 0x007C
+#define DSI_TRIG_CTRL 0x0080
+#define DSI_EXT_MUX 0x0084
+#define DSI_EXT_TE_PULSE_DETECT_CTRL 0x0088
+#define DSI_CMD_MODE_DMA_SW_TRIGGER 0x008C
+#define DSI_CMD_MODE_MDP_SW_TRIGGER 0x0090
+#define DSI_CMD_MODE_BTA_SW_TRIGGER 0x0094
+#define DSI_RESET_SW_TRIGGER 0x0098
+#define DSI_LANE_CTRL 0x00A8
+#define DSI_LANE_SWAP_CTRL 0x00AC
+#define DSI_DLN0_PHY_ERR 0x00B0
+#define DSI_TIMEOUT_STATUS 0x00BC
+#define DSI_CLKOUT_TIMING_CTRL 0x00C0
+#define DSI_EOT_PACKET 0x00C4
+#define DSI_EOT_PACKET_CTRL 0x00C8
+#define DSI_ERR_INT_MASK0 0x0108
+#define DSI_INT_CTRL 0x010c
+#define DSI_SOFT_RESET 0x0114
+#define DSI_CLK_CTRL 0x0118
+#define DSI_CLK_STATUS 0x011C
+#define DSI_PHY_SW_RESET 0x0128
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL 0x0190
+#define DSI_VERSION 0x01F0
+
+#define DSI_DSIPHY_PLL_CTRL_0 0x0200
+#define DSI_DSIPHY_PLL_CTRL_1 0x0204
+#define DSI_DSIPHY_PLL_CTRL_2 0x0208
+#define DSI_DSIPHY_PLL_CTRL_3 0x020C
+#define DSI_DSIPHY_PLL_CTRL_4 0x0210
+#define DSI_DSIPHY_PLL_CTRL_5 0x0214
+#define DSI_DSIPHY_PLL_CTRL_6 0x0218
+#define DSI_DSIPHY_PLL_CTRL_7 0x021C
+#define DSI_DSIPHY_PLL_CTRL_8 0x0220
+#define DSI_DSIPHY_PLL_CTRL_9 0x0224
+#define DSI_DSIPHY_PLL_CTRL_10 0x0228
+#define DSI_DSIPHY_PLL_CTRL_11 0x022C
+#define DSI_DSIPHY_PLL_CTRL_12 0x0230
+#define DSI_DSIPHY_PLL_CTRL_13 0x0234
+#define DSI_DSIPHY_PLL_CTRL_14 0x0238
+#define DSI_DSIPHY_PLL_CTRL_15 0x023C
+#define DSI_DSIPHY_PLL_CTRL_16 0x0240
+#define DSI_DSIPHY_PLL_CTRL_17 0x0244
+#define DSI_DSIPHY_PLL_CTRL_18 0x0248
+#define DSI_DSIPHY_PLL_CTRL_19 0x024C
+#define DSI_DSIPHY_ANA_CTRL0 0x0260
+#define DSI_DSIPHY_ANA_CTRL1 0x0264
+#define DSI_DSIPHY_ANA_CTRL2 0x0268
+#define DSI_DSIPHY_ANA_CTRL3 0x026C
+#define DSI_DSIPHY_ANA_CTRL4 0x0270
+#define DSI_DSIPHY_ANA_CTRL5 0x0274
+#define DSI_DSIPHY_ANA_CTRL6 0x0278
+#define DSI_DSIPHY_ANA_CTRL7 0x027C
+#define DSI_DSIPHY_PLL_RDY 0x0280
+#define DSI_DSIPHY_PLL_ANA_STATUS0 0x0294
+#define DSI_DSIPHY_PLL_ANA_STATUS1 0x0298
+#define DSI_DSIPHY_PLL_ANA_STATUS2 0x029C
+#define DSI_DSIPHY_LN0_CFG0 0x0300
+#define DSI_DSIPHY_LN0_CFG1 0x0304
+#define DSI_DSIPHY_LN0_CFG2 0x0308
+#define DSI_DSIPHY_LN1_CFG0 0x0340
+#define DSI_DSIPHY_LN1_CFG1 0x0344
+#define DSI_DSIPHY_LN1_CFG2 0x0348
+#define DSI_DSIPHY_LN2_CFG0 0x0380
+#define DSI_DSIPHY_LN2_CFG1 0x0384
+#define DSI_DSIPHY_LN2_CFG2 0x0388
+#define DSI_DSIPHY_LN3_CFG0 0x03C0
+#define DSI_DSIPHY_LN3_CFG1 0x03C4
+#define DSI_DSIPHY_LN3_CFG2 0x03C8
+#define DSI_DSIPHY_LNCK_CFG0 0x0400
+#define DSI_DSIPHY_LNCK_CFG1 0x0404
+#define DSI_DSIPHY_LNCK_CFG2 0x0408
+#define DSI_DSIPHY_TIMING_CTRL_0 0x0440
+#define DSI_DSIPHY_TIMING_CTRL_1 0x0444
+#define DSI_DSIPHY_TIMING_CTRL_2 0x0448
+#define DSI_DSIPHY_TIMING_CTRL_3 0x044C
+#define DSI_DSIPHY_TIMING_CTRL_4 0x0450
+#define DSI_DSIPHY_TIMING_CTRL_5 0x0454
+#define DSI_DSIPHY_TIMING_CTRL_6 0x0458
+#define DSI_DSIPHY_TIMING_CTRL_7 0x045C
+#define DSI_DSIPHY_TIMING_CTRL_8 0x0460
+#define DSI_DSIPHY_TIMING_CTRL_9 0x0464
+#define DSI_DSIPHY_TIMING_CTRL_10 0x0468
+#define DSI_DSIPHY_TIMING_CTRL_11 0x046C
+#define DSI_DSIPHY_CTRL_0 0x0470
+#define DSI_DSIPHY_CTRL_1 0x0474
+#define DSI_DSIPHY_CTRL_2 0x0478
+#define DSI_DSIPHY_CTRL_3 0x047C
+#define DSI_DSIPHY_STRENGTH_CTRL_0 0x0480
+#define DSI_DSIPHY_STRENGTH_CTRL_1 0x0484
+#define DSI_DSIPHY_STRENGTH_CTRL_2 0x0488
+#define DSI_DSIPHY_LDO_CNTRL 0x04B0
+#define DSI_DSIPHY_REGULATOR_CTRL_0 0x0500
+#define DSI_DSIPHY_REGULATOR_CTRL_1 0x0504
+#define DSI_DSIPHY_REGULATOR_CTRL_2 0x0508
+#define DSI_DSIPHY_REGULATOR_CTRL_3 0x050C
+#define DSI_DSIPHY_REGULATOR_CTRL_4 0x0510
+#define DSI_DSIPHY_REGULATOR_TEST 0x0514
+#define DSI_DSIPHY_REGULATOR_CAL_PWR_CFG 0x0518
+#define DSI_DSIPHY_CAL_HW_TRIGGER 0x0528
+#define DSI_DSIPHY_CAL_SW_CFG0 0x052C
+#define DSI_DSIPHY_CAL_SW_CFG1 0x0530
+#define DSI_DSIPHY_CAL_SW_CFG2 0x0534
+#define DSI_DSIPHY_CAL_HW_CFG0 0x0538
+#define DSI_DSIPHY_CAL_HW_CFG1 0x053C
+#define DSI_DSIPHY_CAL_HW_CFG2 0x0540
+#define DSI_DSIPHY_CAL_HW_CFG3 0x0544
+#define DSI_DSIPHY_CAL_HW_CFG4 0x0548
+#define DSI_DSIPHY_REGULATOR_CAL_STATUS0 0x0550
+#define DSI_DSIPHY_BIST_CTRL0 0x048C
+#define DSI_DSIPHY_BIST_CTRL1 0x0490
+#define DSI_DSIPHY_BIST_CTRL2 0x0494
+#define DSI_DSIPHY_BIST_CTRL3 0x0498
+#define DSI_DSIPHY_BIST_CTRL4 0x049C
+#define DSI_DSIPHY_BIST_CTRL5 0x04A0
+
+#define DSI_EN BIT(0)
+#define DSI_VIDEO_MODE_EN BIT(1)
+#define DSI_CMD_MODE_EN BIT(2)
+
+#endif /* DSI_HOST_V2_H */
diff --git a/drivers/video/fbdev/msm/dsi_io_v2.c b/drivers/video/fbdev/msm/dsi_io_v2.c
new file mode 100644
index 0000000..28441b6
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_io_v2.c
@@ -0,0 +1,389 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk/msm-clk.h>
+
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+
+struct msm_dsi_io_private {
+ struct clk *dsi_byte_clk;
+ struct clk *dsi_esc_clk;
+ struct clk *dsi_pixel_clk;
+ struct clk *dsi_ahb_clk;
+ struct clk *dsi_clk;
+ int msm_dsi_clk_on;
+ int msm_dsi_ahb_clk_on;
+};
+
+static struct msm_dsi_io_private *dsi_io_private;
+
+#define DSI_VDDA_VOLTAGE 1200000
+
+/*
+ * msm_dsi_ahb_ctrl() - reference-counted enable/disable of the DSI AHB clock.
+ * @enable: non-zero to take a reference, zero to drop one
+ *
+ * NOTE(review): the refcount is a plain int with no locking; confirm all
+ * callers run in the same (serialized) context.
+ */
+void msm_dsi_ahb_ctrl(int enable)
+{
+	if (enable) {
+		dsi_io_private->msm_dsi_ahb_clk_on++;
+		if (dsi_io_private->msm_dsi_ahb_clk_on == 1)
+			clk_enable(dsi_io_private->dsi_ahb_clk);
+	} else {
+		dsi_io_private->msm_dsi_ahb_clk_on--;
+		if (dsi_io_private->msm_dsi_ahb_clk_on == 0)
+			clk_disable(dsi_io_private->dsi_ahb_clk);
+	}
+}
+
+/*
+ * msm_dsi_io_init() - allocate the IO private data, acquire the DSI clocks
+ * and configure the DSI regulators.
+ * @pdev: DSI platform device
+ * @mp:   regulator set parsed from the device tree
+ *
+ * Returns 0 on success or a negative errno.
+ * NOTE(review): on clock/regulator failure dsi_io_private stays allocated;
+ * it is only released by msm_dsi_io_deinit() — confirm callers invoke it
+ * on the error path.
+ */
+int msm_dsi_io_init(struct platform_device *pdev, struct mdss_module_power *mp)
+{
+	int rc;
+
+	if (!dsi_io_private) {
+		dsi_io_private = kzalloc(sizeof(struct msm_dsi_io_private),
+			GFP_KERNEL);
+		if (!dsi_io_private)
+			return -ENOMEM;
+	}
+
+	rc = msm_dsi_clk_init(pdev);
+	if (rc) {
+		pr_err("fail to initialize DSI clock\n");
+		return rc;
+	}
+
+	rc = msm_mdss_config_vreg(&pdev->dev, mp->vreg_config,
+		mp->num_vreg, 1);
+	if (rc) {
+		pr_err("fail to initialize DSI regulator\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_dsi_io_deinit() - release clocks, deconfigure regulators and free the
+ * IO private data. Safe to call when msm_dsi_io_init() never ran.
+ * @pdev: DSI platform device
+ * @mp:   regulator set to deconfigure
+ */
+void msm_dsi_io_deinit(struct platform_device *pdev,
+		struct mdss_module_power *mp)
+{
+	if (dsi_io_private) {
+		msm_dsi_clk_deinit();
+		msm_mdss_config_vreg(&pdev->dev, mp->vreg_config,
+			mp->num_vreg, 0);
+		kfree(dsi_io_private);
+		dsi_io_private = NULL;
+	}
+}
+
+/*
+ * msm_dsi_clk_init() - look up all DSI clock handles and prepare the AHB
+ * clock.
+ * @dev: DSI platform device
+ *
+ * Returns 0 on success or the PTR_ERR of the first failing clk_get().
+ * NOTE(review): clocks acquired before a failure are not released here;
+ * cleanup relies on the caller running msm_dsi_clk_deinit() — confirm all
+ * error paths do so.
+ */
+int msm_dsi_clk_init(struct platform_device *dev)
+{
+	int rc = 0;
+
+	dsi_io_private->dsi_clk = clk_get(&dev->dev, "dsi_clk");
+	if (IS_ERR(dsi_io_private->dsi_clk)) {
+		pr_err("can't find dsi core_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_clk);
+		dsi_io_private->dsi_clk = NULL;
+		return rc;
+	}
+	dsi_io_private->dsi_byte_clk = clk_get(&dev->dev, "byte_clk");
+	if (IS_ERR(dsi_io_private->dsi_byte_clk)) {
+		pr_err("can't find dsi byte_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_byte_clk);
+		dsi_io_private->dsi_byte_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_esc_clk = clk_get(&dev->dev, "esc_clk");
+	if (IS_ERR(dsi_io_private->dsi_esc_clk)) {
+		pr_err("can't find dsi esc_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_esc_clk);
+		dsi_io_private->dsi_esc_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_pixel_clk = clk_get(&dev->dev, "pixel_clk");
+	if (IS_ERR(dsi_io_private->dsi_pixel_clk)) {
+		pr_err("can't find dsi pixel\n");
+		rc = PTR_ERR(dsi_io_private->dsi_pixel_clk);
+		dsi_io_private->dsi_pixel_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_ahb_clk = clk_get(&dev->dev, "iface_clk");
+	if (IS_ERR(dsi_io_private->dsi_ahb_clk)) {
+		pr_err("can't find dsi iface_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->dsi_ahb_clk = NULL;
+		return rc;
+	}
+	/* AHB clock is prepared once here; others via msm_dsi_prepare_clocks(). */
+	clk_prepare(dsi_io_private->dsi_ahb_clk);
+
+	return 0;
+}
+
+/* Release every clock handle obtained by msm_dsi_clk_init(). */
+void msm_dsi_clk_deinit(void)
+{
+	if (dsi_io_private->dsi_clk) {
+		clk_put(dsi_io_private->dsi_clk);
+		dsi_io_private->dsi_clk = NULL;
+	}
+	if (dsi_io_private->dsi_byte_clk) {
+		clk_put(dsi_io_private->dsi_byte_clk);
+		dsi_io_private->dsi_byte_clk = NULL;
+	}
+	if (dsi_io_private->dsi_esc_clk) {
+		clk_put(dsi_io_private->dsi_esc_clk);
+		dsi_io_private->dsi_esc_clk = NULL;
+	}
+	if (dsi_io_private->dsi_pixel_clk) {
+		clk_put(dsi_io_private->dsi_pixel_clk);
+		dsi_io_private->dsi_pixel_clk = NULL;
+	}
+	if (dsi_io_private->dsi_ahb_clk) {
+		/* AHB clk was prepared in msm_dsi_clk_init(); unprepare first. */
+		clk_unprepare(dsi_io_private->dsi_ahb_clk);
+		clk_put(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->dsi_ahb_clk = NULL;
+	}
+}
+
+/* Prepare the core/byte/esc/pixel clocks (AHB clk is prepared at init). */
+int msm_dsi_prepare_clocks(void)
+{
+	clk_prepare(dsi_io_private->dsi_clk);
+	clk_prepare(dsi_io_private->dsi_byte_clk);
+	clk_prepare(dsi_io_private->dsi_esc_clk);
+	clk_prepare(dsi_io_private->dsi_pixel_clk);
+	return 0;
+}
+
+/* Unprepare the clocks prepared by msm_dsi_prepare_clocks(). */
+int msm_dsi_unprepare_clocks(void)
+{
+	clk_unprepare(dsi_io_private->dsi_clk);
+	clk_unprepare(dsi_io_private->dsi_esc_clk);
+	clk_unprepare(dsi_io_private->dsi_byte_clk);
+	clk_unprepare(dsi_io_private->dsi_pixel_clk);
+	return 0;
+}
+
+/*
+ * msm_dsi_clk_set_rate() - program the DSI core, escape, byte and pixel
+ * clock rates.
+ * @esc_rate:   escape clock rate in Hz
+ * @dsi_rate:   DSI core clock rate in Hz
+ * @byte_rate:  byte clock rate in Hz
+ * @pixel_rate: pixel clock rate in Hz
+ *
+ * Returns 0 on success, or the clk_set_rate() error code of the first
+ * clock that failed (later clocks are not attempted).
+ */
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+	unsigned long dsi_rate,
+	unsigned long byte_rate,
+	unsigned long pixel_rate)
+{
+	int rc;
+
+	rc = clk_set_rate(dsi_io_private->dsi_clk, dsi_rate);
+	if (rc) {
+		/* Fixed: previously reported "dsi_esc_clk" for the core clk */
+		pr_err("dsi_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_esc_clk, esc_rate);
+	if (rc) {
+		pr_err("dsi_esc_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_byte_clk, byte_rate);
+	if (rc) {
+		/* Fixed typo: "faile = %dd" printed a stray 'd' after the code */
+		pr_err("dsi_byte_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_pixel_clk, pixel_rate);
+	if (rc) {
+		pr_err("dsi_pixel_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+/*
+ * msm_dsi_clk_enable() - enable the DSI clocks once; repeated calls are
+ * no-ops until msm_dsi_clk_disable() clears the flag.
+ * Clocks must already be prepared (see msm_dsi_prepare_clocks()).
+ * NOTE(review): clk_enable() return values are not checked here.
+ */
+int msm_dsi_clk_enable(void)
+{
+	if (dsi_io_private->msm_dsi_clk_on) {
+		pr_debug("dsi_clks on already\n");
+		return 0;
+	}
+
+	clk_enable(dsi_io_private->dsi_clk);
+	clk_enable(dsi_io_private->dsi_esc_clk);
+	clk_enable(dsi_io_private->dsi_byte_clk);
+	clk_enable(dsi_io_private->dsi_pixel_clk);
+
+	dsi_io_private->msm_dsi_clk_on = 1;
+	return 0;
+}
+
+/* Disable the DSI clocks; a no-op when they are already off. */
+int msm_dsi_clk_disable(void)
+{
+	if (dsi_io_private->msm_dsi_clk_on == 0) {
+		pr_debug("mdss_dsi_clks already OFF\n");
+		return 0;
+	}
+
+	clk_disable(dsi_io_private->dsi_clk);
+	clk_disable(dsi_io_private->dsi_byte_clk);
+	clk_disable(dsi_io_private->dsi_esc_clk);
+	clk_disable(dsi_io_private->dsi_pixel_clk);
+
+	dsi_io_private->msm_dsi_clk_on = 0;
+	return 0;
+}
+
+/* Program the PHY pad strength registers from the panel PHY database. */
+static void msm_dsi_phy_strength_init(unsigned char *ctrl_base,
+				struct mdss_dsi_phy_ctrl *pd)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_0, pd->strength[0]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_2, pd->strength[1]);
+}
+
+/* Fixed PHY control setup; constants here are not taken from panel data. */
+static void msm_dsi_phy_ctrl_init(unsigned char *ctrl_base,
+				struct mdss_panel_data *pdata)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x5f);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_3, 0x10);
+}
+
+/* Program the PHY regulator block from the panel PHY database. */
+static void msm_dsi_phy_regulator_init(unsigned char *ctrl_base,
+				struct mdss_dsi_phy_ctrl *pd)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_LDO_CNTRL, 0x25);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, pd->regulator[0]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_1, pd->regulator[1]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_2, pd->regulator[2]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_3, pd->regulator[3]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_4, pd->regulator[4]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_PWR_CFG,
+		pd->regulator[5]);
+
+}
+
+/*
+ * Trigger PHY calibration and poll the busy bit until it clears.
+ * Returns 0 on success, -EINVAL if the busy bit stays set after term_cnt
+ * polls.
+ * NOTE(review): the poll loop reads back-to-back with no delay between
+ * iterations — confirm this tight loop is acceptable here.
+ */
+static int msm_dsi_phy_calibration(unsigned char *ctrl_base)
+{
+	int i = 0, term_cnt = 5000, ret = 0, cal_busy;
+
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_SW_CFG2, 0x0);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG1, 0x5a);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG3, 0x10);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG4, 0x01);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG0, 0x01);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x01);
+	usleep_range(5000, 5100); /*per DSI controller spec*/
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x00);
+
+	cal_busy = MIPI_INP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_STATUS0);
+	while (cal_busy & 0x10) {
+		i++;
+		if (i > term_cnt) {
+			ret = -EINVAL;
+			pr_err("msm_dsi_phy_calibration error\n");
+			break;
+		}
+		cal_busy = MIPI_INP(ctrl_base +
+			DSI_DSIPHY_REGULATOR_CAL_STATUS0);
+	}
+
+	return ret;
+}
+
+/* Program per-lane configuration for all five lane register banks. */
+static void msm_dsi_phy_lane_init(unsigned char *ctrl_base,
+				struct mdss_dsi_phy_ctrl *pd)
+{
+	int ln, index;
+
+	/*CFG0, CFG1, CFG2, TEST_DATAPATH, TEST_STR0, TEST_STR1*/
+	for (ln = 0; ln < 5; ln++) {
+		/* Lane register banks are 0x40 apart, starting at 0x0300. */
+		unsigned char *off = ctrl_base + 0x0300 + (ln * 0x40);
+
+		index = ln * 6;
+
+		MIPI_OUTP(off, pd->lanecfg[index]);
+		MIPI_OUTP(off + 4, pd->lanecfg[index + 1]);
+		MIPI_OUTP(off + 8, pd->lanecfg[index + 2]);
+		MIPI_OUTP(off + 12, pd->lanecfg[index + 3]);
+		MIPI_OUTP(off + 20, pd->lanecfg[index + 4]);
+		MIPI_OUTP(off + 24, pd->lanecfg[index + 5]);
+	}
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/* Program the 12 consecutive PHY timing control registers. */
+static void msm_dsi_phy_timing_init(unsigned char *ctrl_base,
+				struct mdss_dsi_phy_ctrl *pd)
+{
+	int i, off = DSI_DSIPHY_TIMING_CTRL_0;
+
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP(ctrl_base + off, pd->timing[i]);
+		off += 4;
+	}
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/* Program the PHY BIST block; BIST_CTRL4 is written last with zero. */
+static void msm_dsi_phy_bist_init(unsigned char *ctrl_base,
+				struct mdss_dsi_phy_ctrl *pd)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, pd->bistctrl[4]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL1, pd->bistctrl[1]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL0, pd->bistctrl[0]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, 0);
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_phy_init() - full DSI PHY bring-up sequence from the panel's
+ * PHY settings database.
+ * @ctrl_base: mapped controller register base
+ * @pdata:     panel data carrying the PHY database
+ *
+ * Always returns 0.
+ * NOTE(review): the msm_dsi_phy_calibration() result is ignored, so a
+ * calibration timeout is not propagated — confirm this is intentional.
+ */
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+		struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	pd = &(pdata->panel_info.mipi.dsi_phy_db);
+
+	msm_dsi_phy_strength_init(ctrl_base, pd);
+
+	msm_dsi_phy_ctrl_init(ctrl_base, pdata);
+
+	msm_dsi_phy_regulator_init(ctrl_base, pd);
+
+	msm_dsi_phy_calibration(ctrl_base);
+
+	msm_dsi_phy_lane_init(ctrl_base, pd);
+
+	msm_dsi_phy_timing_init(ctrl_base, pd);
+
+	msm_dsi_phy_bist_init(ctrl_base, pd);
+
+	return 0;
+}
+
+/* Pulse the PHY software reset bit with the spec-mandated delays. */
+void msm_dsi_phy_sw_reset(unsigned char *ctrl_base)
+{
+	/* start phy sw reset */
+	MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0001);
+	udelay(1000); /*per DSI controller spec*/
+	wmb(); /* ensure write is finished before progressing */
+	/* end phy sw reset */
+	MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0000);
+	udelay(100); /*per DSI controller spec*/
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/* Power down the PHY: PLL, regulator, lane controls, then clock control. */
+void msm_dsi_phy_off(unsigned char *ctrl_base)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x05f);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, 0x02);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x00);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_1, 0x7f);
+	MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0);
+}
diff --git a/drivers/video/fbdev/msm/dsi_io_v2.h b/drivers/video/fbdev/msm/dsi_io_v2.h
new file mode 100644
index 0000000..d0227ec
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_io_v2.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef DSI_IO_V2_H
+#define DSI_IO_V2_H
+
+#include "mdss_panel.h"
+
+void msm_dsi_ahb_ctrl(int enable);
+
+int msm_dsi_io_init(struct platform_device *dev,
+ struct mdss_module_power *mp);
+
+void msm_dsi_io_deinit(struct platform_device *dev,
+ struct mdss_module_power *mp);
+
+int msm_dsi_clk_init(struct platform_device *dev);
+
+void msm_dsi_clk_deinit(void);
+
+int msm_dsi_prepare_clocks(void);
+
+int msm_dsi_unprepare_clocks(void);
+
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+ unsigned long dsi_rate,
+ unsigned long byte_rate,
+ unsigned long pixel_rate);
+
+int msm_dsi_clk_enable(void);
+
+int msm_dsi_clk_disable(void);
+
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+ struct mdss_panel_data *pdata);
+
+void msm_dsi_phy_sw_reset(unsigned char *ctrl_base);
+
+void msm_dsi_phy_off(unsigned char *ctrl_base);
+#endif /* DSI_IO_V2_H */
diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c
new file mode 100644
index 0000000..88bf0aa
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_status_6g.c
@@ -0,0 +1,186 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+
+#include "mdss_dsi.h"
+#include "mdss_mdp.h"
+
+/*
+ * mdss_check_te_status() - Check the status of panel for TE based ESD.
+ * @ctrl_pdata : dsi controller data
+ * @pstatus_data : dsi status data
+ * @interval : duration in milliseconds for panel TE wait
+ *
+ * This function is called when the TE signal from the panel doesn't arrive
+ * after 'interval' milliseconds. If the TE IRQ is not ready, the workqueue
+ * gets re-scheduled. Otherwise, report the panel to be dead due to ESD attack.
+ */
+static bool mdss_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+		struct dsi_status_data *pstatus_data, uint32_t interval)
+{
+	bool ret;
+
+	/* Arm the TE completion before enabling the TE GPIO interrupt. */
+	atomic_set(&ctrl_pdata->te_irq_ready, 0);
+	reinit_completion(&ctrl_pdata->te_irq_comp);
+	enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	/* Define TE interrupt timeout value as 3x(1/fps) */
+	/* false (0) on timeout, true when the TE completion fired in time. */
+	ret = wait_for_completion_timeout(&ctrl_pdata->te_irq_comp,
+		msecs_to_jiffies(interval));
+	disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	pr_debug("%s: Panel TE check done with ret = %d\n", __func__, ret);
+	return ret;
+}
+
+/*
+ * mdss_check_dsi_ctrl_status() - Check MDP5 DSI controller status periodically.
+ * @work : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls check_status API on DSI controller to send the BTA
+ * command. If DSI controller fails to acknowledge the BTA command, it sends
+ * the PANEL_ALIVE=0 status to HAL layer.
+ */
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval)
+{
+	struct dsi_status_data *pstatus_data = NULL;
+	struct mdss_panel_data *pdata = NULL;
+	struct mipi_panel_info *mipi = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct mdss_mdp_ctl *ctl = NULL;
+	int ret = 0;
+
+	pstatus_data = container_of(to_delayed_work(work),
+		struct dsi_status_data, check_status);
+	if (!pstatus_data || !(pstatus_data->mfd)) {
+		pr_err("%s: mfd not available\n", __func__);
+		return;
+	}
+
+	pdata = dev_get_platdata(&pstatus_data->mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("%s: Panel data not available\n", __func__);
+		return;
+	}
+	mipi = &pdata->panel_info.mipi;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+		panel_data);
+	/*
+	 * NOTE(review): ctrl_pdata comes from container_of() on a non-NULL
+	 * pdata, so the !ctrl_pdata part of this condition can never be
+	 * true; only the check_status/status_mode half is effective.
+	 */
+	if (!ctrl_pdata || (!ctrl_pdata->check_status &&
+		(ctrl_pdata->status_mode != ESD_TE))) {
+		pr_err("%s: DSI ctrl or status_check callback not available\n",
+			__func__);
+		return;
+	}
+
+	/* Panel not fully unblanked yet: retry after 'interval' ms. */
+	if (!pdata->panel_info.esd_rdy) {
+		pr_debug("%s: unblank not complete, reschedule check status\n",
+			__func__);
+		schedule_delayed_work(&pstatus_data->check_status,
+			msecs_to_jiffies(interval));
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(pstatus_data->mfd);
+	ctl = mfd_to_ctl(pstatus_data->mfd);
+
+	if (!ctl) {
+		pr_err("%s: Display is off\n", __func__);
+		return;
+	}
+
+	/* TE-based ESD: wait for the panel TE pulse instead of sending BTA. */
+	if (ctrl_pdata->status_mode == ESD_TE) {
+		uint32_t fps = mdss_panel_get_framerate(&pdata->panel_info,
+			FPS_RESOLUTION_HZ);
+		uint32_t timeout = ((1000 / fps) + 1) *
+			MDSS_STATUS_TE_WAIT_MAX;
+
+		if (mdss_check_te_status(ctrl_pdata, pstatus_data, timeout))
+			goto sim;
+		else
+			goto status_dead;
+	}
+
+	/*
+	 * TODO: Because mdss_dsi_cmd_mdp_busy has made sure DMA to
+	 * be idle in mdss_dsi_cmdlist_commit, it is not necessary
+	 * to acquire ov_lock in case of video mode. Removing this
+	 * lock to fix issues so that ESD thread would not block other
+	 * overlay operations. Need refine this lock for command mode
+	 *
+	 * If Burst mode is enabled then we dont have to acquire ov_lock as
+	 * command and data arbitration is possible in h/w
+	 */
+
+	if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled)
+		mutex_lock(&mdp5_data->ov_lock);
+	mutex_lock(&ctl->offlock);
+
+	if (mdss_panel_is_power_off(pstatus_data->mfd->panel_power_state) ||
+		pstatus_data->mfd->shutdown_pending) {
+		mutex_unlock(&ctl->offlock);
+		if ((mipi->mode == DSI_CMD_MODE) &&
+			!ctrl_pdata->burst_mode_enabled)
+			mutex_unlock(&mdp5_data->ov_lock);
+		pr_err("%s: DSI turning off, avoiding panel status check\n",
+			__func__);
+		return;
+	}
+
+	/*
+	 * For the command mode panels, we return pan display
+	 * IOCTL on vsync interrupt. So, after vsync interrupt comes
+	 * and when DMA_P is in progress, if the panel stops responding
+	 * and if we trigger BTA before DMA_P finishes, then the DSI
+	 * FIFO will not be cleared since the DSI data bus control
+	 * doesn't come back to the host after BTA. This may cause the
+	 * display reset not to be proper. Hence, wait for DMA_P done
+	 * for command mode panels before triggering BTA.
+	 */
+	if (ctl->ops.wait_pingpong && !ctrl_pdata->burst_mode_enabled)
+		ctl->ops.wait_pingpong(ctl, NULL);
+
+	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ret = ctrl_pdata->check_status(ctrl_pdata);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	mutex_unlock(&ctl->offlock);
+	if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled)
+		mutex_unlock(&mdp5_data->ov_lock);
+
+	if (pstatus_data->mfd->panel_power_state == MDSS_PANEL_POWER_ON) {
+		if (ret > 0)
+			schedule_delayed_work(&pstatus_data->check_status,
+				msecs_to_jiffies(interval));
+		else
+			goto status_dead;
+	}
+/*
+ * Reached both from the TE-check success path and by falling through after
+ * a successful BTA check: optionally simulate panel death for testing.
+ */
+sim:
+	if (pdata->panel_info.panel_force_dead) {
+		pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead);
+		pdata->panel_info.panel_force_dead--;
+		if (!pdata->panel_info.panel_force_dead)
+			goto status_dead;
+	}
+
+	return;
+
+status_dead:
+	mdss_fb_report_panel_dead(pstatus_data->mfd);
+}
diff --git a/drivers/video/fbdev/msm/dsi_status_v2.c b/drivers/video/fbdev/msm/dsi_status_v2.c
new file mode 100644
index 0000000..35b0984
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_status_v2.c
@@ -0,0 +1,167 @@
+/* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+
+#include "mdss_dsi.h"
+#include "mdp3_ctrl.h"
+
+/*
+ * mdp3_check_te_status() - Check the status of panel for TE based ESD.
+ * @ctrl_pdata : dsi controller data
+ * @pstatus_data : dsi status data
+ * @interval : duration in milliseconds for panel TE wait
+ *
+ * This function waits for TE signal from the panel for a maximum
+ * duration of 3 vsyncs. If timeout occurs, report the panel to be
+ * dead due to ESD attack.
+ * NOTE: The TE IRQ handling is linked to the ESD thread scheduling,
+ * i.e. rate of TE IRQs firing is bound by the ESD interval.
+ */
+static int mdp3_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+ struct dsi_status_data *pstatus_data, uint32_t interval)
+{
+ int ret;
+
+ pr_debug("%s: Checking panel TE status\n", __func__);
+
+ atomic_set(&ctrl_pdata->te_irq_ready, 0);
+ reinit_completion(&ctrl_pdata->te_irq_comp);
+ enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+
+ ret = wait_for_completion_timeout(&ctrl_pdata->te_irq_comp,
+ msecs_to_jiffies(interval));
+
+ disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+ pr_debug("%s: Panel TE check done with ret = %d\n", __func__, ret);
+
+ return ret;
+}
+
+/*
+ * mdp3_check_dsi_ctrl_status() - Check MDP3 DSI controller status periodically.
+ * @work : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls check_status API on DSI controller to send the BTA
+ * command. If DSI controller fails to acknowledge the BTA command, it sends
+ * the PANEL_ALIVE=0 status to HAL layer.
+ */
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+ uint32_t interval)
+{
+ struct dsi_status_data *pdsi_status = NULL;
+ struct mdss_panel_data *pdata = NULL;
+ struct mipi_panel_info *mipi = NULL;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mdp3_session_data *mdp3_session = NULL;
+ int ret = 0;
+
+ pdsi_status = container_of(to_delayed_work(work),
+ struct dsi_status_data, check_status);
+
+ if (!pdsi_status || !(pdsi_status->mfd)) {
+ pr_err("%s: mfd not available\n", __func__);
+ return;
+ }
+
+ pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("%s: Panel data not available\n", __func__);
+ return;
+ }
+
+ mipi = &pdata->panel_info.mipi;
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ if (!ctrl_pdata || (!ctrl_pdata->check_status &&
+ (ctrl_pdata->status_mode != ESD_TE))) {
+ pr_err("%s: DSI ctrl or status_check callback not available\n",
+ __func__);
+ return;
+ }
+
+ if (!pdata->panel_info.esd_rdy) {
+ pr_err("%s: unblank not complete, reschedule check status\n",
+ __func__);
+ schedule_delayed_work(&pdsi_status->check_status,
+ msecs_to_jiffies(interval));
+ return;
+ }
+
+ mdp3_session = pdsi_status->mfd->mdp.private1;
+ if (!mdp3_session) {
+ pr_err("%s: Display is off\n", __func__);
+ return;
+ }
+
+ if (mdp3_session->in_splash_screen) {
+ schedule_delayed_work(&pdsi_status->check_status,
+ msecs_to_jiffies(interval));
+ pr_debug("%s: cont splash is on\n", __func__);
+ return;
+ }
+
+ if (mipi->mode == DSI_CMD_MODE &&
+ mipi->hw_vsync_mode &&
+ mdss_dsi_is_te_based_esd(ctrl_pdata)) {
+ uint32_t fps = mdss_panel_get_framerate(&pdata->panel_info,
+ FPS_RESOLUTION_HZ);
+ uint32_t timeout = ((1000 / fps) + 1) *
+ MDSS_STATUS_TE_WAIT_MAX;
+
+ if (mdp3_check_te_status(ctrl_pdata, pdsi_status, timeout) > 0)
+ goto sim;
+ goto status_dead;
+ }
+
+ mutex_lock(&mdp3_session->lock);
+ if (!mdp3_session->status) {
+ pr_debug("%s: display off already\n", __func__);
+ mutex_unlock(&mdp3_session->lock);
+ return;
+ }
+
+ if (mdp3_session->wait_for_dma_done)
+ ret = mdp3_session->wait_for_dma_done(mdp3_session);
+ mutex_unlock(&mdp3_session->lock);
+
+ if (!ret)
+ ret = ctrl_pdata->check_status(ctrl_pdata);
+ else
+ pr_err("%s: wait_for_dma_done error\n", __func__);
+
+ if (mdss_fb_is_power_on_interactive(pdsi_status->mfd)) {
+ if (ret > 0)
+ schedule_delayed_work(&pdsi_status->check_status,
+ msecs_to_jiffies(interval));
+ else
+ goto status_dead;
+ }
+sim:
+ if (pdata->panel_info.panel_force_dead) {
+ pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead);
+ pdata->panel_info.panel_force_dead--;
+ if (!pdata->panel_info.panel_force_dead)
+ goto status_dead;
+ }
+ return;
+
+status_dead:
+ mdss_fb_report_panel_dead(pdsi_status->mfd);
+}
+
diff --git a/drivers/video/fbdev/msm/dsi_v2.c b/drivers/video/fbdev/msm/dsi_v2.c
new file mode 100644
index 0000000..74c0726
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_v2.c
@@ -0,0 +1,619 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+#include "dsi_v2.h"
+
+static struct dsi_interface dsi_intf;
+
+static int dsi_off(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ pr_debug("turn off dsi controller\n");
+ if (dsi_intf.off)
+ rc = dsi_intf.off(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_off DSI failed %d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static int dsi_on(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ pr_debug("dsi_on DSI controller on\n");
+ if (dsi_intf.on)
+ rc = dsi_intf.on(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_on DSI failed %d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static int dsi_update_pconfig(struct mdss_panel_data *pdata,
+ int mode)
+{
+ int ret = 0;
+ struct mdss_panel_info *pinfo = &pdata->panel_info;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ if (!pdata)
+ return -ENODEV;
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ if (mode == DSI_CMD_MODE) {
+ pinfo->mipi.mode = DSI_CMD_MODE;
+ pinfo->type = MIPI_CMD_PANEL;
+ pinfo->mipi.vsync_enable = 1;
+ pinfo->mipi.hw_vsync_mode = 1;
+ } else {
+ pinfo->mipi.mode = DSI_VIDEO_MODE;
+ pinfo->type = MIPI_VIDEO_PANEL;
+ pinfo->mipi.vsync_enable = 0;
+ pinfo->mipi.hw_vsync_mode = 0;
+ }
+
+ ctrl_pdata->panel_mode = pinfo->mipi.mode;
+ mdss_panel_get_dst_fmt(pinfo->bpp, pinfo->mipi.mode,
+ pinfo->mipi.pixel_packing, &(pinfo->mipi.dst_format));
+ pinfo->cont_splash_enabled = 0;
+
+ return ret;
+}
+
+static int dsi_panel_handler(struct mdss_panel_data *pdata, int enable)
+{
+ int rc = 0;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ pr_debug("dsi_panel_handler enable=%d\n", enable);
+ if (!pdata)
+ return -ENODEV;
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ if (enable &&
+ (pdata->panel_info.panel_power_state == MDSS_PANEL_POWER_OFF)) {
+ if (!pdata->panel_info.dynamic_switch_pending) {
+ mdss_dsi_panel_reset(pdata, 1);
+ rc = ctrl_pdata->on(pdata);
+ if (rc)
+ pr_err("dsi_panel_handler panel on failed %d\n",
+ rc);
+ }
+ pdata->panel_info.panel_power_state = MDSS_PANEL_POWER_ON;
+ if (pdata->panel_info.type == MIPI_CMD_PANEL)
+ mdss_dsi_set_tear_on(ctrl_pdata);
+ } else if (!enable &&
+ (pdata->panel_info.panel_power_state == MDSS_PANEL_POWER_ON)) {
+ msm_dsi_sw_reset();
+ if (dsi_intf.op_mode_config)
+ dsi_intf.op_mode_config(DSI_CMD_MODE, pdata);
+ if (pdata->panel_info.dynamic_switch_pending) {
+ pr_info("%s: switching to %s mode\n", __func__,
+ (pdata->panel_info.mipi.mode ? "video" : "command"));
+ if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+ ctrl_pdata->switch_mode(pdata, DSI_VIDEO_MODE);
+ } else if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+ ctrl_pdata->switch_mode(pdata, DSI_CMD_MODE);
+ mdss_dsi_set_tear_off(ctrl_pdata);
+ }
+ }
+ pdata->panel_info.panel_power_state = MDSS_PANEL_POWER_OFF;
+ if (!pdata->panel_info.dynamic_switch_pending) {
+ rc = ctrl_pdata->off(pdata);
+ mdss_dsi_panel_reset(pdata, 0);
+ }
+ }
+ return rc;
+}
+
+static int dsi_splash_on(struct mdss_panel_data *pdata)
+{
+ int rc = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ if (dsi_intf.cont_on)
+ rc = dsi_intf.cont_on(pdata);
+
+ if (rc) {
+ pr_err("mdss_dsi_on DSI failed %d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static int dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable)
+{
+ int rc = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ if (dsi_intf.clk_ctrl)
+ rc = dsi_intf.clk_ctrl(pdata, enable);
+
+ return rc;
+}
+
+static int dsi_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ if (!pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -ENODEV;
+ }
+
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = dsi_on(pdata);
+ break;
+ case MDSS_EVENT_BLANK:
+ rc = dsi_off(pdata);
+ break;
+ case MDSS_EVENT_PANEL_ON:
+ rc = dsi_panel_handler(pdata, 1);
+ break;
+ case MDSS_EVENT_PANEL_OFF:
+ rc = dsi_panel_handler(pdata, 0);
+ break;
+ case MDSS_EVENT_CONT_SPLASH_BEGIN:
+ rc = dsi_splash_on(pdata);
+ break;
+ case MDSS_EVENT_PANEL_CLK_CTRL:
+ rc = dsi_clk_ctrl(pdata,
+ (int)(((struct dsi_panel_clk_ctrl *)arg)->state));
+ break;
+ case MDSS_EVENT_DSI_UPDATE_PANEL_DATA:
+ rc = dsi_update_pconfig(pdata, (int)(unsigned long) arg);
+ break;
+ default:
+ pr_debug("%s: unhandled event=%d\n", __func__, event);
+ break;
+ }
+ return rc;
+}
+
+static int dsi_parse_gpio(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ ctrl_pdata->disp_en_gpio = of_get_named_gpio(np,
+ "qcom,platform-enable-gpio", 0);
+
+ if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
+ pr_err("%s:%d, Disp_en gpio not specified\n",
+ __func__, __LINE__);
+
+ ctrl_pdata->rst_gpio = of_get_named_gpio(np,
+ "qcom,platform-reset-gpio", 0);
+ if (!gpio_is_valid(ctrl_pdata->rst_gpio))
+ pr_err("%s:%d, reset gpio not specified\n",
+ __func__, __LINE__);
+
+ ctrl_pdata->mode_gpio = -1;
+ if (ctrl_pdata->panel_data.panel_info.mode_gpio_state !=
+ MODE_GPIO_NOT_VALID) {
+ ctrl_pdata->mode_gpio = of_get_named_gpio(np,
+ "qcom,platform-mode-gpio", 0);
+ if (!gpio_is_valid(ctrl_pdata->mode_gpio))
+ pr_info("%s:%d, reset gpio not specified\n",
+ __func__, __LINE__);
+ }
+
+ ctrl_pdata->bklt_en_gpio = of_get_named_gpio(np,
+ "qcom,platform-bklight-en-gpio", 0);
+ if (!gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+ pr_err("%s:%d, bklt_en gpio not specified\n",
+ __func__, __LINE__);
+
+ return 0;
+}
+
+static void mdss_dsi_put_dt_vreg_data(struct device *dev,
+ struct mdss_module_power *module_power)
+{
+ if (!module_power) {
+ pr_err("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (module_power->vreg_config) {
+ devm_kfree(dev, module_power->vreg_config);
+ module_power->vreg_config = NULL;
+ }
+ module_power->num_vreg = 0;
+}
+
+static int mdss_dsi_get_dt_vreg_data(struct device *dev,
+ struct mdss_module_power *mp, enum dsi_pm_type module)
+{
+ int i = 0, rc = 0;
+ u32 tmp = 0;
+ struct device_node *of_node = NULL, *supply_node = NULL;
+ const char *pm_supply_name = NULL;
+ struct device_node *supply_root_node = NULL;
+
+ if (!dev || !mp) {
+ pr_err("%s: invalid input\n", __func__);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ of_node = dev->of_node;
+
+ mp->num_vreg = 0;
+ pm_supply_name = __mdss_dsi_pm_supply_node_name(module);
+ supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
+ if (!supply_root_node) {
+ pr_err("no supply entry present\n");
+ goto novreg;
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node) {
+ mp->num_vreg++;
+ }
+
+ if (mp->num_vreg == 0) {
+ pr_debug("%s: no vreg\n", __func__);
+ goto novreg;
+ } else {
+ pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg);
+ }
+
+ mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) *
+ mp->num_vreg, GFP_KERNEL);
+ if (!mp->vreg_config) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node) {
+ const char *st = NULL;
+ /* vreg-name */
+ rc = of_property_read_string(supply_node,
+ "qcom,supply-name", &st);
+ if (rc) {
+ pr_err("%s: error reading name. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ snprintf(mp->vreg_config[i].vreg_name,
+ ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
+ /* vreg-min-voltage */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-min-voltage", &tmp);
+ if (rc) {
+ pr_err("%s: error reading min volt. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ mp->vreg_config[i].min_voltage = tmp;
+
+ /* vreg-max-voltage */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-max-voltage", &tmp);
+ if (rc) {
+ pr_err("%s: error reading max volt. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ mp->vreg_config[i].max_voltage = tmp;
+
+ /* enable-load */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-enable-load", &tmp);
+ if (rc) {
+ pr_err("%s: error reading enable load. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+ /* disable-load */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-disable-load", &tmp);
+ if (rc) {
+ pr_err("%s: error reading disable load. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+ /* ulp-load */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-ulp-load", &tmp);
+ if (rc)
+ pr_warn("%s: error reading ulp load. rc=%d\n",
+ __func__, rc);
+
+ mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp :
+ mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]);
+
+ /* pre-sleep */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-pre-on-sleep", &tmp);
+ if (rc) {
+ pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+ __func__, rc);
+ rc = 0;
+ } else {
+ mp->vreg_config[i].pre_on_sleep = tmp;
+ }
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-pre-off-sleep", &tmp);
+ if (rc) {
+ pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+ __func__, rc);
+ rc = 0;
+ } else {
+ mp->vreg_config[i].pre_off_sleep = tmp;
+ }
+
+ /* post-sleep */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-post-on-sleep", &tmp);
+ if (rc) {
+ pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+ __func__, rc);
+ rc = 0;
+ } else {
+ mp->vreg_config[i].post_on_sleep = tmp;
+ }
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-post-off-sleep", &tmp);
+ if (rc) {
+ pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+ __func__, rc);
+ rc = 0;
+ } else {
+ mp->vreg_config[i].post_off_sleep = tmp;
+ }
+
+ pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, ulp=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+ __func__,
+ mp->vreg_config[i].vreg_name,
+ mp->vreg_config[i].min_voltage,
+ mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_ULP],
+ mp->vreg_config[i].pre_on_sleep,
+ mp->vreg_config[i].post_on_sleep,
+ mp->vreg_config[i].pre_off_sleep,
+ mp->vreg_config[i].post_off_sleep
+ );
+ ++i;
+ }
+
+ return rc;
+
+error:
+ if (mp->vreg_config) {
+ devm_kfree(dev, mp->vreg_config);
+ mp->vreg_config = NULL;
+ }
+novreg:
+ mp->num_vreg = 0;
+
+ return rc;
+}
+
+static int dsi_parse_phy(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int i, len;
+ const char *data;
+ struct mdss_dsi_phy_ctrl *phy_db
+ = &(ctrl_pdata->panel_data.panel_info.mipi.dsi_phy_db);
+
+ data = of_get_property(np, "qcom,platform-regulator-settings", &len);
+ if ((!data) || (len != 6)) {
+ pr_err("%s:%d, Unable to read Phy regulator settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ phy_db->regulator[i] = data[i];
+
+ data = of_get_property(np, "qcom,platform-strength-ctrl", &len);
+ if ((!data) || (len != 2)) {
+ pr_err("%s:%d, Unable to read Phy Strength ctrl settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ phy_db->strength[0] = data[0];
+ phy_db->strength[1] = data[1];
+
+ data = of_get_property(np, "qcom,platform-bist-ctrl", &len);
+ if ((!data) || (len != 6)) {
+ pr_err("%s:%d, Unable to read Phy Bist Ctrl settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ phy_db->bistctrl[i] = data[i];
+
+ data = of_get_property(np, "qcom,platform-lane-config", &len);
+ if ((!data) || (len != 30)) {
+ pr_err("%s:%d, Unable to read Phy lane configure settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ for (i = 0; i < len; i++)
+ phy_db->lanecfg[i] = data[i];
+
+ return 0;
+}
+
+void dsi_ctrl_config_deinit(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int i;
+
+ for (i = DSI_MAX_PM - 1; i >= 0; i--) {
+ mdss_dsi_put_dt_vreg_data(&pdev->dev,
+ &ctrl_pdata->power_data[i]);
+ }
+}
+
+int dsi_ctrl_config_init(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int rc = 0, i;
+
+ for (i = 0; i < DSI_MAX_PM; i++) {
+ rc = mdss_dsi_get_dt_vreg_data(&pdev->dev,
+ &ctrl_pdata->power_data[i], i);
+ if (rc) {
+ DEV_ERR("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+ __func__, __mdss_dsi_pm_name(i), rc);
+ return rc;
+ }
+ }
+
+ rc = dsi_parse_gpio(pdev, ctrl_pdata);
+ if (rc) {
+ pr_err("fail to parse panel GPIOs\n");
+ return rc;
+ }
+
+ rc = dsi_parse_phy(pdev, ctrl_pdata);
+ if (rc) {
+ pr_err("fail to parse DSI PHY settings\n");
+ return rc;
+ }
+
+ return 0;
+}
+int dsi_panel_device_register_v2(struct platform_device *dev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ struct mipi_panel_info *mipi;
+ int rc;
+ u8 lanes = 0, bpp;
+ u32 h_period, v_period;
+ struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+ h_period = ((pinfo->lcdc.h_pulse_width)
+ + (pinfo->lcdc.h_back_porch)
+ + (pinfo->xres)
+ + (pinfo->lcdc.h_front_porch));
+
+ v_period = ((pinfo->lcdc.v_pulse_width)
+ + (pinfo->lcdc.v_back_porch)
+ + (pinfo->yres)
+ + (pinfo->lcdc.v_front_porch));
+
+ mipi = &pinfo->mipi;
+
+ pinfo->type =
+ ((mipi->mode == DSI_VIDEO_MODE)
+ ? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+ if (mipi->data_lane3)
+ lanes += 1;
+ if (mipi->data_lane2)
+ lanes += 1;
+ if (mipi->data_lane1)
+ lanes += 1;
+ if (mipi->data_lane0)
+ lanes += 1;
+
+ if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+ || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+ || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+ bpp = 3;
+ else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+ || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+ bpp = 2;
+ else
+ bpp = 3; /* Default format set to RGB888 */
+
+ if (pinfo->type == MIPI_VIDEO_PANEL &&
+ !pinfo->clk_rate) {
+ h_period += pinfo->lcdc.xres_pad;
+ v_period += pinfo->lcdc.yres_pad;
+
+ if (lanes > 0) {
+ pinfo->clk_rate =
+ ((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+ / lanes);
+ } else {
+ pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+ pinfo->clk_rate =
+ (h_period * v_period
+ * (mipi->frame_rate) * bpp * 8);
+ }
+ }
+
+ ctrl_pdata->panel_data.event_handler = dsi_event_handler;
+
+ /*
+ * register in mdp driver
+ */
+ rc = mdss_register_panel(dev, &(ctrl_pdata->panel_data));
+ if (rc) {
+ dev_err(&dev->dev, "unable to register MIPI DSI panel\n");
+ return rc;
+ }
+
+ pr_debug("%s: Panal data initialized\n", __func__);
+ return 0;
+}
+
+void dsi_register_interface(struct dsi_interface *intf)
+{
+ dsi_intf = *intf;
+}
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+ dp->start = kzalloc(size, GFP_KERNEL);
+ if (dp->start == NULL) {
+ pr_err("%s:%u\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ dp->end = dp->start + size;
+ dp->size = size;
+
+ if ((int)dp->start & 0x07) {
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ dp->data = dp->start;
+ dp->len = 0;
+ return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/dsi_v2.h b/drivers/video/fbdev/msm/dsi_v2.h
new file mode 100644
index 0000000..2f6f404
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_v2.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef DSI_V2_H
+#define DSI_V2_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+
+#include "mdss_dsi.h"
+#include "mdss_panel.h"
+
+#define DSI_BUF_SIZE 1024
+#define DSI_MRPS 0x04 /* Maximum Return Packet Size */
+
+struct dsi_interface {
+ int (*on)(struct mdss_panel_data *pdata);
+ int (*off)(struct mdss_panel_data *pdata);
+ int (*cont_on)(struct mdss_panel_data *pdata);
+ int (*clk_ctrl)(struct mdss_panel_data *pdata, int enable);
+ void (*op_mode_config)(int mode, struct mdss_panel_data *pdata);
+ int index;
+ void *private;
+};
+
+int dsi_panel_device_register_v2(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+void dsi_register_interface(struct dsi_interface *intf);
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size);
+
+void dsi_set_tx_power_mode(int mode);
+
+void dsi_ctrl_config_deinit(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+int dsi_ctrl_config_init(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val);
+
+int mdp3_panel_get_boot_cfg(void);
+
+void msm_dsi_sw_reset(void);
+#endif /* DSI_V2_H */
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
new file mode 100644
index 0000000..5cf439c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -0,0 +1,3173 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/msm_kgsl.h>
+#include <linux/major.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/qcom_iommu.h>
+
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include "mdp3.h"
+#include "mdss_fb.h"
+#include "mdp3_hwio.h"
+#include "mdp3_ctrl.h"
+#include "mdp3_ppp.h"
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss.h"
+
+#ifndef EXPORT_COMPAT
+#define EXPORT_COMPAT(x)
+#endif
+
+#define AUTOSUSPEND_TIMEOUT_MS 100
+#define MISR_POLL_SLEEP 2000
+#define MISR_POLL_TIMEOUT 32000
+#define MDP3_REG_CAPTURED_DSI_PCLK_MASK 1
+
+#define MDP_CORE_HW_VERSION 0x03050306
+struct mdp3_hw_resource *mdp3_res;
+
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+#define SET_BIT(value, bit_num) \
+{ \
+ value[bit_num >> 3] |= (1 << (bit_num & 7)); \
+}
+
+#define MAX_BPP_SUPPORTED 4
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(SZ_128M, SZ_256M),
+ MDP_BUS_VECTOR_ENTRY(SZ_256M, SZ_512M),
+};
+static struct msm_bus_paths
+ mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdp3",
+};
+
+struct mdp3_bus_handle_map mdp3_bus_handle[MDP3_BUS_HANDLE_MAX] = {
+ [MDP3_BUS_HANDLE] = {
+ .bus_vector = mdp_bus_vectors,
+ .usecases = mdp_bus_usecases,
+ .scale_pdata = &mdp_bus_scale_table,
+ .current_bus_idx = 0,
+ .handle = 0,
+ },
+};
+
+static struct mdss_panel_intf pan_types[] = {
+ {"dsi", MDSS_PANEL_INTF_DSI},
+};
+static char mdss_mdp3_panel[MDSS_MAX_PANEL_LEN];
+
+struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
+ [MDP3_IOMMU_DOMAIN_UNSECURE] = {
+ .domain_type = MDP3_IOMMU_DOMAIN_UNSECURE,
+ .client_name = "mdp_ns",
+ .partitions = {
+ {
+ .start = SZ_128K,
+ .size = SZ_1G - SZ_128K,
+ },
+ },
+ .npartitions = 1,
+ },
+ [MDP3_IOMMU_DOMAIN_SECURE] = {
+ .domain_type = MDP3_IOMMU_DOMAIN_SECURE,
+ .client_name = "mdp_secure",
+ .partitions = {
+ {
+ .start = SZ_1G,
+ .size = SZ_1G,
+ },
+ },
+ .npartitions = 1,
+ },
+};
+
+struct mdp3_iommu_ctx_map mdp3_iommu_contexts[MDP3_IOMMU_CTX_MAX] = {
+ [MDP3_IOMMU_CTX_MDP_0] = {
+ .ctx_type = MDP3_IOMMU_CTX_MDP_0,
+ .domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_UNSECURE],
+ .ctx_name = "mdp_0",
+ .attached = 0,
+ },
+ [MDP3_IOMMU_CTX_MDP_1] = {
+ .ctx_type = MDP3_IOMMU_CTX_MDP_1,
+ .domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_SECURE],
+ .ctx_name = "mdp_1",
+ .attached = 0,
+ },
+};
+
+static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
+{
+ int i = 0;
+ struct mdp3_hw_resource *mdata = (struct mdp3_hw_resource *)ptr;
+ u32 mdp_interrupt = 0;
+ u32 mdp_status = 0;
+
+ spin_lock(&mdata->irq_lock);
+ if (!mdata->irq_mask) {
+ pr_err("spurious interrupt\n");
+ spin_unlock(&mdata->irq_lock);
+ return IRQ_HANDLED;
+ }
+ mdp_status = MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+ mdp_interrupt = mdp_status;
+ pr_debug("mdp3_irq_handler irq=%d\n", mdp_interrupt);
+
+ mdp_interrupt &= mdata->irq_mask;
+
+ while (mdp_interrupt && i < MDP3_MAX_INTR) {
+ if ((mdp_interrupt & 0x1) && mdata->callbacks[i].cb)
+ mdata->callbacks[i].cb(i, mdata->callbacks[i].data);
+ mdp_interrupt = mdp_interrupt >> 1;
+ i++;
+ }
+ MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_status);
+
+ spin_unlock(&mdata->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+void mdp3_irq_enable(int type)
+{
+ unsigned long flag;
+
+ pr_debug("mdp3_irq_enable type=%d\n", type);
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ if (mdp3_res->irq_ref_count[type] > 0) {
+ pr_debug("interrupt %d already enabled\n", type);
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+ return;
+ }
+
+ mdp3_res->irq_mask |= BIT(type);
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+
+ mdp3_res->irq_ref_count[type] += 1;
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_disable(int type)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ mdp3_irq_disable_nosync(type);
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_disable_nosync(int type)
+{
+ if (mdp3_res->irq_ref_count[type] <= 0) {
+ pr_debug("interrupt %d not enabled\n", type);
+ return;
+ }
+ mdp3_res->irq_ref_count[type] -= 1;
+ if (mdp3_res->irq_ref_count[type] == 0) {
+ mdp3_res->irq_mask &= ~BIT(type);
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+ }
+}
+
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb)
+{
+ unsigned long flag;
+
+ pr_debug("interrupt %d callback\n", type);
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ if (cb)
+ mdp3_res->callbacks[type] = *cb;
+ else
+ mdp3_res->callbacks[type].cb = NULL;
+
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+ return 0;
+}
+
+void mdp3_irq_register(void)
+{
+ unsigned long flag;
+ struct mdss_hw *mdp3_hw;
+
+ pr_debug("mdp3_irq_register\n");
+ mdp3_hw = &mdp3_res->mdp3_hw;
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ mdp3_res->irq_ref_cnt++;
+ if (mdp3_res->irq_ref_cnt == 1) {
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+ mdp3_res->mdss_util->enable_irq(&mdp3_res->mdp3_hw);
+ }
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_deregister(void)
+{
+ unsigned long flag;
+ bool irq_enabled = true;
+ struct mdss_hw *mdp3_hw;
+
+ pr_debug("mdp3_irq_deregister\n");
+ mdp3_hw = &mdp3_res->mdp3_hw;
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ memset(mdp3_res->irq_ref_count, 0, sizeof(u32) * MDP3_MAX_INTR);
+ mdp3_res->irq_mask = 0;
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
+ mdp3_res->irq_ref_cnt--;
+ /* This can happen if suspend is called first */
+ if (mdp3_res->irq_ref_cnt < 0) {
+ irq_enabled = false;
+ mdp3_res->irq_ref_cnt = 0;
+ }
+ if (mdp3_res->irq_ref_cnt == 0 && irq_enabled)
+ mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw);
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_suspend(void)
+{
+ unsigned long flag;
+ bool irq_enabled = true;
+ struct mdss_hw *mdp3_hw;
+
+ pr_debug("%s\n", __func__);
+ mdp3_hw = &mdp3_res->mdp3_hw;
+ spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+ mdp3_res->irq_ref_cnt--;
+ if (mdp3_res->irq_ref_cnt < 0) {
+ irq_enabled = false;
+ mdp3_res->irq_ref_cnt = 0;
+ }
+ if (mdp3_res->irq_ref_cnt == 0 && irq_enabled) {
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
+ mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw);
+ }
+ spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+static int mdp3_bus_scale_register(void)
+{
+ int i, j;
+
+ if (!mdp3_res->bus_handle) {
+ pr_err("No bus handle\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) {
+ struct mdp3_bus_handle_map *bus_handle =
+ &mdp3_res->bus_handle[i];
+
+ if (!bus_handle->handle) {
+ int j;
+ struct msm_bus_scale_pdata *bus_pdata =
+ bus_handle->scale_pdata;
+
+ for (j = 0; j < bus_pdata->num_usecases; j++) {
+ bus_handle->usecases[j].num_paths = 1;
+ bus_handle->usecases[j].vectors =
+ &bus_handle->bus_vector[j];
+ }
+
+ bus_handle->handle =
+ msm_bus_scale_register_client(bus_pdata);
+ if (!bus_handle->handle) {
+ pr_err("not able to get bus scale i=%d\n", i);
+ return -ENOMEM;
+ }
+ pr_debug("register bus_hdl=%x\n",
+ bus_handle->handle);
+ }
+
+ for (j = 0; j < MDP3_CLIENT_MAX; j++) {
+ bus_handle->ab[j] = 0;
+ bus_handle->ib[j] = 0;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Unregister all previously registered bus-scale clients and clear the
+ * handles so a later re-register starts clean.  Safe to call when no
+ * handle table exists.
+ */
+static void mdp3_bus_scale_unregister(void)
+{
+	int i;
+
+	if (!mdp3_res->bus_handle)
+		return;
+
+	for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) {
+		pr_debug("unregister index=%d bus_handle=%x\n",
+			i, mdp3_res->bus_handle[i].handle);
+		if (mdp3_res->bus_handle[i].handle) {
+			msm_bus_scale_unregister_client(
+				mdp3_res->bus_handle[i].handle);
+			mdp3_res->bus_handle[i].handle = 0;
+		}
+	}
+}
+
+/*
+ * Record @client's arbitrated (ab) / instantaneous (ib) bandwidth vote,
+ * sum the votes of all clients, and push the total to the bus driver.
+ * A zero total selects usecase 0 (off); otherwise the next usecase slot
+ * is rotated into so the in-flight vector is never rewritten in place.
+ * Non-zero quotas are remembered in restore_ab/restore_ib so they can be
+ * re-applied after a power collapse.  Returns the bus driver's status.
+ */
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	struct mdp3_bus_handle_map *bus_handle;
+	int cur_bus_idx;
+	int bus_idx;
+	int client_idx;
+	u64 total_ib = 0, total_ab = 0;
+	int i, rc;
+
+	client_idx = MDP3_BUS_HANDLE;
+
+	bus_handle = &mdp3_res->bus_handle[client_idx];
+	cur_bus_idx = bus_handle->current_bus_idx;
+
+	if (bus_handle->handle < 1) {
+		pr_err("invalid bus handle %d\n", bus_handle->handle);
+		return -EINVAL;
+	}
+
+	bus_handle->ab[client] = ab_quota;
+	bus_handle->ib[client] = ib_quota;
+
+	/* aggregate the votes of every client sharing this handle */
+	for (i = 0; i < MDP3_CLIENT_MAX; i++) {
+		total_ab += bus_handle->ab[i];
+		total_ib += bus_handle->ib[i];
+	}
+
+	if ((total_ab | total_ib) == 0) {
+		bus_idx = 0;
+	} else {
+		int num_cases = bus_handle->scale_pdata->num_usecases;
+		struct msm_bus_vectors *vect = NULL;
+
+		/* rotate through the non-zero usecase slots (1..num-1) */
+		bus_idx = (cur_bus_idx % (num_cases - 1)) + 1;
+
+		/* aligning to avoid performing updates for small changes */
+		total_ab = ALIGN(total_ab, SZ_64M);
+		total_ib = ALIGN(total_ib, SZ_64M);
+
+		vect = bus_handle->scale_pdata->usecase[cur_bus_idx].vectors;
+		if ((total_ab == vect->ab) && (total_ib == vect->ib)) {
+			pr_debug("skip bus scaling, no change in vectors\n");
+			return 0;
+		}
+
+		vect = bus_handle->scale_pdata->usecase[bus_idx].vectors;
+		vect->ab = total_ab;
+		vect->ib = total_ib;
+
+		pr_debug("bus scale idx=%d ab=%llu ib=%llu\n", bus_idx,
+				vect->ab, vect->ib);
+	}
+	bus_handle->current_bus_idx = bus_idx;
+	rc = msm_bus_scale_client_update_request(bus_handle->handle, bus_idx);
+
+	/* remember non-zero votes so they can be restored after suspend */
+	if (!rc && ab_quota != 0 && ib_quota != 0) {
+		bus_handle->restore_ab[client] = ab_quota;
+		bus_handle->restore_ib[client] = ib_quota;
+	}
+
+	return rc;
+}
+
+/*
+ * Reference-counted enable/disable of one MDP3 clock.  The clock is
+ * prepared+enabled only on the 0->1 transition and disabled+unprepared
+ * only on the 1->0 transition; intermediate calls just move the count.
+ * A negative count indicates an unbalanced caller and returns -EINVAL.
+ * NOTE(review): callers appear to serialize via res_mutex or irq-safe
+ * context; the counter itself is not locked here — confirm at call sites.
+ */
+static int mdp3_clk_update(u32 clk_idx, u32 enable)
+{
+	int ret = 0;
+	struct clk *clk;
+	int count = 0;
+
+	if (clk_idx >= MDP3_MAX_CLK || !mdp3_res->clocks[clk_idx])
+		return -ENODEV;
+
+	clk = mdp3_res->clocks[clk_idx];
+
+	if (enable)
+		mdp3_res->clock_ref_count[clk_idx]++;
+	else
+		mdp3_res->clock_ref_count[clk_idx]--;
+
+	count = mdp3_res->clock_ref_count[clk_idx];
+	if (count == 1 && enable) {
+		pr_debug("clk=%d en=%d\n", clk_idx, enable);
+		ret = clk_prepare(clk);
+		if (ret) {
+			pr_err("%s: Failed to prepare clock %d",
+						__func__, clk_idx);
+			/* roll the count back so state stays consistent */
+			mdp3_res->clock_ref_count[clk_idx]--;
+			return ret;
+		}
+		if (clk_idx == MDP3_CLK_MDP_CORE)
+			MDSS_XLOG(enable);
+		ret = clk_enable(clk);
+		if (ret)
+			pr_err("%s: clock enable failed %d\n", __func__,
+					clk_idx);
+	} else if (count == 0) {
+		pr_debug("clk=%d disable\n", clk_idx);
+		if (clk_idx == MDP3_CLK_MDP_CORE)
+			MDSS_XLOG(enable);
+		clk_disable(clk);
+		clk_unprepare(clk);
+		ret = 0;
+	} else if (count < 0) {
+		pr_err("clk=%d count=%d\n", clk_idx, count);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+
+/*
+ * Set the rate of @clk_type for a given @client.  For the MDP source
+ * clock the per-client requests (DMA_P, PPP) are recorded and the clock
+ * is driven at the max of the two, so neither client can starve the
+ * other.  The rate is only programmed when it actually changes.
+ * Returns 0 on success or -EINVAL on bad clock/client/rounding.
+ */
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate,
+			int client)
+{
+	int ret = 0;
+	unsigned long rounded_rate;
+	struct clk *clk = mdp3_res->clocks[clk_type];
+
+	if (clk) {
+		mutex_lock(&mdp3_res->res_mutex);
+		rounded_rate = clk_round_rate(clk, clk_rate);
+		if (IS_ERR_VALUE(rounded_rate)) {
+			pr_err("unable to round rate err=%ld\n", rounded_rate);
+			mutex_unlock(&mdp3_res->res_mutex);
+			return -EINVAL;
+		}
+		if (clk_type == MDP3_CLK_MDP_SRC) {
+			if (client == MDP3_CLIENT_DMA_P) {
+				mdp3_res->dma_core_clk_request = rounded_rate;
+			} else if (client == MDP3_CLIENT_PPP) {
+				mdp3_res->ppp_core_clk_request = rounded_rate;
+			} else {
+				pr_err("unrecognized client=%d\n", client);
+				mutex_unlock(&mdp3_res->res_mutex);
+				return -EINVAL;
+			}
+			/* serve the highest outstanding request */
+			rounded_rate = max(mdp3_res->dma_core_clk_request,
+				mdp3_res->ppp_core_clk_request);
+		}
+		if (rounded_rate != clk_get_rate(clk)) {
+			ret = clk_set_rate(clk, rounded_rate);
+			if (ret)
+				pr_err("clk_set_rate failed ret=%d\n", ret);
+			else
+				pr_debug("mdp clk rate=%lu, client = %d\n",
+					rounded_rate, client);
+		}
+		mutex_unlock(&mdp3_res->res_mutex);
+	} else {
+		pr_err("mdp src clk not setup properly\n");
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+/*
+ * Read back the current rate of clock @clk_idx, or 0 if the clock was
+ * never registered.
+ * NOTE(review): -ENODEV is returned through an unsigned long, so a bad
+ * index yields a huge "rate" rather than an error — callers must range
+ * check clk_idx themselves; left as-is to preserve the existing contract.
+ */
+unsigned long mdp3_get_clk_rate(u32 clk_idx)
+{
+	unsigned long clk_rate = 0;
+	struct clk *clk;
+
+	if (clk_idx >= MDP3_MAX_CLK)
+		return -ENODEV;
+
+	clk = mdp3_res->clocks[clk_idx];
+
+	if (clk) {
+		mutex_lock(&mdp3_res->res_mutex);
+		clk_rate = clk_get_rate(clk);
+		mutex_unlock(&mdp3_res->res_mutex);
+	}
+	return clk_rate;
+}
+
+/*
+ * Look up @clk_name via the device-managed clock API and store it in
+ * the clocks[] table at @clk_idx.  devm ownership means no explicit
+ * put is required on driver detach.
+ */
+static int mdp3_clk_register(char *clk_name, int clk_idx)
+{
+	struct clk *tmp;
+
+	if (clk_idx >= MDP3_MAX_CLK) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	tmp = devm_clk_get(&mdp3_res->pdev->dev, clk_name);
+	if (IS_ERR(tmp)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(tmp);
+	}
+
+	mdp3_res->clocks[clk_idx] = tmp;
+
+	return 0;
+}
+
+/*
+ * Acquire all MDP3 clocks (AHB, AXI, core source, core, vsync) and set
+ * the source clock to the SVS rate as a safe probe-time default.
+ * Returns the first registration error, or the set-rate status.
+ */
+static int mdp3_clk_setup(void)
+{
+	int rc;
+
+	rc = mdp3_clk_register("iface_clk", MDP3_CLK_AHB);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("bus_clk", MDP3_CLK_AXI);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("core_clk_src", MDP3_CLK_MDP_SRC);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("core_clk", MDP3_CLK_MDP_CORE);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("vsync_clk", MDP3_CLK_VSYNC);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, MDP_CORE_CLK_RATE_SVS,
+			MDP3_CLIENT_DMA_P);
+	if (rc)
+		pr_err("%s: Error setting max clock during probe\n", __func__);
+	return rc;
+}
+
+/*
+ * Release every clock acquired in mdp3_clk_setup().
+ * NOTE(review): clk_put() on devm_clk_get() handles is normally redundant
+ * (devm releases them); kept as-is to match the original teardown order.
+ */
+static void mdp3_clk_remove(void)
+{
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AHB]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_AHB]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AXI]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_AXI]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_MDP_SRC]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_MDP_SRC]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_MDP_CORE]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_MDP_CORE]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_VSYNC]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_VSYNC]);
+
+}
+
+/*
+ * Round a requested core clock rate up to the nearest supported
+ * performance level: SVS, super-SVS, or the maximum rate.
+ */
+u64 mdp3_clk_round_off(u64 clk_rate)
+{
+	u64 clk_round_off = 0;
+
+	if (clk_rate <= MDP_CORE_CLK_RATE_SVS)
+		clk_round_off = MDP_CORE_CLK_RATE_SVS;
+	else if (clk_rate <= MDP_CORE_CLK_RATE_SUPER_SVS)
+		clk_round_off = MDP_CORE_CLK_RATE_SUPER_SVS;
+	else
+		clk_round_off = MDP_CORE_CLK_RATE_MAX;
+
+	pr_debug("clk = %llu rounded to = %llu\n",
+		clk_rate, clk_round_off);
+	return clk_round_off;
+}
+
+/*
+ * Reference-counted gate for the whole MDP3 clock group.  Only the
+ * first enable / last disable ("changed") actually touches the clocks,
+ * and those transitions also take/release a runtime-PM reference so the
+ * device can autosuspend when fully idle.  Returns the OR of the
+ * individual clock update results (0 on success).
+ */
+int mdp3_clk_enable(int enable, int dsi_clk)
+{
+	int rc = 0;
+	int changed = 0;
+
+	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
+
+	mutex_lock(&mdp3_res->res_mutex);
+
+	if (enable) {
+		if (mdp3_res->clk_ena == 0)
+			changed++;
+		mdp3_res->clk_ena++;
+	} else {
+		if (mdp3_res->clk_ena) {
+			mdp3_res->clk_ena--;
+			if (mdp3_res->clk_ena == 0)
+				changed++;
+		} else {
+			pr_err("Can not be turned off\n");
+		}
+	}
+	pr_debug("%s: clk_ena=%d changed=%d enable=%d\n",
+		__func__, mdp3_res->clk_ena, changed, enable);
+
+	if (changed) {
+		if (enable)
+			pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
+		rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
+		rc |= mdp3_clk_update(MDP3_CLK_AXI, enable);
+		rc |= mdp3_clk_update(MDP3_CLK_MDP_SRC, enable);
+		rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, enable);
+		rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
+
+		if (!enable) {
+			pm_runtime_mark_last_busy(&mdp3_res->pdev->dev);
+			pm_runtime_put_autosuspend(&mdp3_res->pdev->dev);
+		}
+	}
+
+	mutex_unlock(&mdp3_res->res_mutex);
+	return rc;
+}
+
+/*
+ * Reference-counted enable/disable of bus bandwidth voting plus IOMMU
+ * attachment for @client.  On the first enable it takes a runtime-PM
+ * reference and restores the client's saved bandwidth vote; on the last
+ * disable it zeroes the vote and drops the PM reference.  IOMMU enable
+ * is gated by allow_iommu_update; disable is unconditional.
+ */
+void mdp3_bus_bw_iommu_enable(int enable, int client)
+{
+	struct mdp3_bus_handle_map *bus_handle;
+	int client_idx;
+	u64 ab = 0, ib = 0;
+	int ref_cnt;
+
+	client_idx = MDP3_BUS_HANDLE;
+
+	bus_handle = &mdp3_res->bus_handle[client_idx];
+	if (bus_handle->handle < 1) {
+		pr_err("invalid bus handle %d\n", bus_handle->handle);
+		return;
+	}
+	mutex_lock(&mdp3_res->res_mutex);
+	if (enable)
+		bus_handle->ref_cnt++;
+	else
+		if (bus_handle->ref_cnt)
+			bus_handle->ref_cnt--;
+	ref_cnt = bus_handle->ref_cnt;
+	mutex_unlock(&mdp3_res->res_mutex);
+
+	if (enable) {
+		if (mdp3_res->allow_iommu_update)
+			mdp3_iommu_enable(client);
+		if (ref_cnt == 1) {
+			/* first user: power up and restore saved vote */
+			pm_runtime_get_sync(&mdp3_res->pdev->dev);
+			ab = bus_handle->restore_ab[client];
+			ib = bus_handle->restore_ib[client];
+			mdp3_bus_scale_set_quota(client, ab, ib);
+		}
+	} else {
+		if (ref_cnt == 0) {
+			/* last user: drop vote, allow autosuspend */
+			mdp3_bus_scale_set_quota(client, 0, 0);
+			pm_runtime_mark_last_busy(&mdp3_res->pdev->dev);
+			pm_runtime_put_autosuspend(&mdp3_res->pdev->dev);
+		}
+		mdp3_iommu_disable(client);
+	}
+
+	if (ref_cnt < 0) {
+		pr_err("Ref count < 0, bus client=%d, ref_cnt=%d",
+				client_idx, ref_cnt);
+	}
+}
+
+/*
+ * Derive DMA clock and bandwidth requirements from panel timing:
+ * pixel clock = htotal * vtotal * frame_rate, bandwidth = pixels * bpp.
+ * Each of @clk_rate, @ab and @ib is optional (may be NULL); only the
+ * pointers provided are written.  Fixes a NULL dereference in the
+ * original, which wrote through both ab and ib whenever either was
+ * non-NULL.
+ */
+void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate,
+			u64 *ab, u64 *ib, uint32_t bpp)
+{
+	u32 vtotal = mdss_panel_get_vtotal(panel_info);
+	u32 htotal = mdss_panel_get_htotal(panel_info, 0);
+	u64 clk = htotal * vtotal * panel_info->mipi.frame_rate;
+
+	pr_debug("clk_rate for dma = %llu, bpp = %d\n", clk, bpp);
+	if (clk_rate)
+		*clk_rate = mdp3_clk_round_off(clk);
+
+	/* ab and ib vote should be same for honest voting */
+	if (ab)
+		*ab = clk * bpp;
+	if (ib)
+		*ib = clk * bpp;
+}
+
+/*
+ * Power-sequence helper: on enable it turns on clocks, then IRQs, then
+ * bus/IOMMU; on disable it reverses that order so the hardware is never
+ * accessed without clocks.  Returns the clock-enable status.
+ */
+int mdp3_res_update(int enable, int dsi_clk, int client)
+{
+	int rc = 0;
+
+	if (enable) {
+		rc = mdp3_clk_enable(enable, dsi_clk);
+		if (rc < 0) {
+			pr_err("mdp3_clk_enable failed, enable=%d, dsi_clk=%d\n",
+				enable, dsi_clk);
+			goto done;
+		}
+		mdp3_irq_register();
+		mdp3_bus_bw_iommu_enable(enable, client);
+	} else {
+		mdp3_bus_bw_iommu_enable(enable, client);
+		mdp3_irq_suspend();
+		rc = mdp3_clk_enable(enable, dsi_clk);
+		if (rc < 0) {
+			pr_err("mdp3_clk_enable failed, enable=%d, dsi_clk=%d\n",
+				enable, dsi_clk);
+			goto done;
+		}
+	}
+
+done:
+	return rc;
+}
+
+/* Take one reference on the DSI pixel clock (serialized by res_mutex). */
+int mdp3_get_mdp_dsi_clk(void)
+{
+	int rc;
+
+	mutex_lock(&mdp3_res->res_mutex);
+	rc = mdp3_clk_update(MDP3_CLK_DSI, 1);
+	mutex_unlock(&mdp3_res->res_mutex);
+	return rc;
+}
+
+/* Drop one reference on the DSI pixel clock (serialized by res_mutex). */
+int mdp3_put_mdp_dsi_clk(void)
+{
+	int rc;
+
+	mutex_lock(&mdp3_res->res_mutex);
+	rc = mdp3_clk_update(MDP3_CLK_DSI, 0);
+	mutex_unlock(&mdp3_res->res_mutex);
+	return rc;
+}
+
+/*
+ * Request the MDP interrupt line and leave it masked; it is enabled on
+ * demand via the mdss_util enable/disable helpers.
+ * NOTE(review): IRQF_DISABLED is a no-op/deprecated flag on modern
+ * kernels — kept for the vendor tree's conventions.
+ */
+static int mdp3_irq_setup(void)
+{
+	int ret;
+	struct mdss_hw *mdp3_hw;
+
+	mdp3_hw = &mdp3_res->mdp3_hw;
+	ret = devm_request_irq(&mdp3_res->pdev->dev,
+				mdp3_hw->irq_info->irq,
+				mdp3_irq_handler,
+				IRQF_DISABLED, "MDP", mdp3_res);
+	if (ret) {
+		pr_err("mdp request_irq() failed!\n");
+		return ret;
+	}
+	disable_irq_nosync(mdp3_hw->irq_info->irq);
+	mdp3_res->irq_registered = true;
+	return 0;
+}
+
+
+/*
+ * Translate an MDSS IOMMU domain @type into the msm-registered domain
+ * index, or a negative errno for bad type / uninitialized driver.
+ */
+static int mdp3_get_iommu_domain(u32 type)
+{
+	if (type >= MDSS_IOMMU_MAX_DOMAIN)
+		return -EINVAL;
+
+	if (!mdp3_res)
+		return -ENODEV;
+
+	return mdp3_res->domains[type].domain_idx;
+}
+
+/*
+ * Attach the IOMMU context @context to its domain.  Idempotent: an
+ * already-attached context returns 0 with a warning.
+ */
+int mdp3_iommu_attach(int context)
+{
+	int rc = 0;
+	struct mdp3_iommu_ctx_map *context_map;
+	struct mdp3_iommu_domain_map *domain_map;
+
+	if (context >= MDP3_IOMMU_CTX_MAX)
+		return -EINVAL;
+
+	context_map = mdp3_res->iommu_contexts + context;
+	if (context_map->attached) {
+		pr_warn("mdp iommu already attached\n");
+		return 0;
+	}
+
+	domain_map = context_map->domain;
+
+	rc = iommu_attach_device(domain_map->domain, context_map->ctx);
+	if (rc) {
+		pr_err("mpd3 iommu attach failed\n");
+		return -EINVAL;
+	}
+
+	context_map->attached = true;
+	return 0;
+}
+
+/*
+ * Detach the IOMMU context @context from its domain.  Idempotent: a
+ * context that is not attached returns 0 with a warning.
+ */
+int mdp3_iommu_dettach(int context)
+{
+	struct mdp3_iommu_ctx_map *context_map;
+	struct mdp3_iommu_domain_map *domain_map;
+
+	if (!mdp3_res->iommu_contexts ||
+		context >= MDP3_IOMMU_CTX_MAX)
+		return -EINVAL;
+
+	context_map = mdp3_res->iommu_contexts + context;
+	if (!context_map->attached) {
+		pr_warn("mdp iommu not attached\n");
+		return 0;
+	}
+
+	domain_map = context_map->domain;
+	iommu_detach_device(domain_map->domain, context_map->ctx);
+	context_map->attached = false;
+
+	return 0;
+}
+
+/*
+ * Register each static mdp3_iommu_domains[] layout with the MSM IOMMU
+ * layer and cache the resulting index and domain pointer.  Idempotent;
+ * on success mdp3_res->domains points at the populated table.
+ */
+int mdp3_iommu_domain_init(void)
+{
+	struct msm_iova_layout layout;
+	int i;
+
+	if (mdp3_res->domains) {
+		pr_warn("iommu domain already initialized\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
+		int domain_idx;
+
+		layout.client_name = mdp3_iommu_domains[i].client_name;
+		layout.partitions = mdp3_iommu_domains[i].partitions;
+		layout.npartitions = mdp3_iommu_domains[i].npartitions;
+		layout.is_secure = (i == MDP3_IOMMU_DOMAIN_SECURE);
+
+		domain_idx = msm_register_domain(&layout);
+		if (IS_ERR_VALUE(domain_idx))
+			return -EINVAL;
+
+		mdp3_iommu_domains[i].domain_idx = domain_idx;
+		mdp3_iommu_domains[i].domain = msm_get_iommu_domain(domain_idx);
+		if (IS_ERR_OR_NULL(mdp3_iommu_domains[i].domain)) {
+			pr_err("unable to get iommu domain(%d)\n",
+				domain_idx);
+			if (!mdp3_iommu_domains[i].domain)
+				return -EINVAL;
+			else
+				return PTR_ERR(mdp3_iommu_domains[i].domain);
+		}
+	}
+
+	mdp3_res->domains = mdp3_iommu_domains;
+
+	return 0;
+}
+
+/*
+ * Resolve each named IOMMU context in mdp3_iommu_contexts[] to its
+ * device handle.  Idempotent; on success mdp3_res->iommu_contexts
+ * points at the populated table.
+ */
+int mdp3_iommu_context_init(void)
+{
+	int i;
+
+	if (mdp3_res->iommu_contexts) {
+		pr_warn("iommu context already initialized\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
+		mdp3_iommu_contexts[i].ctx =
+			msm_iommu_get_ctx(mdp3_iommu_contexts[i].ctx_name);
+
+		if (IS_ERR_OR_NULL(mdp3_iommu_contexts[i].ctx)) {
+			pr_warn("unable to get iommu ctx(%s)\n",
+				mdp3_iommu_contexts[i].ctx_name);
+			if (!mdp3_iommu_contexts[i].ctx)
+				return -EINVAL;
+			else
+				return PTR_ERR(mdp3_iommu_contexts[i].ctx);
+		}
+	}
+
+	mdp3_res->iommu_contexts = mdp3_iommu_contexts;
+
+	return 0;
+}
+
+/*
+ * One-shot IOMMU bring-up: init the lock, then domains, then contexts.
+ * Returns the first failure.
+ */
+int mdp3_iommu_init(void)
+{
+	int ret;
+
+	mutex_init(&mdp3_res->iommu_lock);
+
+	ret = mdp3_iommu_domain_init();
+	if (ret) {
+		pr_err("mdp3 iommu domain init fails\n");
+		return ret;
+	}
+
+	ret = mdp3_iommu_context_init();
+	if (ret) {
+		pr_err("mdp3 iommu context init fails\n");
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * Unregister every IOMMU domain set up by mdp3_iommu_domain_init().
+ * Safe to call when domains were never initialized.
+ */
+void mdp3_iommu_deinit(void)
+{
+	int i;
+
+	if (!mdp3_res->domains)
+		return;
+
+	for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
+		if (!IS_ERR_OR_NULL(mdp3_res->domains[i].domain))
+			msm_unregister_domain(mdp3_res->domains[i].domain);
+	}
+}
+
+/*
+ * Read the MDP hardware revision register and verify it matches the
+ * revision this driver supports.  Clocks are enabled around the register
+ * access.  Fixes the original's return-value masking: the -ENODEV from
+ * a version mismatch was overwritten by the clock-disable status, so a
+ * mismatch was reported as success whenever the disable succeeded.
+ */
+static int mdp3_check_version(void)
+{
+	int rc;
+	int clk_rc;
+
+	rc = mdp3_clk_enable(1, 0);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+
+	mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
+
+	if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) {
+		pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev);
+		rc = -ENODEV;
+	}
+
+	clk_rc = mdp3_clk_enable(0, 0);
+	if (clk_rc)
+		pr_err("fail to turn off MDP core clks\n");
+
+	/* report the version mismatch in preference to the clock status */
+	return rc ? rc : clk_rc;
+}
+
+/*
+ * Initialize the software state for the DMA pipes and output interfaces:
+ * defaults for every pipe/interface, then per-hardware exceptions
+ * (DMA_S dither-only, DMA_E and the AHB/LCDC outputs unavailable).
+ * Always returns 0.
+ */
+static int mdp3_hw_init(void)
+{
+	int i;
+
+	for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+		mdp3_res->dma[i].dma_sel = i;
+		mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL;
+		mdp3_res->dma[i].in_use = 0;
+		mdp3_res->dma[i].available = 1;
+		mdp3_res->dma[i].cc_vect_sel = 0;
+		mdp3_res->dma[i].lut_sts = 0;
+		mdp3_res->dma[i].hist_cmap = NULL;
+		mdp3_res->dma[i].gc_cmap = NULL;
+		mutex_init(&mdp3_res->dma[i].pp_lock);
+	}
+	/* hardware-specific overrides to the defaults above */
+	mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER;
+	mdp3_res->dma[MDP3_DMA_E].available = 0;
+
+	for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+		mdp3_res->intf[i].cfg.type = i;
+		mdp3_res->intf[i].active = 0;
+		mdp3_res->intf[i].in_use = 0;
+		mdp3_res->intf[i].available = 1;
+	}
+	mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0;
+	mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0;
+	mdp3_res->smart_blit_en = SMART_BLIT_RGB_EN | SMART_BLIT_YUV_EN;
+	mdp3_res->solid_fill_vote_en = false;
+	return 0;
+}
+
+/*
+ * Toggle MDP dynamic (automatic) clock gating.  Enable sets CGC_EN bits
+ * 10 and 18 and releases the VBIF clock force; disable clears them and
+ * forces VBIF clocks on (0x3).  Clocks are held on around the register
+ * accesses.  Returns the clock enable/disable status.
+ */
+int mdp3_dynamic_clock_gating_ctrl(int enable)
+{
+	int rc = 0;
+	int cgc_cfg = 0;
+	/*Disable dynamic auto clock gating*/
+	pr_debug("%s Status %s\n", __func__, (enable ? "ON":"OFF"));
+	rc = mdp3_clk_enable(1, 0);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+	cgc_cfg = MDP3_REG_READ(MDP3_REG_CGC_EN);
+	if (enable) {
+		cgc_cfg |= (BIT(10));
+		cgc_cfg |= (BIT(18));
+		MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg);
+		VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x0);
+	} else {
+		cgc_cfg &= ~(BIT(10));
+		cgc_cfg &= ~(BIT(18));
+		MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg);
+		VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x3);
+	}
+
+	rc = mdp3_clk_enable(0, 0);
+	if (rc)
+		pr_err("fail to turn off MDP core clks\n");
+
+	return rc;
+}
+
+/**
+ * mdp3_get_panic_lut_cfg() - calculate panic and robust lut mask
+ * @panel_width: Panel width
+ *
+ * DMA buffer has 16 fill levels. Which needs to configured as safe
+ * and panic levels based on panel resolutions.
+ * No. of fill levels used = ((panel active width * 8) / 512).
+ * Roundoff the fill levels if needed.
+ * half of the total fill levels used will be treated as panic levels.
+ * Roundoff panic levels if total used fill levels are odd.
+ *
+ * Sample calculation for 720p display:
+ * Fill levels used = (720 * 8) / 512 = 12.5 after round off 13.
+ * panic levels = 13 / 2 = 6.5 after roundoff 7.
+ * Panic mask = 0x3FFF (2 bits per level)
+ * Robust mask = 0xFF80 (1 bit per level)
+ *
+ * Return: packed config — robust mask in bits [63:32], panic mask in
+ * bits [31:0].
+ */
+u64 mdp3_get_panic_lut_cfg(u32 panel_width)
+{
+	u32 fill_levels = (((panel_width * 8) / 512) + 1);
+	u32 panic_mask = 0;
+	u32 robust_mask = 0;
+	u32 i = 0;
+	u64 panic_config = 0;
+	u32 panic_levels = 0;
+
+	/* half the fill levels, rounded up, become panic levels */
+	panic_levels = fill_levels / 2;
+	if (fill_levels % 2)
+		panic_levels++;
+
+	for (i = 0; i < panic_levels; i++) {
+		panic_mask |= (BIT((i * 2) + 1) | BIT(i * 2));
+		robust_mask |= BIT(i);
+	}
+	/* robust lut is the complement: levels ABOVE the panic region */
+	panic_config = ~robust_mask;
+	panic_config = panic_config << 32;
+	panic_config |= panic_mask;
+	return panic_config;
+}
+
+/*
+ * Re-arm the DMA panic/robust control if it was found disabled.
+ * Caller is expected to hold the clocks on; always returns 0.
+ */
+int mdp3_enable_panic_ctrl(void)
+{
+	int rc = 0;
+
+	if (MDP3_REG_READ(MDP3_PANIC_ROBUST_CTRL) == 0) {
+		pr_err("%s: Enable Panic Control\n", __func__);
+		MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, BIT(0));
+	}
+	return rc;
+}
+
+/*
+ * Program the MDP QoS remapper, watermarks, and the panel-width-derived
+ * panic/robust LUTs.  Fixes two defects in the original: @panel was
+ * dereferenced (panel->panel_info.xres) before the NULL check, and the
+ * clocks were enabled before that check, leaking clock references on
+ * the -EINVAL path.  The NULL check now comes first.
+ */
+int mdp3_qos_remapper_setup(struct mdss_panel_data *panel)
+{
+	int rc = 0;
+	u64 panic_config;
+
+	if (!panel)
+		return -EINVAL;
+
+	panic_config = mdp3_get_panic_lut_cfg(panel->panel_info.xres);
+
+	rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
+	rc |= mdp3_clk_update(MDP3_CLK_AXI, 1);
+	rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 1);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+
+	/* Program MDP QOS Remapper */
+	MDP3_REG_WRITE(MDP3_DMA_P_QOS_REMAPPER, 0x1A9);
+	MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_0, 0x0);
+	MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_1, 0x0);
+	MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_2, 0x0);
+	/* PANIC setting depends on panel width*/
+	MDP3_REG_WRITE(MDP3_PANIC_LUT0, (panic_config & 0xFFFF));
+	MDP3_REG_WRITE(MDP3_PANIC_LUT1, ((panic_config >> 16) & 0xFFFF));
+	MDP3_REG_WRITE(MDP3_ROBUST_LUT, ((panic_config >> 32) & 0xFFFF));
+	MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0x1);
+	pr_debug("Panel width %d Panic Lut0 %x Lut1 %x Robust %x\n",
+		panel->panel_info.xres,
+		MDP3_REG_READ(MDP3_PANIC_LUT0),
+		MDP3_REG_READ(MDP3_PANIC_LUT1),
+		MDP3_REG_READ(MDP3_ROBUST_LUT));
+
+	rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
+	rc |= mdp3_clk_update(MDP3_CLK_AXI, 0);
+	rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 0);
+	if (rc)
+		pr_err("fail to turn off MDP core clks\n");
+	return rc;
+}
+
+/*
+ * Probe-time resource bring-up: IRQ, clocks, ION client, IOMMU, bus
+ * scaling, then software state.  Returns the first failure.
+ * NOTE(review): earlier-acquired resources are not rolled back on a
+ * later failure here; presumably the probe error path handles teardown
+ * via mdp3_res_deinit() — confirm at the caller.
+ */
+static int mdp3_res_init(void)
+{
+	int rc = 0;
+
+	rc = mdp3_irq_setup();
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_setup();
+	if (rc)
+		return rc;
+
+	mdp3_res->ion_client = msm_ion_client_create(mdp3_res->pdev->name);
+	if (IS_ERR_OR_NULL(mdp3_res->ion_client)) {
+		pr_err("msm_ion_client_create() return error (%pK)\n",
+				mdp3_res->ion_client);
+		mdp3_res->ion_client = NULL;
+		return -EINVAL;
+	}
+
+	rc = mdp3_iommu_init();
+	if (rc)
+		return rc;
+
+	mdp3_res->bus_handle = mdp3_bus_handle;
+	rc = mdp3_bus_scale_register();
+	if (rc) {
+		pr_err("unable to register bus scaling\n");
+		return rc;
+	}
+
+	rc = mdp3_hw_init();
+
+	return rc;
+}
+
+/*
+ * Tear down everything mdp3_res_init() set up, in reverse order: bus
+ * scaling, IOMMU contexts/domains, ION client, clocks, and finally the
+ * IRQ (only if it was successfully registered).
+ */
+static void mdp3_res_deinit(void)
+{
+	struct mdss_hw *mdp3_hw;
+	int i;
+
+	mdp3_hw = &mdp3_res->mdp3_hw;
+	mdp3_bus_scale_unregister();
+
+	mutex_lock(&mdp3_res->iommu_lock);
+	for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++)
+		mdp3_iommu_dettach(i);
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	mdp3_iommu_deinit();
+
+	if (!IS_ERR_OR_NULL(mdp3_res->ion_client))
+		ion_client_destroy(mdp3_res->ion_client);
+
+	mdp3_clk_remove();
+
+	if (mdp3_res->irq_registered)
+		devm_free_irq(&mdp3_res->pdev->dev,
+				mdp3_hw->irq_info->irq, mdp3_res);
+}
+
+/*
+ * Map a panel-interface name string to its type enum via the pan_types
+ * table.  Returns MDSS_PANEL_INTF_INVALID for NULL or unknown names.
+ */
+static int mdp3_get_pan_intf(const char *pan_intf)
+{
+	int i, rc = MDSS_PANEL_INTF_INVALID;
+
+	if (!pan_intf)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
+		if (!strcmp(pan_intf, pan_types[i].name)) {
+			rc = pan_types[i].type;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Read the preferred primary panel interface from the device tree
+ * (qcom,mdss-pref-prim-intf) and store the resolved type in pan_cfg.
+ * Returns -ENODEV if the property is absent; unknown names leave the
+ * interface as INVALID but still return the lookup error code.
+ */
+static int mdp3_parse_dt_pan_intf(struct platform_device *pdev)
+{
+	int rc;
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+	const char *prim_intf = NULL;
+
+	rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,mdss-pref-prim-intf", &prim_intf);
+	if (rc)
+		return -ENODEV;
+
+	rc = mdp3_get_pan_intf(prim_intf);
+	if (rc < 0) {
+		mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
+	} else {
+		mdata->pan_cfg.pan_intf = rc;
+		rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * Parse the bootloader-supplied panel string (mdss_mdp3_panel) of the
+ * form "<lk>:<intf>:<panel name>" into @pan_cfg: the LK config flag,
+ * the interface type, and the panel name.  Any malformed input marks
+ * the interface INVALID and returns -EINVAL (caller falls back to DT).
+ */
+static int mdp3_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
+{
+	char *t = NULL;
+	char pan_intf_str[MDSS_MAX_PANEL_LEN];
+	int rc, i, panel_len;
+	char pan_name[MDSS_MAX_PANEL_LEN];
+
+	if (!pan_cfg)
+		return -EINVAL;
+
+	if (mdss_mdp3_panel[0] == '0') {
+		pan_cfg->lk_cfg = false;
+	} else if (mdss_mdp3_panel[0] == '1') {
+		pan_cfg->lk_cfg = true;
+	} else {
+		/* read from dt */
+		pan_cfg->lk_cfg = true;
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	}
+
+	/* skip lk cfg and delimiter; ex: "0:" */
+	strlcpy(pan_name, &mdss_mdp3_panel[2], MDSS_MAX_PANEL_LEN);
+	t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
+	if (!t) {
+		pr_err("%s: pan_name=[%s] invalid\n",
+			__func__, pan_name);
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	}
+
+	/* copy up to 4 chars of the interface token before the ':' */
+	for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
+		pan_intf_str[i] = *(pan_name + i);
+	pan_intf_str[i] = 0;
+	pr_debug("%s:%d panel intf %s\n", __func__, __LINE__, pan_intf_str);
+	/* point to the start of panel name */
+	t = t + 1;
+	strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
+	pr_debug("%s:%d: t=[%s] panel name=[%s]\n", __func__, __LINE__,
+		t, pan_cfg->arg_cfg);
+
+	panel_len = strlen(pan_cfg->arg_cfg);
+	if (!panel_len) {
+		pr_err("%s: Panel name is invalid\n", __func__);
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	}
+
+	rc = mdp3_get_pan_intf(pan_intf_str);
+	pan_cfg->pan_intf = (rc < 0) ? MDSS_PANEL_INTF_INVALID : rc;
+	return 0;
+}
+
+/*
+ * Resolve the panel configuration: prefer the kernel command-line panel
+ * string; fall back to the device tree when it is absent or malformed.
+ * Sets init_done on success.
+ * NOTE(review): arg_cfg[MDSS_MAX_PANEL_LEN] = 0 writes one past the end
+ * unless arg_cfg is declared with MDSS_MAX_PANEL_LEN + 1 elements —
+ * the array declaration is not visible here; verify in the header.
+ */
+static int mdp3_get_cmdline_config(struct platform_device *pdev)
+{
+	int rc, len = 0;
+	int *intf_type;
+	char *panel_name;
+	struct mdss_panel_cfg *pan_cfg;
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+	mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
+	pan_cfg = &mdata->pan_cfg;
+	panel_name = &pan_cfg->arg_cfg[0];
+	intf_type = &pan_cfg->pan_intf;
+
+	/* reads from dt by default */
+	pan_cfg->lk_cfg = true;
+
+	len = strlen(mdss_mdp3_panel);
+
+	if (len > 0) {
+		rc = mdp3_get_pan_cfg(pan_cfg);
+		if (!rc) {
+			pan_cfg->init_done = true;
+			return rc;
+		}
+	}
+
+	rc = mdp3_parse_dt_pan_intf(pdev);
+	/* if pref pan intf is not present */
+	if (rc)
+		pr_err("%s:unable to parse device tree for pan intf\n",
+			__func__);
+	else
+		pan_cfg->init_done = true;
+
+	return rc;
+}
+
+
+/*
+ * Allocate and populate the mdss_hw irq_info for the MDP block, then
+ * register it with the shared mdss_util IRQ dispatcher.  @irq_start is
+ * the platform IRQ number.  Returns -ENOMEM on allocation failure.
+ */
+int mdp3_irq_init(u32 irq_start)
+{
+	struct mdss_hw *mdp3_hw;
+
+	mdp3_hw = &mdp3_res->mdp3_hw;
+
+	mdp3_hw->irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+	if (!mdp3_hw->irq_info)
+		return -ENOMEM;
+
+	mdp3_hw->hw_ndx = MDSS_HW_MDP;
+	mdp3_hw->irq_info->irq = irq_start;
+	mdp3_hw->irq_info->irq_mask = 0;
+	mdp3_hw->irq_info->irq_ena = false;
+	mdp3_hw->irq_info->irq_buzy = false;
+
+	mdp3_res->mdss_util->register_irq(&mdp3_res->mdp3_hw);
+	return 0;
+}
+
+/*
+ * Parse the MDP3 device-tree node: map the MDP and VBIF register
+ * regions, set up the IRQ, resolve the panel config, and read optional
+ * properties (batfet supply, panic control, idle power collapse).
+ * Returns 0 on success or a negative errno; the irq_info allocation is
+ * freed if the panel-override step fails.
+ */
+static int mdp3_parse_dt(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct property *prop = NULL;
+	bool panic_ctrl;
+	int rc;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
+	if (!res) {
+		pr_err("unable to get MDP base address\n");
+		return -EINVAL;
+	}
+
+	mdp3_res->mdp_reg_size = resource_size(res);
+	mdp3_res->mdp_base = devm_ioremap(&pdev->dev, res->start,
+					mdp3_res->mdp_reg_size);
+	if (unlikely(!mdp3_res->mdp_base)) {
+		pr_err("unable to map MDP base\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("MDP HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) mdp3_res->mdp_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys");
+	if (!res) {
+		pr_err("unable to get VBIF base address\n");
+		return -EINVAL;
+	}
+
+	mdp3_res->vbif_reg_size = resource_size(res);
+	mdp3_res->vbif_base = devm_ioremap(&pdev->dev, res->start,
+					mdp3_res->vbif_reg_size);
+	if (unlikely(!mdp3_res->vbif_base)) {
+		pr_err("unable to map VBIF base\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("VBIF HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) mdp3_res->vbif_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("unable to get MDSS irq\n");
+		return -EINVAL;
+	}
+	rc = mdp3_irq_init(res->start);
+	if (rc) {
+		pr_err("%s: Error in irq initialization:rc=[%d]\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = mdp3_get_cmdline_config(pdev);
+	if (rc) {
+		pr_err("%s: Error in panel override:rc=[%d]\n",
+			__func__, rc);
+		/* undo the irq_info allocation from mdp3_irq_init() */
+		kfree(mdp3_res->mdp3_hw.irq_info);
+		return rc;
+	}
+
+	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
+	mdp3_res->batfet_required = prop ? true : false;
+
+	panic_ctrl = of_property_read_bool(
+				pdev->dev.of_node, "qcom,mdss-has-panic-ctrl");
+	mdp3_res->dma[MDP3_DMA_P].has_panic_ctrl = panic_ctrl;
+
+	mdp3_res->idle_pc_enabled = of_property_read_bool(
+		pdev->dev.of_node, "qcom,mdss-idle-power-collapse-enabled");
+
+	return 0;
+}
+
+/*
+ * Enable/disable the CX rail for MDP3.  The regulator is lazily
+ * acquired on first use.  Enable sets the SVS corner and turns the
+ * regulator on; disable turns it off and drops the corner vote to NONE.
+ * Errors are logged but not propagated (void return).
+ */
+void msm_mdp3_cx_ctrl(int enable)
+{
+	int rc;
+
+	if (!mdp3_res->vdd_cx) {
+		mdp3_res->vdd_cx = devm_regulator_get(&mdp3_res->pdev->dev,
+								"vdd-cx");
+		if (IS_ERR_OR_NULL(mdp3_res->vdd_cx)) {
+			pr_debug("unable to get CX reg. rc=%d\n",
+				PTR_RET(mdp3_res->vdd_cx));
+			mdp3_res->vdd_cx = NULL;
+			return;
+		}
+	}
+
+	if (enable) {
+		rc = regulator_set_voltage(
+				mdp3_res->vdd_cx,
+				RPM_REGULATOR_CORNER_SVS_SOC,
+				RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc < 0)
+			goto vreg_set_voltage_fail;
+
+		rc = regulator_enable(mdp3_res->vdd_cx);
+		if (rc) {
+			pr_err("Failed to enable regulator vdd_cx.\n");
+			return;
+		}
+	} else {
+		rc = regulator_disable(mdp3_res->vdd_cx);
+		if (rc) {
+			pr_err("Failed to disable regulator vdd_cx.\n");
+			return;
+		}
+		rc = regulator_set_voltage(
+				mdp3_res->vdd_cx,
+				RPM_REGULATOR_CORNER_NONE,
+				RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc < 0)
+			goto vreg_set_voltage_fail;
+	}
+
+	return;
+vreg_set_voltage_fail:
+	pr_err("Set vltg failed\n");
+}
+
+/*
+ * Enable/disable the battery FET regulator when the platform requires
+ * it (batfet_required set from DT).  The regulator is lazily acquired
+ * on the first enable; a disable before any enable is a no-op.
+ */
+void mdp3_batfet_ctrl(int enable)
+{
+	int rc;
+
+	if (!mdp3_res->batfet_required)
+		return;
+
+	if (!mdp3_res->batfet) {
+		if (enable) {
+			mdp3_res->batfet =
+				devm_regulator_get(&mdp3_res->pdev->dev,
+				"batfet");
+			if (IS_ERR_OR_NULL(mdp3_res->batfet)) {
+				pr_debug("unable to get batfet reg. rc=%d\n",
+					PTR_RET(mdp3_res->batfet));
+				mdp3_res->batfet = NULL;
+				return;
+			}
+		} else {
+			pr_debug("Batfet regulator disable w/o enable\n");
+			return;
+		}
+	}
+
+	if (enable)
+		rc = regulator_enable(mdp3_res->batfet);
+	else
+		rc = regulator_disable(mdp3_res->batfet);
+
+	if (rc < 0)
+		pr_err("%s: reg enable/disable failed", __func__);
+}
+
+/* Public wrapper: currently only the batfet regulator is controlled. */
+void mdp3_enable_regulator(int enable)
+{
+	mdp3_batfet_ctrl(enable);
+}
+
+/*
+ * Unmap @meta's IOVA range from the unsecure MDP3 domain and return the
+ * address range to the MSM IOVA allocator.
+ */
+static void mdp3_iommu_heap_unmap_iommu(struct mdp3_iommu_meta *meta)
+{
+	unsigned int domain_num;
+	unsigned int partition_num = 0;
+	struct iommu_domain *domain;
+
+	domain_num = (mdp3_res->domains +
+			MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		pr_err("Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, meta->iova_addr, meta->mapped_size);
+	msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
+		meta->mapped_size);
+}
+
+/*
+ * kref release callback: remove the meta from the lookup tree, unmap
+ * its IOVA range, drop the dma_buf reference, and free it.  Runs with
+ * iommu_lock held (taken by mdp3_iommu_meta_put()).
+ */
+static void mdp3_iommu_meta_destroy(struct kref *kref)
+{
+	struct mdp3_iommu_meta *meta =
+			container_of(kref, struct mdp3_iommu_meta, ref);
+
+	rb_erase(&meta->node, &mdp3_res->iommu_root);
+	mdp3_iommu_heap_unmap_iommu(meta);
+	dma_buf_put(meta->dbuf);
+	kfree(meta);
+}
+
+
+/* Drop one reference on @meta; the last reference destroys the mapping. */
+static void mdp3_iommu_meta_put(struct mdp3_iommu_meta *meta)
+{
+	/* Need to lock here to prevent race against map/unmap */
+	mutex_lock(&mdp3_res->iommu_lock);
+	kref_put(&meta->ref, mdp3_iommu_meta_destroy);
+	mutex_unlock(&mdp3_res->iommu_lock);
+}
+
+/*
+ * Find the mapping meta keyed by the sg_table pointer in the rb-tree,
+ * or NULL if the buffer was never mapped.  Caller holds iommu_lock.
+ */
+static struct mdp3_iommu_meta *mdp3_iommu_meta_lookup(struct sg_table *table)
+{
+	struct rb_root *root = &mdp3_res->iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct mdp3_iommu_meta *entry = NULL;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct mdp3_iommu_meta, node);
+
+		if (table < entry->table)
+			p = &(*p)->rb_left;
+		else if (table > entry->table)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
+/*
+ * Release one mapping reference for the ion buffer behind @handle.
+ * Warns if the buffer was never mapped via mdp3_self_map_iommu().
+ */
+void mdp3_unmap_iommu(struct ion_client *client, struct ion_handle *handle)
+{
+	struct mdp3_iommu_meta *meta;
+	struct sg_table *table;
+
+	table = ion_sg_table(client, handle);
+
+	mutex_lock(&mdp3_res->iommu_lock);
+	meta = mdp3_iommu_meta_lookup(table);
+	if (!meta) {
+		WARN(1, "%s: buffer was never mapped for %pK\n", __func__,
+				handle);
+		mutex_unlock(&mdp3_res->iommu_lock);
+		return;
+	}
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	mdp3_iommu_meta_put(meta);
+}
+
+/*
+ * Insert @meta into the rb-tree keyed by sg_table pointer.  A duplicate
+ * key triggers a WARN (the buffer is already mapped).  Caller holds
+ * iommu_lock.
+ */
+static void mdp3_iommu_meta_add(struct mdp3_iommu_meta *meta)
+{
+	struct rb_root *root = &mdp3_res->iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct mdp3_iommu_meta *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct mdp3_iommu_meta, node);
+
+		if (meta->table < entry->table) {
+			p = &(*p)->rb_left;
+		} else if (meta->table > entry->table) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: handle %pK already exists\n", __func__,
+				entry->handle);
+			WARN_ON(1);
+		}
+	}
+
+	rb_link_node(&meta->node, parent, p);
+	rb_insert_color(&meta->node, root);
+}
+
+/*
+ * Map @meta's scatterlist into the unsecure MDP3 IOMMU domain with
+ * optional guard @padding mapped before and after the buffer (PPP reads
+ * tiles that may extend past the data).  Allocates the IOVA range,
+ * maps padding / buffer / padding, and unwinds partial mappings on
+ * failure.  On success meta->iova_addr holds the base of the range.
+ */
+static int mdp3_iommu_map_iommu(struct mdp3_iommu_meta *meta,
+	unsigned long align, unsigned long iova_length,
+	unsigned int padding, unsigned long flags)
+{
+	struct iommu_domain *domain;
+	int ret = 0;
+	unsigned long size;
+	unsigned long unmap_size;
+	struct sg_table *table;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	unsigned int domain_num = (mdp3_res->domains +
+			MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+	unsigned int partition_num = 0;
+
+	size = meta->size;
+	table = meta->table;
+
+	/* Use the biggest alignment to allow bigger IOMMU mappings.
+	 * Use the first entry since the first entry will always be the
+	 * biggest entry. To take advantage of bigger mapping sizes both the
+	 * VA and PA addresses have to be aligned to the biggest size.
+	 */
+	if (table->sgl->length > align)
+		align = table->sgl->length;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+			meta->mapped_size, align,
+			(unsigned long *)&meta->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	/* Adding padding to before buffer */
+	if (padding) {
+		unsigned long phys_addr = sg_phys(table->sgl);
+
+		ret = msm_iommu_map_extra(domain, meta->iova_addr, phys_addr,
+				padding, SZ_4K, prot);
+		if (ret)
+			goto out1;
+	}
+
+	/* Mapping actual buffer */
+	ret = iommu_map_range(domain, meta->iova_addr + padding,
+			table->sgl, size, prot);
+	if (ret) {
+		pr_err("%s: could not map %pa in domain %pK\n",
+			__func__, &meta->iova_addr, domain);
+		unmap_size = padding;
+		goto out2;
+	}
+
+	/* Adding padding to end of buffer */
+	if (padding) {
+		unsigned long phys_addr = sg_phys(table->sgl);
+		unsigned long extra_iova_addr = meta->iova_addr +
+				padding + size;
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+				padding, SZ_4K, prot);
+		if (ret) {
+			unmap_size = padding + size;
+			goto out2;
+		}
+	}
+	return ret;
+
+out2:
+	/* unwind the portion that was successfully mapped */
+	iommu_unmap_range(domain, meta->iova_addr, unmap_size);
+out1:
+	msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
+				iova_length);
+
+out:
+	return ret;
+}
+
+/*
+ * Allocate and initialize a mapping meta for an ion buffer, map it into
+ * the IOMMU, and insert it into the lookup tree.  On success *iova is
+ * the mapped base address.  Fixes two leaks in the original: the
+ * ion_share_dma_buf() result was never checked, and on map failure the
+ * acquired dma_buf reference was leaked (kfree without dma_buf_put).
+ */
+static struct mdp3_iommu_meta *mdp3_iommu_meta_create(struct ion_client *client,
+	struct ion_handle *handle, struct sg_table *table, unsigned long size,
+	unsigned long align, unsigned long iova_length, unsigned int padding,
+	unsigned long flags, dma_addr_t *iova)
+{
+	struct mdp3_iommu_meta *meta;
+	int ret;
+
+	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+	if (!meta)
+		return ERR_PTR(-ENOMEM);
+
+	meta->handle = handle;
+	meta->table = table;
+	meta->size = size;
+	meta->mapped_size = iova_length;
+	meta->dbuf = ion_share_dma_buf(client, handle);
+	if (IS_ERR_OR_NULL(meta->dbuf)) {
+		pr_err("%s: Unable to share dma buf\n", __func__);
+		ret = meta->dbuf ? PTR_ERR(meta->dbuf) : -EINVAL;
+		goto out;
+	}
+	kref_init(&meta->ref);
+
+	ret = mdp3_iommu_map_iommu(meta,
+		align, iova_length, padding, flags);
+	if (ret < 0)	{
+		pr_err("%s: Unable to map buffer\n", __func__);
+		goto out_put;
+	}
+
+	*iova = meta->iova_addr;
+	mdp3_iommu_meta_add(meta);
+
+	return meta;
+out_put:
+	/* release the dma_buf reference taken above */
+	dma_buf_put(meta->dbuf);
+out:
+	kfree(meta);
+	return ERR_PTR(ret);
+}
+
+/*
+ * mdp3_self_map_iommu() - map an ion buffer with explicit guard padding
+ *
+ * PPP hw reads in tiles of 16 lines which might touch addresses outside
+ * the mapped region, so the buffer is mapped here manually with extra
+ * padding mapped before and after it.  An existing mapping of the same
+ * sg_table is reused (refcounted) when flags and length match.
+ *
+ * On success *iova points past the front padding at the buffer itself
+ * and *buffer_size is the unpadded size.
+ */
+int mdp3_self_map_iommu(struct ion_client *client, struct ion_handle *handle,
+	unsigned long align, unsigned long padding, dma_addr_t *iova,
+	unsigned long *buffer_size, unsigned long flags,
+	unsigned long iommu_flags)
+{
+	struct mdp3_iommu_meta *iommu_meta = NULL;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	unsigned long size = 0, iova_length = 0;
+	int ret = 0;
+	int i;
+
+	table = ion_sg_table(client, handle);
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	/* total buffer size is the sum of all scatterlist segments */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		size += sg->length;
+
+	padding = PAGE_ALIGN(padding);
+
+	/* Adding 16 lines padding before and after buffer */
+	iova_length = size + 2 * padding;
+
+	/* both the buffer and the padded range must be page multiples */
+	if (size & ~PAGE_MASK) {
+		pr_debug("%s: buffer size %lx is not aligned to %lx",
+			__func__, size, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (iova_length & ~PAGE_MASK) {
+		pr_debug("%s: iova_length %lx is not aligned to %lx",
+			__func__, iova_length, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	mutex_lock(&mdp3_res->iommu_lock);
+	iommu_meta = mdp3_iommu_meta_lookup(table);
+
+	if (!iommu_meta) {
+		/* first mapping of this table: create it */
+		iommu_meta = mdp3_iommu_meta_create(client, handle, table, size,
+			align, iova_length, padding, flags, iova);
+		if (!IS_ERR_OR_NULL(iommu_meta)) {
+			iommu_meta->flags = iommu_flags;
+			ret = 0;
+		} else {
+			ret = PTR_ERR(iommu_meta);
+			goto out_unlock;
+		}
+	} else {
+		/* reuse only when the parameters are identical */
+		if (iommu_meta->flags != iommu_flags) {
+			pr_err("%s: hndl %pK already mapped with diff flag\n",
+				__func__, handle);
+			ret = -EINVAL;
+			goto out_unlock;
+		} else if (iommu_meta->mapped_size != iova_length) {
+			pr_err("%s: hndl %pK already mapped with diff len\n",
+				__func__, handle);
+			ret = -EINVAL;
+			goto out_unlock;
+		} else {
+			kref_get(&iommu_meta->ref);
+			*iova = iommu_meta->iova_addr;
+		}
+	}
+	WARN_ON(iommu_meta->size != size);
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	/* return the iova of the buffer itself, past the front padding */
+	*iova = *iova + padding;
+	*buffer_size = size;
+	return ret;
+
+out_unlock:
+	mutex_unlock(&mdp3_res->iommu_lock);
+out:
+	/*
+	 * NOTE(review): the early "goto out" paths reach here with
+	 * iommu_meta == NULL - confirm mdp3_iommu_meta_put() tolerates NULL.
+	 */
+	mdp3_iommu_meta_put(iommu_meta);
+	return ret;
+}
+
+/*
+ * mdp3_put_img() - release a buffer acquired with mdp3_get_img()
+ * @data:   image descriptor previously filled by mdp3_get_img()
+ * @client: MDP3_CLIENT_* id the buffer was mapped for
+ *
+ * Framebuffer-type buffers only drop the fd reference; dma-buf backed
+ * buffers are unmapped from the iommu and, unless skip_detach is set,
+ * unmapped/detached/released.
+ *
+ * NOTE(review): for PPP/DMA_P clients tab_clone (and tab_clone->sgl)
+ * is freed unconditionally at the end; if this is ever called for a
+ * buffer where the clone was never allocated (e.g. the FB path), the
+ * tab_clone->sgl dereference would crash - verify against callers.
+ */
+int mdp3_put_img(struct mdp3_img_data *data, int client)
+{
+	struct ion_client *iclient = mdp3_res->ion_client;
+	int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+	int dir = DMA_BIDIRECTIONAL;
+
+	if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
+		/* fdput on a zeroed struct fd is a no-op, so a descriptor
+		 * already released by mdp3_get_img()'s error path is safe
+		 */
+		pr_info("mdp3_put_img fb mem buf=0x%pa\n", &data->addr);
+		fdput(data->srcp_f);
+		memset(&data->srcp_f, 0, sizeof(struct fd));
+	} else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+		pr_debug("ion hdl = %pK buf=0x%pa\n", data->srcp_dma_buf,
+			&data->addr);
+		if (!iclient) {
+			pr_err("invalid ion client\n");
+			return -ENOMEM;
+		}
+		if (data->mapped) {
+			/* PPP/DMA_P buffers were mapped through the clone */
+			if (client == MDP3_CLIENT_PPP ||
+				client == MDP3_CLIENT_DMA_P)
+				mdss_smmu_unmap_dma_buf(data->tab_clone,
+					dom, dir, data->srcp_dma_buf);
+			else
+				mdss_smmu_unmap_dma_buf(data->srcp_table,
+					dom, dir, data->srcp_dma_buf);
+			data->mapped = false;
+		}
+		if (!data->skip_detach) {
+			dma_buf_unmap_attachment(data->srcp_attachment,
+				data->srcp_table,
+				mdss_smmu_dma_data_direction(dir));
+			dma_buf_detach(data->srcp_dma_buf,
+					data->srcp_attachment);
+			dma_buf_put(data->srcp_dma_buf);
+			data->srcp_dma_buf = NULL;
+		}
+	} else {
+		return -EINVAL;
+	}
+	if (client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) {
+		kfree(data->tab_clone->sgl);
+		kfree(data->tab_clone);
+	}
+	return 0;
+}
+
+/*
+ * mdp3_get_img() - resolve a userspace buffer into a device address
+ * @img:    userspace descriptor (memory_id is an fb node or dma-buf fd)
+ * @data:   out: filled with address/length and the resources to release
+ * @client: MDP3_CLIENT_* id the buffer is mapped for
+ *
+ * Framebuffer-type buffers are resolved via the fb physical info; other
+ * buffers are imported as dma-bufs and mapped through the smmu.  PPP and
+ * DMA_P clients map through a cloned sg_table whose first segment is the
+ * guard padding, which is skipped at "done:".
+ *
+ * Fixes over the previous version:
+ *  - the invalid-FB_MAJOR path now clears the fd after fdput() so the
+ *    mdp3_put_img() call on the error exit cannot fdput() it twice;
+ *  - the padding adjustment at "done:" is skipped when no clone table
+ *    exists (FB path), avoiding a NULL tab_clone dereference.
+ */
+int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data, int client)
+{
+	struct fd f;
+	int ret = -EINVAL;
+	int fb_num;
+	struct ion_client *iclient = mdp3_res->ion_client;
+	int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+
+	data->flags = img->flags;
+
+	if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
+		f = fdget(img->memory_id);
+		if (f.file == NULL) {
+			pr_err("invalid framebuffer file (%d)\n",
+					img->memory_id);
+			return -EINVAL;
+		}
+		if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+			fb_num = MINOR(f.file->f_dentry->d_inode->i_rdev);
+			ret = mdss_fb_get_phys_info(&data->addr,
+					&data->len, fb_num);
+			if (ret) {
+				pr_err("mdss_fb_get_phys_info() failed\n");
+				fdput(f);
+				memset(&f, 0, sizeof(struct fd));
+			}
+		} else {
+			pr_err("invalid FB_MAJOR\n");
+			fdput(f);
+			/*
+			 * Clear the fd after dropping it so the
+			 * mdp3_put_img() call on the error exit below does
+			 * not fdput() the same file a second time.
+			 */
+			memset(&f, 0, sizeof(struct fd));
+			ret = -EINVAL;
+		}
+		data->srcp_f = f;
+		if (!ret)
+			goto done;
+	} else if (iclient) {
+		data->srcp_dma_buf = dma_buf_get(img->memory_id);
+		if (IS_ERR(data->srcp_dma_buf)) {
+			pr_err("DMA : error on ion_import_fd\n");
+			ret = PTR_ERR(data->srcp_dma_buf);
+			data->srcp_dma_buf = NULL;
+			return ret;
+		}
+
+		data->srcp_attachment =
+			mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
+					&mdp3_res->pdev->dev, dom);
+		if (IS_ERR(data->srcp_attachment)) {
+			ret = PTR_ERR(data->srcp_attachment);
+			goto err_put;
+		}
+
+		data->srcp_table =
+			dma_buf_map_attachment(data->srcp_attachment,
+			mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+		if (IS_ERR(data->srcp_table)) {
+			ret = PTR_ERR(data->srcp_table);
+			goto err_detach;
+		}
+
+		if (client == MDP3_CLIENT_PPP ||
+				client == MDP3_CLIENT_DMA_P) {
+			/* clone adds a leading padding segment for PPP */
+			data->tab_clone =
+				mdss_smmu_sg_table_clone(data->srcp_table,
+						GFP_KERNEL, true);
+			if (IS_ERR_OR_NULL(data->tab_clone)) {
+				if (!(data->tab_clone))
+					ret = -EINVAL;
+				else
+					ret = PTR_ERR(data->tab_clone);
+				goto clone_err;
+			}
+			ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+					data->tab_clone, dom,
+					&data->addr, &data->len,
+					DMA_BIDIRECTIONAL);
+		} else {
+			ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+					data->srcp_table, dom, &data->addr,
+					&data->len, DMA_BIDIRECTIONAL);
+		}
+
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("smmu map dma buf failed: (%d)\n", ret);
+			goto err_unmap;
+		}
+
+		data->mapped = true;
+		data->skip_detach = false;
+	}
+done:
+	/*
+	 * Skip the guard segment the clone inserted.  The clone only exists
+	 * for dma-buf backed PPP/DMA_P mappings; FB-type buffers reach this
+	 * label without one.
+	 */
+	if ((client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) &&
+			data->tab_clone) {
+		data->addr += data->tab_clone->sgl->length;
+		data->len -= data->tab_clone->sgl->length;
+	}
+	if (!ret && (img->offset < data->len)) {
+		data->addr += img->offset;
+		data->len -= img->offset;
+
+		pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n",
+			img->memory_id, data->srcp_dma_buf,
+			&data->addr, data->len);
+
+	} else {
+		mdp3_put_img(data, client);
+		return -EINVAL;
+	}
+	return ret;
+
+clone_err:
+	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+		mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+err_detach:
+	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+err_put:
+	dma_buf_put(data->srcp_dma_buf);
+	return ret;
+err_unmap:
+	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+		mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+	dma_buf_put(data->srcp_dma_buf);
+
+	if (client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) {
+		kfree(data->tab_clone->sgl);
+		kfree(data->tab_clone);
+	}
+	return ret;
+
+}
+
+/*
+ * mdp3_iommu_enable() - take a reference on the smmu attachment
+ * @client: MDP3_CLIENT_* id (used only for the debug trace)
+ *
+ * The first caller attaches the smmu; subsequent callers just bump the
+ * reference count.  Previously a failed attach was followed by
+ * "rc = mdss_smmu_detach(...)", so a successful detach reset rc to 0
+ * and the reference count was incremented even though the attach had
+ * failed.  The detach is now best-effort cleanup and the attach error
+ * is reported to the caller.
+ */
+int mdp3_iommu_enable(int client)
+{
+	int rc = 0;
+
+	mutex_lock(&mdp3_res->iommu_lock);
+
+	if (mdp3_res->iommu_ref_cnt == 0) {
+		rc = mdss_smmu_attach(mdss_res);
+		if (rc)
+			mdss_smmu_detach(mdss_res);
+	}
+
+	if (!rc)
+		mdp3_res->iommu_ref_cnt++;
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	pr_debug("client :%d total_ref_cnt: %d\n",
+			client, mdp3_res->iommu_ref_cnt);
+	return rc;
+}
+
+/*
+ * mdp3_iommu_disable() - drop a reference on the smmu attachment
+ * @client: MDP3_CLIENT_* id (used only for the debug trace)
+ *
+ * Detaches the smmu when the last reference goes away; complains about
+ * unbalanced calls.
+ */
+int mdp3_iommu_disable(int client)
+{
+	int rc = 0;
+
+	mutex_lock(&mdp3_res->iommu_lock);
+	if (!mdp3_res->iommu_ref_cnt) {
+		pr_err("iommu ref count unbalanced for client %d\n", client);
+	} else {
+		mdp3_res->iommu_ref_cnt--;
+		pr_debug("client :%d total_ref_cnt: %d\n",
+				client, mdp3_res->iommu_ref_cnt);
+		if (!mdp3_res->iommu_ref_cnt)
+			rc = mdss_smmu_detach(mdss_res);
+	}
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	return rc;
+}
+
+/*
+ * mdp3_iommu_ctrl() - enable/disable the iommu on behalf of the DSI client
+ *
+ * Does nothing while iommu updates are blocked during continuous splash
+ * handoff (allow_iommu_update is false).
+ */
+int mdp3_iommu_ctrl(int enable)
+{
+	if (mdp3_res->allow_iommu_update == false)
+		return 0;
+
+	return enable ? mdp3_iommu_enable(MDP3_CLIENT_DSI) :
+			mdp3_iommu_disable(MDP3_CLIENT_DSI);
+}
+
+/* Per-framebuffer MDP3 init: controller first, then PPP resources. */
+static int mdp3_init(struct msm_fb_data_type *mfd)
+{
+	int rc = mdp3_ctrl_init(mfd);
+
+	if (rc) {
+		pr_err("mdp3 ctl init fail\n");
+		return rc;
+	}
+
+	rc = mdp3_ppp_res_init(mfd);
+	if (rc)
+		pr_err("mdp3 ppp res init fail\n");
+
+	return rc;
+}
+
+u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+	u32 pixels = (fb_index == 0) ? ALIGN(xres, 32) : xres;
+
+	return pixels * bpp;
+}
+
+/*
+ * mdp3_parse_dt_splash() - locate the bootloader's splash framebuffer
+ * @mfd: framebuffer data whose device node is parsed
+ *
+ * The reserved region is taken either from the legacy
+ * "qcom,memblock-reserve" property (base/size pair) or, when that is
+ * absent, from the "qcom,cont-splash-memory" child node's
+ * "linux,contiguous-region" phandle.  On success the base and size are
+ * stored in mdp3_res.  A failure is only reported when continuous
+ * splash is actually enabled; otherwise rc is reset to 0.
+ */
+__ref int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd)
+{
+	struct platform_device *pdev = mfd->pdev;
+	int len = 0, rc = 0;
+	u32 offsets[2];
+	struct device_node *pnode, *child_node;
+	struct property *prop = NULL;
+
+	mfd->splash_info.splash_logo_enabled =
+				of_property_read_bool(pdev->dev.of_node,
+				"qcom,mdss-fb-splash-logo-enabled");
+
+	prop = of_find_property(pdev->dev.of_node, "qcom,memblock-reserve",
+				&len);
+	if (!prop) {
+		pr_debug("Read memblock reserve settings for fb failed\n");
+		pr_debug("Read cont-splash-memory settings\n");
+	}
+
+	if (len) {
+		/* legacy path: property holds <base size> u32 cells */
+		len = len / sizeof(u32);
+
+		rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,memblock-reserve", offsets, len);
+		if (rc) {
+			pr_err("error reading mem reserve settings for fb\n");
+			rc = -EINVAL;
+			goto error;
+		}
+	} else {
+		child_node = of_get_child_by_name(pdev->dev.of_node,
+					"qcom,cont-splash-memory");
+		if (!child_node) {
+			pr_err("splash mem child node is not present\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		pnode = of_parse_phandle(child_node, "linux,contiguous-region",
+					0);
+		if (pnode != NULL) {
+			const u32 *addr;
+			u64 size;
+
+			addr = of_get_address(pnode, 0, &size, NULL);
+			if (!addr) {
+				pr_err("failed to parse the splash memory address\n");
+				of_node_put(pnode);
+				rc = -EINVAL;
+				goto error;
+			}
+			/* base address is stored as two cells */
+			offsets[0] = (u32) of_read_ulong(addr, 2);
+			offsets[1] = (u32) size;
+			of_node_put(pnode);
+		} else {
+			pr_err("mem reservation for splash screen fb not present\n");
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+	/* the bootloader must have actually reserved the region */
+	if (!memblock_is_reserved(offsets[0])) {
+		pr_debug("failed to reserve memory for fb splash\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	mdp3_res->splash_mem_addr = offsets[0];
+	mdp3_res->splash_mem_size = offsets[1];
+error:
+	if (rc && mfd->panel_info->cont_splash_enabled)
+		pr_err("no rsvd mem found in DT for splash screen\n");
+	else
+		rc = 0;
+
+	return rc;
+}
+
+/*
+ * mdp3_free() - unmap and forget the framebuffer memory for @mfd
+ *
+ * Removes both the direct iommu mapping of the physical fb region and
+ * the contiguous-buffer mapping at mfd->iova, then clears the fb
+ * bookkeeping.  No-op (with a log) when nothing was allocated.
+ */
+void mdp3_free(struct msm_fb_data_type *mfd)
+{
+	size_t size = 0;
+	int dom;
+	unsigned long phys;
+
+	if (!mfd->iova || !mfd->fbi->screen_base) {
+		pr_info("no fbmem allocated\n");
+		return;
+	}
+
+	size = mfd->fbi->fix.smem_len;
+	phys = mfd->fbi->fix.smem_start;
+	dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
+	iommu_unmap(mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain,
+			phys, size);
+	msm_iommu_unmap_contig_buffer(mfd->iova, dom, 0, size);
+
+	mfd->fbi->screen_base = NULL;
+	mfd->fbi->fix.smem_start = 0;
+	mfd->iova = 0;
+}
+
+/*
+ * mdp3_release_splash_memory() - return the splash region to the system
+ *
+ * For a video-mode panel that was handed off from the bootloader the
+ * splash region was smmu-mapped in mdp3 and is unmapped first; then the
+ * fb mappings are freed and the reserved memblock/bootmem pages are
+ * released back to the page allocator.
+ */
+void mdp3_release_splash_memory(struct msm_fb_data_type *mfd)
+{
+	/* Give back the reserved memory to the system */
+	if (mdp3_res->splash_mem_addr) {
+		if ((mfd->panel.type == MIPI_VIDEO_PANEL) &&
+				(mdp3_res->cont_splash_en)) {
+			mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_size);
+		}
+		mdp3_free(mfd);
+		pr_debug("mdp3_release_splash_memory\n");
+		memblock_free(mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_size);
+		free_bootmem_late(mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_size);
+		mdp3_res->splash_mem_addr = 0;
+	}
+}
+
+/* Claim the first free DMA pipe that advertises @capability, or NULL. */
+struct mdp3_dma *mdp3_get_dma_pipe(int capability)
+{
+	struct mdp3_dma *dma;
+	int i;
+
+	for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+		dma = &mdp3_res->dma[i];
+		if (dma->available && !dma->in_use &&
+				(dma->capability & capability)) {
+			dma->in_use = true;
+			return dma;
+		}
+	}
+	return NULL;
+}
+
+/* Claim the first free display interface of @type, or NULL when none. */
+struct mdp3_intf *mdp3_get_display_intf(int type)
+{
+	struct mdp3_intf *intf;
+	int i;
+
+	for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+		intf = &mdp3_res->intf[i];
+		if (intf->available && !intf->in_use &&
+				intf->cfg.type == type) {
+			intf->in_use = true;
+			return intf;
+		}
+	}
+	return NULL;
+}
+
+/* Return the unsecure iommu domain index, or -ENODEV before probe. */
+static int mdp3_fb_mem_get_iommu_domain(void)
+{
+	return mdp3_res ?
+		mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx :
+		-ENODEV;
+}
+
+/* Return whether the bootloader's continuous splash is still active.
+ * NOTE(review): assumes mdp3_res is non-NULL (i.e. probe completed);
+ * confirm before adding early call sites.
+ */
+int mdp3_get_cont_spash_en(void)
+{
+	return mdp3_res->cont_splash_en;
+}
+
+/*
+ * mdp3_is_display_on() - query whether the display is already scanning out
+ * @pdata: panel data; the panel type selects which register to probe
+ *
+ * Returns nonzero when the bootloader left the display running.  Also
+ * latches the current DMA_P input buffer address as the splash memory
+ * address.  Core clocks are turned on around the register reads.
+ */
+static int mdp3_is_display_on(struct mdss_panel_data *pdata)
+{
+	int rc = 0;
+	u32 status;
+
+	rc = mdp3_clk_enable(1, 0);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		/* video mode: check the timing engine enable bit */
+		status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
+		rc = status & 0x1;
+	} else {
+		/* command mode: DMA_P output-select field (bits 19-20);
+		 * 0x080000 is the DSI command value also used in
+		 * mdp3_panel_get_intf_status()
+		 */
+		status = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
+		status &= 0x180000;
+		rc = (status == 0x080000);
+	}
+
+	/* remember where the bootloader's frame buffer lives */
+	mdp3_res->splash_mem_addr = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_ADDR);
+
+	if (mdp3_clk_enable(0, 0))
+		pr_err("fail to turn off MDP core clks\n");
+	return rc;
+}
+
+/*
+ * mdp3_continuous_splash_on() - take over a display lit by the bootloader
+ * @pdata: panel that is already scanning out
+ *
+ * Votes clocks and bus bandwidth to keep the scanout alive, initializes
+ * PPP, marks the active interface, and enables the regulator so the
+ * handoff to the kernel driver is glitch-free.
+ */
+static int mdp3_continuous_splash_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_panel_info *panel_info = &pdata->panel_info;
+	struct mdp3_bus_handle_map *bus_handle;
+	u64 ab = 0;
+	u64 ib = 0;
+	u64 mdp_clk_rate = 0;
+	int rc = 0;
+
+	pr_debug("mdp3__continuous_splash_on\n");
+
+	bus_handle = &mdp3_res->bus_handle[MDP3_BUS_HANDLE];
+	if (bus_handle->handle < 1) {
+		pr_err("invalid bus handle %d\n", bus_handle->handle);
+		return -EINVAL;
+	}
+	/* derive clock rate and ab/ib votes from the panel timing */
+	mdp3_calc_dma_res(panel_info, &mdp_clk_rate, &ab, &ib, panel_info->bpp);
+
+	mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
+			MDP3_CLIENT_DMA_P);
+	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
+			MDP3_CLIENT_DMA_P);
+
+	/* NOTE(review): rc from the bus vote is overwritten by the
+	 * mdp3_res_update() call below - confirm a failed vote is benign.
+	 */
+	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+	bus_handle->restore_ab[MDP3_CLIENT_DMA_P] = ab;
+	bus_handle->restore_ib[MDP3_CLIENT_DMA_P] = ib;
+
+	rc = mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to enable clk\n");
+		return rc;
+	}
+
+	rc = mdp3_ppp_init();
+	if (rc) {
+		pr_err("ppp init failed\n");
+		goto splash_on_err;
+	}
+
+	if (panel_info->type == MIPI_VIDEO_PANEL)
+		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1;
+	else
+		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1;
+
+	mdp3_enable_regulator(true);
+	mdp3_res->cont_splash_en = 1;
+	return 0;
+
+splash_on_err:
+	if (mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P))
+		pr_err("%s: Unable to disable mdp3 clocks\n", __func__);
+
+	return rc;
+}
+
+/*
+ * mdp3_panel_register_done() - finish panel registration with mdp3
+ * @pdata: the panel that just registered
+ *
+ * Computes and stores the maximum bandwidth, takes a runtime PM
+ * reference when idle power collapse is unavailable, and hands over a
+ * bootloader-lit display (continuous splash) when one is active.
+ * Fixes the "kerenl" typo in the splash mismatch error message.
+ */
+static int mdp3_panel_register_done(struct mdss_panel_data *pdata)
+{
+	int rc = 0;
+	u64 ab = 0; u64 ib = 0;
+	u64 mdp_clk_rate = 0;
+
+	/* Store max bandwidth supported in mdp res */
+	mdp3_calc_dma_res(&pdata->panel_info, &mdp_clk_rate, &ab, &ib,
+			MAX_BPP_SUPPORTED);
+	do_div(ab, 1024);
+	mdp3_res->max_bw = ab+1;
+
+	/*
+	 * If idle pc feature is not enabled, then get a reference to the
+	 * runtime device which will be released when device is turned off
+	 */
+	if (!mdp3_res->idle_pc_enabled ||
+		pdata->panel_info.type != MIPI_CMD_PANEL) {
+		pm_runtime_get_sync(&mdp3_res->pdev->dev);
+	}
+
+	if (pdata->panel_info.cont_splash_enabled) {
+		if (!mdp3_is_display_on(pdata)) {
+			pr_err("continuous splash, but bootloader is not\n");
+			return 0;
+		}
+		rc = mdp3_continuous_splash_on(pdata);
+	} else {
+		if (mdp3_is_display_on(pdata)) {
+			pr_err("lk continuous splash, but kernel not\n");
+			rc = mdp3_continuous_splash_on(pdata);
+		}
+	}
+	/*
+	 * We want to prevent iommu from being enabled if there is
+	 * continue splash screen. This would have happened in
+	 * res_update in continuous_splash_on without this flag.
+	 */
+	if (pdata->panel_info.cont_splash_enabled == false)
+		mdp3_res->allow_iommu_update = true;
+
+	mdss_res->pdata = pdata;
+	return rc;
+}
+
+/*
+ * mdp3_clear_irq() - acknowledge pending interrupts in @interrupt_mask
+ * @interrupt_mask: bits to clear from the MDP3 interrupt status register
+ *
+ * Used for command mode panels when the system is entering the idle
+ * screen state, so stale sync interrupts do not fire afterwards.
+ */
+void mdp3_clear_irq(u32 interrupt_mask)
+{
+	unsigned long flags;
+	u32 pending;
+
+	spin_lock_irqsave(&mdp3_res->irq_lock, flags);
+	pending = MDP3_REG_READ(MDP3_REG_INTR_STATUS) & interrupt_mask;
+	if (pending)
+		MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, pending);
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flags);
+}
+
+/*
+ * mdp3_autorefresh_disable() - turn off the auto-refresh block
+ * @panel_info: panel configuration
+ *
+ * Only command mode panels use auto refresh; the config register is
+ * cleared when it is currently programmed.
+ */
+int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info)
+{
+	if (panel_info->type != MIPI_CMD_PANEL)
+		return 0;
+
+	if (MDP3_REG_READ(MDP3_REG_AUTOREFRESH_CONFIG_P))
+		MDP3_REG_WRITE(MDP3_REG_AUTOREFRESH_CONFIG_P, 0);
+	return 0;
+}
+
+/* Mark the splash handoff complete so iommu updates are allowed again. */
+int mdp3_splash_done(struct mdss_panel_info *panel_info)
+{
+	if (!panel_info->cont_splash_enabled) {
+		mdp3_res->allow_iommu_update = true;
+		return 0;
+	}
+
+	pr_err("continuous splash is on and splash done called\n");
+	return -EINVAL;
+}
+
+/* debugfs "stat" show handler: dump the accumulated underrun counter. */
+static int mdp3_debug_dump_stats_show(struct seq_file *s, void *v)
+{
+	struct mdp3_hw_resource *res = (struct mdp3_hw_resource *)s->private;
+
+	seq_printf(s, "underrun: %08u\n", res->underrun_cnt);
+
+	return 0;
+}
+/* generates mdp3_debug_dump_stats_fops used by mdp3_debug_init() */
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdp3_debug_dump_stats);
+
+/* debugfs hook: toggle the MDP core clocks on (nonzero) or off (0). */
+static void mdp3_debug_enable_clock(int on)
+{
+	mdp3_clk_enable(on ? 1 : 0, 0);
+}
+
+/*
+ * mdp3_debug_init() - allocate mdss_res and set up debugfs
+ * @pdev: mdp3 platform device
+ *
+ * Creates the global mdss_data_type used for the shared debug
+ * infrastructure, registers the debugfs tree, the "stat" node and the
+ * register-dump base.  Torn down by mdp3_debug_deinit().
+ */
+static int mdp3_debug_init(struct platform_device *pdev)
+{
+	int rc;
+	struct mdss_data_type *mdata;
+	struct mdss_debug_data *mdd;
+
+	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
+	if (!mdata)
+		return -ENOMEM;
+
+	/* publish the shared mdss data for the common debug code */
+	mdss_res = mdata;
+	mutex_init(&mdata->reg_lock);
+	mutex_init(&mdata->reg_bus_lock);
+	mutex_init(&mdata->bus_lock);
+	INIT_LIST_HEAD(&mdata->reg_bus_clist);
+	atomic_set(&mdata->sd_client_count, 0);
+	atomic_set(&mdata->active_intf_cnt, 0);
+	mdss_res->mdss_util = mdp3_res->mdss_util;
+
+	mdata->debug_inf.debug_enable_clock = mdp3_debug_enable_clock;
+	mdata->mdp_rev = mdp3_res->mdp_rev;
+
+	rc = mdss_debugfs_init(mdata);
+	if (rc)
+		return rc;
+
+	mdd = mdata->debug_inf.debug_data;
+	if (!mdd)
+		return -EINVAL;
+
+	debugfs_create_file("stat", 0644, mdd->root, mdp3_res,
+				&mdp3_debug_dump_stats_fops);
+
+	/* expose the MDP register space for debugfs register dumps */
+	rc = mdss_debug_register_base(NULL, mdp3_res->mdp_base,
+					mdp3_res->mdp_reg_size, NULL);
+
+	return rc;
+}
+
+/* Tear down debugfs and release the mdss_data_type from mdp3_debug_init(). */
+static void mdp3_debug_deinit(struct platform_device *pdev)
+{
+	if (!mdss_res)
+		return;
+
+	mdss_debugfs_remove(mdss_res);
+	devm_kfree(&pdev->dev, mdss_res);
+	mdss_res = NULL;
+}
+
+/*
+ * DMA_P underflow interrupt callback: count and trace the underrun and,
+ * when color-correction is enabled and not already pending, schedule the
+ * underrun worker (which handles the CCS reprogramming - implementation
+ * not visible here).
+ */
+static void mdp3_dma_underrun_intr_handler(int type, void *arg)
+{
+	struct mdp3_dma *dma = &mdp3_res->dma[MDP3_DMA_P];
+
+	mdp3_res->underrun_cnt++;
+	pr_err_ratelimited("display underrun detected count=%d\n",
+			mdp3_res->underrun_cnt);
+	ATRACE_INT("mdp3_dma_underrun_intr_handler", mdp3_res->underrun_cnt);
+
+	/* ccs_dirty acts as a "work already scheduled" latch */
+	if (dma->ccs_config.ccs_enable && !dma->ccs_config.ccs_dirty) {
+		dma->ccs_config.ccs_dirty = true;
+		schedule_work(&dma->underrun_work);
+	}
+}
+
+/* Source pixel formats the PPP blitter accepts; folded into the
+ * ppp_formats bitmap by __mdp3_set_supported_formats().
+ */
+uint32_t ppp_formats_supported[] = {
+	MDP_RGB_565,
+	MDP_BGR_565,
+	MDP_RGB_888,
+	MDP_BGR_888,
+	MDP_XRGB_8888,
+	MDP_ARGB_8888,
+	MDP_RGBA_8888,
+	MDP_BGRA_8888,
+	MDP_RGBX_8888,
+	MDP_Y_CBCR_H2V1,
+	MDP_Y_CBCR_H2V2,
+	MDP_Y_CBCR_H2V2_ADRENO,
+	MDP_Y_CBCR_H2V2_VENUS,
+	MDP_Y_CRCB_H2V1,
+	MDP_Y_CRCB_H2V2,
+	MDP_YCRYCB_H2V1,
+	MDP_BGRX_8888,
+};
+
+/* Output pixel formats DMA_P can scan out */
+uint32_t dma_formats_supported[] = {
+	MDP_RGB_565,
+	MDP_RGB_888,
+	MDP_XRGB_8888,
+};
+
+/* Populate the ppp/dma format bitmaps from the tables above. */
+static void __mdp3_set_supported_formats(void)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ppp_formats_supported); idx++)
+		SET_BIT(mdp3_res->ppp_formats, ppp_formats_supported[idx]);
+
+	for (idx = 0; idx < ARRAY_SIZE(dma_formats_supported); idx++)
+		SET_BIT(mdp3_res->dma_formats, dma_formats_supported[idx]);
+}
+
+/* Append the "ppp_input_fmts=..." and "dma_output_fmts=..." byte lists
+ * of the format bitmaps to the caps buffer; *cnt is advanced by the
+ * number of characters written.
+ */
+static void __update_format_supported_info(char *buf, int *cnt)
+{
+	int j;
+	size_t len = PAGE_SIZE;
+	/* number of bytes backing each format bitmap */
+	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
+/* scnprintf() is bounded by the remaining space, so *cnt never overruns */
+#define SPRINT(fmt, ...) \
+		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("ppp_input_fmts=");
+	for (j = 0; j < num_bytes; j++)
+		SPRINT("%d,", mdp3_res->ppp_formats[j]);
+	SPRINT("\ndma_output_fmts=");
+	for (j = 0; j < num_bytes; j++)
+		SPRINT("%d,", mdp3_res->dma_formats[j]);
+	SPRINT("\n");
+#undef SPRINT
+}
+
+/*
+ * sysfs "caps" show: report the fixed MDP3 capability list (one DMA
+ * pipe, PPP blitter, no SMP/cursor/RGB/VIG pipes) plus the supported
+ * format bitmaps and the bandwidth limits computed at panel registration.
+ */
+static ssize_t mdp3_show_capabilities(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	size_t len = PAGE_SIZE;
+	int cnt = 0;
+
+/* bounded append into the sysfs page, same idiom as above */
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("dma_pipes=%d\n", 1);
+	SPRINT("mdp_version=3\n");
+	SPRINT("hw_rev=%d\n", 305);
+	SPRINT("pipe_count:%d\n", 1);
+	SPRINT("pipe_num:%d pipe_type:dma pipe_ndx:%d rects:%d ", 0, 1, 1);
+	SPRINT("pipe_is_handoff:%d display_id:%d\n", 0, 0);
+	__update_format_supported_info(buf, &cnt);
+	SPRINT("rgb_pipes=%d\n", 0);
+	SPRINT("vig_pipes=%d\n", 0);
+	SPRINT("dma_pipes=%d\n", 1);
+	SPRINT("blending_stages=%d\n", 1);
+	SPRINT("cursor_pipes=%d\n", 0);
+	SPRINT("max_cursor_size=%d\n", 0);
+	SPRINT("smp_count=%d\n", 0);
+	SPRINT("smp_size=%d\n", 0);
+	SPRINT("smp_mb_per_pipe=%d\n", 0);
+	SPRINT("max_downscale_ratio=%d\n", PPP_DOWNSCALE_MAX);
+	SPRINT("max_upscale_ratio=%d\n", PPP_UPSCALE_MAX);
+	SPRINT("max_pipe_bw=%u\n", mdp3_res->max_bw);
+	SPRINT("max_bandwidth_low=%u\n", mdp3_res->max_bw);
+	SPRINT("max_bandwidth_high=%u\n", mdp3_res->max_bw);
+	SPRINT("max_mdp_clk=%u\n", MDP_CORE_CLK_RATE_MAX);
+	SPRINT("clk_fudge_factor=%u,%u\n", CLK_FUDGE_NUM, CLK_FUDGE_DEN);
+	SPRINT("features=has_ppp\n");
+
+#undef SPRINT
+
+	return cnt;
+}
+
+static DEVICE_ATTR(caps, 0444, mdp3_show_capabilities, NULL);
+
+/*
+ * sysfs "smart_blit" store: parse an unsigned bitmask where
+ * SMART_BLIT_RGB_EN / SMART_BLIT_YUV_EN enable smart blit per plane type.
+ *
+ * Fixes over the previous version: the value is parsed with
+ * kstrtouint() into the u32 (the old code passed a u32 * to
+ * kstrtoint(), a signedness mismatch) and rc is an int so the %d
+ * format matches.
+ */
+static ssize_t mdp3_store_smart_blit(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	u32 data = 0;
+	int rc;
+
+	rc = kstrtouint(buf, 10, &data);
+	if (rc) {
+		pr_err("kstrtouint failed. rc=%d\n", rc);
+		return rc;
+	}
+	mdp3_res->smart_blit_en = data;
+	pr_debug("mdp3 smart blit RGB %s YUV %s\n",
+		(mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ?
+		"ENABLED" : "DISABLED",
+		(mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ?
+		"ENABLED" : "DISABLED");
+	return len;
+}
+
+/* sysfs "smart_blit" show: print the current enable bitmask. */
+static ssize_t mdp3_show_smart_blit(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	pr_debug("mdp3 smart blit RGB %s YUV %s\n",
+		(mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ?
+		"ENABLED" : "DISABLED",
+		(mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ?
+		"ENABLED" : "DISABLED");
+	return snprintf(buf, PAGE_SIZE, "%d\n", mdp3_res->smart_blit_en);
+}
+
+static DEVICE_ATTR(smart_blit, 0664,
+		mdp3_show_smart_blit, mdp3_store_smart_blit);
+
+/* sysfs attributes published on the mdp platform device */
+static struct attribute *mdp3_fs_attrs[] = {
+	&dev_attr_caps.attr,
+	&dev_attr_smart_blit.attr,
+	NULL
+};
+
+static struct attribute_group mdp3_fs_attr_group = {
+	.attrs = mdp3_fs_attrs
+};
+
+/* Create the "caps" and "smart_blit" sysfs nodes under the mdp device. */
+static int mdp3_register_sysfs(struct platform_device *pdev)
+{
+	return sysfs_create_group(&pdev->dev.kobj, &mdp3_fs_attr_group);
+}
+
+/* Add an "mdp" symlink under @dev pointing at the mdp3 platform device. */
+int mdp3_create_sysfs_link(struct device *dev)
+{
+	return sysfs_create_link_nowarn(&dev->kobj,
+			&mdp3_res->pdev->dev.kobj, "mdp");
+}
+
+/*
+ * mdp3_misr_get() - run one MISR CRC capture and return the value
+ * @misr_resp: selects the capture block; crc_value[0] receives the CRC
+ *
+ * Programs the MISR test bus for the requested DSI block, kicks one
+ * frame and polls for the capture-done bit.  Returns 0 with the CRC
+ * filled in, or a poll-timeout/-EINVAL error.
+ */
+int mdp3_misr_get(struct mdp_misr *misr_resp)
+{
+	int result = 0, ret = -1;
+	int crc = 0;
+
+	pr_debug("%s CRC Capture on DSI\n", __func__);
+	switch (misr_resp->block_id) {
+	case DISPLAY_MISR_DSI0:
+		MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
+		/* Sleep for one vsync after DSI video engine is disabled */
+		msleep(20);
+		/* Enable DSI_VIDEO_0 MISR Block */
+		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20);
+		/* Reset MISR Block */
+		MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1);
+		/* Clear MISR capture done bit */
+		MDP3_REG_WRITE(MDP3_REG_CAPTURED_DSI_PCLK, 0);
+		/* Enable MDP DSI interface */
+		MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 1);
+		ret = readl_poll_timeout(mdp3_res->mdp_base +
+			MDP3_REG_CAPTURED_DSI_PCLK, result,
+			result & MDP3_REG_CAPTURED_DSI_PCLK_MASK,
+			MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0);
+		if (ret == 0) {
+			/* Disable DSI MISR interface */
+			MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x0);
+			crc = MDP3_REG_READ(MDP3_REG_MISR_CAPT_VAL_DSI_PCLK);
+			pr_debug("CRC Val %d\n", crc);
+		} else {
+			pr_err("CRC Read Timed Out\n");
+		}
+		break;
+
+	case DISPLAY_MISR_DSI_CMD:
+		/* Select DSI PCLK Domain */
+		MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 0x004);
+		/* Select Block id DSI_CMD */
+		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10);
+		/* Reset MISR Block */
+		MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1);
+		/* Drive Data on Test Bus */
+		MDP3_REG_WRITE(MDP3_REG_EXPORT_MISR_DSI_PCLK, 0);
+		/* Kick off DMA_P */
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 0x11);
+		/* Wait for DMA_P Done */
+		ret = readl_poll_timeout(mdp3_res->mdp_base +
+			MDP3_REG_INTR_STATUS, result,
+			result & MDP3_INTR_DMA_P_DONE_BIT,
+			MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+		if (ret == 0) {
+			crc = MDP3_REG_READ(MDP3_REG_MISR_CURR_VAL_DSI_PCLK);
+			pr_debug("CRC Val %d\n", crc);
+		} else {
+			pr_err("CRC Read Timed Out\n");
+		}
+		break;
+
+	default:
+		pr_err("%s CRC Capture not supported\n", __func__);
+		ret = -EINVAL;
+		break;
+	}
+
+	misr_resp->crc_value[0] = crc;
+	pr_debug("%s, CRC Capture on DSI Param Block = 0x%x, CRC 0x%x\n",
+			__func__, misr_resp->block_id, misr_resp->crc_value[0]);
+	return ret;
+}
+
+/*
+ * mdp3_misr_set() - arm the MISR CRC capture block
+ * @misr_req: requested capture block and parameters
+ *
+ * Routes the test bus to the DSI pclk domain, selects the block id
+ * (0x20 = DSI video, 0x10 = DSI command) and resets the MISR counter.
+ * The two supported cases differ only in the mode value written.
+ */
+int mdp3_misr_set(struct mdp_misr *misr_req)
+{
+	u32 mode;
+
+	pr_debug("%s Parameters Block = %d Cframe Count = %d CRC = %d\n",
+			__func__, misr_req->block_id, misr_req->frame_count,
+			misr_req->crc_value[0]);
+
+	switch (misr_req->block_id) {
+	case DISPLAY_MISR_DSI0:
+		pr_debug("In the case DISPLAY_MISR_DSI0\n");
+		mode = 0x20;
+		break;
+	case DISPLAY_MISR_DSI_CMD:
+		pr_debug("In the case DISPLAY_MISR_DSI_CMD\n");
+		mode = 0x10;
+		break;
+	default:
+		pr_err("%s CRC Capture not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1);
+	MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, mode);
+	MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1);
+	return 0;
+}
+
+/*
+ * Return the stored panel config when it matches @intf_val, NULL when it
+ * does not, and -EPROBE_DEFER before the panel config has been parsed.
+ */
+struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val)
+{
+	if (!mdp3_res || !mdp3_res->pan_cfg.init_done)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (mdp3_res->pan_cfg.pan_intf != intf_val)
+		return NULL;
+
+	return &mdp3_res->pan_cfg;
+}
+EXPORT_SYMBOL(mdp3_panel_intf_type);
+
+/*
+ * mdp3_footswitch_ctrl() - gate the MDP GDSC (footswitch) regulator
+ * @enable: nonzero to power up, zero to power down
+ *
+ * Tracks state in fs_ena so repeated calls are no-ops.  When powering
+ * down while interfaces are still active, idle_pc is latched so the
+ * driver knows the hardware state was lost to idle power collapse.
+ */
+int mdp3_footswitch_ctrl(int enable)
+{
+	int rc = 0;
+	int active_cnt = 0;
+
+	mutex_lock(&mdp3_res->fs_idle_pc_lock);
+	MDSS_XLOG(enable);
+	if (!mdp3_res->fs_ena && enable) {
+		rc = regulator_enable(mdp3_res->fs);
+		if (rc) {
+			pr_err("mdp footswitch ctrl enable failed\n");
+			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+			return -EINVAL;
+		}
+		pr_debug("mdp footswitch ctrl enable success\n");
+		mdp3_enable_regulator(true);
+		mdp3_res->fs_ena = true;
+	} else if (!enable && mdp3_res->fs_ena) {
+		active_cnt = atomic_read(&mdp3_res->active_intf_cnt);
+		if (active_cnt != 0) {
+			/*
+			 * Turning off GDSC while overlays are still
+			 * active.
+			 */
+			mdp3_res->idle_pc = true;
+			pr_debug("idle pc. active overlays=%d\n",
+				active_cnt);
+		}
+		mdp3_enable_regulator(false);
+		rc = regulator_disable(mdp3_res->fs);
+		if (rc) {
+			pr_err("mdp footswitch ctrl disable failed\n");
+			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+			return -EINVAL;
+		}
+		mdp3_res->fs_ena = false;
+		pr_debug("mdp3 footswitch ctrl disable configured\n");
+	} else {
+		pr_debug("mdp3 footswitch ctrl already configured\n");
+	}
+
+	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+	return rc;
+}
+
+/*
+ * mdp3_panel_get_intf_status() - check whether the DSI interface is live
+ * @disp_num:  display number (unused on mdp3)
+ * @intf_type: only MDSS_PANEL_INTF_DSI is supported
+ *
+ * Reads the DMA_P output-select field (bits 19-20 of DMA_P_CONFIG):
+ * 0x180000 means DSI video out and 0x080000 means DSI command out.
+ * Returns nonzero when either is selected, i.e. the bootloader left the
+ * interface running.
+ */
+int mdp3_panel_get_intf_status(u32 disp_num, u32 intf_type)
+{
+	int rc = 0, status = 0;
+
+	if (intf_type != MDSS_PANEL_INTF_DSI)
+		return 0;
+
+	rc = mdp3_clk_enable(1, 0);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+
+	status = (MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG) & 0x180000);
+	/* DSI video mode or command mode */
+	rc = (status == 0x180000) || (status == 0x080000);
+
+	if (mdp3_clk_enable(0, 0))
+		pr_err("fail to turn off MDP core clks\n");
+	return rc;
+}
+
+/*
+ * mdp3_probe() - platform probe for the MDP3 display controller
+ *
+ * Allocates the global mdp3_res, parses DT, initializes clocks/bus/irq
+ * resources, debugfs, runtime PM and sysfs, and registers the mdp
+ * interface with the framebuffer core.  On failure all acquired
+ * resources are torn down at probe_done/get_util_fail.
+ *
+ * Fixes over the previous version: the duplicated panel_intf_type
+ * assignment is removed, and a failed regulator lookup now routes
+ * through the common cleanup instead of returning directly and leaking
+ * the irq/res_init state.
+ */
+static int mdp3_probe(struct platform_device *pdev)
+{
+	int rc;
+	static struct msm_mdp_interface mdp3_interface = {
+	.init_fnc = mdp3_init,
+	.fb_mem_get_iommu_domain = mdp3_fb_mem_get_iommu_domain,
+	.panel_register_done = mdp3_panel_register_done,
+	.fb_stride = mdp3_fb_stride,
+	.check_dsi_status = mdp3_check_dsi_ctrl_status,
+	};
+
+	struct mdp3_intr_cb underrun_cb = {
+		.cb = mdp3_dma_underrun_intr_handler,
+		.data = NULL,
+	};
+
+	pr_debug("%s: START\n", __func__);
+	if (!pdev->dev.of_node) {
+		pr_err("MDP driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	if (mdp3_res) {
+		pr_err("MDP already initialized\n");
+		return -EINVAL;
+	}
+
+	mdp3_res = devm_kzalloc(&pdev->dev, sizeof(struct mdp3_hw_resource),
+				GFP_KERNEL);
+	if (mdp3_res == NULL)
+		return -ENOMEM;
+
+	pdev->id = 0;
+	mdp3_res->pdev = pdev;
+	mutex_init(&mdp3_res->res_mutex);
+	mutex_init(&mdp3_res->fs_idle_pc_lock);
+	spin_lock_init(&mdp3_res->irq_lock);
+	platform_set_drvdata(pdev, mdp3_res);
+	atomic_set(&mdp3_res->active_intf_cnt, 0);
+	mutex_init(&mdp3_res->reg_bus_lock);
+	INIT_LIST_HEAD(&mdp3_res->reg_bus_clist);
+
+	mdp3_res->mdss_util = mdss_get_util_intf();
+	if (mdp3_res->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		rc =  -ENODEV;
+		goto get_util_fail;
+	}
+	/* publish mdp3's hooks through the shared utility interface */
+	mdp3_res->mdss_util->get_iommu_domain = mdp3_get_iommu_domain;
+	mdp3_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
+	mdp3_res->mdss_util->iommu_ctrl = mdp3_iommu_ctrl;
+	mdp3_res->mdss_util->bus_scale_set_quota = mdp3_bus_scale_set_quota;
+	mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type;
+	mdp3_res->mdss_util->dyn_clk_gating_ctrl =
+		mdp3_dynamic_clock_gating_ctrl;
+	mdp3_res->mdss_util->panel_intf_status = mdp3_panel_get_intf_status;
+
+	/* boot param can disable the display entirely */
+	if (mdp3_res->mdss_util->param_check(mdss_mdp3_panel)) {
+		mdp3_res->mdss_util->display_disabled = true;
+		mdp3_res->mdss_util->mdp_probe_done = true;
+		return 0;
+	}
+
+	rc = mdp3_parse_dt(pdev);
+	if (rc)
+		goto probe_done;
+
+	rc = mdp3_res_init();
+	if (rc) {
+		pr_err("unable to initialize mdp3 resources\n");
+		goto probe_done;
+	}
+
+	mdp3_res->fs_ena = false;
+	mdp3_res->fs = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR_OR_NULL(mdp3_res->fs)) {
+		pr_err("unable to get mdss gdsc regulator\n");
+		/* run the common cleanup instead of leaking irq/res state */
+		rc = -EINVAL;
+		goto probe_done;
+	}
+
+	rc = mdp3_debug_init(pdev);
+	if (rc) {
+		pr_err("unable to initialize mdp debugging\n");
+		goto probe_done;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
+	if (mdp3_res->idle_pc_enabled) {
+		pr_debug("%s: Enabling autosuspend\n", __func__);
+		pm_runtime_use_autosuspend(&pdev->dev);
+	}
+	/* Enable PM runtime */
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	/* without runtime PM the footswitch must be powered manually */
+	if (!pm_runtime_enabled(&pdev->dev)) {
+		rc = mdp3_footswitch_ctrl(1);
+		if (rc) {
+			pr_err("unable to turn on FS\n");
+			goto probe_done;
+		}
+	}
+
+	rc = mdp3_check_version();
+	if (rc) {
+		pr_err("mdp3 check version failed\n");
+		goto probe_done;
+	}
+	rc = mdp3_register_sysfs(pdev);
+	if (rc)
+		pr_err("unable to register mdp sysfs nodes\n");
+
+	rc = mdss_fb_register_mdp_instance(&mdp3_interface);
+	if (rc)
+		pr_err("unable to register mdp instance\n");
+
+	rc = mdp3_set_intr_callback(MDP3_INTR_LCDC_UNDERFLOW,
+					&underrun_cb);
+	if (rc)
+		pr_err("unable to configure interrupt callback\n");
+
+	rc = mdss_smmu_init(mdss_res, &pdev->dev);
+	if (rc)
+		pr_err("mdss smmu init failed\n");
+
+	__mdp3_set_supported_formats();
+
+	mdp3_res->mdss_util->mdp_probe_done = true;
+	pr_debug("%s: END\n", __func__);
+
+probe_done:
+	if (IS_ERR_VALUE(rc))
+		kfree(mdp3_res->mdp3_hw.irq_info);
+get_util_fail:
+	if (IS_ERR_VALUE(rc)) {
+		mdp3_res_deinit();
+
+		if (mdp3_res->mdp_base)
+			devm_iounmap(&pdev->dev, mdp3_res->mdp_base);
+
+		devm_kfree(&pdev->dev, mdp3_res);
+		mdp3_res = NULL;
+
+		if (mdss_res) {
+			devm_kfree(&pdev->dev, mdss_res);
+			mdss_res = NULL;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_panel_get_boot_cfg() - report bootloader (LK) panel configuration.
+ *
+ * Return: -EPROBE_DEFER until mdp3 probe has populated pan_cfg,
+ * 1 if the bootloader supplied a panel config (lk_cfg), else 0.
+ */
+int mdp3_panel_get_boot_cfg(void)
+{
+	int rc;
+
+	if (!mdp3_res || !mdp3_res->pan_cfg.init_done)
+		rc = -EPROBE_DEFER;
+	else if (mdp3_res->pan_cfg.lk_cfg)
+		rc = 1;
+	else
+		rc = 0;
+	return rc;
+}
+
+/* Common suspend path: gate the MDP footswitch (GDSC) off. Always succeeds. */
+static int mdp3_suspend_sub(void)
+{
+	mdp3_footswitch_ctrl(0);
+	return 0;
+}
+
+/* Common resume path: turn the MDP footswitch (GDSC) back on. Always succeeds. */
+static int mdp3_resume_sub(void)
+{
+	mdp3_footswitch_ctrl(1);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System (S2R) suspend callback: logs and defers to the common suspend path. */
+static int mdp3_pm_suspend(struct device *dev)
+{
+	dev_dbg(dev, "Display pm suspend\n");
+	MDSS_XLOG(XLOG_FUNC_ENTRY);
+	return mdp3_suspend_sub();
+}
+
+/*
+ * System resume callback. Forces the runtime PM status back to "suspended"
+ * (disable/set_suspended/enable) before powering up, so runtime PM state is
+ * consistent even if the device was runtime-active when the system suspended.
+ */
+static int mdp3_pm_resume(struct device *dev)
+{
+	dev_dbg(dev, "Display pm resume\n");
+
+	/*
+	 * It is possible that the runtime status of the mdp device may
+	 * have been active when the system was suspended. Reset the runtime
+	 * status to suspended state after a complete system resume.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY);
+	return mdp3_resume_sub();
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+/* Legacy platform-bus suspend hook, used only when CONFIG_PM_SLEEP is off. */
+static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	pr_debug("Display suspend\n");
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY);
+	return mdp3_suspend_sub();
+}
+
+/* Legacy platform-bus resume hook, used only when CONFIG_PM_SLEEP is off. */
+static int mdp3_resume(struct platform_device *pdev)
+{
+	pr_debug("Display resume\n");
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY);
+	return mdp3_resume_sub();
+}
+#else
+#define mdp3_suspend NULL
+#define mdp3_resume NULL
+#endif
+
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Runtime PM resume: power the footswitch on and, unless we are coming out
+ * of idle power collapse, resume child framebuffer panels.
+ */
+static int mdp3_runtime_resume(struct device *dev)
+{
+	bool device_on = true;
+
+	dev_dbg(dev, "Display pm runtime resume, active overlay cnt=%d\n",
+		atomic_read(&mdp3_res->active_intf_cnt));
+
+	/* do not resume panels when coming out of idle power collapse */
+	if (!mdp3_res->idle_pc)
+		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY);
+	mdp3_footswitch_ctrl(1);
+
+	return 0;
+}
+
+/* Runtime PM idle: returning 0 lets the PM core proceed to runtime suspend. */
+static int mdp3_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "Display pm runtime idle\n");
+
+	return 0;
+}
+
+/*
+ * Runtime PM suspend: refuse with -EBUSY while MDP clocks are still on,
+ * otherwise gate the footswitch and (outside idle power collapse) suspend
+ * child framebuffer panels.
+ */
+static int mdp3_runtime_suspend(struct device *dev)
+{
+	bool device_on = false;
+
+	dev_dbg(dev, "Display pm runtime suspend, active overlay cnt=%d\n",
+		atomic_read(&mdp3_res->active_intf_cnt));
+
+	if (mdp3_res->clk_ena) {
+		pr_debug("Clk turned on...MDP suspend failed\n");
+		return -EBUSY;
+	}
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY);
+	mdp3_footswitch_ctrl(0);
+
+	/* do not suspend panels when going in to idle power collapse */
+	if (!mdp3_res->idle_pc)
+		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+
+	return 0;
+}
+#endif
+
+/*
+ * Dev PM ops: system sleep and runtime PM callbacks. NOTE(review): the
+ * legacy .suspend/.resume hooks in mdp3_driver coexist with these; the PM
+ * core prefers dev_pm_ops when CONFIG_PM_SLEEP is set — confirm intended.
+ */
+static const struct dev_pm_ops mdp3_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdp3_pm_suspend,
+			mdp3_pm_resume)
+	SET_RUNTIME_PM_OPS(mdp3_runtime_suspend,
+			mdp3_runtime_resume,
+			mdp3_runtime_idle)
+};
+
+
+/*
+ * Platform remove: tear down runtime PM, bus-scale voting, clocks and
+ * debugfs. Returns -ENODEV if no driver data was attached.
+ */
+static int mdp3_remove(struct platform_device *pdev)
+{
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+	pm_runtime_disable(&pdev->dev);
+	mdp3_bus_scale_unregister();
+	mdp3_clk_remove();
+	mdp3_debug_deinit(pdev);
+	return 0;
+}
+
+/* Device-tree match table: binds this driver to "qcom,mdss_mdp3" nodes. */
+static const struct of_device_id mdp3_dt_match[] = {
+	{ .compatible = "qcom,mdss_mdp3",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdp3_dt_match);
+EXPORT_COMPAT("qcom,mdss_mdp3");
+
+/*
+ * Platform driver descriptor. mdp3_suspend/mdp3_resume are NULL when
+ * CONFIG_PM_SLEEP is enabled (dev_pm_ops in .driver.pm is used instead).
+ */
+static struct platform_driver mdp3_driver = {
+	.probe = mdp3_probe,
+	.remove = mdp3_remove,
+	.suspend = mdp3_suspend,
+	.resume = mdp3_resume,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdp3",
+		.of_match_table = mdp3_dt_match,
+		.pm             = &mdp3_pm_ops,
+	},
+};
+
+/* Module init: register the mdp3 platform driver; propagates registration errors. */
+static int __init mdp3_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mdp3_driver);
+	if (ret) {
+		pr_err("register mdp3 driver failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* "panel=" module/boot parameter handed over by the bootloader (LK). */
+module_param_string(panel, mdss_mdp3_panel, MDSS_MAX_PANEL_LEN, 0600);
+/*
+ * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>
+ * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
+ * config; <pan_intf> is dsi:0
+ * <pan_intf_cfg> is panel interface specific string
+ * Ex: This string is panel's device node name from DT
+ * for DSI interface
+ */
+MODULE_PARM_DESC(panel, "lk supplied panel selection string");
+module_init(mdp3_driver_init);
diff --git a/drivers/video/fbdev/msm/mdp3.h b/drivers/video/fbdev/msm/mdp3.h
new file mode 100644
index 0000000..6fb39a7
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3.h
@@ -0,0 +1,292 @@
+/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDP3_H
+#define MDP3_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include "mdss_dsi_clk.h"
+#include "mdp3_dma.h"
+#include "mdss_fb.h"
+#include "mdss.h"
+
+#define MDP_VSYNC_CLK_RATE 19200000
+#define MDP_CORE_CLK_RATE_SVS 160000000
+#define MDP_CORE_CLK_RATE_SUPER_SVS 200000000
+#define MDP_CORE_CLK_RATE_MAX 307200000
+
+#define CLK_FUDGE_NUM 12
+#define CLK_FUDGE_DEN 10
+
+/* PPP cant work at SVS for panel res above qHD */
+#define SVS_MAX_PIXEL (540 * 960)
+
+#define KOFF_TIMEOUT_MS 84
+#define KOFF_TIMEOUT msecs_to_jiffies(KOFF_TIMEOUT_MS)
+#define WAIT_DMA_TIMEOUT msecs_to_jiffies(84)
+
+/*
+ * MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3
+ * so using them together for MDP_SMART_BLIT.
+ */
+#define MDP_SMART_BLIT 0xC0000000
+
+#define BITS_PER_BYTE 8
+#define MDP_IMGTYPE_LIMIT1 0x100
+#define BITS_TO_BYTES(x) DIV_ROUND_UP(x, BITS_PER_BYTE)
+
+/* Indices into mdp3_hw_resource.clocks[] / clock_ref_count[]. */
+enum {
+	MDP3_CLK_AHB,
+	MDP3_CLK_AXI,
+	MDP3_CLK_MDP_SRC,
+	MDP3_CLK_MDP_CORE,
+	MDP3_CLK_VSYNC,
+	MDP3_CLK_DSI,
+	MDP3_MAX_CLK
+};
+
+/* Bus-scale handle indices (single handle on MDP3). */
+enum {
+	MDP3_BUS_HANDLE,
+	MDP3_BUS_HANDLE_MAX,
+};
+
+/* IOMMU domain indices: non-secure and secure mappings. */
+enum {
+	MDP3_IOMMU_DOMAIN_UNSECURE,
+	MDP3_IOMMU_DOMAIN_SECURE,
+	MDP3_IOMMU_DOMAIN_MAX,
+};
+
+/* IOMMU context-bank indices for the MDP client. */
+enum {
+	MDP3_IOMMU_CTX_MDP_0,
+	MDP3_IOMMU_CTX_MDP_1,
+	MDP3_IOMMU_CTX_MAX
+};
+
+/* Keep DSI entry in sync with mdss
+ * which is being used by DSI 6G
+ */
+/* Client IDs for clock/bus/iommu voting; DSI fixed at 1 to match mdss. */
+enum {
+	MDP3_CLIENT_DMA_P,
+	MDP3_CLIENT_DSI = 1,
+	MDP3_CLIENT_PPP,
+	MDP3_CLIENT_IOMMU,
+	MDP3_CLIENT_MAX,
+};
+
+/* Positions in a (domain, partition) identifier pair — TODO confirm usage site. */
+enum {
+	DI_PARTITION_NUM = 0,
+	DI_DOMAIN_NUM = 1,
+	DI_MAX,
+};
+
+/*
+ * Per-bus-handle bookkeeping: msm_bus vectors/usecases plus per-client
+ * ab/ib bandwidth votes (and saved values for restore).
+ */
+struct mdp3_bus_handle_map {
+	struct msm_bus_vectors *bus_vector;
+	struct msm_bus_paths *usecases;
+	struct msm_bus_scale_pdata *scale_pdata;
+	int current_bus_idx;
+	int ref_cnt;
+	u64 restore_ab[MDP3_CLIENT_MAX];
+	u64 restore_ib[MDP3_CLIENT_MAX];
+	u64 ab[MDP3_CLIENT_MAX];
+	u64 ib[MDP3_CLIENT_MAX];
+	u32 handle;
+};
+
+/* One IOMMU domain (secure/unsecure) and its partition layout. */
+struct mdp3_iommu_domain_map {
+	u32 domain_type;
+	char *client_name;
+	int npartitions;
+	int domain_idx;
+	struct iommu_domain *domain;
+};
+
+/* One IOMMU context bank, its owning domain, and attach state. */
+struct mdp3_iommu_ctx_map {
+	u32 ctx_type;
+	struct mdp3_iommu_domain_map *domain;
+	char *ctx_name;
+	struct device *ctx;
+	int attached;
+};
+
+/* Refcounted record of an ION buffer mapped into the IOMMU (rb-tree node). */
+struct mdp3_iommu_meta {
+	struct rb_node node;
+	struct ion_handle *handle;
+	struct rb_root iommu_maps;
+	struct kref ref;
+	struct sg_table *table;
+	struct dma_buf *dbuf;
+	int mapped_size;
+	unsigned long size;
+	dma_addr_t iova_addr;
+	unsigned long flags;
+};
+
+#define MDP3_MAX_INTR 28
+
+/* Per-interrupt callback and its opaque user argument. */
+struct mdp3_intr_cb {
+	void (*cb)(int type, void *);
+	void *data;
+};
+
+#define SMART_BLIT_RGB_EN 1
+#define SMART_BLIT_YUV_EN 2
+
+/*
+ * Global MDP3 hardware state, allocated once at probe (see mdp3_res).
+ * Groups clock/bus/regulator handles, IOMMU state, DMA/interface engines,
+ * IRQ bookkeeping, splash-memory info and power-management flags.
+ */
+struct mdp3_hw_resource {
+	struct platform_device *pdev;
+	u32 mdp_rev;
+
+	struct mutex res_mutex;
+
+	/* clock handles indexed by MDP3_CLK_*, with per-clock refcounts */
+	struct clk *clocks[MDP3_MAX_CLK];
+	int clock_ref_count[MDP3_MAX_CLK];
+	unsigned long dma_core_clk_request;
+	unsigned long ppp_core_clk_request;
+	struct mdss_hw mdp3_hw;
+	struct mdss_util_intf *mdss_util;
+
+	/* register apertures mapped at probe */
+	char __iomem *mdp_base;
+	size_t mdp_reg_size;
+
+	char __iomem *vbif_base;
+	size_t vbif_reg_size;
+
+	struct mdp3_bus_handle_map *bus_handle;
+
+	struct ion_client *ion_client;
+	struct mdp3_iommu_domain_map *domains;
+	struct mdp3_iommu_ctx_map *iommu_contexts;
+	unsigned int iommu_ref_cnt;
+	bool allow_iommu_update;
+	struct ion_handle *ion_handle;
+	struct mutex iommu_lock;
+	struct mutex fs_idle_pc_lock;
+
+	struct mdp3_dma dma[MDP3_DMA_MAX];
+	struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
+
+	struct rb_root iommu_root;
+	/* protects irq mask/refcount state below */
+	spinlock_t irq_lock;
+	u32 irq_ref_count[MDP3_MAX_INTR];
+	u32 irq_mask;
+	int irq_ref_cnt;
+	struct mdp3_intr_cb callbacks[MDP3_MAX_INTR];
+	u32 underrun_cnt;
+
+	int irq_registered;
+
+	/* continuous-splash region handed over from the bootloader */
+	unsigned long splash_mem_addr;
+	u32 splash_mem_size;
+	struct mdss_panel_cfg pan_cfg;
+
+	int clk_prepare_count;
+	int cont_splash_en;
+
+	bool batfet_required;
+	struct regulator *batfet;
+	struct regulator *vdd_cx;
+	struct regulator *fs;
+	bool fs_ena;
+	int clk_ena;
+	bool idle_pc_enabled;
+	bool idle_pc;
+	atomic_t active_intf_cnt;
+	u8 smart_blit_en;
+	bool solid_fill_vote_en;
+	struct list_head reg_bus_clist;
+	struct mutex reg_bus_lock;
+
+	u32 max_bw;
+
+	/* bitmaps of supported source formats (bit per MDP_IMGTYPE) */
+	u8 ppp_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+	u8 dma_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+};
+
+/*
+ * Descriptor for one mapped source image buffer: device address/length,
+ * the dma-buf/ION objects backing it, and mapping state flags.
+ */
+struct mdp3_img_data {
+	dma_addr_t addr;
+	unsigned long len;
+	u32 offset;
+	u32 flags;
+	u32 padding;
+	int p_need;
+	struct ion_handle *srcp_ihdl;
+	u32 dir;
+	u32 domain;
+	bool mapped;
+	bool skip_detach;
+	struct fd srcp_f;
+	struct dma_buf *srcp_dma_buf;
+	struct dma_buf_attachment *srcp_attachment;
+	struct sg_table *srcp_table;
+	struct sg_table *tab_clone;
+};
+
+extern struct mdp3_hw_resource *mdp3_res;
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability);
+struct mdp3_intf *mdp3_get_display_intf(int type);
+void mdp3_irq_enable(int type);
+void mdp3_irq_disable(int type);
+void mdp3_irq_disable_nosync(int type);
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb);
+void mdp3_irq_register(void);
+void mdp3_irq_deregister(void);
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, int client);
+int mdp3_clk_enable(int enable, int dsi_clk);
+int mdp3_res_update(int enable, int dsi_clk, int client);
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
+int mdp3_put_img(struct mdp3_img_data *data, int client);
+int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
+ int client);
+int mdp3_iommu_enable(int client);
+int mdp3_iommu_disable(int client);
+int mdp3_iommu_is_attached(void);
+void mdp3_free(struct msm_fb_data_type *mfd);
+int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd);
+void mdp3_release_splash_memory(struct msm_fb_data_type *mfd);
+int mdp3_create_sysfs_link(struct device *dev);
+int mdp3_get_cont_spash_en(void);
+int mdp3_get_mdp_dsi_clk(void);
+int mdp3_put_mdp_dsi_clk(void);
+
+int mdp3_misr_set(struct mdp_misr *misr_req);
+int mdp3_misr_get(struct mdp_misr *misr_resp);
+void mdp3_enable_regulator(int enable);
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+ uint32_t interval);
+int mdp3_dynamic_clock_gating_ctrl(int enable);
+int mdp3_footswitch_ctrl(int enable);
+int mdp3_qos_remapper_setup(struct mdss_panel_data *panel);
+int mdp3_splash_done(struct mdss_panel_info *panel_info);
+int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info);
+u64 mdp3_clk_round_off(u64 clk_rate);
+
+void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate,
+ u64 *ab, u64 *ib, uint32_t bpp);
+void mdp3_clear_irq(u32 interrupt_mask);
+int mdp3_enable_panic_ctrl(void);
+
+int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd,
+ struct file *file, struct mdp_layer_commit_v1 *commit);
+int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd,
+ struct file *file, struct mdp_layer_commit_v1 *commit);
+
+#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
+#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
+#define VBIF_REG_WRITE(off, val) writel_relaxed(val, mdp3_res->vbif_base + off)
+#define VBIF_REG_READ(off) readl_relaxed(mdp3_res->vbif_base + off)
+
+#endif /* MDP3_H */
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
new file mode 100644
index 0000000..17dadf4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -0,0 +1,3018 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/pm_runtime.h>
+#include <linux/sw_sync.h>
+#include <linux/iommu.h>
+
+#include "mdp3_ctrl.h"
+#include "mdp3.h"
+#include "mdp3_ppp.h"
+#include "mdss_smmu.h"
+
+#define VSYNC_EXPIRE_TICK 4
+
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd);
+static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx);
+static int mdp3_histogram_stop(struct mdp3_session_data *session,
+ u32 block);
+static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable);
+static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable);
+static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd);
+static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd,
+ struct mdp_rgb_lut_data *cfg);
+static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
+ struct mdp_rgb_lut_data *cfg);
+static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd);
+
+/* Reciprocal LUT: entry i (i >= 1) is 65536/i rounded; entry 0 is 0. */
+u32 mdp_lut_inverse16[MDP_LUT_SIZE] = {
+0, 65536, 32768, 21845, 16384, 13107, 10923, 9362, 8192, 7282, 6554, 5958,
+5461, 5041, 4681, 4369, 4096, 3855, 3641, 3449, 3277, 3121, 2979, 2849, 2731,
+2621, 2521, 2427, 2341, 2260, 2185, 2114, 2048, 1986, 1928, 1872, 1820, 1771,
+1725, 1680, 1638, 1598, 1560, 1524, 1489, 1456, 1425, 1394, 1365, 1337, 1311,
+1285, 1260, 1237, 1214, 1192, 1170, 1150, 1130, 1111, 1092, 1074, 1057, 1040,
+1024, 1008, 993, 978, 964, 950, 936, 923, 910, 898, 886, 874, 862, 851, 840,
+830, 819, 809, 799, 790, 780, 771, 762, 753, 745, 736, 728, 720, 712, 705, 697,
+690, 683, 676, 669, 662, 655, 649, 643, 636, 630, 624, 618, 612, 607, 601, 596,
+590, 585, 580, 575, 570, 565, 560, 555, 551, 546, 542, 537, 533, 529, 524, 520,
+516, 512, 508, 504, 500, 496, 493, 489, 485, 482, 478, 475, 471, 468, 465, 462,
+458, 455, 452, 449, 446, 443, 440, 437, 434, 431, 428, 426, 423, 420, 417, 415,
+412, 410, 407, 405, 402, 400, 397, 395, 392, 390, 388, 386, 383, 381, 379, 377,
+374, 372, 370, 368, 366, 364, 362, 360, 358, 356, 354, 352, 350, 349, 347, 345,
+343, 341, 340, 338, 336, 334, 333, 331, 329, 328, 326, 324, 323, 321, 320, 318,
+317, 315, 314, 312, 311, 309, 308, 306, 305, 303, 302, 301, 299, 298, 297, 295,
+294, 293, 291, 290, 289, 287, 286, 285, 284, 282, 281, 280, 279, 278, 277, 275,
+274, 273, 272, 271, 270, 269, 267, 266, 265, 264, 263, 262, 261, 260, 259, 258,
+257};
+
+/* Reset a circular buffer queue to empty. */
+static void mdp3_bufq_init(struct mdp3_buffer_queue *bufq)
+{
+	bufq->count = 0;
+	bufq->push_idx = 0;
+	bufq->pop_idx = 0;
+}
+
+/*
+ * Drain the queue, releasing every queued image buffer via mdp3_put_img(),
+ * then reset the queue to empty. No-op on an empty queue.
+ */
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq)
+{
+	int count = bufq->count;
+
+	if (!count)
+		return;
+
+	while (count-- && (bufq->pop_idx >= 0)) {
+		struct mdp3_img_data *data = &bufq->img_data[bufq->pop_idx];
+
+		bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
+		mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+	}
+	bufq->count = 0;
+	bufq->push_idx = 0;
+	bufq->pop_idx = 0;
+}
+
+/*
+ * Append a copy of *data to the queue.
+ * Return: 0 on success, -EPERM when the queue is full.
+ */
+int mdp3_bufq_push(struct mdp3_buffer_queue *bufq,
+			struct mdp3_img_data *data)
+{
+	if (bufq->count >= MDP3_MAX_BUF_QUEUE) {
+		pr_err("bufq full\n");
+		return -EPERM;
+	}
+
+	bufq->img_data[bufq->push_idx] = *data;
+	bufq->push_idx = (bufq->push_idx + 1) % MDP3_MAX_BUF_QUEUE;
+	bufq->count++;
+	return 0;
+}
+
+/*
+ * Remove and return the oldest queued buffer, or NULL if empty.
+ * NOTE: the returned pointer aliases the queue slot; it stays valid only
+ * until that slot is overwritten by a later push.
+ */
+static struct mdp3_img_data *mdp3_bufq_pop(struct mdp3_buffer_queue *bufq)
+{
+	struct mdp3_img_data *data;
+
+	if (bufq->count == 0)
+		return NULL;
+
+	data = &bufq->img_data[bufq->pop_idx];
+	bufq->count--;
+	bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
+	return data;
+}
+
+/* Number of buffers currently queued. */
+static int mdp3_bufq_count(struct mdp3_buffer_queue *bufq)
+{
+	return bufq->count;
+}
+
+/* Subscribe a notifier to this session's frame-event chain. */
+void mdp3_ctrl_notifier_register(struct mdp3_session_data *ses,
+	struct notifier_block *notifier)
+{
+	blocking_notifier_chain_register(&ses->notifier_head, notifier);
+}
+
+/* Remove a notifier from this session's frame-event chain. */
+void mdp3_ctrl_notifier_unregister(struct mdp3_session_data *ses,
+	struct notifier_block *notifier)
+{
+	blocking_notifier_chain_unregister(&ses->notifier_head, notifier);
+}
+
+/* Broadcast a frame event (MDP_NOTIFY_*) to all registered notifiers. */
+int mdp3_ctrl_notify(struct mdp3_session_data *ses, int event)
+{
+	return blocking_notifier_call_chain(&ses->notifier_head, event, ses);
+}
+
+/*
+ * kthread work: emit one MDP_NOTIFY_FRAME_DONE per pending DMA completion,
+ * decrementing dma_done_cnt as each is delivered.
+ */
+static void mdp3_dispatch_dma_done(struct kthread_work *work)
+{
+	struct mdp3_session_data *session;
+	int cnt = 0;
+
+	pr_debug("%s\n", __func__);
+	session = container_of(work, struct mdp3_session_data,
+				dma_done_work);
+	if (!session)
+		return;
+
+	cnt = atomic_read(&session->dma_done_cnt);
+	MDSS_XLOG(cnt);
+	while (cnt > 0) {
+		mdp3_ctrl_notify(session, MDP_NOTIFY_FRAME_DONE);
+		atomic_dec(&session->dma_done_cnt);
+		cnt--;
+	}
+}
+
+/*
+ * Deferred clock-off work. Bails out if vsync is still needed; otherwise
+ * waits (with one retry) for any in-flight DMA kickoff to complete before
+ * disabling vsync and MDP clocks. WARNs if DMAP stays busy after retrying.
+ */
+static void mdp3_dispatch_clk_off(struct work_struct *work)
+{
+	struct mdp3_session_data *session;
+	int rc;
+	bool dmap_busy;
+	int retry_count = 2;
+
+	pr_debug("%s\n", __func__);
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
+	session = container_of(work, struct mdp3_session_data,
+				clk_off_work);
+	if (!session)
+		return;
+
+	mutex_lock(&session->lock);
+	if (session->vsync_enabled ||
+		atomic_read(&session->vsync_countdown) > 0) {
+		mutex_unlock(&session->lock);
+		pr_debug("%s: Ignoring clk shut down\n", __func__);
+		MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+		return;
+	}
+
+	if (session->intf->active) {
+retry_dma_done:
+		rc = wait_for_completion_timeout(&session->dma_completion,
+			WAIT_DMA_TIMEOUT);
+		if (rc <= 0) {
+			struct mdss_panel_data *panel;
+
+			/* NOTE(review): 'panel' is assigned but never used */
+			panel = session->panel;
+			pr_debug("cmd kickoff timed out (%d)\n", rc);
+			dmap_busy = session->dma->busy();
+			if (dmap_busy) {
+				if (--retry_count) {
+					pr_err("dmap is busy, retry %d\n",
+						retry_count);
+					goto retry_dma_done;
+				}
+				pr_err("dmap is still busy, bug_on\n");
+				WARN_ON(1);
+			} else {
+				pr_debug("dmap is not busy, continue\n");
+			}
+		}
+	}
+	mdp3_ctrl_vsync_enable(session->mfd, 0);
+	mdp3_ctrl_clk_enable(session->mfd, 0);
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+	mutex_unlock(&session->lock);
+}
+
+/* Vsync callback: defer retire-fence signalling to workqueue context. */
+static void mdp3_vsync_retire_handle_vsync(void *arg)
+{
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)arg;
+
+	if (!mdp3_session) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	schedule_work(&mdp3_session->retire_work);
+}
+
+/*
+ * Advance the sw_sync retire timeline by 'val' (clamped to outstanding
+ * retire_cnt), under the sync mutex.
+ */
+static void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
+{
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (mdp3_session->retire_cnt > 0) {
+		sw_sync_timeline_inc(mdp3_session->vsync_timeline, val);
+		mdp3_session->retire_cnt -= min(val, mdp3_session->retire_cnt);
+	}
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+}
+
+/* Workqueue handler: signal one retire fence per vsync. */
+static void mdp3_vsync_retire_work_handler(struct work_struct *work)
+{
+	struct mdp3_session_data *mdp3_session =
+		container_of(work, struct mdp3_session_data, retire_work);
+
+	if (!mdp3_session)
+		return;
+
+	mdp3_vsync_retire_signal(mdp3_session->mfd, 1);
+}
+
+/* Histogram-done IRQ path: bump event counter and wake sysfs pollers. */
+void mdp3_hist_intr_notify(struct mdp3_dma *dma)
+{
+	dma->hist_events++;
+	sysfs_notify_dirent(dma->hist_event_sd);
+	pr_debug("%s:: hist_events = %u\n", __func__, dma->hist_events);
+}
+
+/* Vsync IRQ callback: timestamp the vsync and wake sysfs pollers. */
+void vsync_notify_handler(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	session->vsync_time = ktime_get();
+	MDSS_XLOG(ktime_to_ms(session->vsync_time));
+	sysfs_notify_dirent(session->vsync_event_sd);
+}
+
+/*
+ * DMA-done IRQ callback: count the completion, queue the frame-done
+ * dispatch work, and release waiters on dma_completion.
+ */
+void dma_done_notify_handler(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	atomic_inc(&session->dma_done_cnt);
+	kthread_queue_work(&session->worker, &session->dma_done_work);
+	complete_all(&session->dma_completion);
+}
+
+/*
+ * Vsync callback used while winding down: decrement the countdown and,
+ * when it hits zero, schedule the deferred clock-off work.
+ */
+void vsync_count_down(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	/* We are counting down to turn off clocks */
+	if (atomic_read(&session->vsync_countdown) > 0)
+		atomic_dec(&session->vsync_countdown);
+	if (atomic_read(&session->vsync_countdown) == 0)
+		schedule_work(&session->clk_off_work);
+}
+
+/* Re-arm the clock-off vsync countdown, but only for DSI command-mode panels. */
+void mdp3_ctrl_reset_countdown(struct mdp3_session_data *session,
+	struct msm_fb_data_type *mfd)
+{
+	if (mdp3_ctrl_get_intf_type(mfd) == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		atomic_set(&session->vsync_countdown, VSYNC_EXPIRE_TICK);
+}
+
+/*
+ * Enable/disable vsync notification for a session.
+ *
+ * On disable with a pending countdown, the handler is swapped for
+ * vsync_count_down (and 'enable' forced back to 1) so clocks can be shut
+ * off after VSYNC_EXPIRE_TICK more vsyncs. When the DSI interface is
+ * inactive, a software timer fakes vsync events instead.
+ *
+ * Return: 0 on success, -ENODEV/-EINVAL when the session is absent or off.
+ */
+static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_notification vsync_client;
+	struct mdp3_notification *arg = NULL;
+	bool mod_vsync_timer = false;
+
+	pr_debug("mdp3_ctrl_vsync_enable =%d\n", enable);
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf)
+		return -ENODEV;
+
+	if (!mdp3_session->status) {
+		pr_debug("fb%d is not on yet", mfd->index);
+		return -EINVAL;
+	}
+	if (enable) {
+		vsync_client.handler = vsync_notify_handler;
+		vsync_client.arg = mdp3_session;
+		arg = &vsync_client;
+	} else if (atomic_read(&mdp3_session->vsync_countdown) > 0) {
+		/*
+		 * Now that vsync is no longer needed we will
+		 * shutdown dsi clocks as soon as cnt down == 0
+		 * for cmd mode panels
+		 */
+		vsync_client.handler = vsync_count_down;
+		vsync_client.arg = mdp3_session;
+		arg = &vsync_client;
+		enable = 1;
+	}
+
+	if (enable) {
+		if (mdp3_session->status == 1 &&
+			(mdp3_session->vsync_before_commit ||
+			!mdp3_session->intf->active)) {
+			mod_vsync_timer = true;
+		} else if (!mdp3_session->clk_on) {
+			/* Enable clocks before enabling the vsync interrupt */
+			mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+			mdp3_ctrl_clk_enable(mfd, 1);
+		}
+	}
+
+	mdp3_clk_enable(1, 0);
+	mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
+	mdp3_clk_enable(0, 0);
+
+	/*
+	 * Need to fake vsync whenever dsi interface is not
+	 * active or when dsi clocks are currently off
+	 */
+	if (mod_vsync_timer) {
+		mod_timer(&mdp3_session->vsync_timer,
+			jiffies + msecs_to_jiffies(mdp3_session->vsync_period));
+	} else if (!enable) {
+		del_timer(&mdp3_session->vsync_timer);
+	}
+
+	return 0;
+}
+
+/*
+ * Software vsync timer: while the session is on and the DSI interface is
+ * inactive (or a pre-commit vsync is needed), fake a vsync event and
+ * re-arm for the next period.
+ */
+void mdp3_vsync_timer_func(unsigned long arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	if (session->status == 1 && (session->vsync_before_commit ||
+			!session->intf->active)) {
+		pr_debug("mdp3_vsync_timer_func trigger\n");
+		vsync_notify_handler(session);
+		mod_timer(&session->vsync_timer,
+			jiffies + msecs_to_jiffies(session->vsync_period));
+	}
+}
+
+/*
+ * Handle an async-blit ioctl: copy the request-list header from user
+ * space, bounds-check the count, parse the requests (async=1), and copy
+ * the header (with result fences) back.
+ *
+ * NOTE(review): on the final copy_to_user, rc is the number of uncopied
+ * bytes rather than -EFAULT — confirm callers treat nonzero as failure.
+ */
+static int mdp3_ctrl_async_blit_req(struct msm_fb_data_type *mfd,
+	void __user *p)
+{
+	struct mdp_async_blit_req_list req_list_header;
+	int rc, count;
+	void __user *p_req;
+
+	if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
+		return -EFAULT;
+	p_req = p + sizeof(req_list_header);
+	count = req_list_header.count;
+	if (count < 0 || count >= MAX_BLIT_REQ)
+		return -EINVAL;
+	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 1);
+	if (!rc)
+		rc = copy_to_user(p, &req_list_header, sizeof(req_list_header));
+	return rc;
+}
+
+/*
+ * Handle a synchronous blit ioctl: read the legacy mdp_blit_req_list
+ * prefix into the async header, validate count, and parse (async=0,
+ * no acquire fences).
+ *
+ * NOTE(review): copy_from_user writes sizeof(struct mdp_blit_req_list)
+ * bytes starting at &req_list_header.count — this assumes 'count' is the
+ * leading member and the struct layouts line up; confirm against the uapi
+ * definitions.
+ */
+static int mdp3_ctrl_blit_req(struct msm_fb_data_type *mfd, void __user *p)
+{
+	struct mdp_async_blit_req_list req_list_header;
+	int rc, count;
+	void __user *p_req;
+
+	if (copy_from_user(&(req_list_header.count), p,
+			sizeof(struct mdp_blit_req_list)))
+		return -EFAULT;
+	p_req = p + sizeof(struct mdp_blit_req_list);
+	count = req_list_header.count;
+	if (count < 0 || count >= MAX_BLIT_REQ)
+		return -EINVAL;
+	req_list_header.sync.acq_fen_fd_cnt = 0;
+	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 0);
+	return rc;
+}
+
+/* sysfs 'bl_event' show: print the backlight event counter. */
+static ssize_t mdp3_bl_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp3_session->bl_events);
+	return ret;
+}
+
+/* sysfs 'hist_event' show: print the histogram-done event counter. */
+static ssize_t mdp3_hist_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	struct mdp3_dma *dma = NULL;
+	int ret;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	dma = (struct mdp3_dma *)mdp3_session->dma;
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", dma->hist_events);
+	return ret;
+}
+
+/* sysfs 'vsync_event' show: print the last vsync timestamp in ns. */
+static ssize_t mdp3_vsync_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	u64 vsync_ticks;
+	int rc;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	vsync_ticks = ktime_to_ns(mdp3_session->vsync_time);
+
+	pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks);
+	rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
+	return rc;
+}
+
+/*
+ * sysfs 'packpattern' show: report the DMA output pack pattern, falling
+ * back to the pattern derived from the framebuffer image type when the
+ * configured value is 0.
+ */
+static ssize_t mdp3_packpattern_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int rc;
+	u32 pattern = 0;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	pattern = mdp3_session->dma->output_config.pack_pattern;
+
+	/* If pattern was found to be 0 then get pattern for fb imagetype */
+	if (!pattern)
+		pattern = mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
+
+	pr_debug("fb%d pack_pattern c= %d.", mfd->index, pattern);
+	rc = scnprintf(buf, PAGE_SIZE, "packpattern=%d\n", pattern);
+	return rc;
+}
+
+/* sysfs 'dyn_pu' show: report dynamic partial-update state (-1 if unset). */
+static ssize_t mdp3_dyn_pu_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret, state;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	state = (mdp3_session->dyn_pu_state >= 0) ?
+		mdp3_session->dyn_pu_state : -1;
+	ret = scnprintf(buf, PAGE_SIZE, "%d", state);
+	return ret;
+}
+
+/*
+ * sysfs 'dyn_pu' store: parse a decimal value, record it as the dynamic
+ * partial-update state, and notify sysfs pollers. Input is not range-
+ * checked beyond integer parsing.
+ */
+static ssize_t mdp3_dyn_pu_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret, dyn_pu;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	ret = kstrtoint(buf, 10, &dyn_pu);
+	if (ret) {
+		pr_err("Invalid input for partial update: ret = %d\n", ret);
+		return ret;
+	}
+
+	mdp3_session->dyn_pu_state = dyn_pu;
+	sysfs_notify(&dev->kobj, NULL, "dyn_pu");
+	return count;
+}
+
+/* sysfs attributes: read-only event counters plus read-write dyn_pu. */
+static DEVICE_ATTR(hist_event, 0444, mdp3_hist_show_event, NULL);
+static DEVICE_ATTR(bl_event, 0444, mdp3_bl_show_event, NULL);
+static DEVICE_ATTR(vsync_event, 0444, mdp3_vsync_show_event, NULL);
+static DEVICE_ATTR(packpattern, 0444, mdp3_packpattern_show, NULL);
+static DEVICE_ATTR(dyn_pu, 0664, mdp3_dyn_pu_show,
+		mdp3_dyn_pu_store);
+
+/* attributes present on every mdp3 fb device */
+static struct attribute *generic_attrs[] = {
+	&dev_attr_packpattern.attr,
+	&dev_attr_dyn_pu.attr,
+	&dev_attr_hist_event.attr,
+	&dev_attr_bl_event.attr,
+	NULL,
+};
+
+/* vsync attribute registered separately (only while vsync is relevant) */
+static struct attribute *vsync_fs_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	NULL,
+};
+
+static struct attribute_group vsync_fs_attr_group = {
+	.attrs = vsync_fs_attrs,
+};
+
+static struct attribute_group generic_attr_group = {
+	.attrs = generic_attrs,
+};
+
+/*
+ * Toggle session clocks: forwards a DSI panel clock request via the panel
+ * event handler and updates the mdp3 resource vote, but only on an actual
+ * state transition (on->off or off->on).
+ *
+ * NOTE(review): session->clk_on is updated to 'enable' even when the
+ * event handler or res_update reports failure — confirm intended.
+ */
+static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdp3_session_data *session;
+	struct mdss_panel_data *panel;
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	int rc = 0;
+
+	pr_debug("mdp3_ctrl_clk_enable %d\n", enable);
+
+	session = mfd->mdp.private1;
+	panel = session->panel;
+
+	if (!panel->event_handler)
+		return 0;
+
+	if ((enable && session->clk_on == 0) ||
+		(!enable && session->clk_on == 1)) {
+		clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+		clk_ctrl.state = enable;
+		rc = panel->event_handler(panel,
+			MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl);
+		rc |= mdp3_res_update(enable, 1, MDP3_CLIENT_DMA_P);
+	} else {
+		pr_debug("enable = %d, clk_on=%d\n", enable, session->clk_on);
+	}
+
+	session->clk_on = enable;
+	return rc;
+}
+
+/*
+ * Vote (status != 0) or drop (status == 0) the DMA_P bus bandwidth,
+ * sizing ab/ib from the panel timing and framebuffer bpp.
+ */
+static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
+{
+	int rc = 0;
+
+	if (status) {
+		u64 ab = 0;
+		u64 ib = 0;
+
+		mdp3_calc_dma_res(mfd->panel_info, NULL, &ab, &ib,
+			ppp_bpp(mfd->fb_imgType));
+		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+	} else {
+		rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
+	}
+	return rc;
+}
+
+/*
+ * Enable (status != 0) or disable (status == 0) MDP core/vsync clocks for
+ * DMA_P, computing the core clock rate from the panel timing first.
+ */
+static int mdp3_ctrl_res_req_clk(struct msm_fb_data_type *mfd, int status)
+{
+	int rc = 0;
+
+	if (status) {
+		u64 mdp_clk_rate = 0;
+
+		mdp3_calc_dma_res(mfd->panel_info, &mdp_clk_rate,
+			NULL, NULL, 0);
+
+		mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
+				MDP3_CLIENT_DMA_P);
+		mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
+				MDP3_CLIENT_DMA_P);
+
+		rc = mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P);
+		if (rc) {
+			pr_err("mdp3 clk enable fail\n");
+			return rc;
+		}
+	} else {
+		rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+		if (rc)
+			pr_err("mdp3 clk disable fail\n");
+	}
+	return rc;
+}
+
+/*
+ * Map the framebuffer panel type to an MDP3 DMA output selector.
+ * Unknown panel types map to MDP3_DMA_OUTPUT_SEL_MAX.
+ */
+static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd)
+{
+	int type;
+
+	switch (mfd->panel.type) {
+	case MIPI_VIDEO_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+		break;
+	case MIPI_CMD_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_DSI_CMD;
+		break;
+	case LCDC_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_LCDC;
+		break;
+	default:
+		type = MDP3_DMA_OUTPUT_SEL_MAX;
+	}
+	return type;
+}
+
+/*
+ * Map an MDP image type to the DMA input-buffer format; unsupported
+ * types yield MDP3_DMA_IBUF_FORMAT_UNDEFINED.
+ */
+int mdp3_ctrl_get_source_format(u32 imgType)
+{
+	int format;
+
+	switch (imgType) {
+	case MDP_RGB_565:
+		format = MDP3_DMA_IBUF_FORMAT_RGB565;
+		break;
+	case MDP_RGB_888:
+		format = MDP3_DMA_IBUF_FORMAT_RGB888;
+		break;
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+		format = MDP3_DMA_IBUF_FORMAT_XRGB8888;
+		break;
+	default:
+		format = MDP3_DMA_IBUF_FORMAT_UNDEFINED;
+	}
+	return format;
+}
+
+/*
+ * mdp3_ctrl_get_pack_pattern() - pick the DMA output pack pattern for
+ * the given image type (BGR for RGBA8888/RGB888, RGB otherwise).
+ */
+int mdp3_ctrl_get_pack_pattern(u32 imgType)
+{
+	switch (imgType) {
+	case MDP_RGBA_8888:
+	case MDP_RGB_888:
+		return MDP3_DMA_OUTPUT_PACK_PATTERN_BGR;
+	default:
+		return MDP3_DMA_OUTPUT_PACK_PATTERN_RGB;
+	}
+}
+
+/*
+ * mdp3_ctrl_intf_init() - program the display interface timing.
+ * @mfd:  framebuffer device data
+ * @intf: MDP3 interface to configure
+ *
+ * Builds an mdp3_intf_cfg from the panel timing (porches, pulse widths,
+ * resolution).  For video/LCDC panels the horizontal fields are in pixel
+ * clocks while every vertical field is expressed in hsync-period units.
+ * The hardware config step is skipped while the continuous-splash screen
+ * is still active so the bootloader-programmed timing is not disturbed.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported interface type or a
+ * missing config hook.
+ */
+static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
+				struct mdp3_intf *intf)
+{
+	int rc = 0;
+	struct mdp3_intf_cfg cfg;
+	struct mdp3_video_intf_cfg *video = &cfg.video;
+	struct mdss_panel_info *p = mfd->panel_info;
+	int h_back_porch = p->lcdc.h_back_porch;
+	int h_front_porch = p->lcdc.h_front_porch;
+	int w = p->xres;
+	int v_back_porch = p->lcdc.v_back_porch;
+	int v_front_porch = p->lcdc.v_front_porch;
+	int h = p->yres;
+	int h_sync_skew = p->lcdc.hsync_skew;
+	int h_pulse_width = p->lcdc.h_pulse_width;
+	int v_pulse_width = p->lcdc.v_pulse_width;
+	int hsync_period = h_front_porch + h_back_porch + w + h_pulse_width;
+	int vsync_period = v_front_porch + v_back_porch + h + v_pulse_width;
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	/* Convert the vertical total from lines to hsync-period units. */
+	vsync_period *= hsync_period;
+
+	cfg.type = mdp3_ctrl_get_intf_type(mfd);
+	if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		video->hsync_period = hsync_period;
+		video->hsync_pulse_width = h_pulse_width;
+		video->vsync_period = vsync_period;
+		video->vsync_pulse_width = v_pulse_width * hsync_period;
+		video->display_start_x = h_back_porch + h_pulse_width;
+		video->display_end_x = hsync_period - h_front_porch - 1;
+		video->display_start_y =
+			(v_back_porch + v_pulse_width) * hsync_period;
+		video->display_end_y =
+			vsync_period - v_front_porch * hsync_period - 1;
+		/* Active region tracks the full display window. */
+		video->active_start_x = video->display_start_x;
+		video->active_end_x = video->display_end_x;
+		video->active_h_enable = true;
+		video->active_start_y = video->display_start_y;
+		video->active_end_y = video->display_end_y;
+		video->active_v_enable = true;
+		video->hsync_skew = h_sync_skew;
+		video->hsync_polarity = 1;
+		video->vsync_polarity = 1;
+		video->de_polarity = 1;
+		video->underflow_color = p->lcdc.underflow_clr;
+	} else if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cfg.dsi_cmd.primary_dsi_cmd_id = 0;
+		cfg.dsi_cmd.secondary_dsi_cmd_id = 1;
+		cfg.dsi_cmd.dsi_cmd_tg_intf_sel = 0;
+	} else
+		return -EINVAL;
+
+	/* Do not reprogram timing while continuous splash is active. */
+	if (!(mdp3_session->in_splash_screen)) {
+		if (intf->config)
+			rc = intf->config(intf, &cfg);
+		else
+			rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_dma_init() - configure the DMA_P source and output stages.
+ * @mfd: framebuffer device data
+ * @dma: MDP3 DMA pipe to configure
+ *
+ * Programs the source (size, buffer, stride, format) and output (pack
+ * pattern, component depth, output select) configuration.  If an overlay
+ * PREPARE already updated the source config (dma->update_src_cfg), those
+ * values take precedence over the fb_imgType-derived defaults.  For
+ * command-mode panels the tear-check parameters are also programmed and
+ * a DMA-done notifier is registered.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
+				struct mdp3_dma *dma)
+{
+	int rc;
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_fix_screeninfo *fix;
+	struct fb_var_screeninfo *var;
+	struct mdp3_dma_output_config outputConfig;
+	struct mdp3_dma_source sourceConfig;
+	int frame_rate = mfd->panel_info->mipi.frame_rate;
+	int vbp, vfp, vspw;
+	int vtotal, vporch;
+	struct mdp3_notification dma_done_callback;
+	struct mdp3_tear_check te;
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	vbp = panel_info->lcdc.v_back_porch;
+	vfp = panel_info->lcdc.v_front_porch;
+	vspw = panel_info->lcdc.v_pulse_width;
+	vporch = vbp + vfp + vspw;
+	vtotal = vporch + panel_info->yres;
+
+	fix = &fbi->fix;
+	var = &fbi->var;
+
+	sourceConfig.width = panel_info->xres;
+	sourceConfig.height = panel_info->yres;
+	sourceConfig.x = 0;
+	sourceConfig.y = 0;
+	sourceConfig.buf = mfd->iova;
+	sourceConfig.vporch = vporch;
+	/* vsync counter ticks per frame at MDP_VSYNC_CLK_RATE. */
+	sourceConfig.vsync_count =
+		MDP_VSYNC_CLK_RATE / (frame_rate * vtotal);
+
+	outputConfig.dither_en = 0;
+	outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd);
+	outputConfig.bit_mask_polarity = 0;
+	outputConfig.color_components_flip = 0;
+	outputConfig.pack_align = MDP3_DMA_OUTPUT_PACK_ALIGN_LSB;
+	/* 8 bits per component for all three color components. */
+	outputConfig.color_comp_out_bits = (MDP3_DMA_OUTPUT_COMP_BITS_8 << 4) |
+					(MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)|
+					MDP3_DMA_OUTPUT_COMP_BITS_8;
+
+	if (dma->update_src_cfg) {
+		/* configuration has been updated through PREPARE call */
+		sourceConfig.format = dma->source_config.format;
+		sourceConfig.stride = dma->source_config.stride;
+		outputConfig.pack_pattern = dma->output_config.pack_pattern;
+	} else {
+		sourceConfig.format =
+			mdp3_ctrl_get_source_format(mfd->fb_imgType);
+		outputConfig.pack_pattern =
+			mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
+		sourceConfig.stride = fix->line_length;
+	}
+
+	/* Tear-check setup; only consumed in the DSI command-mode path. */
+	te.frame_rate = panel_info->mipi.frame_rate;
+	te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
+	te.tear_check_en = panel_info->te.tear_check_en;
+	te.sync_cfg_height = panel_info->te.sync_cfg_height;
+	te.vsync_init_val = panel_info->te.vsync_init_val;
+	te.sync_threshold_start = panel_info->te.sync_threshold_start;
+	te.sync_threshold_continue = panel_info->te.sync_threshold_continue;
+	te.start_pos = panel_info->te.start_pos;
+	te.rd_ptr_irq = panel_info->te.rd_ptr_irq;
+	te.refx100 = panel_info->te.refx100;
+
+	if (dma->dma_config) {
+		/* Without partial update, the ROI is the full frame. */
+		if (!panel_info->partial_update_enabled) {
+			dma->roi.w = sourceConfig.width;
+			dma->roi.h = sourceConfig.height;
+			dma->roi.x = sourceConfig.x;
+			dma->roi.y = sourceConfig.y;
+		}
+		rc = dma->dma_config(dma, &sourceConfig, &outputConfig,
+					mdp3_session->in_splash_screen);
+	} else {
+		pr_err("%s: dma config failed\n", __func__);
+		rc = -EINVAL;
+	}
+
+	if (outputConfig.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		if (dma->dma_sync_config)
+			rc = dma->dma_sync_config(dma,
+					&sourceConfig, &te);
+		else
+			rc = -EINVAL;
+		dma_done_callback.handler = dma_done_notify_handler;
+		dma_done_callback.arg = mfd->mdp.private1;
+		dma->dma_done_notifier(dma, &dma_done_callback);
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_on() - power on the MDP3 display pipeline.
+ * @mfd: framebuffer device data
+ *
+ * Sequence: bring the panel out of low power / splash if needed, then
+ * vote bus bandwidth, disable dynamic clock gating, enable MDP clocks,
+ * send panel LINK_READY/UNBLANK/PANEL_ON events, and initialize the DMA,
+ * PPP and interface blocks.  A runtime-PM reference is taken up front;
+ * it is released again on failure, or immediately for command-mode
+ * panels when idle power collapse manages the device instead.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+
+	pr_debug("mdp3_ctrl_on\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_on no device");
+		return -ENODEV;
+	}
+	mutex_lock(&mdp3_session->lock);
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mfd->panel_power_state);
+	panel = mdp3_session->panel;
+	/* make sure DSI host is initialized properly */
+	if (panel) {
+		pr_debug("%s : dsi host init, power state = %d Splash %d\n",
+			__func__, mfd->panel_power_state,
+			mdp3_session->in_splash_screen);
+		if (mdss_fb_is_power_on_lp(mfd) ||
+			mdp3_session->in_splash_screen) {
+			/* Turn on panel so that it can exit low power mode */
+			mdp3_clk_enable(1, 0);
+			rc = panel->event_handler(panel,
+				MDSS_EVENT_LINK_READY, NULL);
+			rc |= panel->event_handler(panel,
+				MDSS_EVENT_UNBLANK, NULL);
+			rc |= panel->event_handler(panel,
+				MDSS_EVENT_PANEL_ON, NULL);
+			if (mdss_fb_is_power_on_ulp(mfd))
+				rc |= mdp3_enable_panic_ctrl();
+			mdp3_clk_enable(0, 0);
+		}
+	}
+
+	if (mdp3_session->status) {
+		pr_debug("fb%d is on already\n", mfd->index);
+		MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state);
+		goto end;
+	}
+
+	/* Interface already running: continuous splash hand-off. */
+	if (mdp3_session->intf->active) {
+		pr_debug("continuous splash screen, initialized already\n");
+		mdp3_session->status = 1;
+		goto end;
+	}
+
+	/*
+	 * Get a reference to the runtime pm device.
+	 * If idle pc feature is enabled, it will be released
+	 * at end of this routine else, when device is turned off.
+	 */
+	pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
+	/* Increment the overlay active count */
+	atomic_inc(&mdp3_res->active_intf_cnt);
+	mdp3_ctrl_notifier_register(mdp3_session,
+		&mdp3_session->mfd->mdp_sync_pt_data.notifier);
+
+	/* request bus bandwidth before DSI DMA traffic */
+	rc = mdp3_ctrl_res_req_bus(mfd, 1);
+	if (rc) {
+		pr_err("fail to request bus resource\n");
+		goto on_error;
+	}
+
+	rc = mdp3_dynamic_clock_gating_ctrl(0);
+	if (rc) {
+		pr_err("fail to disable dynamic clock gating\n");
+		goto on_error;
+	}
+	mdp3_qos_remapper_setup(panel);
+
+	rc = mdp3_ctrl_res_req_clk(mfd, 1);
+	if (rc) {
+		pr_err("fail to request mdp clk resource\n");
+		goto on_error;
+	}
+
+	if (panel->event_handler) {
+		rc = panel->event_handler(panel, MDSS_EVENT_LINK_READY, NULL);
+		rc |= panel->event_handler(panel, MDSS_EVENT_UNBLANK, NULL);
+		rc |= panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
+		if (panel->panel_info.type == MIPI_CMD_PANEL) {
+			struct dsi_panel_clk_ctrl clk_ctrl;
+
+			clk_ctrl.state = MDSS_DSI_CLK_ON;
+			clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+			rc |= panel->event_handler(panel,
+				MDSS_EVENT_PANEL_CLK_CTRL,
+				(void *)&clk_ctrl);
+		}
+	}
+	if (rc) {
+		pr_err("fail to turn on the panel\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_dma_init(mfd, mdp3_session->dma);
+	if (rc) {
+		pr_err("dma init failed\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ppp_init();
+	if (rc) {
+		pr_err("ppp init failed\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
+	if (rc) {
+		pr_err("display interface init failed\n");
+		goto on_error;
+	}
+	mdp3_session->clk_on = 1;
+
+	mdp3_session->first_commit = true;
+	/* A dead panel means the next commit is an ESD recovery commit. */
+	if (mfd->panel_info->panel_dead)
+		mdp3_session->esd_recovery = true;
+
+	mdp3_session->status = 1;
+
+	mdp3_ctrl_pp_resume(mfd);
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state);
+on_error:
+	/*
+	 * Drop the runtime-PM vote on failure, or right away when idle
+	 * power collapse owns the vote for command-mode panels.
+	 */
+	if (rc || (mdp3_res->idle_pc_enabled &&
+			(mfd->panel_info->type == MIPI_CMD_PANEL))) {
+		if (rc) {
+			pr_err("Failed to turn on fb%d\n", mfd->index);
+			atomic_dec(&mdp3_res->active_intf_cnt);
+		}
+		pm_runtime_put(&mdp3_res->pdev->dev);
+	}
+end:
+	mutex_unlock(&mdp3_session->lock);
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_off() - blank the display and release MDP3 resources.
+ * @mfd: framebuffer device data
+ *
+ * For a transition into a low-power state the interface and clocks are
+ * left running (intf_stopped == false) so updates can continue; for a
+ * full power-off the DMA is stopped, panel OFF events are sent, clock
+ * and bus votes are dropped and the session state is cleared.  A
+ * runtime-PM reference is held across the whole sequence to distinguish
+ * idle power collapse from suspend power collapse.
+ *
+ * Always returns 0; intermediate errors are only logged.
+ */
+static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	bool intf_stopped = true;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+
+	pr_debug("mdp3_ctrl_off\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_off no device");
+		return -ENODEV;
+	}
+
+	/*
+	 * Keep a reference to the runtime pm until the overlay is turned
+	 * off, and then release this last reference at the end. This will
+	 * help in distinguishing between idle power collapse versus suspend
+	 * power collapse
+	 */
+	pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
+		mfd->panel_power_state);
+	panel = mdp3_session->panel;
+	mutex_lock(&mdp3_session->lock);
+
+	pr_debug("Requested power state = %d\n", mfd->panel_power_state);
+	if (mdss_fb_is_power_on_lp(mfd)) {
+		/*
+		 * Transition to low power
+		 * As display updates are expected in low power mode,
+		 * keep the interface and clocks on.
+		 */
+		intf_stopped = false;
+	} else {
+		/* Transition to display off */
+		if (!mdp3_session->status) {
+			pr_debug("fb%d is off already", mfd->index);
+			goto off_error;
+		}
+		if (panel && panel->set_backlight)
+			panel->set_backlight(panel, 0);
+	}
+
+	/*
+	 * While transitioning from interactive to low power,
+	 * events need to be sent to the interface so that the
+	 * panel can be configured in low power mode
+	 */
+	if (panel->event_handler)
+		rc = panel->event_handler(panel, MDSS_EVENT_BLANK,
+			(void *) (long int)mfd->panel_power_state);
+	if (rc)
+		pr_err("EVENT_BLANK error (%d)\n", rc);
+
+	if (intf_stopped) {
+		if (!mdp3_session->clk_on)
+			mdp3_ctrl_clk_enable(mfd, 1);
+		/* PP related programming for ctrl off */
+		mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);
+		mutex_lock(&mdp3_session->dma->pp_lock);
+		mdp3_session->dma->ccs_config.ccs_dirty = false;
+		mdp3_session->dma->lut_config.lut_dirty = false;
+		mutex_unlock(&mdp3_session->dma->pp_lock);
+
+		rc = mdp3_session->dma->stop(mdp3_session->dma,
+					mdp3_session->intf);
+		if (rc)
+			pr_debug("fail to stop the MDP3 dma\n");
+		/* Wait to ensure TG to turn off */
+		msleep(20);
+		mfd->panel_info->cont_splash_enabled = 0;
+
+		/* Disable Auto refresh once continuous splash disabled */
+		mdp3_autorefresh_disable(mfd->panel_info);
+		mdp3_splash_done(mfd->panel_info);
+
+		mdp3_irq_deregister();
+	}
+
+	if (panel->event_handler)
+		rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF,
+			(void *) (long int)mfd->panel_power_state);
+	if (rc)
+		pr_err("EVENT_PANEL_OFF error (%d)\n", rc);
+
+	if (intf_stopped) {
+		if (mdp3_session->clk_on) {
+			pr_debug("mdp3_ctrl_off stop clock\n");
+			if (panel->event_handler &&
+				(panel->panel_info.type == MIPI_CMD_PANEL)) {
+				struct dsi_panel_clk_ctrl clk_ctrl;
+
+				clk_ctrl.state = MDSS_DSI_CLK_OFF;
+				clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+				rc |= panel->event_handler(panel,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl);
+			}
+
+			rc = mdp3_dynamic_clock_gating_ctrl(1);
+			rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+			if (rc)
+				pr_err("mdp clock resource release failed\n");
+		}
+
+		mdp3_ctrl_notifier_unregister(mdp3_session,
+			&mdp3_session->mfd->mdp_sync_pt_data.notifier);
+
+		mdp3_session->vsync_enabled = 0;
+		atomic_set(&mdp3_session->vsync_countdown, 0);
+		atomic_set(&mdp3_session->dma_done_cnt, 0);
+		mdp3_session->clk_on = 0;
+		mdp3_session->in_splash_screen = 0;
+		mdp3_res->solid_fill_vote_en = false;
+		mdp3_session->status = 0;
+		/* MDP3 drives one interface; the count must drop to zero. */
+		if (atomic_dec_return(&mdp3_res->active_intf_cnt) != 0) {
+			pr_warn("active_intf_cnt unbalanced\n");
+			atomic_set(&mdp3_res->active_intf_cnt, 0);
+		}
+		/*
+		 * Release the pm runtime reference held when
+		 * idle pc feature is not enabled
+		 */
+		if (!mdp3_res->idle_pc_enabled ||
+			(mfd->panel_info->type != MIPI_CMD_PANEL)) {
+			rc = pm_runtime_put(&mdp3_res->pdev->dev);
+			if (rc)
+				pr_err("%s: pm_runtime_put failed (rc %d)\n",
+					__func__, rc);
+		}
+		mdp3_bufq_deinit(&mdp3_session->bufq_out);
+		if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
+			mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+			mdp3_bufq_deinit(&mdp3_session->bufq_in);
+		}
+	}
+
+	if (mdss_fb_is_power_on_ulp(mfd) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)) {
+		pr_debug("%s: Disable MDP3 clocks in ULP\n", __func__);
+		if (!mdp3_session->clk_on)
+			mdp3_ctrl_clk_enable(mfd, 1);
+		/*
+		 * STOP DMA transfer first and signal vsync notification
+		 * Before releasing the resource in ULP state.
+		 */
+		rc = mdp3_session->dma->stop(mdp3_session->dma,
+					mdp3_session->intf);
+		if (rc)
+			pr_warn("fail to stop the MDP3 dma in ULP\n");
+		/* Wait to ensure TG to turn off */
+		msleep(20);
+		/*
+		 * Handle ULP request initiated from fb_pm_suspend.
+		 * For ULP panel power state disabling vsync and set
+		 * vsync_count to zero and Turn off MDP3 clocks
+		 */
+		atomic_set(&mdp3_session->vsync_countdown, 0);
+		mdp3_session->vsync_enabled = 0;
+		mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
+		mdp3_ctrl_clk_enable(mdp3_session->mfd, 0);
+	}
+off_error:
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+	mutex_unlock(&mdp3_session->lock);
+	/* Release the last reference to the runtime device */
+	pm_runtime_put(&mdp3_res->pdev->dev);
+
+	return 0;
+}
+
+/*
+ * mdp3_ctrl_reset() - reinitialize the MDP3 pipeline after idle power
+ * collapse or to take over from the continuous splash screen.
+ * @mfd: framebuffer device data
+ *
+ * Re-enables clocks (when recovering from idle pc), reattaches the DMA
+ * IOMMU, reprograms the interface, DMA and PPP blocks, restores the
+ * post-processing state and re-registers any vsync client that was
+ * active before the reset.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int mdp3_ctrl_reset(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_dma *mdp3_dma;
+	struct mdss_panel_data *panel;
+	struct mdp3_notification vsync_client;
+
+	pr_debug("mdp3_ctrl_reset\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_reset no device");
+		return -ENODEV;
+	}
+
+	panel = mdp3_session->panel;
+	mdp3_dma = mdp3_session->dma;
+	mutex_lock(&mdp3_session->lock);
+	pr_debug("mdp3_ctrl_reset idle_pc %s FS_EN %s\n",
+		mdp3_res->idle_pc ? "True":"False",
+		mdp3_res->fs_ena ? "True":"False");
+	if (mdp3_res->idle_pc) {
+		mdp3_clk_enable(1, 0);
+		mdp3_dynamic_clock_gating_ctrl(0);
+		mdp3_qos_remapper_setup(panel);
+	}
+
+	/*Map the splash addr for VIDEO mode panel before smmu attach*/
+	if ((mfd->panel.type == MIPI_VIDEO_PANEL) &&
+		(mdp3_session->in_splash_screen)) {
+		rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_size,
+				IOMMU_READ | IOMMU_NOEXEC);
+	}
+
+	rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to attach dma iommu\n");
+		if (mdp3_res->idle_pc)
+			mdp3_clk_enable(0, 0);
+		goto reset_error;
+	}
+
+	/* Preserve the vsync client across the DMA re-init below. */
+	vsync_client = mdp3_dma->vsync_client;
+
+	mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
+	mdp3_ctrl_dma_init(mfd, mdp3_dma);
+	mdp3_ppp_init();
+	mdp3_ctrl_pp_resume(mfd);
+	if (vsync_client.handler)
+		mdp3_dma->vsync_enable(mdp3_dma, &vsync_client);
+
+	if (!mdp3_res->idle_pc) {
+		/* Splash hand-off path: mark splash done. */
+		mdp3_session->first_commit = true;
+		mfd->panel_info->cont_splash_enabled = 0;
+		mdp3_session->in_splash_screen = 0;
+		mdp3_splash_done(mfd->panel_info);
+		/* Disable Auto refresh */
+		mdp3_autorefresh_disable(mfd->panel_info);
+	} else {
+		mdp3_res->idle_pc = false;
+		mdp3_clk_enable(0, 0);
+		mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+	}
+
+reset_error:
+	mutex_unlock(&mdp3_session->lock);
+	return rc;
+}
+
+/*
+ * mdp3_overlay_get() - copy out the current overlay when @req->id
+ * matches the active overlay id; -EINVAL otherwise.
+ */
+static int mdp3_overlay_get(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	int ret = -EINVAL;
+
+	mutex_lock(&mdp3_session->lock);
+	if (mdp3_session->overlay.id == req->id) {
+		*req = mdp3_session->overlay;
+		ret = 0;
+	}
+	mutex_unlock(&mdp3_session->lock);
+
+	return ret;
+}
+
+/*
+ * mdp3_overlay_set() - stage an overlay configuration.
+ * @mfd: framebuffer device data
+ * @req: overlay request from userspace
+ *
+ * MDP3 supports a single overlay; a configuration change always arrives
+ * as MSMFB_NEW_REQUEST.  If the requested stride/format differ from the
+ * current DMA source config, the new values are staged and
+ * dma->update_src_cfg is set so the next DMA init/commit applies them.
+ * The overlay id is fixed to 1 and written back into @req.
+ *
+ * Returns 0.
+ */
+static int mdp3_overlay_set(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma = mdp3_session->dma;
+	int stride;
+	int format;
+
+	stride = req->src.width * ppp_bpp(req->src.format);
+	format = mdp3_ctrl_get_source_format(req->src.format);
+
+	if (mdp3_session->overlay.id != req->id)
+		pr_err("overlay was not released, continue to recover\n");
+	/*
+	 * A change in overlay structure will always come with
+	 * MSMFB_NEW_REQUEST for MDP3
+	 */
+	if (req->id == MSMFB_NEW_REQUEST) {
+		mutex_lock(&mdp3_session->lock);
+		if (dma->source_config.stride != stride ||
+				dma->source_config.format != format) {
+			dma->source_config.format = format;
+			dma->source_config.stride = stride;
+			dma->output_config.pack_pattern =
+				mdp3_ctrl_get_pack_pattern(req->src.format);
+			dma->update_src_cfg = true;
+		}
+		mdp3_session->overlay = *req;
+		mdp3_session->overlay.id = 1;
+		req->id = 1;
+		mutex_unlock(&mdp3_session->lock);
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_overlay_unset() - release the single MDP3 overlay.
+ * @mfd: framebuffer device data
+ * @ndx: overlay id to release (must be 1, the only valid id)
+ *
+ * Resets the overlay id to MSMFB_NEW_REQUEST and drops any buffers
+ * still queued for it.
+ *
+ * Returns 0 on success, -EINVAL if @ndx does not name the active overlay.
+ */
+static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (mdp3_session->overlay.id == ndx && ndx == 1) {
+		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+		mdp3_bufq_deinit(&mdp3_session->bufq_in);
+	} else {
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&mdp3_session->lock);
+
+	return rc;
+}
+
+/*
+ * mdp3_overlay_queue_buffer() - import a userspace buffer and queue it
+ * for the next commit.
+ * @mfd: framebuffer device data
+ * @req: overlay data request carrying the buffer handle
+ *
+ * Resolves the buffer into an mdp3_img_data, validates that it is at
+ * least stride * height bytes, and pushes it onto bufq_in.  For command
+ * mode panels the DMA IOMMU is attached for the duration of the import.
+ *
+ * Returns 0 on success or a negative errno; on failure the imported
+ * buffer reference is released.
+ */
+static int mdp3_overlay_queue_buffer(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_data *req)
+{
+	int rc;
+	bool is_panel_type_cmd = false;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct msmfb_data *img = &req->data;
+	struct mdp3_img_data data;
+	struct mdp3_dma *dma = mdp3_session->dma;
+
+	memset(&data, 0, sizeof(struct mdp3_img_data));
+	if (mfd->panel.type == MIPI_CMD_PANEL)
+		is_panel_type_cmd = true;
+	if (is_panel_type_cmd) {
+		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		if (rc) {
+			pr_err("fail to enable iommu\n");
+			return rc;
+		}
+	}
+	rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to get overlay buffer\n");
+		goto err;
+	}
+
+	/* Reject buffers too small for the configured frame. */
+	if (data.len < dma->source_config.stride * dma->source_config.height) {
+		pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
+			data.len, (dma->source_config.stride *
+			dma->source_config.height));
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		rc = -EINVAL;
+		goto err;
+	}
+	rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
+	if (rc) {
+		pr_err("fail to queue the overlay buffer, buffer drop\n");
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		goto err;
+	}
+	rc = 0;
+err:
+	if (is_panel_type_cmd)
+		mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+	return rc;
+}
+
+/*
+ * mdp3_overlay_play() - queue a buffer against the active overlay.
+ *
+ * Fails with -EINVAL when no overlay has been set, and with -EPERM when
+ * the panel is not powered on.
+ */
+static int mdp3_overlay_play(struct msm_fb_data_type *mfd,
+				struct msmfb_overlay_data *req)
+{
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	int ret;
+
+	pr_debug("mdp3_overlay_play req id=%x mem_id=%d\n",
+		req->id, req->data.memory_id);
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (mdp3_session->overlay.id == MSMFB_NEW_REQUEST) {
+		pr_err("overlay play without overlay set first\n");
+		mutex_unlock(&mdp3_session->lock);
+		return -EINVAL;
+	}
+
+	if (mdss_fb_is_power_on(mfd))
+		ret = mdp3_overlay_queue_buffer(mfd, req);
+	else
+		ret = -EPERM;
+
+	mutex_unlock(&mdp3_session->lock);
+
+	return ret;
+}
+
+/* True when the requested ROI differs from the currently programmed one. */
+bool update_roi(struct mdp3_rect oldROI, struct mdp_rect newROI)
+{
+	bool unchanged = (newROI.x == oldROI.x) && (newROI.y == oldROI.y) &&
+		(newROI.w == oldROI.w) && (newROI.h == oldROI.h);
+
+	return !unchanged;
+}
+
+/* Validate that @roi is non-empty and lies within the DMA source window. */
+bool is_roi_valid(struct mdp3_dma_source source_config, struct mdp_rect roi)
+{
+	if (!(roi.w > 0) || !(roi.h > 0))
+		return false;
+	if (roi.x < source_config.x || roi.y < source_config.y)
+		return false;
+	return ((roi.x + roi.w) <= source_config.width) &&
+		((roi.y + roi.h) <= source_config.height);
+}
+
+/*
+ * mdp3_ctrl_display_commit_kickoff() - send one queued frame to the panel.
+ * @mfd:      framebuffer device data
+ * @cmt_data: commit request, may carry a partial-update ROI
+ *
+ * Pops the next buffer from bufq_in, programs the partial-update ROI when
+ * valid and changed, kicks the DMA engine, and parks the buffer on
+ * bufq_out until a later commit retires it.  Also handles recovery from
+ * splash/idle power collapse, the first-commit settle delay, splash
+ * memory release and backlight-on after splash or ESD recovery.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
+					struct mdp_display_commit *cmt_data)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_img_data *data;
+	struct mdss_panel_info *panel_info;
+	int rc = 0;
+	static bool splash_done;
+	struct mdss_panel_data *panel;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EINVAL;
+
+	panel_info = mfd->panel_info;
+	mdp3_session = mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->dma)
+		return -EINVAL;
+
+	if (mdp3_bufq_count(&mdp3_session->bufq_in) == 0) {
+		pr_debug("no buffer in queue yet\n");
+		return -EPERM;
+	}
+
+	if (panel_info->partial_update_enabled &&
+		is_roi_valid(mdp3_session->dma->source_config,
+			cmt_data->l_roi) &&
+		update_roi(mdp3_session->dma->roi, cmt_data->l_roi)) {
+		mdp3_session->dma->roi.x = cmt_data->l_roi.x;
+		mdp3_session->dma->roi.y = cmt_data->l_roi.y;
+		mdp3_session->dma->roi.w = cmt_data->l_roi.w;
+		mdp3_session->dma->roi.h = cmt_data->l_roi.h;
+		mdp3_session->dma->update_src_cfg = true;
+		pr_debug("%s: ROI: x=%d y=%d w=%d h=%d\n", __func__,
+			mdp3_session->dma->roi.x,
+			mdp3_session->dma->roi.y,
+			mdp3_session->dma->roi.w,
+			mdp3_session->dma->roi.h);
+	}
+
+	panel = mdp3_session->panel;
+	/* Recover the pipeline if splash is active or it idle-collapsed. */
+	mutex_lock(&mdp3_res->fs_idle_pc_lock);
+	if (mdp3_session->in_splash_screen ||
+		mdp3_res->idle_pc) {
+		pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__,
+			mdp3_session->in_splash_screen, mdp3_res->idle_pc);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+			return -EINVAL;
+		}
+	}
+	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (!mdp3_session->status) {
+		pr_err("%s, display off!\n", __func__);
+		mutex_unlock(&mdp3_session->lock);
+		return -EPERM;
+	}
+
+	mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
+	data = mdp3_bufq_pop(&mdp3_session->bufq_in);
+	if (data) {
+		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+		mdp3_ctrl_clk_enable(mfd, 1);
+		if (mdp3_session->dma->update_src_cfg &&
+				panel_info->partial_update_enabled) {
+			panel->panel_info.roi.x = mdp3_session->dma->roi.x;
+			panel->panel_info.roi.y = mdp3_session->dma->roi.y;
+			panel->panel_info.roi.w = mdp3_session->dma->roi.w;
+			panel->panel_info.roi.h = mdp3_session->dma->roi.h;
+			/*
+			 * Cast through uintptr_t: a plain (int) cast would
+			 * truncate a dma_addr_t wider than 32 bits.
+			 */
+			rc = mdp3_session->dma->update(mdp3_session->dma,
+					(void *)(uintptr_t)data->addr,
+					mdp3_session->intf, (void *)panel);
+		} else {
+			rc = mdp3_session->dma->update(mdp3_session->dma,
+					(void *)(uintptr_t)data->addr,
+					mdp3_session->intf, NULL);
+		}
+		/* This is for the previous frame */
+		if (rc < 0) {
+			mdp3_ctrl_notify(mdp3_session,
+				MDP_NOTIFY_FRAME_TIMEOUT);
+		} else {
+			if (mdp3_ctrl_get_intf_type(mfd) ==
+						MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+				mdp3_ctrl_notify(mdp3_session,
+					MDP_NOTIFY_FRAME_DONE);
+			}
+		}
+		mdp3_session->dma_active = 1;
+		init_completion(&mdp3_session->dma_completion);
+		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
+		mdp3_bufq_push(&mdp3_session->bufq_out, data);
+	}
+
+	/* Retire the oldest displayed buffer once a newer one is queued. */
+	if (mdp3_bufq_count(&mdp3_session->bufq_out) > 1) {
+		mdp3_release_splash_memory(mfd);
+		data = mdp3_bufq_pop(&mdp3_session->bufq_out);
+		if (data)
+			mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+	}
+
+	if (mdp3_session->first_commit) {
+		/*wait to ensure frame is sent to panel*/
+		if (panel_info->mipi.post_init_delay)
+			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+					panel_info->mipi.post_init_delay);
+		else
+			msleep(1000 / panel_info->mipi.frame_rate);
+		mdp3_session->first_commit = false;
+		if (panel)
+			rc |= panel->event_handler(panel,
+				MDSS_EVENT_POST_PANEL_ON, NULL);
+	}
+
+	mdp3_session->vsync_before_commit = 0;
+	/* Restore backlight after splash hand-off or ESD recovery. */
+	if (!splash_done || mdp3_session->esd_recovery == true) {
+		if (panel && panel->set_backlight)
+			panel->set_backlight(panel, panel->panel_info.bl_max);
+		splash_done = true;
+		mdp3_session->esd_recovery = false;
+	}
+
+	/* start vsync tick countdown for cmd mode if vsync isn't enabled */
+	if (mfd->panel.type == MIPI_CMD_PANEL && !mdp3_session->vsync_enabled)
+		mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
+
+	mutex_unlock(&mdp3_session->lock);
+
+	mdss_fb_update_notify_update(mfd);
+
+	return 0;
+}
+
+/*
+ * mdp3_map_pan_buff_immediate() - SMMU-map the framebuffer for pan display.
+ * @mfd: framebuffer device data
+ *
+ * Maps mfd->fbmem_buf into the framebuffer IOMMU domain and stores the
+ * device address in mfd->iova.  On failure the dma-buf attachment and
+ * reference are torn down.
+ *
+ * Returns 0 on success or the negative errno from the mapping call.
+ */
+static int mdp3_map_pan_buff_immediate(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	unsigned long length;
+	dma_addr_t addr;
+	int domain = mfd->mdp.fb_mem_get_iommu_domain();
+
+	rc = mdss_smmu_map_dma_buf(mfd->fbmem_buf, mfd->fb_table, domain,
+			&addr, &length, DMA_BIDIRECTIONAL);
+	/*
+	 * rc is a plain int errno; check the sign directly instead of
+	 * IS_ERR_VALUE(), which is meant for unsigned long err-pointers.
+	 */
+	if (rc < 0)
+		goto err_unmap;
+	else
+		mfd->iova = addr;
+
+	pr_debug("%s : smmu map dma buf VA: (%llx) MFD->iova %llx\n",
+			__func__, (u64) addr, (u64) mfd->iova);
+	return rc;
+
+err_unmap:
+	pr_err("smmu map dma buf failed: (%d)\n", rc);
+	dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
+			mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+	dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
+	dma_buf_put(mfd->fbmem_buf);
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_pan_display() - legacy fbdev pan: push the current fb page.
+ * @mfd: framebuffer device data
+ *
+ * Computes the byte offset selected by (xoffset, yoffset), maps the
+ * framebuffer on the first commit, and kicks the DMA from the mapped
+ * iova.  Recovers the pipeline first if splash or idle power collapse
+ * is active.  Errors are logged; the function returns no status.
+ */
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi;
+	struct mdp3_session_data *mdp3_session;
+	u32 offset;
+	int bpp;
+	struct mdss_panel_info *panel_info;
+	static bool splash_done;
+	struct mdss_panel_data *panel;
+
+	int rc;
+
+	pr_debug("mdp3_ctrl_pan_display\n");
+	if (!mfd || !mfd->mdp.private1)
+		return;
+
+	panel_info = mfd->panel_info;
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->dma)
+		return;
+
+	/* Recover the pipeline if splash is active or it idle-collapsed. */
+	mutex_lock(&mdp3_res->fs_idle_pc_lock);
+	if (mdp3_session->in_splash_screen ||
+		mdp3_res->idle_pc) {
+		pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__,
+			mdp3_session->in_splash_screen, mdp3_res->idle_pc);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+			return;
+		}
+	}
+	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (!mdp3_session->status) {
+		pr_err("mdp3_ctrl_pan_display, display off!\n");
+		goto pan_error;
+	}
+
+	fbi = mfd->fbi;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+			offset, fbi->fix.smem_len);
+		goto pan_error;
+	}
+
+	if (mfd->fbi->screen_base) {
+		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
+		mdp3_ctrl_clk_enable(mfd, 1);
+		if (mdp3_session->first_commit) {
+			rc = mdp3_map_pan_buff_immediate(mfd);
+			/* rc is a plain int errno; IS_ERR_VALUE is wrong here. */
+			if (rc < 0)
+				goto pan_error;
+		}
+		/*
+		 * Cast through uintptr_t: a plain (int) cast would truncate
+		 * a dma_addr_t iova wider than 32 bits.
+		 */
+		rc = mdp3_session->dma->update(mdp3_session->dma,
+				(void *)(uintptr_t)(mfd->iova + offset),
+				mdp3_session->intf, NULL);
+		/* This is for the previous frame */
+		if (rc < 0) {
+			mdp3_ctrl_notify(mdp3_session,
+				MDP_NOTIFY_FRAME_TIMEOUT);
+		} else {
+			if (mdp3_ctrl_get_intf_type(mfd) ==
+				MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+				mdp3_ctrl_notify(mdp3_session,
+					MDP_NOTIFY_FRAME_DONE);
+			}
+		}
+		mdp3_session->dma_active = 1;
+		init_completion(&mdp3_session->dma_completion);
+		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
+	} else {
+		pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
+		mdp3_clk_enable(1, 0);
+		mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
+		mdp3_clk_enable(0, 0);
+	}
+
+	panel = mdp3_session->panel;
+	if (mdp3_session->first_commit) {
+		/*wait to ensure frame is sent to panel*/
+		if (panel_info->mipi.init_delay)
+			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+				panel_info->mipi.init_delay);
+		else
+			msleep(1000 / panel_info->mipi.frame_rate);
+		mdp3_session->first_commit = false;
+		if (panel)
+			panel->event_handler(panel, MDSS_EVENT_POST_PANEL_ON,
+					NULL);
+	}
+
+	mdp3_session->vsync_before_commit = 0;
+	/* Restore backlight after splash hand-off or ESD recovery. */
+	if (!splash_done || mdp3_session->esd_recovery == true) {
+		if (panel && panel->set_backlight)
+			panel->set_backlight(panel, panel->panel_info.bl_max);
+		splash_done = true;
+		mdp3_session->esd_recovery = false;
+	}
+
+
+pan_error:
+	mutex_unlock(&mdp3_session->lock);
+}
+
+/*
+ * mdp3_set_metadata() - handle MSMFB_METADATA_SET requests.
+ * @mfd:          framebuffer device data
+ * @metadata_ptr: request from userspace
+ *
+ * Only metadata_op_crc (MISR configuration) is supported; MDP clocks
+ * are voted on around the register access.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdp3_set_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata_ptr)
+{
+	int ret = 0;
+	int rc;
+
+	switch (metadata_ptr->op) {
+	case metadata_op_crc:
+		ret = mdp3_ctrl_res_req_clk(mfd, 1);
+		if (ret) {
+			pr_err("failed to turn on mdp clks\n");
+			return ret;
+		}
+		ret = mdp3_misr_set(&metadata_ptr->data.misr_request);
+		/*
+		 * Always drop the clock vote, but do not let a release
+		 * failure mask an earlier MISR configuration error.
+		 */
+		rc = mdp3_ctrl_res_req_clk(mfd, 0);
+		if (rc) {
+			pr_err("failed to release mdp clks\n");
+			if (!ret)
+				ret = rc;
+		}
+		break;
+	default:
+		pr_warn("Unsupported request to MDP SET META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_get_metadata() - handle MSMFB_METADATA_GET requests.
+ * @mfd:      framebuffer device data
+ * @metadata: request/response structure
+ *
+ * Supports frame-rate query, capability query (MDP rev 305, one DMA
+ * pipe), MISR readback (with clocks voted on around the access) and
+ * exporting the framebuffer ion buffer as an fd.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	int ret = 0;
+	int rc;
+
+	switch (metadata->op) {
+	case metadata_op_frame_rate:
+		metadata->data.panel_frame_rate =
+			mfd->panel_info->mipi.frame_rate;
+		break;
+	case metadata_op_get_caps:
+		metadata->data.caps.mdp_rev = 305;
+		metadata->data.caps.rgb_pipes = 0;
+		metadata->data.caps.vig_pipes = 0;
+		metadata->data.caps.dma_pipes = 1;
+		break;
+	case metadata_op_crc:
+		ret = mdp3_ctrl_res_req_clk(mfd, 1);
+		if (ret) {
+			pr_err("failed to turn on mdp clks\n");
+			return ret;
+		}
+		ret = mdp3_misr_get(&metadata->data.misr_request);
+		/*
+		 * Always drop the clock vote, but do not let a release
+		 * failure mask an earlier MISR readback error.
+		 */
+		rc = mdp3_ctrl_res_req_clk(mfd, 0);
+		if (rc) {
+			pr_err("failed to release mdp clks\n");
+			if (!ret)
+				ret = rc;
+		}
+		break;
+	case metadata_op_get_ion_fd:
+		if (mfd->fb_ion_handle) {
+			metadata->data.fbmem_ionfd =
+				dma_buf_fd(mfd->fbmem_buf, 0);
+			if (metadata->data.fbmem_ionfd < 0)
+				pr_err("fd allocation failed. fd = %d\n",
+						metadata->data.fbmem_ionfd);
+		}
+		break;
+	default:
+		pr_warn("Unsupported request to MDP GET META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_validate_start_req() - sanity-check a histogram start request.
+ *
+ * Returns 0 when the request is valid, -EINVAL otherwise.
+ */
+int mdp3_validate_start_req(struct mdp_histogram_start_req *req)
+{
+	int ret = 0;
+
+	if (req->frame_cnt >= MDP_HISTOGRAM_FRAME_COUNT_MAX) {
+		pr_err("%s invalid req frame_cnt\n", __func__);
+		ret = -EINVAL;
+	} else if (req->bit_mask >= MDP_HISTOGRAM_BIT_MASK_MAX) {
+		pr_err("%s invalid req bit mask\n", __func__);
+		ret = -EINVAL;
+	} else if (req->block != MDP_BLOCK_DMA_P ||
+			req->num_bins != MDP_HISTOGRAM_BIN_NUM) {
+		pr_err("mdp3_histogram_start invalid request\n");
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_validate_scale_config() - range-check backlight scale parameters.
+ *
+ * Returns 0 when both fields are in range, -EINVAL otherwise.
+ */
+int mdp3_validate_scale_config(struct mdp_bl_scale_data *data)
+{
+	int ret = 0;
+
+	if (data->scale > MDP_HISTOGRAM_BL_SCALE_MAX) {
+		pr_err("%s invalid bl_scale\n", __func__);
+		ret = -EINVAL;
+	} else if (data->min_lvl > MDP_HISTOGRAM_BL_LEVEL_MAX) {
+		pr_err("%s invalid bl_min_lvl\n", __func__);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_validate_csc_data() - range-check CSC matrix and vectors.
+ *
+ * Rejects out-of-range matrix/vector entries, and an all-zero 3x3
+ * matrix (which would produce a black screen).
+ *
+ * Returns 0 when valid, -EINVAL otherwise.
+ */
+int mdp3_validate_csc_data(struct mdp_csc_cfg_data *data)
+{
+	bool any_nonzero = false;
+	int i;
+
+	/* 3x3 matrix: each coefficient in range, not all zero. */
+	for (i = 0; i < 9; i++) {
+		if (data->csc_data.csc_mv[i] >=
+				MDP_HISTOGRAM_CSC_MATRIX_MAX)
+			return -EINVAL;
+		if (data->csc_data.csc_mv[i] != 0)
+			any_nonzero = true;
+	}
+	if (!any_nonzero) {
+		pr_err("%s: black screen data! csc_mv is all 0s\n", __func__);
+		return -EINVAL;
+	}
+	/* Pre/post bias vectors (3 entries each). */
+	for (i = 0; i < 3; i++) {
+		if (data->csc_data.csc_pre_bv[i] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX)
+			return -EINVAL;
+		if (data->csc_data.csc_post_bv[i] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX)
+			return -EINVAL;
+	}
+	/* Pre/post limit vectors (6 entries each). */
+	for (i = 0; i < 6; i++) {
+		if (data->csc_data.csc_pre_lv[i] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX)
+			return -EINVAL;
+		if (data->csc_data.csc_post_lv[i] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+/* mdp3_histogram_start() - validate the request and start histogram
+ * collection on the DMA pipe. Idempotent: returns 0 if collection is
+ * already running. Takes session->lock then session->histo_lock; votes
+ * the DMA_P client clocks on only for the duration of the programming
+ * sequence (reset, configure, start).
+ */
+static int mdp3_histogram_start(struct mdp3_session_data *session,
+ struct mdp_histogram_start_req *req)
+{
+ int ret;
+ struct mdp3_dma_histogram_config histo_config;
+
+ mutex_lock(&session->lock);
+ if (!session->status) {
+ /* display is off; histogram hardware is not available */
+ mutex_unlock(&session->lock);
+ return -EPERM;
+ }
+
+ pr_debug("mdp3_histogram_start\n");
+
+ ret = mdp3_validate_start_req(req);
+ if (ret) {
+ mutex_unlock(&session->lock);
+ return ret;
+ }
+
+ if (!session->dma->histo_op ||
+ !session->dma->config_histo) {
+ pr_err("mdp3_histogram_start not supported\n");
+ mutex_unlock(&session->lock);
+ return -EINVAL;
+ }
+
+ mutex_lock(&session->histo_lock);
+
+ if (session->histo_status) {
+ pr_info("mdp3_histogram_start already started\n");
+ mutex_unlock(&session->histo_lock);
+ mutex_unlock(&session->lock);
+ return 0;
+ }
+
+ mdp3_res_update(1, 0, MDP3_CLIENT_DMA_P);
+ ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_RESET);
+ if (ret) {
+ pr_err("mdp3_histogram_start reset error\n");
+ goto histogram_start_err;
+ }
+
+ histo_config.frame_count = req->frame_cnt;
+ histo_config.bit_mask = req->bit_mask;
+ histo_config.auto_clear_en = 1;
+ histo_config.bit_mask_polarity = 0;
+ ret = session->dma->config_histo(session->dma, &histo_config);
+ if (ret) {
+ pr_err("mdp3_histogram_start config error\n");
+ goto histogram_start_err;
+ }
+
+ ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_START);
+ if (ret) {
+ pr_err("mdp3_histogram_start config error\n");
+ goto histogram_start_err;
+ }
+
+ session->histo_status = 1;
+
+/* success also falls through here: the label only releases the clock
+ * vote and the locks, so it is shared by both paths.
+ */
+histogram_start_err:
+ mdp3_res_update(0, 0, MDP3_CLIENT_DMA_P);
+ mutex_unlock(&session->histo_lock);
+ mutex_unlock(&session->lock);
+ return ret;
+}
+
+static int mdp3_histogram_stop(struct mdp3_session_data *session,
+ u32 block)
+{
+ int ret;
+
+ pr_debug("mdp3_histogram_stop\n");
+
+ if (!session->dma->histo_op || block != MDP_BLOCK_DMA_P) {
+ pr_err("mdp3_histogram_stop not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&session->histo_lock);
+
+ if (!session->histo_status) {
+ pr_debug("mdp3_histogram_stop already stopped!");
+ ret = 0;
+ goto histogram_stop_err;
+ }
+
+ mdp3_clk_enable(1, 0);
+ ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_CANCEL);
+ mdp3_clk_enable(0, 0);
+ if (ret)
+ pr_err("mdp3_histogram_stop error\n");
+
+ session->histo_status = 0;
+
+histogram_stop_err:
+ mutex_unlock(&session->histo_lock);
+ return ret;
+}
+
+/* mdp3_histogram_collect() - read the latest histogram bins from the DMA
+ * engine and copy them to the user buffers in @hist. Requires collection
+ * to have been started (else -EPROTO) and the mdp/dsi clocks to be on
+ * (else -EPERM).
+ *
+ * NOTE(review): copy_to_user() returns the number of bytes NOT copied;
+ * on partial copy this positive count is returned to the ioctl caller
+ * instead of -EFAULT — confirm this matches the rest of the driver's
+ * error convention.
+ */
+static int mdp3_histogram_collect(struct mdp3_session_data *session,
+ struct mdp_histogram_data *hist)
+{
+ int ret;
+ struct mdp3_dma_histogram_data *mdp3_histo;
+
+ pr_debug("%s\n", __func__);
+ if (!session->dma->get_histo) {
+ pr_err("mdp3_histogram_collect not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&session->histo_lock);
+
+ if (!session->histo_status) {
+ pr_debug("mdp3_histogram_collect not started\n");
+ mutex_unlock(&session->histo_lock);
+ return -EPROTO;
+ }
+
+ mutex_unlock(&session->histo_lock);
+
+ if (!session->clk_on) {
+ pr_debug("mdp/dsi clock off currently\n");
+ return -EPERM;
+ }
+
+ mdp3_clk_enable(1, 0);
+ ret = session->dma->get_histo(session->dma);
+ mdp3_clk_enable(0, 0);
+ if (ret) {
+ pr_debug("mdp3_histogram_collect error = %d\n", ret);
+ return ret;
+ }
+
+ mdp3_histo = &session->dma->histo_data;
+
+ /* copy the three color channels and the two extra status words out */
+ ret = copy_to_user(hist->c0, mdp3_histo->r_data,
+ sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(hist->c1, mdp3_histo->g_data,
+ sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(hist->c2, mdp3_histo->b_data,
+ sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(hist->extra_info, mdp3_histo->extra,
+ sizeof(uint32_t) * 2);
+ if (ret)
+ return ret;
+
+ hist->bin_cnt = MDP_HISTOGRAM_BIN_NUM;
+ hist->block = MDP_BLOCK_DMA_P;
+ return ret;
+}
+
+/* mdp3_bl_scale_config() - store new backlight scale/minimum values on the
+ * framebuffer device and immediately reapply the current backlight level so
+ * the new scaling takes effect. Serialized by mfd->bl_lock. Always returns 0.
+ */
+static int mdp3_bl_scale_config(struct msm_fb_data_type *mfd,
+ struct mdp_bl_scale_data *data)
+{
+ int ret = 0;
+ int curr_bl;
+
+ mutex_lock(&mfd->bl_lock);
+ curr_bl = mfd->bl_level;
+ mfd->bl_scale = data->scale;
+ mfd->bl_min_lvl = data->min_lvl;
+ pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
+ mfd->bl_min_lvl);
+
+ /* update current backlight to use new scaling*/
+ mdss_fb_set_backlight(mfd, curr_bl);
+ mutex_unlock(&mfd->bl_lock);
+ return ret;
+}
+
+/* mdp3_csc_config() - program a user-supplied color space conversion into
+ * the DMA pipe. Ping-pongs cc_vect_sel between the two hardware vector
+ * banks so the new coefficients are written to the inactive bank, and
+ * caches the raw request for reprogramming across suspend/resume.
+ * Caller must have validated @data (see mdp3_validate_csc_data()).
+ */
+static int mdp3_csc_config(struct mdp3_session_data *session,
+ struct mdp_csc_cfg_data *data)
+{
+ struct mdp3_dma_color_correct_config config;
+ struct mdp3_dma_ccs ccs;
+ int ret = -EINVAL;
+
+ if (!data->csc_data.csc_mv || !data->csc_data.csc_pre_bv ||
+ !data->csc_data.csc_post_bv || !data->csc_data.csc_pre_lv ||
+ !data->csc_data.csc_post_lv) {
+ pr_err("%s : Invalid csc vectors", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&session->lock);
+ mutex_lock(&session->dma->pp_lock);
+ /* flip to the other vector bank (0 <-> 1) */
+ session->dma->cc_vect_sel = (session->dma->cc_vect_sel + 1) % 2;
+
+ config.ccs_enable = 1;
+ config.ccs_sel = session->dma->cc_vect_sel;
+ config.pre_limit_sel = session->dma->cc_vect_sel;
+ config.post_limit_sel = session->dma->cc_vect_sel;
+ config.pre_bias_sel = session->dma->cc_vect_sel;
+ config.post_bias_sel = session->dma->cc_vect_sel;
+ config.ccs_dirty = true;
+
+ ccs.mv = data->csc_data.csc_mv;
+ ccs.pre_bv = data->csc_data.csc_pre_bv;
+ ccs.post_bv = data->csc_data.csc_post_bv;
+ ccs.pre_lv = data->csc_data.csc_pre_lv;
+ ccs.post_lv = data->csc_data.csc_post_lv;
+
+ /* cache one copy of setting for suspend/resume reconfiguring */
+ session->dma->ccs_cache = *data;
+
+ mdp3_clk_enable(1, 0);
+ ret = session->dma->config_ccs(session->dma, &config, &ccs);
+ mdp3_clk_enable(0, 0);
+ mutex_unlock(&session->dma->pp_lock);
+ mutex_unlock(&session->lock);
+ return ret;
+}
+
+/* mdp3_pp_ioctl() - handle MSMFB_MDP_PP: dispatch backlight-scale, CSC and
+ * RGB LUT post-processing sub-operations, then copy the (possibly updated)
+ * request structure back to user space on success.
+ */
+static int mdp3_pp_ioctl(struct msm_fb_data_type *mfd,
+ void __user *argp)
+{
+ int ret = -EINVAL;
+ struct msmfb_mdp_pp mdp_pp;
+ struct mdp_lut_cfg_data *lut;
+ struct mdp3_session_data *mdp3_session;
+
+ if (!mfd || !mfd->mdp.private1)
+ return -EINVAL;
+
+ mdp3_session = mfd->mdp.private1;
+
+ ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
+ if (ret)
+ return ret;
+
+ switch (mdp_pp.op) {
+ case mdp_bl_scale_cfg:
+ ret = mdp3_validate_scale_config(&mdp_pp.data.bl_scale_data);
+ if (ret) {
+ pr_err("%s: invalid scale config\n", __func__);
+ break;
+ }
+ ret = mdp3_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
+ &mdp_pp.data.bl_scale_data);
+ break;
+ case mdp_op_csc_cfg:
+ /* Checking state of dyn_pu before programming CSC block */
+ if (mdp3_session->dyn_pu_state) {
+ pr_debug("Partial update feature is enabled.\n");
+ return -EPERM;
+ }
+ ret = mdp3_validate_csc_data(&(mdp_pp.data.csc_cfg_data));
+ if (ret) {
+ pr_err("%s: invalid csc data\n", __func__);
+ break;
+ }
+ ret = mdp3_csc_config(mdp3_session,
+ &(mdp_pp.data.csc_cfg_data));
+ break;
+ case mdp_op_lut_cfg:
+ lut = &mdp_pp.data.lut_cfg_data;
+ if (lut->lut_type != mdp_lut_rgb) {
+ pr_err("Lut type %d is not supported", lut->lut_type);
+ return -EINVAL;
+ }
+ if (lut->data.rgb_lut_data.flags & MDP_PP_OPS_READ)
+ ret = mdp3_ctrl_lut_read(mfd,
+ &(lut->data.rgb_lut_data));
+ else
+ ret = mdp3_ctrl_lut_config(mfd,
+ &(lut->data.rgb_lut_data));
+ if (ret)
+ pr_err("RGB LUT ioctl failed\n");
+ else
+ /* NOTE(review): on success mdp_pp is copied out here
+ * AND again after the switch — redundant but harmless.
+ */
+ ret = copy_to_user(argp, &mdp_pp, sizeof(mdp_pp));
+ break;
+
+ default:
+ pr_err("Unsupported request to MDP_PP IOCTL.\n");
+ ret = -EINVAL;
+ break;
+ }
+ if (!ret)
+ ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
+ return ret;
+}
+
+/* mdp3_histo_ioctl() - dispatch the MSMFB_HISTOGRAM_* ioctls: copy the
+ * request from user space, run the matching start/stop/collect operation,
+ * and for MSMFB_HISTOGRAM copy the collected bins back to the caller.
+ * Unknown commands fall through and return the initial "not supported"
+ * error.
+ */
+static int mdp3_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
+ void __user *argp)
+{
+ /* ENOTSUPP is the in-kernel errno; ENOTSUP is a userspace alias for
+ * EOPNOTSUPP and is not defined by kernel headers.
+ */
+ int ret = -ENOTSUPP;
+ struct mdp_histogram_data hist;
+ struct mdp_histogram_start_req hist_req;
+ u32 block;
+ struct mdp3_session_data *mdp3_session;
+
+ if (!mfd || !mfd->mdp.private1)
+ return -EINVAL;
+
+ mdp3_session = mfd->mdp.private1;
+
+ switch (cmd) {
+ case MSMFB_HISTOGRAM_START:
+ ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
+ if (ret)
+ return ret;
+
+ ret = mdp3_histogram_start(mdp3_session, &hist_req);
+ break;
+
+ case MSMFB_HISTOGRAM_STOP:
+ ret = copy_from_user(&block, argp, sizeof(int));
+ if (ret)
+ return ret;
+
+ ret = mdp3_histogram_stop(mdp3_session, block);
+ break;
+
+ case MSMFB_HISTOGRAM:
+ ret = copy_from_user(&hist, argp, sizeof(hist));
+ if (ret)
+ return ret;
+
+ ret = mdp3_histogram_collect(mdp3_session, &hist);
+ if (!ret)
+ ret = copy_to_user(argp, &hist, sizeof(hist));
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/* mdp3_validate_lut_data() - verify a color map has all three channel
+ * arrays and that every entry fits in 8 bits (hardware LUT width).
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int mdp3_validate_lut_data(struct fb_cmap *cmap)
+{
+ u32 i = 0;
+
+ if (!cmap || !cmap->red || !cmap->green || !cmap->blue) {
+ pr_err("Invalid arguments!\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MDP_LUT_SIZE; i++) {
+ if (cmap->red[i] > 0xFF || cmap->green[i] > 0xFF ||
+ cmap->blue[i] > 0xFF) {
+ pr_err("LUT value over 255 (limit) at %d index\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* mdp3_copy_lut_buffer() - deep-copy one full color map into another.
+ * Both maps must already have MDP_LUT_SIZE-element channel arrays
+ * allocated. Returns 0 on success, -EINVAL if any pointer is missing.
+ */
+static inline int mdp3_copy_lut_buffer(struct fb_cmap *dst, struct fb_cmap *src)
+{
+ if (!dst || !src || !dst->red || !dst->blue || !dst->green ||
+ !src->red || !src->green || !src->blue) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ dst->start = src->start;
+ dst->len = src->len;
+
+ memcpy(dst->red, src->red, MDP_LUT_SIZE * sizeof(u16));
+ memcpy(dst->green, src->green, MDP_LUT_SIZE * sizeof(u16));
+ memcpy(dst->blue, src->blue, MDP_LUT_SIZE * sizeof(u16));
+ return 0;
+}
+
+/* mdp3_alloc_lut_buffer() - allocate a zeroed struct fb_cmap plus its three
+ * MDP_LUT_SIZE channel arrays as device-managed memory, returning it via
+ * *cmap. Returns 0 on success or -ENOMEM with all partial allocations
+ * released. devm_kzalloc() already zero-fills, so no explicit memset is
+ * needed (the original memset calls were redundant).
+ */
+static int mdp3_alloc_lut_buffer(struct platform_device *pdev, void **cmap)
+{
+ struct fb_cmap *map;
+
+ map = devm_kzalloc(&pdev->dev, sizeof(struct fb_cmap), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->red = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16),
+ GFP_KERNEL);
+ if (map->red == NULL)
+ goto exit_red;
+
+ map->green = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16),
+ GFP_KERNEL);
+ if (map->green == NULL)
+ goto exit_green;
+
+ map->blue = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16),
+ GFP_KERNEL);
+ if (map->blue == NULL)
+ goto exit_blue;
+
+ *cmap = map;
+ return 0;
+/* unwind partial allocations in reverse order */
+exit_blue:
+ devm_kfree(&pdev->dev, map->green);
+exit_green:
+ devm_kfree(&pdev->dev, map->red);
+exit_red:
+ devm_kfree(&pdev->dev, map);
+ return -ENOMEM;
+}
+
+/* mdp3_free_lut_buffer() - release a cmap allocated by
+ * mdp3_alloc_lut_buffer() and NULL the caller's pointer so stale
+ * references (e.g. dma->gc_cmap, which other paths test against NULL)
+ * cannot be used after free. No-op if *cmap is already NULL.
+ */
+static void mdp3_free_lut_buffer(struct platform_device *pdev, void **cmap)
+{
+ struct fb_cmap *map = (struct fb_cmap *)(*cmap);
+
+ if (map == NULL)
+ return;
+
+ devm_kfree(&pdev->dev, map->blue);
+ map->blue = NULL;
+ devm_kfree(&pdev->dev, map->green);
+ map->green = NULL;
+ devm_kfree(&pdev->dev, map->red);
+ map->red = NULL;
+ devm_kfree(&pdev->dev, map);
+ /* Clear the caller's pointer; the previous "map = NULL;" only reset
+ * the local variable and left *cmap dangling at freed memory.
+ */
+ *cmap = NULL;
+}
+
+/* mdp3_lut_combine_gain() - fold the cached gamma-correction (GC) LUT and
+ * histogram LUT into a single output cmap by multiplying the per-channel
+ * gains, normalizing with mdp_lut_inverse16 (presumably a 16.16 fixed-point
+ * 1/i table — TODO confirm) and clamping to 8 bits. Entry 0 is skipped
+ * (loop starts at i = 1), so cmap[0] is left as provided by the caller.
+ */
+static int mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma)
+{
+ int i = 0;
+ u32 r = 0, g = 0, b = 0;
+
+ if (!cmap || !dma || !dma->gc_cmap || !dma->hist_cmap ||
+ !dma->gc_cmap->red || !dma->gc_cmap->green ||
+ !dma->gc_cmap->blue || !dma->hist_cmap->red ||
+ !dma->hist_cmap->green || !dma->hist_cmap->blue) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ for (i = 1; i < MDP_LUT_SIZE; i++) {
+ r = MIN(dma->gc_cmap->red[i] * dma->hist_cmap->red[i] *
+ mdp_lut_inverse16[i], 0xFF0000);
+ g = MIN(dma->gc_cmap->green[i] * dma->hist_cmap->green[i] *
+ mdp_lut_inverse16[i], 0xFF0000);
+ b = MIN(dma->gc_cmap->blue[i] * dma->hist_cmap->blue[i] *
+ mdp_lut_inverse16[i], 0xFF0000);
+
+ /* drop the fractional 16 bits, keep the 8-bit LUT value */
+ cmap->red[i] = (r >> 16) & 0xFF;
+ cmap->green[i] = (g >> 16) & 0xFF;
+ cmap->blue[i] = (b >> 16) & 0xFF;
+ }
+ return 0;
+}
+
+/* Called from within pp_lock and session lock locked context */
+static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
+ struct fb_cmap *cmap)
+{
+ int rc = 0;
+ struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+ struct mdp3_dma *dma;
+ struct mdp3_dma_lut_config lut_config;
+
+ dma = mdp3_session->dma;
+
+ if (!dma->config_lut) {
+ pr_err("Config LUT not defined!\n");
+ return -EINVAL;
+ }
+
+ lut_config.lut_enable = 7;
+ lut_config.lut_sel = mdp3_session->lut_sel;
+ lut_config.lut_position = 1;
+ lut_config.lut_dirty = true;
+
+ if (!mdp3_session->status) {
+ pr_err("display off!\n");
+ return -EPERM;
+ }
+
+ mdp3_clk_enable(1, 0);
+ rc = dma->config_lut(dma, &lut_config, cmap);
+ mdp3_clk_enable(0, 0);
+ if (rc)
+ pr_err("mdp3_ctrl_lut_update failed\n");
+
+ mdp3_session->lut_sel = (mdp3_session->lut_sel + 1) % 2;
+ return rc;
+}
+
+/* mdp3_ctrl_lut_config() - configure the RGB LUT (gamma-correction or
+ * histogram flavor) from a user request. Copies the user cmap into a
+ * scratch buffer, maintains per-type enable state and a cached copy of
+ * the user values in dma->gc_cmap / dma->hist_cmap, combines the two
+ * gains when both are enabled, and finally programs the hardware.
+ * Runs under session->lock then dma->pp_lock; the scratch cmap is always
+ * freed on exit.
+ */
+static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
+ struct mdp_rgb_lut_data *cfg)
+{
+ int rc = 0;
+ bool data_validated = false;
+ struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+ struct mdp3_dma *dma;
+ struct fb_cmap *cmap;
+
+ dma = mdp3_session->dma;
+
+ /* bound start/len so the copy below stays inside the scratch buffer */
+ if ((cfg->cmap.start > MDP_LUT_SIZE) ||
+ (cfg->cmap.len > MDP_LUT_SIZE) ||
+ (cfg->cmap.start + cfg->cmap.len > MDP_LUT_SIZE)) {
+ pr_err("Invalid arguments.\n");
+ return -EINVAL;
+ }
+
+ rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **) &cmap);
+ if (rc) {
+ pr_err("No memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&mdp3_session->lock);
+ mutex_lock(&dma->pp_lock);
+ rc = copy_from_user(cmap->red + cfg->cmap.start,
+ cfg->cmap.red, sizeof(u16) * cfg->cmap.len);
+ rc |= copy_from_user(cmap->green + cfg->cmap.start,
+ cfg->cmap.green, sizeof(u16) * cfg->cmap.len);
+ rc |= copy_from_user(cmap->blue + cfg->cmap.start,
+ cfg->cmap.blue, sizeof(u16) * cfg->cmap.len);
+ if (rc) {
+ pr_err("Copying user data failed!\n");
+ goto exit_err;
+ }
+
+ switch (cfg->lut_type) {
+ case mdp_rgb_lut_gc:
+ if (cfg->flags & MDP_PP_OPS_DISABLE) {
+ if (dma->lut_sts & MDP3_LUT_GC_EN)
+ /* Free GC cmap cache since disabled */
+ mdp3_free_lut_buffer(mfd->pdev,
+ (void **)&dma->gc_cmap)
+
+/* mdp3_ctrl_lut_read() - return the cached (user-programmed) GC or
+ * histogram LUT values to user space. Fails with -EPERM if the requested
+ * LUT type was never programmed; the copy-out runs under dma->pp_lock so
+ * it cannot race a concurrent reconfiguration.
+ */
+static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd,
+ struct mdp_rgb_lut_data *cfg)
+{
+ int rc = 0;
+ struct fb_cmap *cmap;
+ struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+ struct mdp3_dma *dma = mdp3_session->dma;
+
+ switch (cfg->lut_type) {
+ case mdp_rgb_lut_gc:
+ if (!dma->gc_cmap) {
+ pr_err("GC not programmed\n");
+ return -EPERM;
+ }
+ cmap = dma->gc_cmap;
+ break;
+ case mdp_rgb_lut_hist:
+ if (!dma->hist_cmap) {
+ pr_err("Hist LUT not programmed\n");
+ return -EPERM;
+ }
+ cmap = dma->hist_cmap;
+ break;
+ default:
+ pr_err("Invalid lut type %u\n", cfg->lut_type);
+ return -EINVAL;
+ }
+
+ cfg->cmap.start = cmap->start;
+ cfg->cmap.len = cmap->len;
+
+ mutex_lock(&dma->pp_lock);
+ /* always copy the full table regardless of cached start/len */
+ rc = copy_to_user(cfg->cmap.red, cmap->red, sizeof(u16) *
+ MDP_LUT_SIZE);
+ rc |= copy_to_user(cfg->cmap.green, cmap->green, sizeof(u16) *
+ MDP_LUT_SIZE);
+ rc |= copy_to_user(cfg->cmap.blue, cmap->blue, sizeof(u16) *
+ MDP_LUT_SIZE);
+ mutex_unlock(&dma->pp_lock);
+ return rc;
+}
+
+/* Invoked from ctrl_on with session lock locked context */
+static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd)
+{
+ struct mdp3_session_data *mdp3_session;
+ struct mdp3_dma *dma;
+ struct fb_cmap *cmap;
+ int rc = 0;
+
+ mdp3_session = mfd->mdp.private1;
+ dma = mdp3_session->dma;
+
+ mutex_lock(&dma->pp_lock);
+ /*
+ * if dma->ccs_config.ccs_enable is set then DMA PP block was enabled
+ * via user space IOCTL.
+ * Then set dma->ccs_config.ccs_dirty flag
+ * Then PP block will be reconfigured when next kickoff comes.
+ */
+ if (dma->ccs_config.ccs_enable)
+ dma->ccs_config.ccs_dirty = true;
+
+ /*
+ * If gamma correction was enabled then we program the LUT registers
+ * with the last configuration data before suspend. If gamma correction
+ * is not enabled then we do not program anything. The LUT from
+ * histogram processing algorithms will program hardware based on new
+ * frame data if they are enabled.
+ */
+ if (dma->lut_sts & MDP3_LUT_GC_EN) {
+
+ rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **)&cmap);
+ if (rc) {
+ pr_err("No memory for GC LUT, rc = %d\n", rc);
+ goto exit_err;
+ }
+
+ if (dma->lut_sts & MDP3_LUT_HIST_EN) {
+ rc = mdp3_lut_combine_gain(cmap, dma);
+ if (rc) {
+ pr_err("Combining the gain failed rc=%d\n", rc);
+ goto exit_err;
+ }
+ } else {
+ rc = mdp3_copy_lut_buffer(cmap, dma->gc_cmap);
+ if (rc) {
+ pr_err("Updating GC failed rc = %d\n", rc);
+ goto exit_err;
+ }
+ }
+
+ rc = mdp3_ctrl_lut_update(mfd, cmap);
+ if (rc)
+ pr_err("GC Lut update failed rc=%d\n", rc);
+exit_err:
+ mdp3_free_lut_buffer(mfd->pdev, (void **)&cmap);
+ }
+
+ mutex_unlock(&dma->pp_lock);
+}
+
+/* mdp3_overlay_prepare() - handle MSMFB_OVERLAY_PREPARE. MDP3 supports
+ * exactly one overlay, so the user list must contain a single request;
+ * it is copied in (note the double hop: first the pointer stored in
+ * ovlist.overlay_list, then the request it points to), applied via
+ * mdp3_overlay_set(), copied back on success, and processed_overlays is
+ * set to 0 or 1 accordingly.
+ */
+static int mdp3_overlay_prepare(struct msm_fb_data_type *mfd,
+ struct mdp_overlay_list __user *user_ovlist)
+{
+ struct mdp_overlay_list ovlist;
+ struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+ struct mdp_overlay *req_list;
+ struct mdp_overlay *req;
+ int rc;
+
+ if (!mdp3_session)
+ return -ENODEV;
+
+ req = &mdp3_session->req_overlay;
+
+ if (copy_from_user(&ovlist, user_ovlist, sizeof(ovlist)))
+ return -EFAULT;
+
+ if (ovlist.num_overlays != 1) {
+ pr_err("OV_PREPARE failed: only 1 overlay allowed\n");
+ return -EINVAL;
+ }
+
+ /* fetch the user-space pointer to the (single) overlay request */
+ if (copy_from_user(&req_list, ovlist.overlay_list,
+ sizeof(struct mdp_overlay *)))
+ return -EFAULT;
+
+ if (copy_from_user(req, req_list, sizeof(*req)))
+ return -EFAULT;
+
+ rc = mdp3_overlay_set(mfd, req);
+ if (!IS_ERR_VALUE(rc)) {
+ if (copy_to_user(req_list, req, sizeof(*req)))
+ return -EFAULT;
+ }
+
+ if (put_user(IS_ERR_VALUE(rc) ? 0 : 1,
+ &user_ovlist->processed_overlays))
+ return -EFAULT;
+
+ return rc;
+}
+
+/* mdp3_ctrl_ioctl_handler() - top-level fb ioctl dispatcher for the MDP3
+ * path. Rejects most commands while the display is off (metadata get and
+ * histogram stop/collect are still allowed), resets the controller when
+ * leaving splash screen or idle power collapse before blit/play commands,
+ * and routes each command to its handler. Unknown commands return -EINVAL.
+ */
+static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
+ u32 cmd, void __user *argp)
+{
+ int rc = -EINVAL;
+ struct mdp3_session_data *mdp3_session;
+ struct msmfb_metadata metadata;
+ struct mdp_overlay *req = NULL;
+ struct msmfb_overlay_data ov_data;
+ int val;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ if (!mdp3_session)
+ return -ENODEV;
+
+ req = &mdp3_session->req_overlay;
+
+ if (!mdp3_session->status && cmd != MSMFB_METADATA_GET &&
+ cmd != MSMFB_HISTOGRAM_STOP && cmd != MSMFB_HISTOGRAM) {
+ pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
+ return -EPERM;
+ }
+
+ switch (cmd) {
+ case MSMFB_MDP_PP:
+ rc = mdp3_pp_ioctl(mfd, argp);
+ break;
+ case MSMFB_HISTOGRAM_START:
+ case MSMFB_HISTOGRAM_STOP:
+ case MSMFB_HISTOGRAM:
+ rc = mdp3_histo_ioctl(mfd, cmd, argp);
+ break;
+
+ case MSMFB_VSYNC_CTRL:
+ case MSMFB_OVERLAY_VSYNC_CTRL:
+ if (!copy_from_user(&val, argp, sizeof(val))) {
+ mutex_lock(&mdp3_session->lock);
+ mdp3_session->vsync_enabled = val;
+ rc = mdp3_ctrl_vsync_enable(mfd, val);
+ mutex_unlock(&mdp3_session->lock);
+ } else {
+ pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n");
+ rc = -EFAULT;
+ }
+ break;
+ case MSMFB_ASYNC_BLIT:
+ /* leaving splash/idle-pc requires a controller reset first */
+ mutex_lock(&mdp3_res->fs_idle_pc_lock);
+ if (mdp3_session->in_splash_screen || mdp3_res->idle_pc) {
+ pr_debug("%s: reset- in_splash = %d, idle_pc = %d",
+ __func__, mdp3_session->in_splash_screen,
+ mdp3_res->idle_pc);
+ mdp3_ctrl_reset(mfd);
+ }
+ mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+ rc = mdp3_ctrl_async_blit_req(mfd, argp);
+ break;
+ case MSMFB_BLIT:
+ mutex_lock(&mdp3_res->fs_idle_pc_lock);
+ if (mdp3_session->in_splash_screen)
+ mdp3_ctrl_reset(mfd);
+ mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+ rc = mdp3_ctrl_blit_req(mfd, argp);
+ break;
+ case MSMFB_METADATA_GET:
+ rc = copy_from_user(&metadata, argp, sizeof(metadata));
+ if (!rc)
+ rc = mdp3_get_metadata(mfd, &metadata);
+ if (!rc)
+ rc = copy_to_user(argp, &metadata, sizeof(metadata));
+ if (rc)
+ pr_err("mdp3_get_metadata failed (%d)\n", rc);
+ break;
+ case MSMFB_METADATA_SET:
+ rc = copy_from_user(&metadata, argp, sizeof(metadata));
+ if (!rc)
+ rc = mdp3_set_metadata(mfd, &metadata);
+ if (rc)
+ pr_err("mdp3_set_metadata failed (%d)\n", rc);
+ break;
+ case MSMFB_OVERLAY_GET:
+ rc = copy_from_user(req, argp, sizeof(*req));
+ if (!rc) {
+ rc = mdp3_overlay_get(mfd, req);
+
+ if (!IS_ERR_VALUE(rc))
+ rc = copy_to_user(argp, req, sizeof(*req));
+ }
+ if (rc)
+ pr_err("OVERLAY_GET failed (%d)\n", rc);
+ break;
+ case MSMFB_OVERLAY_SET:
+ rc = copy_from_user(req, argp, sizeof(*req));
+ if (!rc) {
+ rc = mdp3_overlay_set(mfd, req);
+
+ if (!IS_ERR_VALUE(rc))
+ rc = copy_to_user(argp, req, sizeof(*req));
+ }
+ if (rc)
+ pr_err("OVERLAY_SET failed (%d)\n", rc);
+ break;
+ case MSMFB_OVERLAY_UNSET:
+ if (!IS_ERR_VALUE(copy_from_user(&val, argp, sizeof(val))))
+ rc = mdp3_overlay_unset(mfd, val);
+ break;
+ case MSMFB_OVERLAY_PLAY:
+ rc = copy_from_user(&ov_data, argp, sizeof(ov_data));
+ mutex_lock(&mdp3_res->fs_idle_pc_lock);
+ if (mdp3_session->in_splash_screen)
+ mdp3_ctrl_reset(mfd);
+ mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+ if (!rc)
+ rc = mdp3_overlay_play(mfd, &ov_data);
+ if (rc)
+ pr_err("OVERLAY_PLAY failed (%d)\n", rc);
+ break;
+ case MSMFB_OVERLAY_PREPARE:
+ rc = mdp3_overlay_prepare(mfd, argp);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+/* mdp3_wait_for_dma_done() - block until the in-flight DMA transfer
+ * completes or KOFF_TIMEOUT expires. Returns 0 if no DMA was active or it
+ * completed in time (dma_active is then cleared), -ETIME on timeout.
+ */
+int mdp3_wait_for_dma_done(struct mdp3_session_data *session)
+{
+ int rc = 0;
+
+ if (session->dma_active) {
+ /* wait_for_completion_timeout: >0 = done, 0 = timed out */
+ rc = wait_for_completion_timeout(&session->dma_completion,
+ KOFF_TIMEOUT);
+ if (rc > 0) {
+ session->dma_active = 0;
+ rc = 0;
+ } else if (rc == 0) {
+ rc = -ETIME;
+ }
+ }
+ return rc;
+}
+
+/* mdp3_update_panel_info() - dynamic mode switch (DMS) between command
+ * (@mode == 1) and video mode: notifies the panel driver, updates
+ * mfd->panel.type, reselects the display interface and reinitializes it.
+ * The @dest_ctrl argument is currently unused. Returns 0 even if the
+ * panel event handler reported a failure (only logged).
+ */
+static int mdp3_update_panel_info(struct msm_fb_data_type *mfd, int mode,
+ int dest_ctrl)
+{
+ int ret = 0;
+ struct mdp3_session_data *mdp3_session;
+ struct mdss_panel_data *panel;
+ u32 intf_type = 0;
+
+ if (!mfd || !mfd->mdp.private1)
+ return -EINVAL;
+
+ mdp3_session = mfd->mdp.private1;
+ panel = mdp3_session->panel;
+
+ if (!panel->event_handler)
+ return 0;
+ ret = panel->event_handler(panel, MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
+ (void *)(unsigned long)mode);
+ if (ret)
+ pr_err("Dynamic switch to %s mode failed!\n",
+ mode ? "command" : "video");
+ if (mode == 1)
+ mfd->panel.type = MIPI_CMD_PANEL;
+ else
+ mfd->panel.type = MIPI_VIDEO_PANEL;
+
+ /* command mode needs explicit DMA-done synchronization */
+ if (mfd->panel.type != MIPI_VIDEO_PANEL)
+ mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
+
+ intf_type = mdp3_ctrl_get_intf_type(mfd);
+ mdp3_session->intf->cfg.type = intf_type;
+ mdp3_session->intf->available = 1;
+ mdp3_session->intf->in_use = 1;
+ mdp3_res->intf[intf_type].in_use = 1;
+
+ mdp3_intf_init(mdp3_session->intf);
+
+ mdp3_session->dma->output_config.out_sel = intf_type;
+ mdp3_session->status = mdp3_session->intf->active;
+
+ return 0;
+}
+
+/* mdp3_vsync_retire_setup() - create the per-fb sw_sync timeline used for
+ * retire fences and register the retire-vsync handler with the DMA pipe.
+ * Returns 0 on success or -ENOMEM if the timeline cannot be created.
+ */
+static int mdp3_vsync_retire_setup(struct msm_fb_data_type *mfd)
+{
+ struct mdp3_session_data *mdp3_session;
+ struct mdp3_notification retire_client;
+ char name[24];
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+ snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
+ mdp3_session->vsync_timeline = sw_sync_timeline_create(name);
+ if (mdp3_session->vsync_timeline == NULL) {
+ pr_err("cannot vsync create time line");
+ return -ENOMEM;
+ }
+
+ /* Add retire vsync handler */
+ retire_client.handler = mdp3_vsync_retire_handle_vsync;
+ retire_client.arg = mdp3_session;
+
+ if (mdp3_session->dma)
+ mdp3_session->dma->retire_client = retire_client;
+
+ INIT_WORK(&mdp3_session->retire_work, mdp3_vsync_retire_work_handler);
+
+ return 0;
+}
+
+/* mdp3_ctrl_init() - one-time setup of the MDP3 control path for a
+ * framebuffer device: wires the msm_mdp_interface callbacks, allocates
+ * and populates the session data (DMA pipe, display interface, worker
+ * thread, vsync timer), creates the sysfs nodes, enables PM runtime and
+ * handles continuous-splash handoff. On error the session is freed.
+ *
+ * NOTE(review): error paths after sysfs_create_group()/kthread_run()
+ * succeed do not undo those registrations before freeing the session —
+ * verify teardown expectations with the caller.
+ */
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
+{
+ struct device *dev = mfd->fbi->dev;
+ struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
+ struct mdp3_session_data *mdp3_session = NULL;
+ u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+ int rc;
+ int splash_mismatch = 0;
+ struct sched_param sched = { .sched_priority = 16 };
+
+ pr_info("mdp3_ctrl_init\n");
+ rc = mdp3_parse_dt_splash(mfd);
+ if (rc)
+ splash_mismatch = 1;
+
+ mdp3_interface->on_fnc = mdp3_ctrl_on;
+ mdp3_interface->off_fnc = mdp3_ctrl_off;
+ mdp3_interface->do_histogram = NULL;
+ mdp3_interface->cursor_update = NULL;
+ mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
+ mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
+ mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff;
+ mdp3_interface->pre_commit = mdp3_layer_pre_commit;
+ mdp3_interface->atomic_validate = mdp3_layer_atomic_validate;
+ mdp3_interface->lut_update = NULL;
+ mdp3_interface->configure_panel = mdp3_update_panel_info;
+ mdp3_interface->input_event_handler = NULL;
+ mdp3_interface->signal_retire_fence = NULL;
+
+ mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
+ if (!mdp3_session)
+ return -ENOMEM;
+
+ mutex_init(&mdp3_session->lock);
+ INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off);
+
+ init_kthread_worker(&mdp3_session->worker);
+ init_kthread_work(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done);
+
+ mdp3_session->thread = kthread_run(kthread_worker_fn,
+ &mdp3_session->worker,
+ "mdp3_dispatch_dma_done");
+
+ if (IS_ERR(mdp3_session->thread)) {
+ pr_err("Can't initialize mdp3_dispatch_dma_done thread\n");
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ /* DMA-done dispatch is latency sensitive; run it as RT FIFO */
+ sched_setscheduler(mdp3_session->thread, SCHED_FIFO, &sched);
+
+ atomic_set(&mdp3_session->vsync_countdown, 0);
+ mutex_init(&mdp3_session->histo_lock);
+ mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
+ if (!mdp3_session->dma) {
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ rc = mdp3_dma_init(mdp3_session->dma);
+ if (rc) {
+ pr_err("fail to init dma\n");
+ goto init_done;
+ }
+
+ intf_type = mdp3_ctrl_get_intf_type(mfd);
+ mdp3_session->intf = mdp3_get_display_intf(intf_type);
+ if (!mdp3_session->intf) {
+ rc = -ENODEV;
+ goto init_done;
+ }
+ rc = mdp3_intf_init(mdp3_session->intf);
+ if (rc) {
+ pr_err("fail to init interface\n");
+ goto init_done;
+ }
+
+ mdp3_session->dma->output_config.out_sel = intf_type;
+ mdp3_session->mfd = mfd;
+ mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
+ mdp3_session->status = mdp3_session->intf->active;
+ mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+ mdp3_bufq_init(&mdp3_session->bufq_in);
+ mdp3_bufq_init(&mdp3_session->bufq_out);
+ mdp3_session->histo_status = 0;
+ mdp3_session->lut_sel = 0;
+ BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head);
+
+ init_timer(&mdp3_session->vsync_timer);
+ mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
+ /* NOTE(review): casting a pointer to u32 truncates on 64-bit —
+ * confirm this build is 32-bit only or use unsigned long.
+ */
+ mdp3_session->vsync_timer.data = (u32)mdp3_session;
+ mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
+ mfd->mdp.private1 = mdp3_session;
+ init_completion(&mdp3_session->dma_completion);
+ if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+ mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
+
+ rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
+ if (rc) {
+ pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+ goto init_done;
+ }
+ rc = sysfs_create_group(&dev->kobj, &generic_attr_group);
+ if (rc) {
+ pr_err("generic sysfs group creation failed, ret=%d\n", rc);
+ goto init_done;
+ }
+
+ mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
+ "vsync_event");
+ if (!mdp3_session->vsync_event_sd) {
+ pr_err("vsync_event sysfs lookup failed\n");
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ mdp3_session->dma->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
+ "hist_event");
+ if (!mdp3_session->dma->hist_event_sd) {
+ pr_err("hist_event sysfs lookup failed\n");
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ mdp3_session->bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
+ "bl_event");
+ if (!mdp3_session->bl_event_sd) {
+ pr_err("bl_event sysfs lookup failed\n");
+ rc = -ENODEV;
+ goto init_done;
+ }
+
+ rc = mdp3_create_sysfs_link(dev);
+ if (rc)
+ pr_warn("problem creating link to mdp sysfs\n");
+
+ /* Enable PM runtime */
+ pm_runtime_set_suspended(&mdp3_res->pdev->dev);
+ pm_runtime_enable(&mdp3_res->pdev->dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+ pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+ if (mdp3_get_cont_spash_en()) {
+ mdp3_session->clk_on = 1;
+ mdp3_session->in_splash_screen = 1;
+ mdp3_ctrl_notifier_register(mdp3_session,
+ &mdp3_session->mfd->mdp_sync_pt_data.notifier);
+ }
+
+ /*
+ * Increment the overlay active count.
+ * This is needed to ensure that if idle power collapse kicks in
+ * right away, it would be handled correctly.
+ */
+ atomic_inc(&mdp3_res->active_intf_cnt);
+ if (splash_mismatch) {
+ pr_err("splash memory mismatch, stop splash\n");
+ mdp3_ctrl_off(mfd);
+ }
+
+ mdp3_session->vsync_before_commit = true;
+ mdp3_session->dyn_pu_state = mfd->panel_info->partial_update_enabled;
+
+ if (mfd->panel_info->mipi.dms_mode ||
+ mfd->panel_info->type == MIPI_CMD_PANEL) {
+ rc = mdp3_vsync_retire_setup(mfd);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("unable to create vsync timeline\n");
+ goto init_done;
+ }
+ }
+init_done:
+ if (IS_ERR_VALUE(rc))
+ kfree(mdp3_session);
+
+ return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
new file mode 100644
index 0000000..2cc3421
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_CTRL_H
+#define MDP3_CTRL_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+
+#include "mdp3.h"
+#include "mdp3_dma.h"
+#include "mdss_fb.h"
+#include "mdss_panel.h"
+
+#define MDP3_MAX_BUF_QUEUE 8
+#define MDP3_LUT_HIST_EN 0x001
+#define MDP3_LUT_GC_EN 0x002
+
+struct mdp3_buffer_queue {
+ struct mdp3_img_data img_data[MDP3_MAX_BUF_QUEUE];
+ int count;
+ int push_idx;
+ int pop_idx;
+};
+
+struct mdp3_session_data {
+ struct mutex lock;
+ int status;
+ struct mdp3_dma *dma;
+ struct mdss_panel_data *panel;
+ struct mdp3_intf *intf;
+ struct msm_fb_data_type *mfd;
+ ktime_t vsync_time;
+ struct timer_list vsync_timer;
+ int vsync_period;
+ struct kernfs_node *vsync_event_sd;
+ struct kernfs_node *bl_event_sd;
+ struct mdp_overlay overlay;
+ struct mdp_overlay req_overlay;
+ struct mdp3_buffer_queue bufq_in;
+ struct mdp3_buffer_queue bufq_out;
+ struct work_struct clk_off_work;
+
+ struct kthread_work dma_done_work;
+ struct kthread_worker worker;
+ struct task_struct *thread;
+
+ atomic_t dma_done_cnt;
+ int histo_status;
+ struct mutex histo_lock;
+ int lut_sel;
+ bool vsync_before_commit;
+ bool first_commit;
+ int clk_on;
+ struct blocking_notifier_head notifier_head;
+
+ int vsync_enabled;
+ atomic_t vsync_countdown; /* Used to count down */
+ bool in_splash_screen;
+ bool esd_recovery;
+ int dyn_pu_state; /* dynamic partial update status */
+ u32 bl_events;
+
+ bool dma_active;
+ struct completion dma_completion;
+ int (*wait_for_dma_done)(struct mdp3_session_data *session);
+
+ /* For retire fence */
+ struct sw_sync_timeline *vsync_timeline;
+ int retire_cnt;
+ struct work_struct retire_work;
+};
+
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq);
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
+int mdp3_bufq_push(struct mdp3_buffer_queue *bufq,
+ struct mdp3_img_data *data);
+int mdp3_ctrl_get_source_format(u32 imgType);
+int mdp3_ctrl_get_pack_pattern(u32 imgType);
+int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
+
+#endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
new file mode 100644
index 0000000..b7c8d43
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -0,0 +1,1291 @@
+/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+
+#include "mdp3.h"
+#include "mdp3_dma.h"
+#include "mdp3_hwio.h"
+#include "mdss_debug.h"
+
+#define DMA_STOP_POLL_SLEEP_US 1000
+#define DMA_STOP_POLL_TIMEOUT_US 200000
+#define DMA_HISTO_RESET_TIMEOUT_MS 40
+#define DMA_LUT_CONFIG_MASK 0xfffffbe8
+#define DMA_CCS_CONFIG_MASK 0xfffffc17
+#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
+
+#define VSYNC_SELECT 0x024
+#define VSYNC_TOTAL_LINES_SHIFT 21
+#define VSYNC_COUNT_MASK 0x7ffff
+#define VSYNC_THRESH_CONT_SHIFT 16
+
+static void mdp3_vsync_intr_handler(int type, void *arg)
+{
+ struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+ struct mdp3_notification vsync_client;
+ struct mdp3_notification retire_client;
+ unsigned int wait_for_next_vs;
+
+ pr_debug("mdp3_vsync_intr_handler\n");
+ spin_lock(&dma->dma_lock);
+ vsync_client = dma->vsync_client;
+ retire_client = dma->retire_client;
+ wait_for_next_vs = !dma->vsync_status;
+ dma->vsync_status = 0;
+ if (wait_for_next_vs)
+ complete(&dma->vsync_comp);
+ spin_unlock(&dma->dma_lock);
+ if (vsync_client.handler) {
+ vsync_client.handler(vsync_client.arg);
+ } else {
+ if (wait_for_next_vs)
+ mdp3_irq_disable_nosync(type);
+ }
+
+ if (retire_client.handler)
+ retire_client.handler(retire_client.arg);
+}
+
+static void mdp3_dma_done_intr_handler(int type, void *arg)
+{
+ struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+ struct mdp3_notification dma_client;
+
+ pr_debug("mdp3_dma_done_intr_handler\n");
+ spin_lock(&dma->dma_lock);
+ dma_client = dma->dma_notifier_client;
+ complete(&dma->dma_comp);
+ spin_unlock(&dma->dma_lock);
+ mdp3_irq_disable_nosync(type);
+ if (dma_client.handler)
+ dma_client.handler(dma_client.arg);
+}
+
+static void mdp3_hist_done_intr_handler(int type, void *arg)
+{
+ struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+ u32 isr, mask;
+
+ isr = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_STATUS);
+ mask = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_ENABLE);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_CLEAR, isr);
+
+ isr &= mask;
+ if (isr == 0)
+ return;
+
+ if (isr & MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT) {
+ spin_lock(&dma->histo_lock);
+ dma->histo_state = MDP3_DMA_HISTO_STATE_READY;
+ complete(&dma->histo_comp);
+ spin_unlock(&dma->histo_lock);
+ mdp3_hist_intr_notify(dma);
+ }
+ if (isr & MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT) {
+ spin_lock(&dma->histo_lock);
+ dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+ complete(&dma->histo_comp);
+ spin_unlock(&dma->histo_lock);
+ }
+}
+
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type)
+{
+ int irq_bit;
+
+ pr_debug("mdp3_dma_callback_enable type=%d\n", type);
+
+ if (dma->dma_sel == MDP3_DMA_P) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE)
+ mdp3_irq_enable(MDP3_INTR_DMA_P_HISTO);
+
+ if (type & MDP3_DMA_CALLBACK_TYPE_HIST_DONE)
+ mdp3_irq_enable(MDP3_INTR_DMA_P_HISTO);
+ }
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+ mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME);
+ } else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+ irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+ irq_bit += dma->dma_sel;
+ mdp3_irq_enable(irq_bit);
+ }
+
+ if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+ irq_bit = MDP3_INTR_DMA_P_DONE;
+ if (dma->dma_sel == MDP3_DMA_S)
+ irq_bit = MDP3_INTR_DMA_S_DONE;
+ mdp3_irq_enable(irq_bit);
+ }
+ } else {
+ pr_err("mdp3_dma_callback_enable not supported interface\n");
+ }
+}
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type)
+{
+ int irq_bit;
+
+ pr_debug("mdp3_dma_callback_disable type=%d\n", type);
+
+ if (dma->dma_sel == MDP3_DMA_P) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE)
+ mdp3_irq_disable(MDP3_INTR_DMA_P_HISTO);
+
+ if (type & MDP3_DMA_CALLBACK_TYPE_HIST_DONE)
+ mdp3_irq_disable(MDP3_INTR_DMA_P_HISTO);
+ }
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+ mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME);
+ } else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+ irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+ irq_bit += dma->dma_sel;
+ mdp3_irq_disable(irq_bit);
+ /*
+			 * Clear the read pointer interrupt before disabling
+			 * clocks. Otherwise pending ISR handling will result
+			 * in a NOC error, since the clock will be disabled
+			 * after this point.
+ mdp3_clear_irq(irq_bit);
+ }
+
+ if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+ irq_bit = MDP3_INTR_DMA_P_DONE;
+ if (dma->dma_sel == MDP3_DMA_S)
+ irq_bit = MDP3_INTR_DMA_S_DONE;
+ mdp3_irq_disable(irq_bit);
+ }
+ }
+}
+
+static int mdp3_dma_callback_setup(struct mdp3_dma *dma)
+{
+ int rc = 0;
+ struct mdp3_intr_cb vsync_cb = {
+ .cb = mdp3_vsync_intr_handler,
+ .data = dma,
+ };
+
+ struct mdp3_intr_cb dma_cb = {
+ .cb = mdp3_dma_done_intr_handler,
+ .data = dma,
+ };
+
+
+ struct mdp3_intr_cb hist_cb = {
+ .cb = mdp3_hist_done_intr_handler,
+ .data = dma,
+ };
+
+ if (dma->dma_sel == MDP3_DMA_P)
+ rc = mdp3_set_intr_callback(MDP3_INTR_DMA_P_HISTO, &hist_cb);
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+ dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC)
+ rc |= mdp3_set_intr_callback(MDP3_INTR_LCDC_START_OF_FRAME,
+ &vsync_cb);
+ else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ int irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+
+ irq_bit += dma->dma_sel;
+ rc |= mdp3_set_intr_callback(irq_bit, &vsync_cb);
+ irq_bit = MDP3_INTR_DMA_P_DONE;
+ if (dma->dma_sel == MDP3_DMA_S)
+ irq_bit = MDP3_INTR_DMA_S_DONE;
+ rc |= mdp3_set_intr_callback(irq_bit, &dma_cb);
+ } else {
+ pr_err("mdp3_dma_callback_setup not supported interface\n");
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static void mdp3_dma_vsync_enable(struct mdp3_dma *dma,
+ struct mdp3_notification *vsync_client)
+{
+ unsigned long flag;
+ int updated = 0;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+ pr_debug("mdp3_dma_vsync_enable\n");
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (vsync_client) {
+ if (dma->vsync_client.handler != vsync_client->handler) {
+ dma->vsync_client = *vsync_client;
+ updated = 1;
+ }
+ } else {
+ if (dma->vsync_client.handler) {
+ dma->vsync_client.handler = NULL;
+ dma->vsync_client.arg = NULL;
+ updated = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ if (updated) {
+ if (vsync_client && vsync_client->handler)
+ mdp3_dma_callback_enable(dma, cb_type);
+ else
+ mdp3_dma_callback_disable(dma, cb_type);
+ }
+}
+
+static void mdp3_dma_done_notifier(struct mdp3_dma *dma,
+ struct mdp3_notification *dma_client)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ if (dma_client) {
+ dma->dma_notifier_client = *dma_client;
+ } else {
+ dma->dma_notifier_client.handler = NULL;
+ dma->dma_notifier_client.arg = NULL;
+ }
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+}
+
+int mdp3_dma_sync_config(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config, struct mdp3_tear_check *te)
+{
+ u32 vsync_clk_speed_hz, vclks_line, cfg;
+ int porch = source_config->vporch;
+ int height = source_config->height;
+ int total_lines = height + porch;
+ int dma_sel = dma->dma_sel;
+
+ vsync_clk_speed_hz = MDP_VSYNC_CLK_RATE;
+
+ cfg = total_lines << VSYNC_TOTAL_LINES_SHIFT;
+ total_lines *= te->frame_rate;
+
+ vclks_line = (total_lines) ? vsync_clk_speed_hz / total_lines : 0;
+
+ cfg |= BIT(19);
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ if (te->refx100) {
+ vclks_line = vclks_line * te->frame_rate *
+ 100 / te->refx100;
+ } else {
+ pr_warn("refx100 cannot be zero! Use 6000 as default\n");
+ vclks_line = vclks_line * te->frame_rate *
+ 100 / 6000;
+ }
+
+ cfg |= (vclks_line & VSYNC_COUNT_MASK);
+
+ MDP3_REG_WRITE(MDP3_REG_SYNC_CONFIG_0 + dma_sel, cfg);
+ MDP3_REG_WRITE(MDP3_REG_VSYNC_SEL, VSYNC_SELECT);
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_VSYNC_INIT_VAL + dma_sel,
+ te->vsync_init_val);
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, te->rd_ptr_irq);
+ MDP3_REG_WRITE(MDP3_REG_SYNC_THRESH_0 + dma_sel,
+ ((te->sync_threshold_continue << VSYNC_THRESH_CONT_SHIFT) |
+ te->sync_threshold_start));
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_START_P0S + dma_sel, te->start_pos);
+ MDP3_REG_WRITE(MDP3_REG_TEAR_CHECK_EN, te->tear_check_en);
+ return 0;
+}
+
+static int mdp3_dmap_config(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config,
+ bool splash_screen_active)
+{
+ u32 dma_p_cfg_reg, dma_p_size, dma_p_out_xy;
+
+ dma_p_cfg_reg = source_config->format << 25;
+ if (output_config->dither_en)
+ dma_p_cfg_reg |= BIT(24);
+ dma_p_cfg_reg |= output_config->out_sel << 19;
+ dma_p_cfg_reg |= output_config->bit_mask_polarity << 18;
+ dma_p_cfg_reg |= output_config->color_components_flip << 14;
+ dma_p_cfg_reg |= output_config->pack_pattern << 8;
+ dma_p_cfg_reg |= output_config->pack_align << 7;
+ dma_p_cfg_reg |= output_config->color_comp_out_bits;
+
+ dma_p_size = source_config->width | (source_config->height << 16);
+ dma_p_out_xy = source_config->x | (source_config->y << 16);
+ if (!splash_screen_active) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR,
+ (u32)source_config->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE,
+ source_config->stride);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x40);
+ }
+
+ dma->source_config = *source_config;
+ dma->output_config = *output_config;
+
+ if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+ mdp3_irq_enable(MDP3_INTR_LCDC_UNDERFLOW);
+
+ mdp3_dma_callback_setup(dma);
+ return 0;
+}
+
+static void mdp3_dmap_config_source(struct mdp3_dma *dma)
+{
+ struct mdp3_dma_source *source_config = &dma->source_config;
+ u32 dma_p_cfg_reg, dma_p_size;
+
+ dma_p_cfg_reg = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
+ dma_p_cfg_reg &= ~MDP3_DMA_IBUF_FORMAT_MASK;
+ dma_p_cfg_reg |= source_config->format << 25;
+ dma_p_cfg_reg &= ~MDP3_DMA_PACK_PATTERN_MASK;
+ dma_p_cfg_reg |= dma->output_config.pack_pattern << 8;
+
+ dma_p_size = dma->roi.w | (dma->roi.h << 16);
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, source_config->stride);
+}
+
+static int mdp3_dmas_config(struct mdp3_dma *dma,
+ struct mdp3_dma_source *source_config,
+ struct mdp3_dma_output_config *output_config,
+ bool splash_screen_active)
+{
+ u32 dma_s_cfg_reg, dma_s_size, dma_s_out_xy;
+
+ dma_s_cfg_reg = source_config->format << 25;
+ if (output_config->dither_en)
+ dma_s_cfg_reg |= BIT(24);
+ dma_s_cfg_reg |= output_config->out_sel << 19;
+ dma_s_cfg_reg |= output_config->bit_mask_polarity << 18;
+ dma_s_cfg_reg |= output_config->color_components_flip << 14;
+ dma_s_cfg_reg |= output_config->pack_pattern << 8;
+ dma_s_cfg_reg |= output_config->pack_align << 7;
+ dma_s_cfg_reg |= output_config->color_comp_out_bits;
+
+ dma_s_size = source_config->width | (source_config->height << 16);
+ dma_s_out_xy = source_config->x | (source_config->y << 16);
+
+ if (!splash_screen_active) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR,
+ (u32)source_config->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE,
+ source_config->stride);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy);
+ MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10);
+ }
+ dma->source_config = *source_config;
+ dma->output_config = *output_config;
+
+ mdp3_dma_callback_setup(dma);
+ return 0;
+}
+
+static void mdp3_dmas_config_source(struct mdp3_dma *dma)
+{
+ struct mdp3_dma_source *source_config = &dma->source_config;
+ u32 dma_s_cfg_reg, dma_s_size;
+
+ dma_s_cfg_reg = MDP3_REG_READ(MDP3_REG_DMA_S_CONFIG);
+ dma_s_cfg_reg &= ~MDP3_DMA_IBUF_FORMAT_MASK;
+ dma_s_cfg_reg |= source_config->format << 25;
+
+ dma_s_size = source_config->width | (source_config->height << 16);
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, source_config->stride);
+}
+
+static int mdp3_dmap_cursor_config(struct mdp3_dma *dma,
+ struct mdp3_dma_cursor *cursor)
+{
+ u32 cursor_size, cursor_pos, blend_param, trans_mask;
+
+ cursor_size = cursor->width | (cursor->height << 16);
+ cursor_pos = cursor->x | (cursor->y << 16);
+ trans_mask = 0;
+ if (cursor->blend_config.mode == MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA) {
+ blend_param = cursor->blend_config.constant_alpha << 24;
+ } else if (cursor->blend_config.mode ==
+ MDP3_DMA_CURSOR_BLEND_COLOR_KEYING) {
+ blend_param = cursor->blend_config.transparent_color;
+ trans_mask = cursor->blend_config.transparency_mask;
+ } else {
+ blend_param = 0;
+ }
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_FORMAT, cursor->format);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_SIZE, cursor_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BUF_ADDR, (u32)cursor->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG,
+ cursor->blend_config.mode);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_PARAM, blend_param);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK, trans_mask);
+ dma->cursor = *cursor;
+ return 0;
+}
+
+static int mdp3_dmap_ccs_config_internal(struct mdp3_dma *dma,
+ struct mdp3_dma_color_correct_config *config,
+ struct mdp3_dma_ccs *ccs)
+{
+ int i;
+ u32 addr;
+
+ if (!ccs)
+ return -EINVAL;
+
+ if (config->ccs_enable) {
+ addr = MDP3_REG_DMA_P_CSC_MV1;
+ if (config->ccs_sel)
+ addr = MDP3_REG_DMA_P_CSC_MV2;
+ for (i = 0; i < 9; i++) {
+ MDP3_REG_WRITE(addr, ccs->mv[i]);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_CSC_PRE_BV1;
+ if (config->pre_bias_sel)
+ addr = MDP3_REG_DMA_P_CSC_PRE_BV2;
+ for (i = 0; i < 3; i++) {
+ MDP3_REG_WRITE(addr, ccs->pre_bv[i]);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_CSC_POST_BV1;
+ if (config->post_bias_sel)
+ addr = MDP3_REG_DMA_P_CSC_POST_BV2;
+ for (i = 0; i < 3; i++) {
+ MDP3_REG_WRITE(addr, ccs->post_bv[i]);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_CSC_PRE_LV1;
+ if (config->pre_limit_sel)
+ addr = MDP3_REG_DMA_P_CSC_PRE_LV2;
+ for (i = 0; i < 6; i++) {
+ MDP3_REG_WRITE(addr, ccs->pre_lv[i]);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_CSC_POST_LV1;
+ if (config->post_limit_sel)
+ addr = MDP3_REG_DMA_P_CSC_POST_LV2;
+ for (i = 0; i < 6; i++) {
+ MDP3_REG_WRITE(addr, ccs->post_lv[i]);
+ addr += 4;
+ }
+ }
+ return 0;
+}
+
+static void mdp3_ccs_update(struct mdp3_dma *dma, bool from_kickoff)
+{
+ u32 cc_config;
+ bool ccs_updated = false, lut_updated = false;
+ struct mdp3_dma_ccs ccs;
+
+ cc_config = MDP3_REG_READ(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG);
+
+ if (dma->ccs_config.ccs_dirty) {
+ cc_config &= DMA_CCS_CONFIG_MASK;
+ if (dma->ccs_config.ccs_enable)
+ cc_config |= BIT(3);
+ else
+ cc_config &= ~BIT(3);
+ cc_config |= dma->ccs_config.ccs_sel << 5;
+ cc_config |= dma->ccs_config.pre_bias_sel << 6;
+ cc_config |= dma->ccs_config.post_bias_sel << 7;
+ cc_config |= dma->ccs_config.pre_limit_sel << 8;
+ cc_config |= dma->ccs_config.post_limit_sel << 9;
+ /*
+		 * The CCS dirty flag should be cleared only when this is
+		 * called from frame kickoff. Otherwise the flag would remain
+		 * dirty across resume, and a LUT config could reach this path
+		 * without programming the CCS registers, which would leave
+		 * the screen dark.
+ if (from_kickoff)
+ dma->ccs_config.ccs_dirty = false;
+ ccs_updated = true;
+ }
+
+ if (dma->lut_config.lut_dirty) {
+ cc_config &= DMA_LUT_CONFIG_MASK;
+ cc_config |= dma->lut_config.lut_enable;
+ cc_config |= dma->lut_config.lut_position << 4;
+ cc_config |= dma->lut_config.lut_sel << 10;
+ dma->lut_config.lut_dirty = false;
+ lut_updated = true;
+ }
+
+ if (ccs_updated && from_kickoff) {
+ ccs.mv = dma->ccs_cache.csc_data.csc_mv;
+ ccs.pre_bv = dma->ccs_cache.csc_data.csc_pre_bv;
+ ccs.post_bv = dma->ccs_cache.csc_data.csc_post_bv;
+ ccs.pre_lv = dma->ccs_cache.csc_data.csc_pre_lv;
+ ccs.post_lv = dma->ccs_cache.csc_data.csc_post_lv;
+ mdp3_dmap_ccs_config_internal(dma, &dma->ccs_config, &ccs);
+ }
+
+ if (lut_updated || ccs_updated) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config);
+ /*
+ * Make sure ccs configuration update is done before continuing
+ * with the DMA transfer
+ */
+ wmb(); /* ensure write is finished before progressing */
+ }
+}
+
+static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
+ struct mdp3_dma_color_correct_config *config,
+ struct mdp3_dma_ccs *ccs)
+{
+ mdp3_dmap_ccs_config_internal(dma, config, ccs);
+
+ dma->ccs_config = *config;
+
+ if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+ mdp3_ccs_update(dma, false);
+
+ return 0;
+}
+
+static int mdp3_dmap_lut_config(struct mdp3_dma *dma,
+ struct mdp3_dma_lut_config *config,
+ struct fb_cmap *cmap)
+{
+ u32 addr, color;
+ int i;
+
+ if (config->lut_enable && cmap) {
+ addr = MDP3_REG_DMA_P_CSC_LUT1;
+ if (config->lut_sel)
+ addr = MDP3_REG_DMA_P_CSC_LUT2;
+
+ for (i = 0; i < MDP_LUT_SIZE; i++) {
+ color = cmap->green[i] & 0xff;
+ color |= (cmap->red[i] & 0xff) << 8;
+ color |= (cmap->blue[i] & 0xff) << 16;
+ MDP3_REG_WRITE(addr, color);
+ addr += 4;
+ }
+ }
+
+ dma->lut_config = *config;
+
+ if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+ mdp3_ccs_update(dma, false);
+
+ return 0;
+}
+
+static int mdp3_dmap_histo_config(struct mdp3_dma *dma,
+ struct mdp3_dma_histogram_config *histo_config)
+{
+ unsigned long flag;
+ u32 histo_bit_mask = 0, histo_control = 0;
+ u32 histo_isr_mask = MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT |
+ MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT;
+
+ spin_lock_irqsave(&dma->histo_lock, flag);
+
+ if (histo_config->bit_mask_polarity)
+ histo_bit_mask = BIT(31);
+ histo_bit_mask |= histo_config->bit_mask;
+
+ if (histo_config->auto_clear_en)
+ histo_control = BIT(0);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_FRAME_CNT,
+ histo_config->frame_count);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_BIT_MASK, histo_bit_mask);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CONTROL, histo_control);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, histo_isr_mask);
+
+ spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+ dma->histogram_config = *histo_config;
+ return 0;
+}
+
+int dma_bpp(int format)
+{
+ int bpp;
+
+ switch (format) {
+ case MDP3_DMA_IBUF_FORMAT_RGB888:
+ bpp = 3;
+ break;
+ case MDP3_DMA_IBUF_FORMAT_RGB565:
+ bpp = 2;
+ break;
+ case MDP3_DMA_IBUF_FORMAT_XRGB8888:
+ bpp = 4;
+ break;
+ default:
+ bpp = 0;
+ }
+ return bpp;
+}
+
+static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf,
+ struct mdp3_intf *intf, void *data)
+{
+ unsigned long flag;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+ struct mdss_panel_data *panel;
+ int rc = 0;
+ int retry_count = 2;
+
+ ATRACE_BEGIN(__func__);
+ pr_debug("mdp3_dmap_update\n");
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ if (intf->active) {
+ ATRACE_BEGIN("mdp3_wait_for_dma_comp");
+retry_dma_done:
+ rc = wait_for_completion_timeout(&dma->dma_comp,
+ KOFF_TIMEOUT);
+ if (rc <= 0 && --retry_count) {
+ int vsync_status;
+
+ vsync_status = (1 << MDP3_INTR_DMA_P_DONE) &
+ MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+ if (!vsync_status) {
+ pr_err("%s: cmd timeout retry cnt %d\n",
+ __func__, retry_count);
+ goto retry_dma_done;
+ }
+ rc = -1;
+ }
+ ATRACE_END("mdp3_wait_for_dma_comp");
+ }
+ }
+ if (dma->update_src_cfg) {
+ if (dma->output_config.out_sel ==
+ MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active)
+ pr_err("configuring dma source while it is active\n");
+ dma->dma_config_source(dma);
+ if (data) {
+ panel = (struct mdss_panel_data *)data;
+ if (panel->event_handler) {
+ panel->event_handler(panel,
+ MDSS_EVENT_ENABLE_PARTIAL_ROI, NULL);
+ panel->event_handler(panel,
+ MDSS_EVENT_DSI_STREAM_SIZE, NULL);
+ }
+ }
+ dma->update_src_cfg = false;
+ }
+ mutex_lock(&dma->pp_lock);
+ if (dma->ccs_config.ccs_dirty)
+ mdp3_ccs_update(dma, true);
+ mutex_unlock(&dma->pp_lock);
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)(buf +
+ dma->roi.y * dma->source_config.stride +
+ dma->roi.x * dma_bpp(dma->source_config.format)));
+ dma->source_config.buf = (int)buf;
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 1);
+
+ if (!intf->active) {
+ pr_debug("%s start interface\n", __func__);
+ intf->start(intf);
+ }
+
+ mb(); /* make sure everything is written before enable */
+ dma->vsync_status = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
+ (1 << MDP3_INTR_LCDC_START_OF_FRAME);
+ init_completion(&dma->vsync_comp);
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ mdp3_dma_callback_enable(dma, cb_type);
+ pr_debug("%s wait for vsync_comp\n", __func__);
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+ ATRACE_BEGIN("mdp3_wait_for_vsync_comp");
+retry_vsync:
+ rc = wait_for_completion_timeout(&dma->vsync_comp,
+ KOFF_TIMEOUT);
+ if (rc <= 0 && --retry_count) {
+ int vsync = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
+ (1 << MDP3_INTR_LCDC_START_OF_FRAME);
+
+ if (!vsync) {
+ pr_err("%s trying again count = %d\n",
+ __func__, retry_count);
+ goto retry_vsync;
+ }
+ rc = -1;
+ }
+ ATRACE_END("mdp3_wait_for_vsync_comp");
+ }
+ pr_debug("$%s wait for vsync_comp out\n", __func__);
+ ATRACE_END(__func__);
+ return rc;
+}
+
+static int mdp3_dmas_update(struct mdp3_dma *dma, void *buf,
+ struct mdp3_intf *intf, void *data)
+{
+ unsigned long flag;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ if (intf->active)
+ wait_for_completion_killable(&dma->dma_comp);
+ }
+
+ spin_lock_irqsave(&dma->dma_lock, flag);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)buf);
+ dma->source_config.buf = (int)buf;
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_START, 1);
+
+ if (!intf->active) {
+ pr_debug("mdp3_dmap_update start interface\n");
+ intf->start(intf);
+ }
+
+ wmb(); /* ensure write is finished before progressing */
+ init_completion(&dma->vsync_comp);
+ spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+ mdp3_dma_callback_enable(dma, cb_type);
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+ wait_for_completion_killable(&dma->vsync_comp);
+ return 0;
+}
+
+static int mdp3_dmap_cursor_update(struct mdp3_dma *dma, int x, int y)
+{
+ u32 cursor_pos;
+
+ cursor_pos = x | (y << 16);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+ dma->cursor.x = x;
+ dma->cursor.y = y;
+ return 0;
+}
+
+static int mdp3_dmap_histo_get(struct mdp3_dma *dma)
+{
+ int i, state, timeout, ret;
+ u32 addr;
+ unsigned long flag;
+
+ spin_lock_irqsave(&dma->histo_lock, flag);
+ state = dma->histo_state;
+ spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+ if (state != MDP3_DMA_HISTO_STATE_START &&
+ state != MDP3_DMA_HISTO_STATE_READY) {
+ pr_err("mdp3_dmap_histo_get invalid state %d\n", state);
+ return -EINVAL;
+ }
+
+ timeout = HIST_WAIT_TIMEOUT(dma->histogram_config.frame_count);
+ ret = wait_for_completion_killable_timeout(&dma->histo_comp, timeout);
+
+ if (ret == 0) {
+ pr_debug("mdp3_dmap_histo_get time out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret < 0) {
+ pr_err("mdp3_dmap_histo_get interrupted\n");
+ }
+
+ if (ret < 0)
+ return ret;
+
+ if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) {
+ pr_debug("mdp3_dmap_histo_get after dma shut down\n");
+ return -EPERM;
+ }
+
+ addr = MDP3_REG_DMA_P_HIST_R_DATA;
+ for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
+ dma->histo_data.r_data[i] = MDP3_REG_READ(addr);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_HIST_G_DATA;
+ for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
+ dma->histo_data.g_data[i] = MDP3_REG_READ(addr);
+ addr += 4;
+ }
+
+ addr = MDP3_REG_DMA_P_HIST_B_DATA;
+ for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
+ dma->histo_data.b_data[i] = MDP3_REG_READ(addr);
+ addr += 4;
+ }
+
+ dma->histo_data.extra[0] =
+ MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
+ dma->histo_data.extra[1] =
+ MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);
+
+ spin_lock_irqsave(&dma->histo_lock, flag);
+ init_completion(&dma->histo_comp);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
+ wmb(); /* ensure write is finished before progressing */
+ dma->histo_state = MDP3_DMA_HISTO_STATE_START;
+ spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+ return 0;
+}
+
+static int mdp3_dmap_histo_start(struct mdp3_dma *dma)
+{
+ unsigned long flag;
+
+ if (dma->histo_state != MDP3_DMA_HISTO_STATE_IDLE)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dma->histo_lock, flag);
+
+ init_completion(&dma->histo_comp);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
+ wmb(); /* ensure write is finished before progressing */
+ dma->histo_state = MDP3_DMA_HISTO_STATE_START;
+
+ spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+ mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_DONE);
+ return 0;
+
+}
+
+static int mdp3_dmap_histo_reset(struct mdp3_dma *dma)
+{
+ unsigned long flag;
+ int ret;
+
+ spin_lock_irqsave(&dma->histo_lock, flag);
+
+ init_completion(&dma->histo_comp);
+
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, BIT(0)|BIT(1));
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1);
+ wmb(); /* ensure write is finished before progressing */
+ dma->histo_state = MDP3_DMA_HISTO_STATE_RESET;
+
+ spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+ mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
+ ret = wait_for_completion_killable_timeout(&dma->histo_comp,
+ msecs_to_jiffies(DMA_HISTO_RESET_TIMEOUT_MS));
+
+ if (ret == 0) {
+ pr_err("mdp3_dmap_histo_reset time out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret < 0) {
+ pr_err("mdp3_dmap_histo_reset interrupted\n");
+ } else {
+ ret = 0;
+ }
+ mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
+
+ return ret;
+}
+
+static int mdp3_dmap_histo_stop(struct mdp3_dma *dma)
+{
+ unsigned long flag;
+ int cb_type = MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE |
+ MDP3_DMA_CALLBACK_TYPE_HIST_DONE;
+
+ spin_lock_irqsave(&dma->histo_lock, flag);
+
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CANCEL_REQ, 1);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, 0);
+ wmb(); /* ensure write is finished before progressing */
+ dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+ complete(&dma->histo_comp);
+
+ spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+ mdp3_dma_callback_disable(dma, cb_type);
+ return 0;
+}
+
+static int mdp3_dmap_histo_op(struct mdp3_dma *dma, u32 op)
+{
+ int ret;
+
+ switch (op) {
+ case MDP3_DMA_HISTO_OP_START:
+ ret = mdp3_dmap_histo_start(dma);
+ break;
+ case MDP3_DMA_HISTO_OP_STOP:
+ case MDP3_DMA_HISTO_OP_CANCEL:
+ ret = mdp3_dmap_histo_stop(dma);
+ break;
+ case MDP3_DMA_HISTO_OP_RESET:
+ ret = mdp3_dmap_histo_reset(dma);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+bool mdp3_dmap_busy(void)
+{
+ u32 val;
+
+ val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS);
+ pr_err("%s DMAP Status %s\n", __func__,
+ (val & MDP3_DMA_P_BUSY_BIT) ? "BUSY":"IDLE");
+ return val & MDP3_DMA_P_BUSY_BIT;
+}
+
+/*
+ * During an underrun the DMA_P registers are reset. Reprogram the CSC to
+ * prevent a black screen.
+ */
+static void mdp3_dmap_underrun_worker(struct work_struct *work)
+{
+ struct mdp3_dma *dma;
+
+ dma = container_of(work, struct mdp3_dma, underrun_work);
+ mutex_lock(&dma->pp_lock);
+ if (dma->ccs_config.ccs_enable && dma->ccs_config.ccs_dirty) {
+ dma->cc_vect_sel = (dma->cc_vect_sel + 1) % 2;
+ dma->ccs_config.ccs_sel = dma->cc_vect_sel;
+ dma->ccs_config.pre_limit_sel = dma->cc_vect_sel;
+ dma->ccs_config.post_limit_sel = dma->cc_vect_sel;
+ dma->ccs_config.pre_bias_sel = dma->cc_vect_sel;
+ dma->ccs_config.post_bias_sel = dma->cc_vect_sel;
+ mdp3_ccs_update(dma, true);
+ }
+ mutex_unlock(&dma->pp_lock);
+}
+
/*
 * Kick scanout on @dma through @intf and block until the first vsync.
 * For DSI command mode the DMA engine must be triggered explicitly, so
 * the start register is also written (under dma_lock) and the DMA-done
 * callback is armed alongside vsync.
 * Returns 0 on success, -EINVAL for an unknown DMA pipe.
 */
static int mdp3_dma_start(struct mdp3_dma *dma, struct mdp3_intf *intf)
{
	unsigned long flag;
	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
	u32 dma_start_offset = MDP3_REG_DMA_P_START;

	if (dma->dma_sel == MDP3_DMA_P)
		dma_start_offset = MDP3_REG_DMA_P_START;
	else if (dma->dma_sel == MDP3_DMA_S)
		dma_start_offset = MDP3_REG_DMA_S_START;
	else
		return -EINVAL;

	spin_lock_irqsave(&dma->dma_lock, flag);
	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
		/* Command mode: manual kick + wait for DMA done as well. */
		cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
		MDP3_REG_WRITE(dma_start_offset, 1);
	}

	intf->start(intf);
	wmb(); /* ensure write is finished before progressing */
	init_completion(&dma->vsync_comp);
	spin_unlock_irqrestore(&dma->dma_lock, flag);

	/* NOTE(review): presumably enables the panic/robust QoS override
	 * for DMA_P — confirm against the QoS register spec. */
	if (dma->dma_sel == MDP3_DMA_P && dma->has_panic_ctrl)
		MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, BIT(0));

	mdp3_dma_callback_enable(dma, cb_type);
	pr_debug("mdp3_dma_start wait for vsync_comp in\n");
	/* NOTE(review): the killable wait's return value is ignored, so a
	 * fatal signal makes this return 0 before vsync — confirm intended. */
	wait_for_completion_killable(&dma->vsync_comp);
	pr_debug("mdp3_dma_start wait for vsync_comp out\n");
	return 0;
}
+
/*
 * Stop scanout on @dma: drop the panic/QoS override, stop the attached
 * interface, then poll the display status register until this pipe's
 * busy bit clears.  All MDP interrupts are masked and acknowledged
 * before returning.
 * Returns 0, a readl_poll_timeout() error (-ETIMEDOUT on timeout), or
 * -EINVAL for an unknown DMA pipe.
 */
static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
{
	int ret = 0;
	u32 status, display_status_bit;

	if (dma->dma_sel == MDP3_DMA_P)
		display_status_bit = BIT(6);
	else if (dma->dma_sel == MDP3_DMA_S)
		display_status_bit = BIT(7);
	else
		return -EINVAL;

	if (dma->dma_sel == MDP3_DMA_P && dma->has_panic_ctrl)
		MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0);

	/* NOTE(review): BIT(11) presumably tracks the DSI video timing
	 * engine's own busy status — confirm against the register spec. */
	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
		display_status_bit |= BIT(11);

	intf->stop(intf);
	ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS),
			status,
			((status & display_status_bit) == 0),
			DMA_STOP_POLL_SLEEP_US,
			DMA_STOP_POLL_TIMEOUT_US);

	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC |
					MDP3_DMA_CALLBACK_TYPE_DMA_DONE);
	mdp3_irq_disable(MDP3_INTR_LCDC_UNDERFLOW);

	/* Mask and ack every MDP interrupt now that scanout is down. */
	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);

	init_completion(&dma->dma_comp);
	dma->vsync_client.handler = NULL;
	return ret;
}
+
+int mdp3_dma_init(struct mdp3_dma *dma)
+{
+ int ret = 0;
+
+ pr_debug("mdp3_dma_init\n");
+ switch (dma->dma_sel) {
+ case MDP3_DMA_P:
+ dma->dma_config = mdp3_dmap_config;
+ dma->dma_sync_config = mdp3_dma_sync_config;
+ dma->dma_config_source = mdp3_dmap_config_source;
+ dma->config_cursor = mdp3_dmap_cursor_config;
+ dma->config_ccs = mdp3_dmap_ccs_config;
+ dma->config_histo = mdp3_dmap_histo_config;
+ dma->config_lut = mdp3_dmap_lut_config;
+ dma->update = mdp3_dmap_update;
+ dma->update_cursor = mdp3_dmap_cursor_update;
+ dma->get_histo = mdp3_dmap_histo_get;
+ dma->histo_op = mdp3_dmap_histo_op;
+ dma->vsync_enable = mdp3_dma_vsync_enable;
+ dma->dma_done_notifier = mdp3_dma_done_notifier;
+ dma->start = mdp3_dma_start;
+ dma->stop = mdp3_dma_stop;
+ dma->busy = mdp3_dmap_busy;
+ INIT_WORK(&dma->underrun_work, mdp3_dmap_underrun_worker);
+ break;
+ case MDP3_DMA_S:
+ dma->dma_config = mdp3_dmas_config;
+ dma->dma_sync_config = mdp3_dma_sync_config;
+ dma->dma_config_source = mdp3_dmas_config_source;
+ dma->config_cursor = NULL;
+ dma->config_ccs = NULL;
+ dma->config_histo = NULL;
+ dma->config_lut = NULL;
+ dma->update = mdp3_dmas_update;
+ dma->update_cursor = NULL;
+ dma->get_histo = NULL;
+ dma->histo_op = NULL;
+ dma->vsync_enable = mdp3_dma_vsync_enable;
+ dma->start = mdp3_dma_start;
+ dma->stop = mdp3_dma_stop;
+ break;
+ case MDP3_DMA_E:
+ default:
+ ret = -ENODEV;
+ break;
+ }
+
+ spin_lock_init(&dma->dma_lock);
+ spin_lock_init(&dma->histo_lock);
+ init_completion(&dma->vsync_comp);
+ init_completion(&dma->dma_comp);
+ init_completion(&dma->histo_comp);
+ dma->vsync_client.handler = NULL;
+ dma->vsync_client.arg = NULL;
+ dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+ dma->update_src_cfg = false;
+
+ memset(&dma->cursor, 0, sizeof(dma->cursor));
+ memset(&dma->ccs_config, 0, sizeof(dma->ccs_config));
+ memset(&dma->histogram_config, 0, sizeof(dma->histogram_config));
+
+ return ret;
+}
+
+int lcdc_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+ u32 temp;
+ struct mdp3_video_intf_cfg *v = &cfg->video;
+
+ temp = v->hsync_pulse_width | (v->hsync_period << 16);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_CTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PERIOD, v->vsync_period);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PULSE_WIDTH, v->vsync_pulse_width);
+ temp = v->display_start_x | (v->display_end_x << 16);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_HCTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_START, v->display_start_y);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_END, v->display_end_y);
+ temp = v->active_start_x | (v->active_end_x);
+ if (v->active_h_enable)
+ temp |= BIT(31);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_HCTL, temp);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_START, v->active_start_y);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_END, v->active_end_y);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_SKEW, v->hsync_skew);
+ temp = 0;
+ if (!v->hsync_polarity)
+ temp = BIT(0);
+ if (!v->vsync_polarity)
+ temp = BIT(1);
+ if (!v->de_polarity)
+ temp = BIT(2);
+ MDP3_REG_WRITE(MDP3_REG_LCDC_CTL_POLARITY, temp);
+
+ return 0;
+}
+
+int lcdc_start(struct mdp3_intf *intf)
+{
+ MDP3_REG_WRITE(MDP3_REG_LCDC_EN, BIT(0));
+ wmb(); /* ensure write is finished before progressing */
+ intf->active = true;
+ return 0;
+}
+
+int lcdc_stop(struct mdp3_intf *intf)
+{
+ MDP3_REG_WRITE(MDP3_REG_LCDC_EN, 0);
+ wmb(); /* ensure write is finished before progressing */
+ intf->active = false;
+ return 0;
+}
+
/*
 * Program the DSI video-mode timing generator from @cfg->video: sync
 * geometry, display and active windows, skew, polarities, and the
 * underflow fill colour.  Always returns 0.
 */
int dsi_video_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
{
	u32 temp;
	struct mdp3_video_intf_cfg *v = &cfg->video;

	pr_debug("dsi_video_config\n");

	/* Pulse width in the low half-word, total period in the high. */
	temp = v->hsync_pulse_width | (v->hsync_period << 16);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_CTL, temp);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PERIOD, v->vsync_period);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH,
			v->vsync_pulse_width);
	temp = v->display_start_x | (v->display_end_x << 16);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_HCTL, temp);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_START, v->display_start_y);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_END, v->display_end_y);
	/* BIT(31) enables the active-window override in each register. */
	temp = v->active_start_x | (v->active_end_x << 16);
	if (v->active_h_enable)
		temp |= BIT(31);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_HCTL, temp);

	temp = v->active_start_y;
	if (v->active_v_enable)
		temp |= BIT(31);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_START, temp);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_END, v->active_end_y);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_SKEW, v->hsync_skew);
	/* A cleared polarity flag means active-low: set the invert bit. */
	temp = 0;
	if (!v->hsync_polarity)
		temp |= BIT(0);
	if (!v->vsync_polarity)
		temp |= BIT(1);
	if (!v->de_polarity)
		temp |= BIT(2);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_CTL_POLARITY, temp);

	/* NOTE(review): BIT(31) presumably enables underflow hiding with
	 * the fill colour in the low bits — confirm register layout. */
	v->underflow_color |= 0x80000000;
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL, v->underflow_color);

	return 0;
}
+
+int dsi_video_start(struct mdp3_intf *intf)
+{
+ pr_debug("dsi_video_start\n");
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, BIT(0));
+ wmb(); /* ensure write is finished before progressing */
+ intf->active = true;
+ return 0;
+}
+
+int dsi_video_stop(struct mdp3_intf *intf)
+{
+ pr_debug("dsi_video_stop\n");
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
+ wmb(); /* ensure write is finished before progressing */
+ intf->active = false;
+ return 0;
+}
+
/*
 * Configure DSI command mode: map the primary/secondary command engine
 * ids and select the trigger source.  Always returns 0.
 */
int dsi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
{
	u32 id_map = 0;
	u32 trigger_en = 0;

	if (cfg->dsi_cmd.primary_dsi_cmd_id)
		id_map = BIT(0);
	/* NOTE(review): this overwrites the primary bit when both ids are
	 * set — confirm whether '|=' was intended here. */
	if (cfg->dsi_cmd.secondary_dsi_cmd_id)
		id_map = BIT(4);

	if (cfg->dsi_cmd.dsi_cmd_tg_intf_sel)
		trigger_en = BIT(4);

	MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_ID_MAP, id_map);
	MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_TRIGGER_EN, trigger_en);

	return 0;
}
+
+int dsi_cmd_start(struct mdp3_intf *intf)
+{
+ intf->active = true;
+ return 0;
+}
+
+int dsi_cmd_stop(struct mdp3_intf *intf)
+{
+ intf->active = false;
+ return 0;
+}
+
+int mdp3_intf_init(struct mdp3_intf *intf)
+{
+ switch (intf->cfg.type) {
+ case MDP3_DMA_OUTPUT_SEL_LCDC:
+ intf->config = lcdc_config;
+ intf->start = lcdc_start;
+ intf->stop = lcdc_stop;
+ break;
+ case MDP3_DMA_OUTPUT_SEL_DSI_VIDEO:
+ intf->config = dsi_video_config;
+ intf->start = dsi_video_start;
+ intf->stop = dsi_video_stop;
+ break;
+ case MDP3_DMA_OUTPUT_SEL_DSI_CMD:
+ intf->config = dsi_cmd_config;
+ intf->start = dsi_cmd_start;
+ intf->stop = dsi_cmd_stop;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
new file mode 100644
index 0000000..6c8e7fe
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -0,0 +1,396 @@
+/* Copyright (c) 2013-2014, 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_DMA_H
+#define MDP3_DMA_H
+
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/msm_mdp.h>
+
+#define MDP_HISTOGRAM_BL_SCALE_MAX 1024
+#define MDP_HISTOGRAM_BL_LEVEL_MAX 255
+#define MDP_HISTOGRAM_FRAME_COUNT_MAX 0x20
+#define MDP_HISTOGRAM_BIT_MASK_MAX 0x4
+#define MDP_HISTOGRAM_CSC_MATRIX_MAX 0x2000
+#define MDP_HISTOGRAM_CSC_VECTOR_MAX 0x200
+#define MDP_HISTOGRAM_BIN_NUM 32
+#define MDP_LUT_SIZE 256
+
+enum {
+ MDP3_DMA_P,
+ MDP3_DMA_S,
+ MDP3_DMA_E,
+ MDP3_DMA_MAX
+};
+
+enum {
+ MDP3_DMA_CAP_CURSOR = 0x1,
+ MDP3_DMA_CAP_COLOR_CORRECTION = 0x2,
+ MDP3_DMA_CAP_HISTOGRAM = 0x4,
+ MDP3_DMA_CAP_GAMMA_CORRECTION = 0x8,
+ MDP3_DMA_CAP_DITHER = 0x10,
+ MDP3_DMA_CAP_ALL = 0x1F
+};
+
+enum {
+ MDP3_DMA_OUTPUT_SEL_AHB,
+ MDP3_DMA_OUTPUT_SEL_DSI_CMD,
+ MDP3_DMA_OUTPUT_SEL_LCDC,
+ MDP3_DMA_OUTPUT_SEL_DSI_VIDEO,
+ MDP3_DMA_OUTPUT_SEL_MAX
+};
+
+enum {
+ MDP3_DMA_IBUF_FORMAT_RGB888,
+ MDP3_DMA_IBUF_FORMAT_RGB565,
+ MDP3_DMA_IBUF_FORMAT_XRGB8888,
+ MDP3_DMA_IBUF_FORMAT_UNDEFINED
+};
+
+enum {
+ MDP3_DMA_OUTPUT_PACK_PATTERN_RGB = 0x21,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_RBG = 0x24,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_BGR = 0x12,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_BRG = 0x18,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_GBR = 0x06,
+ MDP3_DMA_OUTPUT_PACK_PATTERN_GRB = 0x09,
+};
+
+enum {
+ MDP3_DMA_OUTPUT_PACK_ALIGN_LSB,
+ MDP3_DMA_OUTPUT_PACK_ALIGN_MSB
+};
+
+enum {
+ MDP3_DMA_OUTPUT_COMP_BITS_4, /*4 bits per color component*/
+ MDP3_DMA_OUTPUT_COMP_BITS_5,
+ MDP3_DMA_OUTPUT_COMP_BITS_6,
+ MDP3_DMA_OUTPUT_COMP_BITS_8,
+};
+
+enum {
+ MDP3_DMA_CURSOR_FORMAT_ARGB888,
+};
+
+enum {
+ MDP3_DMA_COLOR_CORRECT_SET_1,
+ MDP3_DMA_COLOR_CORRECT_SET_2
+};
+
+enum {
+ MDP3_DMA_LUT_POSITION_PRE,
+ MDP3_DMA_LUT_POSITION_POST
+};
+
+enum {
+ MDP3_DMA_LUT_DISABLE = 0x0,
+ MDP3_DMA_LUT_ENABLE_C0 = 0x01,
+ MDP3_DMA_LUT_ENABLE_C1 = 0x02,
+ MDP3_DMA_LUT_ENABLE_C2 = 0x04,
+ MDP3_DMA_LUT_ENABLE_ALL = 0x07,
+};
+
+enum {
+ MDP3_DMA_HISTOGRAM_BIT_MASK_NONE = 0X0,
+ MDP3_DMA_HISTOGRAM_BIT_MASK_ONE_MSB = 0x1,
+ MDP3_DMA_HISTOGRAM_BIT_MASK_TWO_MSB = 0x2,
+ MDP3_DMA_HISTOGRAM_BIT_MASK_THREE_MSB = 0x3
+};
+
+enum {
+ MDP3_DMA_COLOR_FLIP_NONE,
+ MDP3_DMA_COLOR_FLIP_COMP1 = 0x1,
+ MDP3_DMA_COLOR_FLIP_COMP2 = 0x2,
+ MDP3_DMA_COLOR_FLIP_COMP3 = 0x4,
+};
+
+enum {
+ MDP3_DMA_CURSOR_BLEND_NONE = 0x0,
+ MDP3_DMA_CURSOR_BLEND_PER_PIXEL_ALPHA = 0x3,
+ MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA = 0x5,
+ MDP3_DMA_CURSOR_BLEND_COLOR_KEYING = 0x9
+};
+
/* Requests accepted by the (*histo_op)() hook. */
enum {
	MDP3_DMA_HISTO_OP_START,
	MDP3_DMA_HISTO_OP_STOP,
	MDP3_DMA_HISTO_OP_CANCEL,
	MDP3_DMA_HISTO_OP_RESET
};

/* Histogram collection states stored in mdp3_dma.histo_state. */
enum {
	MDP3_DMA_HISTO_STATE_UNKNOWN,
	MDP3_DMA_HISTO_STATE_IDLE,
	MDP3_DMA_HISTO_STATE_RESET,
	MDP3_DMA_HISTO_STATE_START,
	MDP3_DMA_HISTO_STATE_READY,
};

/* Bitmask passed to mdp3_dma_callback_enable()/_disable(). */
enum {
	MDP3_DMA_CALLBACK_TYPE_VSYNC = 0x01,
	MDP3_DMA_CALLBACK_TYPE_DMA_DONE = 0x02,
	MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE = 0x04,
	MDP3_DMA_CALLBACK_TYPE_HIST_DONE = 0x08,
};
+
+struct mdp3_dma_source {
+ u32 format;
+ int width;
+ int height;
+ int x;
+ int y;
+ dma_addr_t buf;
+ int stride;
+ int vsync_count;
+ int vporch;
+};
+
+struct mdp3_dma_output_config {
+ int dither_en;
+ u32 out_sel;
+ u32 bit_mask_polarity;
+ u32 color_components_flip;
+ u32 pack_pattern;
+ u32 pack_align;
+ u32 color_comp_out_bits;
+};
+
+struct mdp3_dma_cursor_blend_config {
+ u32 mode;
+ u32 transparent_color; /*color keying*/
+ u32 transparency_mask;
+ u32 constant_alpha;
+};
+
+struct mdp3_dma_cursor {
+ int enable; /* enable cursor or not*/
+ u32 format;
+ int width;
+ int height;
+ int x;
+ int y;
+ void *buf;
+ struct mdp3_dma_cursor_blend_config blend_config;
+};
+
+struct mdp3_dma_ccs {
+ u32 *mv; /*set1 matrix vector, 3x3 */
+ u32 *pre_bv; /*pre-bias vector for set1, 1x3*/
+ u32 *post_bv; /*post-bias vecotr for set1, */
+ u32 *pre_lv; /*pre-limit vector for set 1, 1x6*/
+ u32 *post_lv;
+};
+
+struct mdp3_dma_lut_config {
+ int lut_enable;
+ u32 lut_sel;
+ u32 lut_position;
+ bool lut_dirty;
+};
+
+struct mdp3_dma_color_correct_config {
+ int ccs_enable;
+ u32 post_limit_sel;
+ u32 pre_limit_sel;
+ u32 post_bias_sel;
+ u32 pre_bias_sel;
+ u32 ccs_sel;
+ bool ccs_dirty;
+};
+
+struct mdp3_dma_histogram_config {
+ int frame_count;
+ u32 bit_mask_polarity;
+ u32 bit_mask;
+ int auto_clear_en;
+};
+
+struct mdp3_dma_histogram_data {
+ u32 r_data[MDP_HISTOGRAM_BIN_NUM];
+ u32 g_data[MDP_HISTOGRAM_BIN_NUM];
+ u32 b_data[MDP_HISTOGRAM_BIN_NUM];
+ u32 extra[2];
+};
+
/* Generic callback registration: handler invoked with the opaque arg. */
struct mdp3_notification {
	void (*handler)(void *arg);
	void *arg;
};
+
+struct mdp3_tear_check {
+ int frame_rate;
+ bool hw_vsync_mode;
+ u32 tear_check_en;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u32 refx100;
+};
+
+struct mdp3_rect {
+ u32 x;
+ u32 y;
+ u32 w;
+ u32 h;
+};
+
+struct mdp3_intf;
+
/*
 * Per-pipe DMA state plus the operation table bound by mdp3_dma_init().
 * Hooks a pipe does not support are NULL (mdp3_dma_init() leaves the
 * cursor/CCS/LUT/histogram hooks NULL for DMA_S).
 */
struct mdp3_dma {
	u32 dma_sel;		/* MDP3_DMA_P / MDP3_DMA_S / MDP3_DMA_E */
	u32 capability;		/* bitmask of MDP3_DMA_CAP_* */
	int in_use;
	int available;

	spinlock_t dma_lock;	/* serialises DMA kick / vsync sequencing */
	spinlock_t histo_lock;	/* protects histogram state + completion */
	struct completion vsync_comp;
	struct completion dma_comp;
	struct completion histo_comp;
	struct kernfs_node *hist_event_sd;
	struct mdp3_notification vsync_client;
	struct mdp3_notification dma_notifier_client;
	struct mdp3_notification retire_client;

	struct mdp3_dma_output_config output_config;
	struct mdp3_dma_source source_config;

	/* Post-processing state (DMA_P only) */
	struct mdp3_dma_cursor cursor;
	struct mdp3_dma_color_correct_config ccs_config;
	struct mdp_csc_cfg_data ccs_cache;
	int cc_vect_sel;	/* active CCS vector set, toggled 0/1 */

	struct work_struct underrun_work; /* reprograms CCS after underrun */
	struct mutex pp_lock;	/* guards post-processing configuration */

	struct mdp3_dma_lut_config lut_config;
	struct mdp3_dma_histogram_config histogram_config;
	int histo_state;	/* MDP3_DMA_HISTO_STATE_* */
	struct mdp3_dma_histogram_data histo_data;
	unsigned int vsync_status;
	bool update_src_cfg;	/* source config must be reprogrammed */
	bool has_panic_ctrl;	/* hardware has the panic/robust QoS block */
	struct mdp3_rect roi;

	u32 lut_sts;
	u32 hist_events;
	struct fb_cmap *gc_cmap;
	struct fb_cmap *hist_cmap;

	/* True while the pipe's busy bit is set (DMA_P only). */
	bool (*busy)(void);

	/* Program source + output configuration into the hardware. */
	int (*dma_config)(struct mdp3_dma *dma,
			struct mdp3_dma_source *source_config,
			struct mdp3_dma_output_config *output_config,
			bool splash_screen_active);

	/* Configure tear-check / sync counters for the source. */
	int (*dma_sync_config)(struct mdp3_dma *dma, struct mdp3_dma_source
				*source_config, struct mdp3_tear_check *te);

	void (*dma_config_source)(struct mdp3_dma *dma);

	/* Start/stop scanout through the attached interface. */
	int (*start)(struct mdp3_dma *dma, struct mdp3_intf *intf);

	int (*stop)(struct mdp3_dma *dma, struct mdp3_intf *intf);

	int (*config_cursor)(struct mdp3_dma *dma,
				struct mdp3_dma_cursor *cursor);

	int (*config_ccs)(struct mdp3_dma *dma,
			struct mdp3_dma_color_correct_config *config,
			struct mdp3_dma_ccs *ccs);

	int (*config_lut)(struct mdp3_dma *dma,
			struct mdp3_dma_lut_config *config,
			struct fb_cmap *cmap);

	/* Push a new frame buffer out through the interface. */
	int (*update)(struct mdp3_dma *dma,
			void *buf, struct mdp3_intf *intf, void *data);

	int (*update_cursor)(struct mdp3_dma *dma, int x, int y);

	int (*get_histo)(struct mdp3_dma *dma);

	int (*config_histo)(struct mdp3_dma *dma,
				struct mdp3_dma_histogram_config *histo_config);

	/* Takes one of MDP3_DMA_HISTO_OP_*. */
	int (*histo_op)(struct mdp3_dma *dma, u32 op);

	/* Register/unregister notification clients. */
	void (*vsync_enable)(struct mdp3_dma *dma,
			struct mdp3_notification *vsync_client);

	void (*retire_enable)(struct mdp3_dma *dma,
			struct mdp3_notification *retire_client);

	void (*dma_done_notifier)(struct mdp3_dma *dma,
			struct mdp3_notification *dma_client);
};
+
+struct mdp3_video_intf_cfg {
+ int hsync_period;
+ int hsync_pulse_width;
+ int vsync_period;
+ int vsync_pulse_width;
+ int display_start_x;
+ int display_end_x;
+ int display_start_y;
+ int display_end_y;
+ int active_start_x;
+ int active_end_x;
+ int active_h_enable;
+ int active_start_y;
+ int active_end_y;
+ int active_v_enable;
+ int hsync_skew;
+ int hsync_polarity;
+ int vsync_polarity;
+ int de_polarity;
+ int underflow_color;
+};
+
+struct mdp3_dsi_cmd_intf_cfg {
+ int primary_dsi_cmd_id;
+ int secondary_dsi_cmd_id;
+ int dsi_cmd_tg_intf_sel;
+};
+
+struct mdp3_intf_cfg {
+ u32 type;
+ struct mdp3_video_intf_cfg video;
+ struct mdp3_dsi_cmd_intf_cfg dsi_cmd;
+};
+
+struct mdp3_intf {
+ struct mdp3_intf_cfg cfg;
+ int active;
+ int available;
+ int in_use;
+ int (*config)(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
+ int (*start)(struct mdp3_intf *intf);
+ int (*stop)(struct mdp3_intf *intf);
+};
+
+int mdp3_dma_init(struct mdp3_dma *dma);
+
+int mdp3_intf_init(struct mdp3_intf *intf);
+
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type);
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type);
+
+void mdp3_hist_intr_notify(struct mdp3_dma *dma);
+#endif /* MDP3_DMA_H */
diff --git a/drivers/video/fbdev/msm/mdp3_hwio.h b/drivers/video/fbdev/msm/mdp3_hwio.h
new file mode 100644
index 0000000..2e3d358
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_hwio.h
@@ -0,0 +1,361 @@
+/* Copyright (c) 2013-2014, 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_HWIO_H
+#define MDP3_HWIO_H
+
+#include <linux/bitops.h>
+
+/*synchronization*/
+#define MDP3_REG_SYNC_CONFIG_0 0x0300
+#define MDP3_REG_SYNC_CONFIG_1 0x0304
+#define MDP3_REG_SYNC_CONFIG_2 0x0308
+#define MDP3_REG_SYNC_STATUS_0 0x030c
+#define MDP3_REG_SYNC_STATUS_1 0x0310
+#define MDP3_REG_SYNC_STATUS_2 0x0314
+#define MDP3_REG_PRIMARY_VSYNC_OUT_CTRL 0x0318
+#define MDP3_REG_SECONDARY_VSYNC_OUT_CTRL 0x031c
+#define MDP3_REG_EXTERNAL_VSYNC_OUT_CTRL 0x0320
+#define MDP3_REG_VSYNC_SEL 0x0324
+#define MDP3_REG_PRIMARY_VSYNC_INIT_VAL 0x0328
+#define MDP3_REG_SECONDARY_VSYNC_INIT_VAL 0x032c
+#define MDP3_REG_EXTERNAL_VSYNC_INIT_VAL 0x0330
+#define MDP3_REG_AUTOREFRESH_CONFIG_P 0x034C
+#define MDP3_REG_SYNC_THRESH_0 0x0200
+#define MDP3_REG_SYNC_THRESH_1 0x0204
+#define MDP3_REG_SYNC_THRESH_2 0x0208
+#define MDP3_REG_TEAR_CHECK_EN 0x020C
+#define MDP3_REG_PRIMARY_START_P0S 0x0210
+#define MDP3_REG_SECONDARY_START_POS 0x0214
+#define MDP3_REG_EXTERNAL_START_POS 0x0218
+
+/*interrupt*/
+#define MDP3_REG_INTR_ENABLE 0x0020
+#define MDP3_REG_INTR_STATUS 0x0024
+#define MDP3_REG_INTR_CLEAR 0x0028
+
+#define MDP3_REG_PRIMARY_RD_PTR_IRQ 0x021C
+#define MDP3_REG_SECONDARY_RD_PTR_IRQ 0x0220
+
+/*operation control*/
+#define MDP3_REG_DMA_P_START 0x0044
+#define MDP3_REG_DMA_S_START 0x0048
+#define MDP3_REG_DMA_E_START 0x004c
+
+#define MDP3_REG_DISPLAY_STATUS 0x0038
+
+#define MDP3_REG_HW_VERSION 0x0070
+#define MDP3_REG_SW_RESET 0x0074
+#define MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS 0x007C
+
+/*EBI*/
+#define MDP3_REG_EBI2_LCD0 0x003c
+#define MDP3_REG_EBI2_LCD0_YSTRIDE 0x0050
+
+/*clock control*/
+#define MDP3_REG_CGC_EN 0x0100
+#define MDP3_VBIF_REG_FORCE_EN 0x0004
+
+/* QOS Remapper */
+#define MDP3_DMA_P_QOS_REMAPPER 0x90090
+#define MDP3_DMA_P_WATERMARK_0 0x90094
+#define MDP3_DMA_P_WATERMARK_1 0x90098
+#define MDP3_DMA_P_WATERMARK_2 0x9009C
+#define MDP3_PANIC_ROBUST_CTRL 0x900A0
+#define MDP3_PANIC_LUT0 0x900A4
+#define MDP3_PANIC_LUT1 0x900A8
+#define MDP3_ROBUST_LUT 0x900AC
+
+/*danger safe*/
+#define MDP3_PANIC_ROBUST_CTRL 0x900A0
+
+/*DMA_P*/
+#define MDP3_REG_DMA_P_CONFIG 0x90000
+#define MDP3_REG_DMA_P_SIZE 0x90004
+#define MDP3_REG_DMA_P_IBUF_ADDR 0x90008
+#define MDP3_REG_DMA_P_IBUF_Y_STRIDE 0x9000C
+#define MDP3_REG_DMA_P_PROFILE_EN 0x90020
+#define MDP3_REG_DMA_P_OUT_XY 0x90010
+#define MDP3_REG_DMA_P_CURSOR_FORMAT 0x90040
+#define MDP3_REG_DMA_P_CURSOR_SIZE 0x90044
+#define MDP3_REG_DMA_P_CURSOR_BUF_ADDR 0x90048
+#define MDP3_REG_DMA_P_CURSOR_POS 0x9004c
+#define MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG 0x90060
+#define MDP3_REG_DMA_P_CURSOR_BLEND_PARAM 0x90064
+#define MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK 0x90068
+#define MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG 0x90070
+#define MDP3_REG_DMA_P_CSC_BYPASS 0X93004
+#define MDP3_REG_DMA_P_CSC_MV1 0x93400
+#define MDP3_REG_DMA_P_CSC_MV2 0x93440
+#define MDP3_REG_DMA_P_CSC_PRE_BV1 0x93500
+#define MDP3_REG_DMA_P_CSC_PRE_BV2 0x93540
+#define MDP3_REG_DMA_P_CSC_POST_BV1 0x93580
+#define MDP3_REG_DMA_P_CSC_POST_BV2 0x935c0
+#define MDP3_REG_DMA_P_CSC_PRE_LV1 0x93600
+#define MDP3_REG_DMA_P_CSC_PRE_LV2 0x93640
+#define MDP3_REG_DMA_P_CSC_POST_LV1 0x93680
+#define MDP3_REG_DMA_P_CSC_POST_LV2 0x936c0
+#define MDP3_REG_DMA_P_CSC_LUT1 0x93800
+#define MDP3_REG_DMA_P_CSC_LUT2 0x93c00
+#define MDP3_REG_DMA_P_HIST_START 0x94000
+#define MDP3_REG_DMA_P_HIST_FRAME_CNT 0x94004
+#define MDP3_REG_DMA_P_HIST_BIT_MASK 0x94008
+#define MDP3_REG_DMA_P_HIST_RESET_SEQ_START 0x9400c
+#define MDP3_REG_DMA_P_HIST_CONTROL 0x94010
+#define MDP3_REG_DMA_P_HIST_INTR_STATUS 0x94014
+#define MDP3_REG_DMA_P_HIST_INTR_CLEAR 0x94018
+#define MDP3_REG_DMA_P_HIST_INTR_ENABLE 0x9401c
+#define MDP3_REG_DMA_P_HIST_STOP_REQ 0x94020
+#define MDP3_REG_DMA_P_HIST_CANCEL_REQ 0x94024
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_0 0x94028
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_1 0x9402c
+#define MDP3_REG_DMA_P_HIST_R_DATA 0x94100
+#define MDP3_REG_DMA_P_HIST_G_DATA 0x94200
+#define MDP3_REG_DMA_P_HIST_B_DATA 0x94300
+#define MDP3_REG_DMA_P_FETCH_CFG 0x90074
+#define MDP3_REG_DMA_P_DCVS_CTRL 0x90080
+#define MDP3_REG_DMA_P_DCVS_STATUS 0x90084
+
+/*DMA_S*/
+#define MDP3_REG_DMA_S_CONFIG 0xA0000
+#define MDP3_REG_DMA_S_SIZE 0xA0004
+#define MDP3_REG_DMA_S_IBUF_ADDR 0xA0008
+#define MDP3_REG_DMA_S_IBUF_Y_STRIDE 0xA000C
+#define MDP3_REG_DMA_S_OUT_XY 0xA0010
+
+/*DMA MASK*/
+#define MDP3_DMA_IBUF_FORMAT_MASK 0x06000000
+#define MDP3_DMA_PACK_PATTERN_MASK 0x00003f00
+
+/*MISR*/
+#define MDP3_REG_MODE_CLK 0x000D0000
+#define MDP3_REG_MISR_RESET_CLK 0x000D0004
+#define MDP3_REG_EXPORT_MISR_CLK 0x000D0008
+#define MDP3_REG_MISR_CURR_VAL_CLK 0x000D000C
+#define MDP3_REG_MODE_HCLK 0x000D0100
+#define MDP3_REG_MISR_RESET_HCLK 0x000D0104
+#define MDP3_REG_EXPORT_MISR_HCLK 0x000D0108
+#define MDP3_REG_MISR_CURR_VAL_HCLK 0x000D010C
+#define MDP3_REG_MODE_DCLK 0x000D0200
+#define MDP3_REG_MISR_RESET_DCLK 0x000D0204
+#define MDP3_REG_EXPORT_MISR_DCLK 0x000D0208
+#define MDP3_REG_MISR_CURR_VAL_DCLK 0x000D020C
+#define MDP3_REG_CAPTURED_DCLK 0x000D0210
+#define MDP3_REG_MISR_CAPT_VAL_DCLK 0x000D0214
+#define MDP3_REG_MODE_TVCLK 0x000D0300
+#define MDP3_REG_MISR_RESET_TVCLK 0x000D0304
+#define MDP3_REG_EXPORT_MISR_TVCLK 0x000D0308
+#define MDP3_REG_MISR_CURR_VAL_TVCLK 0x000D030C
+#define MDP3_REG_CAPTURED_TVCLK 0x000D0310
+#define MDP3_REG_MISR_CAPT_VAL_TVCLK 0x000D0314
+
+/* Select DSI operation type(CMD/VIDEO) */
+#define MDP3_REG_MODE_DSI_PCLK 0x000D0400
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_CMD 0x10
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO1 0x20
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO2 0x30
+/* RESET DSI MISR STATE */
+#define MDP3_REG_MISR_RESET_DSI_PCLK 0x000D0404
+
+/* For reading MISR State(1) and driving data on test bus(0) */
+#define MDP3_REG_EXPORT_MISR_DSI_PCLK 0x000D0408
+/* Read MISR signature */
+#define MDP3_REG_MISR_CURR_VAL_DSI_PCLK 0x000D040C
+
+/* MISR status Bit0 (1) Capture Done */
+#define MDP3_REG_CAPTURED_DSI_PCLK 0x000D0410
+#define MDP3_REG_MISR_CAPT_VAL_DSI_PCLK 0x000D0414
+#define MDP3_REG_MISR_TESTBUS_CAPT_VAL 0x000D0600
+
+/*interface*/
+#define MDP3_REG_LCDC_EN 0xE0000
+#define MDP3_REG_LCDC_HSYNC_CTL 0xE0004
+#define MDP3_REG_LCDC_VSYNC_PERIOD 0xE0008
+#define MDP3_REG_LCDC_VSYNC_PULSE_WIDTH 0xE000C
+#define MDP3_REG_LCDC_DISPLAY_HCTL 0xE0010
+#define MDP3_REG_LCDC_DISPLAY_V_START 0xE0014
+#define MDP3_REG_LCDC_DISPLAY_V_END 0xE0018
+#define MDP3_REG_LCDC_ACTIVE_HCTL 0xE001C
+#define MDP3_REG_LCDC_ACTIVE_V_START 0xE0020
+#define MDP3_REG_LCDC_ACTIVE_V_END 0xE0024
+#define MDP3_REG_LCDC_BORDER_COLOR 0xE0028
+#define MDP3_REG_LCDC_UNDERFLOW_CTL 0xE002C
+#define MDP3_REG_LCDC_HSYNC_SKEW 0xE0030
+#define MDP3_REG_LCDC_TEST_CTL 0xE0034
+#define MDP3_REG_LCDC_CTL_POLARITY 0xE0038
+#define MDP3_REG_LCDC_TEST_COL_VAR1 0xE003C
+#define MDP3_REG_LCDC_TEST_COL_VAR2 0xE0040
+#define MDP3_REG_LCDC_UFLOW_HIDING_CTL 0xE0044
+#define MDP3_REG_LCDC_LOST_PIXEL_CNT_VALUE 0xE0048
+
+#define MDP3_REG_DSI_VIDEO_EN 0xF0000
+#define MDP3_REG_DSI_VIDEO_HSYNC_CTL 0xF0004
+#define MDP3_REG_DSI_VIDEO_VSYNC_PERIOD 0xF0008
+#define MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH 0xF000C
+#define MDP3_REG_DSI_VIDEO_DISPLAY_HCTL 0xF0010
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_START 0xF0014
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_END 0xF0018
+#define MDP3_REG_DSI_VIDEO_ACTIVE_HCTL 0xF001C
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_START 0xF0020
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_END 0xF0024
+#define MDP3_REG_DSI_VIDEO_BORDER_COLOR 0xF0028
+#define MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL 0xF002C
+#define MDP3_REG_DSI_VIDEO_HSYNC_SKEW 0xF0030
+#define MDP3_REG_DSI_VIDEO_TEST_CTL 0xF0034
+#define MDP3_REG_DSI_VIDEO_CTL_POLARITY 0xF0038
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR1 0xF003C
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR2 0xF0040
+#define MDP3_REG_DSI_VIDEO_UFLOW_HIDING_CTL 0xF0044
+#define MDP3_REG_DSI_VIDEO_LOST_PIXEL_CNT_VALUE 0xF0048
+
+#define MDP3_REG_DSI_CMD_MODE_ID_MAP 0xF1000
+#define MDP3_REG_DSI_CMD_MODE_TRIGGER_EN 0xF1004
+
+#define MDP3_PPP_CSC_PFMVn(n) (0x40400 + (4 * (n)))
+#define MDP3_PPP_CSC_PRMVn(n) (0x40440 + (4 * (n)))
+#define MDP3_PPP_CSC_PBVn(n) (0x40500 + (4 * (n)))
+#define MDP3_PPP_CSC_PLVn(n) (0x40580 + (4 * (n)))
+
+#define MDP3_PPP_CSC_SFMVn(n) (0x40480 + (4 * (n)))
+#define MDP3_PPP_CSC_SRMVn(n) (0x404C0 + (4 * (n)))
+#define MDP3_PPP_CSC_SBVn(n) (0x40540 + (4 * (n)))
+#define MDP3_PPP_CSC_SLVn(n) (0x405C0 + (4 * (n)))
+
+#define MDP3_PPP_SCALE_PHASEX_INIT 0x1013C
+#define MDP3_PPP_SCALE_PHASEY_INIT 0x10140
+#define MDP3_PPP_SCALE_PHASEX_STEP 0x10144
+#define MDP3_PPP_SCALE_PHASEY_STEP 0x10148
+
+#define MDP3_PPP_OP_MODE 0x10138
+
+#define MDP3_PPP_PRE_LUT 0x40800
+#define MDP3_PPP_POST_LUT 0x40C00
+#define MDP3_PPP_LUTn(n) ((4 * (n)))
+
+#define MDP3_PPP_BG_EDGE_REP 0x101BC
+#define MDP3_PPP_SRC_EDGE_REP 0x101B8
+
+#define MDP3_PPP_STRIDE_MASK 0x3FFF
+#define MDP3_PPP_STRIDE1_OFFSET 16
+
+#define MDP3_PPP_XY_MASK 0x0FFF
+#define MDP3_PPP_XY_OFFSET 16
+
+#define MDP3_PPP_SRC_SIZE 0x10108
+#define MDP3_PPP_SRCP0_ADDR 0x1010C
+#define MDP3_PPP_SRCP1_ADDR 0x10110
+#define MDP3_PPP_SRCP3_ADDR 0x10118
+#define MDP3_PPP_SRC_YSTRIDE1_ADDR 0x1011C
+#define MDP3_PPP_SRC_YSTRIDE2_ADDR 0x10120
+#define MDP3_PPP_SRC_FORMAT 0x10124
+#define MDP3_PPP_SRC_UNPACK_PATTERN1 0x10128
+#define MDP3_PPP_SRC_UNPACK_PATTERN2 0x1012C
+
+#define MDP3_PPP_OUT_FORMAT 0x10150
+#define MDP3_PPP_OUT_PACK_PATTERN1 0x10154
+#define MDP3_PPP_OUT_PACK_PATTERN2 0x10158
+#define MDP3_PPP_OUT_SIZE 0x10164
+#define MDP3_PPP_OUTP0_ADDR 0x10168
+#define MDP3_PPP_OUTP1_ADDR 0x1016C
+#define MDP3_PPP_OUTP3_ADDR 0x10174
+#define MDP3_PPP_OUT_YSTRIDE1_ADDR 0x10178
+#define MDP3_PPP_OUT_YSTRIDE2_ADDR 0x1017C
+#define MDP3_PPP_OUT_XY 0x1019C
+
+#define MDP3_PPP_BGP0_ADDR 0x101C0
+#define MDP3_PPP_BGP1_ADDR 0x101C4
+#define MDP3_PPP_BGP3_ADDR 0x101C8
+#define MDP3_PPP_BG_YSTRIDE1_ADDR 0x101CC
+#define MDP3_PPP_BG_YSTRIDE2_ADDR 0x101D0
+#define MDP3_PPP_BG_FORMAT 0x101D4
+#define MDP3_PPP_BG_UNPACK_PATTERN1 0x101D8
+#define MDP3_PPP_BG_UNPACK_PATTERN2 0x101DC
+
+#define MDP3_TFETCH_SOLID_FILL 0x20004
+#define MDP3_TFETCH_FILL_COLOR 0x20040
+
+#define MDP3_PPP_BLEND_PARAM 0x1014C
+
+#define MDP3_PPP_BLEND_BG_ALPHA_SEL 0x70010
+
+#define MDP3_PPP_ACTIVE BIT(0)
+
+/*interrupt mask*/
+
+#define MDP3_INTR_DP0_ROI_DONE_BIT BIT(0)
+#define MDP3_INTR_DP1_ROI_DONE_BIT BIT(1)
+#define MDP3_INTR_DMA_S_DONE_BIT BIT(2)
+#define MDP3_INTR_DMA_E_DONE_BIT BIT(3)
+#define MDP3_INTR_DP0_TERMINAL_FRAME_DONE_BIT BIT(4)
+#define MDP3_INTR_DP1_TERMINAL_FRAME_DONE_BIT BIT(5)
+#define MDP3_INTR_DMA_TV_DONE_BIT BIT(6)
+#define MDP3_INTR_TV_ENCODER_UNDER_RUN_BIT BIT(7)
+#define MDP3_INTR_SYNC_PRIMARY_LINE_BIT BIT(8)
+#define MDP3_INTR_SYNC_SECONDARY_LINE_BIT BIT(9)
+#define MDP3_INTR_SYNC_EXTERNAL_LINE_BIT BIT(10)
+#define MDP3_INTR_DP0_FETCH_DONE_BIT BIT(11)
+#define MDP3_INTR_DP1_FETCH_DONE_BIT BIT(12)
+#define MDP3_INTR_TV_OUT_FRAME_START_BIT BIT(13)
+#define MDP3_INTR_DMA_P_DONE_BIT BIT(14)
+#define MDP3_INTR_LCDC_START_OF_FRAME_BIT BIT(15)
+#define MDP3_INTR_LCDC_UNDERFLOW_BIT BIT(16)
+#define MDP3_INTR_DMA_P_LINE_BIT BIT(17)
+#define MDP3_INTR_DMA_S_LINE_BIT BIT(18)
+#define MDP3_INTR_DMA_E_LINE_BIT BIT(19)
+#define MDP3_INTR_DMA_P_HISTO_BIT BIT(20)
+#define MDP3_INTR_DTV_OUT_DONE_BIT BIT(21)
+#define MDP3_INTR_DTV_OUT_START_OF_FRAME_BIT BIT(22)
+#define MDP3_INTR_DTV_OUT_UNDERFLOW_BIT BIT(23)
+#define MDP3_INTR_DTV_OUT_LINE_BIT BIT(24)
+#define MDP3_INTR_DMA_P_AUTO_FREFRESH_START_BIT BIT(25)
+#define MDP3_INTR_DMA_S_AUTO_FREFRESH_START_BIT BIT(26)
+#define MDP3_INTR_QPIC_EOF_ENABLE_BIT BIT(27)
+
+enum {
+ MDP3_INTR_DP0_ROI_DONE,
+ MDP3_INTR_DP1_ROI_DONE,
+ MDP3_INTR_DMA_S_DONE,
+ MDP3_INTR_DMA_E_DONE,
+ MDP3_INTR_DP0_TERMINAL_FRAME_DONE,
+ MDP3_INTR_DP1_TERMINAL_FRAME_DONE,
+ MDP3_INTR_DMA_TV_DONE,
+ MDP3_INTR_TV_ENCODER_UNDER_RUN,
+ MDP3_INTR_SYNC_PRIMARY_LINE,
+ MDP3_INTR_SYNC_SECONDARY_LINE,
+ MDP3_INTR_SYNC_EXTERNAL_LINE,
+ MDP3_INTR_DP0_FETCH_DONE,
+ MDP3_INTR_DP1_FETCH_DONE,
+ MDP3_INTR_TV_OUT_FRAME_START,
+ MDP3_INTR_DMA_P_DONE,
+ MDP3_INTR_LCDC_START_OF_FRAME,
+ MDP3_INTR_LCDC_UNDERFLOW,
+ MDP3_INTR_DMA_P_LINE,
+ MDP3_INTR_DMA_S_LINE,
+ MDP3_INTR_DMA_E_LINE,
+ MDP3_INTR_DMA_P_HISTO,
+ MDP3_INTR_DTV_OUT_DONE,
+ MDP3_INTR_DTV_OUT_START_OF_FRAME,
+ MDP3_INTR_DTV_OUT_UNDERFLOW,
+ MDP3_INTR_DTV_OUT_LINE,
+ MDP3_INTR_DMA_P_AUTO_FREFRESH_START,
+ MDP3_INTR_DMA_S_AUTO_FREFRESH_START,
+ MDP3_INTR_QPIC_EOF_ENABLE,
+};
+
+#define MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT BIT(0)
+#define MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT BIT(1)
+#define MDP3_PPP_DONE MDP3_INTR_DP0_ROI_DONE
+
+#define MDP3_DMA_P_BUSY_BIT BIT(6)
+
+#endif /* MDP3_HWIO_H */
diff --git a/drivers/video/fbdev/msm/mdp3_layer.c b/drivers/video/fbdev/msm/mdp3_layer.c
new file mode 100644
index 0000000..6c45395
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_layer.c
@@ -0,0 +1,345 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/memblock.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include <linux/file.h>
+
+#include <soc/qcom/event_timer.h>
+#include "mdp3_ctrl.h"
+#include "mdp3.h"
+#include "mdp3_ppp.h"
+#include "mdp3_ctrl.h"
+#include "mdss_fb.h"
+
+enum {
+ MDP3_RELEASE_FENCE = 0,
+ MDP3_RETIRE_FENCE,
+};
+
+/*
+ * __mdp3_create_fence() - create a release or retire sync fence and
+ * install it on a freshly allocated fd.
+ *
+ * A retire fence for a command-mode panel is created on the session's
+ * vsync timeline, one step past the current value plus the pending
+ * retire count; every other fence is created on @sync_pt_data's
+ * timeline at @value.
+ *
+ * Returns the fence on success (with *fence_fd holding the installed
+ * fd), ERR_PTR(-EPERM) when a cmd-panel retire fence is requested but
+ * no vsync timeline exists, or NULL when fence or fd creation fails.
+ */
+static struct sync_fence *__mdp3_create_fence(struct msm_fb_data_type *mfd,
+	struct msm_sync_pt_data *sync_pt_data, u32 fence_type,
+	int *fence_fd, int value)
+{
+	struct sync_fence *sync_fence = NULL;
+	char fence_name[32];
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	if (fence_type == MDP3_RETIRE_FENCE)
+		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
+			mfd->index);
+	else
+		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
+			mfd->index);
+
+	if ((fence_type == MDP3_RETIRE_FENCE) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)) {
+		if (mdp3_session->vsync_timeline) {
+			value = mdp3_session->vsync_timeline->value + 1 +
+				mdp3_session->retire_cnt++;
+			sync_fence = mdss_fb_sync_get_fence(
+					mdp3_session->vsync_timeline,
+						fence_name, value);
+		} else {
+			return ERR_PTR(-EPERM);
+		}
+	} else {
+		sync_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
+			fence_name, value);
+	}
+
+	if (IS_ERR_OR_NULL(sync_fence)) {
+		pr_err("%s: unable to retrieve release fence\n", fence_name);
+		goto end;
+	}
+
+	/* get fence fd */
+	*fence_fd = get_unused_fd_flags(0);
+	if (*fence_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
+			fence_name, *fence_fd);
+		sync_fence_put(sync_fence);
+		sync_fence = NULL;
+		goto end;
+	}
+
+	sync_fence_install(sync_fence, *fence_fd);
+end:
+
+	return sync_fence;
+}
+
+/*
+ * __mdp3_handle_buffer_fences() - collect acquire fences and return
+ * release/retire fences to the caller.
+ *
+ * Waits on fences left over from the previous commit, copies every
+ * input layer's buffer fence into the acquire-fence array, then creates
+ * the release and retire fences reported back through @commit.  It acts
+ * like the legacy buf_sync ioctl.
+ *
+ * Returns 0 on success or a negative errno; on failure all acquire
+ * fences collected so far are dropped and the commit fence fds are
+ * set to -1.
+ */
+static int __mdp3_handle_buffer_fences(struct msm_fb_data_type *mfd,
+	struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list)
+{
+	struct sync_fence *fence, *release_fence, *retire_fence;
+	struct msm_sync_pt_data *sync_pt_data = NULL;
+	struct mdp_input_layer *layer;
+	int value;
+	/* errnos are negative: keep 'ret' signed (was lumped in with the
+	 * u32 counters below)
+	 */
+	int ret = 0;
+
+	u32 acq_fen_count, i;
+	u32 layer_count = commit->input_layer_cnt;
+
+	sync_pt_data = &mfd->mdp_sync_pt_data;
+	if (!sync_pt_data) {
+		pr_err("sync point data are NULL\n");
+		return -EINVAL;
+	}
+
+	i = mdss_fb_wait_for_fence(sync_pt_data);
+	if (i > 0)
+		pr_warn("%s: waited on %d active fences\n",
+			sync_pt_data->fence_name, i);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	for (i = 0, acq_fen_count = 0; i < layer_count; i++) {
+		layer = &layer_list[i];
+
+		/* a negative fd means no fence attached to this layer */
+		if (layer->buffer.fence < 0)
+			continue;
+
+		fence = sync_fence_fdget(layer->buffer.fence);
+		if (!fence) {
+			pr_err("%s: sync fence get failed! fd=%d\n",
+				sync_pt_data->fence_name, layer->buffer.fence);
+			ret = -EINVAL;
+			goto sync_fence_err;
+		}
+		sync_pt_data->acq_fen[acq_fen_count++] = fence;
+	}
+
+	sync_pt_data->acq_fen_cnt = acq_fen_count;
+
+	value = sync_pt_data->timeline_value + sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+
+	release_fence = __mdp3_create_fence(mfd, sync_pt_data,
+		MDP3_RELEASE_FENCE, &commit->release_fence, value);
+	if (IS_ERR_OR_NULL(release_fence)) {
+		pr_err("unable to retrieve release fence\n");
+		/* PTR_ERR(NULL) is 0 - never report success on failure */
+		ret = IS_ERR(release_fence) ?
+				PTR_ERR(release_fence) : -ENOMEM;
+		goto release_fence_err;
+	}
+
+	retire_fence = __mdp3_create_fence(mfd, sync_pt_data,
+		MDP3_RETIRE_FENCE, &commit->retire_fence, value);
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		pr_err("unable to retrieve retire fence\n");
+		ret = IS_ERR(retire_fence) ?
+				PTR_ERR(retire_fence) : -ENOMEM;
+		goto retire_fence_err;
+	}
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+	return ret;
+
+retire_fence_err:
+	put_unused_fd(commit->release_fence);
+	sync_fence_put(release_fence);
+release_fence_err:
+	commit->retire_fence = -1;
+	commit->release_fence = -1;
+sync_fence_err:
+	/* Use the local count: acq_fen_cnt is not yet updated when the
+	 * collection loop fails part way through, which previously leaked
+	 * the fences gathered in this pass.
+	 */
+	for (i = 0; i < acq_fen_count; i++)
+		sync_fence_put(sync_pt_data->acq_fen[i]);
+	sync_pt_data->acq_fen_cnt = 0;
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+
+	return ret;
+}
+
+/*
+ * __mdp3_map_layer_buffer() - map the first input layer's buffer and
+ * queue it for the upcoming commit.
+ *
+ * Only plane 0 is mapped (single-plane support).  The mapped buffer is
+ * size-checked against the current DMA configuration before being
+ * pushed onto the session's input queue.  For command-mode panels the
+ * IOMMU is enabled around the mapping.  Returns 0 on success or a
+ * negative errno.
+ */
+static int __mdp3_map_layer_buffer(struct msm_fb_data_type *mfd,
+	struct mdp_input_layer *input_layer)
+{
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma = mdp3_session->dma;
+	struct mdp_input_layer *layer = NULL;
+	struct mdp_layer_buffer *buffer;
+	struct msmfb_data img;
+	bool is_panel_type_cmd = false;
+	struct mdp3_img_data data;
+	int rc = 0;
+
+	layer = &input_layer[0];
+	buffer = &layer->buffer;
+
+	/* current implementation only supports one plane mapping */
+	if (buffer->planes[0].fd < 0) {
+		pr_err("invalid file descriptor for layer buffer\n");
+		/* was: fell through to 'err' with rc == 0 (success) */
+		rc = -EINVAL;
+		goto err;
+	}
+
+	memset(&img, 0, sizeof(img));
+	img.memory_id = buffer->planes[0].fd;
+	img.offset = buffer->planes[0].offset;
+
+	memset(&data, 0, sizeof(struct mdp3_img_data));
+
+	if (mfd->panel.type == MIPI_CMD_PANEL)
+		is_panel_type_cmd = true;
+	if (is_panel_type_cmd) {
+		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		if (rc) {
+			pr_err("fail to enable iommu\n");
+			return rc;
+		}
+	}
+
+	rc = mdp3_get_img(&img, &data, MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to get overlay buffer\n");
+		goto err;
+	}
+
+	/* the buffer must cover at least one full frame as configured */
+	if (data.len < dma->source_config.stride * dma->source_config.height) {
+		pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
+			data.len, (dma->source_config.stride *
+			dma->source_config.height));
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
+	if (rc) {
+		pr_err("fail to queue the overlay buffer, buffer drop\n");
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		goto err;
+	}
+	rc = 0;
+err:
+	if (is_panel_type_cmd)
+		mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+	return rc;
+}
+
+/*
+ * mdp3_layer_pre_commit() - prepare a validated atomic commit.
+ *
+ * Reconfigures the DMA source format/stride when the incoming layer
+ * differs from the current configuration, sets up acquire/release/
+ * retire fences for the commit, and maps and queues the layer buffer.
+ * A commit with zero input layers is a no-op and returns 0.
+ */
+int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int ret;
+	struct mdp_input_layer *layer, *layer_list;
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_dma *dma;
+	int layer_count = commit->input_layer_cnt;
+	int stride, format;
+
+	/* Handle NULL commit */
+	if (!layer_count) {
+		pr_debug("Handle NULL commit\n");
+		return 0;
+	}
+
+	mdp3_session = mfd->mdp.private1;
+	dma = mdp3_session->dma;
+
+	mutex_lock(&mdp3_session->lock);
+
+	/* NOTE(review): assumes this drops buffers still queued from a
+	 * previous commit - confirm against mdp3_bufq_deinit().
+	 */
+	mdp3_bufq_deinit(&mdp3_session->bufq_in);
+
+	layer_list = commit->input_layers;
+	layer = &layer_list[0];
+
+	stride = layer->buffer.width * ppp_bpp(layer->buffer.format);
+	format = mdp3_ctrl_get_source_format(layer->buffer.format);
+	pr_debug("stride:%d layer_width:%d", stride, layer->buffer.width);
+
+	/* only reprogram the DMA source when the buffer geometry changed */
+	if ((dma->source_config.format != format) ||
+			(dma->source_config.stride != stride)) {
+		dma->source_config.format = format;
+		dma->source_config.stride = stride;
+		dma->output_config.pack_pattern =
+			mdp3_ctrl_get_pack_pattern(layer->buffer.format);
+		dma->update_src_cfg = true;
+	}
+	mdp3_session->overlay.id = 1;
+
+	ret = __mdp3_handle_buffer_fences(mfd, commit, layer_list);
+	if (ret) {
+		pr_err("Failed to handle buffer fences\n");
+		mutex_unlock(&mdp3_session->lock);
+		return ret;
+	}
+
+	ret = __mdp3_map_layer_buffer(mfd, layer);
+	if (ret) {
+		pr_err("Failed to map buffer\n");
+		mutex_unlock(&mdp3_session->lock);
+		return ret;
+	}
+
+	pr_debug("mdp3 precommit ret = %d\n", ret);
+	mutex_unlock(&mdp3_session->lock);
+	return ret;
+}
+
+/*
+ * mdp3_layer_atomic_validate() - validate input layers
+ * @mfd: Framebuffer data structure for display
+ * @commit: Commit version-1 structure for display
+ *
+ * This function validates only input layers received from client. It
+ * does not perform any validation for mdp_output_layer defined for
+ * writeback display.
+ */
+int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	struct mdp3_session_data *mdp3_session;
+
+	if (!mfd || !commit) {
+		pr_err("invalid input params\n");
+		return -EINVAL;
+	}
+
+	/* reject commits while the display interface is powered down */
+	if (mdss_fb_is_power_off(mfd)) {
+		pr_err("display interface is in off state fb:%d\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	mdp3_session = mfd->mdp.private1;
+
+	/* first commit after boot: take the pipe back from the splash
+	 * screen before accepting client layers
+	 */
+	if (mdp3_session->in_splash_screen) {
+		mdp3_ctrl_reset(mfd);
+		mdp3_session->in_splash_screen = 0;
+	}
+
+	return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
new file mode 100644
index 0000000..3b72b2d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -0,0 +1,1733 @@
+/* Copyright (c) 2007, 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include "linux/proc_fs.h"
+#include <linux/delay.h>
+
+#include "mdss_fb.h"
+#include "mdp3_ppp.h"
+#include "mdp3_hwio.h"
+#include "mdp3.h"
+#include "mdss_debug.h"
+
+#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
+#define MDP_RELEASE_BW_TIMEOUT 50
+
+#define MDP_PPP_MAX_BPP 4
+#define MDP_PPP_DYNAMIC_FACTOR 3
+#define MDP_PPP_MAX_READ_WRITE 3
+#define MDP_PPP_MAX_WIDTH 0xFFF
+#define ENABLE_SOLID_FILL 0x2
+#define DISABLE_SOLID_FILL 0x0
+#define BLEND_LATENCY 3
+#define CSC_LATENCY 1
+
+#define YUV_BW_FUDGE_NUM 10
+#define YUV_BW_FUDGE_DEN 10
+
+struct ppp_resource ppp_res;
+
+/* Colour formats the PPP blit engine accepts (source or destination);
+ * anything not listed is rejected by mdp3_ppp_verify_fmt().
+ */
+static const bool valid_fmt[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = true,
+	[MDP_BGR_565] = true,
+	[MDP_RGB_888] = true,
+	[MDP_BGR_888] = true,
+	[MDP_BGRA_8888] = true,
+	[MDP_RGBA_8888] = true,
+	[MDP_ARGB_8888] = true,
+	[MDP_XRGB_8888] = true,
+	[MDP_RGBX_8888] = true,
+	[MDP_Y_CRCB_H2V2] = true,
+	[MDP_Y_CBCR_H2V2] = true,
+	[MDP_Y_CBCR_H2V2_ADRENO] = true,
+	[MDP_Y_CBCR_H2V2_VENUS] = true,
+	[MDP_YCRYCB_H2V1] = true,
+	[MDP_Y_CBCR_H2V1] = true,
+	[MDP_Y_CRCB_H2V1] = true,
+	[MDP_BGRX_8888] = true,
+};
+
+#define MAX_LIST_WINDOW 16
+#define MDP3_PPP_MAX_LIST_REQ 8
+
+/* One queued blit batch: up to MAX_LIST_WINDOW requests together with
+ * their mapped source/destination buffers and the sync fences tied to
+ * the batch.
+ */
+struct blit_req_list {
+	int count;
+	struct mdp_blit_req req_list[MAX_LIST_WINDOW];
+	struct mdp3_img_data src_data[MAX_LIST_WINDOW];
+	struct mdp3_img_data dst_data[MAX_LIST_WINDOW];
+	struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
+	u32 acq_fen_cnt;
+	int cur_rel_fen_fd;
+	struct sync_pt *cur_rel_sync_pt;
+	struct sync_fence *cur_rel_fence;
+	struct sync_fence *last_rel_fence;
+};
+
+/* Fixed-size ring buffer of pending blit request lists. */
+struct blit_req_queue {
+	struct blit_req_list req[MDP3_PPP_MAX_LIST_REQ];
+	int count;
+	int push_idx;
+	int pop_idx;
+};
+
+/* Global driver state for the PPP blitter: the blit worker thread and
+ * its request queue, completion/timeline bookkeeping, and the cached
+ * bandwidth/clock votes with the deferred bandwidth-release machinery.
+ */
+struct ppp_status {
+	bool wait_for_pop;
+	struct completion ppp_comp;
+	struct completion pop_q_comp;
+	struct mutex req_mutex; /* Protect request queue */
+	struct mutex config_ppp_mutex; /* Only one client configure register */
+	struct msm_fb_data_type *mfd;
+
+	struct kthread_work blit_work;
+	struct kthread_worker kworker;
+	struct task_struct *blit_thread;
+	struct blit_req_queue req_q;
+
+	struct sw_sync_timeline *timeline;
+	int timeline_value;
+
+	struct timer_list free_bw_timer;
+	struct work_struct free_bw_work;
+	bool bw_update;
+	bool bw_on;
+	u32 mdp_clk;
+};
+
+static struct ppp_status *ppp_stat;
+static bool is_blit_optimization_possible(struct blit_req_list *req, int indx);
+
+/* Scale @val by @numer/@denom using 64-bit math (kernel do_div). */
+static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
+{
+	u64 scaled = val * (u64)numer;
+
+	do_div(scaled, denom);
+	return scaled;
+}
+
+/*
+ * Resolve bytes-per-pixel for @format, substituting the framebuffer's
+ * own format when MDP_FB_FORMAT is requested.  Returns the bpp, or the
+ * non-positive value from ppp_bpp() for an unknown format.
+ */
+int ppp_get_bpp(uint32_t format, uint32_t fb_format)
+{
+	uint32_t fmt = (format == MDP_FB_FORMAT) ? fb_format : format;
+	int bpp = ppp_bpp(fmt);
+
+	if (bpp <= 0)
+		pr_err("%s incorrect format %d\n", __func__, fmt);
+	return bpp;
+}
+
+/*
+ * mdp3_ppp_get_img() - validate an mdp_img and map its buffer for PPP.
+ *
+ * Rejects unknown formats and widths beyond the PPP limit, then hands
+ * the buffer off to mdp3_get_img().  @data->padding is set to 16 lines'
+ * worth of stride.  Returns 0 or a negative errno.
+ */
+int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
+		struct mdp3_img_data *data)
+{
+	/* Zero-init: only flags/memory_id/offset are set explicitly below,
+	 * and mdp3_get_img() must not see stack garbage in the remaining
+	 * fields.
+	 */
+	struct msmfb_data fb_data = {0};
+	uint32_t stride;
+	int bpp = ppp_bpp(img->format);
+
+	if (bpp <= 0) {
+		pr_err("%s incorrect format %d\n", __func__, img->format);
+		return -EINVAL;
+	}
+
+	if (img->width > MDP_PPP_MAX_WIDTH) {
+		pr_err("%s incorrect width %d\n", __func__, img->width);
+		return -EINVAL;
+	}
+
+	fb_data.flags = img->priv;
+	fb_data.memory_id = img->memory_id;
+	fb_data.offset = 0;
+
+	stride = img->width * bpp;
+	data->padding = 16 * stride;
+
+	return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
+}
+
+/* Check format: both endpoints must be in range and PPP-supported. */
+int mdp3_ppp_verify_fmt(struct mdp_blit_req *req)
+{
+	uint32_t src_fmt = req->src.format;
+	uint32_t dst_fmt = req->dst.format;
+
+	if (MDP_IS_IMGTYPE_BAD(src_fmt) || MDP_IS_IMGTYPE_BAD(dst_fmt)) {
+		pr_err("%s: Color format out of range\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!valid_fmt[src_fmt] || !valid_fmt[dst_fmt]) {
+		pr_err("%s: Color format not supported\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Check resolution: ROIs must be non-empty and fit inside their
+ * surfaces.  The bounds checks are written subtraction-style so a huge
+ * x/w (or y/h) pair cannot wrap u32 arithmetic and slip past them.
+ */
+int mdp3_ppp_verify_res(struct mdp_blit_req *req)
+{
+	if ((req->src.width == 0) || (req->src.height == 0) ||
+	    (req->src_rect.w == 0) || (req->src_rect.h == 0) ||
+	    (req->dst.width == 0) || (req->dst.height == 0) ||
+	    (req->dst_rect.w == 0) || (req->dst_rect.h == 0)) {
+		pr_err("%s: Height/width can't be 0\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((req->src_rect.w > req->src.width) ||
+	    (req->src_rect.h > req->src.height) ||
+	    (req->src_rect.x > req->src.width - req->src_rect.w) ||
+	    (req->src_rect.y > req->src.height - req->src_rect.h)) {
+		pr_err("%s: src roi larger than boundary\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((req->dst_rect.w > req->dst.width) ||
+	    (req->dst_rect.h > req->dst.height) ||
+	    (req->dst_rect.x > req->dst.width - req->dst_rect.w) ||
+	    (req->dst_rect.y > req->dst.height - req->dst_rect.h)) {
+		pr_err("%s: dst roi larger than boundary\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* scaling range check: the x and y scale ratios (in MDP_SCALE_Q_FACTOR
+ * units) must both lie within the hardware min/max factors.
+ */
+int mdp3_ppp_verify_scale(struct mdp_blit_req *req)
+{
+	u32 src_width, src_height, dst_width, dst_height;
+
+	src_width = req->src_rect.w;
+	src_height = req->src_rect.h;
+
+	if (req->flags & MDP_ROT_90) {
+		dst_width = req->dst_rect.h;
+		dst_height = req->dst_rect.w;
+	} else {
+		dst_width = req->dst_rect.w;
+		dst_height = req->dst_rect.h;
+	}
+
+	/* chroma-subsampled destinations round dimensions down to even */
+	switch (req->dst.format) {
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+		src_width = (src_width / 2) * 2;
+		src_height = (src_height / 2) * 2;
+		dst_width = (dst_width / 2) * 2;
+		dst_height = (dst_height / 2) * 2;
+		break;
+
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_YCRYCB_H2V1:
+		src_width = (src_width / 2) * 2;
+		dst_width = (dst_width / 2) * 2;
+		break;
+
+	default:
+		break;
+	}
+
+	/* A width/height of 1 rounds down to 0 above; reject it before it
+	 * becomes a divide-by-zero in the ratio checks below.
+	 */
+	if (!src_width || !src_height || !dst_width || !dst_height) {
+		pr_err("%s: zero dimension after format alignment\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
+	     MDP_MAX_X_SCALE_FACTOR)
+	    || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
+	     MDP_MIN_X_SCALE_FACTOR)) {
+		pr_err("%s: x req scale factor beyond capability\n", __func__);
+		return -EINVAL;
+	}
+
+	if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
+	     MDP_MAX_Y_SCALE_FACTOR)
+	    || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
+	     MDP_MIN_Y_SCALE_FACTOR)) {
+		pr_err("%s: y req scale factor beyond capability\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* operation check */
+int mdp3_ppp_verify_op(struct mdp_blit_req *req)
+{
+	/*
+	 * MDP_DEINTERLACE and MDP_SHARPENING are invalid for MDP3 on their
+	 * own; both set together encode MDP_SMART_BLIT, which is allowed.
+	 */
+	if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT)
+		return 0;
+
+	if (req->flags & MDP_DEINTERLACE) {
+		pr_err("\n%s(): deinterlace not supported", __func__);
+		return -EINVAL;
+	}
+	if (req->flags & MDP_SHARPENING) {
+		pr_err("\n%s(): sharpening not supported", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Run every static check on @req.  All validators execute (each logs
+ * its own failure); the result is -EINVAL when any of them failed,
+ * which matches the original OR-combination since every individual
+ * check returns either 0 or -EINVAL.
+ */
+int mdp3_ppp_verify_req(struct mdp_blit_req *req)
+{
+	bool failed;
+
+	if (req == NULL) {
+		pr_err("%s: req == null\n", __func__);
+		return -EINVAL;
+	}
+
+	failed = mdp3_ppp_verify_fmt(req) != 0;
+	failed |= mdp3_ppp_verify_res(req) != 0;
+	failed |= mdp3_ppp_verify_scale(req) != 0;
+	failed |= mdp3_ppp_verify_op(req) != 0;
+
+	return failed ? -EINVAL : 0;
+}
+
+/*
+ * Block until the PPP engine signals completion.  Returns the non-zero
+ * remaining jiffies on success, or 0 when the 200 ms deadline expires
+ * (the MDP is then considered hung and an error is logged).
+ */
+int mdp3_ppp_pipe_wait(void)
+{
+	int remaining;
+
+	remaining = wait_for_completion_timeout(&ppp_stat->ppp_comp,
+			msecs_to_jiffies(200));
+	if (remaining == 0)
+		pr_err("%s: Timed out waiting for the MDP.\n", __func__);
+
+	return remaining;
+}
+
+/*
+ * Convert a client transparency colour into the PPP register layout.
+ * RGB565 sources are expanded to 24 bpp (high bits replicated into the
+ * low bits of each component); 24-bit sources have their low two bytes
+ * swapped while the high byte is kept.
+ */
+uint32_t mdp3_calc_tpval(struct ppp_img_desc *img, uint32_t old_tp)
+{
+	uint32_t tp = 0;
+	uint8_t comp;
+
+	if ((img->color_fmt == MDP_RGB_565) ||
+	    (img->color_fmt == MDP_BGR_565)) {
+		/* transparent color conversion into 24 bpp */
+		comp = (uint8_t)((old_tp & 0xF800) >> 11);
+		tp |= ((comp << 3) | ((comp & 0x1C) >> 2)) << 16;
+
+		comp = (uint8_t)(old_tp & 0x1F);
+		tp |= ((comp << 3) | ((comp & 0x1C) >> 2)) << 8;
+
+		comp = (uint8_t)((old_tp & 0x7E0) >> 5);
+		tp |= (comp << 2) | ((comp & 0x30) >> 4);
+	} else {
+		/* 24bit RGB to RBG conversion */
+		tp = (old_tp & 0xFF00) >> 8;
+		tp |= (old_tp & 0xFF) << 8;
+		tp |= old_tp & 0xFF0000;
+	}
+
+	return tp;
+}
+
+/* PPP-done interrupt callback: wake the waiter in mdp3_ppp_pipe_wait(). */
+static void mdp3_ppp_intr_handler(int type, void *arg)
+{
+	complete(&ppp_stat->ppp_comp);
+}
+
+/* Route the PPP-done interrupt to our completion handler. */
+static int mdp3_ppp_callback_setup(void)
+{
+	struct mdp3_intr_cb ppp_done_cb = {
+		.cb = mdp3_ppp_intr_handler,
+		.data = NULL,
+	};
+
+	return mdp3_set_intr_callback(MDP3_PPP_DONE, &ppp_done_cb);
+}
+
+/*
+ * Arm the PPP-done interrupt, start the PPP engine, and block until the
+ * operation completes (or the 200 ms timeout in mdp3_ppp_pipe_wait()
+ * expires), then disarm the interrupt again.
+ */
+void mdp3_ppp_kickoff(void)
+{
+	init_completion(&ppp_stat->ppp_comp);
+	mdp3_irq_enable(MDP3_PPP_DONE);
+	ppp_enable();
+	ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
+	mdp3_ppp_pipe_wait();
+	ATRACE_END("mdp3_wait_for_ppp_comp");
+	mdp3_irq_disable(MDP3_PPP_DONE);
+}
+
+/* Per-format pixel sizing: bpp_num/bpp_den bytes per pixel overall,
+ * bpp_pln bytes per pixel of plane 0.
+ */
+struct bpp_info {
+	int bpp_num;
+	int bpp_den;
+	int bpp_pln;
+};
+
+/* Fill @bpp for @format; returns 0 or -EINVAL for unknown formats. */
+int mdp3_get_bpp_info(int format, struct bpp_info *bpp)
+{
+	switch (format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+		bpp->bpp_num = 2;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 2;
+		return 0;
+	case MDP_RGB_888:
+	case MDP_BGR_888:
+		bpp->bpp_num = 3;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 3;
+		return 0;
+	case MDP_BGRA_8888:
+	case MDP_RGBA_8888:
+	case MDP_ARGB_8888:
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+	case MDP_BGRX_8888:
+		bpp->bpp_num = 4;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 4;
+		return 0;
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V2_ADRENO:
+	case MDP_Y_CBCR_H2V2_VENUS:
+		/* 4:2:0 planar: 1.5 bytes/pixel overall, 1 byte luma */
+		bpp->bpp_num = 3;
+		bpp->bpp_den = 2;
+		bpp->bpp_pln = 1;
+		return 0;
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		bpp->bpp_num = 2;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 1;
+		return 0;
+	case MDP_YCRYCB_H2V1:
+		bpp->bpp_num = 2;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 2;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* A request needs blending for transparency, const alpha below NOP, or
+ * any per-pixel-alpha source format.
+ */
+bool mdp3_is_blend(struct mdp_blit_req *req)
+{
+	return (req->transp_mask != MDP_TRANSP_NOP) ||
+	       (req->alpha < MDP_ALPHA_NOP) ||
+	       (req->src.format == MDP_ARGB_8888) ||
+	       (req->src.format == MDP_BGRA_8888) ||
+	       (req->src.format == MDP_RGBA_8888);
+}
+
+/* True when the source ROI and (rotation-adjusted) destination ROI
+ * differ in either dimension, i.e. the blit needs scaling.
+ */
+bool mdp3_is_scale(struct mdp_blit_req *req)
+{
+	uint32_t dst_w = req->dst_rect.w;
+	uint32_t dst_h = req->dst_rect.h;
+
+	/* a 90-degree rotation swaps the destination axes */
+	if (req->flags & MDP_ROT_90)
+		swap(dst_w, dst_h);
+
+	return (req->src_rect.w != dst_w) || (req->src_rect.h != dst_h);
+}
+
+/*
+ * mdp3_clk_calc() - estimate the MDP core clock for a blit request list.
+ *
+ * Each non-smart-blit request contributes its source pixel rate,
+ * weighted by the worst-axis downscale ratio (in percent, floored at
+ * 100) and raised to the blend/CSC latency floors when blending or a
+ * non-RGB source is involved.  Solid-fill pixels tracked in ppp_res are
+ * added, the total is inflated by CLK_FUDGE_NUM/DEN, and the result is
+ * rounded to a supported clock rate.
+ */
+u32 mdp3_clk_calc(struct msm_fb_data_type *mfd,
+		struct blit_req_list *lreq, u32 fps)
+{
+	int i, lcount = 0;
+	struct mdp_blit_req *req;
+	u64 mdp_clk_rate = 0;
+	u32 scale_x = 0, scale_y = 0, scale = 0;
+	u32 blend_l, csc_l;
+
+	lcount = lreq->count;
+
+	blend_l = 100 * BLEND_LATENCY;
+	csc_l = 100 * CSC_LATENCY;
+
+	for (i = 0; i < lcount; i++) {
+		req = &(lreq->req_list[i]);
+
+		/* smart-blit BG layers are not kicked off on their own */
+		if (req->flags & MDP_SMART_BLIT)
+			continue;
+
+		if (mdp3_is_scale(req)) {
+			if (req->flags & MDP_ROT_90) {
+				scale_x = 100 * req->src_rect.h /
+					req->dst_rect.w;
+				scale_y = 100 * req->src_rect.w /
+					req->dst_rect.h;
+			} else {
+				scale_x = 100 * req->src_rect.w /
+					req->dst_rect.w;
+				scale_y = 100 * req->src_rect.h /
+					req->dst_rect.h;
+			}
+			scale = max(scale_x, scale_y);
+		}
+		/* NOTE(review): 'scale' is not reset each iteration, so an
+		 * unscaled request inherits the previous request's ratio -
+		 * confirm this carry-over is intentional.
+		 */
+		scale = scale >= 100 ? scale : 100;
+		if (mdp3_is_blend(req))
+			scale = max(scale, blend_l);
+
+		if (!check_if_rgb(req->src.format))
+			scale = max(scale, csc_l);
+
+		mdp_clk_rate += (req->src_rect.w * req->src_rect.h *
+			scale / 100) * fps;
+	}
+	mdp_clk_rate += (ppp_res.solid_fill_pixel * fps);
+	mdp_clk_rate = fudge_factor(mdp_clk_rate,
+			CLK_FUDGE_NUM, CLK_FUDGE_DEN);
+	pr_debug("mdp_clk_rate for ppp = %llu\n", mdp_clk_rate);
+	mdp_clk_rate = mdp3_clk_round_off(mdp_clk_rate);
+
+	return mdp_clk_rate;
+}
+
+/*
+ * mdp3_adjust_scale_factor() - inflate a bandwidth figure for scaling.
+ *
+ * When the request scales (src and dst ROI differ, accounting for
+ * 90-degree rotation), @bw_req is bumped by factors derived from the
+ * vertical and horizontal size ratios and the plane-0 bytes per pixel.
+ * An unscaled request returns @bw_req unchanged.
+ * NOTE(review): the divide-by-4 weights are empirical tuning constants;
+ * no derivation is visible in this file.
+ */
+u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
+{
+	int src_h, src_w;
+	int dst_h, dst_w;
+
+	src_h = req->src_rect.h;
+	src_w = req->src_rect.w;
+
+	dst_h = req->dst_rect.h;
+	dst_w = req->dst_rect.w;
+
+	if ((!(req->flags & MDP_ROT_90) && src_h == dst_h &&
+		src_w == dst_w) || ((req->flags & MDP_ROT_90) &&
+		src_h == dst_w && src_w == dst_h))
+		return bw_req;
+
+	bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h));
+	bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) +
+		(bw_req * dst_w) / (bpp * src_w));
+	return bw_req;
+}
+
+/*
+ * mdp3_calc_ppp_res() - compute bus-bandwidth and clock votes for a
+ * blit request list and cache them in ppp_res for mdp3_ppp_turnon().
+ *
+ * Per request this sums source read BW, background read BW (only when
+ * blending forces a destination read, or the cached smart-blit BG
+ * layer's BW for the paired FG layer) and destination write BW, all
+ * scaled by the effective frame rate.  Solid fill is accounted
+ * separately through ppp_res.solid_fill_{pixel,byte}.  Always returns 0.
+ */
+int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd,
+	struct blit_req_list *lreq)
+{
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	int i, lcount = 0;
+	struct mdp_blit_req *req;
+	struct bpp_info bpp;
+	u64 old_solid_fill_pixel = 0;
+	u64 new_solid_fill_pixel = 0;
+	u64 src_read_bw = 0;
+	u32 bg_read_bw = 0;
+	u32 dst_write_bw = 0;
+	u64 honest_ppp_ab = 0;
+	u32 fps = 0;
+	int smart_blit_fg_indx = -1;
+	u32 smart_blit_bg_read_bw = 0;
+
+	ATRACE_BEGIN(__func__);
+	lcount = lreq->count;
+	if (lcount == 0) {
+		pr_err("Blit with request count 0, continue to recover!!!\n");
+		ATRACE_END(__func__);
+		return 0;
+	}
+	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
+		/* accumulate solid-fill pixels; skip a new vote when the
+		 * previous fill already covered at least as many pixels or
+		 * a solid-fill vote is already active
+		 */
+		req = &(lreq->req_list[0]);
+		mdp3_get_bpp_info(req->dst.format, &bpp);
+		old_solid_fill_pixel = ppp_res.solid_fill_pixel;
+		new_solid_fill_pixel = req->dst_rect.w * req->dst_rect.h;
+		ppp_res.solid_fill_pixel += new_solid_fill_pixel;
+		ppp_res.solid_fill_byte += req->dst_rect.w * req->dst_rect.h *
+			bpp.bpp_num / bpp.bpp_den;
+		if ((old_solid_fill_pixel >= new_solid_fill_pixel) ||
+			(mdp3_res->solid_fill_vote_en)) {
+			pr_debug("Last fill pixels are higher or fill_en %d\n",
+				mdp3_res->solid_fill_vote_en);
+			ATRACE_END(__func__);
+			return 0;
+		}
+	}
+
+	for (i = 0; i < lcount; i++) {
+		/* Set Smart blit flag before BW calculation */
+		is_blit_optimization_possible(lreq, i);
+		req = &(lreq->req_list[i]);
+
+		/* first valid per-request fps wins; differing later values
+		 * fall back to the panel frame rate
+		 */
+		if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) {
+			if (fps == 0)
+				fps = req->fps;
+			else
+				fps = panel_info->mipi.frame_rate;
+		}
+
+		mdp3_get_bpp_info(req->src.format, &bpp);
+		if (lreq->req_list[i].flags & MDP_SMART_BLIT) {
+			/*
+			 * Flag for smart blit FG layer index
+			 * If blit request at index "n" has
+			 * MDP_SMART_BLIT flag set then it will be used as BG
+			 * layer in smart blit and request at index "n+1"
+			 * will be used as FG layer
+			 */
+			smart_blit_fg_indx = i + 1;
+			bg_read_bw = req->src_rect.w * req->src_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+			bg_read_bw = mdp3_adjust_scale_factor(req,
+						bg_read_bw, bpp.bpp_pln);
+			/* Cache read BW of smart blit BG layer */
+			smart_blit_bg_read_bw = bg_read_bw;
+		} else {
+			src_read_bw = req->src_rect.w * req->src_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+			src_read_bw = mdp3_adjust_scale_factor(req,
+						src_read_bw, bpp.bpp_pln);
+			/* YUV sources carry an extra fudge on read BW */
+			if (!(check_if_rgb(req->src.format))) {
+				src_read_bw = fudge_factor(src_read_bw,
+						YUV_BW_FUDGE_NUM,
+						YUV_BW_FUDGE_DEN);
+			}
+			mdp3_get_bpp_info(req->dst.format, &bpp);
+
+			if (smart_blit_fg_indx == i) {
+				bg_read_bw = smart_blit_bg_read_bw;
+				smart_blit_fg_indx = -1;
+			} else {
+				/* blending requires reading back the
+				 * destination as BG
+				 */
+				if ((req->transp_mask != MDP_TRANSP_NOP) ||
+					(req->alpha < MDP_ALPHA_NOP) ||
+					(req->src.format == MDP_ARGB_8888) ||
+					(req->src.format == MDP_BGRA_8888) ||
+					(req->src.format == MDP_RGBA_8888)) {
+					bg_read_bw = req->dst_rect.w *
+						req->dst_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+					bg_read_bw = mdp3_adjust_scale_factor(
+							req, bg_read_bw,
+							bpp.bpp_pln);
+				} else {
+					bg_read_bw = 0;
+				}
+			}
+			dst_write_bw = req->dst_rect.w * req->dst_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+			honest_ppp_ab += (src_read_bw + bg_read_bw +
+						dst_write_bw);
+		}
+	}
+
+	if (fps == 0)
+		fps = panel_info->mipi.frame_rate;
+
+	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
+		honest_ppp_ab = ppp_res.solid_fill_byte * 4;
+		pr_debug("solid fill honest_ppp_ab %llu\n", honest_ppp_ab);
+	} else {
+		honest_ppp_ab += ppp_res.solid_fill_byte;
+		mdp3_res->solid_fill_vote_en = true;
+	}
+
+	honest_ppp_ab = honest_ppp_ab * fps;
+	if (honest_ppp_ab != ppp_res.next_ab) {
+		ppp_res.next_ab = honest_ppp_ab;
+		ppp_res.next_ib = honest_ppp_ab;
+		ppp_stat->bw_update = true;
+		pr_debug("solid fill ab = %llx, total ab = %llx ",
+			(ppp_res.solid_fill_byte * fps), honest_ppp_ab);
+		pr_debug("(%d fps) Solid_fill_vote %d\n",
+			fps, mdp3_res->solid_fill_vote_en);
+		ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab);
+	}
+	ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq, fps);
+	ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate);
+	ATRACE_END(__func__);
+	return 0;
+}
+
+/*
+ * mdp3_ppp_turnon() - apply or drop the PPP clock and bus votes.
+ *
+ * @on_off: nonzero applies the rates cached in ppp_res; zero votes
+ * everything down to 0.  On bus-quota failure the resource update is
+ * rolled back.  Returns 0 on success or a negative errno.
+ */
+int mdp3_ppp_turnon(struct msm_fb_data_type *mfd, int on_off)
+{
+	uint64_t ab = 0, ib = 0;
+	int rate = 0;
+	int rc;
+
+	if (on_off) {
+		rate = ppp_res.clk_rate;
+		ab = ppp_res.next_ab;
+		ib = ppp_res.next_ib;
+	}
+	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, rate, MDP3_CLIENT_PPP);
+	rc = mdp3_res_update(on_off, 0, MDP3_CLIENT_PPP);
+	if (rc < 0) {
+		pr_err("%s: mdp3_clk_enable failed\n", __func__);
+		return rc;
+	}
+	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
+	if (rc < 0) {
+		/* undo the resource vote taken above */
+		mdp3_res_update(!on_off, 0, MDP3_CLIENT_PPP);
+		pr_err("%s: scale_set_quota failed\n", __func__);
+		return rc;
+	}
+	ppp_stat->bw_on = on_off;
+	ppp_stat->mdp_clk = MDP_CORE_CLK_RATE_SVS;
+	ppp_stat->bw_update = false;
+	return 0;
+}
+
+/*
+ * mdp3_start_ppp() - program one blit operation and kick the PPP engine.
+ *
+ * Bails out if the engine still reports busy from a previous request.
+ * For solid fill the source-fetch block is programmed with a constant
+ * pattern plus the requested fill colour; the kickoff itself is skipped
+ * for smart-blit BG layers (the paired FG layer kicks off for both).
+ */
+void mdp3_start_ppp(struct ppp_blit_op *blit_op)
+{
+	/* Wait for the pipe to clear */
+	if (MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS) &
+			MDP3_PPP_ACTIVE) {
+		pr_err("ppp core is hung up on previous request\n");
+		return;
+	}
+	config_ppp_op_mode(blit_op);
+	if (blit_op->solid_fill) {
+		/* NOTE(review): the raw 0x101xx writes below configure the
+		 * source-fetch block for constant-colour output; the
+		 * register names are not visible in this file - confirm
+		 * against the MDP3 register specification.
+		 */
+		MDP3_REG_WRITE(0x10138, 0x10000000);
+		MDP3_REG_WRITE(0x1014c, 0xffffffff);
+		MDP3_REG_WRITE(0x101b8, 0);
+		MDP3_REG_WRITE(0x101bc, 0);
+		MDP3_REG_WRITE(0x1013c, 0);
+		MDP3_REG_WRITE(0x10140, 0);
+		MDP3_REG_WRITE(0x10144, 0);
+		MDP3_REG_WRITE(0x10148, 0);
+		MDP3_REG_WRITE(MDP3_TFETCH_FILL_COLOR,
+					blit_op->solid_fill_color);
+		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
+					ENABLE_SOLID_FILL);
+	} else {
+		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
+					DISABLE_SOLID_FILL);
+	}
+	/* Skip PPP kickoff for SMART_BLIT BG layer */
+	if (blit_op->mdp_op & MDPOP_SMART_BLIT)
+		pr_debug("Skip mdp3_ppp_kickoff\n");
+	else
+		mdp3_ppp_kickoff();
+
+	/* a non-solid-fill blit retires any pending solid-fill accounting */
+	if (!(blit_op->solid_fill)) {
+		ppp_res.solid_fill_pixel = 0;
+		ppp_res.solid_fill_byte = 0;
+	}
+}
+
+/*
+ * A solid fill with a ROI width of 1 can hang the PPP.  Widen 1-pixel
+ * ROIs to 2 when the surface has room, force both ROI widths even
+ * (odd widths can also hang the engine), and swap alpha-carrying
+ * colour formats for their X variants.  Returns 0 or -EINVAL when the
+ * width-1 case cannot be widened.
+ */
+static int solid_fill_workaround(struct mdp_blit_req *req,
+	struct ppp_blit_op *blit_op)
+{
+	if (blit_op->dst.roi.width == 1) {
+		if (req->dst_rect.x + 2 > req->dst.width) {
+			pr_err("%s: Unable to handle solid fill of width 1",
+					__func__);
+			return -EINVAL;
+		}
+		blit_op->dst.roi.width = 2;
+	}
+	if (blit_op->src.roi.width == 1) {
+		if (req->src_rect.x + 2 > req->src.width) {
+			pr_err("%s: Unable to handle solid fill of width 1",
+					__func__);
+			return -EINVAL;
+		}
+		blit_op->src.roi.width = 2;
+	}
+
+	/* round both ROI widths down to even */
+	blit_op->dst.roi.width &= ~1U;
+	blit_op->src.roi.width &= ~1U;
+
+	/* alpha formats can hang the engine during solid fill; use the
+	 * X (no-alpha) variants instead
+	 */
+	blit_op->src.color_fmt = MDP_RGBX_8888;
+	if (blit_op->dst.color_fmt == MDP_RGBA_8888)
+		blit_op->dst.color_fmt = MDP_RGBX_8888;
+	return 0;
+}
+
+/*
+ * mdp3_ppp_process_req() - translate an mdp_blit_req plus its mapped
+ * buffers into a ppp_blit_op descriptor.
+ *
+ * Fills src/dst geometry and plane addresses (including the chroma
+ * plane offset for ADRENO/VENUS layouts), then converts the request
+ * flags into MDPOP_* operation bits: transparency, const alpha, flips,
+ * rotation, dither, premultiplied alpha, scaling, blur, solid fill and
+ * smart blit.  Returns 0 on success or the solid-fill workaround error.
+ */
+static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
+	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
+	struct mdp3_img_data *dst_data)
+{
+	unsigned long srcp0_start, srcp0_len, dst_start, dst_len;
+	uint32_t dst_width, dst_height;
+	int ret = 0;
+
+	srcp0_start = (unsigned long) src_data->addr;
+	srcp0_len = (unsigned long) src_data->len;
+	dst_start = (unsigned long) dst_data->addr;
+	dst_len = (unsigned long) dst_data->len;
+
+	blit_op->dst.prop.width = req->dst.width;
+	blit_op->dst.prop.height = req->dst.height;
+
+	blit_op->dst.color_fmt = req->dst.format;
+	blit_op->dst.p0 = (void *) dst_start;
+	blit_op->dst.p0 += req->dst.offset;
+
+	blit_op->dst.roi.x = req->dst_rect.x;
+	blit_op->dst.roi.y = req->dst_rect.y;
+	blit_op->dst.roi.width = req->dst_rect.w;
+	blit_op->dst.roi.height = req->dst_rect.h;
+
+	blit_op->src.roi.x = req->src_rect.x;
+	blit_op->src.roi.y = req->src_rect.y;
+	blit_op->src.roi.width = req->src_rect.w;
+	blit_op->src.roi.height = req->src_rect.h;
+
+	blit_op->src.prop.width = req->src.width;
+	blit_op->src.prop.height = req->src.height;
+	blit_op->src.color_fmt = req->src.format;
+
+
+	/* Chroma plane (p1) follows the aligned luma plane for the GPU/
+	 * video NV12 layouts; plain width*height otherwise.
+	 * NOTE(review): the uint32_t pointer casts assume a 32-bit kernel.
+	 */
+	blit_op->src.p0 = (void *) (srcp0_start + req->src.offset);
+	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO)
+		blit_op->src.p1 =
+			(void *) ((uint32_t) blit_op->src.p0 +
+				ALIGN((ALIGN(req->src.width, 32) *
+				ALIGN(req->src.height, 32)), 4096));
+	else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS)
+		blit_op->src.p1 =
+			(void *) ((uint32_t) blit_op->src.p0 +
+				ALIGN((ALIGN(req->src.width, 128) *
+				ALIGN(req->src.height, 32)), 4096));
+	else
+		blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 +
+			req->src.width * req->src.height);
+
+	if (req->flags & MDP_IS_FG)
+		blit_op->mdp_op |= MDPOP_LAYER_IS_FG;
+
+	/* blending check */
+	if (req->transp_mask != MDP_TRANSP_NOP) {
+		blit_op->mdp_op |= MDPOP_TRANSP;
+		blit_op->blend.trans_color =
+			mdp3_calc_tpval(&blit_op->src, req->transp_mask);
+	} else {
+		blit_op->blend.trans_color = 0;
+	}
+
+	/* clamp to 8 bits; below MDP_ALPHA_NOP enables const-alpha blend */
+	req->alpha &= 0xff;
+	if (req->alpha < MDP_ALPHA_NOP) {
+		blit_op->mdp_op |= MDPOP_ALPHAB;
+		blit_op->blend.const_alpha = req->alpha;
+	} else {
+		blit_op->blend.const_alpha = 0xff;
+	}
+
+	/* rotation check */
+	if (req->flags & MDP_FLIP_LR)
+		blit_op->mdp_op |= MDPOP_LR;
+	if (req->flags & MDP_FLIP_UD)
+		blit_op->mdp_op |= MDPOP_UD;
+	if (req->flags & MDP_ROT_90)
+		blit_op->mdp_op |= MDPOP_ROT90;
+	if (req->flags & MDP_DITHER)
+		blit_op->mdp_op |= MDPOP_DITHER;
+
+	if (req->flags & MDP_BLEND_FG_PREMULT)
+		blit_op->mdp_op |= MDPOP_FG_PM_ALPHA;
+
+	/* scale check */
+	if (req->flags & MDP_ROT_90) {
+		dst_width = req->dst_rect.h;
+		dst_height = req->dst_rect.w;
+	} else {
+		dst_width = req->dst_rect.w;
+		dst_height = req->dst_rect.h;
+	}
+
+	if ((blit_op->src.roi.width != dst_width) ||
+			(blit_op->src.roi.height != dst_height))
+		blit_op->mdp_op |= MDPOP_ASCALE;
+
+	if (req->flags & MDP_BLUR)
+		blit_op->mdp_op |= MDPOP_ASCALE | MDPOP_BLUR;
+
+	if (req->flags & MDP_SOLID_FILL) {
+		ret = solid_fill_workaround(req, blit_op);
+		if (ret)
+			return ret;
+
+		/* pack const colour as [31:24]=A [23:16]=B [15:8]=R [7:0]=G */
+		blit_op->solid_fill_color = (req->const_color.g & 0xFF)|
+				(req->const_color.r & 0xFF) << 8 |
+				(req->const_color.b & 0xFF)  << 16 |
+				(req->const_color.alpha & 0xFF) << 24;
+		blit_op->solid_fill = true;
+	} else {
+		blit_op->solid_fill = false;
+	}
+
+	if (req->flags & MDP_SMART_BLIT)
+		blit_op->mdp_op |= MDPOP_SMART_BLIT;
+
+	return ret;
+}
+
+/*
+ * bg tile fetching HW workaround: a narrow (<=16px wide) rotated blit
+ * with blending is processed in 16-line destination tiles, each paired
+ * with a proportionally scaled slice of the source, followed by one
+ * final pass for the remainder.  Fields clobbered by mdp3_start_ppp()
+ * (op flags and plane pointers) are restored after every tile.
+ */
+static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
+	struct mdp_blit_req *req)
+{
+	int dst_h, src_w, i;
+	uint32_t mdp_op = blit_op->mdp_op;
+	void *src_p0 = blit_op->src.p0;
+	void *src_p1 = blit_op->src.p1;
+	void *dst_p0 = blit_op->dst.p0;
+
+	src_w = req->src_rect.w;
+	dst_h = blit_op->dst.roi.height;
+	/* bg tile fetching HW workaround */
+	for (i = 0; i < (req->dst_rect.h / 16); i++) {
+		/* this tile size */
+		blit_op->dst.roi.height = 16;
+		blit_op->src.roi.width =
+			(16 * req->src_rect.w) / req->dst_rect.h;
+
+		/* if it's out of scale range... */
+		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
+			blit_op->src.roi.width =
+				(MDP_SCALE_Q_FACTOR *
+					blit_op->dst.roi.height) /
+				MDP_MAX_X_SCALE_FACTOR;
+		else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
+			blit_op->src.roi.width =
+				(MDP_SCALE_Q_FACTOR *
+					blit_op->dst.roi.height) /
+				MDP_MIN_X_SCALE_FACTOR;
+
+		mdp3_start_ppp(blit_op);
+
+		/* next tile location */
+		blit_op->dst.roi.y += 16;
+		blit_op->src.roi.x += blit_op->src.roi.width;
+
+		/* this is for a remainder update */
+		dst_h -= 16;
+		src_w -= blit_op->src.roi.width;
+		/* restore parameters that may have been overwritten */
+		blit_op->mdp_op = mdp_op;
+		blit_op->src.p0 = src_p0;
+		blit_op->src.p1 = src_p1;
+		blit_op->dst.p0 = dst_p0;
+	}
+
+	/* clamping above can make the slices overshoot the source/dest */
+	if ((dst_h < 0) || (src_w < 0))
+		pr_err("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
+			__LINE__);
+
+	/* remainder update */
+	if ((dst_h > 0) && (src_w > 0)) {
+		u32 tmp_v;
+
+		blit_op->dst.roi.height = dst_h;
+		blit_op->src.roi.width = src_w;
+
+		/* clamp to the scale-ratio limits, rounding the width up */
+		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
+			tmp_v = (MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) /
+				MDP_MAX_X_SCALE_FACTOR +
+				((MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) %
+				MDP_MAX_X_SCALE_FACTOR ? 1 : 0);
+
+			/* move x location as roi width gets bigger */
+			blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width;
+			blit_op->src.roi.width = tmp_v;
+		} else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
+			tmp_v = (MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) /
+				MDP_MIN_X_SCALE_FACTOR +
+				((MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) %
+				MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
+			/*
+			 * we don't move x location for continuity of
+			 * source image
+			 */
+			blit_op->src.roi.width = tmp_v;
+		}
+
+
+		mdp3_start_ppp(blit_op);
+	}
+}
+
+/*
+ * Validate and run one blit request, routing narrow rotated blended
+ * blits through the bg tile fetching HW workaround.
+ * Returns 0 on success or a negative error code.
+ */
+static int mdp3_ppp_blit(struct msm_fb_data_type *mfd,
+	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
+	struct mdp3_img_data *dst_data)
+{
+	struct ppp_blit_op blit_op;
+	bool needs_tile_wa;
+	int ret;
+
+	memset(&blit_op, 0, sizeof(blit_op));
+
+	/* Resolve MDP_FB_FORMAT to the framebuffer's actual pixel format. */
+	if (req->src.format == MDP_FB_FORMAT)
+		req->src.format = mfd->fb_imgType;
+	if (req->dst.format == MDP_FB_FORMAT)
+		req->dst.format = mfd->fb_imgType;
+
+	if (mdp3_ppp_verify_req(req)) {
+		pr_err("%s: invalid image!\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = mdp3_ppp_process_req(&blit_op, req, src_data, dst_data);
+	if (ret) {
+		pr_err("%s: Failed to process the blit request", __func__);
+		return ret;
+	}
+
+	/*
+	 * Destinations at most 16px wide that are rotated 90 degrees and
+	 * involve transparency, blending or per-pixel alpha formats hit
+	 * the bg tile fetching HW issue; split those into 16-line tiles.
+	 */
+	needs_tile_wa =
+		((blit_op.mdp_op & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
+		 (req->src.format == MDP_ARGB_8888) ||
+		 (req->src.format == MDP_BGRA_8888) ||
+		 (req->src.format == MDP_RGBA_8888)) &&
+		(blit_op.mdp_op & MDPOP_ROT90) && (req->dst_rect.w <= 16);
+
+	if (needs_tile_wa)
+		mdp3_ppp_tile_workaround(&blit_op, req);
+	else
+		mdp3_start_ppp(&blit_op);
+
+	return 0;
+}
+
+/*
+ * MDP width-split workaround: for 4bpp destinations whose width % 16 is
+ * 6 or 14 the PPP misbehaves, so the destination ROI is split into two
+ * side-by-side regions blitted separately.  The source is split in the
+ * same proportion (height instead of width when MDP_ROT_90 is set), with
+ * a one-pixel fixup for large upscale ratios.  Which half goes first
+ * depends on the flip/rotate bits (the 0x07 mask covers MDP_FLIP_LR,
+ * MDP_FLIP_UD and MDP_ROT_90).
+ */
+static int mdp3_ppp_blit_workaround(struct msm_fb_data_type *mfd,
+	struct mdp_blit_req *req, unsigned int remainder,
+	struct mdp3_img_data *src_data,
+	struct mdp3_img_data *dst_data)
+{
+	int ret;
+	struct mdp_blit_req splitreq;
+	int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
+	int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
+
+	/* make new request as provide by user */
+	splitreq = *req;
+
+	/* break dest roi at width*/
+	d_y_0 = d_y_1 = req->dst_rect.y;
+	d_h_0 = d_h_1 = req->dst_rect.h;
+	d_x_0 = req->dst_rect.x;
+
+	if (remainder == 14 || remainder == 6)
+		d_w_1 = req->dst_rect.w / 2;
+	else
+		d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
+
+	d_w_0 = req->dst_rect.w - d_w_1;
+	d_x_1 = d_x_0 + d_w_0;
+	/* blit first region */
+	if (((splitreq.flags & 0x07) == 0x07) ||
+		((splitreq.flags & 0x07) == 0x05) ||
+		((splitreq.flags & 0x07) == 0x02) ||
+		((splitreq.flags & 0x07) == 0x0)) {
+
+		if (splitreq.flags & MDP_ROT_90) {
+			s_x_0 = s_x_1 = req->src_rect.x;
+			s_w_0 = s_w_1 = req->src_rect.w;
+			s_y_0 = req->src_rect.y;
+			s_h_1 = (req->src_rect.h * d_w_1) /
+				req->dst_rect.w;
+			s_h_0 = req->src_rect.h - s_h_1;
+			s_y_1 = s_y_0 + s_h_0;
+			/* grow the second slice by one line on big upscales */
+			if (d_w_1 >= 8 * s_h_1) {
+				s_h_1++;
+				s_y_1--;
+			}
+		} else {
+			s_y_0 = s_y_1 = req->src_rect.y;
+			s_h_0 = s_h_1 = req->src_rect.h;
+			s_x_0 = req->src_rect.x;
+			s_w_1 = (req->src_rect.w * d_w_1) /
+				req->dst_rect.w;
+			s_w_0 = req->src_rect.w - s_w_1;
+			s_x_1 = s_x_0 + s_w_0;
+			/* grow the second slice by one column on big upscales */
+			if (d_w_1 >= 8 * s_w_1) {
+				s_w_1++;
+				s_x_1--;
+			}
+		}
+
+		splitreq.src_rect.h = s_h_0;
+		splitreq.src_rect.y = s_y_0;
+		splitreq.dst_rect.h = d_h_0;
+		splitreq.dst_rect.y = d_y_0;
+		splitreq.src_rect.x = s_x_0;
+		splitreq.src_rect.w = s_w_0;
+		splitreq.dst_rect.x = d_x_0;
+		splitreq.dst_rect.w = d_w_0;
+	} else {
+		if (splitreq.flags & MDP_ROT_90) {
+			s_x_0 = s_x_1 = req->src_rect.x;
+			s_w_0 = s_w_1 = req->src_rect.w;
+			s_y_0 = req->src_rect.y;
+			s_h_1 = (req->src_rect.h * d_w_0) /
+				req->dst_rect.w;
+			s_h_0 = req->src_rect.h - s_h_1;
+			s_y_1 = s_y_0 + s_h_0;
+			if (d_w_0 >= 8 * s_h_1) {
+				s_h_1++;
+				s_y_1--;
+			}
+		} else {
+			s_y_0 = s_y_1 = req->src_rect.y;
+			s_h_0 = s_h_1 = req->src_rect.h;
+			s_x_0 = req->src_rect.x;
+			s_w_1 = (req->src_rect.w * d_w_0) /
+				req->dst_rect.w;
+			s_w_0 = req->src_rect.w - s_w_1;
+			s_x_1 = s_x_0 + s_w_0;
+			if (d_w_0 >= 8 * s_w_1) {
+				s_w_1++;
+				s_x_1--;
+			}
+		}
+		splitreq.src_rect.h = s_h_0;
+		splitreq.src_rect.y = s_y_0;
+		splitreq.dst_rect.h = d_h_1;
+		splitreq.dst_rect.y = d_y_1;
+		splitreq.src_rect.x = s_x_0;
+		splitreq.src_rect.w = s_w_0;
+		splitreq.dst_rect.x = d_x_1;
+		splitreq.dst_rect.w = d_w_1;
+	}
+
+	/* No need to split in height */
+	ret = mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
+
+	if (ret)
+		return ret;
+	/* blit second region */
+	if (((splitreq.flags & 0x07) == 0x07) ||
+		((splitreq.flags & 0x07) == 0x05) ||
+		((splitreq.flags & 0x07) == 0x02) ||
+		((splitreq.flags & 0x07) == 0x0)) {
+		splitreq.src_rect.h = s_h_1;
+		splitreq.src_rect.y = s_y_1;
+		splitreq.dst_rect.h = d_h_1;
+		splitreq.dst_rect.y = d_y_1;
+		splitreq.src_rect.x = s_x_1;
+		splitreq.src_rect.w = s_w_1;
+		splitreq.dst_rect.x = d_x_1;
+		splitreq.dst_rect.w = d_w_1;
+	} else {
+		splitreq.src_rect.h = s_h_1;
+		splitreq.src_rect.y = s_y_1;
+		splitreq.dst_rect.h = d_h_0;
+		splitreq.dst_rect.y = d_y_0;
+		splitreq.src_rect.x = s_x_1;
+		splitreq.src_rect.w = s_w_1;
+		splitreq.dst_rect.x = d_x_0;
+		splitreq.dst_rect.w = d_w_0;
+	}
+
+	/* No need to split in height ... just width */
+	return mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
+}
+
+/*
+ * Entry point for a single blit: validates the rectangles, looks up the
+ * destination bpp, and dispatches either the width-split workaround or
+ * a plain blit.  Returns 0 on success or a negative error code.
+ */
+int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
+	struct mdp_blit_req *req,
+	struct mdp3_img_data *src_data,
+	struct mdp3_img_data *dst_data)
+{
+	unsigned int width_rem;
+	int bpp;
+
+	/* A zero-area source is an error; a zero-area dest is a no-op. */
+	if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
+		pr_err("mdp_ppp: src img of zero size!\n");
+		return -EINVAL;
+	}
+	if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
+		return 0;
+
+	bpp = ppp_get_bpp(req->dst.format, mfd->fb_imgType);
+	if (bpp <= 0) {
+		pr_err("mdp_ppp: incorrect bpp!\n");
+		return -EINVAL;
+	}
+
+	/* MDP width split workaround */
+	width_rem = (req->dst_rect.w) % 16;
+	if (bpp == 4 && (width_rem == 6 || width_rem == 14) &&
+			!(req->flags & MDP_SOLID_FILL))
+		return mdp3_ppp_blit_workaround(mfd, req, width_rem,
+						src_data, dst_data);
+
+	return mdp3_ppp_blit(mfd, req, src_data, dst_data);
+}
+
+/*
+ * Block until every acquire fence attached to @req has signaled,
+ * dropping each fence reference as it completes.  If a wait fails, the
+ * loop breaks and the trailing cleanup releases the remaining
+ * references (including the fence that failed) so nothing is leaked.
+ * acq_fen_cnt is reset to 0 in all cases.
+ */
+void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
+{
+	int i, ret = 0;
+
+	ATRACE_BEGIN(__func__);
+	/* buf sync */
+	for (i = 0; i < req->acq_fen_cnt; i++) {
+		ret = sync_fence_wait(req->acq_fen[i],
+				WAIT_FENCE_FINAL_TIMEOUT);
+		if (ret < 0) {
+			pr_err("%s: sync_fence_wait failed! ret = %x\n",
+				__func__, ret);
+			break;
+		}
+		sync_fence_put(req->acq_fen[i]);
+	}
+	ATRACE_END(__func__);
+	if (ret < 0) {
+		/* release the fences the loop did not get to (i onward) */
+		while (i < req->acq_fen_cnt) {
+			sync_fence_put(req->acq_fen[i]);
+			i++;
+		}
+	}
+	req->acq_fen_cnt = 0;
+}
+
+/*
+ * Advance the PPP sw_sync timeline by one step, signaling the oldest
+ * pending release fence, and rotate cur_rel_fence into last_rel_fence.
+ */
+void mdp3_ppp_signal_timeline(struct blit_req_list *req)
+{
+	sw_sync_timeline_inc(ppp_stat->timeline, 1);
+	MDSS_XLOG(ppp_stat->timeline->value, ppp_stat->timeline_value);
+	req->last_rel_fence = req->cur_rel_fence;
+	req->cur_rel_fence = 0;
+}
+
+
+/*
+ * Undo mdp3_ppp_handle_buf_sync() after a later failure: return the
+ * reserved fd, drop the release fence, roll the timeline value back,
+ * and release any acquire fences still held.
+ */
+static void mdp3_ppp_deinit_buf_sync(struct blit_req_list *req)
+{
+	int i;
+
+	put_unused_fd(req->cur_rel_fen_fd);
+	sync_fence_put(req->cur_rel_fence);
+	req->cur_rel_fence = NULL;
+	req->cur_rel_fen_fd = 0;
+	/* balance the increment done when the sync point was created */
+	ppp_stat->timeline_value--;
+	for (i = 0; i < req->acq_fen_cnt; i++)
+		sync_fence_put(req->acq_fen[i]);
+	req->acq_fen_cnt = 0;
+}
+
+/*
+ * Attach the userspace acquire fences from @buf_sync to @req and create
+ * the request's release fence on the PPP sw_sync timeline.  With
+ * MDP_BUF_SYNC_FLAG_WAIT the acquire fences are waited on immediately.
+ * On success req->cur_rel_fence holds the release fence (the fd for it
+ * is installed later by the caller).  Returns 0 or a negative error,
+ * with all partially acquired fences released on failure.
+ */
+static int mdp3_ppp_handle_buf_sync(struct blit_req_list *req,
+	struct mdp_buf_sync *buf_sync)
+{
+	int i, fence_cnt = 0, ret = 0;
+	int acq_fen_fd[MDP_MAX_FENCE_FD];
+	struct sync_fence *fence;
+
+	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+		(ppp_stat->timeline == NULL))
+		return -EINVAL;
+
+	/* ret stays 0 when there are no fds, so the check below is safe */
+	if (buf_sync->acq_fen_fd_cnt)
+		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+				buf_sync->acq_fen_fd_cnt * sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_from_user failed\n", __func__);
+		return ret;
+	}
+	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
+		fence = sync_fence_fdget(acq_fen_fd[i]);
+		if (fence == NULL) {
+			pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
+				acq_fen_fd[i]);
+			ret = -EINVAL;
+			break;
+		}
+		req->acq_fen[i] = fence;
+	}
+	/* i == number of fences successfully acquired so far */
+	fence_cnt = i;
+	if (ret)
+		goto buf_sync_err_1;
+	req->acq_fen_cnt = fence_cnt;
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+		mdp3_ppp_wait_for_fence(req);
+
+	req->cur_rel_sync_pt = sw_sync_pt_create(ppp_stat->timeline,
+			ppp_stat->timeline_value++);
+	MDSS_XLOG(ppp_stat->timeline_value);
+	if (req->cur_rel_sync_pt == NULL) {
+		pr_err("%s: cannot create sync point\n", __func__);
+		ret = -ENOMEM;
+		goto buf_sync_err_2;
+	}
+	/* create fence */
+	req->cur_rel_fence = sync_fence_create("ppp-fence",
+			req->cur_rel_sync_pt);
+	if (req->cur_rel_fence == NULL) {
+		sync_pt_free(req->cur_rel_sync_pt);
+		req->cur_rel_sync_pt = NULL;
+		pr_err("%s: cannot create fence\n", __func__);
+		ret = -ENOMEM;
+		goto buf_sync_err_2;
+	}
+	/* create fd */
+	return ret;
+buf_sync_err_2:
+	ppp_stat->timeline_value--;
+buf_sync_err_1:
+	for (i = 0; i < fence_cnt; i++)
+		sync_fence_put(req->acq_fen[i]);
+	req->acq_fen_cnt = 0;
+	return ret;
+}
+
+/* Copy @req into the ring buffer slot at the push cursor and advance it. */
+void mdp3_ppp_req_push(struct blit_req_queue *req_q, struct blit_req_list *req)
+{
+	req_q->req[req_q->push_idx] = *req;
+	req_q->count++;
+	/* wrap the cursor around the fixed-size ring */
+	req_q->push_idx = (req_q->push_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
+}
+
+/* Peek at the oldest queued request without removing it; NULL if empty. */
+struct blit_req_list *mdp3_ppp_next_req(struct blit_req_queue *req_q)
+{
+	if (!req_q->count)
+		return NULL;
+
+	return &req_q->req[req_q->pop_idx];
+}
+
+/* Remove the oldest ring entry: advance the pop cursor, shrink the count. */
+void mdp3_ppp_req_pop(struct blit_req_queue *req_q)
+{
+	req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
+	req_q->count--;
+}
+
+/*
+ * Timer callback fired after MDP_RELEASE_BW_TIMEOUT of PPP inactivity:
+ * drops the solid-fill vote and defers the actual bandwidth release to
+ * the workqueue (timer context cannot take the config mutex itself).
+ */
+void mdp3_free_fw_timer_func(unsigned long arg)
+{
+	mdp3_res->solid_fill_vote_en = false;
+	schedule_work(&ppp_stat->free_bw_work);
+}
+
+/* Workqueue handler: turn PPP clocks/bandwidth off if they are still on. */
+static void mdp3_free_bw_wq_handler(struct work_struct *work)
+{
+	struct msm_fb_data_type *mfd = ppp_stat->mfd;
+
+	mutex_lock(&ppp_stat->config_ppp_mutex);
+	if (ppp_stat->bw_on)
+		mdp3_ppp_turnon(mfd, 0);
+	mutex_unlock(&ppp_stat->config_ppp_mutex);
+}
+
+/*
+ * True when @req would trigger either of the two PPP hardware
+ * workarounds (width split or bg tile fetching), in which case smart
+ * blit must not be used for it.
+ */
+static bool is_hw_workaround_needed(struct mdp_blit_req req)
+{
+	uint32_t bpp = ppp_get_bpp(req.dst.format, ppp_stat->mfd->fb_imgType);
+	uint32_t width_rem = req.dst_rect.w % 16;
+
+	/* MDP width split workaround */
+	if (bpp == 4 && (width_rem == 6 || width_rem == 14) &&
+			!(req.flags & MDP_SOLID_FILL))
+		return true;
+
+	/* bg tile fetching HW workaround */
+	if ((req.flags & MDP_ROT_90) && (req.dst_rect.w <= 16) &&
+			((req.alpha < MDP_ALPHA_NOP) ||
+			 (req.transp_mask != MDP_TRANSP_NOP) ||
+			 (req.src.format == MDP_ARGB_8888) ||
+			 (req.src.format == MDP_BGRA_8888) ||
+			 (req.src.format == MDP_RGBA_8888)))
+		return true;
+
+	return false;
+}
+
+/*
+ * True when the two requests render to the same destination ROI: either
+ * literally identical rectangles, or same-sized rectangles that both
+ * cover the full panel (source-cropped full-screen layers).
+ */
+static bool is_roi_equal(struct mdp_blit_req req0,
+	 struct mdp_blit_req req1)
+{
+	struct mdss_panel_info *panel_info = ppp_stat->mfd->panel_info;
+	bool same_size = (req0.dst_rect.w == req1.dst_rect.w) &&
+			 (req0.dst_rect.h == req1.dst_rect.h);
+
+	/* identical destination rectangles */
+	if (same_size &&
+	    (req0.dst_rect.x == req1.dst_rect.x) &&
+	    (req0.dst_rect.y == req1.dst_rect.y))
+		return true;
+
+	/* same-sized layers spanning the whole panel coincide by definition */
+	if (same_size &&
+	    (req0.dst_rect.w == panel_info->xres) &&
+	    (req0.dst_rect.h == panel_info->yres))
+		return true;
+
+	return false;
+}
+
+/* True when the source and destination rectangles differ in size. */
+static bool is_scaling_needed(struct mdp_blit_req req)
+{
+	return (req.src_rect.w != req.dst_rect.w) ||
+	       (req.src_rect.h != req.dst_rect.h);
+}
+
+/*
+ * Decide whether the request at @indx (BG) can be smart-blitted with
+ * the request at @indx + 1 (FG).  May mutate req->req_list: it can set
+ * MDP_SMART_BLIT on the BG layer and, in the YUV case, swap the two
+ * requests (and their src/dst buffer data) so the UI layer becomes the
+ * background.  Returns true when smart blit was enabled for this pair.
+ */
+static bool is_blit_optimization_possible(struct blit_req_list *req, int indx)
+{
+	int next = indx + 1;
+	bool status = false;
+	struct mdp3_img_data tmp_data;
+	bool dst_roi_equal = false;
+	bool hw_woraround_active = false;
+	struct mdp_blit_req bg_req;
+	struct mdp_blit_req fg_req;
+
+	if (!(mdp3_res->smart_blit_en)) {
+		pr_debug("Smart BLIT disabled from sysfs\n");
+		return status;
+	}
+	if (next < req->count) {
+		bg_req = req->req_list[indx];
+		fg_req = req->req_list[next];
+		hw_woraround_active = is_hw_workaround_needed(bg_req);
+		dst_roi_equal = is_roi_equal(bg_req, fg_req);
+		/*
+		 * Check userspace Smart BLIT Flag for current and next
+		 * request Flag for smart blit FG layer index If blit
+		 * request at index "n" has MDP_SMART_BLIT flag set then
+		 * it will be used as BG layer in smart blit
+		 * and request at index "n+1" will be used as FG layer
+		 */
+		if ((bg_req.flags & MDP_SMART_BLIT) &&
+		(!(fg_req.flags & MDP_SMART_BLIT)) &&
+		(!(hw_woraround_active)))
+			status = true;
+		/*
+		 * Enable SMART blit between request 0(BG) & request 1(FG) when
+		 * destination ROI of BG and FG layer are same,
+		 * No scaling on BG layer
+		 * No rotation on BG Layer.
+		 * BG Layer color format is RGB and marked as MDP_IS_FG.
+		 */
+		else if ((mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) &&
+		(indx == 0) && (dst_roi_equal) &&
+		(bg_req.flags & MDP_IS_FG) &&
+		(!(is_scaling_needed(bg_req))) &&
+		(!(bg_req.flags & (MDP_ROT_90))) &&
+		(check_if_rgb(bg_req.src.format)) &&
+		(!(hw_woraround_active))) {
+			status = true;
+			req->req_list[indx].flags |= MDP_SMART_BLIT;
+			pr_debug("Optimize RGB Blit for Req Indx %d\n", indx);
+		}
+		/*
+		 * Swap BG and FG layer to enable SMART blit between request
+		 * 0(BG) & request 1(FG) when destination ROI of BG and FG
+		 * layer are same, No scaling on FG and BG layer
+		 * No rotation on FG Layer. BG Layer color format is YUV
+		 */
+		else if ((indx == 0) &&
+		(mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) &&
+		(!(fg_req.flags & (MDP_ROT_90))) && (dst_roi_equal) &&
+		(!(check_if_rgb(bg_req.src.format))) &&
+		(!(hw_woraround_active))) {
+			/*
+			 * swap blit requests at index 0 and 1. YUV layer at
+			 * index 0 is replaced with UI layer request present
+			 * at index 1. Since UI layer will be in background
+			 * set IS_FG flag and clear it from YUV layer flags
+			 */
+			if (!(is_scaling_needed(req->req_list[next]))) {
+				if (bg_req.flags & MDP_IS_FG) {
+					req->req_list[indx].flags &=
+						~MDP_IS_FG;
+					req->req_list[next].flags |= MDP_IS_FG;
+				}
+				bg_req = req->req_list[next];
+				req->req_list[next] = req->req_list[indx];
+				req->req_list[indx] = bg_req;
+
+				tmp_data = req->src_data[next];
+				req->src_data[next] = req->src_data[indx];
+				req->src_data[indx] = tmp_data;
+
+				tmp_data = req->dst_data[next];
+				req->dst_data[next] = req->dst_data[indx];
+				req->dst_data[indx] = tmp_data;
+				status = true;
+				req->req_list[indx].flags |= MDP_SMART_BLIT;
+				pr_debug("Optimize YUV Blit for Req Indx %d\n",
+					indx);
+			}
+		}
+	}
+	return status;
+}
+
+/*
+ * kthread work handler: drains the request queue, voting clocks/bw as
+ * needed, running each blit (with smart-blit pairing when possible),
+ * releasing buffers and signaling release fences, then arming the
+ * bandwidth-release timer.
+ */
+static void mdp3_ppp_blit_handler(struct kthread_work *work)
+{
+	struct msm_fb_data_type *mfd = ppp_stat->mfd;
+	struct blit_req_list *req;
+	int i, rc = 0;
+	bool smart_blit = false;
+	/* request index of the FG layer in a smart blit, -1 when unused */
+	int smart_blit_fg_index = -1;
+
+	mutex_lock(&ppp_stat->config_ppp_mutex);
+	req = mdp3_ppp_next_req(&ppp_stat->req_q);
+	if (!req) {
+		mutex_unlock(&ppp_stat->config_ppp_mutex);
+		return;
+	}
+
+	if (!ppp_stat->bw_on) {
+		/*
+		 * BUGFIX: capture the return value; rc was never assigned
+		 * here before, so the error check below could never fire.
+		 */
+		rc = mdp3_ppp_turnon(mfd, 1);
+		if (rc < 0) {
+			mutex_unlock(&ppp_stat->config_ppp_mutex);
+			pr_err("%s: Enable ppp resources failed\n", __func__);
+			return;
+		}
+	}
+	while (req) {
+		mdp3_ppp_wait_for_fence(req);
+		mdp3_calc_ppp_res(mfd, req);
+		if (ppp_res.clk_rate != ppp_stat->mdp_clk) {
+			ppp_stat->mdp_clk = ppp_res.clk_rate;
+			mdp3_clk_set_rate(MDP3_CLK_MDP_SRC,
+					ppp_stat->mdp_clk, MDP3_CLIENT_PPP);
+		}
+		if (ppp_stat->bw_update) {
+			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP,
+					ppp_res.next_ab, ppp_res.next_ib);
+			if (rc < 0) {
+				pr_err("%s: bw set quota failed\n", __func__);
+				/*
+				 * BUGFIX: drop the mutex before bailing out;
+				 * returning with it held would deadlock every
+				 * later blit and the free-bw worker.
+				 */
+				mutex_unlock(&ppp_stat->config_ppp_mutex);
+				return;
+			}
+			ppp_stat->bw_update = false;
+		}
+		/* BUGFIX: label must match the ATRACE_END ("mpd3" typo) */
+		ATRACE_BEGIN("mdp3_ppp_start");
+		for (i = 0; i < req->count; i++) {
+			smart_blit = is_blit_optimization_possible(req, i);
+			if (smart_blit)
+				/*
+				 * Blit request index of FG layer in
+				 * smart blit
+				 */
+				smart_blit_fg_index = i + 1;
+			if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
+				/* Do the actual blit. */
+				if (!rc) {
+					rc = mdp3_ppp_start_blit(mfd,
+						&(req->req_list[i]),
+						&req->src_data[i],
+						&req->dst_data[i]);
+				}
+				/* Unmap blit source buffer */
+				if (smart_blit == false) {
+					mdp3_put_img(&req->src_data[i],
+						MDP3_CLIENT_PPP);
+				}
+				if (smart_blit_fg_index == i) {
+					/* Unmap smart blit BG buffer */
+					mdp3_put_img(&req->src_data[i - 1],
+						MDP3_CLIENT_PPP);
+					smart_blit_fg_index = -1;
+				}
+				mdp3_put_img(&req->dst_data[i],
+					MDP3_CLIENT_PPP);
+				smart_blit = false;
+			}
+		}
+		ATRACE_END("mdp3_ppp_start");
+		/* Signal to release fence */
+		mutex_lock(&ppp_stat->req_mutex);
+		mdp3_ppp_signal_timeline(req);
+		mdp3_ppp_req_pop(&ppp_stat->req_q);
+		req = mdp3_ppp_next_req(&ppp_stat->req_q);
+		if (ppp_stat->wait_for_pop)
+			complete(&ppp_stat->pop_q_comp);
+		mutex_unlock(&ppp_stat->req_mutex);
+	}
+	mod_timer(&ppp_stat->free_bw_timer, jiffies +
+		msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT));
+	mutex_unlock(&ppp_stat->config_ppp_mutex);
+}
+
+/*
+ * Copy a userspace blit list into the next free queue slot, set up its
+ * buf-sync fences, pin the src/dst ion buffers, then queue the blit
+ * work.  For synchronous callers, waits on the release fence before
+ * returning.  Returns 0 or a negative error with all resources undone.
+ */
+int mdp3_ppp_parse_req(void __user *p,
+	struct mdp_async_blit_req_list *req_list_header,
+	int async)
+{
+	struct blit_req_list *req;
+	struct blit_req_queue *req_q = &ppp_stat->req_q;
+	struct sync_fence *fence = NULL;
+	int count, rc, idx, i;
+
+	count = req_list_header->count;
+	/*
+	 * BUGFIX: bound the user-supplied count before it sizes the
+	 * copy_from_user() below; an oversized count overflowed the
+	 * fixed req_list[] array in struct blit_req_list.
+	 */
+	if (count <= 0 || count > MAX_BLIT_REQ)
+		return -EINVAL;
+
+	mutex_lock(&ppp_stat->req_mutex);
+	/* back off while the ring is full; the blit worker signals pops */
+	while (req_q->count >= MDP3_PPP_MAX_LIST_REQ) {
+		ppp_stat->wait_for_pop = true;
+		mutex_unlock(&ppp_stat->req_mutex);
+		rc = wait_for_completion_timeout(
+		   &ppp_stat->pop_q_comp, 5 * HZ);
+		if (rc == 0) {
+			/* This will only occur if there is serious problem */
+			pr_err("%s: timeout exiting queuing request\n",
+				   __func__);
+			return -EBUSY;
+		}
+		mutex_lock(&ppp_stat->req_mutex);
+		ppp_stat->wait_for_pop = false;
+	}
+	idx = req_q->push_idx;
+	req = &req_q->req[idx];
+
+	if (copy_from_user(&req->req_list, p,
+			sizeof(struct mdp_blit_req) * count)) {
+		mutex_unlock(&ppp_stat->req_mutex);
+		return -EFAULT;
+	}
+
+	rc = mdp3_ppp_handle_buf_sync(req, &req_list_header->sync);
+	if (rc < 0) {
+		pr_err("%s: Failed create sync point\n", __func__);
+		mutex_unlock(&ppp_stat->req_mutex);
+		return rc;
+	}
+	req->count = count;
+
+	/* We need to grab ion handle while running in client thread */
+	for (i = 0; i < count; i++) {
+		rc = mdp3_ppp_get_img(&req->req_list[i].src,
+				&req->req_list[i], &req->src_data[i]);
+		if (rc < 0 || req->src_data[i].len == 0) {
+			pr_err("mdp_ppp: couldn't retrieve src img from mem\n");
+			goto parse_err_1;
+		}
+
+		rc = mdp3_ppp_get_img(&req->req_list[i].dst,
+				&req->req_list[i], &req->dst_data[i]);
+		if (rc < 0 || req->dst_data[i].len == 0) {
+			mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
+			pr_err("mdp_ppp: couldn't retrieve dest img from mem\n");
+			goto parse_err_1;
+		}
+	}
+
+	if (async) {
+		/* hand the release fence back to userspace as an fd */
+		req->cur_rel_fen_fd = get_unused_fd_flags(0);
+		if (req->cur_rel_fen_fd < 0) {
+			pr_err("%s: get_unused_fd_flags failed\n", __func__);
+			rc  = -ENOMEM;
+			goto parse_err_1;
+		}
+		sync_fence_install(req->cur_rel_fence, req->cur_rel_fen_fd);
+		rc = copy_to_user(req_list_header->sync.rel_fen_fd,
+			&req->cur_rel_fen_fd, sizeof(int));
+		if (rc) {
+			pr_err("%s:copy_to_user failed\n", __func__);
+			goto parse_err_2;
+		}
+	} else {
+		fence = req->cur_rel_fence;
+	}
+
+	mdp3_ppp_req_push(req_q, req);
+	mutex_unlock(&ppp_stat->req_mutex);
+	kthread_queue_work(&ppp_stat->kworker, &ppp_stat->blit_work);
+	if (!async) {
+		/* wait for release fence */
+		rc = sync_fence_wait(fence,
+				5 * MSEC_PER_SEC);
+		if (rc < 0)
+			pr_err("%s: sync blit! rc = %x\n", __func__, rc);
+
+		sync_fence_put(fence);
+		fence = NULL;
+	}
+	return 0;
+
+parse_err_2:
+	put_unused_fd(req->cur_rel_fen_fd);
+parse_err_1:
+	/* release buffers pinned for requests before the failing one */
+	for (i--; i >= 0; i--) {
+		mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
+		mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
+	}
+	mdp3_ppp_deinit_buf_sync(req);
+	mutex_unlock(&ppp_stat->req_mutex);
+	return rc;
+}
+
+/*
+ * One-time PPP bring-up for @mfd: allocates the global ppp_stat state,
+ * creates the sw_sync release-fence timeline, starts the SCHED_FIFO
+ * blit kthread worker, and initializes the bandwidth-release machinery.
+ * Returns 0 or a negative error with ppp_stat cleaned up.
+ */
+int mdp3_ppp_res_init(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct sched_param param = {.sched_priority = 16};
+	const char timeline_name[] = "mdp3_ppp";
+
+	ppp_stat = kzalloc(sizeof(struct ppp_status), GFP_KERNEL);
+	if (!ppp_stat)
+		return -ENOMEM;
+
+	/*Setup sync_pt timeline for ppp*/
+	ppp_stat->timeline = sw_sync_timeline_create(timeline_name);
+	if (ppp_stat->timeline == NULL) {
+		pr_err("%s: cannot create time line\n", __func__);
+		/* BUGFIX: don't leak ppp_stat on this error path */
+		kfree(ppp_stat);
+		ppp_stat = NULL;
+		return -ENOMEM;
+	}
+	ppp_stat->timeline_value = 1;
+
+	init_kthread_worker(&ppp_stat->kworker);
+	init_kthread_work(&ppp_stat->blit_work, mdp3_ppp_blit_handler);
+	ppp_stat->blit_thread = kthread_run(kthread_worker_fn,
+					&ppp_stat->kworker,
+					"mdp3_ppp");
+
+	if (IS_ERR(ppp_stat->blit_thread)) {
+		rc = PTR_ERR(ppp_stat->blit_thread);
+		pr_err("ERROR: unable to start ppp blit thread,err = %d\n",
+							rc);
+		ppp_stat->blit_thread = NULL;
+		/*
+		 * BUGFIX: release the status struct on failure as well.
+		 * NOTE(review): the sw_sync timeline object still leaks
+		 * here; add a timeline destroy call if this tree's sw_sync
+		 * API exposes one.
+		 */
+		kfree(ppp_stat);
+		ppp_stat = NULL;
+		return rc;
+	}
+	if (sched_setscheduler(ppp_stat->blit_thread, SCHED_FIFO, &param))
+		pr_warn("set priority failed for mdp3 blit thread\n");
+
+	INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler);
+	init_completion(&ppp_stat->pop_q_comp);
+	mutex_init(&ppp_stat->req_mutex);
+	mutex_init(&ppp_stat->config_ppp_mutex);
+	init_timer(&ppp_stat->free_bw_timer);
+	ppp_stat->free_bw_timer.function = mdp3_free_fw_timer_func;
+	ppp_stat->free_bw_timer.data = 0;
+	ppp_stat->mfd = mfd;
+	mdp3_ppp_callback_setup();
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.h b/drivers/video/fbdev/msm/mdp3_ppp.h
new file mode 100644
index 0000000..1f82851
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp.h
@@ -0,0 +1,430 @@
+/* Copyright (c) 2007, 2013, 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDP3_PPP_H
+#define MDP3_PPP_H
+#include "mdp3.h"
+#include "mdss_fb.h"
+
+#define PPP_WRITEL(val, off) MDP3_REG_WRITE(off, val)
+
+#define MAX_BLIT_REQ 16
+#define PPP_UPSCALE_MAX 64
+#define PPP_BLUR_SCALE_MAX 128
+#define PPP_LUT_MAX 256
+
+#define MDPOP_SMART_BLIT BIT(31) /* blit optimization flag */
+
+/* MDP PPP Operations */
+#define MDPOP_NOP 0
+#define MDPOP_LR BIT(0) /* left to right flip */
+#define MDPOP_UD BIT(1) /* up and down flip */
+#define MDPOP_ROT90 BIT(2) /* rotate image to 90 degree */
+#define MDPOP_ROT180 (MDPOP_UD|MDPOP_LR)
+#define MDPOP_ROT270 (MDPOP_ROT90|MDPOP_UD|MDPOP_LR)
+#define MDPOP_ASCALE BIT(7)
+#define MDPOP_ALPHAB BIT(8) /* enable alpha blending */
+#define MDPOP_TRANSP BIT(9) /* enable transparency */
+#define MDPOP_DITHER BIT(10) /* enable dither */
+#define MDPOP_SHARPENING BIT(11) /* enable sharpening */
+#define MDPOP_BLUR BIT(12) /* enable blur */
+#define MDPOP_FG_PM_ALPHA BIT(13)
+#define MDPOP_LAYER_IS_FG BIT(14)
+
+#define MDPOP_ROTATION (MDPOP_ROT90|MDPOP_LR|MDPOP_UD)
+
+#define PPP_OP_CONVERT_YCBCR2RGB BIT(2)
+#define PPP_OP_CONVERT_ON BIT(3)
+#define PPP_OP_SCALE_X_ON BIT(0)
+#define PPP_OP_SCALE_Y_ON BIT(1)
+#define PPP_OP_ROT_ON BIT(8)
+#define PPP_OP_ROT_90 BIT(9)
+#define PPP_OP_FLIP_LR BIT(10)
+#define PPP_OP_FLIP_UD BIT(11)
+#define PPP_OP_BLEND_ON BIT(12)
+#define PPP_OP_BLEND_CONSTANT_ALPHA BIT(14)
+#define PPP_OP_BLEND_BG_ALPHA BIT(13)
+#define PPP_OP_BLEND_EQ_REVERSE BIT(15)
+#define PPP_OP_DITHER_EN BIT(16)
+#define PPP_BLEND_CALPHA_TRNASP BIT(24)
+
+#define PPP_OP_BLEND_SRCPIXEL_ALPHA 0
+#define PPP_OP_BLEND_ALPHA_BLEND_NORMAL 0
+#define PPP_OP_BLEND_ALPHA_BLEND_REVERSE BIT(15)
+
+#define PPP_BLEND_BG_USE_ALPHA_SEL (1 << 0)
+#define PPP_BLEND_BG_ALPHA_REVERSE (1 << 3)
+#define PPP_BLEND_BG_SRCPIXEL_ALPHA (0 << 1)
+#define PPP_BLEND_BG_DSTPIXEL_ALPHA (1 << 1)
+#define PPP_BLEND_BG_CONSTANT_ALPHA (2 << 1)
+#define PPP_BLEND_BG_CONST_ALPHA_VAL(x) ((x) << 24)
+#define PPP_OP_BG_CHROMA_H2V1 BIT(25)
+
+#define CLR_G 0x0
+#define CLR_B 0x1
+#define CLR_R 0x2
+#define CLR_ALPHA 0x3
+
+#define CLR_Y CLR_G
+#define CLR_CB CLR_B
+#define CLR_CR CLR_R
+
+/* from lsb to msb */
+#define PPP_GET_PACK_PATTERN(a, x, y, z, bit) \
+ (((a)<<(bit*3))|((x)<<(bit*2))|((y)<<bit)|(z))
+
+/* Frame unpacking */
+#define PPP_C0G_8BITS (BIT(1)|BIT(0))
+#define PPP_C1B_8BITS (BIT(3)|BIT(2))
+#define PPP_C2R_8BITS (BIT(5)|BIT(4))
+#define PPP_C3A_8BITS (BIT(7)|BIT(6))
+
+#define PPP_C0G_6BITS BIT(1)
+#define PPP_C1B_6BITS BIT(3)
+#define PPP_C2R_6BITS BIT(5)
+
+#define PPP_C0G_5BITS BIT(0)
+#define PPP_C1B_5BITS BIT(2)
+#define PPP_C2R_5BITS BIT(4)
+
+#define PPP_SRC_C3_ALPHA_EN BIT(8)
+
+#define PPP_SRC_BPP_INTERLVD_1BYTES 0
+#define PPP_SRC_BPP_INTERLVD_2BYTES BIT(9)
+#define PPP_SRC_BPP_INTERLVD_3BYTES BIT(10)
+#define PPP_SRC_BPP_INTERLVD_4BYTES (BIT(10)|BIT(9))
+
+#define PPP_SRC_BPP_ROI_ODD_X BIT(11)
+#define PPP_SRC_BPP_ROI_ODD_Y BIT(12)
+#define PPP_SRC_INTERLVD_2COMPONENTS BIT(13)
+#define PPP_SRC_INTERLVD_3COMPONENTS BIT(14)
+#define PPP_SRC_INTERLVD_4COMPONENTS (BIT(14)|BIT(13))
+
+#define PPP_SRC_UNPACK_TIGHT BIT(17)
+#define PPP_SRC_UNPACK_LOOSE 0
+#define PPP_SRC_UNPACK_ALIGN_LSB 0
+#define PPP_SRC_UNPACK_ALIGN_MSB BIT(18)
+
+#define PPP_SRC_FETCH_PLANES_INTERLVD 0
+#define PPP_SRC_FETCH_PLANES_PSEUDOPLNR BIT(20)
+
+#define PPP_OP_SRC_CHROMA_H2V1 BIT(18)
+#define PPP_OP_SRC_CHROMA_H1V2 BIT(19)
+#define PPP_OP_SRC_CHROMA_420 (BIT(18)|BIT(19))
+#define PPP_OP_SRC_CHROMA_OFFSITE BIT(20)
+
+#define PPP_DST_PACKET_CNT_INTERLVD_2ELEM BIT(9)
+#define PPP_DST_PACKET_CNT_INTERLVD_3ELEM BIT(10)
+#define PPP_DST_PACKET_CNT_INTERLVD_4ELEM (BIT(10)|BIT(9))
+#define PPP_DST_PACKET_CNT_INTERLVD_6ELEM (BIT(11)|BIT(9))
+
+#define PPP_DST_C3A_8BIT (BIT(7)|BIT(6))
+#define PPP_DST_C3ALPHA_EN BIT(8)
+
+#define PPP_DST_PACK_LOOSE 0
+#define PPP_DST_PACK_TIGHT BIT(13)
+#define PPP_DST_PACK_ALIGN_LSB 0
+#define PPP_DST_PACK_ALIGN_MSB BIT(14)
+
+#define PPP_DST_OUT_SEL_AXI 0
+#define PPP_DST_OUT_SEL_MDDI BIT(15)
+
+#define PPP_DST_BPP_2BYTES BIT(16)
+#define PPP_DST_BPP_3BYTES BIT(17)
+#define PPP_DST_BPP_4BYTES (BIT(17)|BIT(16))
+
+#define PPP_DST_PLANE_INTERLVD 0
+#define PPP_DST_PLANE_PLANAR BIT(18)
+#define PPP_DST_PLANE_PSEUDOPLN BIT(19)
+
+#define PPP_OP_DST_CHROMA_H2V1 BIT(21)
+#define PPP_OP_DST_CHROMA_420 (BIT(21)|BIT(22))
+#define PPP_OP_COLOR_SPACE_YCBCR BIT(17)
+
+#define MDP_SCALE_Q_FACTOR 512
+#define MDP_MAX_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
+#define MDP_MIN_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
+#define MDP_MAX_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
+#define MDP_MIN_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
+
+#define MDP_TOP_LUMA 16
+#define MDP_TOP_CHROMA 0
+#define MDP_BOTTOM_LUMA 19
+#define MDP_BOTTOM_CHROMA 3
+#define MDP_LEFT_LUMA 22
+#define MDP_LEFT_CHROMA 6
+#define MDP_RIGHT_LUMA 25
+#define MDP_RIGHT_CHROMA 9
+
+#define MDP_RGB_565_SRC_REG (PPP_C2R_5BITS | PPP_C0G_6BITS | \
+ PPP_C1B_5BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+ PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+ PPP_SRC_UNPACK_ALIGN_LSB | \
+ PPP_SRC_FETCH_PLANES_INTERLVD)
+
+#define MDP_RGB_888_SRC_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+ PPP_C1B_8BITS | PPP_SRC_BPP_INTERLVD_3BYTES | \
+ PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+ PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_INTERLVD)
+
+#define MDP_RGBX_8888_SRC_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+ PPP_C1B_8BITS | PPP_C3A_8BITS | \
+ PPP_SRC_C3_ALPHA_EN | PPP_SRC_BPP_INTERLVD_4BYTES | \
+ PPP_SRC_INTERLVD_4COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+ PPP_SRC_UNPACK_ALIGN_LSB | \
+ PPP_SRC_FETCH_PLANES_INTERLVD)
+
+#define MDP_Y_CBCR_H2V2_SRC_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+ PPP_C1B_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+ PPP_SRC_INTERLVD_2COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+ PPP_SRC_UNPACK_ALIGN_LSB | \
+ PPP_SRC_FETCH_PLANES_PSEUDOPLNR)
+
+#define MDP_YCRYCB_H2V1_SRC_REG (PPP_C2R_8BITS | \
+ PPP_C0G_8BITS | PPP_C1B_8BITS | \
+ PPP_C3A_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+ PPP_SRC_INTERLVD_4COMPONENTS | \
+ PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB)
+
+#define MDP_Y_CRCB_H2V1_SRC_REG (PPP_C2R_8BITS | \
+ PPP_C0G_8BITS | PPP_C1B_8BITS | \
+ PPP_C3A_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+ PPP_SRC_INTERLVD_2COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+ PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR)
+
+#define MDP_RGB_565_DST_REG (PPP_C0G_6BITS | \
+ PPP_C1B_5BITS | PPP_C2R_5BITS | \
+ PPP_DST_PACKET_CNT_INTERLVD_3ELEM | \
+ PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+ PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES | \
+ PPP_DST_PLANE_INTERLVD)
+
+#define MDP_RGB_888_DST_REG (PPP_C0G_8BITS | \
+ PPP_C1B_8BITS | PPP_C2R_8BITS | \
+ PPP_DST_PACKET_CNT_INTERLVD_3ELEM | PPP_DST_PACK_TIGHT | \
+ PPP_DST_PACK_ALIGN_LSB | PPP_DST_OUT_SEL_AXI | \
+ PPP_DST_BPP_3BYTES | PPP_DST_PLANE_INTERLVD)
+
+#define MDP_RGBX_8888_DST_REG (PPP_C0G_8BITS | \
+ PPP_C1B_8BITS | PPP_C2R_8BITS | PPP_C3A_8BITS | \
+ PPP_DST_C3ALPHA_EN | PPP_DST_PACKET_CNT_INTERLVD_4ELEM | \
+ PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+ PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_4BYTES | \
+ PPP_DST_PLANE_INTERLVD)
+
+#define MDP_Y_CBCR_H2V2_DST_REG (PPP_C2R_8BITS | \
+ PPP_C0G_8BITS | PPP_C1B_8BITS | PPP_C3A_8BITS | \
+ PPP_DST_PACKET_CNT_INTERLVD_2ELEM | \
+ PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+ PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES)
+
+#define MDP_YCRYCB_H2V1_DST_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+ PPP_C1B_8BITS | PPP_C3A_8BITS | PPP_DST_PACKET_CNT_INTERLVD_4ELEM | \
+ PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+ PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES | \
+ PPP_DST_PLANE_INTERLVD)
+
+#define MDP_Y_CRCB_H2V1_DST_REG (PPP_C2R_8BITS | \
+ PPP_C0G_8BITS | PPP_C1B_8BITS | PPP_C3A_8BITS | \
+ PPP_DST_PACKET_CNT_INTERLVD_2ELEM | PPP_DST_PACK_TIGHT | \
+ PPP_DST_PACK_ALIGN_LSB | PPP_DST_OUT_SEL_AXI | \
+ PPP_DST_BPP_2BYTES)
+
+/* LUT */
+#define MDP_LUT_C0_EN BIT(5)
+#define MDP_LUT_C1_EN BIT(6)
+#define MDP_LUT_C2_EN BIT(7)
+
+/* Dither */
+#define MDP_OP_DITHER_EN BIT(16)
+
+/* Rotator */
+#define MDP_OP_ROT_ON BIT(8)
+#define MDP_OP_ROT_90 BIT(9)
+#define MDP_OP_FLIP_LR BIT(10)
+#define MDP_OP_FLIP_UD BIT(11)
+
+/* Blend */
+#define MDP_OP_BLEND_EN BIT(12)
+#define MDP_OP_BLEND_EQ_SEL BIT(15)
+#define MDP_OP_BLEND_TRANSP_EN BIT(24)
+#define MDP_BLEND_MASK (MDP_OP_BLEND_EN | MDP_OP_BLEND_EQ_SEL | \
+ MDP_OP_BLEND_TRANSP_EN | BIT(14) | BIT(13))
+
+#define MDP_BLEND_ALPHA_SEL 13
+#define MDP_BLEND_ALPHA_MASK 0x3
+#define MDP_BLEND_CONST_ALPHA 24
+#define MDP_BLEND_TRASP_COL_MASK 0xFFFFFF
+
+/* CSC Matrix */
+#define MDP_CSC_RGB2YUV 0
+#define MDP_CSC_YUV2RGB 1
+
+#define MDP_CSC_SIZE 9
+#define MDP_BV_SIZE 3
+#define MDP_LV_SIZE 4
+
+enum ppp_lut_type {
+ LUT_PRE_TABLE = 0,
+ LUT_POST_TABLE,
+};
+
+enum ppp_csc_matrix {
+ CSC_PRIMARY_MATRIX = 0,
+ CSC_SECONDARY_MATRIX,
+};
+
+/* scale tables */
+enum {
+ PPP_DOWNSCALE_PT2TOPT4,
+ PPP_DOWNSCALE_PT4TOPT6,
+ PPP_DOWNSCALE_PT6TOPT8,
+ PPP_DOWNSCALE_PT8TOPT1,
+ PPP_DOWNSCALE_MAX,
+};
+
+struct ppp_table {
+ uint32_t reg;
+ uint32_t val;
+};
+
+struct ppp_resource {
+ u64 next_ab;
+ u64 next_ib;
+ u64 clk_rate;
+ u64 solid_fill_pixel;
+ u64 solid_fill_byte;
+};
+
+struct ppp_csc_table {
+ int direction; /* MDP_CCS_RGB2YUV or YUV2RGB */
+ uint16_t fwd_matrix[MDP_CCS_SIZE]; /* 3x3 color coefficients */
+ uint16_t rev_matrix[MDP_CCS_SIZE]; /* 3x3 color coefficients */
+ uint16_t bv[MDP_BV_SIZE]; /* 1x3 bias vector */
+ uint16_t lv[MDP_LV_SIZE]; /* 1x3 limit vector */
+};
+
+struct ppp_blend {
+ int const_alpha;
+ int trans_color; /*color keying*/
+};
+
+struct ppp_img_prop {
+ int32_t x;
+ int32_t y;
+ uint32_t width;
+ uint32_t height;
+};
+
+struct ppp_img_desc {
+ struct ppp_img_prop prop;
+ struct ppp_img_prop roi;
+ int color_fmt;
+ void *p0; /* plane 0 */
+ void *p1;
+ void *p3;
+ int stride0;
+ int stride1;
+ int stride2;
+};
+
+struct ppp_blit_op {
+ struct ppp_img_desc src;
+ struct ppp_img_desc dst;
+ struct ppp_img_desc bg;
+ struct ppp_blend blend;
+ uint32_t mdp_op; /* Operations */
+ uint32_t solid_fill_color;
+ bool solid_fill;
+};
+
+struct ppp_edge_rep {
+ uint32_t dst_roi_width;
+ uint32_t dst_roi_height;
+ uint32_t is_scale_enabled;
+
+ /*
+ * positions of the luma pixel(relative to the image ) required for
+ * scaling the ROI
+ */
+ int32_t luma_interp_point_left;
+ int32_t luma_interp_point_right;
+ int32_t luma_interp_point_top;
+ int32_t luma_interp_point_bottom;
+
+ /*
+ * positions of the chroma pixel(relative to the image ) required for
+ * interpolating a chroma value at all required luma positions
+ */
+ int32_t chroma_interp_point_left;
+ int32_t chroma_interp_point_right;
+ int32_t chroma_interp_point_top;
+ int32_t chroma_interp_point_bottom;
+
+ /*
+ * a rectangular region within the chroma plane of the "image".
+ * Chroma pixels falling inside of this rectangle belongs to the ROI
+ */
+ int32_t chroma_bound_left;
+ int32_t chroma_bound_right;
+ int32_t chroma_bound_top;
+ int32_t chroma_bound_bottom;
+
+ /*
+ * number of chroma pixels to replicate on the left, right,
+ * top and bottom edge of the ROI.
+ */
+ int32_t chroma_repeat_left;
+ int32_t chroma_repeat_right;
+ int32_t chroma_repeat_top;
+ int32_t chroma_repeat_bottom;
+
+ /*
+ * number of luma pixels to replicate on the left, right,
+ * top and bottom edge of the ROI.
+ */
+ int32_t luma_repeat_left;
+ int32_t luma_repeat_right;
+ int32_t luma_repeat_top;
+ int32_t luma_repeat_bottom;
+};
+
+bool check_if_rgb(int color);
+
+/* func for ppp register values */
+uint32_t ppp_bpp(uint32_t type);
+uint32_t ppp_src_config(uint32_t type);
+uint32_t ppp_out_config(uint32_t type);
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb);
+uint32_t ppp_dst_op_reg(uint32_t type);
+uint32_t ppp_src_op_reg(uint32_t type);
+bool ppp_per_p_alpha(uint32_t type);
+bool ppp_multi_plane(uint32_t type);
+uint32_t *ppp_default_pre_lut(void);
+uint32_t *ppp_default_post_lut(void);
+struct ppp_csc_table *ppp_csc_rgb2yuv(void);
+struct ppp_csc_table *ppp_csc_table2(void);
+void ppp_load_up_lut(void);
+void ppp_load_gaussian_lut(void);
+void ppp_load_x_scale_table(int idx);
+void ppp_load_y_scale_table(int idx);
+
+int mdp3_ppp_res_init(struct msm_fb_data_type *mfd);
+int mdp3_ppp_init(void);
+int config_ppp_op_mode(struct ppp_blit_op *blit_op);
+void ppp_enable(void);
+int mdp3_ppp_parse_req(void __user *p,
+ struct mdp_async_blit_req_list *req_list_header,
+ int async);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_data.c b/drivers/video/fbdev/msm/mdp3_ppp_data.c
new file mode 100644
index 0000000..ac88d9b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp_data.c
@@ -0,0 +1,1619 @@
+/* Copyright (c) 2007, 2012-2013, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+
+#include "mdss_fb.h"
+#include "mdp3_ppp.h"
+
+#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
+
+/* bg_config_lut not needed since it is same as src */
+const uint32_t src_cfg_lut[MDP_IMGTYPE_LIMIT] = {
+ [MDP_RGB_565] = MDP_RGB_565_SRC_REG,
+ [MDP_BGR_565] = MDP_RGB_565_SRC_REG,
+ [MDP_RGB_888] = MDP_RGB_888_SRC_REG,
+ [MDP_BGR_888] = MDP_RGB_888_SRC_REG,
+ [MDP_BGRA_8888] = MDP_RGBX_8888_SRC_REG,
+ [MDP_RGBA_8888] = MDP_RGBX_8888_SRC_REG,
+ [MDP_ARGB_8888] = MDP_RGBX_8888_SRC_REG,
+ [MDP_XRGB_8888] = MDP_RGBX_8888_SRC_REG,
+ [MDP_RGBX_8888] = MDP_RGBX_8888_SRC_REG,
+ [MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG,
+ [MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG,
+ [MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_SRC_REG,
+ [MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_SRC_REG,
+ [MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_SRC_REG,
+ [MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG,
+ [MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG,
+ [MDP_BGRX_8888] = MDP_RGBX_8888_SRC_REG,
+};
+
+const uint32_t out_cfg_lut[MDP_IMGTYPE_LIMIT] = {
+ [MDP_RGB_565] = MDP_RGB_565_DST_REG,
+ [MDP_BGR_565] = MDP_RGB_565_DST_REG,
+ [MDP_RGB_888] = MDP_RGB_888_DST_REG,
+ [MDP_BGR_888] = MDP_RGB_888_DST_REG,
+ [MDP_BGRA_8888] = MDP_RGBX_8888_DST_REG,
+ [MDP_RGBA_8888] = MDP_RGBX_8888_DST_REG,
+ [MDP_ARGB_8888] = MDP_RGBX_8888_DST_REG,
+ [MDP_XRGB_8888] = MDP_RGBX_8888_DST_REG,
+ [MDP_RGBX_8888] = MDP_RGBX_8888_DST_REG,
+ [MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_DST_REG,
+ [MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_DST_REG,
+ [MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_DST_REG,
+ [MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_DST_REG,
+ [MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_DST_REG,
+ [MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_DST_REG,
+ [MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_DST_REG,
+ [MDP_BGRX_8888] = MDP_RGBX_8888_DST_REG,
+};
+
+const uint32_t pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
+ [MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+ [MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+ [MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+ [MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+ [MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+ CLR_G, CLR_R, 8),
+ [MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+ CLR_G, CLR_B, 8),
+ [MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_R,
+ CLR_G, CLR_B, CLR_ALPHA, 8),
+ [MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_R,
+ CLR_G, CLR_B, CLR_ALPHA, 8),
+ [MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+ CLR_G, CLR_B, 8),
+ [MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+ [MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+ [MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB,
+ CLR_CR, 8),
+ [MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB,
+ CLR_CR, 8),
+ [MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
+ CLR_CR, CLR_Y, CLR_CB, 8),
+ [MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+ [MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+ [MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+ CLR_G, CLR_R, 8),
+};
+
+const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
+ [MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+ [MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+ [MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+ [MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+ [MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+ CLR_G, CLR_B, 8),
+ [MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+ CLR_G, CLR_R, 8),
+ [MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+ CLR_G, CLR_R, 8),
+ [MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+ CLR_G, CLR_R, 8),
+ [MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+ CLR_G, CLR_R, 8),
+ [MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+ [MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+ [MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+ CLR_CB, 8),
+ [MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+ CLR_CB, 8),
+ [MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
+ CLR_CB, CLR_Y, CLR_CR, 8),
+ [MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+ [MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+ [MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+ CLR_G, CLR_B, 8),
+};
+
+const uint32_t dst_op_reg[MDP_IMGTYPE_LIMIT] = {
+ [MDP_Y_CRCB_H2V2] = PPP_OP_DST_CHROMA_420,
+ [MDP_Y_CBCR_H2V2] = PPP_OP_DST_CHROMA_420,
+ [MDP_Y_CBCR_H2V1] = PPP_OP_DST_CHROMA_H2V1,
+ [MDP_Y_CRCB_H2V1] = PPP_OP_DST_CHROMA_H2V1,
+ [MDP_YCRYCB_H2V1] = PPP_OP_DST_CHROMA_H2V1,
+};
+
+const uint32_t src_op_reg[MDP_IMGTYPE_LIMIT] = {
+ [MDP_Y_CRCB_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR,
+ [MDP_Y_CBCR_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR,
+ [MDP_Y_CBCR_H2V2_ADRENO] = PPP_OP_SRC_CHROMA_420 |
+ PPP_OP_COLOR_SPACE_YCBCR,
+ [MDP_Y_CBCR_H2V2_VENUS] = PPP_OP_SRC_CHROMA_420 |
+ PPP_OP_COLOR_SPACE_YCBCR,
+ [MDP_Y_CBCR_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
+ [MDP_Y_CRCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
+ [MDP_YCRYCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
+};
+
+const uint32_t bytes_per_pixel[MDP_IMGTYPE_LIMIT] = {
+ [MDP_RGB_565] = 2,
+ [MDP_BGR_565] = 2,
+ [MDP_RGB_888] = 3,
+ [MDP_BGR_888] = 3,
+ [MDP_XRGB_8888] = 4,
+ [MDP_ARGB_8888] = 4,
+ [MDP_RGBA_8888] = 4,
+ [MDP_BGRA_8888] = 4,
+ [MDP_RGBX_8888] = 4,
+ [MDP_Y_CBCR_H2V1] = 1,
+ [MDP_Y_CBCR_H2V2] = 1,
+ [MDP_Y_CBCR_H2V2_ADRENO] = 1,
+ [MDP_Y_CBCR_H2V2_VENUS] = 1,
+ [MDP_Y_CRCB_H2V1] = 1,
+ [MDP_Y_CRCB_H2V2] = 1,
+ [MDP_YCRYCB_H2V1] = 2,
+ [MDP_BGRX_8888] = 4,
+};
+
+const bool per_pixel_alpha[MDP_IMGTYPE_LIMIT] = {
+ [MDP_BGRA_8888] = true,
+ [MDP_RGBA_8888] = true,
+ [MDP_ARGB_8888] = true,
+};
+
+const bool multi_plane[MDP_IMGTYPE_LIMIT] = {
+ [MDP_Y_CRCB_H2V2] = true,
+ [MDP_Y_CBCR_H2V2] = true,
+ [MDP_Y_CBCR_H2V1] = true,
+ [MDP_Y_CRCB_H2V1] = true,
+};
+
+/* lut default */
+uint32_t default_pre_lut_val[PPP_LUT_MAX] = {
+ 0x0,
+ 0x151515,
+ 0x1d1d1d,
+ 0x232323,
+ 0x272727,
+ 0x2b2b2b,
+ 0x2f2f2f,
+ 0x333333,
+ 0x363636,
+ 0x393939,
+ 0x3b3b3b,
+ 0x3e3e3e,
+ 0x404040,
+ 0x434343,
+ 0x454545,
+ 0x474747,
+ 0x494949,
+ 0x4b4b4b,
+ 0x4d4d4d,
+ 0x4f4f4f,
+ 0x515151,
+ 0x535353,
+ 0x555555,
+ 0x565656,
+ 0x585858,
+ 0x5a5a5a,
+ 0x5b5b5b,
+ 0x5d5d5d,
+ 0x5e5e5e,
+ 0x606060,
+ 0x616161,
+ 0x636363,
+ 0x646464,
+ 0x666666,
+ 0x676767,
+ 0x686868,
+ 0x6a6a6a,
+ 0x6b6b6b,
+ 0x6c6c6c,
+ 0x6e6e6e,
+ 0x6f6f6f,
+ 0x707070,
+ 0x717171,
+ 0x727272,
+ 0x747474,
+ 0x757575,
+ 0x767676,
+ 0x777777,
+ 0x787878,
+ 0x797979,
+ 0x7a7a7a,
+ 0x7c7c7c,
+ 0x7d7d7d,
+ 0x7e7e7e,
+ 0x7f7f7f,
+ 0x808080,
+ 0x818181,
+ 0x828282,
+ 0x838383,
+ 0x848484,
+ 0x858585,
+ 0x868686,
+ 0x878787,
+ 0x888888,
+ 0x898989,
+ 0x8a8a8a,
+ 0x8b8b8b,
+ 0x8c8c8c,
+ 0x8d8d8d,
+ 0x8e8e8e,
+ 0x8f8f8f,
+ 0x8f8f8f,
+ 0x909090,
+ 0x919191,
+ 0x929292,
+ 0x939393,
+ 0x949494,
+ 0x959595,
+ 0x969696,
+ 0x969696,
+ 0x979797,
+ 0x989898,
+ 0x999999,
+ 0x9a9a9a,
+ 0x9b9b9b,
+ 0x9c9c9c,
+ 0x9c9c9c,
+ 0x9d9d9d,
+ 0x9e9e9e,
+ 0x9f9f9f,
+ 0xa0a0a0,
+ 0xa0a0a0,
+ 0xa1a1a1,
+ 0xa2a2a2,
+ 0xa3a3a3,
+ 0xa4a4a4,
+ 0xa4a4a4,
+ 0xa5a5a5,
+ 0xa6a6a6,
+ 0xa7a7a7,
+ 0xa7a7a7,
+ 0xa8a8a8,
+ 0xa9a9a9,
+ 0xaaaaaa,
+ 0xaaaaaa,
+ 0xababab,
+ 0xacacac,
+ 0xadadad,
+ 0xadadad,
+ 0xaeaeae,
+ 0xafafaf,
+ 0xafafaf,
+ 0xb0b0b0,
+ 0xb1b1b1,
+ 0xb2b2b2,
+ 0xb2b2b2,
+ 0xb3b3b3,
+ 0xb4b4b4,
+ 0xb4b4b4,
+ 0xb5b5b5,
+ 0xb6b6b6,
+ 0xb6b6b6,
+ 0xb7b7b7,
+ 0xb8b8b8,
+ 0xb8b8b8,
+ 0xb9b9b9,
+ 0xbababa,
+ 0xbababa,
+ 0xbbbbbb,
+ 0xbcbcbc,
+ 0xbcbcbc,
+ 0xbdbdbd,
+ 0xbebebe,
+ 0xbebebe,
+ 0xbfbfbf,
+ 0xc0c0c0,
+ 0xc0c0c0,
+ 0xc1c1c1,
+ 0xc1c1c1,
+ 0xc2c2c2,
+ 0xc3c3c3,
+ 0xc3c3c3,
+ 0xc4c4c4,
+ 0xc5c5c5,
+ 0xc5c5c5,
+ 0xc6c6c6,
+ 0xc6c6c6,
+ 0xc7c7c7,
+ 0xc8c8c8,
+ 0xc8c8c8,
+ 0xc9c9c9,
+ 0xc9c9c9,
+ 0xcacaca,
+ 0xcbcbcb,
+ 0xcbcbcb,
+ 0xcccccc,
+ 0xcccccc,
+ 0xcdcdcd,
+ 0xcecece,
+ 0xcecece,
+ 0xcfcfcf,
+ 0xcfcfcf,
+ 0xd0d0d0,
+ 0xd0d0d0,
+ 0xd1d1d1,
+ 0xd2d2d2,
+ 0xd2d2d2,
+ 0xd3d3d3,
+ 0xd3d3d3,
+ 0xd4d4d4,
+ 0xd4d4d4,
+ 0xd5d5d5,
+ 0xd6d6d6,
+ 0xd6d6d6,
+ 0xd7d7d7,
+ 0xd7d7d7,
+ 0xd8d8d8,
+ 0xd8d8d8,
+ 0xd9d9d9,
+ 0xd9d9d9,
+ 0xdadada,
+ 0xdbdbdb,
+ 0xdbdbdb,
+ 0xdcdcdc,
+ 0xdcdcdc,
+ 0xdddddd,
+ 0xdddddd,
+ 0xdedede,
+ 0xdedede,
+ 0xdfdfdf,
+ 0xdfdfdf,
+ 0xe0e0e0,
+ 0xe0e0e0,
+ 0xe1e1e1,
+ 0xe1e1e1,
+ 0xe2e2e2,
+ 0xe3e3e3,
+ 0xe3e3e3,
+ 0xe4e4e4,
+ 0xe4e4e4,
+ 0xe5e5e5,
+ 0xe5e5e5,
+ 0xe6e6e6,
+ 0xe6e6e6,
+ 0xe7e7e7,
+ 0xe7e7e7,
+ 0xe8e8e8,
+ 0xe8e8e8,
+ 0xe9e9e9,
+ 0xe9e9e9,
+ 0xeaeaea,
+ 0xeaeaea,
+ 0xebebeb,
+ 0xebebeb,
+ 0xececec,
+ 0xececec,
+ 0xededed,
+ 0xededed,
+ 0xeeeeee,
+ 0xeeeeee,
+ 0xefefef,
+ 0xefefef,
+ 0xf0f0f0,
+ 0xf0f0f0,
+ 0xf1f1f1,
+ 0xf1f1f1,
+ 0xf2f2f2,
+ 0xf2f2f2,
+ 0xf2f2f2,
+ 0xf3f3f3,
+ 0xf3f3f3,
+ 0xf4f4f4,
+ 0xf4f4f4,
+ 0xf5f5f5,
+ 0xf5f5f5,
+ 0xf6f6f6,
+ 0xf6f6f6,
+ 0xf7f7f7,
+ 0xf7f7f7,
+ 0xf8f8f8,
+ 0xf8f8f8,
+ 0xf9f9f9,
+ 0xf9f9f9,
+ 0xfafafa,
+ 0xfafafa,
+ 0xfafafa,
+ 0xfbfbfb,
+ 0xfbfbfb,
+ 0xfcfcfc,
+ 0xfcfcfc,
+ 0xfdfdfd,
+ 0xfdfdfd,
+ 0xfefefe,
+ 0xfefefe,
+ 0xffffff,
+ 0xffffff,
+};
+
+uint32_t default_post_lut_val[PPP_LUT_MAX] = {
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x10101,
+ 0x20202,
+ 0x20202,
+ 0x20202,
+ 0x20202,
+ 0x20202,
+ 0x20202,
+ 0x30303,
+ 0x30303,
+ 0x30303,
+ 0x30303,
+ 0x30303,
+ 0x40404,
+ 0x40404,
+ 0x40404,
+ 0x40404,
+ 0x40404,
+ 0x50505,
+ 0x50505,
+ 0x50505,
+ 0x50505,
+ 0x60606,
+ 0x60606,
+ 0x60606,
+ 0x70707,
+ 0x70707,
+ 0x70707,
+ 0x70707,
+ 0x80808,
+ 0x80808,
+ 0x80808,
+ 0x90909,
+ 0x90909,
+ 0xa0a0a,
+ 0xa0a0a,
+ 0xa0a0a,
+ 0xb0b0b,
+ 0xb0b0b,
+ 0xb0b0b,
+ 0xc0c0c,
+ 0xc0c0c,
+ 0xd0d0d,
+ 0xd0d0d,
+ 0xe0e0e,
+ 0xe0e0e,
+ 0xe0e0e,
+ 0xf0f0f,
+ 0xf0f0f,
+ 0x101010,
+ 0x101010,
+ 0x111111,
+ 0x111111,
+ 0x121212,
+ 0x121212,
+ 0x131313,
+ 0x131313,
+ 0x141414,
+ 0x151515,
+ 0x151515,
+ 0x161616,
+ 0x161616,
+ 0x171717,
+ 0x171717,
+ 0x181818,
+ 0x191919,
+ 0x191919,
+ 0x1a1a1a,
+ 0x1b1b1b,
+ 0x1b1b1b,
+ 0x1c1c1c,
+ 0x1c1c1c,
+ 0x1d1d1d,
+ 0x1e1e1e,
+ 0x1f1f1f,
+ 0x1f1f1f,
+ 0x202020,
+ 0x212121,
+ 0x212121,
+ 0x222222,
+ 0x232323,
+ 0x242424,
+ 0x242424,
+ 0x252525,
+ 0x262626,
+ 0x272727,
+ 0x272727,
+ 0x282828,
+ 0x292929,
+ 0x2a2a2a,
+ 0x2b2b2b,
+ 0x2c2c2c,
+ 0x2c2c2c,
+ 0x2d2d2d,
+ 0x2e2e2e,
+ 0x2f2f2f,
+ 0x303030,
+ 0x313131,
+ 0x323232,
+ 0x333333,
+ 0x333333,
+ 0x343434,
+ 0x353535,
+ 0x363636,
+ 0x373737,
+ 0x383838,
+ 0x393939,
+ 0x3a3a3a,
+ 0x3b3b3b,
+ 0x3c3c3c,
+ 0x3d3d3d,
+ 0x3e3e3e,
+ 0x3f3f3f,
+ 0x404040,
+ 0x414141,
+ 0x424242,
+ 0x434343,
+ 0x444444,
+ 0x464646,
+ 0x474747,
+ 0x484848,
+ 0x494949,
+ 0x4a4a4a,
+ 0x4b4b4b,
+ 0x4c4c4c,
+ 0x4d4d4d,
+ 0x4f4f4f,
+ 0x505050,
+ 0x515151,
+ 0x525252,
+ 0x535353,
+ 0x545454,
+ 0x565656,
+ 0x575757,
+ 0x585858,
+ 0x595959,
+ 0x5b5b5b,
+ 0x5c5c5c,
+ 0x5d5d5d,
+ 0x5e5e5e,
+ 0x606060,
+ 0x616161,
+ 0x626262,
+ 0x646464,
+ 0x656565,
+ 0x666666,
+ 0x686868,
+ 0x696969,
+ 0x6a6a6a,
+ 0x6c6c6c,
+ 0x6d6d6d,
+ 0x6f6f6f,
+ 0x707070,
+ 0x717171,
+ 0x737373,
+ 0x747474,
+ 0x767676,
+ 0x777777,
+ 0x797979,
+ 0x7a7a7a,
+ 0x7c7c7c,
+ 0x7d7d7d,
+ 0x7f7f7f,
+ 0x808080,
+ 0x828282,
+ 0x838383,
+ 0x858585,
+ 0x868686,
+ 0x888888,
+ 0x898989,
+ 0x8b8b8b,
+ 0x8d8d8d,
+ 0x8e8e8e,
+ 0x909090,
+ 0x919191,
+ 0x939393,
+ 0x959595,
+ 0x969696,
+ 0x989898,
+ 0x9a9a9a,
+ 0x9b9b9b,
+ 0x9d9d9d,
+ 0x9f9f9f,
+ 0xa1a1a1,
+ 0xa2a2a2,
+ 0xa4a4a4,
+ 0xa6a6a6,
+ 0xa7a7a7,
+ 0xa9a9a9,
+ 0xababab,
+ 0xadadad,
+ 0xafafaf,
+ 0xb0b0b0,
+ 0xb2b2b2,
+ 0xb4b4b4,
+ 0xb6b6b6,
+ 0xb8b8b8,
+ 0xbababa,
+ 0xbbbbbb,
+ 0xbdbdbd,
+ 0xbfbfbf,
+ 0xc1c1c1,
+ 0xc3c3c3,
+ 0xc5c5c5,
+ 0xc7c7c7,
+ 0xc9c9c9,
+ 0xcbcbcb,
+ 0xcdcdcd,
+ 0xcfcfcf,
+ 0xd1d1d1,
+ 0xd3d3d3,
+ 0xd5d5d5,
+ 0xd7d7d7,
+ 0xd9d9d9,
+ 0xdbdbdb,
+ 0xdddddd,
+ 0xdfdfdf,
+ 0xe1e1e1,
+ 0xe3e3e3,
+ 0xe5e5e5,
+ 0xe7e7e7,
+ 0xe9e9e9,
+ 0xebebeb,
+ 0xeeeeee,
+ 0xf0f0f0,
+ 0xf2f2f2,
+ 0xf4f4f4,
+ 0xf6f6f6,
+ 0xf8f8f8,
+ 0xfbfbfb,
+ 0xfdfdfd,
+ 0xffffff,
+};
+
+struct ppp_csc_table rgb2yuv = {
+ .fwd_matrix = {
+ 0x83,
+ 0x102,
+ 0x32,
+ 0xffb5,
+ 0xff6c,
+ 0xe1,
+ 0xe1,
+ 0xff45,
+ 0xffdc,
+ },
+ .rev_matrix = {
+ 0x254,
+ 0x0,
+ 0x331,
+ 0x254,
+ 0xff38,
+ 0xfe61,
+ 0x254,
+ 0x409,
+ 0x0,
+ },
+ .bv = {
+ 0x10,
+ 0x80,
+ 0x80,
+ },
+ .lv = {
+ 0x10,
+ 0xeb,
+ 0x10,
+ 0xf0,
+ },
+};
+
+struct ppp_csc_table default_table2 = {
+ .fwd_matrix = {
+ 0x5d,
+ 0x13a,
+ 0x20,
+ 0xffcd,
+ 0xff54,
+ 0xe1,
+ 0xe1,
+ 0xff35,
+ },
+ .rev_matrix = {
+ 0x254,
+ 0x0,
+ 0x396,
+ 0x254,
+ 0xff94,
+ 0xfef0,
+ 0x254,
+ 0x43a,
+ 0x0,
+ },
+ .bv = {
+ 0x10,
+ 0x80,
+ 0x80,
+ },
+ .lv = {
+ 0x10,
+ 0xeb,
+ 0x10,
+ 0xf0,
+ },
+};
+
+const struct ppp_table upscale_table[PPP_UPSCALE_MAX] = {
+ { 0x5fffc, 0x0 },
+ { 0x50200, 0x7fc00000 },
+ { 0x5fffc, 0xff80000d },
+ { 0x50204, 0x7ec003f9 },
+ { 0x5fffc, 0xfec0001c },
+ { 0x50208, 0x7d4003f3 },
+ { 0x5fffc, 0xfe40002b },
+ { 0x5020c, 0x7b8003ed },
+ { 0x5fffc, 0xfd80003c },
+ { 0x50210, 0x794003e8 },
+ { 0x5fffc, 0xfcc0004d },
+ { 0x50214, 0x76c003e4 },
+ { 0x5fffc, 0xfc40005f },
+ { 0x50218, 0x73c003e0 },
+ { 0x5fffc, 0xfb800071 },
+ { 0x5021c, 0x708003de },
+ { 0x5fffc, 0xfac00085 },
+ { 0x50220, 0x6d0003db },
+ { 0x5fffc, 0xfa000098 },
+ { 0x50224, 0x698003d9 },
+ { 0x5fffc, 0xf98000ac },
+ { 0x50228, 0x654003d8 },
+ { 0x5fffc, 0xf8c000c1 },
+ { 0x5022c, 0x610003d7 },
+ { 0x5fffc, 0xf84000d5 },
+ { 0x50230, 0x5c8003d7 },
+ { 0x5fffc, 0xf7c000e9 },
+ { 0x50234, 0x580003d7 },
+ { 0x5fffc, 0xf74000fd },
+ { 0x50238, 0x534003d8 },
+ { 0x5fffc, 0xf6c00112 },
+ { 0x5023c, 0x4e8003d8 },
+ { 0x5fffc, 0xf6800126 },
+ { 0x50240, 0x494003da },
+ { 0x5fffc, 0xf600013a },
+ { 0x50244, 0x448003db },
+ { 0x5fffc, 0xf600014d },
+ { 0x50248, 0x3f4003dd },
+ { 0x5fffc, 0xf5c00160 },
+ { 0x5024c, 0x3a4003df },
+ { 0x5fffc, 0xf5c00172 },
+ { 0x50250, 0x354003e1 },
+ { 0x5fffc, 0xf5c00184 },
+ { 0x50254, 0x304003e3 },
+ { 0x5fffc, 0xf6000195 },
+ { 0x50258, 0x2b0003e6 },
+ { 0x5fffc, 0xf64001a6 },
+ { 0x5025c, 0x260003e8 },
+ { 0x5fffc, 0xf6c001b4 },
+ { 0x50260, 0x214003eb },
+ { 0x5fffc, 0xf78001c2 },
+ { 0x50264, 0x1c4003ee },
+ { 0x5fffc, 0xf80001cf },
+ { 0x50268, 0x17c003f1 },
+ { 0x5fffc, 0xf90001db },
+ { 0x5026c, 0x134003f3 },
+ { 0x5fffc, 0xfa0001e5 },
+ { 0x50270, 0xf0003f6 },
+ { 0x5fffc, 0xfb4001ee },
+ { 0x50274, 0xac003f9 },
+ { 0x5fffc, 0xfcc001f5 },
+ { 0x50278, 0x70003fb },
+ { 0x5fffc, 0xfe4001fb },
+ { 0x5027c, 0x34003fe },
+};
+
+const struct ppp_table mdp_gaussian_blur_table[PPP_BLUR_SCALE_MAX] = {
+ /* max variance */
+ { 0x5fffc, 0x20000080 },
+ { 0x50280, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50284, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50288, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5028c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50290, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50294, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50298, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5029c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502a0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502a4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502a8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502ac, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502b0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502b4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502b8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502bc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502c0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502c4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502c8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502cc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502d0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502d4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502d8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502dc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502e0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502e4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502e8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502ec, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502f0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502f4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502f8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502fc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50300, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50304, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50308, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5030c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50310, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50314, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50318, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5031c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50320, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50324, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50328, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5032c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50330, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50334, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50338, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5033c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50340, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50344, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50348, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5034c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50350, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50354, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50358, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5035c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50360, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50364, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50368, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5036c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50370, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50374, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50378, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5037c, 0x20000080 },
+};
+
+const struct ppp_table downscale_x_table_pt2topt4[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50280, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50284, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50288, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5028c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50290, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50294, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50298, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5029c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x502a0, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x502a4, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x502a8, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x502ac, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x502b0, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x502b4, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x502b8, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x502bc, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x502c0, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x502c4, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x502c8, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x502cc, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x502d0, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x502d4, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x502d8, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x502dc, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x502e0, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x502e4, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x502e8, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x502ec, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x502f0, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x502f4, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x502f8, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x502fc, 0x2300001d },
+};
+
+static const struct ppp_table downscale_x_table_pt4topt6[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50280, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50284, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50288, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5028c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50290, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50294, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50298, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5029c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x502a0, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x502a4, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x502a8, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x502ac, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x502b0, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x502b4, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x502b8, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x502bc, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x502c0, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x502c4, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x502c8, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x502cc, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x502d0, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x502d4, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x502d8, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x502dc, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x502e0, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x502e4, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x502e8, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x502ec, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x502f0, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x502f4, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x502f8, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x502fc, 0x2300001d },
+};
+
+static const struct ppp_table downscale_x_table_pt6topt8[] = {
+ { 0x5fffc, 0xfe000070 },
+ { 0x50280, 0x4bc00068 },
+ { 0x5fffc, 0xfe000078 },
+ { 0x50284, 0x4bc00060 },
+ { 0x5fffc, 0xfe000080 },
+ { 0x50288, 0x4b800059 },
+ { 0x5fffc, 0xfe000089 },
+ { 0x5028c, 0x4b000052 },
+ { 0x5fffc, 0xfe400091 },
+ { 0x50290, 0x4a80004b },
+ { 0x5fffc, 0xfe40009a },
+ { 0x50294, 0x4a000044 },
+ { 0x5fffc, 0xfe8000a3 },
+ { 0x50298, 0x4940003d },
+ { 0x5fffc, 0xfec000ac },
+ { 0x5029c, 0x48400037 },
+ { 0x5fffc, 0xff0000b4 },
+ { 0x502a0, 0x47800031 },
+ { 0x5fffc, 0xff8000bd },
+ { 0x502a4, 0x4640002b },
+ { 0x5fffc, 0xc5 },
+ { 0x502a8, 0x45000026 },
+ { 0x5fffc, 0x8000ce },
+ { 0x502ac, 0x43800021 },
+ { 0x5fffc, 0x10000d6 },
+ { 0x502b0, 0x4240001c },
+ { 0x5fffc, 0x18000df },
+ { 0x502b4, 0x40800018 },
+ { 0x5fffc, 0x24000e6 },
+ { 0x502b8, 0x3f000014 },
+ { 0x5fffc, 0x30000ee },
+ { 0x502bc, 0x3d400010 },
+ { 0x5fffc, 0x40000f5 },
+ { 0x502c0, 0x3b80000c },
+ { 0x5fffc, 0x50000fc },
+ { 0x502c4, 0x39800009 },
+ { 0x5fffc, 0x6000102 },
+ { 0x502c8, 0x37c00006 },
+ { 0x5fffc, 0x7000109 },
+ { 0x502cc, 0x35800004 },
+ { 0x5fffc, 0x840010e },
+ { 0x502d0, 0x33800002 },
+ { 0x5fffc, 0x9800114 },
+ { 0x502d4, 0x31400000 },
+ { 0x5fffc, 0xac00119 },
+ { 0x502d8, 0x2f4003fe },
+ { 0x5fffc, 0xc40011e },
+ { 0x502dc, 0x2d0003fc },
+ { 0x5fffc, 0xdc00121 },
+ { 0x502e0, 0x2b0003fb },
+ { 0x5fffc, 0xf400125 },
+ { 0x502e4, 0x28c003fa },
+ { 0x5fffc, 0x11000128 },
+ { 0x502e8, 0x268003f9 },
+ { 0x5fffc, 0x12c0012a },
+ { 0x502ec, 0x244003f9 },
+ { 0x5fffc, 0x1480012c },
+ { 0x502f0, 0x224003f8 },
+ { 0x5fffc, 0x1640012e },
+ { 0x502f4, 0x200003f8 },
+ { 0x5fffc, 0x1800012f },
+ { 0x502f8, 0x1e0003f8 },
+ { 0x5fffc, 0x1a00012f },
+ { 0x502fc, 0x1c0003f8 },
+};
+
+static const struct ppp_table downscale_x_table_pt8topt1[] = {
+ { 0x5fffc, 0x0 },
+ { 0x50280, 0x7fc00000 },
+ { 0x5fffc, 0xff80000d },
+ { 0x50284, 0x7ec003f9 },
+ { 0x5fffc, 0xfec0001c },
+ { 0x50288, 0x7d4003f3 },
+ { 0x5fffc, 0xfe40002b },
+ { 0x5028c, 0x7b8003ed },
+ { 0x5fffc, 0xfd80003c },
+ { 0x50290, 0x794003e8 },
+ { 0x5fffc, 0xfcc0004d },
+ { 0x50294, 0x76c003e4 },
+ { 0x5fffc, 0xfc40005f },
+ { 0x50298, 0x73c003e0 },
+ { 0x5fffc, 0xfb800071 },
+ { 0x5029c, 0x708003de },
+ { 0x5fffc, 0xfac00085 },
+ { 0x502a0, 0x6d0003db },
+ { 0x5fffc, 0xfa000098 },
+ { 0x502a4, 0x698003d9 },
+ { 0x5fffc, 0xf98000ac },
+ { 0x502a8, 0x654003d8 },
+ { 0x5fffc, 0xf8c000c1 },
+ { 0x502ac, 0x610003d7 },
+ { 0x5fffc, 0xf84000d5 },
+ { 0x502b0, 0x5c8003d7 },
+ { 0x5fffc, 0xf7c000e9 },
+ { 0x502b4, 0x580003d7 },
+ { 0x5fffc, 0xf74000fd },
+ { 0x502b8, 0x534003d8 },
+ { 0x5fffc, 0xf6c00112 },
+ { 0x502bc, 0x4e8003d8 },
+ { 0x5fffc, 0xf6800126 },
+ { 0x502c0, 0x494003da },
+ { 0x5fffc, 0xf600013a },
+ { 0x502c4, 0x448003db },
+ { 0x5fffc, 0xf600014d },
+ { 0x502c8, 0x3f4003dd },
+ { 0x5fffc, 0xf5c00160 },
+ { 0x502cc, 0x3a4003df },
+ { 0x5fffc, 0xf5c00172 },
+ { 0x502d0, 0x354003e1 },
+ { 0x5fffc, 0xf5c00184 },
+ { 0x502d4, 0x304003e3 },
+ { 0x5fffc, 0xf6000195 },
+ { 0x502d8, 0x2b0003e6 },
+ { 0x5fffc, 0xf64001a6 },
+ { 0x502dc, 0x260003e8 },
+ { 0x5fffc, 0xf6c001b4 },
+ { 0x502e0, 0x214003eb },
+ { 0x5fffc, 0xf78001c2 },
+ { 0x502e4, 0x1c4003ee },
+ { 0x5fffc, 0xf80001cf },
+ { 0x502e8, 0x17c003f1 },
+ { 0x5fffc, 0xf90001db },
+ { 0x502ec, 0x134003f3 },
+ { 0x5fffc, 0xfa0001e5 },
+ { 0x502f0, 0xf0003f6 },
+ { 0x5fffc, 0xfb4001ee },
+ { 0x502f4, 0xac003f9 },
+ { 0x5fffc, 0xfcc001f5 },
+ { 0x502f8, 0x70003fb },
+ { 0x5fffc, 0xfe4001fb },
+ { 0x502fc, 0x34003fe },
+};
+
+static const struct ppp_table *downscale_x_table[PPP_DOWNSCALE_MAX] = {
+ [PPP_DOWNSCALE_PT2TOPT4] = downscale_x_table_pt2topt4,
+ [PPP_DOWNSCALE_PT4TOPT6] = downscale_x_table_pt4topt6,
+ [PPP_DOWNSCALE_PT6TOPT8] = downscale_x_table_pt6topt8,
+ [PPP_DOWNSCALE_PT8TOPT1] = downscale_x_table_pt8topt1,
+};
+
+static const struct ppp_table downscale_y_table_pt2topt4[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50300, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50304, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50308, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5030c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50310, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50314, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50318, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5031c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x50320, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x50324, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x50328, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x5032c, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x50330, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x50334, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x50338, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x5033c, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x50340, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x50344, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x50348, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x5034c, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x50350, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x50354, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x50358, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x5035c, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x50360, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x50364, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x50368, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x5036c, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x50370, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x50374, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x50378, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x5037c, 0x2300001d },
+};
+
+static const struct ppp_table downscale_y_table_pt4topt6[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50300, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50304, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50308, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5030c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50310, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50314, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50318, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5031c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x50320, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x50324, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x50328, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x5032c, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x50330, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x50334, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x50338, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x5033c, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x50340, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x50344, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x50348, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x5034c, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x50350, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x50354, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x50358, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x5035c, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x50360, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x50364, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x50368, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x5036c, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x50370, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x50374, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x50378, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x5037c, 0x2300001d },
+};
+
+static const struct ppp_table downscale_y_table_pt6topt8[] = {
+ { 0x5fffc, 0xfe000070 },
+ { 0x50300, 0x4bc00068 },
+ { 0x5fffc, 0xfe000078 },
+ { 0x50304, 0x4bc00060 },
+ { 0x5fffc, 0xfe000080 },
+ { 0x50308, 0x4b800059 },
+ { 0x5fffc, 0xfe000089 },
+ { 0x5030c, 0x4b000052 },
+ { 0x5fffc, 0xfe400091 },
+ { 0x50310, 0x4a80004b },
+ { 0x5fffc, 0xfe40009a },
+ { 0x50314, 0x4a000044 },
+ { 0x5fffc, 0xfe8000a3 },
+ { 0x50318, 0x4940003d },
+ { 0x5fffc, 0xfec000ac },
+ { 0x5031c, 0x48400037 },
+ { 0x5fffc, 0xff0000b4 },
+ { 0x50320, 0x47800031 },
+ { 0x5fffc, 0xff8000bd },
+ { 0x50324, 0x4640002b },
+ { 0x5fffc, 0xc5 },
+ { 0x50328, 0x45000026 },
+ { 0x5fffc, 0x8000ce },
+ { 0x5032c, 0x43800021 },
+ { 0x5fffc, 0x10000d6 },
+ { 0x50330, 0x4240001c },
+ { 0x5fffc, 0x18000df },
+ { 0x50334, 0x40800018 },
+ { 0x5fffc, 0x24000e6 },
+ { 0x50338, 0x3f000014 },
+ { 0x5fffc, 0x30000ee },
+ { 0x5033c, 0x3d400010 },
+ { 0x5fffc, 0x40000f5 },
+ { 0x50340, 0x3b80000c },
+ { 0x5fffc, 0x50000fc },
+ { 0x50344, 0x39800009 },
+ { 0x5fffc, 0x6000102 },
+ { 0x50348, 0x37c00006 },
+ { 0x5fffc, 0x7000109 },
+ { 0x5034c, 0x35800004 },
+ { 0x5fffc, 0x840010e },
+ { 0x50350, 0x33800002 },
+ { 0x5fffc, 0x9800114 },
+ { 0x50354, 0x31400000 },
+ { 0x5fffc, 0xac00119 },
+ { 0x50358, 0x2f4003fe },
+ { 0x5fffc, 0xc40011e },
+ { 0x5035c, 0x2d0003fc },
+ { 0x5fffc, 0xdc00121 },
+ { 0x50360, 0x2b0003fb },
+ { 0x5fffc, 0xf400125 },
+ { 0x50364, 0x28c003fa },
+ { 0x5fffc, 0x11000128 },
+ { 0x50368, 0x268003f9 },
+ { 0x5fffc, 0x12c0012a },
+ { 0x5036c, 0x244003f9 },
+ { 0x5fffc, 0x1480012c },
+ { 0x50370, 0x224003f8 },
+ { 0x5fffc, 0x1640012e },
+ { 0x50374, 0x200003f8 },
+ { 0x5fffc, 0x1800012f },
+ { 0x50378, 0x1e0003f8 },
+ { 0x5fffc, 0x1a00012f },
+ { 0x5037c, 0x1c0003f8 },
+};
+
+static const struct ppp_table downscale_y_table_pt8topt1[] = {
+ { 0x5fffc, 0x0 },
+ { 0x50300, 0x7fc00000 },
+ { 0x5fffc, 0xff80000d },
+ { 0x50304, 0x7ec003f9 },
+ { 0x5fffc, 0xfec0001c },
+ { 0x50308, 0x7d4003f3 },
+ { 0x5fffc, 0xfe40002b },
+ { 0x5030c, 0x7b8003ed },
+ { 0x5fffc, 0xfd80003c },
+ { 0x50310, 0x794003e8 },
+ { 0x5fffc, 0xfcc0004d },
+ { 0x50314, 0x76c003e4 },
+ { 0x5fffc, 0xfc40005f },
+ { 0x50318, 0x73c003e0 },
+ { 0x5fffc, 0xfb800071 },
+ { 0x5031c, 0x708003de },
+ { 0x5fffc, 0xfac00085 },
+ { 0x50320, 0x6d0003db },
+ { 0x5fffc, 0xfa000098 },
+ { 0x50324, 0x698003d9 },
+ { 0x5fffc, 0xf98000ac },
+ { 0x50328, 0x654003d8 },
+ { 0x5fffc, 0xf8c000c1 },
+ { 0x5032c, 0x610003d7 },
+ { 0x5fffc, 0xf84000d5 },
+ { 0x50330, 0x5c8003d7 },
+ { 0x5fffc, 0xf7c000e9 },
+ { 0x50334, 0x580003d7 },
+ { 0x5fffc, 0xf74000fd },
+ { 0x50338, 0x534003d8 },
+ { 0x5fffc, 0xf6c00112 },
+ { 0x5033c, 0x4e8003d8 },
+ { 0x5fffc, 0xf6800126 },
+ { 0x50340, 0x494003da },
+ { 0x5fffc, 0xf600013a },
+ { 0x50344, 0x448003db },
+ { 0x5fffc, 0xf600014d },
+ { 0x50348, 0x3f4003dd },
+ { 0x5fffc, 0xf5c00160 },
+ { 0x5034c, 0x3a4003df },
+ { 0x5fffc, 0xf5c00172 },
+ { 0x50350, 0x354003e1 },
+ { 0x5fffc, 0xf5c00184 },
+ { 0x50354, 0x304003e3 },
+ { 0x5fffc, 0xf6000195 },
+ { 0x50358, 0x2b0003e6 },
+ { 0x5fffc, 0xf64001a6 },
+ { 0x5035c, 0x260003e8 },
+ { 0x5fffc, 0xf6c001b4 },
+ { 0x50360, 0x214003eb },
+ { 0x5fffc, 0xf78001c2 },
+ { 0x50364, 0x1c4003ee },
+ { 0x5fffc, 0xf80001cf },
+ { 0x50368, 0x17c003f1 },
+ { 0x5fffc, 0xf90001db },
+ { 0x5036c, 0x134003f3 },
+ { 0x5fffc, 0xfa0001e5 },
+ { 0x50370, 0xf0003f6 },
+ { 0x5fffc, 0xfb4001ee },
+ { 0x50374, 0xac003f9 },
+ { 0x5fffc, 0xfcc001f5 },
+ { 0x50378, 0x70003fb },
+ { 0x5fffc, 0xfe4001fb },
+ { 0x5037c, 0x34003fe },
+};
+
+static const struct ppp_table *downscale_y_table[PPP_DOWNSCALE_MAX] = {
+ [PPP_DOWNSCALE_PT2TOPT4] = downscale_y_table_pt2topt4,
+ [PPP_DOWNSCALE_PT4TOPT6] = downscale_y_table_pt4topt6,
+ [PPP_DOWNSCALE_PT6TOPT8] = downscale_y_table_pt6topt8,
+ [PPP_DOWNSCALE_PT8TOPT1] = downscale_y_table_pt8topt1,
+};
+
+void ppp_load_table(const struct ppp_table *table, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ PPP_WRITEL(table[i].val, table[i].reg);
+}
+
+void ppp_load_up_lut(void)
+{
+ ppp_load_table(upscale_table,
+ PPP_UPSCALE_MAX);
+}
+
+void ppp_load_gaussian_lut(void)
+{
+ ppp_load_table(mdp_gaussian_blur_table,
+ PPP_BLUR_SCALE_MAX);
+}
+
+void ppp_load_x_scale_table(int idx)
+{
+ ppp_load_table(downscale_x_table[idx], 64);
+}
+
+void ppp_load_y_scale_table(int idx)
+{
+ ppp_load_table(downscale_y_table[idx], 64);
+}
+
+uint32_t ppp_bpp(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return bytes_per_pixel[type];
+}
+
+uint32_t ppp_src_config(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return src_cfg_lut[type];
+}
+
+uint32_t ppp_out_config(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return out_cfg_lut[type];
+}
+
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ if (yuv2rgb)
+ return swapped_pack_patt_lut[type];
+
+ return pack_patt_lut[type];
+}
+
+uint32_t ppp_dst_op_reg(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return dst_op_reg[type];
+}
+
+uint32_t ppp_src_op_reg(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return src_op_reg[type];
+}
+
+bool ppp_per_p_alpha(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return per_pixel_alpha[type];
+}
+
+bool ppp_multi_plane(uint32_t type)
+{
+ if (MDP_IS_IMGTYPE_BAD(type))
+ return 0;
+ return multi_plane[type];
+}
+
+uint32_t *ppp_default_pre_lut(void)
+{
+ return default_pre_lut_val;
+}
+
+uint32_t *ppp_default_post_lut(void)
+{
+ return default_post_lut_val;
+}
+
+struct ppp_csc_table *ppp_csc_rgb2yuv(void)
+{
+ return &rgb2yuv;
+}
+
+struct ppp_csc_table *ppp_csc_table2(void)
+{
+ return &default_table2;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
new file mode 100644
index 0000000..6f077e2
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
@@ -0,0 +1,1365 @@
+/* Copyright (c) 2007, 2012-2013, 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "linux/proc_fs.h"
+
+#include "mdss_fb.h"
+#include "mdp3_ppp.h"
+#include "mdp3_hwio.h"
+#include "mdss_debug.h"
+
+/* SHIM Q Factor */
+#define PHI_Q_FACTOR 29
+#define PQF_PLUS_5 (PHI_Q_FACTOR + 5) /* due to 32 phases */
+#define PQF_PLUS_4 (PHI_Q_FACTOR + 4)
+#define PQF_PLUS_2 (PHI_Q_FACTOR + 2) /* to get 4.0 */
+#define PQF_MINUS_2 (PHI_Q_FACTOR - 2) /* to get 0.25 */
+#define PQF_PLUS_5_PLUS_2 (PQF_PLUS_5 + 2)
+#define PQF_PLUS_5_MINUS_2 (PQF_PLUS_5 - 2)
+
+enum {
+ LAYER_FG = 0,
+ LAYER_BG,
+ LAYER_FB,
+ LAYER_MAX,
+};
+
+static long long mdp_do_div(long long num, long long den)
+{
+ do_div(num, den);
+ return num;
+}
+
+static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
+ uint32_t dim_out, bool is_W, int32_t *phase_init_ptr,
+ uint32_t *phase_step_ptr)
+{
+ bool rpa_on = false;
+ int init_phase = 0;
+ uint64_t numer = 0;
+ uint64_t denom = 0;
+ int64_t point5 = 1;
+ int64_t one = 1;
+ int64_t k1, k2, k3, k4; /* linear equation coefficients */
+ uint64_t int_mask;
+ uint64_t fract_mask;
+ uint64_t Os;
+ int64_t Osprime;
+ int64_t Od;
+ int64_t Odprime;
+ int64_t Oreq;
+ int64_t init_phase_temp;
+ int64_t delta;
+ uint32_t mult;
+
+ /*
+ * The phase accumulator should really be rational for all cases in a
+ * general purpose polyphase scaler for a tiled architecture with
+ * non-zero origin capability because there is no way to represent
+ * certain scale factors in fixed point regardless of precision.
+ * The error incurred in attempting to use fixed point is most
+ * egregious for SF where 1/SF is an integral multiple of 1/3.
+ *
+ * Set the RPA flag for this dimension.
+ *
+ * In order for 1/SF (dim_in/dim_out) to be an integral multiple of
+ * 1/3, dim_out must be an integral multiple of 3.
+ */
+ if (!(dim_out % 3)) {
+ mult = dim_out / 3;
+ rpa_on = (!(dim_in % mult));
+ }
+
+ numer = dim_out;
+ denom = dim_in;
+
+ /*
+ * convert to U30.34 before division
+ *
+ * The K vectors carry 4 extra bits of precision
+ * and are rounded.
+ *
+ * We initially go 5 bits over then round by adding
+ * 1 and right shifting by 1
+ * so final result is U31.33
+ */
+ numer <<= PQF_PLUS_5;
+
+ /* now calculate the scale factor (aka k3) */
+ k3 = ((mdp_do_div(numer, denom) + 1) >> 1);
+
+ /* check scale factor for legal range [0.25 - 4.0] */
+ if (((k3 >> 4) < (1LL << PQF_MINUS_2)) ||
+ ((k3 >> 4) > (1LL << PQF_PLUS_2))) {
+ return -EINVAL;
+ }
+
+ /* calculate inverse scale factor (aka k1) for phase init */
+ numer = dim_in;
+ denom = dim_out;
+ numer <<= PQF_PLUS_5;
+ k1 = ((mdp_do_div(numer, denom) + 1) >> 1);
+
+ /*
+ * calculate initial phase and ROI overfetch
+ */
+ /* convert point5 & one to S39.24 (will always be positive) */
+ point5 <<= (PQF_PLUS_4 - 1);
+ one <<= PQF_PLUS_4;
+ k2 = ((k1 - one) >> 1);
+ init_phase = (int)(k2 >> 4);
+ k4 = ((k3 - one) >> 1);
+ if (k3 != one) {
+ /* calculate the masks */
+ fract_mask = one - 1;
+ int_mask = ~fract_mask;
+
+ if (!rpa_on) {
+ /*
+ * FIXED POINT IMPLEMENTATION
+ */
+ if (org) {
+ /*
+ * The complicated case; ROI origin != 0
+ * init_phase needs to be adjusted
+ * OF is also position dependent
+ */
+
+ /* map (org - .5) into destination space */
+ Os = ((uint64_t) org << 1) - 1;
+ Od = ((k3 * Os) >> 1) + k4;
+
+ /* take the ceiling */
+ Odprime = (Od & int_mask);
+ if (Odprime != Od)
+ Odprime += one;
+
+ /* now map that back to source space */
+ Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2;
+
+ /* then floor & decrement to calc the required
+ * starting coordinate
+ */
+ Oreq = (Osprime & int_mask) - one;
+
+ /* calculate initial phase */
+ init_phase_temp = Osprime - Oreq;
+ delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
+ init_phase_temp -= delta;
+
+ /* limit to valid range before left shift */
+ delta = (init_phase_temp & (1LL << 63)) ?
+ 4 : -4;
+ delta <<= PQF_PLUS_4;
+ while (abs((int)(init_phase_temp >>
+ PQF_PLUS_4)) > 4)
+ init_phase_temp += delta;
+
+ /*
+ * right shift to account for extra bits of
+ * precision
+ */
+ init_phase = (int)(init_phase_temp >> 4);
+
+ }
+ } else {
+ /*
+ * RPA IMPLEMENTATION
+ *
+ * init_phase needs to be calculated in all RPA_on
+ * cases because it's a numerator, not a fixed
+ * point value.
+ */
+
+ /* map (org - .5) into destination space */
+ Os = ((uint64_t) org << PQF_PLUS_4) - point5;
+ Od = mdp_do_div((dim_out * (Os + point5)),
+ dim_in);
+ Od -= point5;
+
+ /* take the ceiling */
+ Odprime = (Od & int_mask);
+ if (Odprime != Od)
+ Odprime += one;
+
+ /* now map that back to source space */
+ Osprime =
+ mdp_do_div((dim_in * (Odprime + point5)),
+ dim_out);
+ Osprime -= point5;
+
+ /*
+ * then floor & decrement to calculate the required
+ * starting coordinate
+ */
+ Oreq = (Osprime & int_mask) - one;
+
+ /* calculate initial phase */
+ init_phase_temp = Osprime - Oreq;
+ delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
+ init_phase_temp -= delta;
+
+ /* limit to valid range before the left shift */
+ delta = (init_phase_temp & (1LL << 63)) ? 4 : -4;
+ delta <<= PQF_PLUS_4;
+ while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4)
+ init_phase_temp += delta;
+
+ /*
+ * right shift to account for extra bits of precision
+ */
+ init_phase = (int)(init_phase_temp >> 4);
+ }
+ }
+
+ /* return the scale parameters */
+ *phase_init_ptr = init_phase;
+ *phase_step_ptr = (uint32_t) (k1 >> 4);
+
+ return 0;
+}
+
+static int scale_idx(int factor)
+{
+ int idx;
+
+ if (factor > 80)
+ idx = PPP_DOWNSCALE_PT8TOPT1;
+ else if (factor > 60)
+ idx = PPP_DOWNSCALE_PT6TOPT8;
+ else if (factor > 40)
+ idx = PPP_DOWNSCALE_PT4TOPT6;
+ else
+ idx = PPP_DOWNSCALE_PT2TOPT4;
+
+ return idx;
+}
+
+inline int32_t comp_conv_rgb2yuv(int32_t comp, int32_t y_high,
+ int32_t y_low, int32_t c_high, int32_t c_low)
+{
+ if (comp < 0)
+ comp = 0;
+ if (comp > 255)
+ comp = 255;
+
+ /* clamp */
+ if (comp < y_low)
+ comp = y_low;
+ if (comp > y_high)
+ comp = y_high;
+ return comp;
+}
+
+static uint32_t conv_rgb2yuv(uint32_t input_pixel,
+ uint16_t *matrix_vector,
+ uint16_t *bv,
+ uint16_t *clamp_vector)
+{
+ uint8_t input_C2, input_C0, input_C1;
+ uint32_t output;
+ int32_t comp_C2, comp_C1, comp_C0, temp;
+ int32_t temp1, temp2, temp3;
+ int32_t matrix[9];
+ int32_t bias_vector[3];
+ int32_t Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
+ int32_t i;
+
+ input_C2 = (input_pixel >> 16) & 0xFF;
+ input_C1 = (input_pixel >> 8) & 0xFF;
+ input_C0 = (input_pixel >> 0) & 0xFF;
+
+ comp_C0 = input_C0;
+ comp_C1 = input_C1;
+ comp_C2 = input_C2;
+
+ for (i = 0; i < MDP_CSC_SIZE; i++)
+ matrix[i] =
+ ((int32_t) (((int32_t) matrix_vector[i]) << 20)) >> 20;
+
+ bias_vector[0] = (int32_t) (bv[0] & 0xFF);
+ bias_vector[1] = (int32_t) (bv[1] & 0xFF);
+ bias_vector[2] = (int32_t) (bv[2] & 0xFF);
+
+ Y_low_limit = (int32_t) clamp_vector[0];
+ Y_high_limit = (int32_t) clamp_vector[1];
+ C_low_limit = (int32_t) clamp_vector[2];
+ C_high_limit = (int32_t) clamp_vector[3];
+
+ /*
+ * Color Conversion
+ * reorder input colors
+ */
+ temp = comp_C2;
+ comp_C2 = comp_C1;
+ comp_C1 = comp_C0;
+ comp_C0 = temp;
+
+ /* matrix multiplication */
+ temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] +
+ comp_C2 * matrix[2];
+ temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] +
+ comp_C2 * matrix[5];
+ temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] +
+ comp_C2 * matrix[8];
+
+ comp_C0 = temp1 + 0x100;
+ comp_C1 = temp2 + 0x100;
+ comp_C2 = temp3 + 0x100;
+
+ /* take integer part */
+ comp_C0 >>= 9;
+ comp_C1 >>= 9;
+ comp_C2 >>= 9;
+
+ /* post bias (+) */
+ comp_C0 += bias_vector[0];
+ comp_C1 += bias_vector[1];
+ comp_C2 += bias_vector[2];
+
+ /* limit pixel to 8-bit */
+ comp_C0 = comp_conv_rgb2yuv(comp_C0, Y_high_limit,
+ Y_low_limit, C_high_limit, C_low_limit);
+ comp_C1 = comp_conv_rgb2yuv(comp_C1, Y_high_limit,
+ Y_low_limit, C_high_limit, C_low_limit);
+ comp_C2 = comp_conv_rgb2yuv(comp_C2, Y_high_limit,
+ Y_low_limit, C_high_limit, C_low_limit);
+
+ output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
+ return output;
+}
+
+inline void y_h_even_num(struct ppp_img_desc *img)
+{
+ img->roi.y = (img->roi.y / 2) * 2;
+ img->roi.height = (img->roi.height / 2) * 2;
+}
+
+inline void x_w_even_num(struct ppp_img_desc *img)
+{
+ img->roi.x = (img->roi.x / 2) * 2;
+ img->roi.width = (img->roi.width / 2) * 2;
+}
+
+bool check_if_rgb(int color)
+{
+ bool rgb = false;
+
+ switch (color) {
+ case MDP_RGB_565:
+ case MDP_BGR_565:
+ case MDP_RGB_888:
+ case MDP_BGR_888:
+ case MDP_BGRA_8888:
+ case MDP_RGBA_8888:
+ case MDP_ARGB_8888:
+ case MDP_XRGB_8888:
+ case MDP_RGBX_8888:
+ case MDP_BGRX_8888:
+ rgb = true;
+ default:
+ break;
+ }
+ return rgb;
+}
+
+uint8_t *mdp_adjust_rot_addr(struct ppp_blit_op *iBuf,
+ uint8_t *addr, uint32_t bpp, uint32_t uv, uint32_t layer)
+{
+ uint32_t ystride = 0;
+ uint32_t h_slice = 1;
+ uint32_t roi_width = 0;
+ uint32_t roi_height = 0;
+ uint32_t color_fmt = 0;
+
+ if (layer == LAYER_BG) {
+ ystride = iBuf->bg.prop.width * bpp;
+ roi_width = iBuf->bg.roi.width;
+ roi_height = iBuf->bg.roi.height;
+ color_fmt = iBuf->bg.color_fmt;
+ } else {
+ ystride = iBuf->dst.prop.width * bpp;
+ roi_width = iBuf->dst.roi.width;
+ roi_height = iBuf->dst.roi.height;
+ color_fmt = iBuf->dst.color_fmt;
+ }
+ if (uv && ((color_fmt == MDP_Y_CBCR_H2V2) ||
+ (color_fmt == MDP_Y_CRCB_H2V2)))
+ h_slice = 2;
+
+ if (((iBuf->mdp_op & MDPOP_ROT90) == MDPOP_ROT90) ^
+ ((iBuf->mdp_op & MDPOP_LR) == MDPOP_LR)) {
+ addr += (roi_width - MIN(16, roi_width)) * bpp;
+ }
+ if ((iBuf->mdp_op & MDPOP_UD) == MDPOP_UD) {
+ addr += ((roi_height - MIN(16, roi_height))/h_slice) *
+ ystride;
+ }
+
+ return addr;
+}
+
+void mdp_adjust_start_addr(struct ppp_blit_op *blit_op,
+ struct ppp_img_desc *img, int v_slice,
+ int h_slice, uint32_t layer)
+{
+ uint32_t bpp = ppp_bpp(img->color_fmt);
+ int x = img->roi.x;
+ int y = img->roi.y;
+ uint32_t width = img->prop.width;
+
+ if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO && layer == 0)
+ img->p0 += (x + y * ALIGN(width, 32)) * bpp;
+ else if (img->color_fmt == MDP_Y_CBCR_H2V2_VENUS && layer == 0)
+ img->p0 += (x + y * ALIGN(width, 128)) * bpp;
+ else
+ img->p0 += (x + y * width) * bpp;
+ if (layer != LAYER_FG)
+ img->p0 = mdp_adjust_rot_addr(blit_op, img->p0, bpp, 0, layer);
+
+ if (img->p1) {
+ /*
+ * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now
+ * we need to shift x direction same as y dir for offsite
+ */
+ if ((img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO ||
+ img->color_fmt == MDP_Y_CBCR_H2V2_VENUS)
+ && layer == 0)
+ img->p1 += ((x / h_slice) * h_slice + ((y == 0) ? 0 :
+ (((y + 1) / v_slice - 1) * (ALIGN(width/2, 32) * 2))))
+ * bpp;
+ else
+ img->p1 += ((x / h_slice) * h_slice +
+ ((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
+
+ if (layer != LAYER_FG)
+ img->p1 = mdp_adjust_rot_addr(blit_op,
+ img->p1, bpp, 0, layer);
+ }
+}
+
+int load_ppp_lut(int tableType, uint32_t *lut)
+{
+ int i;
+ uint32_t base_addr;
+
+ base_addr = tableType ? MDP3_PPP_POST_LUT : MDP3_PPP_PRE_LUT;
+ for (i = 0; i < PPP_LUT_MAX; i++)
+ PPP_WRITEL(lut[i], base_addr + MDP3_PPP_LUTn(i));
+
+ return 0;
+}
+
+/* Configure Primary CSC Matrix */
+int load_primary_matrix(struct ppp_csc_table *csc)
+{
+ int i;
+
+ for (i = 0; i < MDP_CSC_SIZE; i++)
+ PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_PFMVn(i));
+
+ for (i = 0; i < MDP_CSC_SIZE; i++)
+ PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_PRMVn(i));
+
+ for (i = 0; i < MDP_BV_SIZE; i++)
+ PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_PBVn(i));
+
+ for (i = 0; i < MDP_LV_SIZE; i++)
+ PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_PLVn(i));
+
+ return 0;
+}
+
+/* Load Secondary CSC Matrix */
+int load_secondary_matrix(struct ppp_csc_table *csc)
+{
+ int i;
+
+ for (i = 0; i < MDP_CSC_SIZE; i++)
+ PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_SFMVn(i));
+
+ for (i = 0; i < MDP_CSC_SIZE; i++)
+ PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_SRMVn(i));
+
+ for (i = 0; i < MDP_BV_SIZE; i++)
+ PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_SBVn(i));
+
+ for (i = 0; i < MDP_LV_SIZE; i++)
+ PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_SLVn(i));
+ return 0;
+}
+
+int load_csc_matrix(int matrix_type, struct ppp_csc_table *csc)
+{
+ if (matrix_type == CSC_PRIMARY_MATRIX)
+ return load_primary_matrix(csc);
+
+ return load_secondary_matrix(csc);
+}
+
+int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb)
+{
+ uint32_t val;
+
+ val = ((src->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
+ (src->roi.width & MDP3_PPP_XY_MASK);
+ PPP_WRITEL(val, MDP3_PPP_SRC_SIZE);
+
+ PPP_WRITEL(src->p0, MDP3_PPP_SRCP0_ADDR);
+ PPP_WRITEL(src->p1, MDP3_PPP_SRCP1_ADDR);
+ PPP_WRITEL(src->p3, MDP3_PPP_SRCP3_ADDR);
+
+ val = (src->stride0 & MDP3_PPP_STRIDE_MASK) |
+ ((src->stride1 & MDP3_PPP_STRIDE_MASK) <<
+ MDP3_PPP_STRIDE1_OFFSET);
+ PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE1_ADDR);
+ val = ((src->stride2 & MDP3_PPP_STRIDE_MASK) <<
+ MDP3_PPP_STRIDE1_OFFSET);
+ PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE2_ADDR);
+
+ val = ppp_src_config(src->color_fmt);
+ val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
+ val |= (src->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
+ PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT);
+ PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb),
+ MDP3_PPP_SRC_UNPACK_PATTERN1);
+ return 0;
+}
+
+int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
+{
+ uint32_t val;
+ bool pseudoplanr_output = false;
+
+ switch (dst->color_fmt) {
+ case MDP_Y_CBCR_H2V2:
+ case MDP_Y_CRCB_H2V2:
+ case MDP_Y_CBCR_H2V1:
+ case MDP_Y_CRCB_H2V1:
+ pseudoplanr_output = true;
+ break;
+ default:
+ break;
+ }
+ val = ppp_out_config(dst->color_fmt);
+ if (pseudoplanr_output)
+ val |= PPP_DST_PLANE_PSEUDOPLN;
+ PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT);
+ PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb),
+ MDP3_PPP_OUT_PACK_PATTERN1);
+
+ val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
+ (dst->roi.width & MDP3_PPP_XY_MASK);
+ PPP_WRITEL(val, MDP3_PPP_OUT_SIZE);
+
+ PPP_WRITEL(dst->p0, MDP3_PPP_OUTP0_ADDR);
+ PPP_WRITEL(dst->p1, MDP3_PPP_OUTP1_ADDR);
+ PPP_WRITEL(dst->p3, MDP3_PPP_OUTP3_ADDR);
+
+ val = (dst->stride0 & MDP3_PPP_STRIDE_MASK) |
+ ((dst->stride1 & MDP3_PPP_STRIDE_MASK) <<
+ MDP3_PPP_STRIDE1_OFFSET);
+ PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE1_ADDR);
+ val = ((dst->stride2 & MDP3_PPP_STRIDE_MASK) <<
+ MDP3_PPP_STRIDE1_OFFSET);
+ PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE2_ADDR);
+ return 0;
+}
+
+int config_ppp_background(struct ppp_img_desc *bg, uint32_t yuv2rgb)
+{
+ uint32_t val;
+
+ PPP_WRITEL(bg->p0, MDP3_PPP_BGP0_ADDR);
+ PPP_WRITEL(bg->p1, MDP3_PPP_BGP1_ADDR);
+ PPP_WRITEL(bg->p3, MDP3_PPP_BGP3_ADDR);
+
+ val = (bg->stride0 & MDP3_PPP_STRIDE_MASK) |
+ ((bg->stride1 & MDP3_PPP_STRIDE_MASK) <<
+ MDP3_PPP_STRIDE1_OFFSET);
+ PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE1_ADDR);
+ val = ((bg->stride2 & MDP3_PPP_STRIDE_MASK) <<
+ MDP3_PPP_STRIDE1_OFFSET);
+ PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE2_ADDR);
+
+ PPP_WRITEL(ppp_src_config(bg->color_fmt),
+ MDP3_PPP_BG_FORMAT);
+ PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, yuv2rgb),
+ MDP3_PPP_BG_UNPACK_PATTERN1);
+ return 0;
+}
+
+void ppp_edge_rep_luma_pixel(struct ppp_blit_op *blit_op,
+ struct ppp_edge_rep *er)
+{
+ if (blit_op->mdp_op & MDPOP_ASCALE) {
+
+ er->is_scale_enabled = 1;
+
+ if (blit_op->mdp_op & MDPOP_ROT90) {
+ er->dst_roi_width = blit_op->dst.roi.height;
+ er->dst_roi_height = blit_op->dst.roi.width;
+ } else {
+ er->dst_roi_width = blit_op->dst.roi.width;
+ er->dst_roi_height = blit_op->dst.roi.height;
+ }
+
+ /*
+ * Find out the luma pixels needed for scaling in the
+ * x direction (LEFT and RIGHT). Locations of pixels are
+ * relative to the ROI. Upper-left corner of ROI corresponds
+ * to coordinates (0,0). Also set the number of luma pixel
+ * to repeat.
+ */
+ if (blit_op->src.roi.width > 3 * er->dst_roi_width) {
+ /* scale factor < 1/3 */
+ er->luma_interp_point_right =
+ (blit_op->src.roi.width - 1);
+ } else if (blit_op->src.roi.width == 3 * er->dst_roi_width) {
+ /* scale factor == 1/3 */
+ er->luma_interp_point_right =
+ (blit_op->src.roi.width - 1) + 1;
+ er->luma_repeat_right = 1;
+ } else if ((blit_op->src.roi.width > er->dst_roi_width) &&
+ (blit_op->src.roi.width < 3 * er->dst_roi_width)) {
+ /* 1/3 < scale factor < 1 */
+ er->luma_interp_point_left = -1;
+ er->luma_interp_point_right =
+ (blit_op->src.roi.width - 1) + 1;
+ er->luma_repeat_left = 1;
+ er->luma_repeat_right = 1;
+ } else if (blit_op->src.roi.width == er->dst_roi_width) {
+ /* scale factor == 1 */
+ er->luma_interp_point_left = -1;
+ er->luma_interp_point_right =
+ (blit_op->src.roi.width - 1) + 2;
+ er->luma_repeat_left = 1;
+ er->luma_repeat_right = 2;
+ } else {
+ /* scale factor > 1 */
+ er->luma_interp_point_left = -2;
+ er->luma_interp_point_right =
+ (blit_op->src.roi.width - 1) + 2;
+ er->luma_repeat_left = 2;
+ er->luma_repeat_right = 2;
+ }
+
+ /*
+ * Find out the number of pixels needed for scaling in the
+ * y direction (TOP and BOTTOM). Locations of pixels are
+ * relative to the ROI. Upper-left corner of ROI corresponds
+ * to coordinates (0,0). Also set the number of luma pixel
+ * to repeat.
+ */
+ if (blit_op->src.roi.height > 3 * er->dst_roi_height) {
+ er->luma_interp_point_bottom =
+ (blit_op->src.roi.height - 1);
+ } else if (blit_op->src.roi.height == 3 * er->dst_roi_height) {
+ er->luma_interp_point_bottom =
+ (blit_op->src.roi.height - 1) + 1;
+ er->luma_repeat_bottom = 1;
+ } else if ((blit_op->src.roi.height > er->dst_roi_height) &&
+ (blit_op->src.roi.height < 3 * er->dst_roi_height)) {
+ er->luma_interp_point_top = -1;
+ er->luma_interp_point_bottom =
+ (blit_op->src.roi.height - 1) + 1;
+ er->luma_repeat_top = 1;
+ er->luma_repeat_bottom = 1;
+ } else if (blit_op->src.roi.height == er->dst_roi_height) {
+ er->luma_interp_point_top = -1;
+ er->luma_interp_point_bottom =
+ (blit_op->src.roi.height - 1) + 2;
+ er->luma_repeat_top = 1;
+ er->luma_repeat_bottom = 2;
+ } else {
+ er->luma_interp_point_top = -2;
+ er->luma_interp_point_bottom =
+ (blit_op->src.roi.height - 1) + 2;
+ er->luma_repeat_top = 2;
+ er->luma_repeat_bottom = 2;
+ }
+ } else {
+ /*
+ * Since no scaling needed, Tile Fetch does not require any
+ * more luma pixel than what the ROI contains.
+ */
+ er->luma_interp_point_right =
+ (int32_t) (blit_op->src.roi.width - 1);
+ er->luma_interp_point_bottom =
+ (int32_t) (blit_op->src.roi.height - 1);
+ }
+ /* After adding the ROI offsets, we have locations of
+ * luma_interp_points relative to the image.
+ */
+ er->luma_interp_point_left += (int32_t) (blit_op->src.roi.x);
+ er->luma_interp_point_right += (int32_t) (blit_op->src.roi.x);
+ er->luma_interp_point_top += (int32_t) (blit_op->src.roi.y);
+ er->luma_interp_point_bottom += (int32_t) (blit_op->src.roi.y);
+}
+
+void ppp_edge_rep_chroma_pixel(struct ppp_blit_op *blit_op,
+ struct ppp_edge_rep *er)
+{
+ bool chroma_edge_enable = true;
+ uint32_t is_yuv_offsite_vertical = 0;
+
+ /* find out which chroma pixels are needed for chroma upsampling. */
+ switch (blit_op->src.color_fmt) {
+ case MDP_Y_CBCR_H2V1:
+ case MDP_Y_CRCB_H2V1:
+ case MDP_YCRYCB_H2V1:
+ er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
+ er->chroma_interp_point_right =
+ (er->luma_interp_point_right + 1) >> 1;
+ er->chroma_interp_point_top = er->luma_interp_point_top;
+ er->chroma_interp_point_bottom = er->luma_interp_point_bottom;
+ break;
+
+ case MDP_Y_CBCR_H2V2:
+ case MDP_Y_CBCR_H2V2_ADRENO:
+ case MDP_Y_CBCR_H2V2_VENUS:
+ case MDP_Y_CRCB_H2V2:
+ er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
+ er->chroma_interp_point_right =
+ (er->luma_interp_point_right + 1) >> 1;
+ er->chroma_interp_point_top =
+ (er->luma_interp_point_top - 1) >> 1;
+ er->chroma_interp_point_bottom =
+ (er->luma_interp_point_bottom + 1) >> 1;
+ is_yuv_offsite_vertical = 1;
+ break;
+
+ default:
+ chroma_edge_enable = false;
+ er->chroma_interp_point_left = er->luma_interp_point_left;
+ er->chroma_interp_point_right = er->luma_interp_point_right;
+ er->chroma_interp_point_top = er->luma_interp_point_top;
+ er->chroma_interp_point_bottom = er->luma_interp_point_bottom;
+
+ break;
+ }
+
+ if (chroma_edge_enable) {
+ /* Defines which chroma pixels belongs to the roi */
+ switch (blit_op->src.color_fmt) {
+ case MDP_Y_CBCR_H2V1:
+ case MDP_Y_CRCB_H2V1:
+ case MDP_YCRYCB_H2V1:
+ er->chroma_bound_left = blit_op->src.roi.x / 2;
+ /* there are half as many chroma pixel as luma pixels */
+ er->chroma_bound_right =
+ (blit_op->src.roi.width +
+ blit_op->src.roi.x - 1) / 2;
+ er->chroma_bound_top = blit_op->src.roi.y;
+ er->chroma_bound_bottom =
+ (blit_op->src.roi.height + blit_op->src.roi.y - 1);
+ break;
+ case MDP_Y_CBCR_H2V2:
+ case MDP_Y_CBCR_H2V2_ADRENO:
+ case MDP_Y_CBCR_H2V2_VENUS:
+ case MDP_Y_CRCB_H2V2:
+ /*
+ * cosite in horizontal dir, and offsite in vertical dir
+ * width of chroma ROI is 1/2 of size of luma ROI
+ * height of chroma ROI is 1/2 of size of luma ROI
+ */
+ er->chroma_bound_left = blit_op->src.roi.x / 2;
+ er->chroma_bound_right =
+ (blit_op->src.roi.width +
+ blit_op->src.roi.x - 1) / 2;
+ er->chroma_bound_top = blit_op->src.roi.y / 2;
+ er->chroma_bound_bottom =
+ (blit_op->src.roi.height +
+ blit_op->src.roi.y - 1) / 2;
+ break;
+
+ default:
+ /*
+ * If no valid chroma sub-sampling format specified,
+ * assume 4:4:4 ( i.e. fully sampled).
+ */
+ er->chroma_bound_left = blit_op->src.roi.x;
+ er->chroma_bound_right = blit_op->src.roi.width +
+ blit_op->src.roi.x - 1;
+ er->chroma_bound_top = blit_op->src.roi.y;
+ er->chroma_bound_bottom =
+ (blit_op->src.roi.height + blit_op->src.roi.y - 1);
+ break;
+ }
+
+ /*
+ * Knowing which chroma pixels are needed, and which chroma
+ * pixels belong to the ROI (i.e. available for fetching ),
+ * calculate how many chroma pixels Tile Fetch needs to
+ * duplicate. If any required chroma pixels falls outside
+ * of the ROI, Tile Fetch must obtain them by replicating
+ * pixels.
+ */
+ if (er->chroma_bound_left > er->chroma_interp_point_left)
+ er->chroma_repeat_left =
+ er->chroma_bound_left -
+ er->chroma_interp_point_left;
+ else
+ er->chroma_repeat_left = 0;
+
+ if (er->chroma_interp_point_right > er->chroma_bound_right)
+ er->chroma_repeat_right =
+ er->chroma_interp_point_right -
+ er->chroma_bound_right;
+ else
+ er->chroma_repeat_right = 0;
+
+ if (er->chroma_bound_top > er->chroma_interp_point_top)
+ er->chroma_repeat_top =
+ er->chroma_bound_top -
+ er->chroma_interp_point_top;
+ else
+ er->chroma_repeat_top = 0;
+
+ if (er->chroma_interp_point_bottom > er->chroma_bound_bottom)
+ er->chroma_repeat_bottom =
+ er->chroma_interp_point_bottom -
+ er->chroma_bound_bottom;
+ else
+ er->chroma_repeat_bottom = 0;
+
+ if (er->is_scale_enabled && (blit_op->src.roi.height == 1)
+ && is_yuv_offsite_vertical) {
+ er->chroma_repeat_bottom = 3;
+ er->chroma_repeat_top = 0;
+ }
+ }
+}
+
+/*
+ * config_ppp_edge_rep() - program the PPP source edge-repeat register.
+ *
+ * Computes how many luma/chroma pixels (0..3) the tile fetcher must
+ * replicate on each edge of the source ROI and writes the packed
+ * result to MDP3_PPP_SRC_EDGE_REP.
+ *
+ * Return: 0 on success, -EINVAL if any computed repeat count falls
+ * outside the 2-bit range the register fields can hold.
+ */
+int config_ppp_edge_rep(struct ppp_blit_op *blit_op)
+{
+	uint32_t reg = 0;
+	struct ppp_edge_rep er;
+
+	memset(&er, 0, sizeof(er));
+
+	ppp_edge_rep_luma_pixel(blit_op, &er);
+
+	/*
+	 * After adding the ROI offsets, we have locations of
+	 * chroma_interp_points relative to the image.
+	 */
+	er.chroma_interp_point_left = er.luma_interp_point_left;
+	er.chroma_interp_point_right = er.luma_interp_point_right;
+	er.chroma_interp_point_top = er.luma_interp_point_top;
+	er.chroma_interp_point_bottom = er.luma_interp_point_bottom;
+
+	ppp_edge_rep_chroma_pixel(blit_op, &er);
+	/* ensure repeats are >=0 and no larger than 3 pixels */
+	/*
+	 * NOTE(review): the "< 0" checks are meaningful only if the
+	 * repeat fields in struct ppp_edge_rep are signed -- confirm
+	 * against the struct definition.
+	 */
+	if ((er.chroma_repeat_left < 0) || (er.chroma_repeat_right < 0) ||
+		(er.chroma_repeat_top < 0) || (er.chroma_repeat_bottom < 0))
+		return -EINVAL;
+	if ((er.chroma_repeat_left > 3) || (er.chroma_repeat_right > 3) ||
+		(er.chroma_repeat_top > 3) || (er.chroma_repeat_bottom > 3))
+		return -EINVAL;
+	if ((er.luma_repeat_left < 0) || (er.luma_repeat_right < 0) ||
+		(er.luma_repeat_top < 0) || (er.luma_repeat_bottom < 0))
+		return -EINVAL;
+	if ((er.luma_repeat_left > 3) || (er.luma_repeat_right > 3) ||
+		(er.luma_repeat_top > 3) || (er.luma_repeat_bottom > 3))
+		return -EINVAL;
+
+	/* pack each validated 2-bit repeat count into its field */
+	reg |= (er.chroma_repeat_left & 3) << MDP_LEFT_CHROMA;
+	reg |= (er.chroma_repeat_right & 3) << MDP_RIGHT_CHROMA;
+	reg |= (er.chroma_repeat_top & 3) << MDP_TOP_CHROMA;
+	reg |= (er.chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA;
+	reg |= (er.luma_repeat_left & 3) << MDP_LEFT_LUMA;
+	reg |= (er.luma_repeat_right & 3) << MDP_RIGHT_LUMA;
+	reg |= (er.luma_repeat_top & 3) << MDP_TOP_LUMA;
+	reg |= (er.luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA;
+	PPP_WRITEL(reg, MDP3_PPP_SRC_EDGE_REP);
+	return 0;
+}
+
+/*
+ * config_ppp_bg_edge_rep() - program the PPP background edge-repeat
+ * register for chroma-subsampled destination formats. For 4:4:4 and
+ * other formats no replication is requested. Returns 0.
+ */
+int config_ppp_bg_edge_rep(struct ppp_blit_op *blit_op)
+{
+	struct ppp_img_desc *dst = &blit_op->dst;
+	uint32_t edge_mask = 0;
+	bool right_edge_even;
+
+	/*
+	 * Right-edge chroma needs replication only when the ROI touches
+	 * the right border of the surface and the ROI width is even.
+	 */
+	right_edge_even = ((dst->roi.x + dst->roi.width) == dst->prop.width)
+			&& ((dst->roi.width % 2) == 0);
+
+	switch (dst->color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		/* vertically subsampled: top/bottom edges also matter */
+		if (dst->roi.y == 0)
+			edge_mask |= BIT(MDP_TOP_CHROMA);
+		if ((dst->roi.y + dst->roi.height) == dst->prop.height)
+			edge_mask |= BIT(MDP_BOTTOM_CHROMA);
+		if (right_edge_even)
+			edge_mask |= BIT(MDP_RIGHT_CHROMA);
+		break;
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		/* horizontally subsampled only */
+		if (right_edge_even)
+			edge_mask |= BIT(MDP_RIGHT_CHROMA);
+		break;
+	default:
+		break;
+	}
+	PPP_WRITEL(edge_mask, MDP3_PPP_BG_EDGE_REP);
+	return 0;
+}
+
+/*
+ * config_ppp_lut() - set the per-component LUT enable bits in the PPP
+ * operation register. Each non-zero *_en argument enables the LUT for
+ * the corresponding color component. Returns 0.
+ */
+int config_ppp_lut(uint32_t *pppop_reg_ptr, int lut_c0_en,
+	int lut_c1_en, int lut_c2_en)
+{
+	uint32_t lut_bits = 0;
+
+	/* collect the requested enables, then OR them in one shot */
+	if (lut_c0_en)
+		lut_bits |= MDP_LUT_C0_EN;
+	if (lut_c1_en)
+		lut_bits |= MDP_LUT_C1_EN;
+	if (lut_c2_en)
+		lut_bits |= MDP_LUT_C2_EN;
+
+	*pppop_reg_ptr |= lut_bits;
+	return 0;
+}
+
+/*
+ * config_ppp_scale() - configure PPP scaling for the current blit.
+ *
+ * When MDPOP_ASCALE is requested and the (rotation-adjusted) dst ROI
+ * differs from the src ROI, or MDPOP_BLUR is set, programs the scaler
+ * phase init/step registers and loads the matching coefficient tables.
+ * Always finishes by programming the FG and BG edge-repeat registers.
+ * Returns 0.
+ */
+int config_ppp_scale(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr)
+{
+	struct ppp_img_desc *src = &blit_op->src;
+	struct ppp_img_desc *dst = &blit_op->dst;
+	uint32_t dstW, dstH;
+	uint32_t x_fac, y_fac;
+	uint32_t mdp_blur = 0;
+	uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
+	int x_idx, y_idx;
+
+	if (blit_op->mdp_op & MDPOP_ASCALE) {
+		/* with 90-degree rotation the destination ROI axes are
+		 * swapped relative to the source */
+		if (blit_op->mdp_op & MDPOP_ROT90) {
+			dstW = dst->roi.height;
+			dstH = dst->roi.width;
+		} else {
+			dstW = dst->roi.width;
+			dstH = dst->roi.height;
+		}
+		*pppop_reg_ptr |=
+			(PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+
+		mdp_blur = blit_op->mdp_op & MDPOP_BLUR;
+
+		if ((dstW != src->roi.width) ||
+			(dstH != src->roi.height) || mdp_blur) {
+
+			/*
+			 * Use source origin as 0 for computing initial
+			 * phase and step size. Incorrect initial phase and
+			 * step size value results in green line issue.
+			 */
+			mdp_calc_scale_params(0,
+				blit_op->src.roi.width,
+				dstW, 1, &phase_init_x,
+				&phase_step_x);
+			mdp_calc_scale_params(0,
+				blit_op->src.roi.height,
+				dstH, 0, &phase_init_y,
+				&phase_step_y);
+
+			PPP_WRITEL(phase_init_x, MDP3_PPP_SCALE_PHASEX_INIT);
+			PPP_WRITEL(phase_init_y, MDP3_PPP_SCALE_PHASEY_INIT);
+			PPP_WRITEL(phase_step_x, MDP3_PPP_SCALE_PHASEX_STEP);
+			PPP_WRITEL(phase_step_y, MDP3_PPP_SCALE_PHASEY_STEP);
+
+
+			/* upscale in either direction loads the up-scale
+			 * LUT; blur loads the gaussian LUT afterwards */
+			if (dstW > src->roi.width || dstH > src->roi.height)
+				ppp_load_up_lut();
+
+			if (mdp_blur)
+				ppp_load_gaussian_lut();
+
+			/* downscale: pick the coefficient table from the
+			 * integer percentage scale factor */
+			if (dstW <= src->roi.width) {
+				x_fac = (dstW * 100) / src->roi.width;
+				x_idx = scale_idx(x_fac);
+				ppp_load_x_scale_table(x_idx);
+			}
+			if (dstH <= src->roi.height) {
+				y_fac = (dstH * 100) / src->roi.height;
+				y_idx = scale_idx(y_fac);
+				ppp_load_y_scale_table(y_idx);
+			}
+
+		} else {
+			/*
+			 * 1:1 and no blur: drop the scale request.
+			 * NOTE(review): PPP_OP_SCALE_X/Y_ON were already set
+			 * above and are not cleared here -- presumably
+			 * harmless for 1:1 scaling, but confirm.
+			 */
+			blit_op->mdp_op &= ~(MDPOP_ASCALE);
+		}
+	}
+	config_ppp_edge_rep(blit_op);
+	config_ppp_bg_edge_rep(blit_op);
+	return 0;
+}
+
+/*
+ * config_ppp_csc() - enable color-space conversion when exactly one of
+ * src/dst is RGB. YUV->RGB additionally selects the reverse matrix via
+ * PPP_OP_CONVERT_YCBCR2RGB. Returns 0.
+ */
+int config_ppp_csc(int src_color, int dst_color, uint32_t *pppop_reg_ptr)
+{
+	bool src_is_rgb = check_if_rgb(src_color);
+	bool dst_is_rgb = check_if_rgb(dst_color);
+
+	/* same color space on both sides: no conversion needed */
+	if (src_is_rgb == dst_is_rgb)
+		return 0;
+
+	if (src_is_rgb)
+		*pppop_reg_ptr |= PPP_OP_CONVERT_ON;
+	else
+		*pppop_reg_ptr |= PPP_OP_CONVERT_YCBCR2RGB |
+			PPP_OP_CONVERT_ON;
+
+	return 0;
+}
+
+/*
+ * config_ppp_blend() - program blend configuration for the blit.
+ *
+ * Sets the PPP_OP_BLEND_* bits in @pppop_reg_ptr and programs the
+ * BG-alpha-select and blend-parameter registers, based on per-pixel
+ * alpha support of the source format, the MDPOP_ALPHAB/MDPOP_TRANSP/
+ * MDPOP_LAYER_IS_FG flags, and (for YUV smart blit) the cached
+ * @smart_blit_bg_alpha. May clear MDPOP_ALPHAB in blit_op->mdp_op when
+ * blending would be a no-op. Returns 0.
+ */
+int config_ppp_blend(struct ppp_blit_op *blit_op,
+	uint32_t *pppop_reg_ptr,
+	bool is_yuv_smart_blit, int smart_blit_bg_alpha)
+{
+	struct ppp_csc_table *csc;
+	uint32_t alpha, trans_color;
+	uint32_t val = 0;
+	int c_fmt = blit_op->src.color_fmt;
+	int bg_alpha;
+
+	csc = ppp_csc_rgb2yuv();
+	alpha = blit_op->blend.const_alpha;
+	trans_color = blit_op->blend.trans_color;
+	/* FG premultiplied-alpha path (per the flag name) */
+	if (blit_op->mdp_op & MDPOP_FG_PM_ALPHA) {
+		if (ppp_per_p_alpha(c_fmt)) {
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_CONSTANT_ALPHA;
+		} else {
+			/* fully opaque constant alpha: blend is a no-op */
+			if ((blit_op->mdp_op & MDPOP_ALPHAB)
+				&& (blit_op->blend.const_alpha == 0xff)) {
+				blit_op->mdp_op &= ~(MDPOP_ALPHAB);
+			}
+
+			if ((blit_op->mdp_op & MDPOP_ALPHAB)
+				|| (blit_op->mdp_op & MDPOP_TRANSP)) {
+
+				*pppop_reg_ptr |= PPP_OP_ROT_ON |
+					PPP_OP_BLEND_ON |
+					PPP_OP_BLEND_CONSTANT_ALPHA |
+					PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
+			}
+		}
+
+		bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+			PPP_BLEND_BG_ALPHA_REVERSE;
+
+		if ((ppp_per_p_alpha(c_fmt)) && !(blit_op->mdp_op &
+			MDPOP_LAYER_IS_FG)) {
+			bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
+		} else {
+			/* constant alpha lives in bits 31:24 */
+			bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
+			bg_alpha |= blit_op->blend.const_alpha << 24;
+		}
+		PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+
+		if (blit_op->mdp_op & MDPOP_TRANSP)
+			*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
+	} else if (ppp_per_p_alpha(c_fmt)) {
+		if (blit_op->mdp_op & MDPOP_LAYER_IS_FG)
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_CONSTANT_ALPHA;
+		else
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_SRCPIXEL_ALPHA;
+		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+	} else {
+		/* fully opaque constant alpha: blend is a no-op */
+		if ((blit_op->mdp_op & MDPOP_ALPHAB)
+			&& (blit_op->blend.const_alpha == 0xff)) {
+			blit_op->mdp_op &=
+				~(MDPOP_ALPHAB);
+		}
+
+		if ((blit_op->mdp_op & MDPOP_ALPHAB)
+			|| (blit_op->mdp_op & MDPOP_TRANSP)) {
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_CONSTANT_ALPHA |
+				PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
+		}
+
+		if (blit_op->mdp_op & MDPOP_TRANSP)
+			*pppop_reg_ptr |=
+				PPP_BLEND_CALPHA_TRNASP;
+		if (is_yuv_smart_blit) {
+			/* blend in reverse against the cached BG layer */
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_BG_ALPHA |
+				PPP_OP_BLEND_EQ_REVERSE;
+
+			if (smart_blit_bg_alpha < 0xFF)
+				bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+					PPP_BLEND_BG_DSTPIXEL_ALPHA;
+			else
+				bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+					PPP_BLEND_BG_DSTPIXEL_ALPHA |
+					PPP_BLEND_BG_CONSTANT_ALPHA;
+
+			bg_alpha |= smart_blit_bg_alpha << 24;
+			PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+		} else {
+			PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+		}
+	}
+
+	if (*pppop_reg_ptr & PPP_OP_BLEND_ON) {
+		if (is_yuv_smart_blit)
+			config_ppp_background(&blit_op->bg, 1);
+		else
+			config_ppp_background(&blit_op->bg, 0);
+
+		if (blit_op->dst.color_fmt == MDP_YCRYCB_H2V1) {
+			*pppop_reg_ptr |= PPP_OP_BG_CHROMA_H2V1;
+			/* transparent color must be matched in YUV space */
+			if (blit_op->mdp_op & MDPOP_TRANSP) {
+				trans_color = conv_rgb2yuv(trans_color,
+					&csc->fwd_matrix[0],
+					&csc->bv[0],
+					&csc->lv[0]);
+			}
+		}
+	}
+	if (is_yuv_smart_blit) {
+		PPP_WRITEL(0, MDP3_PPP_BLEND_PARAM);
+	} else {
+		val = (alpha << MDP_BLEND_CONST_ALPHA);
+		val |= (trans_color & MDP_BLEND_TRASP_COL_MASK);
+		PPP_WRITEL(val, MDP3_PPP_BLEND_PARAM);
+	}
+	return 0;
+}
+
+/*
+ * config_ppp_rotation() - translate MDPOP_* rotate/flip request bits
+ * into PPP_OP_* bits and OR them into the operation register. Always
+ * sets PPP_OP_ROT_ON. Returns 0.
+ */
+int config_ppp_rotation(uint32_t mdp_op, uint32_t *pppop_reg_ptr)
+{
+	uint32_t rot_bits = PPP_OP_ROT_ON;
+
+	if (mdp_op & MDPOP_ROT90)
+		rot_bits |= PPP_OP_ROT_90;
+	if (mdp_op & MDPOP_LR)
+		rot_bits |= PPP_OP_FLIP_LR;
+	if (mdp_op & MDPOP_UD)
+		rot_bits |= PPP_OP_FLIP_UD;
+
+	*pppop_reg_ptr |= rot_bits;
+	return 0;
+}
+
+/*
+ * config_ppp_op_mode() - build and program the PPP operation-mode
+ * register for one blit: chroma slicing, CSC, dither, rotation,
+ * strides, start addresses, scaling and blending.
+ *
+ * Keeps static state (bg_img_param/bg_alpha/bg_mdp_ops) across calls
+ * so a smart-blit BG request can be paired with the following FG
+ * request. Returns 0.
+ */
+int config_ppp_op_mode(struct ppp_blit_op *blit_op)
+{
+	uint32_t yuv2rgb;
+	uint32_t ppp_operation_reg = 0;
+	int sv_slice, sh_slice;
+	int dv_slice, dh_slice;
+	static struct ppp_img_desc bg_img_param;
+	static int bg_alpha;
+	static int bg_mdp_ops;
+	bool is_yuv_smart_blit = false;
+
+	/*
+	 * Detect YUV smart blit:
+	 * if the cached BG image plane 0 address is not NULL and the
+	 * source color format is YUV, then it is a YUV smart blit --
+	 * mark is_yuv_smart_blit true.
+	 */
+	if ((bg_img_param.p0) &&
+		(!(check_if_rgb(blit_op->src.color_fmt))))
+		is_yuv_smart_blit = true;
+
+	sv_slice = sh_slice = dv_slice = dh_slice = 1;
+
+	ppp_operation_reg |= ppp_dst_op_reg(blit_op->dst.color_fmt);
+	switch (blit_op->dst.color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		y_h_even_num(&blit_op->dst);
+		y_h_even_num(&blit_op->src);
+		dv_slice = 2;
+		/* fall-through */
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		x_w_even_num(&blit_op->dst);
+		x_w_even_num(&blit_op->src);
+		dh_slice = 2;
+		break;
+	default:
+		break;
+	}
+
+	ppp_operation_reg |= ppp_src_op_reg(blit_op->src.color_fmt);
+	switch (blit_op->src.color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V2_ADRENO:
+	case MDP_Y_CBCR_H2V2_VENUS:
+	case MDP_Y_CRCB_H2V2:
+		sh_slice = sv_slice = 2;
+		break;
+	case MDP_YCRYCB_H2V1:
+		x_w_even_num(&blit_op->dst);
+		x_w_even_num(&blit_op->src);
+		/* fall-through */
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		sh_slice = 2;
+		break;
+	default:
+		break;
+	}
+
+	config_ppp_csc(blit_op->src.color_fmt,
+		blit_op->dst.color_fmt, &ppp_operation_reg);
+	yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB;
+
+	if (blit_op->mdp_op & MDPOP_DITHER)
+		ppp_operation_reg |= PPP_OP_DITHER_EN;
+
+	if (blit_op->mdp_op & MDPOP_ROTATION)
+		config_ppp_rotation(blit_op->mdp_op, &ppp_operation_reg);
+
+	/* source strides depend on the allocator-specific alignment */
+	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO) {
+		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 32) *
+			ppp_bpp(blit_op->src.color_fmt);
+		blit_op->src.stride1 = 2 * ALIGN(blit_op->src.prop.width/2, 32);
+	} else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) {
+		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 128) *
+			ppp_bpp(blit_op->src.color_fmt);
+		blit_op->src.stride1 = blit_op->src.stride0;
+	} else {
+		blit_op->src.stride0 = blit_op->src.prop.width *
+			ppp_bpp(blit_op->src.color_fmt);
+		blit_op->src.stride1 = blit_op->src.stride0;
+	}
+
+	blit_op->dst.stride0 = blit_op->dst.prop.width *
+		ppp_bpp(blit_op->dst.color_fmt);
+
+	if (ppp_multi_plane(blit_op->dst.color_fmt)) {
+		blit_op->dst.p1 = blit_op->dst.p0;
+		blit_op->dst.p1 += blit_op->dst.prop.width *
+			blit_op->dst.prop.height *
+			ppp_bpp(blit_op->dst.color_fmt);
+	} else {
+		blit_op->dst.p1 = NULL;
+	}
+
+	if ((bg_img_param.p0) && (!(blit_op->mdp_op & MDPOP_SMART_BLIT))) {
+		/*
+		 * Use cached smart blit BG layer info in
+		 * smart Blit FG request
+		 */
+		blit_op->bg = bg_img_param;
+		if (check_if_rgb(blit_op->bg.color_fmt)) {
+			blit_op->bg.p1 = 0;
+			blit_op->bg.stride1 = 0;
+		}
+		memset(&bg_img_param, 0, sizeof(bg_img_param));
+	} else {
+		blit_op->bg = blit_op->dst;
+	}
+	/* Cache smart blit BG layer info */
+	if (blit_op->mdp_op & MDPOP_SMART_BLIT)
+		bg_img_param = blit_op->src;
+
+	/* Jumping from Y-Plane to Chroma Plane */
+	/* first pixel addr calculation */
+	mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice,
+				sh_slice, LAYER_FG);
+	mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice,
+				dh_slice, LAYER_BG);
+	mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice,
+				dh_slice, LAYER_FB);
+
+	config_ppp_scale(blit_op, &ppp_operation_reg);
+
+	config_ppp_blend(blit_op, &ppp_operation_reg, is_yuv_smart_blit,
+		bg_alpha);
+
+	config_ppp_src(&blit_op->src, yuv2rgb);
+	config_ppp_out(&blit_op->dst, yuv2rgb);
+
+	/*
+	 * Cache Smart blit BG alpha and MDP OP values.
+	 * NOTE(review): bg_mdp_ops is written but never read in this
+	 * function -- dead store unless a later change consumes it.
+	 */
+	if (blit_op->mdp_op & MDPOP_SMART_BLIT) {
+		bg_alpha = blit_op->blend.const_alpha;
+		bg_mdp_ops = blit_op->mdp_op;
+	} else {
+		bg_alpha = 0;
+		bg_mdp_ops = 0;
+	}
+	pr_debug("BLIT FG Param Fmt %d (x %d,y %d,w %d,h %d), ",
+		blit_op->src.color_fmt, blit_op->src.prop.x,
+		blit_op->src.prop.y, blit_op->src.prop.width,
+		blit_op->src.prop.height);
+	pr_debug("ROI(x %d,y %d,w %d, h %d) ",
+		blit_op->src.roi.x, blit_op->src.roi.y,
+		blit_op->src.roi.width, blit_op->src.roi.height);
+	pr_debug("Addr_P0 %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
+		blit_op->src.p0, blit_op->src.stride0,
+		blit_op->src.p1, blit_op->src.stride1);
+
+	if (blit_op->bg.p0 != blit_op->dst.p0) {
+		pr_debug("BLIT BG Param Fmt %d (x %d,y %d,w %d,h %d), ",
+			blit_op->bg.color_fmt, blit_op->bg.prop.x,
+			blit_op->bg.prop.y, blit_op->bg.prop.width,
+			blit_op->bg.prop.height);
+		pr_debug("ROI(x %d,y %d, w %d, h %d) ",
+			blit_op->bg.roi.x, blit_op->bg.roi.y,
+			blit_op->bg.roi.width, blit_op->bg.roi.height);
+		pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
+			blit_op->bg.p0, blit_op->bg.stride0,
+			blit_op->bg.p1, blit_op->bg.stride1);
+	}
+	pr_debug("BLIT FB Param Fmt %d (x %d,y %d,w %d,h %d), ",
+		blit_op->dst.color_fmt, blit_op->dst.prop.x,
+		blit_op->dst.prop.y, blit_op->dst.prop.width,
+		blit_op->dst.prop.height);
+	pr_debug("ROI(x %d,y %d, w %d, h %d) ",
+		blit_op->dst.roi.x, blit_op->dst.roi.y,
+		blit_op->dst.roi.width, blit_op->dst.roi.height);
+	/* use %pK (not %p) so kernel addresses stay masked from
+	 * unprivileged readers, matching the FG/BG prints above */
+	pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
+		blit_op->dst.p0, blit_op->dst.stride0,
+		blit_op->dst.p1, blit_op->dst.stride1);
+
+	PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
+	mb(); /* make sure everything is written before enable */
+	MDSS_XLOG(ppp_operation_reg, blit_op->src.roi.x, blit_op->src.roi.y,
+		blit_op->src.roi.width, blit_op->src.roi.height);
+	MDSS_XLOG(blit_op->dst.roi.x, blit_op->dst.roi.y,
+		blit_op->dst.roi.width, blit_op->dst.roi.height);
+	return 0;
+}
+
+/*
+ * ppp_enable() - kick off the PPP engine by writing 0x1000 to register
+ * offset 0x30. NOTE(review): both values are magic numbers -- presumably
+ * the PPP start/command register; named constants would be clearer.
+ */
+void ppp_enable(void)
+{
+	PPP_WRITEL(0x1000, 0x30);
+	mb(); /* make sure everything is written before enable */
+}
+
+/*
+ * mdp3_ppp_init() - one-time PPP setup: load the default pre/post LUT
+ * tables and the primary (RGB->YUV) and secondary CSC matrices.
+ * Always returns 0.
+ */
+int mdp3_ppp_init(void)
+{
+	load_ppp_lut(LUT_PRE_TABLE, ppp_default_pre_lut());
+	load_ppp_lut(LUT_POST_TABLE, ppp_default_post_lut());
+	load_csc_matrix(CSC_PRIMARY_MATRIX, ppp_csc_rgb2yuv());
+	load_csc_matrix(CSC_SECONDARY_MATRIX, ppp_csc_table2());
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
new file mode 100644
index 0000000..17bad06
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -0,0 +1,603 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_H
+#define MDSS_H
+
+#include <linux/msm_ion.h>
+#include <linux/msm_mdp.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/irqreturn.h>
+#include <linux/irqdomain.h>
+#include <linux/mdss_io_util.h>
+
+#include <linux/msm-bus.h>
+#include <linux/file.h>
+#include <linux/dma-direction.h>
+
+#include "mdss_panel.h"
+
+#define MAX_DRV_SUP_MMB_BLKS 44
+#define MAX_DRV_SUP_PIPES 10
+#define MAX_CLIENT_NAME_LEN 20
+
+#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default"
+#define MDSS_PINCTRL_STATE_SLEEP "mdss_sleep"
+
+enum mdss_mdp_clk_type {
+ MDSS_CLK_AHB,
+ MDSS_CLK_AXI,
+ MDSS_CLK_MDP_CORE,
+ MDSS_CLK_MDP_LUT,
+ MDSS_CLK_MDP_VSYNC,
+ MDSS_MAX_CLK
+};
+
+enum mdss_iommu_domain_type {
+ MDSS_IOMMU_DOMAIN_UNSECURE,
+ MDSS_IOMMU_DOMAIN_ROT_UNSECURE,
+ MDSS_IOMMU_DOMAIN_SECURE,
+ MDSS_IOMMU_DOMAIN_ROT_SECURE,
+ MDSS_IOMMU_MAX_DOMAIN
+};
+
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MID,
+ VOTE_INDEX_HIGH,
+ VOTE_INDEX_MAX,
+};
+
+struct mdss_hw_settings {
+ char __iomem *reg;
+ u32 val;
+};
+
+struct mdss_max_bw_settings {
+ u32 mdss_max_bw_mode;
+ u32 mdss_max_bw_val;
+};
+
+struct mdss_debug_inf {
+ void *debug_data;
+ void (*debug_enable_clock)(int on);
+};
+
+struct mdss_perf_tune {
+ unsigned long min_mdp_clk;
+ u64 min_bus_vote;
+};
+
+#define MDSS_IRQ_SUSPEND -1
+#define MDSS_IRQ_RESUME 1
+#define MDSS_IRQ_REQ 0
+
+struct mdss_intr {
+ /* requested intr */
+ u32 req;
+ /* currently enabled intr */
+ u32 curr;
+ int state;
+ spinlock_t lock;
+};
+
+struct simplified_prefill_factors {
+ u32 fmt_mt_nv12_factor;
+ u32 fmt_mt_factor;
+ u32 fmt_linear_factor;
+ u32 scale_factor;
+ u32 xtra_ff_factor;
+};
+
+struct mdss_prefill_data {
+ u32 ot_bytes;
+ u32 y_buf_bytes;
+ u32 y_scaler_lines_bilinear;
+ u32 y_scaler_lines_caf;
+ u32 post_scaler_pixels;
+ u32 pp_pixels;
+ u32 fbc_lines;
+ u32 ts_threshold;
+ u32 ts_end;
+ u32 ts_overhead;
+ struct mult_factor ts_rate;
+ struct simplified_prefill_factors prefill_factors;
+};
+
+struct mdss_mdp_dsc {
+ u32 num;
+ char __iomem *base;
+};
+
+enum mdss_hw_index {
+ MDSS_HW_MDP,
+ MDSS_HW_DSI0 = 1,
+ MDSS_HW_DSI1,
+ MDSS_HW_HDMI,
+ MDSS_HW_EDP,
+ MDSS_HW_MISC,
+ MDSS_MAX_HW_BLK
+};
+
+enum mdss_bus_clients {
+ MDSS_MDP_RT,
+ MDSS_DSI_RT,
+ MDSS_HW_RT,
+ MDSS_MDP_NRT,
+ MDSS_MAX_BUS_CLIENTS
+};
+
+struct mdss_pp_block_off {
+ u32 sspp_igc_lut_off;
+ u32 vig_pcc_off;
+ u32 rgb_pcc_off;
+ u32 dma_pcc_off;
+ u32 lm_pgc_off;
+ u32 dspp_gamut_off;
+ u32 dspp_pcc_off;
+ u32 dspp_pgc_off;
+};
+
+enum mdss_hw_quirk {
+ MDSS_QUIRK_BWCPANIC,
+ MDSS_QUIRK_ROTCDP,
+ MDSS_QUIRK_DOWNSCALE_HANG,
+ MDSS_QUIRK_DSC_RIGHT_ONLY_PU,
+ MDSS_QUIRK_DSC_2SLICE_PU_THRPUT,
+ MDSS_QUIRK_DMA_BI_DIR,
+ MDSS_QUIRK_FMT_PACK_PATTERN,
+ MDSS_QUIRK_NEED_SECURE_MAP,
+ MDSS_QUIRK_SRC_SPLIT_ALWAYS,
+ MDSS_QUIRK_HDR_SUPPORT_ENABLED,
+ MDSS_QUIRK_MAX,
+};
+
+enum mdss_hw_capabilities {
+ MDSS_CAPS_YUV_CONFIG,
+ MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
+ MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+ MDSS_CAPS_MIXER_1_FOR_WB,
+ MDSS_CAPS_QSEED3,
+ MDSS_CAPS_DEST_SCALER,
+ MDSS_CAPS_10_BIT_SUPPORTED,
+ MDSS_CAPS_MAX,
+};
+
+enum mdss_qos_settings {
+ MDSS_QOS_PER_PIPE_IB,
+ MDSS_QOS_OVERHEAD_FACTOR,
+ MDSS_QOS_CDP,
+ MDSS_QOS_OTLIM,
+ MDSS_QOS_PER_PIPE_LUT,
+ MDSS_QOS_SIMPLIFIED_PREFILL,
+ MDSS_QOS_VBLANK_PANIC_CTRL,
+ MDSS_QOS_TS_PREFILL,
+ MDSS_QOS_REMAPPER,
+ MDSS_QOS_IB_NOCR,
+ MDSS_QOS_MAX,
+};
+
+enum mdss_mdp_pipe_type {
+ MDSS_MDP_PIPE_TYPE_INVALID = -1,
+ MDSS_MDP_PIPE_TYPE_VIG = 0,
+ MDSS_MDP_PIPE_TYPE_RGB,
+ MDSS_MDP_PIPE_TYPE_DMA,
+ MDSS_MDP_PIPE_TYPE_CURSOR,
+ MDSS_MDP_PIPE_TYPE_MAX,
+};
+
+struct reg_bus_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ u32 id;
+ struct list_head list;
+};
+
+struct mdss_smmu_client {
+ struct device *dev;
+ struct dma_iommu_mapping *mmu_mapping;
+ struct mdss_module_power mp;
+ struct reg_bus_client *reg_bus_clt;
+ bool domain_attached;
+ bool handoff_pending;
+ char __iomem *mmu_base;
+};
+
+struct mdss_mdp_qseed3_lut_tbl {
+ bool valid;
+ u32 *dir_lut;
+ u32 *cir_lut;
+ u32 *sep_lut;
+};
+
+struct mdss_scaler_block {
+ u32 vig_scaler_off;
+ u32 vig_scaler_lut_off;
+ u32 has_dest_scaler;
+ char __iomem *dest_base;
+ u32 ndest_scalers;
+ u32 *dest_scaler_off;
+ u32 *dest_scaler_lut_off;
+ struct mdss_mdp_qseed3_lut_tbl lut_tbl;
+};
+
+struct mdss_data_type;
+
+struct mdss_smmu_ops {
+ int (*smmu_attach)(struct mdss_data_type *mdata);
+ int (*smmu_detach)(struct mdss_data_type *mdata);
+ int (*smmu_get_domain_id)(u32 type);
+ struct dma_buf_attachment * (*smmu_dma_buf_attach)(
+ struct dma_buf *dma_buf, struct device *devce,
+ int domain);
+ int (*smmu_map_dma_buf)(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain,
+ dma_addr_t *iova, unsigned long *size, int dir);
+ void (*smmu_unmap_dma_buf)(struct sg_table *table, int domain,
+ int dir, struct dma_buf *dma_buf);
+ int (*smmu_dma_alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
+ gfp_t gfp, int domain);
+ void (*smmu_dma_free_coherent)(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova,
+ int domain);
+ int (*smmu_map)(int domain, phys_addr_t iova, phys_addr_t phys, int
+ gfp_order, int prot);
+ void (*smmu_unmap)(int domain, unsigned long iova, int gfp_order);
+ char * (*smmu_dsi_alloc_buf)(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp);
+ int (*smmu_dsi_map_buffer)(phys_addr_t phys, unsigned int domain,
+ unsigned long size, dma_addr_t *dma_addr,
+ void *cpu_addr, int dir);
+ void (*smmu_dsi_unmap_buffer)(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir);
+ void (*smmu_deinit)(struct mdss_data_type *mdata);
+ struct sg_table * (*smmu_sg_table_clone)(struct sg_table *orig_table,
+ gfp_t gfp_mask, bool padding);
+};
+
+struct mdss_data_type {
+ u32 mdp_rev;
+ struct clk *mdp_clk[MDSS_MAX_CLK];
+ struct regulator *fs;
+ struct regulator *venus;
+ struct regulator *vdd_cx;
+ bool batfet_required;
+ struct regulator *batfet;
+ bool en_svs_high;
+ u32 max_mdp_clk_rate;
+ struct mdss_util_intf *mdss_util;
+ struct mdss_panel_data *pdata;
+
+ struct platform_device *pdev;
+ struct mdss_io_data mdss_io;
+ struct mdss_io_data vbif_io;
+ struct mdss_io_data vbif_nrt_io;
+ char __iomem *mdp_base;
+
+ struct mdss_smmu_client mdss_smmu[MDSS_IOMMU_MAX_DOMAIN];
+ struct mdss_smmu_ops smmu_ops;
+ struct mutex reg_lock;
+
+ /* bitmap to track pipes that have BWC enabled */
+ DECLARE_BITMAP(bwc_enable_map, MAX_DRV_SUP_PIPES);
+ /* bitmap to track hw workarounds */
+ DECLARE_BITMAP(mdss_quirk_map, MDSS_QUIRK_MAX);
+ /* bitmap to track total mmbs in use */
+ DECLARE_BITMAP(mmb_alloc_map, MAX_DRV_SUP_MMB_BLKS);
+ /* bitmap to track qos applicable settings */
+ DECLARE_BITMAP(mdss_qos_map, MDSS_QOS_MAX);
+ /* bitmap to track hw capabilities/features */
+ DECLARE_BITMAP(mdss_caps_map, MDSS_CAPS_MAX);
+
+ u32 has_bwc;
+ /* values used when HW has a common panic/robust LUT */
+ u32 default_panic_lut0;
+ u32 default_panic_lut1;
+ u32 default_robust_lut;
+
+ /* values used when HW has panic/robust LUTs per pipe */
+ u32 default_panic_lut_per_pipe_linear;
+ u32 default_panic_lut_per_pipe_tile;
+ u32 default_robust_lut_per_pipe_linear;
+ u32 default_robust_lut_per_pipe_tile;
+
+ u32 has_decimation;
+ bool has_fixed_qos_arbiter_enabled;
+ bool has_panic_ctrl;
+ u32 wfd_mode;
+ u32 has_no_lut_read;
+ atomic_t sd_client_count;
+ u8 has_wb_ad;
+ u8 has_non_scalar_rgb;
+ bool has_src_split;
+ bool idle_pc_enabled;
+ bool has_pingpong_split;
+ bool has_pixel_ram;
+ bool needs_hist_vote;
+ bool has_ubwc;
+ bool has_wb_ubwc;
+ bool has_separate_rotator;
+
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+
+ struct irq_domain *irq_domain;
+ u32 *mdp_irq_mask;
+ u32 mdp_hist_irq_mask;
+ u32 mdp_intf_irq_mask;
+
+ int suspend_fs_ena;
+ u8 clk_ena;
+ u8 fs_ena;
+ u8 vsync_ena;
+
+ struct notifier_block gdsc_cb;
+
+ u32 res_init;
+
+ u32 highest_bank_bit;
+ u32 smp_mb_cnt;
+ u32 smp_mb_size;
+ u32 smp_mb_per_pipe;
+ u32 pixel_ram_size;
+
+ u32 rot_block_size;
+
+ /* HW RT bus (AXI) */
+ u32 hw_rt_bus_hdl;
+ u32 hw_rt_bus_ref_cnt;
+
+ /* data bus (AXI) */
+ u32 bus_hdl;
+ u32 bus_ref_cnt;
+ struct mutex bus_lock;
+
+ /* register bus (AHB) */
+ u32 reg_bus_hdl;
+ u32 reg_bus_usecase_ndx;
+ struct list_head reg_bus_clist;
+ struct mutex reg_bus_lock;
+ struct reg_bus_client *reg_bus_clt;
+ struct reg_bus_client *pp_reg_bus_clt;
+
+ u32 axi_port_cnt;
+ u32 nrt_axi_port_cnt;
+ u32 bus_channels;
+ u32 curr_bw_uc_idx;
+ u32 ao_bw_uc_idx; /* active only idx */
+ struct msm_bus_scale_pdata *bus_scale_table;
+ struct msm_bus_scale_pdata *reg_bus_scale_table;
+ struct msm_bus_scale_pdata *hw_rt_bus_scale_table;
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 max_bw_per_pipe;
+ u32 *vbif_rt_qos;
+ u32 *vbif_nrt_qos;
+ u32 npriority_lvl;
+ u32 rot_dwnscale_min;
+ u32 rot_dwnscale_max;
+
+ struct mult_factor ab_factor;
+ struct mult_factor ib_factor;
+ struct mult_factor ib_factor_overlap;
+ struct mult_factor clk_factor;
+ struct mult_factor per_pipe_ib_factor;
+ bool apply_post_scale_bytes;
+ bool hflip_buffer_reused;
+
+ u32 disable_prefill;
+ u32 *clock_levels;
+ u32 nclk_lvl;
+
+ u32 enable_gate;
+ u32 enable_bw_release;
+ u32 enable_rotator_bw_release;
+ u32 serialize_wait4pp;
+ u32 wait4autorefresh;
+ u32 lines_before_active;
+
+ struct mdss_hw_settings *hw_settings;
+
+ int rects_per_sspp[MDSS_MDP_PIPE_TYPE_MAX];
+ struct mdss_mdp_pipe *vig_pipes;
+ struct mdss_mdp_pipe *rgb_pipes;
+ struct mdss_mdp_pipe *dma_pipes;
+ struct mdss_mdp_pipe *cursor_pipes;
+ u32 nvig_pipes;
+ u32 nrgb_pipes;
+ u32 ndma_pipes;
+ u32 max_target_zorder;
+ u8 ncursor_pipes;
+ u32 max_cursor_size;
+
+ u32 nppb_ctl;
+ u32 *ppb_ctl;
+ u32 nppb_cfg;
+ u32 *ppb_cfg;
+ char __iomem *slave_pingpong_base;
+
+ struct mdss_mdp_mixer *mixer_intf;
+ struct mdss_mdp_mixer *mixer_wb;
+ u32 nmixers_intf;
+ u32 nmixers_wb;
+ u32 max_mixer_width;
+ u32 max_pipe_width;
+
+ struct mdss_mdp_writeback *wb;
+ u32 nwb;
+ u32 *wb_offsets;
+ u32 nwb_offsets;
+ struct mutex wb_lock;
+
+ struct mdss_mdp_ctl *ctl_off;
+ u32 nctl;
+ u32 ndspp;
+
+ struct mdss_mdp_dp_intf *dp_off;
+ u32 ndp;
+ void *video_intf;
+ u32 nintf;
+
+ struct mdss_mdp_ad *ad_off;
+ struct mdss_ad_info *ad_cfgs;
+ u32 nad_cfgs;
+ u32 nmax_concurrent_ad_hw;
+ struct workqueue_struct *ad_calc_wq;
+ u32 ad_debugen;
+
+ struct mdss_intr hist_intr;
+
+ struct ion_client *iclient;
+ int iommu_attached;
+
+ struct debug_bus *dbg_bus;
+ u32 dbg_bus_size;
+ struct vbif_debug_bus *vbif_dbg_bus;
+ u32 vbif_dbg_bus_size;
+ struct vbif_debug_bus *nrt_vbif_dbg_bus;
+ u32 nrt_vbif_dbg_bus_size;
+ struct mdss_debug_inf debug_inf;
+ bool mixer_switched;
+ struct mdss_panel_cfg pan_cfg;
+ struct mdss_prefill_data prefill_data;
+ u32 min_prefill_lines; /* this changes within different chipsets */
+ u32 props;
+
+ int handoff_pending;
+ bool idle_pc;
+ struct mdss_perf_tune perf_tune;
+ bool traffic_shaper_en;
+ int iommu_ref_cnt;
+ u32 latency_buff_per;
+ atomic_t active_intf_cnt;
+ bool has_rot_dwnscale;
+ bool regulator_notif_register;
+
+ u64 ab[MDSS_MAX_BUS_CLIENTS];
+ u64 ib[MDSS_MAX_BUS_CLIENTS];
+ struct mdss_pp_block_off pp_block_off;
+
+ struct mdss_mdp_cdm *cdm_off;
+ u32 ncdm;
+ struct mutex cdm_lock;
+
+ struct mdss_mdp_dsc *dsc_off;
+ u32 ndsc;
+
+ struct mdss_max_bw_settings *max_bw_settings;
+ u32 bw_mode_bitmap;
+ u32 max_bw_settings_cnt;
+ bool bw_limit_pending;
+
+ struct mdss_max_bw_settings *max_per_pipe_bw_settings;
+ u32 mdss_per_pipe_bw_cnt;
+ u32 min_bw_per_pipe;
+
+ u32 bcolor0;
+ u32 bcolor1;
+ u32 bcolor2;
+ struct mdss_scaler_block *scaler_off;
+
+ u32 splash_intf_sel;
+ u32 splash_split_disp;
+};
+
+extern struct mdss_data_type *mdss_res;
+
+/*
+ * Per-interrupt-line state used by the MDSS interrupt helpers (see
+ * mdss_intr_line() and struct mdss_util_intf below).
+ */
+struct irq_info {
+	u32 irq;
+	u32 irq_mask;
+	u32 irq_wake_mask;
+	u32 irq_ena;
+	u32 irq_wake_ena;
+	u32 irq_buzy;	/* presumably "busy" (in service) -- confirm usage */
+};
+
+/*
+ * One MDSS hardware block (indexed by enum mdss_hw_index) and the IRQ
+ * handler registered for it via mdss_util_intf.register_irq().
+ */
+struct mdss_hw {
+	u32 hw_ndx;
+	void *ptr;
+	struct irq_info *irq_info;
+	irqreturn_t (*irq_handler)(int irq, void *ptr);
+};
+
+struct irq_info *mdss_intr_line(void);
+void mdss_bus_bandwidth_ctrl(int enable);
+int mdss_iommu_ctrl(int enable);
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client,
+ u32 usecase_ndx);
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name);
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *bus_client);
+
+struct mdss_util_intf {
+ bool mdp_probe_done;
+ int (*register_irq)(struct mdss_hw *hw);
+ void (*enable_irq)(struct mdss_hw *hw);
+ void (*disable_irq)(struct mdss_hw *hw);
+ void (*enable_wake_irq)(struct mdss_hw *hw);
+ void (*disable_wake_irq)(struct mdss_hw *hw);
+ void (*disable_irq_nosync)(struct mdss_hw *hw);
+ int (*irq_dispatch)(u32 hw_ndx, int irq, void *ptr);
+ int (*get_iommu_domain)(u32 type);
+ int (*iommu_attached)(void);
+ int (*iommu_ctrl)(int enable);
+ void (*iommu_lock)(void);
+ void (*iommu_unlock)(void);
+ void (*bus_bandwidth_ctrl)(int enable);
+ int (*bus_scale_set_quota)(int client, u64 ab_quota, u64 ib_quota);
+ int (*panel_intf_status)(u32 disp_num, u32 intf_type);
+ struct mdss_panel_cfg* (*panel_intf_type)(int intf_val);
+ int (*dyn_clk_gating_ctrl)(int enable);
+ bool (*param_check)(char *param_string);
+ bool display_disabled;
+};
+
+struct mdss_util_intf *mdss_get_util_intf(void);
+bool mdss_get_irq_enable_state(struct mdss_hw *hw);
+
+/* Current secure-display client count, or 0 before mdss_res is set up */
+static inline int mdss_get_sd_client_cnt(void)
+{
+	return !mdss_res ? 0 : atomic_read(&mdss_res->sd_client_count);
+}
+
+/* Record that hardware quirk @bit applies to this MDSS revision */
+static inline void mdss_set_quirk(struct mdss_data_type *mdata,
+	enum mdss_hw_quirk bit)
+{
+	set_bit(bit, mdata->mdss_quirk_map);
+}
+
+/* Return true if hardware quirk @bit was set via mdss_set_quirk() */
+static inline bool mdss_has_quirk(struct mdss_data_type *mdata,
+	enum mdss_hw_quirk bit)
+{
+	return test_bit(bit, mdata->mdss_quirk_map);
+}
+
+#define MDSS_VBIF_WRITE(mdata, offset, value, nrt_vbif) \
+ (nrt_vbif ? mdss_reg_w(&mdata->vbif_nrt_io, offset, value, 0) :\
+ mdss_reg_w(&mdata->vbif_io, offset, value, 0))
+#define MDSS_VBIF_READ(mdata, offset, nrt_vbif) \
+ (nrt_vbif ? mdss_reg_r(&mdata->vbif_nrt_io, offset, 0) :\
+ mdss_reg_r(&mdata->vbif_io, offset, 0))
+#define MDSS_REG_WRITE(mdata, offset, value) \
+ mdss_reg_w(&mdata->mdss_io, offset, value, 0)
+#define MDSS_REG_READ(mdata, offset) \
+ mdss_reg_r(&mdata->mdss_io, offset, 0)
+
+#endif /* MDSS_H */
diff --git a/drivers/video/fbdev/msm/mdss_cec_core.c b/drivers/video/fbdev/msm/mdss_cec_core.c
new file mode 100644
index 0000000..23a3ce5
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_cec_core.c
@@ -0,0 +1,799 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "mdss_fb.h"
+#include "mdss_cec_core.h"
+
+#define CEC_ENABLE_MASK BIT(0)
+#define CEC_WAKEUP_ENABLE_MASK BIT(1)
+
+/* Queue element wrapping one received CEC message on cec_ctl::msg_head. */
+struct cec_msg_node {
+	struct cec_msg msg;
+	struct list_head list;
+};
+
+/*
+ * Per-display CEC abstraction state.
+ * @enabled:            CEC is currently enabled on the hardware
+ * @compliance_enabled: compliance mode — messages are parsed in-kernel
+ * @cec_wakeup_en:      wakeup-on-CEC feature requested by userspace
+ * @logical_addr:       logical address programmed into the hardware
+ * @lock:               protects msg_head and the state fields above
+ * @msg_head:           FIFO of received messages awaiting sysfs read
+ * @init_data:          copy of the client's init data (ops/cbs/kobj)
+ */
+struct cec_ctl {
+	bool enabled;
+	bool compliance_enabled;
+	bool cec_wakeup_en;
+
+	u8 logical_addr;
+
+	spinlock_t lock;
+	struct list_head msg_head;
+	struct cec_abstract_init_data init_data;
+
+};
+
+/*
+ * Resolve the CEC state attached to a framebuffer device by walking
+ * dev -> fb_info -> msm_fb_data_type -> mdss_panel_info -> cec_data.
+ * Returns NULL if any link in that chain is missing.
+ */
+static struct cec_ctl *cec_get_ctl(struct device *dev)
+{
+	struct fb_info *fbi;
+	struct msm_fb_data_type *mfd;
+	struct mdss_panel_info *pinfo;
+
+	if (!dev) {
+		pr_err("invalid input\n");
+		goto error;
+	}
+
+	fbi = dev_get_drvdata(dev);
+	if (!fbi) {
+		pr_err("invalid fbi\n");
+		goto error;
+	}
+
+	mfd = fbi->par;
+	if (!mfd) {
+		pr_err("invalid mfd\n");
+		goto error;
+	}
+
+	pinfo = mfd->panel_info;
+	if (!pinfo) {
+		pr_err("invalid pinfo\n");
+		goto error;
+	}
+
+	return pinfo->cec_data;
+
+error:
+	return NULL;
+}
+
+/*
+ * Hand @msg to the hardware driver's send_msg operation.
+ * Returns the op's result, or -EINVAL if inputs or the op are missing.
+ */
+static int cec_msg_send(struct cec_ctl *ctl, struct cec_msg *msg)
+{
+	int ret = -EINVAL;
+	struct cec_ops *ops;
+
+	if (!ctl || !msg) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	if (ops && ops->send_msg)
+		ret = ops->send_msg(ops->data, msg);
+end:
+	return ret;
+}
+
+/*
+ * Pretty-print a CEC message to the debug log (caller identified via
+ * __builtin_return_address). Frames shorter than 2 blocks are polling
+ * messages with no opcode/operands. ctl->lock is held across the dump.
+ */
+static void cec_dump_msg(struct cec_ctl *ctl, struct cec_msg *msg)
+{
+	int i;
+	unsigned long flags;
+
+	if (!ctl || !msg) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	pr_debug("==%pS dump start ==\n",
+		__builtin_return_address(0));
+
+	pr_debug("cec: sender_id: %d\n", msg->sender_id);
+	pr_debug("cec: recvr_id: %d\n", msg->recvr_id);
+
+	if (msg->frame_size < 2) {
+		pr_debug("cec: polling message\n");
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		return;
+	}
+
+	pr_debug("cec: opcode: %02x\n", msg->opcode);
+	for (i = 0; i < msg->frame_size - 2; i++)
+		pr_debug("cec: operand(%2d) : %02x\n", i + 1, msg->operand[i]);
+
+	pr_debug("==%pS dump end ==\n",
+		__builtin_return_address(0));
+	spin_unlock_irqrestore(&ctl->lock, flags);
+}
+
+/*
+ * Disable CEC: drop any queued received messages, then ask the hardware
+ * driver to disable. ctl->enabled is cleared only if the op succeeds.
+ * Returns 0 on success, -EINVAL if ctl/op missing, else the op's error.
+ */
+static int cec_disable(struct cec_ctl *ctl)
+{
+	unsigned long flags;
+	int ret = -EINVAL;
+	struct cec_msg_node *msg_node, *tmp;
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		goto end;
+	}
+
+	/* flush the pending receive queue under the lock */
+	spin_lock_irqsave(&ctl->lock, flags);
+	list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) {
+		list_del(&msg_node->list);
+		kfree(msg_node);
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	ops = ctl->init_data.ops;
+
+	if (ops && ops->enable)
+		ret = ops->enable(ops->data, false);
+
+	if (!ret)
+		ctl->enabled = false;
+
+end:
+	return ret;
+}
+
+/*
+ * Enable CEC: reset the receive queue and ask the hardware driver to
+ * enable. ctl->enabled is set only if the op succeeds.
+ * Returns 0 on success, -EINVAL if ctl/op missing, else the op's error.
+ */
+static int cec_enable(struct cec_ctl *ctl)
+{
+	int ret = -EINVAL;
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		goto end;
+	}
+
+	INIT_LIST_HEAD(&ctl->msg_head);
+
+	ops = ctl->init_data.ops;
+
+	if (ops && ops->enable)
+		ret = ops->enable(ops->data, true);
+
+	if (!ret)
+		ctl->enabled = true;
+
+end:
+	return ret;
+}
+
+/*
+ * Reply to @in_msg with a <Feature Abort> frame: opcode 0x00 carrying
+ * the refused opcode plus @reason_operand, sent from logical address
+ * 0x4 back to the original sender.
+ */
+static int cec_send_abort_opcode(struct cec_ctl *ctl,
+	struct cec_msg *in_msg, u8 reason_operand)
+{
+	int i = 0;
+	struct cec_msg out_msg;
+
+	if (!ctl || !in_msg) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	out_msg.sender_id = 0x4;
+	out_msg.recvr_id = in_msg->sender_id;
+	out_msg.opcode = 0x0; /* opcode for feature abort */
+	out_msg.operand[i++] = in_msg->opcode;
+	out_msg.operand[i++] = reason_operand;
+	out_msg.frame_size = i + 2;
+
+	return cec_msg_send(ctl, &out_msg);
+}
+
+/*
+ * In-kernel handler for incoming CEC messages (compliance mode only).
+ * Responds to a fixed set of opcodes — physical address, OSD name,
+ * power status, routing change / set stream path, abort — with
+ * hard-coded replies sent from logical address 0x4, and answers any
+ * unrecognized opcode with <Feature Abort>.
+ */
+static int cec_msg_parser(struct cec_ctl *ctl, struct cec_msg *in_msg)
+{
+	int rc = 0, i = 0;
+	struct cec_msg out_msg;
+
+	if (!ctl || !in_msg) {
+		pr_err("Invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("in_msg->opcode = 0x%x\n", in_msg->opcode);
+	switch (in_msg->opcode) {
+	case CEC_MSG_SET_OSD_STRING:
+		/* Set OSD String */
+		pr_debug("Recvd OSD Str=[0x%x]\n",
+			in_msg->operand[3]);
+		break;
+	case CEC_MSG_GIVE_PHYS_ADDR:
+		/* Give Phy Addr */
+		pr_debug("Recvd a Give Phy Addr cmd\n");
+
+		out_msg.sender_id = 0x4;
+		/* Broadcast */
+		out_msg.recvr_id = 0xF;
+		out_msg.opcode = 0x84;
+		out_msg.operand[i++] = 0x10;
+		out_msg.operand[i++] = 0x0;
+		out_msg.operand[i++] = 0x04;
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_ABORT:
+		/* Abort */
+		pr_debug("Recvd an abort cmd.\n");
+
+		/* reason = "Refused" */
+		rc = cec_send_abort_opcode(ctl, in_msg, 0x04);
+		break;
+	case CEC_MSG_GIVE_OSD_NAME:
+		/* Give OSD name */
+		pr_debug("Recvd 'Give OSD name' cmd.\n");
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x47; /* OSD Name */
+		/* Display control byte */
+		out_msg.operand[i++] = 0x0;
+		out_msg.operand[i++] = 'H';
+		out_msg.operand[i++] = 'e';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = ' ';
+		out_msg.operand[i++] = 'W';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = 'r';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'd';
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_GIVE_POWER_STATUS:
+		/* Give Device Power status */
+		pr_debug("Recvd a Power status message\n");
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x90; /* OSD String */
+		out_msg.operand[i++] = 'H';
+		out_msg.operand[i++] = 'e';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = ' ';
+		out_msg.operand[i++] = 'W';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = 'r';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'd';
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_ROUTE_CHANGE_CMD:
+		/* Routing Change cmd */
+	case CEC_MSG_SET_STREAM_PATH:
+		/* Set Stream Path */
+		pr_debug("Recvd Set Stream or Routing Change cmd\n");
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = 0xF; /* broadcast this message */
+		out_msg.opcode = 0x82; /* Active Source */
+		out_msg.operand[i++] = 0x10;
+		out_msg.operand[i++] = 0x0;
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		if (rc)
+			goto end;
+
+		/* sending <Image View On> message */
+		memset(&out_msg, 0x0, sizeof(struct cec_msg));
+		i = 0;
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x04; /* opcode for Image View On */
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_USER_CTRL_PRESS:
+		/* User Control Pressed */
+		pr_debug("User Control Pressed\n");
+		break;
+	case CEC_MSG_USER_CTRL_RELEASE:
+		/* User Control Released */
+		pr_debug("User Control Released\n");
+		break;
+	default:
+		pr_debug("Recvd an unknown cmd = [%u]\n",
+			in_msg->opcode);
+
+		/* reason = "Unrecognized opcode" */
+		rc = cec_send_abort_opcode(ctl, in_msg, 0x0);
+		break;
+	}
+end:
+	return rc;
+}
+
+/*
+ * Callback invoked by the CEC hardware driver for each received message
+ * (wired up as cbs->msg_recv_notify in cec_abstract_init()).
+ * In compliance mode the message is parsed in-kernel; otherwise it is
+ * queued on ctl->msg_head and userspace is woken via sysfs "cec/rd_msg".
+ *
+ * NOTE(review): @msg is copied without a NULL check — assumes the driver
+ * always passes a valid message; confirm against callers.
+ */
+static int cec_msg_recv(void *data, struct cec_msg *msg)
+{
+	unsigned long flags;
+	struct cec_ctl *ctl = data;
+	struct cec_msg_node *msg_node;
+	int ret = 0;
+
+	if (!ctl) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (!ctl->enabled) {
+		pr_err("cec not enabled\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	msg_node = kzalloc(sizeof(*msg_node), GFP_KERNEL);
+	if (!msg_node) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	msg_node->msg = *msg;
+
+	pr_debug("CEC read frame done\n");
+	cec_dump_msg(ctl, &msg_node->msg);
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	if (ctl->compliance_enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+
+		ret = cec_msg_parser(ctl, &msg_node->msg);
+		if (ret)
+			pr_err("msg parsing failed\n");
+
+		kfree(msg_node);
+	} else {
+		list_add_tail(&msg_node->list, &ctl->msg_head);
+		spin_unlock_irqrestore(&ctl->lock, flags);
+
+		/* wake-up sysfs read_msg context */
+		sysfs_notify(ctl->init_data.kobj, "cec", "rd_msg");
+	}
+end:
+	return ret;
+}
+
+/* sysfs show for "cec/enable": prints 1 if CEC is enabled, else 0. */
+static ssize_t cec_rda_enable(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long flags;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	if (ctl->enabled) {
+		pr_debug("cec is enabled\n");
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", 1);
+	} else {
+		pr_err("cec is disabled\n");
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", 0);
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+end:
+	return ret;
+}
+
+/*
+ * sysfs store for "cec/enable". The written integer is a bit mask:
+ * bit 0 enables/disables CEC, bit 1 (only meaningful with bit 0 set)
+ * requests the wakeup-on-CEC feature, which is forwarded to the driver
+ * via ops->wakeup_en before toggling the enable state.
+ */
+static ssize_t cec_wta_enable(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+	bool cec_en;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("kstrtoint failed.\n");
+		goto end;
+	}
+
+	cec_en = (val & CEC_ENABLE_MASK) ? true : false;
+
+	/* bit 1 is used for wakeup feature */
+	if ((val & CEC_ENABLE_MASK) && (val & CEC_WAKEUP_ENABLE_MASK))
+		ctl->cec_wakeup_en = true;
+	else
+		ctl->cec_wakeup_en = false;
+
+	if (ops && ops->wakeup_en)
+		ops->wakeup_en(ops->data, ctl->cec_wakeup_en);
+
+	if (ctl->enabled == cec_en) {
+		pr_debug("cec is already %s\n",
+			cec_en ? "enabled" : "disabled");
+		goto bail;
+	}
+
+	if (cec_en)
+		ret = cec_enable(ctl);
+	else
+		ret = cec_disable(ctl);
+
+	if (ret)
+		goto end;
+
+bail:
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	return ret;
+}
+
+/* sysfs show for "cec/enable_compliance": 1 if compliance mode is on. */
+static ssize_t cec_rda_enable_compliance(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned long flags;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		ctl->compliance_enabled);
+
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	return ret;
+}
+
+/*
+ * sysfs store for "cec/enable_compliance". Writing 1 enables CEC,
+ * enters compliance mode and programs the fixed logical address 0x4;
+ * any other value leaves compliance mode, clears the logical address
+ * and disables CEC.
+ */
+static ssize_t cec_wta_enable_compliance(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("kstrtoint failed.\n");
+		goto end;
+	}
+
+	ctl->compliance_enabled = (val == 1) ? true : false;
+
+	if (ctl->compliance_enabled) {
+		ret = cec_enable(ctl);
+		if (ret)
+			goto end;
+
+		ctl->logical_addr = 0x4;
+
+		if (ops && ops->wt_logical_addr)
+			ops->wt_logical_addr(ops->data, ctl->logical_addr);
+
+	} else {
+		ctl->logical_addr = 0;
+
+		ret = cec_disable(ctl);
+		if (ret)
+			goto end;
+	}
+
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	return ret;
+}
+
+/* sysfs show for "cec/logical_addr": current logical address (decimal). */
+static ssize_t cec_rda_logical_addr(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned long flags;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", ctl->logical_addr);
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	return ret;
+}
+
+/*
+ * sysfs store for "cec/logical_addr". Accepts 0-15; the value is cached
+ * and, if CEC is already enabled, written to the hardware immediately.
+ */
+static ssize_t cec_wta_logical_addr(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int logical_addr;
+	unsigned long flags;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	ret = kstrtoint(buf, 10, &logical_addr);
+	if (ret) {
+		pr_err("kstrtoint failed\n");
+		goto end;
+	}
+
+	if (logical_addr < 0 || logical_addr > 15) {
+		pr_err("Invalid logical address\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	ctl->logical_addr = (u8)logical_addr;
+	if (ctl->enabled) {
+		if (ops && ops->wt_logical_addr)
+			ops->wt_logical_addr(ops->data, ctl->logical_addr);
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+end:
+	return ret;
+}
+
+/*
+ * sysfs show for "cec/rd_msg": drains the receive queue into @buf as a
+ * packed array of binary struct cec_msg records (stops before PAGE_SIZE
+ * would overflow). Not permitted in compliance mode; errors if CEC is
+ * disabled or the queue is empty. Returns bytes copied.
+ */
+static ssize_t cec_rda_msg(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int i = 0;
+	unsigned long flags;
+	struct cec_msg_node *msg_node, *tmp;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	ssize_t ret;
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (!ctl->enabled) {
+		pr_err("cec not enabled\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+
+	if (ctl->compliance_enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("Read no allowed in compliance mode\n");
+		ret = -EPERM;
+		goto end;
+	}
+
+	if (list_empty_careful(&ctl->msg_head)) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("CEC message queue is empty\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) {
+		if ((i + 1) * sizeof(struct cec_msg) > PAGE_SIZE) {
+			pr_debug("Overflowing PAGE_SIZE.\n");
+			break;
+		}
+
+		memcpy(buf + (i * sizeof(struct cec_msg)), &msg_node->msg,
+			sizeof(struct cec_msg));
+		list_del(&msg_node->list);
+		kfree(msg_node);
+		i++;
+	}
+
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	ret = i * sizeof(struct cec_msg);
+end:
+	return ret;
+}
+
+/*
+ * sysfs store for "cec/wr_msg": interprets @buf as a binary struct
+ * cec_msg and transmits it. Not permitted in compliance mode or while
+ * CEC is disabled. Returns sizeof(struct cec_msg) on success.
+ *
+ * NOTE(review): the size check rejects frame_size > MAX_OPERAND_SIZE
+ * (14) although a full frame may be MAX_CEC_FRAME_SIZE (16) bytes;
+ * confirm whether frames with more than 12 operands are intentionally
+ * refused here.
+ */
+static ssize_t cec_wta_msg(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret;
+	unsigned long flags;
+	struct cec_msg *msg = (struct cec_msg *)buf;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	if (ctl->compliance_enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("Write not allowed in compliance mode\n");
+		ret = -EPERM;
+		goto end;
+	}
+
+	if (!ctl->enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("CEC is not configed.\n");
+		ret = -EPERM;
+		goto end;
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	if (msg->frame_size > MAX_OPERAND_SIZE) {
+		pr_err("msg frame too big!\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	ret = cec_msg_send(ctl, msg);
+	if (ret) {
+		pr_err("cec_msg_send failed\n");
+		goto end;
+	}
+
+	ret = sizeof(struct cec_msg);
+end:
+	return ret;
+}
+
+/* sysfs attributes exposed under <client kobj>/cec/ */
+static DEVICE_ATTR(enable, 0644, cec_rda_enable,
+	cec_wta_enable);
+static DEVICE_ATTR(enable_compliance, 0644,
+	cec_rda_enable_compliance, cec_wta_enable_compliance);
+static DEVICE_ATTR(logical_addr, 0600,
+	cec_rda_logical_addr, cec_wta_logical_addr);
+static DEVICE_ATTR(rd_msg, 0444, cec_rda_msg, NULL);
+static DEVICE_ATTR(wr_msg, 0600, NULL, cec_wta_msg);
+
+static struct attribute *cec_fs_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_enable_compliance.attr,
+	&dev_attr_logical_addr.attr,
+	&dev_attr_rd_msg.attr,
+	&dev_attr_wr_msg.attr,
+	NULL,
+};
+
+static struct attribute_group cec_fs_attr_group = {
+	.name = "cec",
+	.attrs = cec_fs_attrs,
+};
+
+/**
+ * cec_abstract_deinit() - Release CEC abstract module
+ * @input: CEC abstract data
+ *
+ * This API release all the resources allocated for this
+ * module.
+ *
+ * Return: 0 on success otherwise error code.
+ */
+int cec_abstract_deinit(void *input)
+{
+	struct cec_ctl *ctl = (struct cec_ctl *)input;
+
+	if (!ctl)
+		return -EINVAL;
+
+	/* tear down the "cec" sysfs group created by cec_abstract_init() */
+	sysfs_remove_group(ctl->init_data.kobj, &cec_fs_attr_group);
+
+	/* ctl was allocated by cec_abstract_init() */
+	kfree(ctl);
+
+	return 0;
+}
+
+/**
+ * cec_abstract_init() - Initialize CEC abstract module
+ * @init_data: data needed to initialize the CEC abstraction module
+ *
+ * This API will initialize the CEC abstract module which connects
+ * CEC client with CEC hardware. It creates sysfs nodes for client
+ * to read and write CEC messages. It interacts with hardware with
+ * provided operation function pointers. Also provides callback
+ * function pointers to let the hardware inform about incoming
+ * CEC message.
+ *
+ * Return: pointer to cec abstract data which needs to be passed
+ * as parameter with callback functions, or ERR_PTR() on failure
+ * (callers must check with IS_ERR()).
+ */
+void *cec_abstract_init(struct cec_abstract_init_data *init_data)
+{
+	struct cec_ctl *ctl = NULL;
+	int ret = 0;
+
+	if (!init_data) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+	if (!ctl) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/* keep a copy of init data */
+	ctl->init_data = *init_data;
+
+	ret = sysfs_create_group(ctl->init_data.kobj, &cec_fs_attr_group);
+	if (ret) {
+		pr_err("cec sysfs group creation failed\n");
+		goto end;
+	}
+
+	spin_lock_init(&ctl->lock);
+
+	/* provide callback function pointers */
+	if (init_data->cbs) {
+		init_data->cbs->msg_recv_notify = cec_msg_recv;
+		init_data->cbs->data = ctl;
+	}
+
+	return ctl;
+end:
+	kfree(ctl);
+	return ERR_PTR(ret);
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_cec_core.h b/drivers/video/fbdev/msm/mdss_cec_core.h
new file mode 100644
index 0000000..f8196a0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_cec_core.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_CEC_CORE_H__
+#define __MDSS_CEC_CORE_H__
+
+#define MAX_OPERAND_SIZE 14
+
+/* total size: HEADER block (1) + opcode block (1) + operands (14) */
+#define MAX_CEC_FRAME_SIZE (MAX_OPERAND_SIZE + 2)
+
+/* CEC message set */
+#define CEC_MSG_SET_OSD_STRING 0x64
+#define CEC_MSG_GIVE_PHYS_ADDR 0x83
+#define CEC_MSG_ABORT 0xFF
+#define CEC_MSG_GIVE_OSD_NAME 0x46
+#define CEC_MSG_GIVE_POWER_STATUS 0x8F
+#define CEC_MSG_ROUTE_CHANGE_CMD 0x80
+#define CEC_MSG_SET_STREAM_PATH 0x86
+#define CEC_MSG_USER_CTRL_PRESS 0x44
+#define CEC_MSG_USER_CTRL_RELEASE 0x45
+
+/**
+ * struct cec_msg - CEC message related data
+ * @sender_id: CEC message initiator's id
+ * @recvr_id: CEC message destination's id
+ * @opcode: CEC message opcode
+ * @operand: CEC message operands corresponding to opcode
+ * @frame_size: total CEC frame size
+ * @retransmit: number of re-tries to transmit message
+ *
+ * Basic CEC message structure used by both client and driver.
+ */
+struct cec_msg {
+	u8 sender_id;
+	u8 recvr_id;
+	u8 opcode;
+	/* at most MAX_OPERAND_SIZE operands per frame */
+	u8 operand[MAX_OPERAND_SIZE];
+	u8 frame_size;
+	u8 retransmit;
+};
+
+/**
+ * struct cec_ops - CEC operations function pointers
+ * @enable: function pointer to enable CEC
+ * @send_msg: function pointer to send CEC message
+ * @wt_logical_addr: function pointer to write logical address
+ * @wakeup_en: function pointer to enable wakeup feature
+ * @is_wakeup_en: function pointer to query wakeup feature state
+ * @device_suspend: function pointer to update device suspend state
+ * @data: pointer to the data needed to send with operation functions
+ *
+ * Defines all the operations that abstract module can call
+ * to program the CEC driver.
+ */
+struct cec_ops {
+	int (*enable)(void *data, bool enable);
+	int (*send_msg)(void *data,
+		struct cec_msg *msg);
+	void (*wt_logical_addr)(void *data, u8 addr);
+	void (*wakeup_en)(void *data, bool en);
+	bool (*is_wakeup_en)(void *data);
+	void (*device_suspend)(void *data, bool suspend);
+	void *data;
+};
+
+/**
+ * struct cec_cbs - CEC callback function pointers
+ * @msg_recv_notify: function pointer called by the CEC driver to notify
+ *                   an incoming msg
+ * @data: pointer to data needed to be sent with the callback function
+ *
+ * Defines callback functions which the CEC driver can invoke to notify
+ * any change in the hardware.
+ */
+struct cec_cbs {
+	int (*msg_recv_notify)(void *data, struct cec_msg *msg);
+	void *data;
+};
+
+/**
+ * struct cec_abstract_init_data - initialization data for abstract module
+ * @ops: pointer to struct containing all operation function pointers
+ * @cbs: pointer to struct containing all callback function pointers
+ * @kobj: pointer to kobject instance associated with CEC driver.
+ *
+ * Defines initialization data needed by init API to initialize the module.
+ */
+struct cec_abstract_init_data {
+	struct cec_ops *ops;
+	struct cec_cbs *cbs;
+	struct kobject *kobj;
+};
+
+void *cec_abstract_init(struct cec_abstract_init_data *init_data);
+int cec_abstract_deinit(void *input);
+#endif /* __MDSS_CEC_CORE_H_*/
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
new file mode 100644
index 0000000..9a9f5e4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -0,0 +1,4317 @@
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 1994 Martin Schaller
+ *
+ * 2001 - Documented with DocBook
+ * - Brad Douglas <brad@neruo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/compat.h>
+#include <linux/fb.h>
+
+#include <linux/uaccess.h>
+
+#include "mdss_fb.h"
+#include "mdss_compat_utils.h"
+#include "mdss_mdp_hwio.h"
+#include "mdss_mdp.h"
+
+#define MSMFB_CURSOR32 _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor32)
+#define MSMFB_SET_LUT32 _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap32)
+#define MSMFB_HISTOGRAM32 _IOWR(MSMFB_IOCTL_MAGIC, 132,\
+ struct mdp_histogram_data32)
+#define MSMFB_GET_CCS_MATRIX32 _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs32)
+#define MSMFB_SET_CCS_MATRIX32 _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs32)
+#define MSMFB_OVERLAY_SET32 _IOWR(MSMFB_IOCTL_MAGIC, 135,\
+ struct mdp_overlay32)
+
+#define MSMFB_OVERLAY_GET32 _IOR(MSMFB_IOCTL_MAGIC, 140,\
+ struct mdp_overlay32)
+#define MSMFB_OVERLAY_BLT32 _IOWR(MSMFB_IOCTL_MAGIC, 142,\
+ struct msmfb_overlay_blt32)
+#define MSMFB_HISTOGRAM_START32 _IOR(MSMFB_IOCTL_MAGIC, 144,\
+ struct mdp_histogram_start_req32)
+
+#define MSMFB_OVERLAY_3D32 _IOWR(MSMFB_IOCTL_MAGIC, 147,\
+ struct msmfb_overlay_3d32)
+
+#define MSMFB_MIXER_INFO32 _IOWR(MSMFB_IOCTL_MAGIC, 148,\
+ struct msmfb_mixer_info_req32)
+#define MSMFB_MDP_PP32 _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp32)
+#define MSMFB_BUFFER_SYNC32 _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync32)
+#define MSMFB_OVERLAY_PREPARE32 _IOWR(MSMFB_IOCTL_MAGIC, 169, \
+ struct mdp_overlay_list32)
+#define MSMFB_ATOMIC_COMMIT32 _IOWR(MDP_IOCTL_MAGIC, 128, compat_caddr_t)
+
+#define MSMFB_ASYNC_POSITION_UPDATE_32 _IOWR(MDP_IOCTL_MAGIC, 129, \
+ struct mdp_position_update32)
+
+static int __copy_layer_pp_info_params(struct mdp_input_layer *layer,
+ struct mdp_input_layer32 *layer32);
+
+/*
+ * Map a 32-bit (compat) MSMFB/MDP ioctl number to its native 64-bit
+ * counterpart; unknown numbers are passed through unchanged.
+ */
+static unsigned int __do_compat_ioctl_nr(unsigned int cmd32)
+{
+	unsigned int cmd;
+
+	switch (cmd32) {
+	case MSMFB_CURSOR32:
+		cmd = MSMFB_CURSOR;
+		break;
+	case MSMFB_SET_LUT32:
+		cmd = MSMFB_SET_LUT;
+		break;
+	case MSMFB_HISTOGRAM32:
+		cmd = MSMFB_HISTOGRAM;
+		break;
+	case MSMFB_GET_CCS_MATRIX32:
+		cmd = MSMFB_GET_CCS_MATRIX;
+		break;
+	case MSMFB_SET_CCS_MATRIX32:
+		cmd = MSMFB_SET_CCS_MATRIX;
+		break;
+	case MSMFB_OVERLAY_SET32:
+		cmd = MSMFB_OVERLAY_SET;
+		break;
+	case MSMFB_OVERLAY_GET32:
+		cmd = MSMFB_OVERLAY_GET;
+		break;
+	case MSMFB_OVERLAY_BLT32:
+		cmd = MSMFB_OVERLAY_BLT;
+		break;
+	case MSMFB_OVERLAY_3D32:
+		cmd = MSMFB_OVERLAY_3D;
+		break;
+	case MSMFB_MIXER_INFO32:
+		cmd = MSMFB_MIXER_INFO;
+		break;
+	case MSMFB_MDP_PP32:
+		cmd = MSMFB_MDP_PP;
+		break;
+	case MSMFB_BUFFER_SYNC32:
+		cmd = MSMFB_BUFFER_SYNC;
+		break;
+	case MSMFB_OVERLAY_PREPARE32:
+		cmd = MSMFB_OVERLAY_PREPARE;
+		break;
+	case MSMFB_ATOMIC_COMMIT32:
+		cmd = MSMFB_ATOMIC_COMMIT;
+		break;
+	case MSMFB_ASYNC_POSITION_UPDATE_32:
+		cmd = MSMFB_ASYNC_POSITION_UPDATE;
+		break;
+	default:
+		cmd = cmd32;
+		break;
+	}
+
+	return cmd;
+}
+
+/*
+ * Copy the top-level fields of a 32-bit atomic-commit struct into the
+ * native one. The reserved area is copied for min(dest, src) bytes so
+ * the two layouts may differ in reserved size without overflowing.
+ */
+static void __copy_atomic_commit_struct(struct mdp_layer_commit *commit,
+	struct mdp_layer_commit32 *commit32)
+{
+	unsigned int destsize = sizeof(commit->commit_v1.reserved);
+	unsigned int srcsize = sizeof(commit32->commit_v1.reserved);
+	unsigned int count = (destsize <= srcsize ? destsize : srcsize);
+
+	commit->version = commit32->version;
+	commit->commit_v1.flags = commit32->commit_v1.flags;
+	commit->commit_v1.input_layer_cnt =
+		commit32->commit_v1.input_layer_cnt;
+	commit->commit_v1.left_roi = commit32->commit_v1.left_roi;
+	commit->commit_v1.right_roi = commit32->commit_v1.right_roi;
+	memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved,
+		count);
+}
+
+/*
+ * Allocate a kernel copy of the userspace 32-bit input-layer array
+ * (@layer_count entries, caller has already bounded it).
+ * Returns the array or an ERR_PTR (-ENOMEM/-EFAULT); caller frees.
+ */
+static struct mdp_input_layer32 *__create_layer_list32(
+	struct mdp_layer_commit32 *commit32,
+	u32 layer_count)
+{
+	u32 buffer_size32;
+	struct mdp_input_layer32 *layer_list32;
+	int ret;
+
+	buffer_size32 = sizeof(struct mdp_input_layer32) * layer_count;
+
+	layer_list32 = kmalloc(buffer_size32, GFP_KERNEL);
+	if (!layer_list32) {
+		layer_list32 = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+
+	ret = copy_from_user(layer_list32,
+			compat_ptr(commit32->commit_v1.input_layers),
+			sizeof(struct mdp_input_layer32) * layer_count);
+	if (ret) {
+		pr_err("layer list32 copy from user failed, ptr %pK\n",
+			compat_ptr(commit32->commit_v1.input_layers));
+		kfree(layer_list32);
+		ret = -EFAULT;
+		layer_list32 = ERR_PTR(ret);
+	}
+
+end:
+	return layer_list32;
+}
+
+/*
+ * If the layer requests pixel-extension scaling, copy its scale data
+ * from userspace into a fresh allocation and attach it to layer->scale
+ * (ownership moves to the layer; freed by the commit cleanup path).
+ * No-op returning 0 when MDP_LAYER_ENABLE_PIXEL_EXT is not set.
+ */
+static int __copy_scale_params(struct mdp_input_layer *layer,
+	struct mdp_input_layer32 *layer32)
+{
+	struct mdp_scale_data *scale;
+	int ret;
+
+	if (!(layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT))
+		return 0;
+
+	scale = kmalloc(sizeof(struct mdp_scale_data), GFP_KERNEL);
+	if (!scale) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/* scale structure size is same for compat and 64bit version */
+	ret = copy_from_user(scale, compat_ptr(layer32->scale),
+			sizeof(struct mdp_scale_data));
+	if (ret) {
+		kfree(scale);
+		pr_err("scale param copy from user failed, ptr %pK\n",
+			compat_ptr(layer32->scale));
+		ret = -EFAULT;
+	} else {
+		layer->scale = scale;
+	}
+end:
+	return ret;
+}
+
+/*
+ * Build the native input-layer array from the already-copied 32-bit
+ * one, translating each field and pulling in per-layer scale and
+ * pp_info data from userspace. On any per-layer failure the scale and
+ * pp_info of every layer copied so far are freed and an ERR_PTR is
+ * returned. On success the array is also stored in
+ * commit->commit_v1.input_layers; caller owns all allocations.
+ */
+static struct mdp_input_layer *__create_layer_list(
+	struct mdp_layer_commit *commit,
+	struct mdp_input_layer32 *layer_list32,
+	u32 layer_count)
+{
+	int i, ret = 0;
+	u32 buffer_size;
+	struct mdp_input_layer *layer, *layer_list;
+	struct mdp_input_layer32 *layer32;
+
+	buffer_size = sizeof(struct mdp_input_layer) * layer_count;
+
+	layer_list = kmalloc(buffer_size, GFP_KERNEL);
+	if (!layer_list) {
+		layer_list = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+
+	commit->commit_v1.input_layers = layer_list;
+
+	for (i = 0; i < layer_count; i++) {
+		layer = &layer_list[i];
+		layer32 = &layer_list32[i];
+
+		layer->flags = layer32->flags;
+		layer->pipe_ndx = layer32->pipe_ndx;
+		layer->horz_deci = layer32->horz_deci;
+		layer->vert_deci = layer32->vert_deci;
+		layer->z_order = layer32->z_order;
+		layer->transp_mask = layer32->transp_mask;
+		layer->bg_color = layer32->bg_color;
+		layer->blend_op = layer32->blend_op;
+		layer->alpha = layer32->alpha;
+		layer->color_space = layer32->color_space;
+		layer->src_rect = layer32->src_rect;
+		layer->dst_rect = layer32->dst_rect;
+		layer->buffer = layer32->buffer;
+		memcpy(&layer->reserved, &layer32->reserved,
+			sizeof(layer->reserved));
+
+		layer->scale = NULL;
+		ret = __copy_scale_params(layer, layer32);
+		if (ret)
+			break;
+
+		layer->pp_info = NULL;
+		ret = __copy_layer_pp_info_params(layer, layer32);
+		if (ret)
+			break;
+	}
+
+	if (ret) {
+		/* unwind: free per-layer data for layers copied so far */
+		for (i--; i >= 0; i--) {
+			kfree(layer_list[i].scale);
+			mdss_mdp_free_layer_pp_info(&layer_list[i]);
+		}
+		kfree(layer_list);
+		layer_list = ERR_PTR(ret);
+	}
+
+end:
+	return layer_list;
+}
+
+/*
+ * Propagate commit results back to userspace: per-layer error codes
+ * into the 32-bit layer array, the output layer, and the release/
+ * retire fences plus the full 32-bit commit struct at @argp.
+ * Returns the (non-zero) copy_to_user remainder on failure.
+ */
+static int __copy_to_user_atomic_commit(struct mdp_layer_commit *commit,
+	struct mdp_layer_commit32 *commit32,
+	struct mdp_input_layer32 *layer_list32,
+	unsigned long argp, u32 layer_count)
+{
+	int i, ret;
+	struct mdp_input_layer *layer_list;
+
+	layer_list = commit->commit_v1.input_layers;
+
+	for (i = 0; i < layer_count; i++)
+		layer_list32[i].error_code = layer_list[i].error_code;
+
+	ret = copy_to_user(compat_ptr(commit32->commit_v1.input_layers),
+			layer_list32,
+			sizeof(struct mdp_input_layer32) * layer_count);
+	if (ret)
+		goto end;
+
+	ret = copy_to_user(compat_ptr(commit32->commit_v1.output_layer),
+			commit->commit_v1.output_layer,
+			sizeof(struct mdp_output_layer));
+	if (ret)
+		goto end;
+
+	commit32->commit_v1.release_fence =
+		commit->commit_v1.release_fence;
+	commit32->commit_v1.retire_fence =
+		commit->commit_v1.retire_fence;
+
+	ret = copy_to_user((void __user *)argp, commit32,
+		sizeof(struct mdp_layer_commit32));
+
+end:
+	return ret;
+}
+
+/*
+ * Compat handler for MSMFB_ATOMIC_COMMIT: copies the 32-bit commit
+ * struct and its optional output layer, input-layer list and frc_info
+ * from userspace into native structures, runs the commit, then writes
+ * the results back and frees everything.
+ *
+ * NOTE(review): if allocating or copying frc_info fails, the goto
+ * frc_err path frees layer_list itself but not the per-layer scale /
+ * pp_info allocations made by __create_layer_list() — apparent memory
+ * leak on that error path; confirm.
+ */
+static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
+			 unsigned long argp, struct file *file)
+{
+	int ret, i;
+	struct mdp_layer_commit commit;
+	struct mdp_layer_commit32 commit32;
+	u32 layer_count;
+	struct mdp_input_layer *layer_list = NULL;
+	struct mdp_input_layer32 *layer_list32 = NULL;
+	struct mdp_output_layer *output_layer = NULL;
+	struct mdp_frc_info *frc_info = NULL;
+
+	/* copy top level memory from 32 bit structure to kernel memory */
+	ret = copy_from_user(&commit32, (void __user *)argp,
+		sizeof(struct mdp_layer_commit32));
+	if (ret) {
+		pr_err("%s:copy_from_user failed, ptr %pK\n", __func__,
+			(void __user *)argp);
+		ret = -EFAULT;
+		return ret;
+	}
+
+	memset(&commit, 0, sizeof(struct mdp_layer_commit));
+	__copy_atomic_commit_struct(&commit, &commit32);
+
+	if (commit32.commit_v1.output_layer) {
+		int buffer_size = sizeof(struct mdp_output_layer);
+
+		output_layer = kzalloc(buffer_size, GFP_KERNEL);
+		if (!output_layer)
+			return -ENOMEM;
+
+		ret = copy_from_user(output_layer,
+				compat_ptr(commit32.commit_v1.output_layer),
+				buffer_size);
+		if (ret) {
+			pr_err("fail to copy output layer from user, ptr %pK\n",
+				compat_ptr(commit32.commit_v1.output_layer));
+			ret = -EFAULT;
+			goto layer_list_err;
+		}
+
+		commit.commit_v1.output_layer = output_layer;
+	}
+
+	layer_count = commit32.commit_v1.input_layer_cnt;
+	if (layer_count > MAX_LAYER_COUNT) {
+		ret = -EINVAL;
+		goto layer_list_err;
+	} else if (layer_count) {
+		/*
+		 * allocate memory for layer list in 32bit domain and copy it
+		 * from user
+		 */
+		layer_list32 = __create_layer_list32(&commit32, layer_count);
+		if (IS_ERR_OR_NULL(layer_list32)) {
+			ret = PTR_ERR(layer_list32);
+			goto layer_list_err;
+		}
+
+		/*
+		 * allocate memory for layer list in kernel memory domain and
+		 * copy layer info from 32bit structures to kernel memory
+		 */
+		layer_list = __create_layer_list(&commit, layer_list32,
+			layer_count);
+		if (IS_ERR_OR_NULL(layer_list)) {
+			ret = PTR_ERR(layer_list);
+			goto layer_list_err;
+		}
+	}
+
+	if (commit32.commit_v1.frc_info) {
+		int buffer_size = sizeof(struct mdp_frc_info);
+
+		frc_info = kzalloc(buffer_size, GFP_KERNEL);
+		if (!frc_info) {
+			ret = -ENOMEM;
+			goto frc_err;
+		}
+
+		ret = copy_from_user(frc_info,
+				compat_ptr(commit32.commit_v1.frc_info),
+				buffer_size);
+		if (ret) {
+			pr_err("fail to copy frc info from user, ptr %p\n",
+				compat_ptr(commit32.commit_v1.frc_info));
+			kfree(frc_info);
+			ret = -EFAULT;
+			goto frc_err;
+		}
+
+		commit.commit_v1.frc_info = frc_info;
+	}
+
+	ret = mdss_fb_atomic_commit(info, &commit, file);
+	if (ret)
+		pr_err("atomic commit failed ret:%d\n", ret);
+
+	if (layer_count)
+		__copy_to_user_atomic_commit(&commit, &commit32, layer_list32,
+			argp, layer_count);
+
+	/* success path: release per-layer allocations before the lists */
+	for (i = 0; i < layer_count; i++) {
+		kfree(layer_list[i].scale);
+		mdss_mdp_free_layer_pp_info(&layer_list[i]);
+	}
+
+	kfree(frc_info);
+frc_err:
+	kfree(layer_list);
+layer_list_err:
+	kfree(layer_list32);
+	kfree(output_layer);
+	return ret;
+}
+
+/*
+ * Copy async position-update results back to the 32-bit caller: first
+ * the per-layer array into the user buffer recorded in update_pos32,
+ * then the top-level 32-bit struct itself back to argp.
+ * Returns 0 on success, non-zero (copy_to_user residue) on fault.
+ */
+static int __copy_to_user_async_position_update(
+		struct mdp_position_update *update_pos,
+		struct mdp_position_update32 *update_pos32,
+		unsigned long argp, u32 layer_cnt)
+{
+	int ret;
+
+	ret = copy_to_user(update_pos32->input_layers,
+			update_pos->input_layers,
+			sizeof(struct mdp_async_layer) * layer_cnt);
+	if (ret)
+		goto end;
+
+	ret = copy_to_user((void __user *) argp, update_pos32,
+			sizeof(struct mdp_position_update32));
+
+end:
+	return ret;
+}
+
+/*
+ * Allocate a kernel-side array of layer_cnt async layers and fill it
+ * from the 32-bit userspace request.
+ *
+ * Returns the allocated array (caller owns and must kfree it) or an
+ * ERR_PTR(-errno) on failure.
+ */
+static struct mdp_async_layer *__create_async_layer_list(
+			struct mdp_position_update32 *update_pos32, u32 layer_cnt)
+{
+	u32 buffer_size;
+	struct mdp_async_layer *layer_list;
+
+	buffer_size = sizeof(struct mdp_async_layer) * layer_cnt;
+
+	layer_list = kmalloc(buffer_size, GFP_KERNEL);
+	if (!layer_list)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(layer_list,
+			update_pos32->input_layers, buffer_size)) {
+		pr_err("layer list32 copy from user failed\n");
+		kfree(layer_list);
+		/*
+		 * copy_from_user() returns the number of bytes NOT copied
+		 * (a positive count). ERR_PTR() of a positive value is not
+		 * recognized by IS_ERR_OR_NULL() in the caller, which would
+		 * then dereference the buffer just freed above. Return a
+		 * real errno-encoded pointer instead.
+		 */
+		layer_list = ERR_PTR(-EFAULT);
+	}
+
+	return layer_list;
+}
+
+/*
+ * Compat handler for the async position-update ioctl: copies in the
+ * 32-bit request, validates the layer count, builds a kernel-side
+ * layer array, performs the update and copies per-layer results back.
+ * Returns 0 on success or a negative errno.
+ */
+static int __compat_async_position_update(struct fb_info *info,
+		unsigned int cmd, unsigned long argp)
+{
+	struct mdp_position_update update_pos;
+	struct mdp_position_update32 update_pos32;
+	struct mdp_async_layer *layer_list = NULL;
+	u32 layer_cnt;
+	int ret, rc;
+
+	/* copy top level memory from 32 bit structure to kernel memory */
+	if (copy_from_user(&update_pos32, (void __user *)argp,
+			sizeof(struct mdp_position_update32))) {
+		pr_err("%s:copy_from_user failed\n", __func__);
+		/*
+		 * copy_from_user() returns a positive residual byte count,
+		 * not an errno - translate it for the ioctl caller.
+		 */
+		return -EFAULT;
+	}
+
+	update_pos.input_layer_cnt = update_pos32.input_layer_cnt;
+	layer_cnt = update_pos32.input_layer_cnt;
+	if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) {
+		pr_err("invalid async layers :%d to update\n", layer_cnt);
+		return -EINVAL;
+	}
+
+	layer_list = __create_async_layer_list(&update_pos32,
+			layer_cnt);
+	if (IS_ERR_OR_NULL(layer_list))
+		return PTR_ERR(layer_list);
+
+	update_pos.input_layers = layer_list;
+
+	ret = mdss_fb_async_position_update(info, &update_pos);
+	if (ret)
+		pr_err("async position update failed ret:%d\n", ret);
+
+	/* copy results back, but never mask an earlier update failure */
+	rc = __copy_to_user_async_position_update(&update_pos, &update_pos32,
+			argp, layer_cnt);
+	if (rc) {
+		pr_err("copy to user of async update position failed\n");
+		if (!ret)
+			ret = -EFAULT;
+	}
+
+	kfree(layer_list);
+	return ret;
+}
+
+/*
+ * Compat handler for the buffer-sync ioctl: builds a native
+ * mdp_buf_sync on the compat user-space area from the 32-bit layout
+ * (widening the three fence-fd compat pointers), forwards it to
+ * mdss_fb_do_ioctl(), then copies the produced fence fds back.
+ */
+static int mdss_fb_compat_buf_sync(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	struct mdp_buf_sync32 __user *buf_sync32;
+	struct mdp_buf_sync __user *buf_sync;
+	u32 data;
+	int ret;
+
+	buf_sync = compat_alloc_user_space(sizeof(*buf_sync));
+	if (!buf_sync) {
+		pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+			 __func__, __LINE__, sizeof(*buf_sync));
+		return -EINVAL;
+	}
+	buf_sync32 = compat_ptr(arg);
+
+	/* first three u32 fields starting at ->flags share one layout */
+	if (copy_in_user(&buf_sync->flags, &buf_sync32->flags,
+			 3 * sizeof(u32)))
+		return -EFAULT;
+
+	if (get_user(data, &buf_sync32->acq_fen_fd) ||
+	    put_user(compat_ptr(data), &buf_sync->acq_fen_fd) ||
+	    get_user(data, &buf_sync32->rel_fen_fd) ||
+	    put_user(compat_ptr(data), &buf_sync->rel_fen_fd) ||
+	    get_user(data, &buf_sync32->retire_fen_fd) ||
+	    put_user(compat_ptr(data), &buf_sync->retire_fen_fd))
+		return -EFAULT;
+
+	ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) buf_sync, file);
+	if (ret) {
+		pr_err("%s: failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	/*
+	 * NOTE(review): buf_sync32->rel_fen_fd / retire_fen_fd and
+	 * buf_sync->flags below are read by directly dereferencing __user
+	 * pointers rather than via get_user() - confirm this is intended.
+	 */
+	if (copy_in_user(compat_ptr(buf_sync32->rel_fen_fd),
+			buf_sync->rel_fen_fd,
+			sizeof(int)))
+		return -EFAULT;
+	/* retire fence is optional unless the RETIRE_FENCE flag is set */
+	if (copy_in_user(compat_ptr(buf_sync32->retire_fen_fd),
+			buf_sync->retire_fen_fd,
+			sizeof(int))) {
+		if (buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE)
+			return -EFAULT;
+		pr_debug("%s: no retire fence fd for wb\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/*
+ * Convert a 32-bit fb_cmap (user memory) into a native fb_cmap (also
+ * user memory): start/len copied verbatim, the four color-table
+ * compat pointers widened via compat_ptr().
+ */
+static int __from_user_fb_cmap(struct fb_cmap __user *cmap,
+			struct fb_cmap32 __user *cmap32)
+{
+	__u32 data;
+
+	if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, &cmap32->red) ||
+	    put_user(compat_ptr(data), &cmap->red) ||
+	    get_user(data, &cmap32->green) ||
+	    put_user(compat_ptr(data), &cmap->green) ||
+	    get_user(data, &cmap32->blue) ||
+	    put_user(compat_ptr(data), &cmap->blue) ||
+	    get_user(data, &cmap32->transp) ||
+	    put_user(compat_ptr(data), &cmap->transp))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Convert a native fb_cmap back to the 32-bit layout: start/len copied
+ * verbatim, each table pointer read as an unsigned long and narrowed
+ * to a compat_caddr_t.
+ */
+static int __to_user_fb_cmap(struct fb_cmap __user *cmap,
+			struct fb_cmap32 __user *cmap32)
+{
+	unsigned long data;
+
+	if (copy_in_user(&cmap32->start, &cmap->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &cmap->red) ||
+	    put_user((compat_caddr_t) data, &cmap32->red) ||
+	    get_user(data, (unsigned long *) &cmap->green) ||
+	    put_user((compat_caddr_t) data, &cmap32->green) ||
+	    get_user(data, (unsigned long *) &cmap->blue) ||
+	    put_user((compat_caddr_t) data, &cmap32->blue) ||
+	    get_user(data, (unsigned long *) &cmap->transp) ||
+	    put_user((compat_caddr_t) data, &cmap32->transp))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Convert a 32-bit fb_image to the native layout: six consecutive u32
+ * fields from ->dx plus ->depth copied verbatim, ->data widened via
+ * compat_ptr(), and the embedded cmap converted separately.
+ */
+static int __from_user_fb_image(struct fb_image __user *image,
+			struct fb_image32 __user *image32)
+{
+	__u32 data;
+
+	if (copy_in_user(&image->dx, &image32->dx, 6 * sizeof(u32)) ||
+	    copy_in_user(&image->depth, &image32->depth, sizeof(u8)))
+		return -EFAULT;
+
+	if (get_user(data, &image32->data) ||
+	    put_user(compat_ptr(data), &image->data))
+		return -EFAULT;
+
+	if (__from_user_fb_cmap(&image->cmap, &image32->cmap))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Compat handler for the cursor ioctl: rebuilds a native fb_cursor in
+ * compat user space from the 32-bit layout (set/enable/rop, mask
+ * pointer, hotspot, embedded image) and forwards it to
+ * mdss_fb_do_ioctl().
+ */
+static int mdss_fb_compat_cursor(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	struct fb_cursor32 __user *cursor32;
+	struct fb_cursor __user *cursor;
+	__u32 data;
+	int ret;
+
+	cursor = compat_alloc_user_space(sizeof(*cursor));
+	if (!cursor) {
+		pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+			 __func__, __LINE__, sizeof(*cursor));
+		return -EINVAL;
+	}
+	cursor32 = compat_ptr(arg);
+
+	/* set, enable and rop are three consecutive u16 fields */
+	if (copy_in_user(&cursor->set, &cursor32->set, 3 * sizeof(u16)))
+		return -EFAULT;
+
+	if (get_user(data, &cursor32->mask) ||
+	    put_user(compat_ptr(data), &cursor->mask))
+		return -EFAULT;
+
+	if (copy_in_user(&cursor->hot, &cursor32->hot, sizeof(struct fbcurpos)))
+		return -EFAULT;
+
+	if (__from_user_fb_image(&cursor->image, &cursor32->image))
+		return -EFAULT;
+
+	ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) cursor, file);
+	return ret;
+}
+
+/*
+ * Compat handler for MSMFB_SET_LUT: rebuilds a native fb_cmap_user in
+ * compat user space from the 32-bit cmap (widening the four table
+ * pointers) and forwards it to mdss_fb_do_ioctl().
+ */
+static int mdss_fb_compat_set_lut(struct fb_info *info, unsigned long arg,
+				struct file *file)
+{
+	struct fb_cmap_user __user *cmap;
+	struct fb_cmap32 __user *cmap32;
+	__u32 data;
+	int ret;
+
+	cmap = compat_alloc_user_space(sizeof(*cmap));
+	/*
+	 * Check the compat allocation like the sibling handlers
+	 * (mdss_fb_compat_buf_sync / mdss_fb_compat_cursor) do, instead
+	 * of dereferencing a possibly-NULL user pointer below.
+	 */
+	if (!cmap) {
+		pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+			 __func__, __LINE__, sizeof(*cmap));
+		return -EINVAL;
+	}
+	cmap32 = compat_ptr(arg);
+
+	if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, &cmap32->red) ||
+	    put_user(compat_ptr(data), &cmap->red) ||
+	    get_user(data, &cmap32->green) ||
+	    put_user(compat_ptr(data), &cmap->green) ||
+	    get_user(data, &cmap32->blue) ||
+	    put_user(compat_ptr(data), &cmap->blue) ||
+	    get_user(data, &cmap32->transp) ||
+	    put_user(compat_ptr(data), &cmap->transp))
+		return -EFAULT;
+
+	ret = mdss_fb_do_ioctl(info, MSMFB_SET_LUT, (unsigned long) cmap, file);
+	if (!ret)
+		pr_debug("%s: compat ioctl successful\n", __func__);
+
+	return ret;
+}
+
+/*
+ * Copy each sharpening-config field from the 32-bit compat struct to
+ * the native struct (both are user memory; copy_in_user() moves data
+ * user-to-user). Returns 0 on success, -EFAULT on any fault.
+ */
+static int __from_user_sharp_cfg(
+			struct mdp_sharp_cfg32 __user *sharp_cfg32,
+			struct mdp_sharp_cfg __user *sharp_cfg)
+{
+	if (copy_in_user(&sharp_cfg->flags,
+			&sharp_cfg32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->strength,
+			&sharp_cfg32->strength,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->edge_thr,
+			&sharp_cfg32->edge_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->smooth_thr,
+			&sharp_cfg32->smooth_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->noise_thr,
+			&sharp_cfg32->noise_thr,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Mirror of __from_user_sharp_cfg(): copy each sharpening-config field
+ * from the native struct back to the 32-bit compat struct.
+ * Returns 0 on success, -EFAULT on any fault.
+ */
+static int __to_user_sharp_cfg(
+			struct mdp_sharp_cfg32 __user *sharp_cfg32,
+			struct mdp_sharp_cfg __user *sharp_cfg)
+{
+	if (copy_in_user(&sharp_cfg32->flags,
+			&sharp_cfg->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->strength,
+			&sharp_cfg->strength,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->edge_thr,
+			&sharp_cfg->edge_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->smooth_thr,
+			&sharp_cfg->smooth_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->noise_thr,
+			&sharp_cfg->noise_thr,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Copy each histogram-config field from the 32-bit compat struct to
+ * the native struct (user-to-user). Field sizes differ per member
+ * (u32/u8/u16), so each is copied individually.
+ */
+static int __from_user_histogram_cfg(
+			struct mdp_histogram_cfg32 __user *hist_cfg32,
+			struct mdp_histogram_cfg __user *hist_cfg)
+{
+	if (copy_in_user(&hist_cfg->ops,
+			&hist_cfg32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg->block,
+			&hist_cfg32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg->frame_cnt,
+			&hist_cfg32->frame_cnt,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg->bit_mask,
+			&hist_cfg32->bit_mask,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg->num_bins,
+			&hist_cfg32->num_bins,
+			sizeof(uint16_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Mirror of __from_user_histogram_cfg(): copy each histogram-config
+ * field from the native struct back to the 32-bit compat struct.
+ */
+static int __to_user_histogram_cfg(
+			struct mdp_histogram_cfg32 __user *hist_cfg32,
+			struct mdp_histogram_cfg __user *hist_cfg)
+{
+	if (copy_in_user(&hist_cfg32->ops,
+			&hist_cfg->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg32->block,
+			&hist_cfg->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg32->frame_cnt,
+			&hist_cfg->frame_cnt,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg32->bit_mask,
+			&hist_cfg->bit_mask,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg32->num_bins,
+			&hist_cfg->num_bins,
+			sizeof(uint16_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Copy all twelve legacy PCC coefficients from the 32-bit compat
+ * struct to the native struct, one u32 field at a time (user-to-user).
+ */
+static int __from_user_pcc_coeff(
+			struct mdp_pcc_coeff32 __user *pcc_coeff32,
+			struct mdp_pcc_coeff __user *pcc_coeff)
+{
+	if (copy_in_user(&pcc_coeff->c,
+			&pcc_coeff32->c,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->r,
+			&pcc_coeff32->r,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->g,
+			&pcc_coeff32->g,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->b,
+			&pcc_coeff32->b,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rr,
+			&pcc_coeff32->rr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->gg,
+			&pcc_coeff32->gg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->bb,
+			&pcc_coeff32->bb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rg,
+			&pcc_coeff32->rg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->gb,
+			&pcc_coeff32->gb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rb,
+			&pcc_coeff32->rb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rgb_0,
+			&pcc_coeff32->rgb_0,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rgb_1,
+			&pcc_coeff32->rgb_1,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Mirror of __from_user_pcc_coeff(): copy all twelve legacy PCC
+ * coefficients from the native struct back to the 32-bit compat
+ * struct, one u32 field at a time.
+ */
+static int __to_user_pcc_coeff(
+			struct mdp_pcc_coeff32 __user *pcc_coeff32,
+			struct mdp_pcc_coeff __user *pcc_coeff)
+{
+	if (copy_in_user(&pcc_coeff32->c,
+			&pcc_coeff->c,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->r,
+			&pcc_coeff->r,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->g,
+			&pcc_coeff->g,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->b,
+			&pcc_coeff->b,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rr,
+			&pcc_coeff->rr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->gg,
+			&pcc_coeff->gg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->bb,
+			&pcc_coeff->bb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rg,
+			&pcc_coeff->rg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->gb,
+			&pcc_coeff->gb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rb,
+			&pcc_coeff->rb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rgb_0,
+			&pcc_coeff->rgb_0,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rgb_1,
+			&pcc_coeff->rgb_1,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Convert a v1.7 PCC payload: copy the 32-bit payload pointed to by
+ * pcc_cfg32->cfg_payload into a kernel temporary, transcribe every
+ * r/g/b coefficient into the native layout, then write the result to
+ * the user buffer at pcc_cfg->cfg_payload.
+ */
+static int __from_user_pcc_coeff_v17(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32;
+	struct mdp_pcc_data_v1_7 pcc_cfg_payload;
+
+	if (copy_from_user(&pcc_cfg_payload32,
+			   compat_ptr(pcc_cfg32->cfg_payload),
+			   sizeof(struct mdp_pcc_data_v1_7_32))) {
+		pr_err("failed to copy payload for pcc from user\n");
+		return -EFAULT;
+	}
+
+	memset(&pcc_cfg_payload, 0, sizeof(pcc_cfg_payload));
+	pcc_cfg_payload.r.b = pcc_cfg_payload32.r.b;
+	pcc_cfg_payload.r.g = pcc_cfg_payload32.r.g;
+	pcc_cfg_payload.r.c = pcc_cfg_payload32.r.c;
+	pcc_cfg_payload.r.r = pcc_cfg_payload32.r.r;
+	pcc_cfg_payload.r.gb = pcc_cfg_payload32.r.gb;
+	pcc_cfg_payload.r.rb = pcc_cfg_payload32.r.rb;
+	pcc_cfg_payload.r.rg = pcc_cfg_payload32.r.rg;
+	pcc_cfg_payload.r.rgb = pcc_cfg_payload32.r.rgb;
+
+	pcc_cfg_payload.g.b = pcc_cfg_payload32.g.b;
+	pcc_cfg_payload.g.g = pcc_cfg_payload32.g.g;
+	pcc_cfg_payload.g.c = pcc_cfg_payload32.g.c;
+	pcc_cfg_payload.g.r = pcc_cfg_payload32.g.r;
+	pcc_cfg_payload.g.gb = pcc_cfg_payload32.g.gb;
+	pcc_cfg_payload.g.rb = pcc_cfg_payload32.g.rb;
+	pcc_cfg_payload.g.rg = pcc_cfg_payload32.g.rg;
+	pcc_cfg_payload.g.rgb = pcc_cfg_payload32.g.rgb;
+
+	pcc_cfg_payload.b.b = pcc_cfg_payload32.b.b;
+	pcc_cfg_payload.b.g = pcc_cfg_payload32.b.g;
+	pcc_cfg_payload.b.c = pcc_cfg_payload32.b.c;
+	pcc_cfg_payload.b.r = pcc_cfg_payload32.b.r;
+	pcc_cfg_payload.b.gb = pcc_cfg_payload32.b.gb;
+	pcc_cfg_payload.b.rb = pcc_cfg_payload32.b.rb;
+	pcc_cfg_payload.b.rg = pcc_cfg_payload32.b.rg;
+	pcc_cfg_payload.b.rgb = pcc_cfg_payload32.b.rgb;
+
+	if (copy_to_user(pcc_cfg->cfg_payload, &pcc_cfg_payload,
+			 sizeof(pcc_cfg_payload))) {
+		pr_err("failed to copy payload for pcc to user\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Convert a 32-bit PCC config: copy block/ops/version verbatim, then
+ * dispatch on the version - v1.7 uses the opaque cfg_payload path,
+ * anything else falls back to the legacy per-coefficient copy.
+ */
+static int __from_user_pcc_cfg_data(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	u32 version;
+
+	if (copy_in_user(&pcc_cfg->block,
+			&pcc_cfg32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_cfg->ops,
+			&pcc_cfg32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_cfg->version,
+			&pcc_cfg32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_from_user(&version, &pcc_cfg32->version, sizeof(u32))) {
+		pr_err("failed to copy version for pcc\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_pcc_v1_7:
+		if (__from_user_pcc_coeff_v17(pcc_cfg32, pcc_cfg)) {
+			pr_err("failed to copy pcc v17 data\n");
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("pcc version %d not supported use legacy\n", version);
+		if (__from_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->r),
+				&pcc_cfg->r) ||
+		    __from_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->g),
+				&pcc_cfg->g) ||
+		    __from_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->b),
+				&pcc_cfg->b))
+			return -EFAULT;
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Reverse of __from_user_pcc_coeff_v17(): read the native v1.7 PCC
+ * payload from pcc_cfg->cfg_payload into a kernel temporary,
+ * transcribe every r/g/b coefficient into the 32-bit layout, and
+ * write it back to the user buffer at pcc_cfg32->cfg_payload.
+ */
+static int __to_user_pcc_coeff_v1_7(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32;
+	struct mdp_pcc_data_v1_7 pcc_cfg_payload;
+
+	memset(&pcc_cfg_payload32, 0, sizeof(pcc_cfg_payload32));
+	if (copy_from_user(&pcc_cfg_payload,
+			   pcc_cfg->cfg_payload,
+			   sizeof(struct mdp_pcc_data_v1_7))) {
+		pr_err("failed to copy payload for pcc from user\n");
+		return -EFAULT;
+	}
+
+	pcc_cfg_payload32.r.b = pcc_cfg_payload.r.b;
+	pcc_cfg_payload32.r.g = pcc_cfg_payload.r.g;
+	pcc_cfg_payload32.r.c = pcc_cfg_payload.r.c;
+	pcc_cfg_payload32.r.r = pcc_cfg_payload.r.r;
+	pcc_cfg_payload32.r.gb = pcc_cfg_payload.r.gb;
+	pcc_cfg_payload32.r.rb = pcc_cfg_payload.r.rb;
+	pcc_cfg_payload32.r.rg = pcc_cfg_payload.r.rg;
+	pcc_cfg_payload32.r.rgb = pcc_cfg_payload.r.rgb;
+
+	pcc_cfg_payload32.g.b = pcc_cfg_payload.g.b;
+	pcc_cfg_payload32.g.g = pcc_cfg_payload.g.g;
+	pcc_cfg_payload32.g.c = pcc_cfg_payload.g.c;
+	pcc_cfg_payload32.g.r = pcc_cfg_payload.g.r;
+	pcc_cfg_payload32.g.gb = pcc_cfg_payload.g.gb;
+	pcc_cfg_payload32.g.rb = pcc_cfg_payload.g.rb;
+	pcc_cfg_payload32.g.rg = pcc_cfg_payload.g.rg;
+	pcc_cfg_payload32.g.rgb = pcc_cfg_payload.g.rgb;
+
+	pcc_cfg_payload32.b.b = pcc_cfg_payload.b.b;
+	pcc_cfg_payload32.b.g = pcc_cfg_payload.b.g;
+	pcc_cfg_payload32.b.c = pcc_cfg_payload.b.c;
+	pcc_cfg_payload32.b.r = pcc_cfg_payload.b.r;
+	pcc_cfg_payload32.b.gb = pcc_cfg_payload.b.gb;
+	pcc_cfg_payload32.b.rb = pcc_cfg_payload.b.rb;
+	pcc_cfg_payload32.b.rg = pcc_cfg_payload.b.rg;
+	pcc_cfg_payload32.b.rgb = pcc_cfg_payload.b.rgb;
+
+	if (copy_to_user(compat_ptr(pcc_cfg32->cfg_payload),
+			 &pcc_cfg_payload32,
+			 sizeof(pcc_cfg_payload32))) {
+		pr_err("failed to copy payload for pcc to user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Copy PCC results back to the 32-bit caller. Skips the copy-back
+ * entirely unless MDP_PP_OPS_READ is set in ops, then dispatches on
+ * the version (v1.7 payload vs legacy per-coefficient copy).
+ */
+static int __to_user_pcc_cfg_data(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	u32 version;
+	u32 ops;
+
+	if (copy_from_user(&ops, &pcc_cfg->ops, sizeof(u32))) {
+		pr_err("failed to copy op for pcc\n");
+		return -EFAULT;
+	}
+
+	if (!(ops & MDP_PP_OPS_READ)) {
+		pr_debug("Read op is not set. Skipping compat copyback\n");
+		return 0;
+	}
+
+	if (copy_from_user(&version, &pcc_cfg->version, sizeof(u32))) {
+		pr_err("failed to copy version for pcc\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_pcc_v1_7:
+		if (__to_user_pcc_coeff_v1_7(pcc_cfg32, pcc_cfg)) {
+			pr_err("failed to copy pcc v1_7 data\n");
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+
+		if (__to_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->r),
+				&pcc_cfg->r) ||
+		    __to_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->g),
+				&pcc_cfg->g) ||
+		    __to_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->b),
+				&pcc_cfg->b))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Copy the CSC matrix config from the 32-bit compat struct to the
+ * native struct (user-to-user): flags plus the 3x3 matrix and the
+ * pre/post bias and limit vectors, each copied as a u32 array.
+ */
+static int __from_user_csc_cfg(
+			struct mdp_csc_cfg32 __user *csc_data32,
+			struct mdp_csc_cfg __user *csc_data)
+{
+	if (copy_in_user(&csc_data->flags,
+			&csc_data32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_mv[0],
+			&csc_data32->csc_mv[0],
+			9 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_pre_bv[0],
+			&csc_data32->csc_pre_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_post_bv[0],
+			&csc_data32->csc_post_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_pre_lv[0],
+			&csc_data32->csc_pre_lv[0],
+			6 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_post_lv[0],
+			&csc_data32->csc_post_lv[0],
+			6 * sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+/*
+ * Mirror of __from_user_csc_cfg(): copy the CSC matrix config from
+ * the native struct back to the 32-bit compat struct.
+ */
+static int __to_user_csc_cfg(
+			struct mdp_csc_cfg32 __user *csc_data32,
+			struct mdp_csc_cfg __user *csc_data)
+{
+	if (copy_in_user(&csc_data32->flags,
+			&csc_data->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_mv[0],
+			&csc_data->csc_mv[0],
+			9 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_pre_bv[0],
+			&csc_data->csc_pre_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_post_bv[0],
+			&csc_data->csc_post_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_pre_lv[0],
+			&csc_data->csc_pre_lv[0],
+			6 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_post_lv[0],
+			&csc_data->csc_post_lv[0],
+			6 * sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Convert a 32-bit CSC config wrapper: copy the block id verbatim and
+ * convert the embedded csc_data with __from_user_csc_cfg().
+ */
+static int __from_user_csc_cfg_data(
+			struct mdp_csc_cfg_data32 __user *csc_cfg32,
+			struct mdp_csc_cfg_data __user *csc_cfg)
+{
+	if (copy_in_user(&csc_cfg->block,
+			&csc_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__from_user_csc_cfg(
+			compat_ptr((uintptr_t)&csc_cfg32->csc_data),
+			&csc_cfg->csc_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Mirror of __from_user_csc_cfg_data(): copy the block id and the
+ * embedded csc_data back to the 32-bit layout.
+ */
+static int __to_user_csc_cfg_data(
+			struct mdp_csc_cfg_data32 __user *csc_cfg32,
+			struct mdp_csc_cfg_data __user *csc_cfg)
+{
+	if (copy_in_user(&csc_cfg32->block,
+			&csc_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__to_user_csc_cfg(
+			compat_ptr((uintptr_t)&csc_cfg32->csc_data),
+			&csc_cfg->csc_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Convert a v1.7 IGC payload: copy the 32-bit payload into a kernel
+ * temporary, widen its two table pointers with compat_ptr(), carry
+ * over len/table_fmt, and write the native payload to the user buffer
+ * at igc_lut->cfg_payload.
+ */
+static int __from_user_igc_lut_data_v17(
+			struct mdp_igc_lut_data32 __user *igc_lut32,
+			struct mdp_igc_lut_data __user *igc_lut)
+{
+	struct mdp_igc_lut_data_v1_7_32 igc_cfg_payload_32;
+	struct mdp_igc_lut_data_v1_7 igc_cfg_payload;
+
+	if (copy_from_user(&igc_cfg_payload_32,
+			   compat_ptr(igc_lut32->cfg_payload),
+			   sizeof(igc_cfg_payload_32))) {
+		pr_err("failed to copy payload from user for igc\n");
+		return -EFAULT;
+	}
+
+	memset(&igc_cfg_payload, 0, sizeof(igc_cfg_payload));
+	igc_cfg_payload.c0_c1_data = compat_ptr(igc_cfg_payload_32.c0_c1_data);
+	igc_cfg_payload.c2_data = compat_ptr(igc_cfg_payload_32.c2_data);
+	igc_cfg_payload.len = igc_cfg_payload_32.len;
+	igc_cfg_payload.table_fmt = igc_cfg_payload_32.table_fmt;
+	if (copy_to_user(igc_lut->cfg_payload, &igc_cfg_payload,
+			 sizeof(igc_cfg_payload))) {
+		pr_err("failed to copy payload to user for igc\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Convert a 32-bit IGC LUT config: copy block/len/ops/version
+ * verbatim, then dispatch on the version - v1.7 uses the cfg_payload
+ * path, anything else falls back to widening the two legacy table
+ * pointers directly.
+ */
+static int __from_user_igc_lut_data(
+			struct mdp_igc_lut_data32 __user *igc_lut32,
+			struct mdp_igc_lut_data __user *igc_lut)
+{
+	uint32_t data;
+	uint32_t version = mdp_igc_vmax;
+	int ret = 0;
+
+	if (copy_in_user(&igc_lut->block,
+			&igc_lut32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut->len,
+			&igc_lut32->len,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut->ops,
+			&igc_lut32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut->version,
+			&igc_lut32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(version, &igc_lut32->version)) {
+		pr_err("failed to copy the version for IGC\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_igc_v1_7:
+		ret = __from_user_igc_lut_data_v17(igc_lut32, igc_lut);
+		if (ret)
+			pr_err("failed to copy payload for igc version %d ret %d\n",
+				version, ret);
+		break;
+	default:
+		pr_debug("version not supported fallback to legacy %d\n",
+			 version);
+		if (get_user(data, &igc_lut32->c0_c1_data) ||
+		    put_user(compat_ptr(data), &igc_lut->c0_c1_data) ||
+		    get_user(data, &igc_lut32->c2_data) ||
+		    put_user(compat_ptr(data), &igc_lut->c2_data))
+			return -EFAULT;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Copy IGC LUT results back to the 32-bit layout: block/len/ops
+ * verbatim, the two legacy table pointers narrowed to compat_caddr_t.
+ * (Only the legacy fields are written back here.)
+ */
+static int __to_user_igc_lut_data(
+			struct mdp_igc_lut_data32 __user *igc_lut32,
+			struct mdp_igc_lut_data __user *igc_lut)
+{
+	unsigned long data;
+
+	if (copy_in_user(&igc_lut32->block,
+			&igc_lut->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut32->len,
+			&igc_lut->len,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut32->ops,
+			&igc_lut->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &igc_lut->c0_c1_data) ||
+	    put_user((compat_caddr_t) data, &igc_lut32->c0_c1_data) ||
+	    get_user(data, (unsigned long *) &igc_lut->c2_data) ||
+	    put_user((compat_caddr_t) data, &igc_lut32->c2_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Copy one AR GC LUT stage (x_start/slope/offset) from the 32-bit
+ * compat struct to the native struct (user-to-user).
+ */
+static int __from_user_ar_gc_lut_data(
+			struct mdp_ar_gc_lut_data32 __user *ar_gc_data32,
+			struct mdp_ar_gc_lut_data __user *ar_gc_data)
+{
+	if (copy_in_user(&ar_gc_data->x_start,
+			&ar_gc_data32->x_start,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data->slope,
+			&ar_gc_data32->slope,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data->offset,
+			&ar_gc_data32->offset,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Mirror of __from_user_ar_gc_lut_data(): copy one AR GC LUT stage
+ * from the native struct back to the 32-bit compat struct.
+ */
+static int __to_user_ar_gc_lut_data(
+			struct mdp_ar_gc_lut_data32 __user *ar_gc_data32,
+			struct mdp_ar_gc_lut_data __user *ar_gc_data)
+{
+	if (copy_in_user(&ar_gc_data32->x_start,
+			&ar_gc_data->x_start,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data32->slope,
+			&ar_gc_data->slope,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data32->offset,
+			&ar_gc_data->offset,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+
+/*
+ * Convert a v1.7 PGC payload: copy the 32-bit payload into a kernel
+ * temporary, widen its three channel-table pointers with
+ * compat_ptr(), carry over len, and write the native payload to the
+ * user buffer at pgc_lut->cfg_payload.
+ */
+static int __from_user_pgc_lut_data_v1_7(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_pgc_lut_data_v1_7_32 pgc_cfg_payload_32;
+	struct mdp_pgc_lut_data_v1_7 pgc_cfg_payload;
+
+	if (copy_from_user(&pgc_cfg_payload_32,
+			   compat_ptr(pgc_lut32->cfg_payload),
+			   sizeof(pgc_cfg_payload_32))) {
+		pr_err("failed to copy from user the pgc32 payload\n");
+		return -EFAULT;
+	}
+	memset(&pgc_cfg_payload, 0, sizeof(pgc_cfg_payload));
+	pgc_cfg_payload.c0_data = compat_ptr(pgc_cfg_payload_32.c0_data);
+	pgc_cfg_payload.c1_data = compat_ptr(pgc_cfg_payload_32.c1_data);
+	pgc_cfg_payload.c2_data = compat_ptr(pgc_cfg_payload_32.c2_data);
+	pgc_cfg_payload.len = pgc_cfg_payload_32.len;
+	if (copy_to_user(pgc_lut->cfg_payload, &pgc_cfg_payload,
+			 sizeof(pgc_cfg_payload))) {
+		pr_err("failed to copy to user pgc payload\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Legacy (pre-v1.7) PGC conversion: validate the per-channel stage
+ * counts against GC_LUT_SEGMENTS, then convert each r/g/b stage array
+ * element-by-element from the 32-bit layout.
+ */
+static int __from_user_pgc_lut_data_legacy(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_ar_gc_lut_data32 __user *r_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *g_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *b_data_temp32;
+	struct mdp_ar_gc_lut_data __user *r_data_temp;
+	struct mdp_ar_gc_lut_data __user *g_data_temp;
+	struct mdp_ar_gc_lut_data __user *b_data_temp;
+	uint8_t num_r_stages, num_g_stages, num_b_stages;
+	int i;
+
+	if (copy_from_user(&num_r_stages,
+			&pgc_lut32->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_g_stages,
+			&pgc_lut32->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_b_stages,
+			&pgc_lut32->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	/*
+	 * The original check tested num_r_stages twice and never bounded
+	 * num_g_stages; validate all three channels, and report each
+	 * channel's own count in the error message.
+	 */
+	if (num_r_stages > GC_LUT_SEGMENTS || num_b_stages > GC_LUT_SEGMENTS
+	    || num_g_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages
+	    || !num_g_stages) {
+		pr_err("invalid number of stages r_stages %d b_stages %d g_stages %d\n",
+		       num_r_stages, num_b_stages, num_g_stages);
+		return -EFAULT;
+	}
+
+	r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data);
+	r_data_temp = pgc_lut->r_data;
+
+	for (i = 0; i < num_r_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&r_data_temp32[i],
+				&r_data_temp[i]))
+			return -EFAULT;
+	}
+
+	g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data);
+	g_data_temp = pgc_lut->g_data;
+
+	for (i = 0; i < num_g_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&g_data_temp32[i],
+				&g_data_temp[i]))
+			return -EFAULT;
+	}
+
+	b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data);
+	b_data_temp = pgc_lut->b_data;
+
+	for (i = 0; i < num_b_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&b_data_temp32[i],
+				&b_data_temp[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Convert a 32-bit PGC LUT config: copy block/flags/stage-counts/
+ * version verbatim, then dispatch on the version - v1.7 uses the
+ * cfg_payload path, anything else uses the legacy per-stage copy.
+ */
+static int __from_user_pgc_lut_data(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	u32 version = mdp_pgc_vmax;
+	int ret = 0;
+
+	if (copy_in_user(&pgc_lut->block,
+			&pgc_lut32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut->flags,
+			&pgc_lut32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut->num_r_stages,
+			&pgc_lut32->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut->num_g_stages,
+			&pgc_lut32->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut->num_b_stages,
+			&pgc_lut32->num_b_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut->version,
+			&pgc_lut32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_from_user(&version, &pgc_lut32->version, sizeof(u32))) {
+		pr_err("version copying failed\n");
+		return -EFAULT;
+	}
+	switch (version) {
+	case mdp_pgc_v1_7:
+		ret = __from_user_pgc_lut_data_v1_7(pgc_lut32, pgc_lut);
+		if (ret)
+			pr_err("failed to copy pgc v17\n");
+		break;
+	default:
+		pr_debug("version %d not supported fallback to legacy\n",
+			 version);
+		ret = __from_user_pgc_lut_data_legacy(pgc_lut32, pgc_lut);
+		if (ret)
+			pr_err("copy from user pgc lut legacy failed ret %d\n",
+				ret);
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Copy legacy PGC LUT results back to the 32-bit caller: header
+ * fields verbatim, then each r/g/b stage array element-by-element.
+ * The stage counts are re-read from the native struct to bound the
+ * copy-back loops.
+ */
+static int __to_user_pgc_lut_data(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_ar_gc_lut_data32 __user *r_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *g_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *b_data_temp32;
+	struct mdp_ar_gc_lut_data __user *r_data_temp;
+	struct mdp_ar_gc_lut_data __user *g_data_temp;
+	struct mdp_ar_gc_lut_data __user *b_data_temp;
+	uint8_t num_r_stages, num_g_stages, num_b_stages;
+	int i;
+
+	if (copy_in_user(&pgc_lut32->block,
+			&pgc_lut->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut32->flags,
+			&pgc_lut->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut32->num_r_stages,
+			&pgc_lut->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut32->num_g_stages,
+			&pgc_lut->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut32->num_b_stages,
+			&pgc_lut->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	/*
+	 * NOTE(review): unlike the legacy from-user path, these counts
+	 * are not re-validated against GC_LUT_SEGMENTS here - confirm the
+	 * native struct was populated by a path that already bounded them.
+	 */
+	if (copy_from_user(&num_r_stages,
+			&pgc_lut->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_g_stages,
+			&pgc_lut->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_b_stages,
+			&pgc_lut->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data);
+	r_data_temp = pgc_lut->r_data;
+	for (i = 0; i < num_r_stages; i++) {
+		if (__to_user_ar_gc_lut_data(
+				&r_data_temp32[i],
+				&r_data_temp[i]))
+			return -EFAULT;
+	}
+
+	g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data);
+	g_data_temp = pgc_lut->g_data;
+	for (i = 0; i < num_g_stages; i++) {
+		if (__to_user_ar_gc_lut_data(
+				&g_data_temp32[i],
+				&g_data_temp[i]))
+			return -EFAULT;
+	}
+
+	b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data);
+	b_data_temp = pgc_lut->b_data;
+	for (i = 0; i < num_b_stages; i++) {
+		if (__to_user_ar_gc_lut_data(
+				&b_data_temp32[i],
+				&b_data_temp[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert a v1.7 histogram-LUT payload: copy the 32-bit payload into
+ * a kernel temporary, widen its data pointer with compat_ptr(), carry
+ * over len, and write the native payload to the user buffer at
+ * hist_lut->cfg_payload.
+ */
+static int __from_user_hist_lut_data_v1_7(
+			struct mdp_hist_lut_data32 __user *hist_lut32,
+			struct mdp_hist_lut_data __user *hist_lut)
+{
+	struct mdp_hist_lut_data_v1_7_32 hist_lut_cfg_payload32;
+	struct mdp_hist_lut_data_v1_7 hist_lut_cfg_payload;
+
+	if (copy_from_user(&hist_lut_cfg_payload32,
+			compat_ptr(hist_lut32->cfg_payload),
+			sizeof(hist_lut_cfg_payload32))) {
+		pr_err("failed to copy the Hist Lut payload from userspace\n");
+		return -EFAULT;
+	}
+
+	memset(&hist_lut_cfg_payload, 0, sizeof(hist_lut_cfg_payload));
+	hist_lut_cfg_payload.len = hist_lut_cfg_payload32.len;
+	hist_lut_cfg_payload.data = compat_ptr(hist_lut_cfg_payload32.data);
+
+	if (copy_to_user(hist_lut->cfg_payload,
+			&hist_lut_cfg_payload,
+			sizeof(hist_lut_cfg_payload))) {
+		pr_err("Failed to copy to user hist lut cfg payload\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert a 32-bit histogram-LUT config: copy block/version/
+ * hist_lut_first/ops/len verbatim, then dispatch on the version -
+ * v1.7 uses the cfg_payload path, anything else widens the legacy
+ * data pointer directly.
+ */
+static int __from_user_hist_lut_data(
+			struct mdp_hist_lut_data32 __user *hist_lut32,
+			struct mdp_hist_lut_data __user *hist_lut)
+{
+	uint32_t version = 0;
+	uint32_t data;
+
+	if (copy_in_user(&hist_lut->block,
+			&hist_lut32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut->version,
+			&hist_lut32->version,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut->hist_lut_first,
+			&hist_lut32->hist_lut_first,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut->ops,
+			&hist_lut32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut->len,
+			&hist_lut32->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_from_user(&version,
+			&hist_lut32->version,
+			sizeof(uint32_t))) {
+		pr_err("failed to copy the version info\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_hist_lut_v1_7:
+		if (__from_user_hist_lut_data_v1_7(hist_lut32, hist_lut)) {
+			pr_err("failed to get hist lut data for version %d\n",
+				version);
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+		if (get_user(data, &hist_lut32->data) ||
+		    put_user(compat_ptr(data), &hist_lut->data))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Copy histogram-LUT results back to the 32-bit layout: block/ops/len
+ * verbatim, the legacy data pointer narrowed to compat_caddr_t.
+ * (Only the legacy fields are written back here.)
+ */
+static int __to_user_hist_lut_data(
+			struct mdp_hist_lut_data32 __user *hist_lut32,
+			struct mdp_hist_lut_data __user *hist_lut)
+{
+	unsigned long data;
+
+	if (copy_in_user(&hist_lut32->block,
+			&hist_lut->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut32->ops,
+			&hist_lut->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut32->len,
+			&hist_lut->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &hist_lut->data) ||
+	    put_user((compat_caddr_t) data, &hist_lut32->data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_rgb_lut_data - mirror the RGB LUT scalar fields from the
+ * compat struct, then convert the embedded fb_cmap (which carries user
+ * pointers). Returns 0 or -EFAULT.
+ */
+static int __from_user_rgb_lut_data(
+		struct mdp_rgb_lut_data32 __user *rgb_lut32,
+		struct mdp_rgb_lut_data __user *rgb_lut)
+{
+	if (copy_in_user(&rgb_lut->flags, &rgb_lut32->flags,
+		sizeof(uint32_t)) ||
+		copy_in_user(&rgb_lut->lut_type, &rgb_lut32->lut_type,
+		sizeof(uint32_t)))
+		return -EFAULT;
+
+	return __from_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap);
+}
+
+/*
+ * __to_user_rgb_lut_data - inverse of __from_user_rgb_lut_data(): mirror
+ * the scalar fields back to the compat struct and convert the fb_cmap.
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_rgb_lut_data(
+		struct mdp_rgb_lut_data32 __user *rgb_lut32,
+		struct mdp_rgb_lut_data __user *rgb_lut)
+{
+	if (copy_in_user(&rgb_lut32->flags, &rgb_lut->flags,
+		sizeof(uint32_t)) ||
+		copy_in_user(&rgb_lut32->lut_type, &rgb_lut->lut_type,
+		sizeof(uint32_t)))
+		return -EFAULT;
+
+	return __to_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap);
+}
+
+/*
+ * __from_user_lut_cfg_data - dispatch LUT conversion by lut_type. The
+ * type is fetched once into the kernel for the switch and also mirrored
+ * into the native user struct; the union member converters then handle
+ * their own embedded pointers. Unknown types succeed as a no-op.
+ * Returns 0 on success or the converter's error code / -EFAULT.
+ */
+static int __from_user_lut_cfg_data(
+			struct mdp_lut_cfg_data32 __user *lut_cfg32,
+			struct mdp_lut_cfg_data __user *lut_cfg)
+{
+	uint32_t lut_type;
+	int ret = 0;
+
+	if (copy_from_user(&lut_type, &lut_cfg32->lut_type,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&lut_cfg->lut_type,
+			&lut_cfg32->lut_type,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (lut_type) {
+	case mdp_lut_igc:
+		ret = __from_user_igc_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data),
+			&lut_cfg->data.igc_lut_data);
+		break;
+	case mdp_lut_pgc:
+		ret = __from_user_pgc_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data),
+			&lut_cfg->data.pgc_lut_data);
+		break;
+	case mdp_lut_hist:
+		ret = __from_user_hist_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data),
+			&lut_cfg->data.hist_lut_data);
+		break;
+	case mdp_lut_rgb:
+		ret = __from_user_rgb_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data),
+			&lut_cfg->data.rgb_lut_data);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * __to_user_lut_cfg_data - copy-back counterpart of
+ * __from_user_lut_cfg_data(): reads lut_type from the native struct and
+ * dispatches to the matching per-type copy-back converter. Unknown types
+ * succeed as a no-op. Returns 0 or an error code.
+ */
+static int __to_user_lut_cfg_data(
+			struct mdp_lut_cfg_data32 __user *lut_cfg32,
+			struct mdp_lut_cfg_data __user *lut_cfg)
+{
+	uint32_t lut_type;
+	int ret = 0;
+
+	if (copy_from_user(&lut_type, &lut_cfg->lut_type,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&lut_cfg32->lut_type,
+			&lut_cfg->lut_type,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (lut_type) {
+	case mdp_lut_igc:
+		ret = __to_user_igc_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data),
+			&lut_cfg->data.igc_lut_data);
+		break;
+	case mdp_lut_pgc:
+		ret = __to_user_pgc_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data),
+			&lut_cfg->data.pgc_lut_data);
+		break;
+	case mdp_lut_hist:
+		ret = __to_user_hist_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data),
+			&lut_cfg->data.hist_lut_data);
+		break;
+	case mdp_lut_rgb:
+		ret = __to_user_rgb_lut_data(
+			compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data),
+			&lut_cfg->data.rgb_lut_data);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * __from_user_qseed_cfg - mirror QSEED scalar fields from the compat
+ * struct and widen its 32-bit data pointer with compat_ptr().
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_qseed_cfg(
+			struct mdp_qseed_cfg32 __user *qseed_data32,
+			struct mdp_qseed_cfg __user *qseed_data)
+{
+	uint32_t data;
+
+	if (copy_in_user(&qseed_data->table_num,
+			&qseed_data32->table_num,
+			sizeof(uint32_t)) ||
+			copy_in_user(&qseed_data->ops,
+			&qseed_data32->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&qseed_data->len,
+			&qseed_data32->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, &qseed_data32->data) ||
+	    put_user(compat_ptr(data), &qseed_data->data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_qseed_cfg - copy-back counterpart: mirror scalar fields to
+ * the compat struct and narrow the 64-bit data pointer to compat_caddr_t.
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_qseed_cfg(
+			struct mdp_qseed_cfg32 __user *qseed_data32,
+			struct mdp_qseed_cfg __user *qseed_data)
+{
+	unsigned long data;
+
+	if (copy_in_user(&qseed_data32->table_num,
+			&qseed_data->table_num,
+			sizeof(uint32_t)) ||
+			copy_in_user(&qseed_data32->ops,
+			&qseed_data->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&qseed_data32->len,
+			&qseed_data->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &qseed_data->data) ||
+	    put_user((compat_caddr_t) data, &qseed_data32->data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_qseed_cfg_data - mirror the block id and convert the
+ * embedded qseed_data member. Returns 0 or -EFAULT.
+ */
+static int __from_user_qseed_cfg_data(
+			struct mdp_qseed_cfg_data32 __user *qseed_cfg32,
+			struct mdp_qseed_cfg_data __user *qseed_cfg)
+{
+	if (copy_in_user(&qseed_cfg->block,
+			&qseed_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__from_user_qseed_cfg(
+			compat_ptr((uintptr_t)&qseed_cfg32->qseed_data),
+			&qseed_cfg->qseed_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_qseed_cfg_data - copy-back counterpart of
+ * __from_user_qseed_cfg_data(). Returns 0 or -EFAULT.
+ */
+static int __to_user_qseed_cfg_data(
+			struct mdp_qseed_cfg_data32 __user *qseed_cfg32,
+			struct mdp_qseed_cfg_data __user *qseed_cfg)
+{
+	if (copy_in_user(&qseed_cfg32->block,
+			&qseed_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__to_user_qseed_cfg(
+			compat_ptr((uintptr_t)&qseed_cfg32->qseed_data),
+			&qseed_cfg->qseed_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_bl_scale_data - mirror the two backlight-scale fields; no
+ * pointers involved. Returns 0 or -EFAULT.
+ */
+static int __from_user_bl_scale_data(
+			struct mdp_bl_scale_data32 __user *bl_scale32,
+			struct mdp_bl_scale_data __user *bl_scale)
+{
+	if (copy_in_user(&bl_scale->min_lvl,
+			&bl_scale32->min_lvl,
+			sizeof(uint32_t)) ||
+			copy_in_user(&bl_scale->scale,
+			&bl_scale32->scale,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_pa_cfg - mirror the legacy picture-adjust scalar fields
+ * from the compat struct; layouts match field-for-field, so no pointer
+ * conversion is needed. Returns 0 or -EFAULT.
+ */
+static int __from_user_pa_cfg(
+			struct mdp_pa_cfg32 __user *pa_data32,
+			struct mdp_pa_cfg __user *pa_data)
+{
+	if (copy_in_user(&pa_data->flags,
+			&pa_data32->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data->hue_adj,
+			&pa_data32->hue_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data->sat_adj,
+			&pa_data32->sat_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data->val_adj,
+			&pa_data32->val_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data->cont_adj,
+			&pa_data32->cont_adj,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_pa_cfg - copy-back counterpart of __from_user_pa_cfg().
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_pa_cfg(
+			struct mdp_pa_cfg32 __user *pa_data32,
+			struct mdp_pa_cfg __user *pa_data)
+{
+	if (copy_in_user(&pa_data32->flags,
+			&pa_data->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data32->hue_adj,
+			&pa_data->hue_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data32->sat_adj,
+			&pa_data->sat_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data32->val_adj,
+			&pa_data->val_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_data32->cont_adj,
+			&pa_data->cont_adj,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_pa_cfg_data - mirror the block id and convert the embedded
+ * legacy PA config. Returns 0 or -EFAULT.
+ */
+static int __from_user_pa_cfg_data(
+			struct mdp_pa_cfg_data32 __user *pa_cfg32,
+			struct mdp_pa_cfg_data __user *pa_cfg)
+{
+	if (copy_in_user(&pa_cfg->block,
+			&pa_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (__from_user_pa_cfg(
+			compat_ptr((uintptr_t)&pa_cfg32->pa_data),
+			&pa_cfg->pa_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_pa_cfg_data - copy-back counterpart of
+ * __from_user_pa_cfg_data(). Returns 0 or -EFAULT.
+ */
+static int __to_user_pa_cfg_data(
+			struct mdp_pa_cfg_data32 __user *pa_cfg32,
+			struct mdp_pa_cfg_data __user *pa_cfg)
+{
+	if (copy_in_user(&pa_cfg32->block,
+			&pa_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (__to_user_pa_cfg(
+			compat_ptr((uintptr_t)&pa_cfg32->pa_data),
+			&pa_cfg->pa_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_mem_col_cfg - mirror the legacy memory-colour config
+ * fields (all fixed-width, no pointers). Returns 0 or -EFAULT.
+ */
+static int __from_user_mem_col_cfg(
+			struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32,
+			struct mdp_pa_mem_col_cfg __user *mem_col_cfg)
+{
+	if (copy_in_user(&mem_col_cfg->color_adjust_p0,
+			&mem_col_cfg32->color_adjust_p0,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg->color_adjust_p1,
+			&mem_col_cfg32->color_adjust_p1,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg->hue_region,
+			&mem_col_cfg32->hue_region,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg->sat_region,
+			&mem_col_cfg32->sat_region,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg->val_region,
+			&mem_col_cfg32->val_region,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_mem_col_cfg - copy-back counterpart of
+ * __from_user_mem_col_cfg(). Returns 0 or -EFAULT.
+ */
+static int __to_user_mem_col_cfg(
+			struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32,
+			struct mdp_pa_mem_col_cfg __user *mem_col_cfg)
+{
+	if (copy_in_user(&mem_col_cfg32->color_adjust_p0,
+			&mem_col_cfg->color_adjust_p0,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg32->color_adjust_p1,
+			&mem_col_cfg->color_adjust_p1,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg32->hue_region,
+			&mem_col_cfg->hue_region,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg32->sat_region,
+			&mem_col_cfg->sat_region,
+			sizeof(uint32_t)) ||
+			copy_in_user(&mem_col_cfg32->val_region,
+			&mem_col_cfg->val_region,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_pa_v2_data - convert the legacy PAv2 payload: mirror the
+ * scalar fields, widen the two six-zone curve pointers, then convert the
+ * three embedded memory-colour configs. Returns 0 or -EFAULT.
+ */
+static int __from_user_pa_v2_data(
+			struct mdp_pa_v2_data32 __user *pa_v2_data32,
+			struct mdp_pa_v2_data __user *pa_v2_data)
+{
+	uint32_t data;
+
+	if (copy_in_user(&pa_v2_data->flags,
+			&pa_v2_data32->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data->global_hue_adj,
+			&pa_v2_data32->global_hue_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data->global_sat_adj,
+			&pa_v2_data32->global_sat_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data->global_val_adj,
+			&pa_v2_data32->global_val_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data->global_cont_adj,
+			&pa_v2_data32->global_cont_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data->six_zone_thresh,
+			&pa_v2_data32->six_zone_thresh,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data->six_zone_len,
+			&pa_v2_data32->six_zone_len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	/* Widen both six-zone curve pointers for the 64-bit layout. */
+	if (get_user(data, &pa_v2_data32->six_zone_curve_p0) ||
+	    put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p0) ||
+	    get_user(data, &pa_v2_data32->six_zone_curve_p1) ||
+	    put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p1))
+		return -EFAULT;
+
+	if (__from_user_mem_col_cfg(
+			compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg),
+			&pa_v2_data->skin_cfg) ||
+			__from_user_mem_col_cfg(
+			compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg),
+			&pa_v2_data->sky_cfg) ||
+			__from_user_mem_col_cfg(
+			compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg),
+			&pa_v2_data->fol_cfg))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_pa_v2_data - copy-back counterpart of
+ * __from_user_pa_v2_data(): mirror scalars, narrow the six-zone curve
+ * pointers, and convert the memory-colour configs back. Returns 0 or
+ * -EFAULT.
+ */
+static int __to_user_pa_v2_data(
+			struct mdp_pa_v2_data32 __user *pa_v2_data32,
+			struct mdp_pa_v2_data __user *pa_v2_data)
+{
+	unsigned long data;
+
+	if (copy_in_user(&pa_v2_data32->flags,
+			&pa_v2_data->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data32->global_hue_adj,
+			&pa_v2_data->global_hue_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data32->global_sat_adj,
+			&pa_v2_data->global_sat_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data32->global_val_adj,
+			&pa_v2_data->global_val_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data32->global_cont_adj,
+			&pa_v2_data->global_cont_adj,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data32->six_zone_thresh,
+			&pa_v2_data->six_zone_thresh,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_data32->six_zone_len,
+			&pa_v2_data->six_zone_len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p0) ||
+	    put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p0) ||
+	    get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p1) ||
+	    put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p1))
+		return -EFAULT;
+
+	if (__to_user_mem_col_cfg(
+			compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg),
+			&pa_v2_data->skin_cfg) ||
+			__to_user_mem_col_cfg(
+			compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg),
+			&pa_v2_data->sky_cfg) ||
+			__to_user_mem_col_cfg(
+			compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg),
+			&pa_v2_data->fol_cfg))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_pa_mem_col_data_v1_7 - field-wise copy of a v1.7
+ * memory-colour block between two KERNEL-resident structs (both
+ * arguments are plain pointers into local payload copies, not __user).
+ */
+static inline void __from_user_pa_mem_col_data_v1_7(
+			struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32,
+			struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
+{
+	mem_col_data->color_adjust_p0 = mem_col_data32->color_adjust_p0;
+	mem_col_data->color_adjust_p1 = mem_col_data32->color_adjust_p1;
+	mem_col_data->color_adjust_p2 = mem_col_data32->color_adjust_p2;
+	mem_col_data->blend_gain = mem_col_data32->blend_gain;
+	mem_col_data->sat_hold = mem_col_data32->sat_hold;
+	mem_col_data->val_hold = mem_col_data32->val_hold;
+	mem_col_data->hue_region = mem_col_data32->hue_region;
+	mem_col_data->sat_region = mem_col_data32->sat_region;
+	mem_col_data->val_region = mem_col_data32->val_region;
+}
+
+
+/*
+ * __from_user_pa_data_v1_7 - pull the 32-bit v1.7 PA payload into the
+ * kernel, rebuild it in the native layout (widening the six-zone curve
+ * pointers), and write it out to the native payload buffer.
+ * NOTE(review): pa_v2_cfg->cfg_payload is dereferenced directly although
+ * pa_v2_cfg is __user-tagged — presumably it points at a buffer set up
+ * by this driver's compat path; confirm against the caller.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_pa_data_v1_7(
+			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
+			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
+{
+	struct mdp_pa_data_v1_7_32 pa_cfg_payload32;
+	struct mdp_pa_data_v1_7 pa_cfg_payload;
+
+	if (copy_from_user(&pa_cfg_payload32,
+			compat_ptr(pa_v2_cfg32->cfg_payload),
+			sizeof(pa_cfg_payload32))) {
+		pr_err("failed to copy the PA payload from userspace\n");
+		return -EFAULT;
+	}
+
+	memset(&pa_cfg_payload, 0, sizeof(pa_cfg_payload));
+	pa_cfg_payload.mode = pa_cfg_payload32.mode;
+	pa_cfg_payload.global_hue_adj = pa_cfg_payload32.global_hue_adj;
+	pa_cfg_payload.global_sat_adj = pa_cfg_payload32.global_sat_adj;
+	pa_cfg_payload.global_val_adj = pa_cfg_payload32.global_val_adj;
+	pa_cfg_payload.global_cont_adj = pa_cfg_payload32.global_cont_adj;
+
+	__from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg,
+					&pa_cfg_payload.skin_cfg);
+	__from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg,
+					&pa_cfg_payload.sky_cfg);
+	__from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg,
+					&pa_cfg_payload.fol_cfg);
+
+	pa_cfg_payload.six_zone_thresh = pa_cfg_payload32.six_zone_thresh;
+	pa_cfg_payload.six_zone_adj_p0 = pa_cfg_payload32.six_zone_adj_p0;
+	pa_cfg_payload.six_zone_adj_p1 = pa_cfg_payload32.six_zone_adj_p1;
+	pa_cfg_payload.six_zone_sat_hold = pa_cfg_payload32.six_zone_sat_hold;
+	pa_cfg_payload.six_zone_val_hold = pa_cfg_payload32.six_zone_val_hold;
+	pa_cfg_payload.six_zone_len = pa_cfg_payload32.six_zone_len;
+
+	/* Widen the two curve pointers for the 64-bit payload. */
+	pa_cfg_payload.six_zone_curve_p0 =
+		compat_ptr(pa_cfg_payload32.six_zone_curve_p0);
+	pa_cfg_payload.six_zone_curve_p1 =
+		compat_ptr(pa_cfg_payload32.six_zone_curve_p1);
+
+	if (copy_to_user(pa_v2_cfg->cfg_payload, &pa_cfg_payload,
+			sizeof(pa_cfg_payload))) {
+		pr_err("Failed to copy to user pa cfg payload\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_pa_v2_cfg_data - top-level PAv2 compat converter: mirror
+ * the header fields, then dispatch on version to the v1.7 payload
+ * converter or the legacy in-struct PAv2 converter.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_pa_v2_cfg_data(
+			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
+			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
+{
+	uint32_t version;
+
+	if (copy_in_user(&pa_v2_cfg->block,
+			&pa_v2_cfg32->block,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_cfg->version,
+			&pa_v2_cfg32->version,
+			sizeof(uint32_t)) ||
+			copy_in_user(&pa_v2_cfg->flags,
+			&pa_v2_cfg32->flags,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_from_user(&version,
+			&pa_v2_cfg32->version,
+			sizeof(uint32_t))) {
+		pr_err("failed to copy the version info\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_pa_v1_7:
+		if (__from_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) {
+			pr_err("failed to get pa data for version %d\n",
+				version);
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+		if (__from_user_pa_v2_data(
+				compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data),
+				&pa_v2_cfg->pa_v2_data))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __to_user_pa_mem_col_data_v1_7 - inverse of the from_user variant:
+ * field-wise copy from the native kernel struct into the 32-bit kernel
+ * struct (neither argument is __user).
+ */
+static inline void __to_user_pa_mem_col_data_v1_7(
+			struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32,
+			struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
+{
+	mem_col_data32->color_adjust_p0 = mem_col_data->color_adjust_p0;
+	mem_col_data32->color_adjust_p1 = mem_col_data->color_adjust_p1;
+	mem_col_data32->color_adjust_p2 = mem_col_data->color_adjust_p2;
+	mem_col_data32->blend_gain = mem_col_data->blend_gain;
+	mem_col_data32->sat_hold = mem_col_data->sat_hold;
+	mem_col_data32->val_hold = mem_col_data->val_hold;
+	mem_col_data32->hue_region = mem_col_data->hue_region;
+	mem_col_data32->sat_region = mem_col_data->sat_region;
+	mem_col_data32->val_region = mem_col_data->val_region;
+}
+
+/*
+ * __to_user_pa_data_v1_7 - read the native v1.7 PA payload back into the
+ * kernel, repack it into the 32-bit layout, and write it to the compat
+ * caller's payload buffer. The curve pointers are not copied back (they
+ * were inputs).
+ * NOTE(review): pa_v2_cfg->cfg_payload is dereferenced directly despite
+ * the __user tag — presumably a driver-owned compat buffer; confirm.
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_pa_data_v1_7(
+			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
+			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
+{
+	struct mdp_pa_data_v1_7_32 pa_cfg_payload32;
+	struct mdp_pa_data_v1_7 pa_cfg_payload;
+
+	memset(&pa_cfg_payload32, 0, sizeof(pa_cfg_payload32));
+	if (copy_from_user(&pa_cfg_payload,
+			pa_v2_cfg->cfg_payload,
+			sizeof(pa_cfg_payload))) {
+		pr_err("failed to copy the PA payload from userspace\n");
+		return -EFAULT;
+	}
+
+	pa_cfg_payload32.mode = pa_cfg_payload.mode;
+	pa_cfg_payload32.global_hue_adj = pa_cfg_payload.global_hue_adj;
+	pa_cfg_payload32.global_sat_adj = pa_cfg_payload.global_sat_adj;
+	pa_cfg_payload32.global_val_adj = pa_cfg_payload.global_val_adj;
+	pa_cfg_payload32.global_cont_adj = pa_cfg_payload.global_cont_adj;
+
+	__to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg,
+					&pa_cfg_payload.skin_cfg);
+	__to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg,
+					&pa_cfg_payload.sky_cfg);
+	__to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg,
+					&pa_cfg_payload.fol_cfg);
+
+	pa_cfg_payload32.six_zone_thresh = pa_cfg_payload.six_zone_thresh;
+	pa_cfg_payload32.six_zone_adj_p0 = pa_cfg_payload.six_zone_adj_p0;
+	pa_cfg_payload32.six_zone_adj_p1 = pa_cfg_payload.six_zone_adj_p1;
+	pa_cfg_payload32.six_zone_sat_hold = pa_cfg_payload.six_zone_sat_hold;
+	pa_cfg_payload32.six_zone_val_hold = pa_cfg_payload.six_zone_val_hold;
+	pa_cfg_payload32.six_zone_len = pa_cfg_payload.six_zone_len;
+
+	if (copy_to_user(compat_ptr(pa_v2_cfg32->cfg_payload),
+			&pa_cfg_payload32,
+			sizeof(pa_cfg_payload32))) {
+		pr_err("Failed to copy to user pa cfg payload\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * __to_user_pa_v2_cfg_data - top-level PAv2 compat copy-back. The
+ * copy-back is skipped (returns 0) unless the caller's flags contain
+ * MDP_PP_OPS_READ; otherwise dispatch on version as in the from_user
+ * path. Returns 0 or -EFAULT.
+ */
+static int __to_user_pa_v2_cfg_data(
+			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
+			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
+{
+	uint32_t version = 0;
+	uint32_t flags = 0;
+
+	if (copy_from_user(&version,
+			&pa_v2_cfg32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (version) {
+	case mdp_pa_v1_7:
+		if (copy_from_user(&flags,
+				&pa_v2_cfg32->flags,
+				sizeof(uint32_t))) {
+			pr_err("failed to get PA v1_7 flags\n");
+			return -EFAULT;
+		}
+
+		if (!(flags & MDP_PP_OPS_READ)) {
+			pr_debug("Read op not set. Skipping compat copyback\n");
+			return 0;
+		}
+
+		if (__to_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) {
+			pr_err("failed to set pa data for version %d\n",
+				version);
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+
+		/* Legacy layout keeps its flags inside pa_v2_data. */
+		if (copy_from_user(&flags,
+				&pa_v2_cfg32->pa_v2_data.flags,
+				sizeof(uint32_t))) {
+			pr_err("failed to get PAv2 flags\n");
+			return -EFAULT;
+		}
+
+		if (!(flags & MDP_PP_OPS_READ)) {
+			pr_debug("Read op not set. Skipping compat copyback\n");
+			return 0;
+		}
+
+		if (__to_user_pa_v2_data(
+				compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data),
+				&pa_v2_cfg->pa_v2_data))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_dither_cfg_data - mirror the dither config fields (all
+ * fixed-width, no pointers). Returns 0 or -EFAULT.
+ */
+static int __from_user_dither_cfg_data(
+			struct mdp_dither_cfg_data32 __user *dither_cfg32,
+			struct mdp_dither_cfg_data __user *dither_cfg)
+{
+	if (copy_in_user(&dither_cfg->block,
+			&dither_cfg32->block,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg->flags,
+			&dither_cfg32->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg->g_y_depth,
+			&dither_cfg32->g_y_depth,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg->r_cr_depth,
+			&dither_cfg32->r_cr_depth,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg->b_cb_depth,
+			&dither_cfg32->b_cb_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_dither_cfg_data - copy-back counterpart of
+ * __from_user_dither_cfg_data(). Returns 0 or -EFAULT.
+ */
+static int __to_user_dither_cfg_data(
+			struct mdp_dither_cfg_data32 __user *dither_cfg32,
+			struct mdp_dither_cfg_data __user *dither_cfg)
+{
+	if (copy_in_user(&dither_cfg32->block,
+			&dither_cfg->block,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg32->flags,
+			&dither_cfg->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg32->g_y_depth,
+			&dither_cfg->g_y_depth,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg32->r_cr_depth,
+			&dither_cfg->r_cr_depth,
+			sizeof(uint32_t)) ||
+			copy_in_user(&dither_cfg32->b_cb_depth,
+			&dither_cfg->b_cb_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_gamut_cfg_data_v17 - pull the 32-bit v1.7 gamut payload
+ * into the kernel, rebuild it in the native layout (widening every table
+ * and scale-offset pointer), and write it to the native payload buffer.
+ * NOTE(review): gamut_cfg->cfg_payload is dereferenced directly despite
+ * the __user tag — presumably a driver-owned compat buffer; confirm.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_gamut_cfg_data_v17(
+		struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+		struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	struct mdp_gamut_data_v1_7 gamut_cfg_payload;
+	struct mdp_gamut_data_v1_7_32 gamut_cfg_payload32;
+	u32 i = 0;
+
+	if (copy_from_user(&gamut_cfg_payload32,
+			compat_ptr(gamut_cfg32->cfg_payload),
+			sizeof(gamut_cfg_payload32))) {
+		pr_err("failed to copy the gamut payload from userspace\n");
+		return -EFAULT;
+	}
+
+	memset(&gamut_cfg_payload, 0, sizeof(gamut_cfg_payload));
+	gamut_cfg_payload.mode = gamut_cfg_payload32.mode;
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		gamut_cfg_payload.tbl_size[i] =
+			gamut_cfg_payload32.tbl_size[i];
+		gamut_cfg_payload.c0_data[i] =
+			compat_ptr(gamut_cfg_payload32.c0_data[i]);
+		gamut_cfg_payload.c1_c2_data[i] =
+			compat_ptr(gamut_cfg_payload32.c1_c2_data[i]);
+	}
+	for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) {
+		gamut_cfg_payload.tbl_scale_off_sz[i] =
+			gamut_cfg_payload32.tbl_scale_off_sz[i];
+		gamut_cfg_payload.scale_off_data[i] =
+			compat_ptr(gamut_cfg_payload32.scale_off_data[i]);
+	}
+	if (copy_to_user(gamut_cfg->cfg_payload, &gamut_cfg_payload,
+			sizeof(gamut_cfg_payload))) {
+		pr_err("failed to copy the gamut payload to userspace\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * __from_user_gamut_cfg_data - top-level gamut compat converter: mirror
+ * the header fields, then dispatch on version to the v1.7 payload
+ * converter or the legacy r/g/b table-pointer arrays.
+ * Returns 0 on success, -EFAULT on any user-copy failure.
+ */
+static int __from_user_gamut_cfg_data(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	uint32_t data, version;
+	int i;
+
+	if (copy_in_user(&gamut_cfg->block,
+			&gamut_cfg32->block,
+			sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg->flags,
+			&gamut_cfg32->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg->gamut_first,
+			&gamut_cfg32->gamut_first,
+			sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg->tbl_size[0],
+			&gamut_cfg32->tbl_size[0],
+			MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg->version,
+			&gamut_cfg32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;	/* was "return 0": a copy failure must be reported */
+
+	if (copy_from_user(&version, &gamut_cfg32->version, sizeof(u32))) {
+		pr_err("failed to copy the version info\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_gamut_v1_7:
+		if (__from_user_gamut_cfg_data_v17(gamut_cfg32, gamut_cfg)) {
+			pr_err("failed to get the gamut data for version %d\n",
+				version);
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid fallback to legacy\n");
+		/*
+		 * The legacy gamut LUT carries three fixed-size arrays of
+		 * user pointers (R, G and B tables); each 32-bit pointer
+		 * must be widened individually into the native struct.
+		 */
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->r_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->r_tbl[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->g_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->g_tbl[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->b_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->b_tbl[i]))
+				return -EFAULT;
+		}
+		break;
+	}
+	return 0;
+}
+
+/*
+ * __to_user_gamut_cfg_data - copy-back counterpart for the legacy gamut
+ * layout: mirror the header fields and narrow the r/g/b table pointers
+ * back to compat addresses.
+ * Returns 0 on success, -EFAULT on any user-copy failure.
+ */
+static int __to_user_gamut_cfg_data(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	unsigned long data;
+	int i;
+
+	if (copy_in_user(&gamut_cfg32->block,
+			&gamut_cfg->block,
+			sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg32->flags,
+			&gamut_cfg->flags,
+			sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg32->gamut_first,
+			&gamut_cfg->gamut_first,
+			sizeof(uint32_t)) ||
+			copy_in_user(&gamut_cfg32->tbl_size[0],
+			&gamut_cfg->tbl_size[0],
+			MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)))
+		return -EFAULT;	/* was "return 0": a copy failure must be reported */
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->r_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->r_tbl[i]))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->g_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->g_tbl[i]))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		/* Fix copy-paste bug: this loop wrote b_tbl into g_tbl[i]. */
+		if (get_user(data, (unsigned long *) &gamut_cfg->b_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->b_tbl[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_config_data - mirror the register-calibration
+ * fields (ops, addr, data; all fixed-width). Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_config_data(
+			struct mdp_calib_config_data32 __user *calib_cfg32,
+			struct mdp_calib_config_data __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg->ops,
+			&calib_cfg32->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_cfg->addr,
+			&calib_cfg32->addr,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_cfg->data,
+			&calib_cfg32->data,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_calib_config_data - copy-back counterpart of
+ * __from_user_calib_config_data(). Returns 0 or -EFAULT.
+ */
+static int __to_user_calib_config_data(
+			struct mdp_calib_config_data32 __user *calib_cfg32,
+			struct mdp_calib_config_data __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg32->ops,
+			&calib_cfg->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_cfg32->addr,
+			&calib_cfg->addr,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_cfg32->data,
+			&calib_cfg->data,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_init - mirror the assertive-display init parameters
+ * field by field (the native and compat structs differ only in pointer
+ * width), then widen the three LUT pointers with compat_ptr().
+ * The per-field copies are kept explicit because the structs contain
+ * mixed u8/u16/u32 members and cannot be block-copied safely.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_ad_init(
+			struct mdss_ad_init32 __user *ad_init32,
+			struct mdss_ad_init __user *ad_init)
+{
+	uint32_t data;
+
+	if (copy_in_user(&ad_init->asym_lut[0],
+			&ad_init32->asym_lut[0],
+			33 * sizeof(uint32_t)) ||
+			copy_in_user(&ad_init->color_corr_lut[0],
+			&ad_init32->color_corr_lut[0],
+			33 * sizeof(uint32_t)) ||
+			copy_in_user(&ad_init->i_control[0],
+			&ad_init32->i_control[0],
+			2 * sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->black_lvl,
+			&ad_init32->black_lvl,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_init->white_lvl,
+			&ad_init32->white_lvl,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_init->var,
+			&ad_init32->var,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->limit_ampl,
+			&ad_init32->limit_ampl,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->i_dither,
+			&ad_init32->i_dither,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->slope_max,
+			&ad_init32->slope_max,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->slope_min,
+			&ad_init32->slope_min,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->dither_ctl,
+			&ad_init32->dither_ctl,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->format,
+			&ad_init32->format,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->auto_size,
+			&ad_init32->auto_size,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->frame_w,
+			&ad_init32->frame_w,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_init->frame_h,
+			&ad_init32->frame_h,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_init->logo_v,
+			&ad_init32->logo_v,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->logo_h,
+			&ad_init32->logo_h,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_init->alpha,
+			&ad_init32->alpha,
+			sizeof(uint32_t)) ||
+			copy_in_user(&ad_init->alpha_base,
+			&ad_init32->alpha_base,
+			sizeof(uint32_t)) ||
+			copy_in_user(&ad_init->bl_lin_len,
+			&ad_init32->bl_lin_len,
+			sizeof(uint32_t)) ||
+			copy_in_user(&ad_init->bl_att_len,
+			&ad_init32->bl_att_len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+
+	/* Widen the three backlight LUT pointers for the 64-bit layout. */
+	if (get_user(data, &ad_init32->bl_lin) ||
+	    put_user(compat_ptr(data), &ad_init->bl_lin) ||
+	    get_user(data, &ad_init32->bl_lin_inv) ||
+	    put_user(compat_ptr(data), &ad_init->bl_lin_inv) ||
+	    get_user(data, &ad_init32->bl_att_lut) ||
+	    put_user(compat_ptr(data), &ad_init->bl_att_lut))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_cfg - mirror the assertive-display runtime config; all
+ * fields are fixed-width scalars/arrays, so no pointer conversion is
+ * needed. Returns 0 or -EFAULT.
+ */
+static int __from_user_ad_cfg(
+			struct mdss_ad_cfg32 __user *ad_cfg32,
+			struct mdss_ad_cfg __user *ad_cfg)
+{
+	if (copy_in_user(&ad_cfg->mode,
+			&ad_cfg32->mode,
+			sizeof(uint32_t)) ||
+			copy_in_user(&ad_cfg->al_calib_lut[0],
+			&ad_cfg32->al_calib_lut[0],
+			33 * sizeof(uint32_t)) ||
+			copy_in_user(&ad_cfg->backlight_min,
+			&ad_cfg32->backlight_min,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->backlight_max,
+			&ad_cfg32->backlight_max,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->backlight_scale,
+			&ad_cfg32->backlight_scale,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->amb_light_min,
+			&ad_cfg32->amb_light_min,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->filter[0],
+			&ad_cfg32->filter[0],
+			2 * sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->calib[0],
+			&ad_cfg32->calib[0],
+			4 * sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->strength_limit,
+			&ad_cfg32->strength_limit,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_cfg->t_filter_recursion,
+			&ad_cfg32->t_filter_recursion,
+			sizeof(uint8_t)) ||
+			copy_in_user(&ad_cfg->stab_itr,
+			&ad_cfg32->stab_itr,
+			sizeof(uint16_t)) ||
+			copy_in_user(&ad_cfg->bl_ctrl_mode,
+			&ad_cfg32->bl_ctrl_mode,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_init_cfg - dispatch AD conversion on the ops word:
+ * MDP_PP_AD_INIT selects the init-parameter union member, MDP_PP_AD_CFG
+ * the runtime-config member; anything else is -EINVAL.
+ * Returns 0, -EFAULT or -EINVAL.
+ */
+static int __from_user_ad_init_cfg(
+			struct mdss_ad_init_cfg32 __user *ad_info32,
+			struct mdss_ad_init_cfg __user *ad_info)
+{
+	uint32_t op;
+
+	if (copy_from_user(&op, &ad_info32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&ad_info->ops,
+			&ad_info32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (op & MDP_PP_AD_INIT) {
+		if (__from_user_ad_init(
+				compat_ptr((uintptr_t)&ad_info32->params.init),
+				&ad_info->params.init))
+			return -EFAULT;
+	} else if (op & MDP_PP_AD_CFG) {
+		if (__from_user_ad_cfg(
+				compat_ptr((uintptr_t)&ad_info32->params.cfg),
+				&ad_info->params.cfg))
+			return -EFAULT;
+	} else {
+		pr_err("Invalid AD init/config operation\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_input - mirror the AD input header and then only the
+ * union member selected by the mode; unknown modes copy nothing extra.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_ad_input(
+			struct mdss_ad_input32 __user *ad_input32,
+			struct mdss_ad_input __user *ad_input)
+{
+	int mode;
+
+	if (copy_from_user(&mode,
+			&ad_input32->mode,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&ad_input->mode,
+			&ad_input32->mode,
+			sizeof(uint32_t)) ||
+			copy_in_user(&ad_input->output,
+			&ad_input32->output,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+	case MDSS_AD_MODE_AUTO_STR:
+		if (copy_in_user(&ad_input->in.amb_light,
+				&ad_input32->in.amb_light,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+	case MDSS_AD_MODE_MAN_STR:
+		if (copy_in_user(&ad_input->in.strength,
+				&ad_input32->in.strength,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_CALIB:
+		if (copy_in_user(&ad_input->in.calib_bl,
+				&ad_input32->in.calib_bl,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __to_user_ad_input - copy-back counterpart of __from_user_ad_input():
+ * reads mode from the native struct and mirrors only the matching union
+ * member back to the compat struct. Returns 0 or -EFAULT.
+ */
+static int __to_user_ad_input(
+			struct mdss_ad_input32 __user *ad_input32,
+			struct mdss_ad_input __user *ad_input)
+{
+	int mode;
+
+	if (copy_from_user(&mode,
+			&ad_input->mode,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&ad_input32->mode,
+			&ad_input->mode,
+			sizeof(uint32_t)) ||
+			copy_in_user(&ad_input32->output,
+			&ad_input->output,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+	case MDSS_AD_MODE_AUTO_STR:
+		if (copy_in_user(&ad_input32->in.amb_light,
+				&ad_input->in.amb_light,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+	case MDSS_AD_MODE_MAN_STR:
+		if (copy_in_user(&ad_input32->in.strength,
+				&ad_input->in.strength,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_CALIB:
+		if (copy_in_user(&ad_input32->in.calib_bl,
+				&ad_input->in.calib_bl,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_cfg - mirror the calibration-mode fields (ops and
+ * calib_mask; no pointers). Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_cfg(
+			struct mdss_calib_cfg32 __user *calib_cfg32,
+			struct mdss_calib_cfg __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg->ops,
+			&calib_cfg32->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_cfg->calib_mask,
+			&calib_cfg32->calib_mask,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_config_buffer - mirror ops/size and widen the
+ * 32-bit buffer pointer with compat_ptr(). Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_config_buffer(
+			struct mdp_calib_config_buffer32 __user *calib_buffer32,
+			struct mdp_calib_config_buffer __user *calib_buffer)
+{
+	uint32_t data;
+
+	if (copy_in_user(&calib_buffer->ops,
+			&calib_buffer32->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_buffer->size,
+			&calib_buffer32->size,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, &calib_buffer32->buffer) ||
+	    put_user(compat_ptr(data), &calib_buffer->buffer))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_calib_config_buffer - copy-back counterpart: mirror
+ * ops/size and narrow the 64-bit buffer pointer to compat_caddr_t.
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_calib_config_buffer(
+			struct mdp_calib_config_buffer32 __user *calib_buffer32,
+			struct mdp_calib_config_buffer __user *calib_buffer)
+{
+	unsigned long data;
+
+	if (copy_in_user(&calib_buffer32->ops,
+			&calib_buffer->ops,
+			sizeof(uint32_t)) ||
+			copy_in_user(&calib_buffer32->size,
+			&calib_buffer->size,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &calib_buffer->buffer) ||
+	    put_user((compat_caddr_t) data, &calib_buffer32->buffer))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_dcm_state - copy ops and dcm_state from the 32-bit
+ * compat struct to the native user struct.  Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_dcm_state(
+			struct mdp_calib_dcm_state32 __user *calib_dcm32,
+			struct mdp_calib_dcm_state __user *calib_dcm)
+{
+	if (copy_in_user(&calib_dcm->ops,
+			&calib_dcm32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&calib_dcm->dcm_state,
+			&calib_dcm32->dcm_state,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Size to reserve for an IGC LUT payload (largest supported version). */
+static u32 __pp_compat_size_igc(void)
+{
+	u32 alloc_size = 0;
+	/* When we have multiple versions pick largest struct size */
+	alloc_size = sizeof(struct mdp_igc_lut_data_v1_7);
+	return alloc_size;
+}
+
+/* Size to reserve for a histogram LUT payload (largest supported version). */
+static u32 __pp_compat_size_hist_lut(void)
+{
+	u32 alloc_size = 0;
+	/* When we have multiple versions pick largest struct size */
+	alloc_size = sizeof(struct mdp_hist_lut_data_v1_7);
+	return alloc_size;
+}
+
+/*
+ * Size to reserve for a PGC payload: three per-channel GC LUT tables
+ * (r/g/b) plus the v1.7 header struct.
+ */
+static u32 __pp_compat_size_pgc(void)
+{
+	u32 tbl_sz_max = 0;
+
+	tbl_sz_max =  3 * GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
+	tbl_sz_max += sizeof(struct mdp_pgc_lut_data_v1_7);
+	return tbl_sz_max;
+}
+
+/* Size to reserve for a PCC payload (largest supported version). */
+static u32 __pp_compat_size_pcc(void)
+{
+	/* if new version of PCC is added return max struct size */
+	return sizeof(struct mdp_pcc_data_v1_7);
+}
+
+/* Size to reserve for a PA payload (largest supported version). */
+static u32 __pp_compat_size_pa(void)
+{
+	/* if new version of PA is added return max struct size */
+	return sizeof(struct mdp_pa_data_v1_7);
+}
+
+/* Size to reserve for a gamut payload (v1.7 struct). */
+static u32 __pp_compat_size_gamut(void)
+{
+	return sizeof(struct mdp_gamut_data_v1_7);
+}
+
+/*
+ * __pp_compat_alloc - allocate a native msmfb_mdp_pp struct (plus any
+ * op-specific payload) on the compat user stack and wire the payload
+ * pointers inside the struct to the space just past it.
+ *
+ * @pp32: 32-bit user struct (read only to discover the LUT type).
+ * @pp:   out parameter; receives the compat_alloc_user_space() block.
+ * @op:   mdp pp operation, selects how much extra payload to reserve.
+ *
+ * Returns 0, -EFAULT on copy failure, or -ENOMEM on allocation failure.
+ *
+ * NOTE(review): memory from compat_alloc_user_space() is user-space, yet
+ * it is written here with memset() and direct member assignment rather
+ * than put_user()/clear_user(); presumably the compat stack area is
+ * guaranteed accessible — confirm against arch compat conventions.
+ */
+static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32,
+			struct msmfb_mdp_pp __user **pp,
+			uint32_t op)
+{
+	uint32_t alloc_size = 0, lut_type, pgc_size = 0;
+
+	alloc_size = sizeof(struct msmfb_mdp_pp);
+	switch (op) {
+	case mdp_op_lut_cfg:
+		/* LUT payload size depends on which LUT is being set. */
+		if (copy_from_user(&lut_type,
+			&pp32->data.lut_cfg_data.lut_type,
+			sizeof(uint32_t)))
+			return -EFAULT;
+
+		switch (lut_type) {
+		case mdp_lut_pgc:
+
+			pgc_size = GC_LUT_SEGMENTS *
+				sizeof(struct mdp_ar_gc_lut_data);
+			alloc_size += __pp_compat_size_pgc();
+
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL)
+				return -ENOMEM;
+			memset(*pp, 0, alloc_size);
+
+			/*
+			 * Carve the region after the struct into the three
+			 * per-channel tables followed by the cfg payload.
+			 */
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.r_data =
+					(struct mdp_ar_gc_lut_data *)
+					((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp));
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.g_data =
+					(struct mdp_ar_gc_lut_data *)
+					((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp) +
+					pgc_size);
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.b_data =
+					(struct mdp_ar_gc_lut_data *)
+					((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp) +
+					(2 * pgc_size));
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.cfg_payload
+					 = (void *)((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp) +
+					(3 * pgc_size));
+			break;
+		case mdp_lut_igc:
+			alloc_size += __pp_compat_size_igc();
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL) {
+				pr_err("failed to alloc from user size %d for igc\n",
+					alloc_size);
+				return -ENOMEM;
+			}
+			memset(*pp, 0, alloc_size);
+			(*pp)->data.lut_cfg_data.data.igc_lut_data.cfg_payload
+					= (void *)((unsigned long)(*pp) +
+					   sizeof(struct msmfb_mdp_pp));
+			break;
+		case mdp_lut_hist:
+			alloc_size += __pp_compat_size_hist_lut();
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL) {
+				pr_err("failed to alloc from user size %d for hist lut\n",
+					alloc_size);
+				return -ENOMEM;
+			}
+			memset(*pp, 0, alloc_size);
+			(*pp)->data.lut_cfg_data.data.hist_lut_data.cfg_payload
+					= (void *)((unsigned long)(*pp) +
+					   sizeof(struct msmfb_mdp_pp));
+			break;
+		default:
+			/* Other LUT types need no extra payload space. */
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL) {
+				pr_err("failed to alloc from user size %d for lut_type %d\n",
+					alloc_size, lut_type);
+				return -ENOMEM;
+			}
+			memset(*pp, 0, alloc_size);
+			break;
+		}
+		break;
+	case mdp_op_pcc_cfg:
+		alloc_size += __pp_compat_size_pcc();
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL) {
+			pr_err("alloc from user size %d for pcc fail\n",
+				alloc_size);
+			return -ENOMEM;
+		}
+		memset(*pp, 0, alloc_size);
+		(*pp)->data.pcc_cfg_data.cfg_payload =
+				(void *)((unsigned long)(*pp) +
+				 sizeof(struct msmfb_mdp_pp));
+		break;
+	case mdp_op_gamut_cfg:
+		alloc_size += __pp_compat_size_gamut();
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL) {
+			pr_err("alloc from user size %d for pcc fail\n",
+				alloc_size);
+			return -ENOMEM;
+		}
+		memset(*pp, 0, alloc_size);
+		(*pp)->data.gamut_cfg_data.cfg_payload =
+				(void *)((unsigned long)(*pp) +
+				 sizeof(struct msmfb_mdp_pp));
+		break;
+	case mdp_op_pa_v2_cfg:
+		alloc_size += __pp_compat_size_pa();
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL) {
+			pr_err("alloc from user size %d for pcc fail\n",
+				alloc_size);
+			return -ENOMEM;
+		}
+		memset(*pp, 0, alloc_size);
+		(*pp)->data.pa_v2_cfg_data.cfg_payload =
+				(void *)((unsigned long)(*pp) +
+				 sizeof(struct msmfb_mdp_pp));
+		break;
+	default:
+		/* Ops without versioned payloads: just the base struct. */
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL)
+			return -ENOMEM;
+		memset(*pp, 0, alloc_size);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * mdss_compat_pp_ioctl - 32-bit compat entry for the MSMFB_MDP_PP ioctl.
+ *
+ * Reads the pp op from the 32-bit user struct, allocates a native struct
+ * via __pp_compat_alloc(), converts the op-specific payload from the
+ * compat layout, forwards the ioctl to mdss_fb_do_ioctl(), and (for ops
+ * that return data) converts the results back to the compat layout.
+ *
+ * Returns 0 on success or a negative errno from any stage.
+ */
+static int mdss_compat_pp_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	uint32_t op;
+	int ret = 0;
+	struct msmfb_mdp_pp32 __user *pp32;
+	struct msmfb_mdp_pp __user *pp;
+
+	pp32 = compat_ptr(arg);
+	if (copy_from_user(&op, &pp32->op, sizeof(uint32_t)))
+		return -EFAULT;
+
+	ret = __pp_compat_alloc(pp32, &pp, op);
+	if (ret)
+		return ret;
+
+	if (copy_in_user(&pp->op, &pp32->op, sizeof(uint32_t)))
+		return -EFAULT;
+
+	/*
+	 * Per-op pattern: from_user conversion -> forward ioctl ->
+	 * to_user conversion (omitted for write-only ops).
+	 */
+	switch (op) {
+	case mdp_op_pcc_cfg:
+		ret = __from_user_pcc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data),
+			&pp->data.pcc_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_pcc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data),
+			&pp->data.pcc_cfg_data);
+		break;
+	case mdp_op_csc_cfg:
+		ret = __from_user_csc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.csc_cfg_data),
+			&pp->data.csc_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_csc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.csc_cfg_data),
+			&pp->data.csc_cfg_data);
+		break;
+	case mdp_op_lut_cfg:
+		ret = __from_user_lut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.lut_cfg_data),
+			&pp->data.lut_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_lut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.lut_cfg_data),
+			&pp->data.lut_cfg_data);
+		break;
+	case mdp_op_qseed_cfg:
+		ret = __from_user_qseed_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data),
+			&pp->data.qseed_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_qseed_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data),
+			&pp->data.qseed_cfg_data);
+		break;
+	case mdp_bl_scale_cfg:
+		/* Write-only op: no result copied back. */
+		ret = __from_user_bl_scale_data(
+			compat_ptr((uintptr_t)&pp32->data.bl_scale_data),
+			&pp->data.bl_scale_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	case mdp_op_pa_cfg:
+		ret = __from_user_pa_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_cfg_data),
+			&pp->data.pa_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_pa_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_cfg_data),
+			&pp->data.pa_cfg_data);
+		break;
+	case mdp_op_pa_v2_cfg:
+		ret = __from_user_pa_v2_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data),
+			&pp->data.pa_v2_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_pa_v2_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data),
+			&pp->data.pa_v2_cfg_data);
+		break;
+	case mdp_op_dither_cfg:
+		ret = __from_user_dither_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.dither_cfg_data),
+			&pp->data.dither_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_dither_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.dither_cfg_data),
+			&pp->data.dither_cfg_data);
+		break;
+	case mdp_op_gamut_cfg:
+		ret = __from_user_gamut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data),
+			&pp->data.gamut_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_gamut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data),
+			&pp->data.gamut_cfg_data);
+		break;
+	case mdp_op_calib_cfg:
+		ret = __from_user_calib_config_data(
+			compat_ptr((uintptr_t)&pp32->data.calib_cfg),
+			&pp->data.calib_cfg);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_calib_config_data(
+			compat_ptr((uintptr_t)&pp32->data.calib_cfg),
+			&pp->data.calib_cfg);
+		break;
+	case mdp_op_ad_cfg:
+		ret = __from_user_ad_init_cfg(
+			compat_ptr((uintptr_t)&pp32->data.ad_init_cfg),
+			&pp->data.ad_init_cfg);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	case mdp_op_ad_input:
+		ret = __from_user_ad_input(
+			compat_ptr((uintptr_t)&pp32->data.ad_input),
+			&pp->data.ad_input);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_ad_input(
+			compat_ptr((uintptr_t)&pp32->data.ad_input),
+			&pp->data.ad_input);
+		break;
+	case mdp_op_calib_mode:
+		ret = __from_user_calib_cfg(
+			compat_ptr((uintptr_t)&pp32->data.mdss_calib_cfg),
+			&pp->data.mdss_calib_cfg);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	case mdp_op_calib_buffer:
+		ret = __from_user_calib_config_buffer(
+			compat_ptr((uintptr_t)&pp32->data.calib_buffer),
+			&pp->data.calib_buffer);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_calib_config_buffer(
+			compat_ptr((uintptr_t)&pp32->data.calib_buffer),
+			&pp->data.calib_buffer);
+		break;
+	case mdp_op_calib_dcm_state:
+		ret = __from_user_calib_dcm_state(
+			compat_ptr((uintptr_t)&pp32->data.calib_dcm),
+			&pp->data.calib_dcm);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	default:
+		break;
+	}
+
+pp_compat_exit:
+	return ret;
+}
+
+/*
+ * __from_user_pp_params - convert a full overlay pp-params block from the
+ * 32-bit compat layout by delegating each sub-config to its own
+ * from_user helper.  Stops at the first failure and returns its code.
+ */
+static int __from_user_pp_params(struct mdp_overlay_pp_params32 *ppp32,
+				struct mdp_overlay_pp_params *ppp)
+{
+	int ret = 0;
+
+	if (copy_in_user(&ppp->config_ops,
+			&ppp32->config_ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	ret = __from_user_csc_cfg(
+			compat_ptr((uintptr_t)&ppp32->csc_cfg),
+			&ppp->csc_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]),
+			&ppp->qseed_cfg[0]);
+	if (ret)
+		return ret;
+	ret = __from_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]),
+			&ppp->qseed_cfg[1]);
+	if (ret)
+		return ret;
+	ret = __from_user_pa_cfg(
+			compat_ptr((uintptr_t)&ppp32->pa_cfg),
+			&ppp->pa_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_igc_lut_data(
+			compat_ptr((uintptr_t)&ppp32->igc_cfg),
+			&ppp->igc_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_sharp_cfg(
+			compat_ptr((uintptr_t)&ppp32->sharp_cfg),
+			&ppp->sharp_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_histogram_cfg(
+			compat_ptr((uintptr_t)&ppp32->hist_cfg),
+			&ppp->hist_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_hist_lut_data(
+			compat_ptr((uintptr_t)&ppp32->hist_lut_cfg),
+			&ppp->hist_lut_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_pa_v2_data(
+			compat_ptr((uintptr_t)&ppp32->pa_v2_cfg),
+			&ppp->pa_v2_cfg);
+
+	return ret;
+}
+
+/*
+ * __to_user_pp_params - inverse of __from_user_pp_params(): convert a
+ * full overlay pp-params block back to the 32-bit compat layout, one
+ * sub-config at a time, stopping at the first failure.
+ */
+static int __to_user_pp_params(struct mdp_overlay_pp_params *ppp,
+				struct mdp_overlay_pp_params32 *ppp32)
+{
+	int ret = 0;
+
+	if (copy_in_user(&ppp32->config_ops,
+			&ppp->config_ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	ret = __to_user_csc_cfg(
+			compat_ptr((uintptr_t)&ppp32->csc_cfg),
+			&ppp->csc_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]),
+			&ppp->qseed_cfg[0]);
+	if (ret)
+		return ret;
+	ret = __to_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]),
+			&ppp->qseed_cfg[1]);
+	if (ret)
+		return ret;
+	ret = __to_user_pa_cfg(
+			compat_ptr((uintptr_t)&ppp32->pa_cfg),
+			&ppp->pa_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_igc_lut_data(
+			compat_ptr((uintptr_t)&ppp32->igc_cfg),
+			&ppp->igc_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_sharp_cfg(
+			compat_ptr((uintptr_t)&ppp32->sharp_cfg),
+			&ppp->sharp_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_histogram_cfg(
+			compat_ptr((uintptr_t)&ppp32->hist_cfg),
+			&ppp->hist_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_hist_lut_data(
+			compat_ptr((uintptr_t)&ppp32->hist_lut_cfg),
+			&ppp->hist_lut_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_pa_v2_data(
+			compat_ptr((uintptr_t)&ppp32->pa_v2_cfg),
+			&ppp->pa_v2_cfg);
+
+	return ret;
+}
+
+/*
+ * __from_user_hist_start_req - copy a histogram-start request from the
+ * 32-bit compat struct to the native user struct, field by field (the
+ * fields have mixed widths, so a bulk copy is not possible).
+ */
+static int __from_user_hist_start_req(
+			struct mdp_histogram_start_req32 __user *hist_req32,
+			struct mdp_histogram_start_req __user *hist_req)
+{
+	if (copy_in_user(&hist_req->block,
+			&hist_req32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_req->frame_cnt,
+			&hist_req32->frame_cnt,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_req->bit_mask,
+			&hist_req32->bit_mask,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_req->num_bins,
+			&hist_req32->num_bins,
+			sizeof(uint16_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_hist_data - convert a histogram-data struct from the 32-bit
+ * compat layout: scalar fields copied in place, the four channel/extra
+ * buffer handles widened to native pointers via compat_ptr().
+ */
+static int __from_user_hist_data(
+			struct mdp_histogram_data32 __user *hist_data32,
+			struct mdp_histogram_data __user *hist_data)
+{
+	uint32_t data;
+
+	if (copy_in_user(&hist_data->block,
+			&hist_data32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_data->bin_cnt,
+			&hist_data32->bin_cnt,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, &hist_data32->c0) ||
+	    put_user(compat_ptr(data), &hist_data->c0) ||
+	    get_user(data, &hist_data32->c1) ||
+	    put_user(compat_ptr(data), &hist_data->c1) ||
+	    get_user(data, &hist_data32->c2) ||
+	    put_user(compat_ptr(data), &hist_data->c2) ||
+	    get_user(data, &hist_data32->extra_info) ||
+	    put_user(compat_ptr(data), &hist_data->extra_info))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_hist_data - inverse of __from_user_hist_data(): scalar fields
+ * copied back and the native channel pointers narrowed to compat_caddr_t.
+ */
+static int __to_user_hist_data(
+			struct mdp_histogram_data32 __user *hist_data32,
+			struct mdp_histogram_data __user *hist_data)
+{
+	unsigned long data;
+
+	if (copy_in_user(&hist_data32->block,
+			&hist_data->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_data32->bin_cnt,
+			&hist_data->bin_cnt,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	/* Pointers are read as unsigned long then truncated to 32 bits. */
+	if (get_user(data, (unsigned long *) &hist_data->c0) ||
+	    put_user((compat_caddr_t) data, &hist_data32->c0) ||
+	    get_user(data, (unsigned long *) &hist_data->c1) ||
+	    put_user((compat_caddr_t) data, &hist_data32->c1) ||
+	    get_user(data, (unsigned long *) &hist_data->c2) ||
+	    put_user((compat_caddr_t) data, &hist_data32->c2) ||
+	    get_user(data, (unsigned long *) &hist_data->extra_info) ||
+	    put_user((compat_caddr_t) data, &hist_data32->extra_info))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * mdss_histo_compat_ioctl - 32-bit compat entry for the histogram ioctls
+ * (MSMFB_HISTOGRAM_START/STOP and MSMFB_HISTOGRAM).  Allocates a native
+ * struct on the compat user stack, converts from the 32-bit layout,
+ * forwards the ioctl, and for MSMFB_HISTOGRAM converts results back.
+ * STOP takes no struct and is forwarded unchanged.
+ */
+static int mdss_histo_compat_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	struct mdp_histogram_data __user *hist;
+	struct mdp_histogram_data32 __user *hist32;
+	struct mdp_histogram_start_req __user *hist_req;
+	struct mdp_histogram_start_req32 __user *hist_req32;
+	int ret = 0;
+
+	switch (cmd) {
+	case MSMFB_HISTOGRAM_START:
+		hist_req32 = compat_ptr(arg);
+		hist_req = compat_alloc_user_space(
+				sizeof(struct mdp_histogram_start_req));
+		if (!hist_req) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__,
+				sizeof(struct mdp_histogram_start_req));
+			return -EINVAL;
+		}
+		memset(hist_req, 0, sizeof(struct mdp_histogram_start_req));
+		ret = __from_user_hist_start_req(hist_req32, hist_req);
+		if (ret)
+			goto histo_compat_err;
+		ret = mdss_fb_do_ioctl(info, cmd,
+			(unsigned long) hist_req, file);
+		break;
+	case MSMFB_HISTOGRAM_STOP:
+		ret = mdss_fb_do_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_HISTOGRAM:
+		hist32 = compat_ptr(arg);
+		hist = compat_alloc_user_space(
+				sizeof(struct mdp_histogram_data));
+		if (!hist) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__,
+				sizeof(struct mdp_histogram_data));
+			return -EINVAL;
+		}
+		memset(hist, 0, sizeof(struct mdp_histogram_data));
+		ret = __from_user_hist_data(hist32, hist);
+		if (ret)
+			goto histo_compat_err;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) hist, file);
+		if (ret)
+			goto histo_compat_err;
+		ret = __to_user_hist_data(hist32, hist);
+		break;
+	default:
+		break;
+	}
+
+/* Fall-through target as well as error exit; ret carries the result. */
+histo_compat_err:
+	return ret;
+}
+
+/*
+ * __copy_layer_pp_info_qseed_params - copy both QSEED configs from the
+ * 32-bit layout into kernel memory, widening the data handles with
+ * compat_ptr().  pp_info32 here is a kernel-resident copy, so plain
+ * assignment is used.  Always returns 0.
+ */
+static int __copy_layer_pp_info_qseed_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	pp_info->qseed_cfg[0].table_num = pp_info32->qseed_cfg[0].table_num;
+	pp_info->qseed_cfg[0].ops = pp_info32->qseed_cfg[0].ops;
+	pp_info->qseed_cfg[0].len = pp_info32->qseed_cfg[0].len;
+	pp_info->qseed_cfg[0].data = compat_ptr(pp_info32->qseed_cfg[0].data);
+
+	pp_info->qseed_cfg[1].table_num = pp_info32->qseed_cfg[1].table_num;
+	pp_info->qseed_cfg[1].ops = pp_info32->qseed_cfg[1].ops;
+	pp_info->qseed_cfg[1].len = pp_info32->qseed_cfg[1].len;
+	pp_info->qseed_cfg[1].data = compat_ptr(pp_info32->qseed_cfg[1].data);
+
+	return 0;
+}
+
+/*
+ * __copy_layer_igc_lut_data_v1_7 - copy a v1.7 IGC LUT payload from the
+ * 32-bit user struct into the kernel cfg_payload buffer, widening the
+ * embedded data handles with compat_ptr().  Returns 0 or -EFAULT.
+ */
+static int __copy_layer_igc_lut_data_v1_7(
+			struct mdp_igc_lut_data_v1_7 *cfg_payload,
+			struct mdp_igc_lut_data_v1_7_32 __user *cfg_payload32)
+{
+	struct mdp_igc_lut_data_v1_7_32 local_cfg_payload32;
+	int ret = 0;
+
+	/* Pull the whole 32-bit struct into a kernel local first. */
+	ret = copy_from_user(&local_cfg_payload32,
+			cfg_payload32,
+			sizeof(struct mdp_igc_lut_data_v1_7_32));
+	if (ret) {
+		pr_err("copy from user failed, IGC cfg payload = %pK\n",
+			cfg_payload32);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	cfg_payload->table_fmt = local_cfg_payload32.table_fmt;
+	cfg_payload->len = local_cfg_payload32.len;
+	cfg_payload->c0_c1_data = compat_ptr(local_cfg_payload32.c0_c1_data);
+	cfg_payload->c2_data = compat_ptr(local_cfg_payload32.c2_data);
+
+exit:
+	return ret;
+}
+
+/*
+ * __copy_layer_pp_info_igc_params - populate pp_info->igc_cfg from the
+ * 32-bit layout.  For versioned configs (version != 0) a kernel payload
+ * buffer is kmalloc'd and filled by the version-specific copier; an
+ * unknown version falls back to the legacy inline fields and frees the
+ * buffer.  Ownership of cfg_payload passes to pp_info on success.
+ */
+static int __copy_layer_pp_info_igc_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->igc_cfg.block = pp_info32->igc_cfg.block;
+	pp_info->igc_cfg.version = pp_info32->igc_cfg.version;
+	pp_info->igc_cfg.ops = pp_info32->igc_cfg.ops;
+
+	if (pp_info->igc_cfg.version != 0) {
+		payload_size = __pp_compat_size_igc();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->igc_cfg.version) {
+	case mdp_igc_v1_7:
+		ret = __copy_layer_igc_lut_data_v1_7(cfg_payload,
+				compat_ptr(pp_info32->igc_cfg.cfg_payload));
+		if (ret) {
+			pr_err("compat copy of IGC cfg payload failed, ret %d\n",
+				ret);
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy IGC version\n");
+		pp_info->igc_cfg.len = pp_info32->igc_cfg.len;
+		pp_info->igc_cfg.c0_c1_data =
+			compat_ptr(pp_info32->igc_cfg.c0_c1_data);
+		pp_info->igc_cfg.c2_data =
+			compat_ptr(pp_info32->igc_cfg.c2_data);
+		/* Legacy path does not use a payload buffer. */
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	/* NULL on any failure path, so callers can kfree unconditionally. */
+	pp_info->igc_cfg.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * __copy_layer_hist_lut_data_v1_7 - copy a v1.7 histogram LUT payload
+ * from the 32-bit user struct into the kernel cfg_payload buffer,
+ * widening the data handle with compat_ptr().  Returns 0 or -EFAULT.
+ */
+static int __copy_layer_hist_lut_data_v1_7(
+			struct mdp_hist_lut_data_v1_7 *cfg_payload,
+			struct mdp_hist_lut_data_v1_7_32 __user *cfg_payload32)
+{
+	struct mdp_hist_lut_data_v1_7_32 local_cfg_payload32;
+	int ret = 0;
+
+	ret = copy_from_user(&local_cfg_payload32,
+			cfg_payload32,
+			sizeof(struct mdp_hist_lut_data_v1_7_32));
+	if (ret) {
+		pr_err("copy from user failed, hist lut cfg_payload = %pK\n",
+			cfg_payload32);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	cfg_payload->len = local_cfg_payload32.len;
+	cfg_payload->data = compat_ptr(local_cfg_payload32.data);
+exit:
+	return ret;
+}
+
+/*
+ * __copy_layer_pp_info_hist_lut_params - populate pp_info->hist_lut_cfg
+ * from the 32-bit layout.  Mirrors the IGC variant: versioned configs
+ * get a kmalloc'd payload filled by the v1.7 copier; unknown versions
+ * fall back to the legacy inline fields.  Ownership of cfg_payload
+ * passes to pp_info on success, NULL otherwise.
+ */
+static int __copy_layer_pp_info_hist_lut_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->hist_lut_cfg.block = pp_info32->hist_lut_cfg.block;
+	pp_info->hist_lut_cfg.version = pp_info32->hist_lut_cfg.version;
+	pp_info->hist_lut_cfg.ops = pp_info32->hist_lut_cfg.ops;
+	pp_info->hist_lut_cfg.hist_lut_first =
+			 pp_info32->hist_lut_cfg.hist_lut_first;
+
+	if (pp_info->hist_lut_cfg.version != 0) {
+		payload_size = __pp_compat_size_hist_lut();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->hist_lut_cfg.version) {
+	case mdp_hist_lut_v1_7:
+		ret = __copy_layer_hist_lut_data_v1_7(cfg_payload,
+			compat_ptr(pp_info32->hist_lut_cfg.cfg_payload));
+		if (ret) {
+			pr_err("compat copy of Hist LUT cfg payload failed, ret %d\n",
+				ret);
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+		pp_info->hist_lut_cfg.len = pp_info32->hist_lut_cfg.len;
+		pp_info->hist_lut_cfg.data =
+			compat_ptr(pp_info32->hist_lut_cfg.data);
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	pp_info->hist_lut_cfg.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * __copy_layer_pa_data_v1_7 - copy a v1.7 picture-adjust payload from
+ * the 32-bit user struct into the kernel cfg_payload buffer.  Scalar and
+ * memory-colour fields are copied directly (the mem-col sub-structs are
+ * layout-identical, hence the memcpy); six-zone curve handles are
+ * widened with compat_ptr().  Returns 0 or -EFAULT.
+ */
+static int __copy_layer_pa_data_v1_7(
+			struct mdp_pa_data_v1_7 *cfg_payload,
+			struct mdp_pa_data_v1_7_32 __user *cfg_payload32)
+{
+	struct mdp_pa_data_v1_7_32 local_cfg_payload32;
+	int ret = 0;
+
+	ret = copy_from_user(&local_cfg_payload32,
+			cfg_payload32,
+			sizeof(struct mdp_pa_data_v1_7_32));
+	if (ret) {
+		pr_err("copy from user failed, pa cfg_payload = %pK\n",
+			cfg_payload32);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	cfg_payload->mode = local_cfg_payload32.mode;
+	cfg_payload->global_hue_adj = local_cfg_payload32.global_hue_adj;
+	cfg_payload->global_sat_adj = local_cfg_payload32.global_sat_adj;
+	cfg_payload->global_val_adj = local_cfg_payload32.global_val_adj;
+	cfg_payload->global_cont_adj = local_cfg_payload32.global_cont_adj;
+
+	memcpy(&cfg_payload->skin_cfg, &local_cfg_payload32.skin_cfg,
+			sizeof(struct mdp_pa_mem_col_data_v1_7));
+	memcpy(&cfg_payload->sky_cfg, &local_cfg_payload32.sky_cfg,
+			sizeof(struct mdp_pa_mem_col_data_v1_7));
+	memcpy(&cfg_payload->fol_cfg, &local_cfg_payload32.fol_cfg,
+			sizeof(struct mdp_pa_mem_col_data_v1_7));
+
+	cfg_payload->six_zone_thresh = local_cfg_payload32.six_zone_thresh;
+	cfg_payload->six_zone_adj_p0 = local_cfg_payload32.six_zone_adj_p0;
+	cfg_payload->six_zone_adj_p1 = local_cfg_payload32.six_zone_adj_p1;
+	cfg_payload->six_zone_sat_hold = local_cfg_payload32.six_zone_sat_hold;
+	cfg_payload->six_zone_val_hold = local_cfg_payload32.six_zone_val_hold;
+	cfg_payload->six_zone_len = local_cfg_payload32.six_zone_len;
+
+	cfg_payload->six_zone_curve_p0 =
+		compat_ptr(local_cfg_payload32.six_zone_curve_p0);
+	cfg_payload->six_zone_curve_p1 =
+		compat_ptr(local_cfg_payload32.six_zone_curve_p1);
+exit:
+	return ret;
+}
+
+/*
+ * __copy_layer_pp_info_pa_v2_params - populate pp_info->pa_v2_cfg_data
+ * from the 32-bit layout.  Versioned configs get a kmalloc'd payload
+ * filled by the v1.7 copier; unknown versions get no payload at all
+ * (there are no legacy inline fields here, unlike IGC/hist-lut).
+ * Ownership of cfg_payload passes to pp_info on success.
+ */
+static int __copy_layer_pp_info_pa_v2_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->pa_v2_cfg_data.block = pp_info32->pa_v2_cfg_data.block;
+	pp_info->pa_v2_cfg_data.version = pp_info32->pa_v2_cfg_data.version;
+	pp_info->pa_v2_cfg_data.flags = pp_info32->pa_v2_cfg_data.flags;
+
+	if (pp_info->pa_v2_cfg_data.version != 0) {
+		payload_size = __pp_compat_size_pa();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->pa_v2_cfg_data.version) {
+	case mdp_pa_v1_7:
+		ret = __copy_layer_pa_data_v1_7(cfg_payload,
+			compat_ptr(pp_info32->pa_v2_cfg_data.cfg_payload));
+		if (ret) {
+			pr_err("compat copy of PA cfg payload failed, ret %d\n",
+				ret);
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("version invalid\n");
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	pp_info->pa_v2_cfg_data.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * __copy_layer_pp_info_legacy_pa_v2_params - copy the legacy (inline,
+ * unversioned) PA v2 fields from the 32-bit layout, widening the
+ * six-zone curve handles with compat_ptr().  Always returns 0.
+ */
+static int __copy_layer_pp_info_legacy_pa_v2_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	pp_info->pa_v2_cfg.global_hue_adj =
+		pp_info32->pa_v2_cfg.global_hue_adj;
+	pp_info->pa_v2_cfg.global_sat_adj =
+		pp_info32->pa_v2_cfg.global_sat_adj;
+	pp_info->pa_v2_cfg.global_val_adj =
+		pp_info32->pa_v2_cfg.global_val_adj;
+	pp_info->pa_v2_cfg.global_cont_adj =
+		pp_info32->pa_v2_cfg.global_cont_adj;
+
+	/* mem-col sub-structs share the same layout in both variants. */
+	memcpy(&pp_info->pa_v2_cfg.skin_cfg,
+		&pp_info32->pa_v2_cfg.skin_cfg,
+		sizeof(struct mdp_pa_mem_col_cfg));
+	memcpy(&pp_info->pa_v2_cfg.sky_cfg,
+		&pp_info32->pa_v2_cfg.sky_cfg,
+		sizeof(struct mdp_pa_mem_col_cfg));
+	memcpy(&pp_info->pa_v2_cfg.fol_cfg,
+		&pp_info32->pa_v2_cfg.fol_cfg,
+		sizeof(struct mdp_pa_mem_col_cfg));
+
+	pp_info->pa_v2_cfg.six_zone_thresh =
+		pp_info32->pa_v2_cfg.six_zone_thresh;
+	pp_info->pa_v2_cfg.six_zone_len =
+		pp_info32->pa_v2_cfg.six_zone_len;
+
+	pp_info->pa_v2_cfg.six_zone_curve_p0 =
+		compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p0);
+	pp_info->pa_v2_cfg.six_zone_curve_p1 =
+		compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p1);
+
+	return 0;
+}
+
+/*
+ * __copy_layer_pp_info_pcc_params - populate pp_info->pcc_cfg_data from
+ * the 32-bit layout.  The v1.7 PCC payload contains no embedded pointers,
+ * so it is copied straight from user space rather than through a
+ * dedicated converter.  Ownership of cfg_payload passes to pp_info on
+ * success, NULL otherwise.
+ */
+static int __copy_layer_pp_info_pcc_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->pcc_cfg_data.block = pp_info32->pcc_cfg_data.block;
+	pp_info->pcc_cfg_data.version = pp_info32->pcc_cfg_data.version;
+	pp_info->pcc_cfg_data.ops = pp_info32->pcc_cfg_data.ops;
+
+	if (pp_info->pcc_cfg_data.version != 0) {
+		payload_size = __pp_compat_size_pcc();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->pcc_cfg_data.version) {
+	case mdp_pcc_v1_7:
+		ret = copy_from_user(cfg_payload,
+				compat_ptr(pp_info32->pcc_cfg_data.cfg_payload),
+				sizeof(struct mdp_pcc_data_v1_7));
+		if (ret) {
+			pr_err("compat copy of PCC cfg payload failed, ptr %pK\n",
+				compat_ptr(
+				pp_info32->pcc_cfg_data.cfg_payload));
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	pp_info->pcc_cfg_data.cfg_payload = cfg_payload;
+	return ret;
+}
+
+
+/*
+ * __copy_layer_pp_info_params - build a kernel-resident
+ * mdp_overlay_pp_params for an input layer from its 32-bit user pp_info.
+ *
+ * No-op unless the layer has MDP_LAYER_PP set.  On success the kmalloc'd
+ * pp_info (plus any versioned cfg payloads) is attached to layer->pp_info
+ * and ownership passes to the caller; on failure the goto chain unwinds
+ * exactly the payloads allocated so far, newest first.
+ */
+static int __copy_layer_pp_info_params(struct mdp_input_layer *layer,
+				struct mdp_input_layer32 *layer32)
+{
+	struct mdp_overlay_pp_params *pp_info;
+	struct mdp_overlay_pp_params32 pp_info32;
+	int ret = 0;
+
+	if (!(layer->flags & MDP_LAYER_PP))
+		return 0;
+
+	ret = copy_from_user(&pp_info32,
+			compat_ptr(layer32->pp_info),
+			sizeof(struct mdp_overlay_pp_params32));
+	if (ret) {
+		pr_err("pp info copy from user failed, pp_info %pK\n",
+			compat_ptr(layer32->pp_info));
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	pp_info = kmalloc(sizeof(struct mdp_overlay_pp_params), GFP_KERNEL);
+	if (!pp_info) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	memset(pp_info, 0, sizeof(struct mdp_overlay_pp_params));
+
+	pp_info->config_ops = pp_info32.config_ops;
+
+	/* Pointer-free sub-structs can be copied wholesale. */
+	memcpy(&pp_info->csc_cfg, &pp_info32.csc_cfg,
+		sizeof(struct mdp_csc_cfg));
+	memcpy(&pp_info->sharp_cfg, &pp_info32.sharp_cfg,
+		sizeof(struct mdp_sharp_cfg));
+	memcpy(&pp_info->hist_cfg, &pp_info32.hist_cfg,
+		sizeof(struct mdp_histogram_cfg));
+	memcpy(&pp_info->pa_cfg, &pp_info32.pa_cfg,
+		sizeof(struct mdp_pa_cfg));
+
+	ret = __copy_layer_pp_info_qseed_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info QSEED params failed, ret %d\n",
+			ret);
+		goto exit_pp_info;
+	}
+	ret = __copy_layer_pp_info_legacy_pa_v2_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info Legacy PAv2 params failed, ret %d\n",
+			ret);
+		goto exit_pp_info;
+	}
+	ret = __copy_layer_pp_info_igc_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info IGC params failed, ret %d\n",
+			ret);
+		goto exit_pp_info;
+	}
+	ret = __copy_layer_pp_info_hist_lut_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info Hist LUT params failed, ret %d\n",
+			ret);
+		goto exit_igc;
+	}
+	ret = __copy_layer_pp_info_pa_v2_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info PAv2 params failed, ret %d\n",
+			ret);
+		goto exit_hist_lut;
+	}
+	ret = __copy_layer_pp_info_pcc_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info PCC params failed, ret %d\n",
+			ret);
+		goto exit_pa;
+	}
+
+	layer->pp_info = pp_info;
+
+	return ret;
+
+/* Unwind in reverse allocation order; helpers NULL their payload on error. */
+exit_pa:
+	kfree(pp_info->pa_v2_cfg_data.cfg_payload);
+exit_hist_lut:
+	kfree(pp_info->hist_lut_cfg.cfg_payload);
+exit_igc:
+	kfree(pp_info->igc_cfg.cfg_payload);
+exit_pp_info:
+	kfree(pp_info);
+exit:
+	return ret;
+}
+
+
+/*
+ * __to_user_mdp_overlay - copy an mdp_overlay result back to the 32-bit
+ * compat user struct: bulk-copy the layout-identical sub-structs, then
+ * put_user() each scalar field, then convert the nested pp params.
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_mdp_overlay(struct mdp_overlay32 __user *ov32,
+				 struct mdp_overlay __user *ov)
+{
+	int ret = 0;
+
+	ret = copy_in_user(&ov32->src, &ov->src, sizeof(ov32->src)) ||
+		copy_in_user(&ov32->src_rect,
+			&ov->src_rect, sizeof(ov32->src_rect)) ||
+		copy_in_user(&ov32->dst_rect,
+			&ov->dst_rect, sizeof(ov32->dst_rect));
+	if (ret)
+		return -EFAULT;
+
+	/* Accumulate put_user() failures; any non-zero means -EFAULT. */
+	ret |= put_user(ov->z_order, &ov32->z_order);
+	ret |= put_user(ov->is_fg, &ov32->is_fg);
+	ret |= put_user(ov->alpha, &ov32->alpha);
+	ret |= put_user(ov->blend_op, &ov32->blend_op);
+	ret |= put_user(ov->transp_mask, &ov32->transp_mask);
+	ret |= put_user(ov->flags, &ov32->flags);
+	ret |= put_user(ov->id, &ov32->id);
+	ret |= put_user(ov->priority, &ov32->priority);
+	if (ret)
+		return -EFAULT;
+
+	ret = copy_in_user(&ov32->user_data, &ov->user_data,
+			   sizeof(ov32->user_data));
+	if (ret)
+		return -EFAULT;
+
+	ret |= put_user(ov->horz_deci, &ov32->horz_deci);
+	ret |= put_user(ov->vert_deci, &ov32->vert_deci);
+	if (ret)
+		return -EFAULT;
+
+	ret = __to_user_pp_params(
+			&ov->overlay_pp_cfg,
+			compat_ptr((uintptr_t) &ov32->overlay_pp_cfg));
+	if (ret)
+		return -EFAULT;
+
+	ret = copy_in_user(&ov32->scale, &ov->scale,
+			   sizeof(struct mdp_scale_data));
+	if (ret)
+		return -EFAULT;
+
+	ret = put_user(ov->frame_rate, &ov32->frame_rate);
+	if (ret)
+		return -EFAULT;
+
+	return 0;
+}
+
+
+/*
+ * __from_user_mdp_overlay - inverse of __to_user_mdp_overlay(): populate
+ * a native mdp_overlay from the 32-bit compat user struct.  Scalars are
+ * shuttled one at a time through a __u32 temp via get_user()/put_user();
+ * layout-identical sub-structs are bulk-copied.  Returns 0 or -EFAULT.
+ */
+static int __from_user_mdp_overlay(struct mdp_overlay *ov,
+				   struct mdp_overlay32 __user *ov32)
+{
+	__u32 data;
+
+	if (copy_in_user(&ov->src, &ov32->src,
+			 sizeof(ov32->src)) ||
+	    copy_in_user(&ov->src_rect, &ov32->src_rect,
+			 sizeof(ov32->src_rect)) ||
+	    copy_in_user(&ov->dst_rect, &ov32->dst_rect,
+			 sizeof(ov32->dst_rect)))
+		return -EFAULT;
+
+	if (get_user(data, &ov32->z_order) ||
+	    put_user(data, &ov->z_order) ||
+	    get_user(data, &ov32->is_fg) ||
+	    put_user(data, &ov->is_fg) ||
+	    get_user(data, &ov32->alpha) ||
+	    put_user(data, &ov->alpha) ||
+	    get_user(data, &ov32->blend_op) ||
+	    put_user(data, &ov->blend_op) ||
+	    get_user(data, &ov32->transp_mask) ||
+	    put_user(data, &ov->transp_mask) ||
+	    get_user(data, &ov32->flags) ||
+	    put_user(data, &ov->flags) ||
+	    get_user(data, &ov32->pipe_type) ||
+	    put_user(data, &ov->pipe_type) ||
+	    get_user(data, &ov32->id) ||
+	    put_user(data, &ov->id) ||
+	    get_user(data, &ov32->priority) ||
+	    put_user(data, &ov->priority))
+		return -EFAULT;
+
+	if (copy_in_user(&ov->user_data, &ov32->user_data,
+			 sizeof(ov32->user_data)))
+		return -EFAULT;
+
+	if (get_user(data, &ov32->horz_deci) ||
+	    put_user(data, &ov->horz_deci) ||
+	    get_user(data, &ov32->vert_deci) ||
+	    put_user(data, &ov->vert_deci))
+		return -EFAULT;
+
+	if (__from_user_pp_params(
+			compat_ptr((uintptr_t) &ov32->overlay_pp_cfg),
+			&ov->overlay_pp_cfg))
+		return -EFAULT;
+
+	if (copy_in_user(&ov->scale, &ov32->scale,
+			 sizeof(struct mdp_scale_data)))
+		return -EFAULT;
+
+	if (get_user(data, &ov32->frame_rate) ||
+	    put_user(data, &ov->frame_rate))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_mdp_overlaylist(struct mdp_overlay_list *ovlist,
+				struct mdp_overlay_list32 *ovlist32,
+				struct mdp_overlay **to_list_head)
+{
+	__u32 i, num_overlays;
+	unsigned long data, from_list_head;
+	struct mdp_overlay32 *iter;
+
+	if (!to_list_head || !ovlist32 || !ovlist) {
+		pr_err("%s:%u: null error\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (copy_in_user(&ovlist->num_overlays, &ovlist32->num_overlays,
+			sizeof(ovlist32->num_overlays)))
+		return -EFAULT;
+
+	if (copy_in_user(&ovlist->flags, &ovlist32->flags,
+			sizeof(ovlist32->flags)))
+		return -EFAULT;
+
+	if (copy_in_user(&ovlist->processed_overlays,
+			&ovlist32->processed_overlays,
+			sizeof(ovlist32->processed_overlays)))
+		return -EFAULT;
+
+	/* ovlist32 points to userspace: fetch the loop bound via get_user */
+	if (get_user(num_overlays, &ovlist32->num_overlays))
+		goto validate_exit;
+
+	if (get_user(data, &ovlist32->overlay_list))
+		goto validate_exit;
+
+	for (i = 0; i < num_overlays; i++) {
+		if (get_user(from_list_head, (__u32 *)data + i))
+			goto validate_exit;
+
+		iter = compat_ptr(from_list_head);
+		if (__from_user_mdp_overlay(to_list_head[i],
+				(struct mdp_overlay32 *)(iter)))
+			goto validate_exit;
+	}
+	/* hand the kernel-side array of overlay pointers back to caller */
+	ovlist->overlay_list = to_list_head;
+
+	return 0;
+
+validate_exit:
+	pr_err("%s: %u: copy error\n", __func__, __LINE__);
+	return -EFAULT;
+}
+
+static int __to_user_mdp_overlaylist(struct mdp_overlay_list32 *ovlist32,
+				struct mdp_overlay_list *ovlist,
+				struct mdp_overlay **l_ptr)
+{
+	__u32 i, num_overlays;
+	unsigned long data, data1;
+	struct mdp_overlay32 *temp;
+	struct mdp_overlay *l = l_ptr[0];
+
+	if (copy_in_user(&ovlist32->num_overlays, &ovlist->num_overlays,
+			sizeof(ovlist32->num_overlays)))
+		return -EFAULT;
+
+	/* ovlist32 points to userspace: fetch the loop bound via get_user */
+	if (get_user(num_overlays, &ovlist32->num_overlays))
+		goto validate_exit;
+
+	if (get_user(data, &ovlist32->overlay_list)) {
+		pr_err("%s:%u: err\n", __func__, __LINE__);
+		goto validate_exit;
+	}
+
+	for (i = 0; i < num_overlays; i++) {
+		if (get_user(data1, (__u32 *)data + i))
+			goto validate_exit;
+
+		temp = compat_ptr(data1);
+		if (__to_user_mdp_overlay(
+				(struct mdp_overlay32 *) temp,
+				l + i))
+			goto validate_exit;
+	}
+
+	if (copy_in_user(&ovlist32->flags, &ovlist->flags,
+			sizeof(ovlist32->flags)))
+		return -EFAULT;
+
+	if (copy_in_user(&ovlist32->processed_overlays,
+			&ovlist->processed_overlays,
+			sizeof(ovlist32->processed_overlays)))
+		return -EFAULT;
+
+	return 0;
+
+validate_exit:
+	pr_err("%s: %u: copy error\n", __func__, __LINE__);
+	return -EFAULT;
+
+}
+
+void mdss_compat_align_list(void __user *total_mem_chunk,
+ struct mdp_overlay __user **list_ptr, u32 num_ov)
+{
+ int i = 0;
+ struct mdp_overlay __user *contig_overlays;
+
+ contig_overlays = total_mem_chunk + sizeof(struct mdp_overlay_list) +
+ (num_ov * sizeof(struct mdp_overlay *));
+
+ for (i = 0; i < num_ov; i++)
+ list_ptr[i] = contig_overlays + i;
+}
+
+static u32 __pp_sspp_size(void)
+{
+ u32 size = 0;
+ /* pick the largest of the revision when multiple revs are supported */
+ size = sizeof(struct mdp_igc_lut_data_v1_7);
+ size += sizeof(struct mdp_pa_data_v1_7);
+ size += sizeof(struct mdp_pcc_data_v1_7);
+ size += sizeof(struct mdp_hist_lut_data_v1_7);
+ return size;
+}
+
+static int __pp_sspp_set_offsets(struct mdp_overlay *ov)
+{
+ if (!ov) {
+ pr_err("invalid overlay pointer\n");
+ return -EFAULT;
+ }
+ ov->overlay_pp_cfg.igc_cfg.cfg_payload = (void *)((unsigned long)ov +
+ sizeof(struct mdp_overlay));
+ ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload =
+ ov->overlay_pp_cfg.igc_cfg.cfg_payload +
+ sizeof(struct mdp_igc_lut_data_v1_7);
+ ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload =
+ ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload +
+ sizeof(struct mdp_pa_data_v1_7);
+ ov->overlay_pp_cfg.hist_lut_cfg.cfg_payload =
+ ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload +
+ sizeof(struct mdp_pcc_data_v1_7);
+ return 0;
+}
+
+int mdss_compat_overlay_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	struct mdp_overlay *ov, **layers_head;
+	struct mdp_overlay32 *ov32;
+	struct mdp_overlay_list __user *ovlist;
+	struct mdp_overlay_list32 __user *ovlist32;
+	size_t layers_refs_sz, layers_sz, prepare_sz;
+	void __user *total_mem_chunk;
+	uint32_t num_overlays;
+	uint32_t alloc_size = 0;
+	int ret;
+
+	if (!info || !info->par)
+		return -EINVAL;
+
+	switch (cmd) {
+	case MSMFB_MDP_PP:
+		ret = mdss_compat_pp_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+		ret = mdss_histo_compat_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_OVERLAY_GET:
+		alloc_size += sizeof(*ov) + __pp_sspp_size();
+		ov = compat_alloc_user_space(alloc_size);
+		if (!ov) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				__func__, __LINE__, sizeof(*ov));
+			return -EINVAL;
+		}
+		ov32 = compat_ptr(arg);
+		ret = __pp_sspp_set_offsets(ov);
+		if (ret) {
+			pr_err("setting the pp offsets failed ret %d\n", ret);
+			return ret;
+		}
+		ret = __from_user_mdp_overlay(ov, ov32);
+		if (ret)
+			pr_err("%s: compat mdp overlay failed\n", __func__);
+		else
+			ret = mdss_fb_do_ioctl(info, cmd,
+				(unsigned long) ov, file);
+		if (!ret)
+			ret = __to_user_mdp_overlay(ov32, ov);
+		break;
+	case MSMFB_OVERLAY_SET:
+		alloc_size += sizeof(*ov) + __pp_sspp_size();
+		ov = compat_alloc_user_space(alloc_size);
+		if (!ov) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				__func__, __LINE__, sizeof(*ov));
+			return -EINVAL;
+		}
+		ret = __pp_sspp_set_offsets(ov);
+		if (ret) {
+			pr_err("setting the pp offsets failed ret %d\n", ret);
+			return ret;
+		}
+		ov32 = compat_ptr(arg);
+		ret = __from_user_mdp_overlay(ov, ov32);
+		if (ret) {
+			pr_err("%s: compat mdp overlay failed\n", __func__);
+		} else {
+			ret = mdss_fb_do_ioctl(info, cmd,
+				(unsigned long) ov, file);
+			if (!ret)
+				ret = __to_user_mdp_overlay(ov32, ov);
+		}
+		break;
+	case MSMFB_OVERLAY_PREPARE:
+		ovlist32 = compat_ptr(arg);
+		if (get_user(num_overlays, &ovlist32->num_overlays)) {
+			pr_err("compat mdp prepare failed: invalid arg\n");
+			return -EFAULT;
+		}
+
+		if (num_overlays >= OVERLAY_MAX) {
+			pr_err("%s: No: of overlays exceeds max\n", __func__);
+			return -EINVAL;
+		}
+
+		layers_sz = num_overlays * sizeof(struct mdp_overlay);
+		prepare_sz = sizeof(struct mdp_overlay_list);
+		layers_refs_sz = num_overlays * sizeof(struct mdp_overlay *);
+		total_mem_chunk = compat_alloc_user_space(
+			prepare_sz + layers_refs_sz + layers_sz);
+		if (!total_mem_chunk) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				__func__, __LINE__,
+				layers_refs_sz + layers_sz + prepare_sz);
+			return -EINVAL;
+		}
+
+		layers_head = total_mem_chunk + prepare_sz;
+		mdss_compat_align_list(total_mem_chunk, layers_head,
+			num_overlays);
+		ovlist = (struct mdp_overlay_list *)total_mem_chunk;
+
+		ret = __from_user_mdp_overlaylist(ovlist, ovlist32,
+				layers_head);
+		if (ret) {
+			pr_err("compat mdp overlaylist failed\n");
+		} else {
+			ret = mdss_fb_do_ioctl(info, cmd,
+				(unsigned long) ovlist, file);
+			if (!ret)
+				ret = __to_user_mdp_overlaylist(ovlist32,
+						ovlist, layers_head);
+		}
+		break;
+	case MSMFB_OVERLAY_UNSET:
+	case MSMFB_OVERLAY_PLAY:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+	case MSMFB_METADATA_SET:
+	case MSMFB_METADATA_GET:
+	default:
+		pr_debug("%s: overlay ioctl cmd=[%u]\n", __func__, cmd);
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) arg, file);
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdss_fb_compat_ioctl() - MDSS Framebuffer compat ioctl function
+ * @info: pointer to framebuffer info
+ * @cmd: ioctl command
+ * @arg: argument to ioctl
+ *
+ * This function adds the compat translation layer for framebuffer
+ * ioctls to allow 32-bit userspace call ioctls on the mdss
+ * framebuffer device driven in 64-bit kernel.
+ */
+int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg, struct file *file)
+{
+ int ret;
+
+ if (!info || !info->par)
+ return -EINVAL;
+
+ cmd = __do_compat_ioctl_nr(cmd);
+ switch (cmd) {
+ case MSMFB_CURSOR:
+ ret = mdss_fb_compat_cursor(info, cmd, arg, file);
+ break;
+ case MSMFB_SET_LUT:
+ ret = mdss_fb_compat_set_lut(info, arg, file);
+ break;
+ case MSMFB_BUFFER_SYNC:
+ ret = mdss_fb_compat_buf_sync(info, cmd, arg, file);
+ break;
+ case MSMFB_ATOMIC_COMMIT:
+ ret = __compat_atomic_commit(info, cmd, arg, file);
+ break;
+ case MSMFB_ASYNC_POSITION_UPDATE:
+ ret = __compat_async_position_update(info, cmd, arg);
+ break;
+ case MSMFB_MDP_PP:
+ case MSMFB_HISTOGRAM_START:
+ case MSMFB_HISTOGRAM_STOP:
+ case MSMFB_HISTOGRAM:
+ case MSMFB_OVERLAY_GET:
+ case MSMFB_OVERLAY_SET:
+ case MSMFB_OVERLAY_UNSET:
+ case MSMFB_OVERLAY_PLAY:
+ case MSMFB_OVERLAY_VSYNC_CTRL:
+ case MSMFB_METADATA_SET:
+ case MSMFB_METADATA_GET:
+ case MSMFB_OVERLAY_PREPARE:
+ ret = mdss_compat_overlay_ioctl(info, cmd, arg, file);
+ break;
+ case MSMFB_NOTIFY_UPDATE:
+ case MSMFB_DISPLAY_COMMIT:
+ default:
+ ret = mdss_fb_do_ioctl(info, cmd, arg, file);
+ break;
+ }
+
+ if (ret == -ENOTSUPP)
+ pr_err("%s: unsupported ioctl\n", __func__);
+ else if (ret)
+ pr_debug("%s: ioctl err cmd=%u ret=%d\n", __func__, cmd, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(mdss_fb_compat_ioctl);
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h
new file mode 100644
index 0000000..ebae393
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_COMPAT_UTILS_H
+#define MDSS_COMPAT_UTILS_H
+
+/*
+ * To allow proper structure padding for 64bit/32bit target
+ */
+#ifndef MDP_LAYER_COMMIT_V1_PAD
+#ifdef __LP64__
+#define MDP_LAYER_COMMIT_V1_PAD 2
+#else
+#define MDP_LAYER_COMMIT_V1_PAD 3
+#endif
+#endif
+
+struct mdp_buf_sync32 {
+ u32 flags;
+ u32 acq_fen_fd_cnt;
+ u32 session_id;
+ compat_caddr_t acq_fen_fd;
+ compat_caddr_t rel_fen_fd;
+ compat_caddr_t retire_fen_fd;
+};
+
+struct fb_cmap32 {
+ u32 start;
+ u32 len;
+ compat_caddr_t red;
+ compat_caddr_t green;
+ compat_caddr_t blue;
+ compat_caddr_t transp;
+};
+
+struct fb_image32 {
+ u32 dx;
+ u32 dy;
+ u32 width;
+ u32 height;
+ u32 fg_color;
+ u32 bg_color;
+ u8 depth;
+ compat_caddr_t data;
+ struct fb_cmap32 cmap;
+};
+
+struct fb_cursor32 {
+ u16 set;
+ u16 enable;
+ u16 rop;
+ compat_caddr_t mask;
+ struct fbcurpos hot;
+ struct fb_image32 image;
+};
+
+struct mdp_ccs32 {
+};
+
+struct msmfb_overlay_blt32 {
+};
+
+struct msmfb_overlay_3d32 {
+};
+
+struct msmfb_mixer_info_req32 {
+};
+
+struct msmfb_metadata32 {
+ uint32_t op;
+ uint32_t flags;
+ union {
+ struct mdp_misr misr_request;
+ struct mdp_blend_cfg blend_cfg;
+ struct mdp_mixer_cfg mixer_cfg;
+ uint32_t panel_frame_rate;
+ uint32_t video_info_code;
+ struct mdss_hw_caps caps;
+ uint8_t secure_en;
+ } data;
+};
+
+struct mdp_histogram_start_req32 {
+ uint32_t block;
+ uint8_t frame_cnt;
+ uint8_t bit_mask;
+ uint16_t num_bins;
+};
+
+struct mdp_histogram_data32 {
+ uint32_t block;
+ uint32_t bin_cnt;
+ compat_caddr_t c0;
+ compat_caddr_t c1;
+ compat_caddr_t c2;
+ compat_caddr_t extra_info;
+};
+
+struct mdp_pcc_coeff32 {
+ uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
+};
+
+struct mdp_pcc_coeff_v1_7_32 {
+ uint32_t c, r, g, b, rg, gb, rb, rgb;
+};
+
+struct mdp_pcc_data_v1_7_32 {
+ struct mdp_pcc_coeff_v1_7_32 r, g, b;
+};
+struct mdp_pcc_cfg_data32 {
+ uint32_t version;
+ uint32_t block;
+ uint32_t ops;
+ struct mdp_pcc_coeff32 r, g, b;
+ compat_caddr_t cfg_payload;
+};
+
+struct mdp_csc_cfg32 {
+ /* flags for enable CSC, toggling RGB,YUV input/output */
+ uint32_t flags;
+ uint32_t csc_mv[9];
+ uint32_t csc_pre_bv[3];
+ uint32_t csc_post_bv[3];
+ uint32_t csc_pre_lv[6];
+ uint32_t csc_post_lv[6];
+};
+
+struct mdp_csc_cfg_data32 {
+ uint32_t block;
+ struct mdp_csc_cfg32 csc_data;
+};
+
+struct mdp_bl_scale_data32 {
+ uint32_t min_lvl;
+ uint32_t scale;
+};
+
+struct mdp_pa_mem_col_cfg32 {
+ uint32_t color_adjust_p0;
+ uint32_t color_adjust_p1;
+ uint32_t hue_region;
+ uint32_t sat_region;
+ uint32_t val_region;
+};
+
+struct mdp_pa_v2_data32 {
+ /* Mask bits for PA features */
+ uint32_t flags;
+ uint32_t global_hue_adj;
+ uint32_t global_sat_adj;
+ uint32_t global_val_adj;
+ uint32_t global_cont_adj;
+ struct mdp_pa_mem_col_cfg32 skin_cfg;
+ struct mdp_pa_mem_col_cfg32 sky_cfg;
+ struct mdp_pa_mem_col_cfg32 fol_cfg;
+ uint32_t six_zone_len;
+ uint32_t six_zone_thresh;
+ compat_caddr_t six_zone_curve_p0;
+ compat_caddr_t six_zone_curve_p1;
+};
+
+struct mdp_pa_mem_col_data_v1_7_32 {
+ uint32_t color_adjust_p0;
+ uint32_t color_adjust_p1;
+ uint32_t color_adjust_p2;
+ uint32_t blend_gain;
+ uint8_t sat_hold;
+ uint8_t val_hold;
+ uint32_t hue_region;
+ uint32_t sat_region;
+ uint32_t val_region;
+};
+
+struct mdp_pa_data_v1_7_32 {
+ uint32_t mode;
+ uint32_t global_hue_adj;
+ uint32_t global_sat_adj;
+ uint32_t global_val_adj;
+ uint32_t global_cont_adj;
+ struct mdp_pa_mem_col_data_v1_7_32 skin_cfg;
+ struct mdp_pa_mem_col_data_v1_7_32 sky_cfg;
+ struct mdp_pa_mem_col_data_v1_7_32 fol_cfg;
+ uint32_t six_zone_thresh;
+ uint32_t six_zone_adj_p0;
+ uint32_t six_zone_adj_p1;
+ uint8_t six_zone_sat_hold;
+ uint8_t six_zone_val_hold;
+ uint32_t six_zone_len;
+ compat_caddr_t six_zone_curve_p0;
+ compat_caddr_t six_zone_curve_p1;
+};
+
+struct mdp_pa_v2_cfg_data32 {
+ uint32_t version;
+ uint32_t block;
+ uint32_t flags;
+ struct mdp_pa_v2_data32 pa_v2_data;
+ compat_caddr_t cfg_payload;
+};
+
+struct mdp_pa_cfg32 {
+ uint32_t flags;
+ uint32_t hue_adj;
+ uint32_t sat_adj;
+ uint32_t val_adj;
+ uint32_t cont_adj;
+};
+
+struct mdp_pa_cfg_data32 {
+ uint32_t block;
+ struct mdp_pa_cfg32 pa_data;
+};
+
+struct mdp_igc_lut_data_v1_7_32 {
+ uint32_t table_fmt;
+ uint32_t len;
+ compat_caddr_t c0_c1_data;
+ compat_caddr_t c2_data;
+};
+
+struct mdp_rgb_lut_data32 {
+ uint32_t flags;
+ uint32_t lut_type;
+ struct fb_cmap32 cmap;
+};
+
+struct mdp_igc_lut_data32 {
+ uint32_t block;
+ uint32_t version;
+ uint32_t len, ops;
+ compat_caddr_t c0_c1_data;
+ compat_caddr_t c2_data;
+ compat_caddr_t cfg_payload;
+};
+
+struct mdp_hist_lut_data_v1_7_32 {
+ uint32_t len;
+ compat_caddr_t data;
+};
+
+struct mdp_hist_lut_data32 {
+ uint32_t block;
+ uint32_t version;
+ uint32_t hist_lut_first;
+ uint32_t ops;
+ uint32_t len;
+ compat_caddr_t data;
+ compat_caddr_t cfg_payload;
+};
+
+struct mdp_ar_gc_lut_data32 {
+ uint32_t x_start;
+ uint32_t slope;
+ uint32_t offset;
+};
+
+struct mdp_pgc_lut_data_v1_7_32 {
+ uint32_t len;
+ compat_caddr_t c0_data;
+ compat_caddr_t c1_data;
+ compat_caddr_t c2_data;
+};
+
+struct mdp_pgc_lut_data32 {
+ uint32_t version;
+ uint32_t block;
+ uint32_t flags;
+ uint8_t num_r_stages;
+ uint8_t num_g_stages;
+ uint8_t num_b_stages;
+ compat_caddr_t r_data;
+ compat_caddr_t g_data;
+ compat_caddr_t b_data;
+ compat_caddr_t cfg_payload;
+};
+
+struct mdp_lut_cfg_data32 {
+ uint32_t lut_type;
+ union {
+ struct mdp_igc_lut_data32 igc_lut_data;
+ struct mdp_pgc_lut_data32 pgc_lut_data;
+ struct mdp_hist_lut_data32 hist_lut_data;
+ struct mdp_rgb_lut_data32 rgb_lut_data;
+ } data;
+};
+
+struct mdp_qseed_cfg32 {
+ uint32_t table_num;
+ uint32_t ops;
+ uint32_t len;
+ compat_caddr_t data;
+};
+
+struct mdp_qseed_cfg_data32 {
+ uint32_t block;
+ struct mdp_qseed_cfg32 qseed_data;
+};
+
+struct mdp_dither_cfg_data32 {
+ uint32_t block;
+ uint32_t flags;
+ uint32_t g_y_depth;
+ uint32_t r_cr_depth;
+ uint32_t b_cb_depth;
+};
+
+struct mdp_gamut_data_v1_7_32 {
+ uint32_t mode;
+ uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7];
+ compat_caddr_t c0_data[MDP_GAMUT_TABLE_NUM_V1_7];
+ compat_caddr_t c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7];
+ uint32_t tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+ compat_caddr_t scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+};
+
+struct mdp_gamut_cfg_data32 {
+ uint32_t block;
+ uint32_t flags;
+ uint32_t version;
+ uint32_t gamut_first;
+ uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+ compat_caddr_t r_tbl[MDP_GAMUT_TABLE_NUM];
+ compat_caddr_t g_tbl[MDP_GAMUT_TABLE_NUM];
+ compat_caddr_t b_tbl[MDP_GAMUT_TABLE_NUM];
+ compat_caddr_t cfg_payload;
+};
+
+struct mdp_calib_config_data32 {
+ uint32_t ops;
+ uint32_t addr;
+ uint32_t data;
+};
+
+struct mdp_calib_config_buffer32 {
+ uint32_t ops;
+ uint32_t size;
+ compat_caddr_t buffer;
+};
+
+struct mdp_calib_dcm_state32 {
+ uint32_t ops;
+ uint32_t dcm_state;
+};
+
+struct mdss_ad_init32 {
+ uint32_t asym_lut[33];
+ uint32_t color_corr_lut[33];
+ uint8_t i_control[2];
+ uint16_t black_lvl;
+ uint16_t white_lvl;
+ uint8_t var;
+ uint8_t limit_ampl;
+ uint8_t i_dither;
+ uint8_t slope_max;
+ uint8_t slope_min;
+ uint8_t dither_ctl;
+ uint8_t format;
+ uint8_t auto_size;
+ uint16_t frame_w;
+ uint16_t frame_h;
+ uint8_t logo_v;
+ uint8_t logo_h;
+ uint32_t alpha;
+ uint32_t alpha_base;
+ uint32_t bl_lin_len;
+ uint32_t bl_att_len;
+ compat_caddr_t bl_lin;
+ compat_caddr_t bl_lin_inv;
+ compat_caddr_t bl_att_lut;
+};
+
+struct mdss_ad_cfg32 {
+ uint32_t mode;
+ uint32_t al_calib_lut[33];
+ uint16_t backlight_min;
+ uint16_t backlight_max;
+ uint16_t backlight_scale;
+ uint16_t amb_light_min;
+ uint16_t filter[2];
+ uint16_t calib[4];
+ uint8_t strength_limit;
+ uint8_t t_filter_recursion;
+ uint16_t stab_itr;
+ uint32_t bl_ctrl_mode;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg32 {
+ uint32_t ops;
+ union {
+ struct mdss_ad_init32 init;
+ struct mdss_ad_cfg32 cfg;
+ } params;
+};
+
+struct mdss_ad_input32 {
+ uint32_t mode;
+ union {
+ uint32_t amb_light;
+ uint32_t strength;
+ uint32_t calib_bl;
+ } in;
+ uint32_t output;
+};
+
+struct mdss_calib_cfg32 {
+ uint32_t ops;
+ uint32_t calib_mask;
+};
+
+struct mdp_histogram_cfg32 {
+ uint32_t ops;
+ uint32_t block;
+ uint8_t frame_cnt;
+ uint8_t bit_mask;
+ uint16_t num_bins;
+};
+
+struct mdp_sharp_cfg32 {
+ uint32_t flags;
+ uint32_t strength;
+ uint32_t edge_thr;
+ uint32_t smooth_thr;
+ uint32_t noise_thr;
+};
+
+struct mdp_overlay_pp_params32 {
+ uint32_t config_ops;
+ struct mdp_csc_cfg32 csc_cfg;
+ struct mdp_qseed_cfg32 qseed_cfg[2];
+ struct mdp_pa_cfg32 pa_cfg;
+ struct mdp_pa_v2_data32 pa_v2_cfg;
+ struct mdp_igc_lut_data32 igc_cfg;
+ struct mdp_sharp_cfg32 sharp_cfg;
+ struct mdp_histogram_cfg32 hist_cfg;
+ struct mdp_hist_lut_data32 hist_lut_cfg;
+ struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data;
+ struct mdp_pcc_cfg_data32 pcc_cfg_data;
+};
+
+struct msmfb_mdp_pp32 {
+ uint32_t op;
+ union {
+ struct mdp_pcc_cfg_data32 pcc_cfg_data;
+ struct mdp_csc_cfg_data32 csc_cfg_data;
+ struct mdp_lut_cfg_data32 lut_cfg_data;
+ struct mdp_qseed_cfg_data32 qseed_cfg_data;
+ struct mdp_bl_scale_data32 bl_scale_data;
+ struct mdp_pa_cfg_data32 pa_cfg_data;
+ struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data;
+ struct mdp_dither_cfg_data32 dither_cfg_data;
+ struct mdp_gamut_cfg_data32 gamut_cfg_data;
+ struct mdp_calib_config_data32 calib_cfg;
+ struct mdss_ad_init_cfg32 ad_init_cfg;
+ struct mdss_calib_cfg32 mdss_calib_cfg;
+ struct mdss_ad_input32 ad_input;
+ struct mdp_calib_config_buffer32 calib_buffer;
+ struct mdp_calib_dcm_state32 calib_dcm;
+ } data;
+};
+
+struct mdp_overlay32 {
+ struct msmfb_img src;
+ struct mdp_rect src_rect;
+ struct mdp_rect dst_rect;
+ uint32_t z_order; /* stage number */
+ uint32_t is_fg; /* control alpha & transp */
+ uint32_t alpha;
+ uint32_t blend_op;
+ uint32_t transp_mask;
+ uint32_t flags;
+ uint32_t pipe_type;
+ uint32_t id;
+ uint8_t priority;
+ uint32_t user_data[6];
+ uint32_t bg_color;
+ uint8_t horz_deci;
+ uint8_t vert_deci;
+ struct mdp_overlay_pp_params32 overlay_pp_cfg;
+ struct mdp_scale_data scale;
+ uint8_t color_space;
+ uint32_t frame_rate;
+};
+
+struct mdp_overlay_list32 {
+ uint32_t num_overlays;
+ compat_caddr_t overlay_list;
+ uint32_t flags;
+ uint32_t processed_overlays;
+};
+
+struct mdp_input_layer32 {
+ uint32_t flags;
+ uint32_t pipe_ndx;
+ uint8_t horz_deci;
+ uint8_t vert_deci;
+ uint8_t alpha;
+ uint16_t z_order;
+ uint32_t transp_mask;
+ uint32_t bg_color;
+ enum mdss_mdp_blend_op blend_op;
+ enum mdp_color_space color_space;
+ struct mdp_rect src_rect;
+ struct mdp_rect dst_rect;
+ compat_caddr_t scale;
+ struct mdp_layer_buffer buffer;
+ compat_caddr_t pp_info;
+ int error_code;
+ uint32_t reserved[6];
+};
+
+struct mdp_output_layer32 {
+ uint32_t flags;
+ uint32_t writeback_ndx;
+ struct mdp_layer_buffer buffer;
+ enum mdp_color_space color_space;
+ uint32_t reserved[5];
+};
+struct mdp_layer_commit_v1_32 {
+ uint32_t flags;
+ int release_fence;
+ struct mdp_rect left_roi;
+ struct mdp_rect right_roi;
+ compat_caddr_t input_layers;
+ uint32_t input_layer_cnt;
+ compat_caddr_t output_layer;
+ int retire_fence;
+ compat_caddr_t dest_scaler;
+ uint32_t dest_scaler_cnt;
+ compat_caddr_t frc_info;
+ uint32_t reserved[MDP_LAYER_COMMIT_V1_PAD];
+};
+
+struct mdp_layer_commit32 {
+ uint32_t version;
+ union {
+ struct mdp_layer_commit_v1_32 commit_v1;
+ };
+};
+
+struct mdp_position_update32 {
+ compat_caddr_t __user *input_layers;
+ uint32_t input_layer_cnt;
+};
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.c b/drivers/video/fbdev/msm/mdss_dba_utils.c
new file mode 100644
index 0000000..2758a5a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.c
@@ -0,0 +1,912 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <video/msm_dba.h>
+#include <linux/extcon.h>
+
+#include "mdss_dba_utils.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss_cec_core.h"
+#include "mdss_fb.h"
+
+/* standard cec buf size + 1 byte specific to driver */
+#define CEC_BUF_SIZE (MAX_CEC_FRAME_SIZE + 1)
+#define MAX_SWITCH_NAME_SIZE 5
+#define MSM_DBA_MAX_PCLK 148500
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+
+struct mdss_dba_utils_data {
+ struct msm_dba_ops ops;
+ bool hpd_state;
+ bool audio_switch_registered;
+ bool display_switch_registered;
+ struct extcon_dev sdev_display;
+ struct extcon_dev sdev_audio;
+ struct kobject *kobj;
+ struct mdss_panel_info *pinfo;
+ void *dba_data;
+ void *edid_data;
+ void *timing_data;
+ void *cec_abst_data;
+ u8 *edid_buf;
+ u32 edid_buf_size;
+ u8 cec_buf[CEC_BUF_SIZE];
+ struct cec_ops cops;
+ struct cec_cbs ccbs;
+ char disp_switch_name[MAX_SWITCH_NAME_SIZE];
+ u32 current_vic;
+ bool support_audio;
+};
+
+static struct mdss_dba_utils_data *mdss_dba_utils_get_data(
+ struct device *device)
+{
+ struct msm_fb_data_type *mfd;
+ struct mdss_panel_info *pinfo;
+ struct fb_info *fbi;
+ struct mdss_dba_utils_data *udata = NULL;
+
+ if (!device) {
+ pr_err("Invalid device data\n");
+ goto end;
+ }
+
+ fbi = dev_get_drvdata(device);
+ if (!fbi) {
+ pr_err("Invalid fbi data\n");
+ goto end;
+ }
+
+ mfd = (struct msm_fb_data_type *)fbi->par;
+ if (!mfd) {
+ pr_err("Invalid mfd data\n");
+ goto end;
+ }
+
+ pinfo = mfd->panel_info;
+ if (!pinfo) {
+ pr_err("Invalid pinfo data\n");
+ goto end;
+ }
+
+ udata = pinfo->dba_data;
+end:
+ return udata;
+}
+
+static void mdss_dba_utils_notify_display(
+ struct mdss_dba_utils_data *udata, int val)
+{
+ int state = 0;
+
+ if (!udata) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (!udata->display_switch_registered) {
+ pr_err("display switch not registered\n");
+ return;
+ }
+
+ state = udata->sdev_display.state;
+
+ extcon_set_state_sync(&udata->sdev_display, 0, val);
+
+ pr_debug("cable state %s %d\n",
+ udata->sdev_display.state == state ?
+ "is same" : "switched to",
+ udata->sdev_display.state);
+}
+
+static void mdss_dba_utils_notify_audio(
+ struct mdss_dba_utils_data *udata, int val)
+{
+ int state = 0;
+
+ if (!udata) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (!udata->audio_switch_registered) {
+ pr_err("audio switch not registered\n");
+ return;
+ }
+
+ state = udata->sdev_audio.state;
+
+ extcon_set_state_sync(&udata->sdev_audio, 0, val);
+
+ pr_debug("audio state %s %d\n",
+ udata->sdev_audio.state == state ?
+ "is same" : "switched to",
+ udata->sdev_audio.state);
+}
+
+static ssize_t mdss_dba_utils_sysfs_rda_connected(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct mdss_dba_utils_data *udata = NULL;
+
+ if (!dev) {
+ pr_err("invalid device\n");
+ return -EINVAL;
+ }
+
+ udata = mdss_dba_utils_get_data(dev);
+
+ if (!udata) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", udata->hpd_state);
+ pr_debug("'%d'\n", udata->hpd_state);
+
+ return ret;
+}
+
+static ssize_t mdss_dba_utils_sysfs_rda_video_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct mdss_dba_utils_data *udata = NULL;
+
+ if (!dev) {
+ pr_debug("invalid device\n");
+ return -EINVAL;
+ }
+
+ udata = mdss_dba_utils_get_data(dev);
+
+ if (!udata) {
+ pr_debug("invalid input\n");
+ return -EINVAL;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", udata->current_vic);
+ pr_debug("'%d'\n", udata->current_vic);
+
+ return ret;
+}
+
+static ssize_t mdss_dba_utils_sysfs_wta_hpd(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mdss_dba_utils_data *udata = NULL;
+ int rc, hpd;
+
+ udata = mdss_dba_utils_get_data(dev);
+ if (!udata) {
+ pr_debug("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = kstrtoint(buf, 10, &hpd);
+ if (rc) {
+ pr_debug("%s: kstrtoint failed\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: set value: %d hpd state: %d\n", __func__,
+ hpd, udata->hpd_state);
+ if (!hpd) {
+ if (udata->ops.power_on)
+ udata->ops.power_on(udata->dba_data, false, 0);
+ return count;
+ }
+
+ /* power on downstream device */
+ if (udata->ops.power_on)
+ udata->ops.power_on(udata->dba_data, true, 0);
+
+ /* check if cable is connected to bridge chip */
+ if (udata->ops.check_hpd)
+ udata->ops.check_hpd(udata->dba_data, 0);
+
+ return count;
+}
+
+static ssize_t mdss_dba_utils_sysfs_rda_hpd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct mdss_dba_utils_data *udata = NULL;
+
+ if (!dev) {
+ pr_debug("invalid device\n");
+ return -EINVAL;
+ }
+
+ udata = mdss_dba_utils_get_data(dev);
+
+ if (!udata) {
+ pr_debug("invalid input\n");
+ return -EINVAL;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", udata->hpd_state);
+ pr_debug("'%d'\n", udata->hpd_state);
+
+ return ret;
+}
+
+static DEVICE_ATTR(connected, 0444,
+ mdss_dba_utils_sysfs_rda_connected, NULL);
+
+static DEVICE_ATTR(video_mode, 0444,
+ mdss_dba_utils_sysfs_rda_video_mode, NULL);
+
+static DEVICE_ATTR(hpd, 0644, mdss_dba_utils_sysfs_rda_hpd,
+ mdss_dba_utils_sysfs_wta_hpd);
+
+static struct attribute *mdss_dba_utils_fs_attrs[] = {
+ &dev_attr_connected.attr,
+ &dev_attr_video_mode.attr,
+ &dev_attr_hpd.attr,
+ NULL,
+};
+
+static struct attribute_group mdss_dba_utils_fs_attrs_group = {
+ .attrs = mdss_dba_utils_fs_attrs,
+};
+
+static int mdss_dba_utils_sysfs_create(struct kobject *kobj)
+{
+ int rc;
+
+ if (!kobj) {
+ pr_err("invalid input\n");
+ return -ENODEV;
+ }
+
+ rc = sysfs_create_group(kobj, &mdss_dba_utils_fs_attrs_group);
+ if (rc) {
+ pr_err("failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mdss_dba_utils_sysfs_remove(struct kobject *kobj)
+{
+ if (!kobj) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ sysfs_remove_group(kobj, &mdss_dba_utils_fs_attrs_group);
+}
+
+static bool mdss_dba_check_audio_support(struct mdss_dba_utils_data *udata)
+{
+ bool dvi_mode = false;
+ int audio_blk_size = 0;
+ struct msm_hdmi_audio_edid_blk audio_blk;
+
+ if (!udata) {
+ pr_debug("%s: Invalid input\n", __func__);
+ return false;
+ }
+ memset(&audio_blk, 0, sizeof(audio_blk));
+
+ /* check if sink is in DVI mode */
+ dvi_mode = !hdmi_edid_get_sink_mode(udata->edid_data);
+
+ /* get the audio block size info from EDID */
+ hdmi_edid_get_audio_blk(udata->edid_data, &audio_blk);
+ audio_blk_size = audio_blk.audio_data_blk_size;
+
+ if (dvi_mode || !audio_blk_size)
+ return false;
+ else
+ return true;
+}
+
+static void mdss_dba_utils_dba_cb(void *data, enum msm_dba_callback_event event)
+{
+ int ret = -EINVAL;
+ struct mdss_dba_utils_data *udata = data;
+ struct cec_msg msg = {0};
+ bool pluggable = false;
+ bool operands_present = false;
+ u32 no_of_operands, size, i;
+ u32 operands_offset = MAX_CEC_FRAME_SIZE - MAX_OPERAND_SIZE;
+ struct msm_hdmi_audio_edid_blk blk;
+
+ if (!udata) {
+ pr_err("Invalid data\n");
+ return;
+ }
+
+ pr_debug("event: %d\n", event);
+
+ if (udata->pinfo)
+ pluggable = udata->pinfo->is_pluggable;
+
+ switch (event) {
+ case MSM_DBA_CB_HPD_CONNECT:
+ if (udata->hpd_state)
+ break;
+ if (udata->ops.get_raw_edid) {
+ ret = udata->ops.get_raw_edid(udata->dba_data,
+ udata->edid_buf_size, udata->edid_buf, 0);
+
+ if (!ret) {
+ hdmi_edid_parser(udata->edid_data);
+ /* check whether audio is supported or not */
+ udata->support_audio =
+ mdss_dba_check_audio_support(udata);
+ if (udata->support_audio) {
+ hdmi_edid_get_audio_blk(
+ udata->edid_data, &blk);
+ if (udata->ops.set_audio_block)
+ udata->ops.set_audio_block(
+ udata->dba_data,
+ sizeof(blk), &blk);
+ }
+ } else {
+ pr_err("failed to get edid%d\n", ret);
+ }
+ }
+
+ if (pluggable) {
+ mdss_dba_utils_notify_display(udata, 1);
+ if (udata->support_audio)
+ mdss_dba_utils_notify_audio(udata, 1);
+ } else {
+ mdss_dba_utils_video_on(udata, udata->pinfo);
+ }
+
+ udata->hpd_state = true;
+ break;
+
+ case MSM_DBA_CB_HPD_DISCONNECT:
+ if (!udata->hpd_state)
+ break;
+ if (pluggable) {
+ if (udata->support_audio)
+ mdss_dba_utils_notify_audio(udata, 0);
+ mdss_dba_utils_notify_display(udata, 0);
+ } else {
+ mdss_dba_utils_video_off(udata);
+ }
+
+ udata->hpd_state = false;
+ break;
+
+ case MSM_DBA_CB_CEC_READ_PENDING:
+ if (udata->ops.hdmi_cec_read) {
+ ret = udata->ops.hdmi_cec_read(
+ udata->dba_data,
+ &size,
+ udata->cec_buf, 0);
+
+ if (ret || !size || size > CEC_BUF_SIZE) {
+ pr_err("%s: cec read failed\n", __func__);
+ return;
+ }
+ }
+
+ /* prepare cec msg */
+ msg.recvr_id = udata->cec_buf[0] & 0x0F;
+ msg.sender_id = (udata->cec_buf[0] & 0xF0) >> 4;
+ msg.opcode = udata->cec_buf[1];
+ msg.frame_size = (udata->cec_buf[MAX_CEC_FRAME_SIZE] & 0x1F);
+
+ operands_present = (msg.frame_size > operands_offset) &&
+ (msg.frame_size <= MAX_CEC_FRAME_SIZE);
+
+ if (operands_present) {
+ no_of_operands = msg.frame_size - operands_offset;
+
+ for (i = 0; i < no_of_operands; i++)
+ msg.operand[i] =
+ udata->cec_buf[operands_offset + i];
+ }
+
+ ret = udata->ccbs.msg_recv_notify(udata->ccbs.data, &msg);
+ if (ret)
+ pr_err("%s: failed to notify cec msg\n", __func__);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Enable or disable CEC on the bridge chip registered with DBA.
+ * Returns the hdmi_cec_on op's result, or -EINVAL when the instance
+ * is invalid or the op is not provided by the bridge driver.
+ */
+static int mdss_dba_utils_cec_enable(void *data, bool enable)
+{
+	struct mdss_dba_utils_data *udata = data;
+
+	if (!udata) {
+		pr_err("%s: Invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!udata->ops.hdmi_cec_on)
+		return -EINVAL;
+
+	return udata->ops.hdmi_cec_on(udata->dba_data, enable, 0);
+}
+
+/*
+ * Pack a cec_msg into the CEC wire byte layout (header byte, opcode,
+ * then operands) and transmit it through the bridge chip's
+ * hdmi_cec_write op. Returns the op's result, or -EINVAL on bad
+ * input or when the op is not provided.
+ */
+static int mdss_dba_utils_send_cec_msg(void *data, struct cec_msg *msg)
+{
+	int ret = -EINVAL, i;
+	u32 operands_offset = MAX_CEC_FRAME_SIZE - MAX_OPERAND_SIZE;
+	struct mdss_dba_utils_data *udata = data;
+
+	u8 buf[MAX_CEC_FRAME_SIZE];
+
+	if (!udata || !msg) {
+		pr_err("%s: Invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	/* header byte: initiator in high nibble, destination in low nibble */
+	buf[0] = (msg->sender_id << 4) | msg->recvr_id;
+	buf[1] = msg->opcode;
+
+	/*
+	 * NOTE(review): if frame_size < operands_offset and frame_size is
+	 * unsigned, the subtraction wraps; the i < MAX_OPERAND_SIZE bound
+	 * still limits the copy, but confirm frame_size is validated by
+	 * callers.
+	 */
+	for (i = 0; i < MAX_OPERAND_SIZE &&
+		i < msg->frame_size - operands_offset; i++)
+		buf[operands_offset + i] = msg->operand[i];
+
+	if (udata->ops.hdmi_cec_write)
+		ret = udata->ops.hdmi_cec_write(udata->dba_data,
+			msg->frame_size, (char *)buf, 0);
+
+	return ret;
+}
+
+/*
+ * mdss_dba_utils_init_switch_dev() - register display/audio extcon devices
+ * @udata:   DBA utils instance
+ * @fb_node: framebuffer node index (currently unused here)
+ *
+ * Registers the "hdmi" (display) and "hdmi_audio" (audio) extcon switch
+ * devices used to notify other modules of hot-plug events.
+ *
+ * Return: 0 on success, negative error code if either registration fails.
+ */
+static int mdss_dba_utils_init_switch_dev(struct mdss_dba_utils_data *udata,
+	u32 fb_node)
+{
+	int rc = -EINVAL;
+
+	if (!udata) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	/* create switch device to update display modules */
+	udata->sdev_display.name = "hdmi";
+	rc = extcon_dev_register(&udata->sdev_display);
+	if (rc) {
+		pr_err("display switch registration failed\n");
+		goto end;
+	}
+
+	udata->display_switch_registered = true;
+
+	/* create switch device to update audio modules */
+	udata->sdev_audio.name = "hdmi_audio";
+	rc = extcon_dev_register(&udata->sdev_audio);
+	if (rc) {
+		/*
+		 * Fix: the failure was previously stored in a separate
+		 * variable while the function returned 0 from the display
+		 * registration; propagate the audio failure to the caller.
+		 */
+		pr_err("audio switch registration failed\n");
+		goto end;
+	}
+
+	udata->audio_switch_registered = true;
+end:
+	return rc;
+}
+
+/*
+ * mdss_dba_get_vic_panel_info() - derive the HDMI VIC code from panel info
+ * @udata: DBA utils instance
+ * @pinfo: panel timing/porch information
+ *
+ * Builds an msm_hdmi_mode_timing_info from the panel's active/porch/pulse
+ * values and asks the HDMI helper for the matching video id code.
+ *
+ * Return: the VIC code, or 0 on invalid input / no match.
+ */
+static int mdss_dba_get_vic_panel_info(struct mdss_dba_utils_data *udata,
+	struct mdss_panel_info *pinfo)
+{
+	/*
+	 * Fix: zero-initialize both structs; only a subset of their fields
+	 * is assigned below, and the remainder was previously passed to
+	 * hdmi_get_video_id_code() as uninitialized stack data.
+	 */
+	struct msm_hdmi_mode_timing_info timing = {0};
+	struct hdmi_util_ds_data ds_data = {0};
+	u32 h_total, v_total, vic = 0;
+
+	if (!udata || !pinfo) {
+		pr_err("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	timing.active_h = pinfo->xres;
+	timing.back_porch_h = pinfo->lcdc.h_back_porch;
+	timing.front_porch_h = pinfo->lcdc.h_front_porch;
+	timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+	h_total = (timing.active_h + timing.back_porch_h +
+		timing.front_porch_h + timing.pulse_width_h);
+
+	timing.active_v = pinfo->yres;
+	timing.back_porch_v = pinfo->lcdc.v_back_porch;
+	timing.front_porch_v = pinfo->lcdc.v_front_porch;
+	timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+	v_total = (timing.active_v + timing.back_porch_v +
+		timing.front_porch_v + timing.pulse_width_v);
+
+	timing.refresh_rate = pinfo->mipi.frame_rate * 1000;
+	timing.pixel_freq = (h_total * v_total *
+		pinfo->mipi.frame_rate) / 1000;
+
+	ds_data.ds_registered = true;
+	ds_data.ds_max_clk = MSM_DBA_MAX_PCLK;
+
+	vic = hdmi_get_video_id_code(&timing, &ds_data);
+	pr_debug("%s: current vic code is %d\n", __func__, vic);
+
+	return vic;
+}
+
+/**
+ * mdss_dba_utils_video_on() - Allow clients to switch on the video
+ * @data: DBA utils instance which was allocated during registration
+ * @pinfo: detailed panel information like x, y, porch values etc
+ *
+ * This API is used to power on the video on device registered
+ * with DBA.
+ *
+ * Return: returns the result of the video on call on device.
+ */
+int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo)
+{
+	struct mdss_dba_utils_data *ud = data;
+	struct msm_dba_video_cfg video_cfg;
+	int ret = -EINVAL;
+
+	if (!ud || !pinfo) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	memset(&video_cfg, 0, sizeof(video_cfg));
+
+	/* translate panel timing into the DBA video configuration */
+	video_cfg.h_active = pinfo->xres;
+	video_cfg.v_active = pinfo->yres;
+	video_cfg.h_front_porch = pinfo->lcdc.h_front_porch;
+	video_cfg.v_front_porch = pinfo->lcdc.v_front_porch;
+	video_cfg.h_back_porch = pinfo->lcdc.h_back_porch;
+	video_cfg.v_back_porch = pinfo->lcdc.v_back_porch;
+	video_cfg.h_pulse_width = pinfo->lcdc.h_pulse_width;
+	video_cfg.v_pulse_width = pinfo->lcdc.v_pulse_width;
+	/* assumes pinfo->clk_rate is in Hz — TODO confirm against callers */
+	video_cfg.pclk_khz = (unsigned long)pinfo->clk_rate / 1000;
+	video_cfg.hdmi_mode = hdmi_edid_get_sink_mode(ud->edid_data);
+
+	/* Calculate number of DSI lanes configured */
+	video_cfg.num_of_input_lanes = 0;
+	if (pinfo->mipi.data_lane0)
+		video_cfg.num_of_input_lanes++;
+	if (pinfo->mipi.data_lane1)
+		video_cfg.num_of_input_lanes++;
+	if (pinfo->mipi.data_lane2)
+		video_cfg.num_of_input_lanes++;
+	if (pinfo->mipi.data_lane3)
+		video_cfg.num_of_input_lanes++;
+
+	/* Get scan information from EDID */
+	video_cfg.vic = mdss_dba_get_vic_panel_info(ud, pinfo);
+	ud->current_vic = video_cfg.vic;
+	video_cfg.scaninfo = hdmi_edid_get_sink_scaninfo(ud->edid_data,
+							video_cfg.vic);
+	if (ud->ops.video_on)
+		ret = ud->ops.video_on(ud->dba_data, true, &video_cfg, 0);
+
+end:
+	return ret;
+}
+
+/**
+ * mdss_dba_utils_video_off() - Allow clients to switch off the video
+ * @data: DBA utils instance which was allocated during registration
+ *
+ * Turns video off on the bridge device by invoking its video_on op
+ * with the enable flag cleared.
+ *
+ * Return: result of the device call, or -EINVAL on invalid input or
+ * missing op.
+ */
+int mdss_dba_utils_video_off(void *data)
+{
+	struct mdss_dba_utils_data *ud = data;
+
+	if (!ud) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ud->ops.video_on)
+		return -EINVAL;
+
+	return ud->ops.video_on(ud->dba_data, false, NULL, 0);
+}
+
+/**
+ * mdss_dba_utils_hdcp_enable() - Allow clients to switch on HDCP.
+ * @data: DBA utils instance which was allocated during registration
+ * @enable: flag to enable or disable HDCP authentication
+ *
+ * Starts (or stops) HDCP authentication on the device registered
+ * with DBA by calling its hdcp_enable op, if present.
+ */
+void mdss_dba_utils_hdcp_enable(void *data, bool enable)
+{
+	struct mdss_dba_utils_data *ud = data;
+
+	if (!ud) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!ud->ops.hdcp_enable)
+		return;
+
+	ud->ops.hdcp_enable(ud->dba_data, enable, enable, 0);
+}
+
+/*
+ * Update the DSI data-lane enables for the current panel resolution.
+ * Looks up the bridge chip's supported-timing table for a matching
+ * xres/yres/bpp/fps entry; falls back to the DT default lane count
+ * when no table or match exists. Lane counts outside 1..3 enable all
+ * four lanes.
+ */
+void mdss_dba_update_lane_cfg(struct mdss_panel_info *pinfo)
+{
+	struct mdss_dba_utils_data *dba_data;
+	struct mdss_dba_timing_info *tbl = NULL;
+	int idx, lanes;
+
+	if (pinfo == NULL)
+		return;
+
+	/* default from DT, used when no matching resolution is found */
+	lanes = pinfo->mipi.default_lanes;
+
+	dba_data = (struct mdss_dba_utils_data *)(pinfo->dba_data);
+	if (dba_data)
+		tbl = (struct mdss_dba_timing_info *)(dba_data->timing_data);
+
+	/* table is terminated by an entry with xres == 0xffff */
+	for (idx = 0; tbl && tbl[idx].xres != 0xffff; idx++) {
+		if (tbl[idx].xres == pinfo->xres &&
+		    tbl[idx].yres == pinfo->yres &&
+		    tbl[idx].bpp == pinfo->bpp &&
+		    tbl[idx].fps == pinfo->mipi.frame_rate) {
+			lanes = tbl[idx].lanes;
+			break;
+		}
+	}
+
+	/* lane0 is always on; higher lanes follow the resolved count */
+	pinfo->mipi.data_lane0 = 1;
+	pinfo->mipi.data_lane1 = (lanes != 1) ? 1 : 0;
+	pinfo->mipi.data_lane2 = (lanes != 1 && lanes != 2) ? 1 : 0;
+	pinfo->mipi.data_lane3 =
+		(lanes != 1 && lanes != 2 && lanes != 3) ? 1 : 0;
+}
+
+/**
+ * mdss_dba_utils_init() - Allow clients to register with DBA utils
+ * @uid: Initialization data for registration.
+ *
+ * This API lets the client to register with DBA Utils module.
+ * This allocate utils' instance and register with DBA (Display
+ * Bridge Abstract). Creates sysfs nodes and switch nodes to interact
+ * with other modules. Also registers with EDID parser to parse
+ * the EDID buffer.
+ *
+ * Return: Instance of DBA utils which needs to be sent as parameter
+ * when calling DBA utils APIs, or an ERR_PTR() on failure.
+ */
+void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *uid)
+{
+	struct hdmi_edid_init_data edid_init_data;
+	struct mdss_dba_utils_data *udata = NULL;
+	struct msm_dba_reg_info info;
+	struct cec_abstract_init_data cec_abst_init_data;
+	int ret = 0;
+
+	if (!uid) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	udata = kzalloc(sizeof(*udata), GFP_KERNEL);
+	if (!udata) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	memset(&edid_init_data, 0, sizeof(edid_init_data));
+	memset(&info, 0, sizeof(info));
+
+	/* initialize DBA registration data */
+	strlcpy(info.client_name, uid->client_name, MSM_DBA_CLIENT_NAME_LEN);
+	strlcpy(info.chip_name, uid->chip_name, MSM_DBA_CHIP_NAME_MAX_LEN);
+	info.instance_id = uid->instance_id;
+	info.cb = mdss_dba_utils_dba_cb;
+	info.cb_data = udata;
+
+	/* register client with DBA and get device's ops*/
+	if (IS_ENABLED(CONFIG_MSM_DBA)) {
+		udata->dba_data = msm_dba_register_client(&info, &udata->ops);
+		if (IS_ERR_OR_NULL(udata->dba_data)) {
+			pr_err("ds not configured\n");
+			/* PTR_ERR(NULL) is 0; map a NULL return to -ENODEV */
+			ret = udata->dba_data ?
+				PTR_ERR(udata->dba_data) : -ENODEV;
+			goto error;
+		}
+	} else {
+		pr_err("DBA not enabled\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	/* create sysfs nodes for other modules to intract with utils */
+	ret = mdss_dba_utils_sysfs_create(uid->kobj);
+	if (ret) {
+		pr_err("sysfs creation failed\n");
+		goto error;
+	}
+
+	/* keep init data for future use */
+	udata->kobj = uid->kobj;
+	udata->pinfo = uid->pinfo;
+
+	/* Initialize EDID feature */
+	edid_init_data.kobj = uid->kobj;
+	edid_init_data.ds_data.ds_registered = true;
+	edid_init_data.ds_data.ds_max_clk = MSM_DBA_MAX_PCLK;
+	edid_init_data.max_pclk_khz = MSM_DBA_MAX_PCLK;
+
+	/* register with edid module for parsing edid buffer */
+	udata->edid_data = hdmi_edid_init(&edid_init_data);
+	if (!udata->edid_data) {
+		pr_err("edid parser init failed\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	/* update edid data to retrieve it back in edid parser */
+	if (uid->pinfo) {
+		uid->pinfo->edid_data = udata->edid_data;
+		/* Initialize to default resolution */
+		hdmi_edid_set_video_resolution(uid->pinfo->edid_data,
+					DEFAULT_VIDEO_RESOLUTION, true);
+	}
+
+	/* get edid buffer from edid parser */
+	udata->edid_buf = edid_init_data.buf;
+	udata->edid_buf_size = edid_init_data.buf_size;
+
+	/* Initialize cec abstract layer and get callbacks */
+	udata->cops.send_msg = mdss_dba_utils_send_cec_msg;
+	udata->cops.enable = mdss_dba_utils_cec_enable;
+	udata->cops.data = udata;
+
+	/* initialize cec abstraction module */
+	cec_abst_init_data.kobj = uid->kobj;
+	cec_abst_init_data.ops = &udata->cops;
+	cec_abst_init_data.cbs = &udata->ccbs;
+
+	udata->cec_abst_data = cec_abstract_init(&cec_abst_init_data);
+	if (IS_ERR_OR_NULL(udata->cec_abst_data)) {
+		pr_err("error initializing cec abstract module\n");
+		/*
+		 * Fix: previously took PTR_ERR() of an uninitialized local
+		 * variable; use the actual return value (mapping NULL to
+		 * -ENODEV since PTR_ERR(NULL) is 0).
+		 */
+		ret = udata->cec_abst_data ?
+			PTR_ERR(udata->cec_abst_data) : -ENODEV;
+		goto error;
+	}
+
+	/* get the timing data for the adv chip */
+	if (udata->ops.get_supp_timing_info)
+		udata->timing_data = udata->ops.get_supp_timing_info();
+	else
+		udata->timing_data = NULL;
+
+	/* update cec data to retrieve it back in cec abstract module */
+	if (uid->pinfo) {
+		uid->pinfo->is_cec_supported = true;
+		uid->pinfo->cec_data = udata->cec_abst_data;
+
+		/*
+		 * TODO: Currently there is no support from HAL to send
+		 * HPD events to driver for usecase where bridge chip
+		 * is used as primary panel. Once support is added remove
+		 * this explicit calls to bridge chip driver.
+		 */
+		if (!uid->pinfo->is_pluggable) {
+			if (udata->ops.power_on && !(uid->cont_splash_enabled))
+				udata->ops.power_on(udata->dba_data, true, 0);
+			if (udata->ops.check_hpd)
+				udata->ops.check_hpd(udata->dba_data, 0);
+		} else {
+			/* register display and audio switch devices */
+			ret = mdss_dba_utils_init_switch_dev(udata,
+				uid->fb_node);
+			if (ret) {
+				pr_err("switch dev registration failed\n");
+				goto error;
+			}
+		}
+	}
+
+	return udata;
+
+error:
+	mdss_dba_utils_deinit(udata);
+	return ERR_PTR(ret);
+}
+
+/**
+ * mdss_dba_utils_deinit() - Allow clients to de-register with DBA utils
+ * @data: DBA utils data that was allocated during registration.
+ *
+ * This API will release all the resources allocated during registration
+ * and delete the DBA utils instance. Safe to call with a partially
+ * initialized instance (each teardown step is guarded), which is why
+ * the init error path funnels through here.
+ */
+void mdss_dba_utils_deinit(void *data)
+{
+	struct mdss_dba_utils_data *udata = data;
+
+	if (!udata) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!IS_ERR_OR_NULL(udata->cec_abst_data))
+		cec_abstract_deinit(udata->cec_abst_data);
+
+	if (udata->edid_data)
+		hdmi_edid_deinit(udata->edid_data);
+
+	/* clear back-references stored in the panel info during init */
+	if (udata->pinfo) {
+		udata->pinfo->edid_data = NULL;
+		udata->pinfo->is_cec_supported = false;
+	}
+
+	if (udata->audio_switch_registered)
+		extcon_dev_unregister(&udata->sdev_audio);
+
+	if (udata->display_switch_registered)
+		extcon_dev_unregister(&udata->sdev_display);
+
+	if (udata->kobj)
+		mdss_dba_utils_sysfs_remove(udata->kobj);
+
+	if (IS_ENABLED(CONFIG_MSM_DBA)) {
+		if (!IS_ERR_OR_NULL(udata->dba_data))
+			msm_dba_deregister_client(udata->dba_data);
+	}
+
+	kfree(udata);
+}
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.h b/drivers/video/fbdev/msm/mdss_dba_utils.h
new file mode 100644
index 0000000..be18d2f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_DBA_UTILS__
+#define __MDSS_DBA_UTILS__
+
+#include <linux/types.h>
+
+#include "mdss_panel.h"
+
+/**
+ * struct mdss_dba_utils_init_data - Init data for registering with DBA utils.
+ * @kobj: An instance of Kobject for sysfs creation
+ * @instance_id: Instance ID of device registered with DBA
+ * @fb_node: Framebuffer node index passed to switch device registration
+ * @chip_name: Name of the device registered with DBA
+ * @client_name: Name of the client registering with DBA
+ * @pinfo: Detailed panel information
+ * @cont_splash_enabled: Flag to check if cont splash was enabled on bridge
+ *
+ * This structure's instance is needed to be passed as parameter
+ * to register API to let the DBA utils module configure and
+ * allocate an instance of DBA utils for the client.
+ */
+struct mdss_dba_utils_init_data {
+	struct kobject *kobj;
+	u32 instance_id;
+	u32 fb_node;
+	char *chip_name;
+	char *client_name;
+	struct mdss_panel_info *pinfo;
+	bool cont_splash_enabled;
+};
+
+int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo);
+int mdss_dba_utils_video_off(void *data);
+void mdss_dba_utils_hdcp_enable(void *data, bool enable);
+
+void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *init_data);
+void mdss_dba_utils_deinit(void *data);
+void mdss_dba_update_lane_cfg(struct mdss_panel_info *pinfo);
+#endif /* __MDSS_DBA_UTILS__ */
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
new file mode 100644
index 0000000..f38d40c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -0,0 +1,1810 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_hwio.h"
+#include "mdss_debug.h"
+#include "mdss_dsi.h"
+
+#define DEFAULT_BASE_REG_CNT 0x100
+#define GROUP_BYTES 4
+#define ROW_BYTES 16
+#define MAX_VSYNC_COUNT 0xFFFFFFF
+
+#define DEFAULT_READ_PANEL_POWER_MODE_REG 0x0A
+#define PANEL_REG_ADDR_LEN 8
+#define PANEL_REG_FORMAT_LEN 5
+#define PANEL_TX_MAX_BUF 256
+#define PANEL_CMD_MIN_TX_COUNT 2
+#define PANEL_DATA_NODE_LEN 80
+/* MDP3 HW Version */
+#define MDP_CORE_HW_VERSION 0x03050306
+
+/* Hex number + whitespace */
+#define NEXT_VALUE_OFFSET 3
+
+#define INVALID_XIN_ID 0xFF
+
+static DEFINE_MUTEX(mdss_debug_lock);
+
+static char panel_reg[2] = {DEFAULT_READ_PANEL_POWER_MODE_REG, 0x00};
+
+/* debugfs open: stash the node's private data and forbid seeking. */
+static int panel_debug_base_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	/* non-seekable: reads/writes always operate from offset 0 */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+/* debugfs release: drop any cached dump buffer under the debug lock. */
+static int panel_debug_base_release(struct inode *inode, struct file *file)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+
+	mutex_lock(&mdss_debug_lock);
+	if (dbg && dbg->buf) {
+		kfree(dbg->buf);
+		dbg->buf = NULL;
+		dbg->buf_len = 0;
+	}
+	mutex_unlock(&mdss_debug_lock);
+	return 0;
+}
+
+/*
+ * Write handler for the panel "off" debugfs node. Parses
+ * "<offset-hex> <count-dec>" from userspace and stores the pair as the
+ * window used by subsequent panel register reads. The count is clamped
+ * so the window never exceeds max_offset.
+ */
+static ssize_t panel_debug_base_offset_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[PANEL_TX_MAX_BUF] = {0x0};
+
+	if (!dbg)
+		return -ENODEV;
+
+	/* reserve one byte for the NUL terminator */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (sscanf(buf, "%x %u", &off, &cnt) != 2)
+		return -EFAULT;
+
+	if (off > dbg->max_offset)
+		return -EINVAL;
+
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	mutex_lock(&mdss_debug_lock);
+	dbg->off = off;
+	dbg->cnt = cnt;
+	mutex_unlock(&mdss_debug_lock);
+
+	pr_debug("offset=%x cnt=%d\n", off, cnt);
+
+	return count;
+}
+
+/*
+ * Read handler for the panel "off" debugfs node: reports the currently
+ * configured "<offset> <count>" window as one line of text.
+ */
+static ssize_t panel_debug_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	int len = 0;
+	char buf[PANEL_TX_MAX_BUF] = {0x0};
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	mutex_lock(&mdss_debug_lock);
+	len = snprintf(buf, sizeof(buf), "0x%02zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0 || len >= sizeof(buf)) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;
+	}
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		mutex_unlock(&mdss_debug_lock);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+}
+
+/*
+ * Write handler for the panel "reg" debugfs node. Parses a sequence of
+ * whitespace-separated two-digit hex bytes from userspace into a DSI
+ * payload and sends it to the panel as a DCS command (type taken from
+ * dbg->cmd_data_type), with MDP clocks held on around the transfer.
+ */
+static ssize_t panel_debug_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	char buf[PANEL_TX_MAX_BUF] = {0x0};
+	char reg[PANEL_TX_MAX_BUF] = {0x0};
+	u32 len = 0, value = 0;
+	char *bufp;
+
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_mdp_ctl *ctl = mdata->ctl_off + 0;
+	struct mdss_panel_data *panel_data = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	struct dsi_cmd_desc dsi_write_cmd = {
+		{0/*data type*/, 1, 0, 0, 0, 0/* len */}, reg};
+	struct dcs_cmd_req cmdreq;
+
+	if (!dbg || !mdata)
+		return -ENODEV;
+
+	/* get command string from user */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	/* MDP3-class and older targets keep panel data on mdss_res */
+	if ((mdata->mdp_rev <= MDSS_MDP_HW_REV_105) ||
+		(mdata->mdp_rev == MDP_CORE_HW_VERSION))
+		panel_data = mdss_res->pdata;
+	else
+		panel_data = ctl->panel_data;
+
+	/*
+	 * NOTE(review): this ctrl_pdata (from panel_data) is overwritten
+	 * below by one derived from ctl->panel_data before use — confirm
+	 * which source is intended for the write path.
+	 */
+	ctrl_pdata = container_of(panel_data,
+		struct mdss_dsi_ctrl_pdata, panel_data);
+
+	buf[count] = 0;	/* end of string */
+
+	bufp = buf;
+	/* End of a hex value in given string */
+	bufp[NEXT_VALUE_OFFSET - 1] = 0;
+	while (kstrtouint(bufp, 16, &value) == 0) {
+		reg[len++] = value;
+		if (len >= PANEL_TX_MAX_BUF) {
+			pr_err("wrong input reg len\n");
+			return -EFAULT;
+		}
+		bufp += NEXT_VALUE_OFFSET;
+		if ((bufp >= (buf + count)) || (bufp < buf)) {
+			pr_warn("%s,buffer out-of-bounds\n", __func__);
+			break;
+		}
+		/* End of a hex value in given string */
+		if ((bufp + NEXT_VALUE_OFFSET - 1) < (buf + count))
+			bufp[NEXT_VALUE_OFFSET - 1] = 0;
+	}
+	if (len < PANEL_CMD_MIN_TX_COUNT) {
+		pr_err("wrong input reg len\n");
+		return -EFAULT;
+	}
+
+	/* put command to cmdlist */
+	dsi_write_cmd.dchdr.dtype = dbg->cmd_data_type;
+	dsi_write_cmd.dchdr.dlen = len;
+	dsi_write_cmd.payload = reg;
+
+	cmdreq.cmds = &dsi_write_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	ctl = mdata->ctl_off + 0;
+	ctrl_pdata = container_of(ctl->panel_data,
+		struct mdss_dsi_ctrl_pdata, panel_data);
+
+	/* keep MDP clocks on while the DSI command is queued */
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(1);
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)
+		mdss_dsi_cmdlist_put(ctrl_pdata, &cmdreq);
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(0);
+
+	return count;
+}
+
+/*
+ * Read handler for the panel "reg" debugfs node. Issues a DCS read of
+ * dbg->cnt bytes at the previously configured offset and formats the
+ * result as "0xOFF: 0xAA 0xBB ...".
+ */
+static ssize_t panel_debug_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 i, len = 0, reg_buf_len = 0;
+	char *panel_reg_buf, *rx_buf;
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_mdp_ctl *ctl = mdata->ctl_off + 0;
+	struct mdss_panel_data *panel_data = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int rc = -EFAULT;
+
+	if (!dbg)
+		return -ENODEV;
+
+	mutex_lock(&mdss_debug_lock);
+	if (!dbg->cnt) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;
+	}
+
+	if (*ppos) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;	/* the end */
+	}
+
+	/* '0x' + 2 digit + blank = 5 bytes for each number */
+	reg_buf_len = (dbg->cnt * PANEL_REG_FORMAT_LEN)
+		    + PANEL_REG_ADDR_LEN + 1;
+	rx_buf = kzalloc(dbg->cnt, GFP_KERNEL);
+	panel_reg_buf = kzalloc(reg_buf_len, GFP_KERNEL);
+
+	if (!rx_buf || !panel_reg_buf) {
+		pr_err("not enough memory to hold panel reg dump\n");
+		rc = -ENOMEM;
+		goto read_reg_fail;
+	}
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(1);
+
+	/* NOTE(review): dbg->off is truncated to one byte here — confirm
+	 * panel register addresses are always <= 0xff.
+	 */
+	panel_reg[0] = dbg->off;
+	if ((mdata->mdp_rev <= MDSS_MDP_HW_REV_105) ||
+		(mdata->mdp_rev == MDP_CORE_HW_VERSION))
+		panel_data = mdss_res->pdata;
+	else
+		panel_data = ctl->panel_data;
+
+	ctrl_pdata = container_of(panel_data,
+		struct mdss_dsi_ctrl_pdata, panel_data);
+
+	mdss_dsi_panel_cmd_read(ctrl_pdata, panel_reg[0],
+		panel_reg[1], NULL, rx_buf, dbg->cnt);
+
+	len = scnprintf(panel_reg_buf, reg_buf_len, "0x%02zx: ", dbg->off);
+
+	for (i = 0; (len < reg_buf_len) && (i < ctrl_pdata->rx_len); i++)
+		len += scnprintf(panel_reg_buf + len, reg_buf_len - len,
+				"0x%02x ", rx_buf[i]);
+
+	if (len)
+		panel_reg_buf[len - 1] = '\n';
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(0);
+
+	if ((count < reg_buf_len)
+			|| (copy_to_user(user_buf, panel_reg_buf, len)))
+		goto read_reg_fail;
+
+	kfree(rx_buf);
+	kfree(panel_reg_buf);
+
+	*ppos += len;	/* increase offset */
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+
+read_reg_fail:
+	kfree(rx_buf);
+	kfree(panel_reg_buf);
+	mutex_unlock(&mdss_debug_lock);
+	return rc;
+}
+
+/* debugfs ops for the panel offset/count window node */
+static const struct file_operations panel_off_fops = {
+	.open = panel_debug_base_open,
+	.release = panel_debug_base_release,
+	.read = panel_debug_base_offset_read,
+	.write = panel_debug_base_offset_write,
+};
+
+/* debugfs ops for the panel register read/write node */
+static const struct file_operations panel_reg_fops = {
+	.open = panel_debug_base_open,
+	.release = panel_debug_base_release,
+	.read = panel_debug_base_reg_read,
+	.write = panel_debug_base_reg_write,
+};
+
+/*
+ * panel_debug_register_base() - create panel debugfs nodes for one range
+ * @name: optional prefix for the node names
+ * @base: iomem base of the range (stored, not dereferenced here)
+ * @max_offset: size of the accessible window
+ *
+ * Creates <name>_cmd_data_type, <name>_off and <name>_reg debugfs files
+ * under the mdss debug root and links the new range onto base_list.
+ *
+ * Return: 0 on success, -ENODEV/-ENOMEM on failure (partially created
+ * files are removed on the error paths).
+ */
+int panel_debug_register_base(const char *name, void __iomem *base,
+				    size_t max_offset)
+{
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_debug_data *mdd;
+	struct mdss_debug_base *dbg;
+	struct dentry *ent_off, *ent_reg, *ent_type;
+	char dn[PANEL_DATA_NODE_LEN] = "";
+	int prefix_len = 0;
+
+	if (!mdata || !mdata->debug_inf.debug_data)
+		return -ENODEV;
+
+	mdd = mdata->debug_inf.debug_data;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	dbg->base = base;
+	dbg->max_offset = max_offset;
+	dbg->off = 0x0a;
+	dbg->cnt = 0x01;
+	dbg->cmd_data_type = DTYPE_DCS_LWRITE;
+
+	if (name)
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+
+	strlcpy(dn + prefix_len, "cmd_data_type", sizeof(dn) - prefix_len);
+	ent_type = debugfs_create_x8(dn, 0644, mdd->root,
+		(u8 *)&dbg->cmd_data_type);
+
+	if (IS_ERR_OR_NULL(ent_type)) {
+		pr_err("debugfs_create_file: data_type fail\n");
+		goto type_fail;
+	}
+
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0644, mdd->root,
+		dbg, &panel_off_fops);
+
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0644, mdd->root,
+		dbg, &panel_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	/* Initialize list to make sure check for null list will be valid */
+	INIT_LIST_HEAD(&dbg->dump_list);
+
+	list_add(&dbg->head, &mdd->base_list);
+
+	return 0;
+
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	debugfs_remove(ent_type);
+type_fail:
+	kfree(dbg);
+	return -ENODEV;
+}
+
+/* debugfs open: stash the node's private data and forbid seeking. */
+static int mdss_debug_base_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	/* non-seekable: reads/writes always operate from offset 0 */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+/* debugfs release: drop any cached register dump under the debug lock. */
+static int mdss_debug_base_release(struct inode *inode, struct file *file)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+
+	mutex_lock(&mdss_debug_lock);
+	if (dbg && dbg->buf) {
+		kfree(dbg->buf);
+		dbg->buf = NULL;
+		dbg->buf_len = 0;
+	}
+	mutex_unlock(&mdss_debug_lock);
+	return 0;
+}
+
+/*
+ * Write handler for the MDSS "off" debugfs node. Parses
+ * "<offset-hex> <count-hex>" and stores the pair as the window for
+ * subsequent register dumps. Offsets must be 32-bit aligned; the count
+ * is clamped to max_offset.
+ */
+static ssize_t mdss_debug_base_offset_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[24];
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (sscanf(buf, "%5x %x", &off, &cnt) != 2)
+		return -EFAULT;
+
+	/*
+	 * Fix: this alignment check previously ran before sscanf() filled
+	 * in 'off' (still zero-initialized), so it never rejected anything.
+	 * Validate the parsed value instead.
+	 */
+	if (off % sizeof(u32))
+		return -EINVAL;
+
+	if (off > dbg->max_offset)
+		return -EINVAL;
+
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	mutex_lock(&mdss_debug_lock);
+	dbg->off = off;
+	dbg->cnt = cnt;
+	mutex_unlock(&mdss_debug_lock);
+
+	pr_debug("offset=%x cnt=%x\n", off, cnt);
+
+	return count;
+}
+
+/*
+ * Read handler for the MDSS "off" debugfs node: reports the currently
+ * configured "<offset> <count>" window as one line of text.
+ */
+static ssize_t mdss_debug_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	int len = 0;
+	char buf[24] = {'\0'};
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	mutex_lock(&mdss_debug_lock);
+	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0 || len >= sizeof(buf)) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;
+	}
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		mutex_unlock(&mdss_debug_lock);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+}
+
+/*
+ * Write handler for the MDSS "reg" debugfs node. Parses
+ * "<offset-hex> <value-hex>" and writes the 32-bit value into the
+ * register block at that offset, with MDP clocks held on around the
+ * access. Offsets must be 32-bit aligned and inside the window.
+ */
+static ssize_t mdss_debug_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	struct mdss_data_type *mdata = mdss_res;
+	size_t off;
+	u32 data, cnt;
+	char buf[24];
+
+	if (!dbg || !mdata)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(buf, "%zx %x", &off, &data);
+
+	if (cnt < 2)
+		return -EFAULT;
+
+	/* registers are word-addressed */
+	if (off % sizeof(u32))
+		return -EFAULT;
+
+	if (off >= dbg->max_offset)
+		return -EFAULT;
+
+	/* clocks must be on for any register access */
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(1);
+
+	writel_relaxed(data, dbg->base + off);
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(0);
+
+	pr_debug("addr=%zx data=%x\n", off, data);
+
+	return count;
+}
+
+/*
+ * Read handler for the MDSS "reg" debugfs node. On the first read it
+ * hex-dumps dbg->cnt bytes starting at dbg->off into a cached buffer
+ * (with MDP clocks held on); subsequent reads stream that cache out to
+ * userspace until exhausted.
+ */
+static ssize_t mdss_debug_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	struct mdss_data_type *mdata = mdss_res;
+	size_t len;
+
+	if (!dbg || !mdata) {
+		pr_err("invalid handle\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&mdss_debug_lock);
+
+	if (!dbg->buf) {
+		char dump_buf[64];
+		char *ptr;
+		int cnt, tot;
+
+		/*
+		 * Fix: validate alignment before allocating the dump
+		 * buffer, and release the lock on failure. The old code
+		 * returned here with mdss_debug_lock held (deadlocking
+		 * every later debugfs access) and leaked dbg->buf.
+		 */
+		if (dbg->off % sizeof(u32)) {
+			mutex_unlock(&mdss_debug_lock);
+			return -EFAULT;
+		}
+
+		dbg->buf_len = sizeof(dump_buf) *
+			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+
+		if (!dbg->buf) {
+			mutex_unlock(&mdss_debug_lock);
+			return -ENOMEM;
+		}
+
+		ptr = dbg->base + dbg->off;
+		tot = 0;
+
+		if (mdata->debug_inf.debug_enable_clock)
+			mdata->debug_inf.debug_enable_clock(1);
+
+		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
+					   ROW_BYTES, GROUP_BYTES, dump_buf,
+					   sizeof(dump_buf), false);
+			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+					"0x%08x: %s\n",
+					((int) (unsigned long) ptr) -
+					((int) (unsigned long) dbg->base),
+					dump_buf);
+
+			ptr += ROW_BYTES;
+			tot += len;
+			if (tot >= dbg->buf_len)
+				break;
+		}
+		if (mdata->debug_inf.debug_enable_clock)
+			mdata->debug_inf.debug_enable_clock(0);
+
+		dbg->buf_len = tot;
+	}
+
+	if (*ppos >= dbg->buf_len) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;	/* done reading */
+	}
+
+	len = min(count, dbg->buf_len - (size_t) *ppos);
+	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+		pr_err("failed to copy to user\n");
+		mutex_unlock(&mdss_debug_lock);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+}
+
+/* debugfs ops for the MDSS offset/count window node */
+static const struct file_operations mdss_off_fops = {
+	.open = mdss_debug_base_open,
+	.release = mdss_debug_base_release,
+	.read = mdss_debug_base_offset_read,
+	.write = mdss_debug_base_offset_write,
+};
+
+/* debugfs ops for the MDSS register read/write node */
+static const struct file_operations mdss_reg_fops = {
+	.open = mdss_debug_base_open,
+	.release = mdss_debug_base_release,
+	.read = mdss_debug_base_reg_read,
+	.write = mdss_debug_base_reg_write,
+};
+
+/*
+ * mdss_debug_register_base() - create MDSS debugfs nodes for one range
+ * @name: optional prefix for the node names ("mdp" gets no prefix)
+ * @base: iomem base of the register range
+ * @max_offset: size of the accessible window
+ * @dbg_blk: optional out-pointer receiving the new range descriptor
+ *
+ * Creates <name>_off and <name>_reg debugfs files under the mdss debug
+ * root and links the new range onto base_list.
+ *
+ * Return: 0 on success, -ENODEV/-ENOMEM on failure.
+ */
+int mdss_debug_register_base(const char *name, void __iomem *base,
+			     size_t max_offset, struct mdss_debug_base **dbg_blk)
+{
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_debug_data *mdd;
+	struct mdss_debug_base *dbg;
+	struct dentry *ent_off, *ent_reg;
+	char dn[80] = "";
+	int prefix_len = 0;
+
+	if (dbg_blk)
+		(*dbg_blk) = NULL;
+
+	if (!mdata || !mdata->debug_inf.debug_data)
+		return -ENODEV;
+
+	mdd = mdata->debug_inf.debug_data;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	if (name)
+		strlcpy(dbg->name, name, sizeof(dbg->name));
+	dbg->base = base;
+	dbg->max_offset = max_offset;
+	dbg->off = 0;
+	dbg->cnt = DEFAULT_BASE_REG_CNT;
+	dbg->reg_dump = NULL;
+
+	/* the top-level "mdp" range keeps unprefixed node names */
+	if (name && strcmp(name, "mdp"))
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_off_fops);
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	/* Initialize list to make sure check for null list will be valid */
+	INIT_LIST_HEAD(&dbg->dump_list);
+
+	list_add(&dbg->head, &mdd->base_list);
+
+	if (dbg_blk)
+		(*dbg_blk) = dbg;
+
+	return 0;
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	kfree(dbg);
+	return -ENODEV;
+}
+
+/*
+ * Look up the human-readable name for dump range 'index' from the DT
+ * string-list property 'name_prop'. Falls back to "<no named range>"
+ * when the index has no entry or the read fails.
+ */
+static void parse_dump_range_name(struct device_node *node,
+	int total_names, int index, char *range_name, u32 range_size,
+	const char *name_prop)
+{
+	const char *st = NULL;
+	int rc;
+
+	if (total_names <= 0 || index >= total_names) {
+		snprintf(range_name, range_size, "%s", "<no named range>");
+		return;
+	}
+
+	rc = of_property_read_string_index(node, name_prop, index, &st);
+	if (rc) {
+		pr_err("error reading name. index=%d, rc=%d\n",
+			index, rc);
+		snprintf(range_name, range_size, "%s", "<no named range>");
+		return;
+	}
+
+	snprintf(range_name, range_size, "%s", st);
+}
+
+/*
+ * Build the xlog register-dump list from DT: @arr holds @count
+ * {start, end} offset pairs; names come from @name_prop and xin ids
+ * from @xin_prop. Nodes are appended to @xlog_dump_list (owned by the
+ * caller once queued).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int parse_dt_xlog_dump_list(const u32 *arr, int count,
+	struct list_head *xlog_dump_list, struct platform_device *pdev,
+	const char *name_prop, const char *xin_prop)
+{
+	struct range_dump_node *xlog_node;
+	u32 len;
+	int i, total_names, total_xin_ids = 0, rc;
+	u32 *offsets = NULL;
+
+	/* Get the property with the name of the ranges */
+	total_names = of_property_count_strings(pdev->dev.of_node,
+		name_prop);
+	if (total_names < 0) {
+		pr_warn("dump names not found. rc=%d\n", total_names);
+		total_names = 0;
+	}
+
+	/*
+	 * of_find_property() only writes the length when the property
+	 * exists; check its return value so total_xin_ids is never read
+	 * uninitialized when xin_prop is absent.
+	 */
+	if (of_find_property(pdev->dev.of_node, xin_prop, &total_xin_ids) &&
+			(total_xin_ids > 0)) {
+		total_xin_ids /= sizeof(u32);
+		offsets = kcalloc(total_xin_ids, sizeof(u32), GFP_KERNEL);
+		if (offsets) {
+			rc = of_property_read_u32_array(pdev->dev.of_node,
+				xin_prop, offsets, total_xin_ids);
+			if (rc)
+				total_xin_ids = 0;
+		} else {
+			total_xin_ids = 0;
+		}
+	} else {
+		total_xin_ids = 0;
+	}
+
+	for (i = 0, len = count * 2; i < len; i += 2) {
+		xlog_node = kzalloc(sizeof(*xlog_node), GFP_KERNEL);
+		if (!xlog_node) {
+			/* previously queued nodes belong to the caller */
+			kfree(offsets);
+			return -ENOMEM;
+		}
+
+		xlog_node->offset.start = be32_to_cpu(arr[i]);
+		xlog_node->offset.end = be32_to_cpu(arr[i + 1]);
+
+		parse_dump_range_name(pdev->dev.of_node, total_names, i/2,
+			xlog_node->range_name,
+			ARRAY_SIZE(xlog_node->range_name), name_prop);
+
+		if ((i / 2) < total_xin_ids)
+			xlog_node->xin_id = offsets[i / 2];
+		else
+			xlog_node->xin_id = INVALID_XIN_ID;
+
+		list_add_tail(&xlog_node->head, xlog_dump_list);
+	}
+
+	kfree(offsets);
+	return 0;
+}
+
+/**
+ * mdss_debug_register_dump_range() - attach DT-described dump ranges to a block
+ * @blk_base:    debug base descriptor whose dump_list is populated
+ * @ranges_prop: DT property holding {start, end} offset pairs
+ * @name_prop:   DT property holding the matching range names
+ * @xin_prop:    optional DT property holding VBIF xin ids per range
+ */
+void mdss_debug_register_dump_range(struct platform_device *pdev,
+	struct mdss_debug_base *blk_base, const char *ranges_prop,
+	const char *name_prop, const char *xin_prop)
+{
+	int mdp_len;
+	const u32 *mdp_arr;
+
+	if (!blk_base || !ranges_prop || !name_prop)
+		return;
+
+	mdp_arr = of_get_property(pdev->dev.of_node, ranges_prop,
+			&mdp_len);
+	if (!mdp_arr) {
+		pr_warn("No xlog range dump found, continue\n");
+		mdp_len = 0;
+	} else {
+		/* 2 is the number of entries per row to calculate the rows */
+		mdp_len /= 2 * sizeof(u32);
+		parse_dt_xlog_dump_list(mdp_arr, mdp_len, &blk_base->dump_list,
+			pdev, name_prop, xin_prop);
+	}
+}
+
+/*
+ * Update a mult_factor from user input: either "numer/denom" as a
+ * fraction or a bare integer interpreted as a percentage (denom = 100).
+ * Zero values are ignored so the factor is never made degenerate.
+ */
+static ssize_t mdss_debug_factor_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mult_factor *factor = file->private_data;
+	char kbuf[32];
+	u32 new_numer;
+	u32 new_denom;
+
+	if (!factor)
+		return -ENODEV;
+
+	new_numer = factor->numer;
+	new_denom = factor->denom;
+
+	if (count >= sizeof(kbuf))
+		return -EFAULT;
+
+	if (copy_from_user(kbuf, user_buf, count))
+		return -EFAULT;
+
+	kbuf[count] = 0; /* end of string */
+
+	if (!strnchr(kbuf, count, '/')) {
+		/* Parsing buf as percentage */
+		if (kstrtouint(kbuf, 0, &new_numer))
+			return -EFAULT;
+		new_denom = 100;
+	} else {
+		/* Parsing buf as fraction */
+		if (sscanf(kbuf, "%u/%u", &new_numer, &new_denom) != 2)
+			return -EFAULT;
+	}
+
+	if (new_numer && new_denom) {
+		factor->numer = new_numer;
+		factor->denom = new_denom;
+	}
+
+	pr_debug("numer=%d denom=%d\n", new_numer, new_denom);
+
+	return count;
+}
+
+/*
+ * Report the current factor as "numer/denom". Single-shot read: a
+ * non-zero *ppos signals EOF on subsequent calls.
+ */
+static ssize_t mdss_debug_factor_read(struct file *file,
+	char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mult_factor *factor = file->private_data;
+	int len = 0;
+	char buf[32] = {'\0'};
+
+	if (!factor)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "%d/%d\n",
+		factor->numer, factor->denom);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	/* NOTE(review): requires the caller's buffer to be >= sizeof(buf) */
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len; /* increase offset */
+
+	return len;
+}
+
+/* debugfs fops for the ab/ib/clk factor nodes (mult_factor private_data) */
+static const struct file_operations mdss_factor_fops = {
+	.open = simple_open,
+	.read = mdss_debug_factor_read,
+	.write = mdss_debug_factor_write,
+};
+
+/*
+ * Toggle performance mode: non-zero pins the MDP clock and bus vote to
+ * their maxima, zero restores the default (0) tuning values.
+ */
+static ssize_t mdss_debug_perf_mode_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_perf_tune *perf_tune = file->private_data;
+	struct mdss_data_type *mdata = mdss_res;
+	int perf_mode = 0;
+	char buf[10];
+
+	if (!perf_tune)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	/*
+	 * kstrtoint() returns 0 on success, not a conversion count like
+	 * sscanf(); the previous "!= 1" test rejected every valid input.
+	 */
+	if (kstrtoint(buf, 10, &perf_mode))
+		return -EFAULT;
+
+	if (perf_mode) {
+		/* run the driver with max clk and BW vote */
+		mdata->perf_tune.min_mdp_clk = mdata->max_mdp_clk_rate;
+		mdata->perf_tune.min_bus_vote = (u64)mdata->max_bw_high*1000;
+	} else {
+		/* reset the perf tune params to 0 */
+		mdata->perf_tune.min_mdp_clk = 0;
+		mdata->perf_tune.min_bus_vote = 0;
+	}
+	return count;
+}
+
+/* Report the current perf-tune clock floor and bus vote; single-shot read. */
+static ssize_t mdss_debug_perf_mode_read(struct file *file,
+	char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_perf_tune *perf_tune = file->private_data;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!perf_tune)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
+		perf_tune->min_mdp_clk, perf_tune->min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	/* NOTE(review): requires the caller's buffer to be >= sizeof(buf) */
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len; /* increase offset */
+
+	return len;
+}
+
+
+/* debugfs fops for the "perf_mode" node (mdss_perf_tune private_data) */
+static const struct file_operations mdss_perf_mode_fops = {
+	.open = simple_open,
+	.read = mdss_debug_perf_mode_read,
+	.write = mdss_debug_perf_mode_write,
+};
+
+/*
+ * Report whether panic signalling is disabled: prints the inverse of
+ * has_panic_ctrl (1 = panic disabled). Single-shot read.
+ */
+static ssize_t mdss_debug_perf_panic_read(struct file *file,
+	char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!mdata)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "%d\n",
+		!mdata->has_panic_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len; /* increase offset */
+
+	return len;
+}
+
+/*
+ * Toggle the panic signal on every in-use pipe of @pipe_pool (a pipe is
+ * in use when its kref count is non-zero and the HW mode supports panic
+ * signalling).
+ *
+ * Return: number of pipes whose panic signal was changed.
+ */
+static int mdss_debug_set_panic_signal(struct mdss_mdp_pipe *pipe_pool,
+	u32 pool_size, struct mdss_data_type *mdata, bool enable)
+{
+	int i, cnt = 0;
+	struct mdss_mdp_pipe *pipe;
+
+	for (i = 0; i < pool_size; i++) {
+		pipe = pipe_pool + i;
+		if (pipe && (atomic_read(&pipe->kref.refcount) != 0) &&
+			mdss_mdp_panic_signal_support_mode(mdata)) {
+			mdss_mdp_pipe_panic_signal_ctrl(pipe, enable);
+			pr_debug("pnum:%d count:%d img:%dx%d ",
+				pipe->num, pipe->play_cnt, pipe->img_width,
+				pipe->img_height);
+			pr_cont("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				pipe->src.x, pipe->src.y, pipe->src.w,
+				pipe->src.h, pipe->dst.x, pipe->dst.y,
+				pipe->dst.w, pipe->dst.h);
+			cnt++;
+		} else if (pipe) {
+			/* NOTE(review): "num" here prints the refcount, not pipe->num */
+			pr_debug("Inactive pipe num:%d supported:%d\n",
+				atomic_read(&pipe->kref.refcount),
+				mdss_mdp_panic_signal_support_mode(mdata));
+		}
+	}
+	return cnt;
+}
+
+/* Apply panic-signal @enable across all VIG, RGB and DMA pipe pools. */
+static void mdss_debug_set_panic_state(struct mdss_data_type *mdata,
+	bool enable)
+{
+	pr_debug("VIG:\n");
+	if (!mdss_debug_set_panic_signal(mdata->vig_pipes, mdata->nvig_pipes,
+		mdata, enable))
+		pr_debug("no active pipes found\n");
+	pr_debug("RGB:\n");
+	if (!mdss_debug_set_panic_signal(mdata->rgb_pipes, mdata->nrgb_pipes,
+		mdata, enable))
+		pr_debug("no active pipes found\n");
+	pr_debug("DMA:\n");
+	/* fix: the DMA pass iterated vig_pipes with the DMA count */
+	if (!mdss_debug_set_panic_signal(mdata->dma_pipes, mdata->ndma_pipes,
+		mdata, enable))
+		pr_debug("no active pipes found\n");
+}
+
+/*
+ * Enable/disable panic signalling: writing non-zero disables the panic
+ * signal on all active pipes, zero re-enables it.
+ */
+static ssize_t mdss_debug_perf_panic_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	int disable_panic;
+	char buf[10];
+
+	if (!mdata)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	/*
+	 * kstrtoint() returns 0 on success, not a conversion count like
+	 * sscanf(); the previous "!= 1" test rejected every valid input.
+	 */
+	if (kstrtoint(buf, 10, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		pr_debug("Disabling panic:\n");
+		mdss_debug_set_panic_state(mdata, false);
+		mdata->has_panic_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		pr_debug("Enabling panic:\n");
+		mdata->has_panic_ctrl = true;
+		mdss_debug_set_panic_state(mdata, true);
+	}
+
+	return count;
+}
+
+/* debugfs fops for the "disable_panic" node (mdss_data_type private_data) */
+static const struct file_operations mdss_perf_panic_enable = {
+	.open = simple_open,
+	.read = mdss_debug_perf_panic_read,
+	.write = mdss_debug_perf_panic_write,
+};
+
+/*
+ * Tear down the debugfs state: free every registered base descriptor,
+ * remove the debugfs tree and free @mdd itself. Safe to call with NULL.
+ */
+static int mdss_debugfs_cleanup(struct mdss_debug_data *mdd)
+{
+	struct mdss_debug_base *base, *tmp;
+
+	if (!mdd)
+		return 0;
+
+	list_for_each_entry_safe(base, tmp, &mdd->base_list, head) {
+		list_del(&base->head);
+		kfree(base);
+	}
+
+	/* mdd is known non-NULL here; the old re-check was redundant */
+	debugfs_remove_recursive(mdd->root);
+
+	kfree(mdd);
+
+	return 0;
+}
+
+/*
+ * List the configured "<mode> <bw>" bandwidth-limit pairs, one per line.
+ * Single-shot read.
+ */
+static ssize_t mdss_debug_perf_bw_limit_read(struct file *file,
+	char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	struct mdss_max_bw_settings *temp_settings;
+	int len = 0, i;
+	char buf[256];
+
+	if (!mdata)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	pr_debug("mdata->max_bw_settings_cnt = %d\n",
+		mdata->max_bw_settings_cnt);
+
+	temp_settings = mdata->max_bw_settings;
+	for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
+		if (len >= sizeof(buf))
+			break;
+		/*
+		 * fix: the remaining space is sizeof(buf) - len; passing
+		 * sizeof(buf) allowed writes past the end of buf.
+		 */
+		len += snprintf(buf + len, sizeof(buf) - len, "%d %d\n",
+			temp_settings->mdss_max_bw_mode,
+			temp_settings->mdss_max_bw_val);
+		temp_settings++;
+	}
+
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len; /* increase offset */
+
+	return len;
+}
+
+/*
+ * Update the bandwidth limit for an existing "<mode> <bw>" pair; an
+ * unknown mode is logged and ignored.
+ */
+static ssize_t mdss_debug_perf_bw_limit_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	char buf[32];
+	u32 mode = 0, val = 0;
+	u32 cnt;
+	bool found = false;
+	struct mdss_max_bw_settings *temp_settings;
+
+	if (!mdata)
+		return -ENODEV;
+
+	cnt = mdata->max_bw_settings_cnt;
+	temp_settings = mdata->max_bw_settings;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (strnchr(buf, count, ' ')) {
+		/* Parsing buf */
+		if (sscanf(buf, "%u %u", &mode, &val) != 2)
+			return -EFAULT;
+	}
+
+	while (cnt--) {
+		if (mode == temp_settings->mdss_max_bw_mode) {
+			temp_settings->mdss_max_bw_val = val;
+			found = true;
+			break;
+		}
+		temp_settings++;
+	}
+
+	/*
+	 * fix: the old "if (cnt == 0)" check misfired — cnt wraps to
+	 * UINT_MAX when no entry matches, and a match on the last entry
+	 * left cnt == 0, falsely reporting a valid mode as invalid.
+	 */
+	if (!found)
+		pr_err("Input mode is invalid\n");
+
+	return count;
+}
+
+/* debugfs fops for the "threshold_bw_limit" node */
+static const struct file_operations mdss_perf_bw_limit_fops = {
+	.open = simple_open,
+	.read = mdss_debug_perf_bw_limit_read,
+	.write = mdss_debug_perf_bw_limit_write,
+};
+
+/*
+ * Populate the debugfs "perf" directory with the clock/bus/bandwidth
+ * tuning knobs. Always returns 0; debugfs creation failures are not
+ * treated as fatal here.
+ */
+static int mdss_debugfs_perf_init(struct mdss_debug_data *mdd,
+	struct mdss_data_type *mdata) {
+
+	debugfs_create_u32("min_mdp_clk", 0644, mdd->perf,
+		(u32 *)&mdata->perf_tune.min_mdp_clk);
+
+	debugfs_create_u64("min_bus_vote", 0644, mdd->perf,
+		(u64 *)&mdata->perf_tune.min_bus_vote);
+
+	debugfs_create_u32("disable_prefill", 0644, mdd->perf,
+		(u32 *)&mdata->disable_prefill);
+
+	debugfs_create_file("disable_panic", 0644, mdd->perf,
+		(struct mdss_data_type *)mdata, &mdss_perf_panic_enable);
+
+	debugfs_create_bool("enable_bw_release", 0644, mdd->perf,
+		(bool *)&mdata->enable_bw_release);
+
+	debugfs_create_bool("enable_rotator_bw_release", 0644, mdd->perf,
+		(bool *)&mdata->enable_rotator_bw_release);
+
+	debugfs_create_file("ab_factor", 0644, mdd->perf,
+		&mdata->ab_factor, &mdss_factor_fops);
+
+	debugfs_create_file("ib_factor", 0644, mdd->perf,
+		&mdata->ib_factor, &mdss_factor_fops);
+
+	debugfs_create_file("ib_factor_overlap", 0644, mdd->perf,
+		&mdata->ib_factor_overlap, &mdss_factor_fops);
+
+	debugfs_create_file("clk_factor", 0644, mdd->perf,
+		&mdata->clk_factor, &mdss_factor_fops);
+
+	debugfs_create_u32("threshold_low", 0644, mdd->perf,
+		(u32 *)&mdata->max_bw_low);
+
+	debugfs_create_u32("threshold_high", 0644, mdd->perf,
+		(u32 *)&mdata->max_bw_high);
+
+	debugfs_create_u32("threshold_pipe", 0644, mdd->perf,
+		(u32 *)&mdata->max_bw_per_pipe);
+
+	debugfs_create_file("perf_mode", 0644, mdd->perf,
+		(u32 *)&mdata->perf_tune, &mdss_perf_mode_fops);
+
+	/* Initialize percentage to 0% */
+	mdata->latency_buff_per = 0;
+	debugfs_create_u32("latency_buff_per", 0644, mdd->perf,
+		(u32 *)&mdata->latency_buff_per);
+
+	debugfs_create_file("threshold_bw_limit", 0644, mdd->perf,
+		(struct mdss_data_type *)mdata, &mdss_perf_bw_limit_fops);
+
+	debugfs_create_u32("lines_before_active", 0644, mdd->perf,
+		(u32 *)&mdata->lines_before_active);
+
+	return 0;
+}
+
+/**
+ * mdss_debugfs_init() - create the MDSS debugfs tree
+ * @mdata: driver private data; debug_inf.debug_data is set on success
+ *
+ * Creates the "mdp" root plus the perf/bordercolor/postproc sub-dirs,
+ * the perf tuning knobs, and the xlog/FRC debug nodes.
+ *
+ * Return: 0 on success, -EBUSY if already initialized, -ENOMEM or
+ * -ENODEV on failure (partially-created state is cleaned up).
+ */
+int mdss_debugfs_init(struct mdss_data_type *mdata)
+{
+	struct mdss_debug_data *mdd;
+
+	if (mdata->debug_inf.debug_data) {
+		pr_warn("mdss debugfs already initialized\n");
+		return -EBUSY;
+	}
+
+	mdd = kzalloc(sizeof(*mdd), GFP_KERNEL);
+	if (!mdd)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mdd->base_list);
+
+	mdd->root = debugfs_create_dir("mdp", NULL);
+	if (IS_ERR_OR_NULL(mdd->root)) {
+		pr_err("debugfs_create_dir for mdp failed, error %ld\n",
+			PTR_ERR(mdd->root));
+		goto err;
+	}
+
+	mdd->perf = debugfs_create_dir("perf", mdd->root);
+	if (IS_ERR_OR_NULL(mdd->perf)) {
+		pr_err("debugfs_create_dir perf fail, error %ld\n",
+			PTR_ERR(mdd->perf));
+		goto err;
+	}
+
+	mdd->bordercolor = debugfs_create_dir("bordercolor", mdd->root);
+	if (IS_ERR_OR_NULL(mdd->bordercolor)) {
+		pr_err("debugfs_create_dir for bordercolor failed, error %ld\n",
+			PTR_ERR(mdd->bordercolor));
+		goto err;
+	}
+
+	mdd->postproc = debugfs_create_dir("postproc", mdd->root);
+	if (IS_ERR_OR_NULL(mdd->postproc)) {
+		pr_err("debugfs_create_dir postproc for mdp failed, error %ld\n",
+			PTR_ERR(mdd->postproc));
+		goto err;
+	}
+	mdss_debugfs_perf_init(mdd, mdata);
+
+	if (mdss_create_xlog_debug(mdd))
+		goto err;
+
+	if (mdss_create_frc_debug(mdd))
+		goto err;
+
+	mdata->debug_inf.debug_data = mdd;
+
+	return 0;
+
+err:
+	mdss_debugfs_cleanup(mdd);
+	return -ENODEV;
+}
+
+/* Tear down the MDSS debugfs tree and clear the driver's reference. */
+int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+	mdss_debugfs_cleanup(mdata->debug_inf.debug_data);
+	mdata->debug_inf.debug_data = NULL;
+
+	return 0;
+}
+
+/* vsync ticks seen since MISR batch start; reset at MAX_VSYNC_COUNT */
+int vsync_count;
+/*
+ * Per-display MISR state: register offsets, current op mode, the last
+ * captured signature, and a ping/pong pair of CRC batch buffers.
+ */
+static struct mdss_mdp_misr_map {
+	u32 ctrl_reg;	/* MISR control register offset from mdp_base */
+	u32 value_reg;	/* MISR signature register offset from mdp_base */
+	u32 crc_op_mode;
+	u32 crc_index;	/* next write slot in the active batch buffer */
+	u32 last_misr;	/* last captured CRC, cached for sysfs */
+	bool use_ping;	/* which batch buffer is currently filling */
+	bool is_ping_full;
+	bool is_pong_full;
+	struct mutex crc_lock;
+	u32 crc_ping[MISR_CRC_BATCH_SIZE];
+	u32 crc_pong[MISR_CRC_BATCH_SIZE];
+} mdss_mdp_misr_table[DISPLAY_MISR_MAX] = {
+	[DISPLAY_MISR_DSI0] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_DSI0,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_DSI0,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_DSI1] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_DSI1,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_DSI1,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_EDP] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_EDP,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_EDP,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_HDMI] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_HDMI,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_HDMI,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_MDP] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_MDP,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_MDP,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+};
+
+/*
+ * Resolve the MISR map for @block_id. On MDP rev >= 1.5 the control and
+ * signature register offsets are computed from the mixer base (for the
+ * MDP/writeback block) or the interface base (for DSI/eDP/HDMI) and
+ * cached back into mdss_mdp_misr_table; older revisions keep the static
+ * table offsets. Returns NULL for an out-of-range or unconfigured block.
+ */
+static inline struct mdss_mdp_misr_map *mdss_misr_get_map(u32 block_id,
+	struct mdss_mdp_ctl *ctl, struct mdss_data_type *mdata,
+	bool is_video_mode)
+{
+	struct mdss_mdp_misr_map *map;
+	struct mdss_mdp_mixer *mixer;
+	char *ctrl_reg = NULL, *value_reg = NULL;
+	char *intf_base = NULL;
+
+	if (block_id > DISPLAY_MISR_HDMI && block_id != DISPLAY_MISR_MDP) {
+		pr_err("MISR Block id (%d) out of range\n", block_id);
+		return NULL;
+	}
+
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105) {
+		/* Use updated MDP Interface MISR Block address offset */
+		if (block_id == DISPLAY_MISR_MDP) {
+			if (ctl) {
+				mixer = mdss_mdp_mixer_get(ctl,
+					MDSS_MDP_MIXER_MUX_DEFAULT);
+
+				if (mixer) {
+					ctrl_reg = mixer->base +
+						MDSS_MDP_LAYER_MIXER_MISR_CTRL;
+					value_reg = mixer->base +
+						MDSS_MDP_LAYER_MIXER_MISR_SIGNATURE;
+				}
+			}
+		} else {
+			if (block_id <= DISPLAY_MISR_HDMI) {
+				intf_base = (char *)mdss_mdp_get_intf_base_addr(
+					mdata, block_id);
+
+				/* DSI command mode uses a dedicated MISR block */
+				if ((block_id == DISPLAY_MISR_DSI0 ||
+				     block_id == DISPLAY_MISR_DSI1) &&
+				     !is_video_mode) {
+					ctrl_reg = intf_base +
+						MDSS_MDP_INTF_CMD_MISR_CTRL;
+					value_reg = intf_base +
+						MDSS_MDP_INTF_CMD_MISR_SIGNATURE;
+
+					/*
+					 * extra offset required for
+					 * cmd misr in 8996
+					 */
+					if (IS_MDSS_MAJOR_MINOR_SAME(
+						  mdata->mdp_rev,
+						  MDSS_MDP_HW_REV_107)) {
+						ctrl_reg += 0x8;
+						value_reg += 0x8;
+					}
+
+				} else {
+					ctrl_reg = intf_base +
+						MDSS_MDP_INTF_MISR_CTRL;
+					value_reg = intf_base +
+						MDSS_MDP_INTF_MISR_SIGNATURE;
+				}
+			}
+			/*
+			 * For msm8916/8939, additional offset of 0x10
+			 * is required
+			 */
+			if ((mdata->mdp_rev == MDSS_MDP_HW_REV_106) ||
+				(mdata->mdp_rev == MDSS_MDP_HW_REV_108) ||
+				(mdata->mdp_rev == MDSS_MDP_HW_REV_112)) {
+				ctrl_reg += 0x10;
+				value_reg += 0x10;
+			}
+		}
+		/* cache the computed offsets relative to mdp_base */
+		mdss_mdp_misr_table[block_id].ctrl_reg = (u32)(ctrl_reg -
+			mdata->mdp_base);
+		mdss_mdp_misr_table[block_id].value_reg = (u32)(value_reg -
+			mdata->mdp_base);
+	}
+
+	map = mdss_mdp_misr_table + block_id;
+	if ((map->ctrl_reg == 0) || (map->value_reg == 0)) {
+		pr_err("MISR Block id (%d) config not found\n", block_id);
+		return NULL;
+	}
+
+	pr_debug("MISR Module(%d) CTRL(0x%x) SIG(0x%x) intf_base(0x%pK)\n",
+		block_id, map->ctrl_reg, map->value_reg, intf_base);
+	return map;
+}
+
+/*
+ * switch_mdp_misr_offset() - Update MDP MISR register offset for MDSS
+ * Hardware Revision 103.
+ * @map: mdss_mdp_misr_map
+ * @mdp_rev: MDSS Hardware Revision
+ * @block_id: Logical MISR Block ID
+ *
+ * Return: true when MDSS Revision is 103 else false.
+ */
+static bool switch_mdp_misr_offset(struct mdss_mdp_misr_map *map, u32 mdp_rev,
+	u32 block_id)
+{
+	bool use_mdp_up_misr;
+
+	use_mdp_up_misr = (block_id == DISPLAY_MISR_MDP) &&
+		IS_MDSS_MAJOR_MINOR_SAME(mdp_rev, MDSS_MDP_HW_REV_103);
+
+	if (use_mdp_up_misr) {
+		/* Use Upper pipe MISR for Layer Mixer CRC */
+		map->ctrl_reg = MDSS_MDP_UP_MISR_CTRL_MDP;
+		map->value_reg = MDSS_MDP_UP_MISR_SIGN_MDP;
+	}
+
+	pr_debug("MISR Module(%d) Offset of MISR_CTRL = 0x%x MISR_SIG = 0x%x\n",
+		block_id, map->ctrl_reg, map->value_reg);
+
+	return use_mdp_up_misr;
+}
+
+/*
+ * Disable MISR capture for the block named in @req: reset the software
+ * batch state and clear the hardware control/status register.
+ */
+void mdss_misr_disable(struct mdss_data_type *mdata,
+			struct mdp_misr *req,
+			struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_misr_map *map;
+
+	/* guard against NULL inputs, consistent with mdss_misr_set() */
+	if (!mdata || !req || !ctl)
+		return;
+
+	map = mdss_misr_get_map(req->block_id, ctl, mdata,
+		ctl->is_video_mode);
+
+	if (!map)
+		return;
+
+	/* clear the map data */
+	memset(map->crc_ping, 0, sizeof(map->crc_ping));
+	memset(map->crc_pong, 0, sizeof(map->crc_pong));
+	map->crc_index = 0;
+	map->use_ping = true;
+	map->is_ping_full = false;
+	map->is_pong_full = false;
+	map->crc_op_mode = 0;
+	map->last_misr = 0;
+
+	/* disable MISR and clear the status */
+	writel_relaxed(MDSS_MDP_MISR_CTRL_STATUS_CLEAR,
+		mdata->mdp_base + map->ctrl_reg);
+
+	/* make sure status is clear */
+	wmb();
+}
+
+/*
+ * Arm MISR capture for the block named in @req: select the layer mixer
+ * for writeback CRC where needed, clear stale state, and program the
+ * control register (except in batch mode, which is armed from the vsync
+ * collector).
+ *
+ * Return: 0 on success, -EINVAL on bad parameters or unknown block.
+ */
+int mdss_misr_set(struct mdss_data_type *mdata,
+			struct mdp_misr *req,
+			struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_misr_map *map;
+	struct mdss_mdp_mixer *mixer;
+	u32 config = 0, val = 0;
+	u32 mixer_num = 0;
+	bool is_valid_wb_mixer = true;
+	bool use_mdp_up_misr = false;
+
+	if (!mdata || !req || !ctl) {
+		pr_err("Invalid input params: mdata = %pK req = %pK ctl = %pK",
+			mdata, req, ctl);
+		return -EINVAL;
+	}
+	pr_debug("req[block:%d frame:%d op_mode:%d]\n",
+		req->block_id, req->frame_count, req->crc_op_mode);
+
+	map = mdss_misr_get_map(req->block_id, ctl, mdata,
+		ctl->is_video_mode);
+	if (!map) {
+		pr_err("Invalid MISR Block=%d\n", req->block_id);
+		return -EINVAL;
+	}
+	use_mdp_up_misr = switch_mdp_misr_offset(map, mdata->mdp_rev,
+		req->block_id);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if (req->block_id == DISPLAY_MISR_MDP) {
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+		if (!mixer) {
+			pr_err("failed to get default mixer, Block=%d\n",
+				req->block_id);
+			/* fix: drop the clock vote taken above before bailing */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			return -EINVAL;
+		}
+		mixer_num = mixer->num;
+		pr_debug("SET MDP MISR BLK to MDSS_MDP_LP_MISR_SEL_LMIX%d_GC\n",
+			mixer_num);
+		switch (mixer_num) {
+		case MDSS_MDP_INTF_LAYERMIXER0:
+			pr_debug("Use Layer Mixer 0 for WB CRC\n");
+			val = MDSS_MDP_LP_MISR_SEL_LMIX0_GC;
+			break;
+		case MDSS_MDP_INTF_LAYERMIXER1:
+			pr_debug("Use Layer Mixer 1 for WB CRC\n");
+			val = MDSS_MDP_LP_MISR_SEL_LMIX1_GC;
+			break;
+		case MDSS_MDP_INTF_LAYERMIXER2:
+			pr_debug("Use Layer Mixer 2 for WB CRC\n");
+			val = MDSS_MDP_LP_MISR_SEL_LMIX2_GC;
+			break;
+		default:
+			pr_err("Invalid Layer Mixer %d selected for WB CRC\n",
+				mixer_num);
+			is_valid_wb_mixer = false;
+			break;
+		}
+		if ((is_valid_wb_mixer) &&
+			(mdata->mdp_rev < MDSS_MDP_HW_REV_106)) {
+			if (use_mdp_up_misr)
+				writel_relaxed((val +
+					MDSS_MDP_UP_MISR_LMIX_SEL_OFFSET),
+					(mdata->mdp_base +
+					 MDSS_MDP_UP_MISR_SEL));
+			else
+				writel_relaxed(val,
+					(mdata->mdp_base +
+					 MDSS_MDP_LP_MISR_SEL));
+		}
+	}
+	vsync_count = 0;
+	map->crc_op_mode = req->crc_op_mode;
+	config = (MDSS_MDP_MISR_CTRL_FRAME_COUNT_MASK & req->frame_count) |
+			(MDSS_MDP_MISR_CTRL_ENABLE);
+
+	writel_relaxed(MDSS_MDP_MISR_CTRL_STATUS_CLEAR,
+			mdata->mdp_base + map->ctrl_reg);
+	/* ensure clear is done */
+	wmb();
+
+	memset(map->crc_ping, 0, sizeof(map->crc_ping));
+	memset(map->crc_pong, 0, sizeof(map->crc_pong));
+	map->crc_index = 0;
+	map->use_ping = true;
+	map->is_ping_full = false;
+	map->is_pong_full = false;
+
+	if (map->crc_op_mode != MISR_OP_BM) {
+
+		writel_relaxed(config,
+			mdata->mdp_base + map->ctrl_reg);
+		pr_debug("MISR_CTRL=0x%x [base:0x%pK reg:0x%x config:0x%x]\n",
+			readl_relaxed(mdata->mdp_base + map->ctrl_reg),
+			mdata->mdp_base, map->ctrl_reg, config);
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return 0;
+}
+
+/* Map a logical MISR block id to a human-readable name for logging. */
+char *get_misr_block_name(int misr_block_id)
+{
+	char *name;
+
+	switch (misr_block_id) {
+	case DISPLAY_MISR_EDP:
+		name = "eDP";
+		break;
+	case DISPLAY_MISR_DSI0:
+		name = "DSI_0";
+		break;
+	case DISPLAY_MISR_DSI1:
+		name = "DSI_1";
+		break;
+	case DISPLAY_MISR_HDMI:
+		name = "HDMI";
+		break;
+	case DISPLAY_MISR_MDP:
+		name = "Writeback";
+		break;
+	case DISPLAY_MISR_DSI_CMD:
+		name = "DSI_CMD";
+		break;
+	default:
+		name = "???";
+		break;
+	}
+
+	return name;
+}
+
+/*
+ * Read back captured MISR CRCs. In single/multi-frame mode the status
+ * bit is polled (with one retry) and a single signature is returned; in
+ * batch mode a full ping or pong buffer is drained into @resp.
+ *
+ * Return: 0 on success, -EINVAL for an unknown block, -ETIMEDOUT-style
+ * poll error or -ENOTSUPP for an unsupported op mode; -1 in batch mode
+ * when neither buffer is full yet.
+ */
+int mdss_misr_get(struct mdss_data_type *mdata,
+			struct mdp_misr *resp,
+			struct mdss_mdp_ctl *ctl,
+			bool is_video_mode)
+{
+	struct mdss_mdp_misr_map *map;
+	struct mdss_mdp_mixer *mixer;
+	u32 status;
+	int ret = -1;
+	int i;
+
+	pr_debug("req[block:%d frame:%d op_mode:%d]\n",
+		resp->block_id, resp->frame_count, resp->crc_op_mode);
+
+	map = mdss_misr_get_map(resp->block_id, ctl, mdata,
+		is_video_mode);
+	if (!map) {
+		pr_err("Invalid MISR Block=%d\n", resp->block_id);
+		return -EINVAL;
+	}
+	switch_mdp_misr_offset(map, mdata->mdp_rev, resp->block_id);
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	switch (map->crc_op_mode) {
+	case MISR_OP_SFM:
+	case MISR_OP_MFM:
+		ret = readl_poll_timeout(mdata->mdp_base + map->ctrl_reg,
+			status, status & MDSS_MDP_MISR_CTRL_STATUS,
+			MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+		if (ret == 0) {
+			resp->crc_value[0] = readl_relaxed(mdata->mdp_base +
+				map->value_reg);
+			pr_debug("CRC %s=0x%x\n",
+				get_misr_block_name(resp->block_id),
+				resp->crc_value[0]);
+			writel_relaxed(0, mdata->mdp_base + map->ctrl_reg);
+		} else {
+			pr_debug("Get MISR TimeOut %s\n",
+				get_misr_block_name(resp->block_id));
+
+			/* one retry before declaring a hard timeout */
+			ret = readl_poll_timeout(mdata->mdp_base +
+				map->ctrl_reg, status,
+				status & MDSS_MDP_MISR_CTRL_STATUS,
+				MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+			if (ret == 0) {
+				resp->crc_value[0] =
+					readl_relaxed(mdata->mdp_base +
+					map->value_reg);
+				pr_debug("Retry CRC %s=0x%x\n",
+					get_misr_block_name(resp->block_id),
+					resp->crc_value[0]);
+			} else {
+				pr_err("Get MISR TimeOut %s\n",
+					get_misr_block_name(resp->block_id));
+			}
+			writel_relaxed(0, mdata->mdp_base + map->ctrl_reg);
+		}
+		break;
+	case MISR_OP_BM:
+		/* drain whichever batch buffer filled first */
+		if (map->is_ping_full) {
+			for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
+				resp->crc_value[i] = map->crc_ping[i];
+			memset(map->crc_ping, 0, sizeof(map->crc_ping));
+			map->is_ping_full = false;
+			ret = 0;
+		} else if (map->is_pong_full) {
+			for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
+				resp->crc_value[i] = map->crc_pong[i];
+			memset(map->crc_pong, 0, sizeof(map->crc_pong));
+			map->is_pong_full = false;
+			ret = 0;
+		} else {
+			pr_debug("mdss_mdp_misr_crc_get PING BUF %s\n",
+				map->is_ping_full ? "FULL" : "EMPTRY");
+			pr_debug("mdss_mdp_misr_crc_get PONG BUF %s\n",
+				map->is_pong_full ? "FULL" : "EMPTRY");
+		}
+		resp->crc_op_mode = map->crc_op_mode;
+		break;
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
+/*
+ * This function is expected to be called from interrupt context.
+ *
+ * Batch-mode (MISR_OP_BM) vsync collector: when the MISR status bit is
+ * set, read the signature and store it into the active ping/pong
+ * buffer, flipping buffers when one fills; when status is zero, arm the
+ * next batch capture. Also maintains the vsync_count used for tracing.
+ */
+void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id,
+	bool is_video_mode)
+{
+	struct mdss_mdp_misr_map *map;
+	u32 status = 0;
+	u32 crc = 0x0BAD0BAD;	/* sentinel traced when no CRC was read */
+	bool crc_stored = false;
+
+	map = mdss_misr_get_map(block_id, NULL, mdata, is_video_mode);
+	if (!map || (map->crc_op_mode != MISR_OP_BM))
+		return;
+
+	switch_mdp_misr_offset(map, mdata->mdp_rev, block_id);
+
+	status = readl_relaxed(mdata->mdp_base + map->ctrl_reg);
+
+	if (MDSS_MDP_MISR_CTRL_STATUS & status) {
+
+		crc = readl_relaxed(mdata->mdp_base + map->value_reg);
+		map->last_misr = crc; /* cache crc to get it from sysfs */
+
+		if (map->use_ping) {
+			if (map->is_ping_full) {
+				pr_err_once("PING Buffer FULL\n");
+			} else {
+				map->crc_ping[map->crc_index] = crc;
+				crc_stored = true;
+			}
+		} else {
+			if (map->is_pong_full) {
+				pr_err_once("PONG Buffer FULL\n");
+			} else {
+				map->crc_pong[map->crc_index] = crc;
+				crc_stored = true;
+			}
+		}
+
+		if (crc_stored) {
+			map->crc_index = (map->crc_index + 1);
+			/* active buffer full: mark it and switch buffers */
+			if (map->crc_index == MISR_CRC_BATCH_SIZE) {
+				map->crc_index = 0;
+				if (true == map->use_ping) {
+					map->is_ping_full = true;
+					map->use_ping = false;
+				} else {
+					map->is_pong_full = true;
+					map->use_ping = true;
+				}
+				pr_debug("USE BUFF %s\n", map->use_ping ?
+					"PING" : "PONG");
+				pr_debug("mdss_misr_crc_collect PING BUF %s\n",
+					map->is_ping_full ? "FULL" : "EMPTRY");
+				pr_debug("mdss_misr_crc_collect PONG BUF %s\n",
+					map->is_pong_full ? "FULL" : "EMPTRY");
+			}
+		} else {
+			pr_err_once("CRC(%d) Not saved\n", crc);
+		}
+
+		/* pre-1.5 HW needs an explicit clear + re-arm per capture */
+		if (mdata->mdp_rev < MDSS_MDP_HW_REV_105) {
+			writel_relaxed(MDSS_MDP_MISR_CTRL_STATUS_CLEAR,
+				mdata->mdp_base + map->ctrl_reg);
+			writel_relaxed(MISR_CRC_BATCH_CFG,
+				mdata->mdp_base + map->ctrl_reg);
+		}
+
+	} else if (status == 0) {
+
+		if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
+			writel_relaxed(MISR_CRC_BATCH_CFG,
+				mdata->mdp_base + map->ctrl_reg);
+		else
+			writel_relaxed(MISR_CRC_BATCH_CFG |
+				MDSS_MDP_LP_MISR_CTRL_FREE_RUN_MASK,
+				mdata->mdp_base + map->ctrl_reg);
+
+		pr_debug("$$ Batch CRC Start $$\n");
+	}
+
+	pr_debug("$$ Vsync Count = %d, CRC=0x%x Indx = %d$$\n",
+		vsync_count, crc, map->crc_index);
+	trace_mdp_misr_crc(block_id, vsync_count, crc);
+
+	if (vsync_count == MAX_VSYNC_COUNT) {
+		pr_debug("RESET vsync_count(%d)\n", vsync_count);
+		vsync_count = 0;
+	} else {
+		vsync_count += 1;
+	}
+
+}
+
+/*
+ * Format the cached MISR op mode and last signature for DSI0/DSI1/HDMI
+ * into *buf (a PAGE_SIZE buffer). Returns the number of bytes written.
+ */
+int mdss_dump_misr_data(char **buf, u32 size)
+{
+	struct mdss_mdp_misr_map *dsi0_map =
+		&mdss_mdp_misr_table[DISPLAY_MISR_DSI0];
+	struct mdss_mdp_misr_map *dsi1_map =
+		&mdss_mdp_misr_table[DISPLAY_MISR_DSI1];
+	struct mdss_mdp_misr_map *hdmi_map =
+		&mdss_mdp_misr_table[DISPLAY_MISR_HDMI];
+
+	return scnprintf(*buf, PAGE_SIZE,
+		"\tDSI0 mode:%02d MISR:0x%08x\n"
+		"\tDSI1 mode:%02d MISR:0x%08x\n"
+		"\tHDMI mode:%02d MISR:0x%08x\n",
+		dsi0_map->crc_op_mode, dsi0_map->last_misr,
+		dsi1_map->crc_op_mode, dsi1_map->last_misr,
+		hdmi_map->crc_op_mode, hdmi_map->last_misr);
+}
diff --git a/drivers/video/fbdev/msm/mdss_debug.h b/drivers/video/fbdev/msm/mdss_debug.h
new file mode 100644
index 0000000..0d482c0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug.h
@@ -0,0 +1,256 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DEBUG_H
+#define MDSS_DEBUG_H
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss.h"
+#include "mdss_mdp_trace.h"
+
+#define MISR_POLL_SLEEP 2000
+#define MISR_POLL_TIMEOUT 32000
+#define MISR_CRC_BATCH_CFG 0x101
+#define DATA_LIMITER (-1)
+#define XLOG_TOUT_DATA_LIMITER (NULL)
+#define XLOG_FUNC_ENTRY 0x1111
+#define XLOG_FUNC_EXIT 0x2222
+#define MDSS_REG_BLOCK_NAME_LEN (5)
+
+enum mdss_dbg_reg_dump_flag {
+ MDSS_DBG_DUMP_IN_LOG = BIT(0),
+ MDSS_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+enum mdss_dbg_xlog_flag {
+ MDSS_XLOG_DEFAULT = BIT(0),
+ MDSS_XLOG_IOMMU = BIT(1),
+ MDSS_XLOG_DBG = BIT(6),
+ MDSS_XLOG_ALL = BIT(7)
+};
+
+#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
+struct debug_bus {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+};
+
+struct vbif_debug_bus {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_cnt;
+};
+
+#define MDSS_XLOG(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_DEFAULT, \
+ ##__VA_ARGS__, DATA_LIMITER)
+
+#define MDSS_XLOG_TOUT_HANDLER(...) \
+ mdss_xlog_tout_handler_default(false, __func__, ##__VA_ARGS__, \
+ XLOG_TOUT_DATA_LIMITER)
+
+#define MDSS_XLOG_TOUT_HANDLER_WQ(...) \
+ mdss_xlog_tout_handler_default(true, __func__, ##__VA_ARGS__, \
+ XLOG_TOUT_DATA_LIMITER)
+
+#define MDSS_XLOG_DBG(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_DBG, \
+ ##__VA_ARGS__, DATA_LIMITER)
+
+#define MDSS_XLOG_ALL(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_ALL, \
+ ##__VA_ARGS__, DATA_LIMITER)
+
+#define MDSS_XLOG_IOMMU(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_IOMMU, \
+ ##__VA_ARGS__, DATA_LIMITER)
+
+#define ATRACE_END(name) trace_mdss_mark_write(current->tgid, name, 0)
+#define ATRACE_BEGIN(name) trace_mdss_mark_write(current->tgid, name, 1)
+#define ATRACE_FUNC() ATRACE_BEGIN(__func__)
+
+#define ATRACE_INT(name, value) \
+ trace_mdp_trace_counter(current->tgid, name, value)
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDSS)
+
+#define MDSS_DEBUG_BASE_MAX 10
+
+struct mdss_debug_base {
+ struct list_head head; /* head of this node */
+ struct list_head dump_list; /* head to the list with dump ranges */
+ struct mdss_debug_data *mdd;
+ char name[80];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ u8 cmd_data_type;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ u32 *reg_dump; /* address for the mem dump if no ranges used */
+};
+
+struct mdss_debug_data {
+ struct dentry *root;
+ struct dentry *perf;
+ struct dentry *bordercolor;
+ struct dentry *postproc;
+ struct list_head base_list;
+};
+
+struct dump_offset {
+ u32 start;
+ u32 end;
+};
+
+struct range_dump_node {
+ struct list_head head; /* head of this node */
+ u32 *reg_dump; /* address for the mem dump */
+ char range_name[40]; /* name of this range */
+ struct dump_offset offset; /* range to dump */
+ uint32_t xin_id; /* client xin id */
+};
+
+#define DEFINE_MDSS_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+int mdss_debugfs_init(struct mdss_data_type *mdata);
+int mdss_debugfs_remove(struct mdss_data_type *mdata);
+int mdss_debug_register_base(const char *name, void __iomem *base,
+ size_t max_offset, struct mdss_debug_base **dbg_blk);
+void mdss_debug_register_dump_range(struct platform_device *pdev,
+ struct mdss_debug_base *blk_base, const char *ranges_prop,
+ const char *name_prop, const char *xin_prop);
+int panel_debug_register_base(const char *name, void __iomem *base,
+ size_t max_offset);
+int mdss_misr_set(struct mdss_data_type *mdata,
+ struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl);
+int mdss_misr_get(struct mdss_data_type *mdata,
+ struct mdp_misr *resp,
+ struct mdss_mdp_ctl *ctl,
+ bool is_video_mode);
+void mdss_misr_disable(struct mdss_data_type *mdata,
+ struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl);
+void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id,
+ bool is_video_mode);
+
+int mdss_create_xlog_debug(struct mdss_debug_data *mdd);
+#if defined(CONFIG_FB_MSM_MDSS_FRC_DEBUG)
+int mdss_create_frc_debug(struct mdss_debug_data *mdd);
+#else
+static inline int mdss_create_frc_debug(struct mdss_debug_data *mdd)
+ {return 0; }
+#endif
+void mdss_xlog(const char *name, int line, int flag, ...);
+void mdss_xlog_tout_handler_default(bool queue, const char *name, ...);
+u32 get_dump_range(struct dump_offset *range_node, size_t max_offset);
+void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
+ int len, u32 **dump_mem, bool from_isr);
+void mdss_mdp_debug_mid(u32 mid);
+#else
+struct mdss_debug_base;
+struct dump_offset;
+
+static inline int mdss_debugfs_init(struct mdss_data_type *mdata) { return 0; }
+static inline int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+ return 0;
+}
+static inline int mdss_debug_register_base(const char *name, void __iomem *base,
+ size_t max_offset, struct mdss_debug_base **dbg_blk) { return 0; }
+static inline void mdss_debug_register_dump_range(struct platform_device *pdev,
+ struct mdss_debug_base *blk_base, const char *ranges_prop,
+ const char *name_prop, const char *xin_prop) { }
+static inline int panel_debug_register_base(const char *name,
+ void __iomem *base,
+ size_t max_offset)
+{ return 0; }
+static inline int mdss_misr_set(struct mdss_data_type *mdata,
+ struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl)
+{ return 0; }
+static inline int mdss_misr_get(struct mdss_data_type *mdata,
+ struct mdp_misr *resp,
+ struct mdss_mdp_ctl *ctl,
+ bool is_video_mode)
+{ return 0; }
+static inline void mdss_misr_disable(struct mdss_data_type *mdata,
+ struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl)
+{ return; }
+
+static inline void mdss_misr_crc_collect(struct mdss_data_type *mdata,
+ int block_id, bool is_video_mode) { }
+
+static inline int create_xlog_debug(struct mdss_data_type *mdata) { return 0; }
+static inline void mdss_xlog_dump(void) { }
+static inline void mdss_xlog(const char *name, int line, int flag, ...) { }
+
+static inline void mdss_dsi_debug_check_te(struct mdss_panel_data *pdata) { }
+static inline void mdss_xlog_tout_handler_default(bool queue,
+ const char *name, ...) { }
+u32 get_dump_range(struct dump_offset *range_node, size_t max_offset)
+ { return 0; }
+void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
+ int len, u32 **dump_mem, bool from_isr) { }
+void mdss_mdp_debug_mid(u32 mid) { }
+#endif
+
+int mdss_dump_misr_data(char **buf, u32 size);
+
+static inline int mdss_debug_register_io(const char *name,
+ struct mdss_io_data *io_data, struct mdss_debug_base **dbg_blk)
+{
+ return mdss_debug_register_base(name, io_data->base, io_data->len,
+ dbg_blk);
+}
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDSS_FRC_DEBUG)
+void mdss_debug_frc_add_vsync_sample(struct mdss_mdp_ctl *ctl,
+ ktime_t vsync_time);
+void mdss_debug_frc_add_kickoff_sample_pre(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_frc_info *frc_info, int remaining);
+void mdss_debug_frc_add_kickoff_sample_post(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_frc_info *frc_info, int remaining);
+int mdss_debug_frc_frame_repeat_disabled(void);
+#else
+static inline void mdss_debug_frc_add_vsync_sample(
+ struct mdss_mdp_ctl *ctl, ktime_t vsync_time) {}
+static inline void mdss_debug_frc_add_kickoff_sample_pre(
+ struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_frc_info *frc_info,
+ int remaining) {}
+static inline void mdss_debug_frc_add_kickoff_sample_post(
+ struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_frc_info *frc_info,
+ int remaining) {}
+static inline int mdss_debug_frc_frame_repeat_disabled(void) {return false; }
+#endif
+
+#endif /* MDSS_DEBUG_H */
diff --git a/drivers/video/fbdev/msm/mdss_debug_frc.c b/drivers/video/fbdev/msm/mdss_debug_frc.c
new file mode 100644
index 0000000..9965d03
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug_frc.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#define FRC_DEFAULT_ENABLE 1
+#define FRC_DEFAULT_LOG_ENABLE 0
+
+#define FRC_DEBUG_STAT_MAX_SLOT 1024
+
+DEFINE_SPINLOCK(frc_lock);
+
+struct cadence {
+ int repeat;
+ int kickoff_idx;
+ int vsync_idx;
+};
+
+struct vsync_stat {
+ int vsync_cnt;
+ s64 vsync_ts;
+};
+
+struct kick_stat {
+ s64 kickoff_ts;
+ u32 vsync;
+ int remain;
+ struct mdss_mdp_frc_info frc_info;
+};
+
+struct circ_buf {
+ int index;
+ int size;
+ int cnt;
+};
+
+struct vsync_samples {
+ struct circ_buf cbuf;
+ struct vsync_stat samples[FRC_DEBUG_STAT_MAX_SLOT];
+};
+
+struct kickoff_samples {
+ struct circ_buf cbuf;
+ struct kick_stat samples[FRC_DEBUG_STAT_MAX_SLOT];
+};
+
+#define cbuf_init(cbuf, len) { \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ cb->index = 0; \
+ cb->cnt = 0; \
+ cb->size = (len); }
+
+#define cbuf_begin(cbuf, start) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ (cb->index > cb->size) ? (cb->index + (start)) % cb->size : (start); })
+
+#define cbuf_end(cbuf, end) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ ((cb->index - 1 - (end)) % cb->size); })
+
+#define cbuf_cur(cbuf) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ (cb->index % cb->size); })
+
+#define cbuf_next(cbuf, idx) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ (((idx)+1) % cb->size); })
+
+#define cbuf_prev(cbuf, idx) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ (((idx)-1) % cb->size); })
+
+#define current_sample(cbuf) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ int idx = cb->index % cb->size; \
+ &((cbuf)->samples[idx]); })
+
+#define insert_sample(cbuf, sample) { \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ int idx = cb->index % cb->size; \
+ (cbuf)->samples[idx] = (sample); \
+ cb->cnt++; \
+ cb->index++; }
+
+#define advance_sample(cbuf) { \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ cb->cnt++; \
+ cb->index++; }
+
+#define sample_cnt(cbuf) ({ \
+ struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+ (cb->cnt % cb->size); })
+
+struct mdss_dbg_frc_stat {
+ int cadence_id;
+ int display_fp1000s;
+ struct vsync_samples vs;
+ struct kickoff_samples ks;
+ struct cadence cadence_info[FRC_DEBUG_STAT_MAX_SLOT];
+};
+
+struct mdss_dbg_frc {
+ struct dentry *frc;
+ int frc_enable;
+ int log_enable;
+ struct mdss_dbg_frc_stat frc_stat[2];
+ int index;
+} mdss_dbg_frc;
+
+static struct mdss_dbg_frc_stat *__current_frc_stat(
+ struct mdss_dbg_frc *dbg_frc)
+{
+ return &dbg_frc->frc_stat[dbg_frc->index];
+}
+
+static void __init_frc_stat(struct mdss_dbg_frc *dbg_frc)
+{
+ struct mdss_dbg_frc_stat *frc_stat = __current_frc_stat(dbg_frc);
+
+ memset(frc_stat, 0, sizeof(struct mdss_dbg_frc_stat));
+
+ /* TODO: increase vsync buffer to avoid wrap around */
+ cbuf_init(&frc_stat->ks, FRC_DEBUG_STAT_MAX_SLOT);
+ cbuf_init(&frc_stat->vs, FRC_DEBUG_STAT_MAX_SLOT/2);
+}
+
+static struct mdss_dbg_frc_stat *__swap_frc_stat(
+ struct mdss_dbg_frc *dbg_frc)
+{
+ int prev_index = dbg_frc->index;
+
+ dbg_frc->index = (dbg_frc->index + 1) % 2;
+ __init_frc_stat(dbg_frc);
+
+ return &dbg_frc->frc_stat[prev_index];
+}
+
+void mdss_debug_frc_add_vsync_sample(struct mdss_mdp_ctl *ctl,
+ ktime_t vsync_time)
+{
+ if (mdss_dbg_frc.log_enable) {
+ unsigned long flags;
+ struct mdss_dbg_frc_stat *frc_stat;
+ struct vsync_stat vstat;
+
+ spin_lock_irqsave(&frc_lock, flags);
+ frc_stat = __current_frc_stat(&mdss_dbg_frc);
+ vstat.vsync_cnt = ctl->vsync_cnt;
+ vstat.vsync_ts = ktime_to_us(vsync_time);
+ insert_sample(&frc_stat->vs, vstat);
+ spin_unlock_irqrestore(&frc_lock, flags);
+ }
+}
+
+/* collect FRC data for debug ahead of repeat */
+void mdss_debug_frc_add_kickoff_sample_pre(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_frc_info *frc_info, int remaining)
+{
+ if (mdss_dbg_frc.log_enable) {
+ unsigned long flags;
+ struct mdss_dbg_frc_stat *frc_stat;
+
+ spin_lock_irqsave(&frc_lock, flags);
+ frc_stat = __current_frc_stat(&mdss_dbg_frc);
+
+ /* Don't update statistics when video repeats */
+ if (frc_info->cur_frc.frame_cnt
+ != frc_info->last_frc.frame_cnt) {
+ struct kick_stat *kstat = current_sample(&frc_stat->ks);
+
+ kstat->vsync = ctl->vsync_cnt;
+ }
+
+ frc_stat->cadence_id = frc_info->cadence_id;
+ frc_stat->display_fp1000s = frc_info->display_fp1000s;
+ spin_unlock_irqrestore(&frc_lock, flags);
+ }
+}
+
+/* collect FRC data for debug later than repeat */
+void mdss_debug_frc_add_kickoff_sample_post(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_frc_info *frc_info, int remaining)
+{
+ if (mdss_dbg_frc.log_enable) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&frc_lock, flags);
+ /* Don't update statistics when video repeats */
+ if (frc_info->cur_frc.frame_cnt
+ != frc_info->last_frc.frame_cnt) {
+ struct mdss_dbg_frc_stat *frc_stat
+ = __current_frc_stat(&mdss_dbg_frc);
+ struct kick_stat *kstat = current_sample(&frc_stat->ks);
+ ktime_t kickoff_time = ktime_get();
+
+ kstat->kickoff_ts = ktime_to_us(kickoff_time);
+ kstat->frc_info = *frc_info;
+ kstat->remain = remaining;
+
+ advance_sample(&frc_stat->ks);
+ }
+ spin_unlock_irqrestore(&frc_lock, flags);
+ }
+}
+
+int mdss_debug_frc_frame_repeat_disabled(void)
+{
+ return !mdss_dbg_frc.frc_enable;
+}
+
+/* find the closest vsync right to this kickoff time */
+static int __find_right_vsync(struct vsync_samples *vs, s64 kick)
+{
+ int idx = cbuf_begin(vs, 0);
+
+ for (; idx != cbuf_end(vs, 0); idx = cbuf_next(vs, idx)) {
+ if (vs->samples[idx].vsync_ts >= kick)
+ return idx;
+ }
+
+ return -EBADSLT;
+}
+
+/*
+ * These repeat number might start from any position in the sequence. E.g.,
+ * given cadence 23223, the first repeat might be 3 and the repeating pattern
+ * might be 32232, also, the first repeat could be the 4th 3, so the repeating
+ * pattern will be 32322. Below predefined patterns are going to be used to
+ * find the position of the first repeat in the full sequence, then we can
+ * easily known what the remaining expected repeats.
+ */
+#define CADENCE_22_LEN 2
+static int pattern_22[CADENCE_22_LEN] = {2, 2};
+
+#define CADENCE_23_LEN 2
+static int pattern_23[CADENCE_23_LEN][CADENCE_23_LEN] = {
+ {2, 3},
+ {3, 2}
+};
+
+#define CADENCE_23223_LEN 5
+static int pattern_23223[CADENCE_23223_LEN][CADENCE_23223_LEN] = {
+ {2, 3, 2, 2, 3},
+ {3, 2, 2, 3, 2},
+ {2, 2, 3, 2, 3},
+ {2, 3, 2, 3, 2},
+ {3, 2, 3, 2, 2}
+};
+
+static int __compare_init_pattern(struct mdss_dbg_frc_stat *frc_stat,
+ int *pattern, int s_idx, int e_idx)
+{
+ int i;
+
+ for (i = 0; i < min(CADENCE_23223_LEN, e_idx-s_idx+1); i++) {
+ if (frc_stat->cadence_info[i].repeat != pattern[i])
+ break;
+ }
+
+ return i == min(CADENCE_23223_LEN, e_idx-s_idx+1);
+}
+
+static int __pattern_len(int cadence_id)
+{
+ switch (cadence_id) {
+ case FRC_CADENCE_22:
+ return CADENCE_22_LEN;
+ case FRC_CADENCE_23:
+ return CADENCE_23_LEN;
+ case FRC_CADENCE_23223:
+ return CADENCE_23223_LEN;
+ }
+
+ return 0;
+}
+
+static int *__select_pattern(struct mdss_dbg_frc_stat *frc_stat,
+ int s_idx, int e_idx)
+{
+ int i;
+
+ switch (frc_stat->cadence_id) {
+ case FRC_CADENCE_22:
+ return pattern_22;
+ case FRC_CADENCE_23:
+ return frc_stat->cadence_info[s_idx].repeat == 2 ?
+ pattern_23[0] : pattern_23[1];
+ case FRC_CADENCE_23223:
+ for (i = 0; i < CADENCE_23223_LEN; i++) {
+ if (__compare_init_pattern(frc_stat,
+ pattern_23223[i], s_idx, e_idx))
+ return pattern_23223[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void __check_cadence_pattern(struct mdss_dbg_frc_stat *frc_stat,
+ int s_idx, int e_idx)
+{
+ if (s_idx < e_idx) {
+ int *pattern = __select_pattern(frc_stat, s_idx, e_idx);
+ int pattern_len = __pattern_len(frc_stat->cadence_id);
+ struct vsync_samples *vs = &frc_stat->vs;
+ struct kickoff_samples *ks = &frc_stat->ks;
+ int i;
+
+ if (!pattern) {
+ pr_info("Can't match pattern in the beginning\n");
+ return;
+ }
+
+ for (i = s_idx; i < e_idx; i++) {
+ if (frc_stat->cadence_info[i].repeat !=
+ pattern[i % pattern_len]) {
+ int kidx =
+ frc_stat->cadence_info[i].kickoff_idx;
+ pr_info("\tUnexpected Sample: repeat=%d, kickoff=%lld, vsync=%lld\n",
+ frc_stat->cadence_info[i].repeat,
+ ks->samples[kidx].kickoff_ts,
+ vs->samples[kidx].vsync_ts);
+ break;
+ }
+ }
+
+		/* after a mismatch, recursively re-check the remaining samples */
+ if (i < e_idx)
+ __check_cadence_pattern(frc_stat, i+1, e_idx);
+ }
+}
+
+static int __is_cadence_check_supported(struct mdss_dbg_frc_stat *frc_stat)
+{
+ int cadence = frc_stat->cadence_id;
+
+ return cadence == FRC_CADENCE_22 ||
+ cadence == FRC_CADENCE_23 ||
+ cadence == FRC_CADENCE_23223;
+}
+
+static int __find_first_valid_sample(struct mdss_dbg_frc_stat *frc_stat)
+{
+ int i = 0;
+ struct kickoff_samples *ks = &frc_stat->ks;
+ struct vsync_samples *vs = &frc_stat->vs;
+ struct kick_stat *cur_kstat = &ks->samples[cbuf_begin(ks, 0)];
+ s64 cur_kick = cur_kstat->kickoff_ts;
+ int cur_disp = __find_right_vsync(vs, cur_kick);
+ struct vsync_stat *vstat = &vs->samples[cur_disp];
+
+ i = cbuf_begin(ks, 0);
+ for (; i != cbuf_end(ks, 1); i = cbuf_next(ks, i)) {
+ if (vstat->vsync_ts < ks->samples[i].kickoff_ts)
+ break;
+ }
+
+ return i;
+}
+
+static int __analyze_frc_samples(struct mdss_dbg_frc_stat *frc_stat, int start)
+{
+ struct kickoff_samples *ks = &frc_stat->ks;
+ struct vsync_samples *vs = &frc_stat->vs;
+ int i = start;
+ int cnt = 0;
+
+ /* analyze kickoff & vsync samples */
+ for (; i != cbuf_end(ks, 1); i = cbuf_next(ks, i)) {
+ /*
+ * TODO: vsync buffer is not enough so it might
+ * wrap around and drop the samples in the beginning.
+ * skip the first/last sample.
+ */
+ s64 cur_kick = ks->samples[i].kickoff_ts;
+ s64 right_kick = ks->samples[cbuf_next(ks, i)].kickoff_ts;
+ int cur_disp = __find_right_vsync(vs, cur_kick);
+ int right_disp = __find_right_vsync(vs, right_kick);
+
+ frc_stat->cadence_info[cnt].repeat =
+ right_disp >= cur_disp ? right_disp - cur_disp :
+ right_disp - cur_disp + vs->cbuf.size;
+ frc_stat->cadence_info[cnt].kickoff_idx = i;
+ frc_stat->cadence_info[cnt].vsync_idx = cur_disp;
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void __dump_frc_samples(struct mdss_dbg_frc_stat *frc_stat, int cnt)
+{
+ struct kickoff_samples *ks = &frc_stat->ks;
+ struct vsync_samples *vs = &frc_stat->vs;
+ int i = 0;
+
+ pr_info("===== Collected FRC statistics: Cadence %d, FPS %d =====\n",
+ frc_stat->cadence_id, frc_stat->display_fp1000s);
+ pr_info("\tKickoff VS. VSYNC:\n");
+ for (i = 0; i < cnt; i++) {
+ struct cadence *p_info = &frc_stat->cadence_info[i];
+ struct kick_stat *kickoff = &ks->samples[p_info->kickoff_idx];
+ struct vsync_stat *vsync = &vs->samples[p_info->vsync_idx];
+
+ pr_info("\t[K: %lld V: (%d)%lld R: %d] c_ts: %lld c_cnt: %d b_ts: %lld b_cnt: %d l_ts: %lld l_cnt: %d b_v: %d l_v: %d l_r: %d pos: %d vs: %d remain: %d\n",
+ kickoff->kickoff_ts,
+ vsync->vsync_cnt,
+ vsync->vsync_ts,
+ p_info->repeat,
+ kickoff->frc_info.cur_frc.timestamp,
+ kickoff->frc_info.cur_frc.frame_cnt,
+ kickoff->frc_info.base_frc.timestamp,
+ kickoff->frc_info.base_frc.frame_cnt,
+ kickoff->frc_info.last_frc.timestamp,
+ kickoff->frc_info.last_frc.frame_cnt,
+ kickoff->frc_info.base_vsync_cnt,
+ kickoff->frc_info.last_vsync_cnt,
+ kickoff->frc_info.last_repeat,
+ kickoff->frc_info.gen.pos,
+ kickoff->vsync,
+ kickoff->remain);
+ }
+
+ pr_info("===== End FRC statistics: =====\n");
+}
+
+static bool __is_frc_stat_empty(struct mdss_dbg_frc_stat *frc_stat)
+{
+ return sample_cnt(&frc_stat->vs) == 0
+ || sample_cnt(&frc_stat->ks) == 0;
+}
+
+static void mdss_frc_dump_debug_stat(struct mdss_dbg_frc *frc_debug)
+{
+ int i = 0;
+ int cnt = 0;
+ struct mdss_dbg_frc_stat *frc_stat = NULL;
+ unsigned long flags;
+
+ /* swap buffer of collect & analyze */
+ spin_lock_irqsave(&frc_lock, flags);
+ frc_stat = __swap_frc_stat(frc_debug);
+ spin_unlock_irqrestore(&frc_lock, flags);
+
+ if (__is_frc_stat_empty(frc_stat))
+ return;
+
+ /* find the first valid kickoff sample */
+ i = __find_first_valid_sample(frc_stat);
+
+ /* analyze kickoff & vsync samples */
+ cnt = __analyze_frc_samples(frc_stat, i);
+
+ /* print collected statistics FRC data */
+ __dump_frc_samples(frc_stat, cnt);
+
+ if (__is_cadence_check_supported(frc_stat)) {
+ pr_info("===== Check Cadence Pattern: =====\n");
+ __check_cadence_pattern(frc_stat, 0, cnt);
+ pr_info("===== Check Cadence Pattern End =====\n");
+ }
+}
+
+static ssize_t mdss_frc_log_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ int len = 0;
+ char buf[32] = {'\0'};
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", mdss_dbg_frc.log_enable);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static ssize_t mdss_frc_log_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int enable;
+ unsigned long flags;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (kstrtoint(buf, 0, &enable))
+ return -EFAULT;
+
+ if (enable && !mdss_dbg_frc.log_enable) {
+ spin_lock_irqsave(&frc_lock, flags);
+ __init_frc_stat(&mdss_dbg_frc);
+ spin_unlock_irqrestore(&frc_lock, flags);
+ }
+ mdss_dbg_frc.log_enable = enable;
+
+ pr_info("log_enable = %d\n", mdss_dbg_frc.log_enable);
+
+ return count;
+}
+
+static const struct file_operations mdss_dbg_frc_log_fops = {
+ .read = mdss_frc_log_read,
+ .write = mdss_frc_log_write,
+};
+
+static ssize_t mdss_frc_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ mdss_frc_dump_debug_stat(&mdss_dbg_frc);
+
+ return count;
+}
+
+static const struct file_operations mdss_dbg_frc_dump_fops = {
+ .read = NULL,
+ .write = mdss_frc_dump_write,
+};
+
+int mdss_create_frc_debug(struct mdss_debug_data *mdd)
+{
+ mdss_dbg_frc.frc = debugfs_create_dir("frc", mdd->root);
+ if (IS_ERR_OR_NULL(mdss_dbg_frc.frc)) {
+ pr_err("debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(mdss_dbg_frc.frc));
+ mdss_dbg_frc.frc = NULL;
+ return -ENODEV;
+ }
+
+ debugfs_create_u32("enable", 0644, mdss_dbg_frc.frc,
+ &mdss_dbg_frc.frc_enable);
+ debugfs_create_file("log", 0644, mdss_dbg_frc.frc, NULL,
+ &mdss_dbg_frc_log_fops);
+ debugfs_create_file("dump", 0644, mdss_dbg_frc.frc, NULL,
+ &mdss_dbg_frc_dump_fops);
+
+ mdss_dbg_frc.frc_enable = FRC_DEFAULT_ENABLE;
+ mdss_dbg_frc.log_enable = FRC_DEFAULT_LOG_ENABLE;
+ mdss_dbg_frc.index = 0;
+
+ pr_debug("frc_dbg: frc_enable:%d log_enable:%d\n",
+ mdss_dbg_frc.frc_enable, mdss_dbg_frc.log_enable);
+
+ return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
new file mode 100644
index 0000000..49684f4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -0,0 +1,756 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#ifdef CONFIG_FB_MSM_MDSS_XLOG_DEBUG
+#define XLOG_DEFAULT_ENABLE 1
+#else
+#define XLOG_DEFAULT_ENABLE 0
+#endif
+
+#define XLOG_DEFAULT_PANIC 1
+#define XLOG_DEFAULT_REGDUMP 0x2 /* dump in RAM */
+#define XLOG_DEFAULT_DBGBUSDUMP 0x2 /* dump in RAM */
+#define XLOG_DEFAULT_VBIF_DBGBUSDUMP 0x2 /* dump in RAM */
+
+/*
+ * xlog will print this number of entries when it is called through
+ * sysfs node or panic. This prevents kernel log from xlog message
+ * flood.
+ */
+#define MDSS_XLOG_PRINT_ENTRY 256
+
+/*
+ * xlog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than print entry to prevent out of bound xlog
+ * entry array access.
+ */
+#define MDSS_XLOG_ENTRY (MDSS_XLOG_PRINT_ENTRY * 4)
+#define MDSS_XLOG_MAX_DATA 15
+#define MDSS_XLOG_BUF_MAX 512
+#define MDSS_XLOG_BUF_ALIGN 32
+
+DEFINE_SPINLOCK(xlock);
+
+struct tlog {
+ u32 counter;
+ s64 time;
+ const char *name;
+ int line;
+ u32 data[MDSS_XLOG_MAX_DATA];
+ u32 data_cnt;
+ int pid;
+};
+
+struct mdss_dbg_xlog {
+ struct tlog logs[MDSS_XLOG_ENTRY];
+ u32 first;
+ u32 last;
+ u32 curr;
+ struct dentry *xlog;
+ u32 xlog_enable;
+ u32 panic_on_err;
+ u32 enable_reg_dump;
+ u32 enable_dbgbus_dump;
+ u32 enable_vbif_dbgbus_dump;
+ struct work_struct xlog_dump_work;
+ struct mdss_debug_base *blk_arr[MDSS_DEBUG_BASE_MAX];
+ bool work_panic;
+ bool work_dbgbus;
+ bool work_vbif_dbgbus;
+ u32 *dbgbus_dump; /* address for the debug bus dump */
+ u32 *vbif_dbgbus_dump; /* address for the vbif debug bus dump */
+ u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
+} mdss_dbg_xlog;
+
+static inline bool mdss_xlog_is_enabled(u32 flag)
+{
+ return (flag & mdss_dbg_xlog.xlog_enable) ||
+ (flag == MDSS_XLOG_ALL && mdss_dbg_xlog.xlog_enable);
+}
+
+void mdss_xlog(const char *name, int line, int flag, ...)
+{
+ unsigned long flags;
+ int i, val = 0;
+ va_list args;
+ struct tlog *log;
+
+ if (!mdss_xlog_is_enabled(flag))
+ return;
+
+ spin_lock_irqsave(&xlock, flags);
+ log = &mdss_dbg_xlog.logs[mdss_dbg_xlog.curr];
+ log->time = ktime_to_us(ktime_get());
+ log->name = name;
+ log->line = line;
+ log->data_cnt = 0;
+ log->pid = current->pid;
+
+ va_start(args, flag);
+ for (i = 0; i < MDSS_XLOG_MAX_DATA; i++) {
+
+ val = va_arg(args, int);
+ if (val == DATA_LIMITER)
+ break;
+
+ log->data[i] = val;
+ }
+ va_end(args);
+ log->data_cnt = i;
+ mdss_dbg_xlog.curr = (mdss_dbg_xlog.curr + 1) % MDSS_XLOG_ENTRY;
+ mdss_dbg_xlog.last++;
+
+ spin_unlock_irqrestore(&xlock, flags);
+}
+
+/* always dump the last entries which are not dumped yet */
+static bool __mdss_xlog_dump_calc_range(void)
+{
+ static u32 next;
+ bool need_dump = true;
+ unsigned long flags;
+ struct mdss_dbg_xlog *xlog = &mdss_dbg_xlog;
+
+ spin_lock_irqsave(&xlock, flags);
+
+ xlog->first = next;
+
+ if (xlog->last == xlog->first) {
+ need_dump = false;
+ goto dump_exit;
+ }
+
+ if (xlog->last < xlog->first) {
+ xlog->first %= MDSS_XLOG_ENTRY;
+ if (xlog->last < xlog->first)
+ xlog->last += MDSS_XLOG_ENTRY;
+ }
+
+ if ((xlog->last - xlog->first) > MDSS_XLOG_PRINT_ENTRY) {
+ pr_warn("xlog buffer overflow before dump: %d\n",
+ xlog->last - xlog->first);
+ xlog->first = xlog->last - MDSS_XLOG_PRINT_ENTRY;
+ }
+ next = xlog->first + 1;
+
+dump_exit:
+ spin_unlock_irqrestore(&xlock, flags);
+
+ return need_dump;
+}
+
+static ssize_t mdss_xlog_dump_entry(char *xlog_buf, ssize_t xlog_buf_size)
+{
+ int i;
+ ssize_t off = 0;
+ struct tlog *log, *prev_log;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xlock, flags);
+
+ log = &mdss_dbg_xlog.logs[mdss_dbg_xlog.first %
+ MDSS_XLOG_ENTRY];
+
+ prev_log = &mdss_dbg_xlog.logs[(mdss_dbg_xlog.first - 1) %
+ MDSS_XLOG_ENTRY];
+
+ off = snprintf((xlog_buf + off), (xlog_buf_size - off), "%s:%-4d",
+ log->name, log->line);
+
+ if (off < MDSS_XLOG_BUF_ALIGN) {
+ memset((xlog_buf + off), 0x20, (MDSS_XLOG_BUF_ALIGN - off));
+ off = MDSS_XLOG_BUF_ALIGN;
+ }
+
+ off += snprintf((xlog_buf + off), (xlog_buf_size - off),
+ "=>[%-8d:%-11llu:%9llu][%-4d]:", mdss_dbg_xlog.first,
+ log->time, (log->time - prev_log->time), log->pid);
+
+ for (i = 0; i < log->data_cnt; i++)
+ off += snprintf((xlog_buf + off), (xlog_buf_size - off),
+ "%x ", log->data[i]);
+
+ off += snprintf((xlog_buf + off), (xlog_buf_size - off), "\n");
+
+ spin_unlock_irqrestore(&xlock, flags);
+
+ return off;
+}
+
+static void mdss_xlog_dump_all(void)
+{
+ char xlog_buf[MDSS_XLOG_BUF_MAX];
+
+ while (__mdss_xlog_dump_calc_range()) {
+ mdss_xlog_dump_entry(xlog_buf, MDSS_XLOG_BUF_MAX);
+ pr_info("%s", xlog_buf);
+ }
+}
+
+u32 get_dump_range(struct dump_offset *range_node, size_t max_offset)
+{
+ u32 length = 0;
+
+ if ((range_node->start > range_node->end) ||
+ (range_node->end > max_offset) || (range_node->start == 0
+ && range_node->end == 0)) {
+ length = max_offset;
+ } else {
+ length = range_node->end - range_node->start;
+ }
+
+ return length;
+}
+
+static void mdss_dump_debug_bus(u32 bus_dump_flag,
+ u32 **dump_mem)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool in_log, in_mem;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct debug_bus *head;
+ phys_addr_t phys = 0;
+ int list_size = mdata->dbg_bus_size;
+ int i;
+
+ if (!(mdata->dbg_bus && list_size))
+ return;
+
+ /* will keep in memory 4 entries of 4 bytes each */
+ list_size = (list_size * 4 * 4);
+
+ in_log = (bus_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+ in_mem = (bus_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+ pr_info("======== Debug bus DUMP =========\n");
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+ __func__, dump_addr, dump_addr + list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ for (i = 0; i < mdata->dbg_bus_size; i++) {
+ head = mdata->dbg_bus + i;
+ writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+ mdss_res->mdp_base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+ status = readl_relaxed(mdss_res->mdp_base +
+ head->wr_addr + 0x4);
+
+ if (in_log)
+ pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id, head->test_id,
+ status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, mdss_res->mdp_base + head->wr_addr);
+
+ }
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+ pr_info("========End Debug bus=========\n");
+}
+
+/*
+ * __vbif_debug_bus() - walk every test point of one VBIF debug-bus block.
+ * @head: block descriptor (register offset, bit offset, counts).
+ * @vbif_base: mapped VBIF register base.
+ * @dump_addr: optional output cursor; advanced 4 words per test point.
+ * @in_log: also print each sample to the kernel log.
+ *
+ * Register write -> wmb() -> read ordering is the hardware contract;
+ * each test point must be selected before its output is sampled.
+ */
+static void __vbif_debug_bus(struct vbif_debug_bus *head,
+	void __iomem *vbif_base, u32 *dump_addr, bool in_log)
+{
+	int i, j;
+	u32 val;
+
+	/* nothing to do if neither output sink is requested */
+	if (!dump_addr && !in_log)
+		return;
+
+	for (i = 0; i < head->block_cnt; i++) {
+		writel_relaxed(1 << (i + head->bit_offset),
+				vbif_base + head->block_bus_addr);
+		/* make sure that current bus block is enabled */
+		wmb();
+		for (j = 0; j < head->test_pnt_cnt; j++) {
+			writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
+			/* make sure that test point is enabled */
+			wmb();
+			val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
+			/* record {bus addr, block idx, test idx, value} */
+			if (dump_addr) {
+				*dump_addr++ = head->block_bus_addr;
+				*dump_addr++ = i;
+				*dump_addr++ = j;
+				*dump_addr++ = val;
+			}
+			if (in_log)
+				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+					head->block_bus_addr, i, j, val);
+		}
+	}
+}
+
+/*
+ * mdss_dump_vbif_debug_bus() - dump the RT or NRT VBIF debug bus.
+ * @bus_dump_flag: MDSS_DBG_DUMP_IN_LOG and/or MDSS_DBG_DUMP_IN_MEM.
+ * @dump_mem: in/out coherent buffer, allocated here on first use.
+ * @real_time: true = real-time VBIF, false = non-real-time VBIF.
+ */
+static void mdss_dump_vbif_debug_bus(u32 bus_dump_flag,
+	u32 **dump_mem, bool real_time)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool in_log, in_mem;
+	u32 *dump_addr = NULL;
+	u32 value;
+	struct vbif_debug_bus *head;
+	phys_addr_t phys = 0;
+	int i, list_size = 0;
+	void __iomem *vbif_base;
+	struct vbif_debug_bus *dbg_bus;
+	u32 bus_size;
+
+	if (real_time) {
+		pr_info("======== VBIF Debug bus DUMP =========\n");
+		vbif_base = mdata->vbif_io.base;
+		dbg_bus = mdata->vbif_dbg_bus;
+		bus_size = mdata->vbif_dbg_bus_size;
+	} else {
+		pr_info("======== NRT VBIF Debug bus DUMP =========\n");
+		vbif_base = mdata->vbif_nrt_io.base;
+		dbg_bus = mdata->nrt_vbif_dbg_bus;
+		bus_size = mdata->nrt_vbif_dbg_bus_size;
+	}
+
+	if (!dbg_bus || !bus_size)
+		return;
+
+	/* allocate memory for each test point */
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 bytes * 4 entries for each test point*/
+	list_size *= 16;
+
+	in_log = (bus_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+	in_mem = (bus_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			/*
+			 * list_size is in bytes; scale by sizeof(u32) so
+			 * the logged end address matches the real buffer
+			 * end instead of overshooting it 4x.
+			 */
+			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+				__func__, dump_addr,
+				dump_addr + list_size / sizeof(u32));
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
+	writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);
+
+	/* make sure that vbif core is on */
+	wmb();
+
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+
+		writel_relaxed(0, vbif_base + head->disable_bus_addr);
+		writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+		/* make sure that other bus is off */
+		wmb();
+
+		__vbif_debug_bus(head, vbif_base, dump_addr, in_log);
+		if (dump_addr)
+			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	pr_info("========End VBIF Debug bus=========\n");
+}
+
+/*
+ * mdss_dump_reg() - dump @len bytes of registers starting at @addr.
+ * @dump_name: tag printed with the dump.
+ * @reg_dump_flag: MDSS_DBG_DUMP_IN_LOG and/or MDSS_DBG_DUMP_IN_MEM.
+ * @addr: mapped register start address.
+ * @len: region length in bytes (rounded up to a 16-byte row).
+ * @dump_mem: in/out coherent buffer, allocated here on first use.
+ * @from_isr: true when called from interrupt context; clocks are then
+ *            assumed to already be on and are not toggled here.
+ */
+void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
+	int len, u32 **dump_mem, bool from_isr)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool in_log, in_mem;
+	u32 *dump_addr = NULL;
+	phys_addr_t phys = 0;
+	int i;
+
+	in_log = (reg_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+	in_mem = (reg_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+	pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
+		reg_dump_flag, in_log, in_mem);
+
+	/* convert byte length to a count of 16-byte rows, rounding up */
+	if (len % 16)
+		len += 16;
+	len /= 16;
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+				len * 16, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			/*
+			 * Buffer holds len * 16 bytes == len * 4 u32s;
+			 * use word units for the u32 pointer arithmetic.
+			 */
+			pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%pK\n",
+				dump_name, dump_addr,
+				dump_addr + (u32)len * 4, addr);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	if (!from_isr)
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	for (i = 0; i < len; i++) {
+		u32 x0, x4, x8, xc;
+
+		x0 = readl_relaxed(addr+0x0);
+		x4 = readl_relaxed(addr+0x4);
+		x8 = readl_relaxed(addr+0x8);
+		xc = readl_relaxed(addr+0xc);
+
+		if (in_log)
+			pr_info("%pK : %08x %08x %08x %08x\n", addr, x0, x4, x8,
+				xc);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4] = x0;
+			dump_addr[i*4 + 1] = x4;
+			dump_addr[i*4 + 2] = x8;
+			dump_addr[i*4 + 3] = xc;
+		}
+
+		addr += 16;
+	}
+
+	if (!from_isr)
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * mdss_dump_reg_by_ranges() - dump one debug block, range list aware.
+ * @dbg: block descriptor with base address and optional range list.
+ * @reg_dump_flag: log/memory destination flags passed to mdss_dump_reg().
+ *
+ * If the block registered explicit offset ranges, each range is dumped
+ * with its own name and buffer; otherwise the full register window
+ * [base, base + max_offset) is dumped at once.
+ */
+static void mdss_dump_reg_by_ranges(struct mdss_debug_base *dbg,
+	u32 reg_dump_flag)
+{
+	char *addr;
+	int len;
+	struct range_dump_node *xlog_node, *xlog_tmp;
+
+	if (!dbg || !dbg->base) {
+		pr_err("dbg base is null!\n");
+		return;
+	}
+
+	pr_info("%s:=========%s DUMP=========\n", __func__, dbg->name);
+
+	/* If there is a list to dump the registers by ranges, use the ranges */
+	if (!list_empty(&dbg->dump_list)) {
+		list_for_each_entry_safe(xlog_node, xlog_tmp,
+			&dbg->dump_list, head) {
+			len = get_dump_range(&xlog_node->offset,
+				dbg->max_offset);
+			addr = dbg->base + xlog_node->offset.start;
+			pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
+				xlog_node->range_name,
+				addr, xlog_node->offset.start,
+				xlog_node->offset.end);
+			mdss_dump_reg((const char *)xlog_node->range_name,
+				reg_dump_flag, addr, len, &xlog_node->reg_dump,
+				false);
+		}
+	} else {
+		/* If there is no list to dump ranges, dump all registers */
+		pr_info("Ranges not found, will dump full registers\n");
+		pr_info("base:0x%pK len:%zu\n", dbg->base, dbg->max_offset);
+		addr = dbg->base;
+		len = dbg->max_offset;
+		mdss_dump_reg((const char *)dbg->name, reg_dump_flag, addr,
+			len, &dbg->reg_dump, false);
+	}
+}
+
+/* Dump the first registered debug block whose name matches @blk_name. */
+static void mdss_dump_reg_by_blk(const char *blk_name)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct mdss_debug_base *dbg, *next;
+
+	if (!mdd)
+		return;
+
+	list_for_each_entry_safe(dbg, next, &mdd->base_list, head) {
+		if (!strlen(dbg->name) || strcmp(dbg->name, blk_name))
+			continue;
+
+		mdss_dump_reg_by_ranges(dbg, mdss_dbg_xlog.enable_reg_dump);
+		break;
+	}
+}
+
+/* Dump every named debug block that has been registered. */
+static void mdss_dump_reg_all(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct mdss_debug_base *dbg, *next;
+
+	if (!mdd)
+		return;
+
+	list_for_each_entry_safe(dbg, next, &mdd->base_list, head) {
+		if (strlen(dbg->name))
+			mdss_dump_reg_by_blk(dbg->name);
+	}
+}
+
+/* Reset all @blk_len slots of the dump block array to NULL. */
+static void clear_dump_blk_arr(struct mdss_debug_base *blk_arr[],
+	u32 blk_len)
+{
+	u32 idx;
+
+	for (idx = 0; idx < blk_len; idx++)
+		blk_arr[idx] = NULL;
+}
+
+/*
+ * Look up a registered debug block by name.
+ * Returns the matching descriptor, or NULL if none is registered.
+ */
+struct mdss_debug_base *get_dump_blk_addr(const char *blk_name)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct mdss_debug_base *dbg, *next;
+
+	if (!mdd)
+		return NULL;
+
+	list_for_each_entry_safe(dbg, next, &mdd->base_list, head) {
+		if (strlen(dbg->name) && !strcmp(dbg->name, blk_name))
+			return dbg;
+	}
+
+	return NULL;
+}
+
+/*
+ * Dump the selected register blocks, the event log, and (optionally)
+ * the MDP and VBIF debug buses; panic afterwards when requested and
+ * panic_on_err is set.
+ */
+static void mdss_xlog_dump_array(struct mdss_debug_base *blk_arr[],
+	u32 len, bool dead, const char *name, bool dump_dbgbus,
+	bool dump_vbif_dbgbus)
+{
+	u32 idx;
+
+	for (idx = 0; idx < len; idx++) {
+		struct mdss_debug_base *blk = blk_arr[idx];
+
+		if (blk)
+			mdss_dump_reg_by_ranges(blk,
+				mdss_dbg_xlog.enable_reg_dump);
+	}
+
+	mdss_xlog_dump_all();
+
+	if (dump_dbgbus)
+		mdss_dump_debug_bus(mdss_dbg_xlog.enable_dbgbus_dump,
+			&mdss_dbg_xlog.dbgbus_dump);
+
+	if (dump_vbif_dbgbus) {
+		/* real-time VBIF first, then the non-real-time instance */
+		mdss_dump_vbif_debug_bus(mdss_dbg_xlog.enable_vbif_dbgbus_dump,
+			&mdss_dbg_xlog.vbif_dbgbus_dump, true);
+		mdss_dump_vbif_debug_bus(mdss_dbg_xlog.enable_vbif_dbgbus_dump,
+			&mdss_dbg_xlog.nrt_vbif_dbgbus_dump, false);
+	}
+
+	if (dead && mdss_dbg_xlog.panic_on_err)
+		panic(name);
+}
+
+/* Deferred dump; all parameters were latched by the timeout handler. */
+static void xlog_debug_work(struct work_struct *work)
+{
+	mdss_xlog_dump_array(mdss_dbg_xlog.blk_arr,
+		ARRAY_SIZE(mdss_dbg_xlog.blk_arr),
+		mdss_dbg_xlog.work_panic, "xlog_workitem",
+		mdss_dbg_xlog.work_dbgbus,
+		mdss_dbg_xlog.work_vbif_dbgbus);
+}
+
+/*
+ * mdss_xlog_tout_handler_default() - timeout/fatal-error dump entry point.
+ * @queue: defer the dump to a workqueue instead of running inline.
+ * @name: caller tag, also used as panic string for inline dumps.
+ *
+ * The NULL-terminated varargs list selects register blocks by name;
+ * the magic names "dbg_bus", "vbif_dbg_bus" and "panic" toggle the
+ * corresponding dump/panic options instead of selecting a block.
+ */
+void mdss_xlog_tout_handler_default(bool queue, const char *name, ...)
+{
+	int arg, nblk = 0;
+	bool dead = false;
+	bool dump_dbgbus = false, dump_vbif_dbgbus = false;
+	va_list args;
+	char *blk_name;
+	struct mdss_debug_base *blk_base;
+	struct mdss_debug_base **blk_arr;
+	u32 blk_len;
+
+	if (!mdss_xlog_is_enabled(MDSS_XLOG_DEFAULT))
+		return;
+
+	/* a deferred dump is already pending; nothing more to record */
+	if (queue && work_pending(&mdss_dbg_xlog.xlog_dump_work))
+		return;
+
+	blk_arr = &mdss_dbg_xlog.blk_arr[0];
+	blk_len = ARRAY_SIZE(mdss_dbg_xlog.blk_arr);
+	clear_dump_blk_arr(blk_arr, blk_len);
+
+	va_start(args, name);
+	for (arg = 0; arg < MDSS_XLOG_MAX_DATA; arg++) {
+		blk_name = va_arg(args, char *);
+		if (IS_ERR_OR_NULL(blk_name))
+			break;
+
+		blk_base = get_dump_blk_addr(blk_name);
+		if (blk_base && (nblk < blk_len))
+			blk_arr[nblk++] = blk_base;
+
+		if (!strcmp(blk_name, "dbg_bus"))
+			dump_dbgbus = true;
+		else if (!strcmp(blk_name, "vbif_dbg_bus"))
+			dump_vbif_dbgbus = true;
+		else if (!strcmp(blk_name, "panic"))
+			dead = true;
+	}
+	va_end(args);
+
+	if (queue) {
+		/* schedule work to dump later */
+		mdss_dbg_xlog.work_panic = dead;
+		mdss_dbg_xlog.work_dbgbus = dump_dbgbus;
+		mdss_dbg_xlog.work_vbif_dbgbus = dump_vbif_dbgbus;
+		schedule_work(&mdss_dbg_xlog.xlog_dump_work);
+	} else {
+		mdss_xlog_dump_array(blk_arr, blk_len, dead, name, dump_dbgbus,
+			dump_vbif_dbgbus);
+	}
+}
+
+/* Open hook for the xlog "dump" debugfs file. */
+static int mdss_xlog_dump_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	/*
+	 * nonseekable_open() clears FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE,
+	 * matching the open-coded version but consistent with the other
+	 * debugfs open hooks in this driver.
+	 */
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * Read hook: emits one formatted xlog entry per read() call until the
+ * log range is exhausted, then returns 0 (EOF).
+ */
+static ssize_t mdss_xlog_dump_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char xlog_buf[MDSS_XLOG_BUF_MAX];
+
+	if (__mdss_xlog_dump_calc_range()) {
+		len = mdss_xlog_dump_entry(xlog_buf, MDSS_XLOG_BUF_MAX);
+		/* never copy more than the caller's buffer can hold */
+		if (len > count)
+			len = count;
+		if (copy_to_user(buff, xlog_buf, len))
+			return -EFAULT;
+		*ppos += len;
+	}
+
+	return len;
+}
+
+/*
+ * Write hook: any write triggers a full register dump plus an event
+ * log dump, and optionally panics the system for post-mortem capture.
+ */
+static ssize_t mdss_xlog_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	mdss_dump_reg_all();
+	mdss_xlog_dump_all();
+
+	if (mdss_dbg_xlog.panic_on_err)
+		panic("mdss");
+
+	return count;
+}
+
+
+/* File operations for the xlog "dump" debugfs node. */
+static const struct file_operations mdss_xlog_fops = {
+	.open = mdss_xlog_dump_open,
+	.read = mdss_xlog_dump_read,
+	.write = mdss_xlog_dump_write,
+};
+
+/*
+ * mdss_create_xlog_debug() - create the "xlog" debugfs directory and
+ * its control files, then initialize the dump work item and defaults.
+ *
+ * Returns 0 on success or -ENODEV if the directory cannot be created.
+ */
+int mdss_create_xlog_debug(struct mdss_debug_data *mdd)
+{
+	int i;
+
+	mdss_dbg_xlog.xlog = debugfs_create_dir("xlog", mdd->root);
+	if (IS_ERR_OR_NULL(mdss_dbg_xlog.xlog)) {
+		pr_err("debugfs_create_dir fail, error %ld\n",
+		       PTR_ERR(mdss_dbg_xlog.xlog));
+		mdss_dbg_xlog.xlog = NULL;
+		return -ENODEV;
+	}
+
+	INIT_WORK(&mdss_dbg_xlog.xlog_dump_work, xlog_debug_work);
+	mdss_dbg_xlog.work_panic = false;
+
+	/* pre-number the ring-buffer entries */
+	for (i = 0; i < MDSS_XLOG_ENTRY; i++)
+		mdss_dbg_xlog.logs[i].counter = i;
+
+	debugfs_create_file("dump", 0644, mdss_dbg_xlog.xlog, NULL,
+						&mdss_xlog_fops);
+	debugfs_create_u32("enable", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.xlog_enable);
+	/*
+	 * NOTE(review): the cast suggests panic_on_err is not declared as
+	 * bool; debugfs_create_bool accesses it through a bool pointer, so
+	 * only the first byte is read/written — confirm the field type.
+	 */
+	debugfs_create_bool("panic", 0644, mdss_dbg_xlog.xlog,
+			    (bool *)&mdss_dbg_xlog.panic_on_err);
+	debugfs_create_u32("reg_dump", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.enable_reg_dump);
+	debugfs_create_u32("dbgbus_dump", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.enable_dbgbus_dump);
+	debugfs_create_u32("vbif_dbgbus_dump", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.enable_vbif_dbgbus_dump);
+
+	/* defaults applied after file creation; files exist briefly with 0 */
+	mdss_dbg_xlog.xlog_enable = XLOG_DEFAULT_ENABLE;
+	mdss_dbg_xlog.panic_on_err = XLOG_DEFAULT_PANIC;
+	mdss_dbg_xlog.enable_reg_dump = XLOG_DEFAULT_REGDUMP;
+	mdss_dbg_xlog.enable_dbgbus_dump = XLOG_DEFAULT_DBGBUSDUMP;
+	mdss_dbg_xlog.enable_vbif_dbgbus_dump = XLOG_DEFAULT_VBIF_DBGBUSDUMP;
+
+	pr_info("xlog_status: enable:%d, panic:%d, dump:%d\n",
+		mdss_dbg_xlog.xlog_enable, mdss_dbg_xlog.panic_on_err,
+		mdss_dbg_xlog.enable_reg_dump);
+
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
new file mode 100644
index 0000000..d70c1e8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -0,0 +1,4377 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/leds-qpnp-wled.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_qos.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_dsi.h"
+#include "mdss_debug.h"
+#include "mdss_dsi_phy.h"
+#include "mdss_dba_utils.h"
+
+/* Crystal oscillator reference clock rate in Hz */
+#define XO_CLK_RATE 19200000
+/* length of the DSI controller index string parsed from the cmdline */
+#define CMDLINE_DSI_CTL_NUM_STRING_LEN 2
+
+/* Master structure to hold all the information about the DSI/panel */
+static struct mdss_dsi_data *mdss_dsi_res;
+
+/* CPU DMA latency bounds (us) while the DSI interface is active/idle */
+#define DSI_DISABLE_PC_LATENCY 100
+#define DSI_ENABLE_PC_LATENCY PM_QOS_DEFAULT_VALUE
+
+/* single shared PM-QoS request, refcounted via pm_qos_req_cnt */
+static struct pm_qos_request mdss_dsi_pm_qos_request;
+
+/*
+ * Register the shared IRQ-affine PM-QoS request on first use; later
+ * callers only bump the reference count.
+ */
+static void mdss_dsi_pm_qos_add_request(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct irq_info *hw_irq;
+
+	if (!ctrl_pdata || !ctrl_pdata->shared_data)
+		return;
+
+	hw_irq = ctrl_pdata->dsi_hw->irq_info;
+	if (!hw_irq)
+		return;
+
+	mutex_lock(&ctrl_pdata->shared_data->pm_qos_lock);
+	if (ctrl_pdata->shared_data->pm_qos_req_cnt == 0) {
+		pr_debug("%s: add request irq\n", __func__);
+		mdss_dsi_pm_qos_request.type = PM_QOS_REQ_AFFINE_IRQ;
+		mdss_dsi_pm_qos_request.irq = hw_irq->irq;
+		pm_qos_add_request(&mdss_dsi_pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+	}
+	ctrl_pdata->shared_data->pm_qos_req_cnt++;
+	mutex_unlock(&ctrl_pdata->shared_data->pm_qos_lock);
+}
+
+/*
+ * Drop one reference on the shared PM-QoS request and remove it once
+ * the count reaches zero; warns on an unbalanced release.
+ */
+static void mdss_dsi_pm_qos_remove_request(struct dsi_shared_data *sdata)
+{
+	if (!sdata)
+		return;
+
+	mutex_lock(&sdata->pm_qos_lock);
+	if (sdata->pm_qos_req_cnt) {
+		sdata->pm_qos_req_cnt--;
+		if (!sdata->pm_qos_req_cnt) {
+			pr_debug("%s: remove request\n", __func__);
+			pm_qos_remove_request(&mdss_dsi_pm_qos_request);
+		}
+	} else {
+		pr_warn("%s: unbalanced pm_qos ref count\n", __func__);
+	}
+	mutex_unlock(&sdata->pm_qos_lock);
+}
+
+/* Update the shared PM-QoS request to a new CPU DMA latency bound. */
+static void mdss_dsi_pm_qos_update_request(int val)
+{
+	pr_debug("%s: update request %d\n", __func__, val);
+	pm_qos_update_request(&mdss_dsi_pm_qos_request, val);
+}
+
+static int mdss_dsi_pinctrl_set_state(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+ bool active);
+
+/* Return the controller pdata for @ctrl_id, or NULL if out of range. */
+static struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl(u32 ctrl_id)
+{
+	if (!mdss_dsi_res || ctrl_id >= DSI_CTRL_MAX)
+		return NULL;
+
+	return mdss_dsi_res->ctrl_pdata[ctrl_id];
+}
+
+/*
+ * mdss_dsi_config_clk_src() - select the PLL parents for the DSI byte
+ * and pixel RCG clocks based on the board's PLL-source configuration.
+ * @pdev: DSI platform device holding the shared clock handles.
+ */
+static void mdss_dsi_config_clk_src(struct platform_device *pdev)
+{
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+	struct dsi_shared_data *sdata = dsi_res->shared_data;
+
+	if (!sdata->ext_byte0_clk || !sdata->ext_pixel0_clk) {
+		pr_debug("%s: DSI-0 ext. clocks not present\n", __func__);
+		return;
+	}
+
+	if (mdss_dsi_is_pll_src_default(sdata)) {
+		/*
+		 * Default Mapping:
+		 * 1. dual-dsi/single-dsi:
+		 *     DSI0 <--> PLL0
+		 *     DSI1 <--> PLL1
+		 * 2. split-dsi:
+		 *     DSI0 <--> PLL0
+		 *     DSI1 <--> PLL0
+		 */
+		sdata->byte0_parent = sdata->ext_byte0_clk;
+		sdata->pixel0_parent = sdata->ext_pixel0_clk;
+
+		if (mdss_dsi_is_hw_config_split(sdata)) {
+			sdata->byte1_parent = sdata->byte0_parent;
+			sdata->pixel1_parent = sdata->pixel0_parent;
+		} else if (sdata->ext_byte1_clk && sdata->ext_pixel1_clk) {
+			sdata->byte1_parent = sdata->ext_byte1_clk;
+			sdata->pixel1_parent = sdata->ext_pixel1_clk;
+		} else {
+			pr_debug("%s: DSI-1 external clocks not present\n",
+				__func__);
+			return;
+		}
+
+		pr_debug("%s: default: DSI0 <--> PLL0, DSI1 <--> %s\n",
+			__func__,
+			mdss_dsi_is_hw_config_split(sdata) ? "PLL0" : "PLL1");
+	} else {
+		/*
+		 * For split-dsi and single-dsi use cases, map the PLL source
+		 * based on the pll source configuration. It is possible that
+		 * for split-dsi case, the only supported config is to source
+		 * the clocks from PLL0. This is not explicitly checked here as
+		 * it should have been already enforced when validating the
+		 * board configuration.
+		 */
+		if (mdss_dsi_is_pll_src_pll0(sdata)) {
+			pr_debug("%s: single source: PLL0\n", __func__);
+			sdata->byte0_parent = sdata->ext_byte0_clk;
+			sdata->pixel0_parent = sdata->ext_pixel0_clk;
+		} else if (mdss_dsi_is_pll_src_pll1(sdata)) {
+			if (sdata->ext_byte1_clk && sdata->ext_pixel1_clk) {
+				pr_debug("%s: single source: PLL1\n",
+					__func__);
+				sdata->byte0_parent = sdata->ext_byte1_clk;
+				sdata->pixel0_parent = sdata->ext_pixel1_clk;
+			} else {
+				pr_err("%s: DSI-1 external clocks not present\n",
+					__func__);
+				return;
+			}
+		}
+		sdata->byte1_parent = sdata->byte0_parent;
+		sdata->pixel1_parent = sdata->pixel0_parent;
+	}
+}
+
+/*
+ * Report which PLL currently feeds @ctrl's byte clock, as a string
+ * suitable for logging ("PLL0"/"PLL1", or "????" on bad input).
+ */
+static char const *mdss_dsi_get_clk_src(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct dsi_shared_data *sdata;
+	struct clk *byte_parent;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return "????";
+	}
+
+	sdata = ctrl->shared_data;
+	byte_parent = mdss_dsi_is_left_ctrl(ctrl) ?
+			sdata->byte0_parent : sdata->byte1_parent;
+
+	return (byte_parent == sdata->ext_byte0_clk) ? "PLL0" : "PLL1";
+}
+
+/*
+ * mdss_dsi_set_clk_src() - reparent @ctrl's byte/pixel RCG clocks to
+ * the PLL selected by mdss_dsi_config_clk_src().
+ *
+ * Returns 0 on success (or when no RCG handles exist, in which case
+ * reparenting is unnecessary), or the clk_set_parent() error.
+ */
+static int mdss_dsi_set_clk_src(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	struct dsi_shared_data *sdata;
+	struct clk *byte_parent, *pixel_parent;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	sdata = ctrl->shared_data;
+
+	if (!ctrl->byte_clk_rcg || !ctrl->pixel_clk_rcg) {
+		pr_debug("%s: set_clk_src not needed\n", __func__);
+		return 0;
+	}
+
+	if (mdss_dsi_is_left_ctrl(ctrl)) {
+		byte_parent = sdata->byte0_parent;
+		pixel_parent = sdata->pixel0_parent;
+	} else {
+		byte_parent = sdata->byte1_parent;
+		pixel_parent = sdata->pixel1_parent;
+	}
+
+	rc = clk_set_parent(ctrl->byte_clk_rcg, byte_parent);
+	if (rc) {
+		pr_err("%s: failed to set parent for byte clk for ctrl%d. rc=%d\n",
+			__func__, ctrl->ndx, rc);
+		goto error;
+	}
+
+	rc = clk_set_parent(ctrl->pixel_clk_rcg, pixel_parent);
+	if (rc) {
+		pr_err("%s: failed to set parent for pixel clk for ctrl%d. rc=%d\n",
+			__func__, ctrl->ndx, rc);
+		goto error;
+	}
+
+	pr_debug("%s: ctrl%d clock source set to %s\n", __func__, ctrl->ndx,
+		mdss_dsi_get_clk_src(ctrl));
+
+error:
+	return rc;
+}
+
+/*
+ * Initialize the regulators of every DSI power module; on failure the
+ * modules configured so far are rolled back in reverse order.
+ */
+static int mdss_dsi_regulator_init(struct platform_device *pdev,
+	struct dsi_shared_data *sdata)
+{
+	int rc = 0;
+	int i, j;
+
+	if (!pdev || !sdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
+		rc = msm_mdss_config_vreg(&pdev->dev,
+			sdata->power_data[i].vreg_config,
+			sdata->power_data[i].num_vreg, 1);
+		if (!rc)
+			continue;
+
+		pr_err("%s: failed to init vregs for %s\n",
+			__func__, __mdss_dsi_pm_name(i));
+		/* undo the modules that were already configured */
+		for (j = i - 1; j >= DSI_CORE_PM; j--)
+			msm_mdss_config_vreg(&pdev->dev,
+				sdata->power_data[j].vreg_config,
+				sdata->power_data[j].num_vreg, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Power the panel down: deassert reset, release the pinctrl state and
+ * disable the panel regulators. A reset failure is logged but does not
+ * abort the power-down sequence.
+ */
+static int mdss_dsi_panel_power_off(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+	int rc;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	rc = mdss_dsi_panel_reset(pdata, 0);
+	if (rc) {
+		pr_warn("%s: Panel reset failed. rc=%d\n", __func__, rc);
+		rc = 0;
+	}
+
+	if (mdss_dsi_pinctrl_set_state(ctrl_pdata, false))
+		pr_debug("reset disable: pinctrl not enabled\n");
+
+	rc = msm_mdss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 0);
+	if (rc)
+		pr_err("%s: failed to disable vregs for %s\n",
+			__func__, __mdss_dsi_pm_name(DSI_PANEL_PM));
+
+	return rc;
+}
+
+/*
+ * Power the panel up: enable the panel regulators and, unless the
+ * reset is deferred to the LP11 init sequence, apply pinctrl and
+ * assert the panel reset.
+ */
+static int mdss_dsi_panel_power_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+	int rc;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	rc = msm_mdss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (rc) {
+		pr_err("%s: failed to enable vregs for %s\n",
+			__func__, __mdss_dsi_pm_name(DSI_PANEL_PM));
+		return rc;
+	}
+
+	/*
+	 * With continuous splash the bootloader has already configured
+	 * the panel GPIOs, so they must be requested here irrespective
+	 * of the lp11_init flag.
+	 */
+	if (pdata->panel_info.cont_splash_enabled ||
+		!pdata->panel_info.mipi.lp11_init) {
+		if (mdss_dsi_pinctrl_set_state(ctrl_pdata, true))
+			pr_debug("reset enable: pinctrl not enabled\n");
+
+		rc = mdss_dsi_panel_reset(pdata, 1);
+		if (rc)
+			pr_err("%s: Panel reset failed. rc=%d\n",
+					__func__, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * Panel power control for low-power (LP) mode entry/exit.
+ * Currently a no-op placeholder; always succeeds.
+ */
+static int mdss_dsi_panel_power_lp(struct mdss_panel_data *pdata, int enable)
+{
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_power_ulp() - switch regulator load modes for ultra
+ * low power (ULP) entry (@enable=1) or exit (@enable=0).
+ *
+ * On failure the modules already switched are reverted to the opposite
+ * mode. The rollback now mirrors the forward loop exactly: DSI_CORE_PM
+ * is skipped (it follows the DSI core clocks) and DSI_PANEL_PM uses
+ * panel_power_data; the original rollback used ctrl_pdata->power_data
+ * for every index, which did not match what was configured.
+ */
+static int mdss_dsi_panel_power_ulp(struct mdss_panel_data *pdata,
+					int enable)
+{
+	int ret = 0, i;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 mode = enable ? DSS_REG_MODE_ULP : DSS_REG_MODE_ENABLE;
+	struct dsi_shared_data *sdata;
+
+	pr_debug("%s: +\n", __func__);
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	sdata = ctrl_pdata->shared_data;
+
+	for (i = 0; i < DSI_MAX_PM; i++) {
+		/*
+		 * Core power module need to be controlled along with
+		 * DSI core clocks.
+		 */
+		if (i == DSI_CORE_PM)
+			continue;
+		if (i == DSI_PANEL_PM)
+			ret = msm_mdss_config_vreg_opt_mode(
+				ctrl_pdata->panel_power_data.vreg_config,
+				ctrl_pdata->panel_power_data.num_vreg, mode);
+		else
+			ret = msm_mdss_config_vreg_opt_mode(
+				sdata->power_data[i].vreg_config,
+				sdata->power_data[i].num_vreg, mode);
+		if (ret) {
+			pr_err("%s: failed to config ulp opt mode for %s.rc=%d\n",
+				__func__, __mdss_dsi_pm_name(i), ret);
+			break;
+		}
+	}
+
+	if (ret) {
+		/* revert the modules configured so far, mirroring above */
+		mode = enable ? DSS_REG_MODE_ENABLE : DSS_REG_MODE_ULP;
+		for (; i >= 0; i--) {
+			if (i == DSI_CORE_PM)
+				continue;
+			if (i == DSI_PANEL_PM)
+				msm_mdss_config_vreg_opt_mode(
+					ctrl_pdata->panel_power_data.vreg_config,
+					ctrl_pdata->panel_power_data.num_vreg,
+					mode);
+			else
+				msm_mdss_config_vreg_opt_mode(
+					sdata->power_data[i].vreg_config,
+					sdata->power_data[i].num_vreg, mode);
+		}
+	}
+	return ret;
+}
+
+/*
+ * mdss_dsi_panel_power_ctrl() - panel power state machine.
+ * @pdata: panel data.
+ * @power_state: requested MDSS_PANEL_POWER_* state.
+ *
+ * Transitions the panel between OFF, ON, LP1 and LP2. The recorded
+ * state is only updated when the transition succeeds; ULP/LP exit
+ * paths jump to 'end' and deliberately keep the previous state.
+ */
+int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata,
+	int power_state)
+{
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	pr_debug("%pS-->%s: cur_power_state=%d req_power_state=%d\n",
+		__builtin_return_address(0), __func__,
+		pinfo->panel_power_state, power_state);
+
+	/* already in the requested state */
+	if (pinfo->panel_power_state == power_state) {
+		pr_debug("%s: no change needed\n", __func__);
+		return 0;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/*
+	 * If a dynamic mode switch is pending, the regulators should not
+	 * be turned off or on.
+	 */
+	if (pdata->panel_info.dynamic_switch_pending)
+		return 0;
+
+	switch (power_state) {
+	case MDSS_PANEL_POWER_OFF:
+		ret = mdss_dsi_panel_power_off(pdata);
+		break;
+	case MDSS_PANEL_POWER_ON:
+		/* exiting a low-power state does not change panel_power_state */
+		if (mdss_dsi_is_panel_on_ulp(pdata)) {
+			ret = mdss_dsi_panel_power_ulp(pdata, false);
+			goto end;
+		} else if (mdss_dsi_is_panel_on_lp(pdata)) {
+			ret = mdss_dsi_panel_power_lp(pdata, false);
+			goto end;
+		} else {
+			ret = mdss_dsi_panel_power_on(pdata);
+		}
+		break;
+	case MDSS_PANEL_POWER_LP1:
+		if (mdss_dsi_is_panel_on_ulp(pdata))
+			ret = mdss_dsi_panel_power_ulp(pdata, false);
+		else
+			ret = mdss_dsi_panel_power_lp(pdata, true);
+		/*
+		 * temp workaround until framework issues pertaining to LP2
+		 * power state transitions are fixed. For now, we internally
+		 * transition to LP2 state whenever core power is turned off
+		 * in LP1 state
+		 */
+		break;
+	case MDSS_PANEL_POWER_LP2:
+		if (!ctrl_pdata->core_power)
+			ret = mdss_dsi_panel_power_ulp(pdata, true);
+		break;
+	default:
+		pr_err("%s: unknown panel power state requested (%d)\n",
+			__func__, power_state);
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		pinfo->panel_power_state = power_state;
+end:
+	return ret;
+}
+
+/* Release the DT-parsed regulator table of one power module. */
+static void mdss_dsi_put_dt_vreg_data(struct device *dev,
+	struct mdss_module_power *module_power)
+{
+	struct mdss_vreg *cfg;
+
+	if (!module_power) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	cfg = module_power->vreg_config;
+	if (cfg) {
+		devm_kfree(dev, cfg);
+		module_power->vreg_config = NULL;
+	}
+	module_power->num_vreg = 0;
+}
+
+/*
+ * mdss_dsi_get_dt_vreg_data() - parse the regulator supplies of one DSI
+ * power module from the device tree.
+ * @dev: device used for devm allocations.
+ * @of_node: node containing (or referencing) the supply entries.
+ * @mp: output descriptor; vreg_config is devm-allocated here.
+ * @module: which DSI power module's supply node to look up.
+ *
+ * Returns 0 on success; a missing supply node is not an error (num_vreg
+ * stays 0). On parse/alloc failure the table is freed and a negative
+ * errno is returned.
+ */
+static int mdss_dsi_get_dt_vreg_data(struct device *dev,
+	struct device_node *of_node, struct mdss_module_power *mp,
+	enum dsi_pm_type module)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *supply_node = NULL;
+	const char *pm_supply_name = NULL;
+	struct device_node *supply_root_node = NULL;
+
+	if (!dev || !mp) {
+		pr_err("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	mp->num_vreg = 0;
+	pm_supply_name = __mdss_dsi_pm_supply_node_name(module);
+	supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
+	if (!supply_root_node) {
+		/*
+		 * Try to get the root node for panel power supply using
+		 * of_parse_phandle() API if of_get_child_by_name() API fails.
+		 */
+		supply_root_node = of_parse_phandle(of_node, pm_supply_name, 0);
+		if (!supply_root_node) {
+			pr_err("no supply entry present: %s\n", pm_supply_name);
+			goto novreg;
+		}
+	}
+
+
+	/* first pass: count the supply entries */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+
+	if (mp->num_vreg == 0) {
+		pr_debug("%s: no vreg\n", __func__);
+		goto novreg;
+	} else {
+		pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg);
+	}
+
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) *
+		mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	/* second pass: fill one mdss_vreg entry per supply node */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string(supply_node,
+			"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("%s: error reading name. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name,
+			ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
+		/* vreg-min-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("%s: error reading min volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		/* vreg-max-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("%s: error reading max volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		/* enable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("%s: error reading enable load. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+		/* disable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("%s: error reading disable load. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+		/* ulp-load: optional, falls back to the enable load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-ulp-load", &tmp);
+		if (rc) {
+			pr_warn("%s: error reading ulp load. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp :
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]);
+
+		/* pre-sleep: optional sleep times around on/off transitions */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-pre-on-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].pre_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-pre-off-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].pre_off_sleep = tmp;
+		}
+
+		/* post-sleep */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-post-on-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].post_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-post-off-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].post_off_sleep = tmp;
+		}
+
+		pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, ulp_load=%d preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_ULP],
+			mp->vreg_config[i].pre_on_sleep,
+			mp->vreg_config[i].post_on_sleep,
+			mp->vreg_config[i].pre_off_sleep,
+			mp->vreg_config[i].post_off_sleep
+			);
+		++i;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+novreg:
+	mp->num_vreg = 0;
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_get_panel_cfg() - fetch the DSI panel configuration string.
+ * @panel_cfg: destination buffer for the configuration string.
+ * @ctrl: controller used to reach the mdss utility interface.
+ *
+ * Returns the copied string length, 0 when no DSI panel config exists,
+ * or the panel_intf_type() error code.
+ *
+ * NOTE(review): strlcpy is bounded by sizeof(pan_cfg->arg_cfg), i.e.
+ * the SOURCE size — this assumes panel_cfg is at least that large;
+ * confirm against the callers' buffer sizes.
+ */
+static int mdss_dsi_get_panel_cfg(char *panel_cfg,
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+
+	if (!panel_cfg)
+		return MDSS_PANEL_INTF_INVALID;
+
+	pan_cfg = ctrl->mdss_util->panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (!pan_cfg) {
+		panel_cfg[0] = 0;
+		return 0;
+	}
+
+	pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__,
+		 pan_cfg->arg_cfg);
+	rc = strlcpy(panel_cfg, pan_cfg->arg_cfg,
+		     sizeof(pan_cfg->arg_cfg));
+	return rc;
+}
+
+/*
+ * Debugfs command-buffer wrapper: holds the raw DSI command stream and
+ * a lazily built hex-string rendering of it for read() consumers.
+ */
+struct buf_data {
+	char *buf; /* cmd buf */
+	int blen; /* cmd buf length */
+	char *string_buf; /* cmd buf as string, 3 bytes per number */
+	int sblen; /* string buffer length */
+	int sync_flag; /* when set, writes are applied to the live cmds */
+	struct mutex dbg_mutex; /* mutex to synchronize read/write/flush */
+};
+
+/* Per-controller debugfs state: a shadow ctrl_pdata plus editable cmds. */
+struct mdss_dsi_debugfs_info {
+	struct dentry *root; /* debugfs directory for this controller */
+	struct mdss_dsi_ctrl_pdata ctrl_pdata; /* editable copy of the ctrl */
+	struct buf_data on_cmd; /* panel-on command buffer */
+	struct buf_data off_cmd; /* panel-off command buffer */
+	u32 override_flag; /* set when debugfs values should override DT */
+};
+
+/* Open hook for the link-state debugfs file; stream is non-seekable. */
+static int mdss_dsi_cmd_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * Read hook: reports the current DSI command link state as either
+ * "dsi_hs_mode" or "dsi_lp_mode" on the first read, then EOF.
+ */
+static ssize_t mdss_dsi_cmd_state_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int *link_state = file->private_data;
+	char buffer[32];
+	int blen = 0;
+
+	if (*ppos)
+		return 0;
+
+	if ((*link_state) == DSI_HS_MODE)
+		blen = snprintf(buffer, sizeof(buffer), "dsi_hs_mode\n");
+	else
+		blen = snprintf(buffer, sizeof(buffer), "dsi_lp_mode\n");
+
+	if (blen < 0)
+		return 0;
+
+	/* never copy more than the caller's buffer can hold */
+	if (blen > count)
+		blen = count;
+
+	if (copy_to_user(buf, buffer, blen))
+		return -EFAULT;
+
+	*ppos += blen;
+	return blen;
+}
+
+/*
+ * Write hook: sets the DSI command link state; any input containing
+ * "dsi_hs_mode" selects high-speed mode, everything else low-power.
+ *
+ * The buffer is allocated count+1 bytes so the terminating NUL no
+ * longer overwrites the last user byte (an exact "dsi_hs_mode" write
+ * without a trailing newline was previously misparsed as LP mode).
+ */
+static ssize_t mdss_dsi_cmd_state_write(struct file *file,
+			const char __user *p, size_t count, loff_t *ppos)
+{
+	int *link_state = file->private_data;
+	char *input;
+
+	if (!count) {
+		pr_err("%s: Zero bytes to be written\n", __func__);
+		return -EINVAL;
+	}
+
+	/* bound the user-controlled allocation; valid inputs are short */
+	if (count > 4096)
+		return -EINVAL;
+
+	input = kmalloc(count + 1, GFP_KERNEL);
+	if (!input)
+		return -ENOMEM;
+
+	if (copy_from_user(input, p, count)) {
+		kfree(input);
+		return -EFAULT;
+	}
+	input[count] = '\0';
+
+	if (strnstr(input, "dsi_hs_mode", strlen("dsi_hs_mode")))
+		*link_state = DSI_HS_MODE;
+	else
+		*link_state = DSI_LP_MODE;
+
+	kfree(input);
+	return count;
+}
+
+/* File operations for the DSI command link-state debugfs node. */
+static const struct file_operations mdss_dsi_cmd_state_fop = {
+	.open = mdss_dsi_cmd_state_open,
+	.read = mdss_dsi_cmd_state_read,
+	.write = mdss_dsi_cmd_state_write,
+};
+
+/*
+ * debugfs open for the dsi_*_cmd nodes: private data is the struct
+ * buf_data attached when the file was created; reads/writes stream the
+ * command buffer, so seeking is disallowed.
+ */
+static int mdss_dsi_cmd_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->private_data = inode->i_private;
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * debugfs read for dsi_*_cmd: render the binary DCS command buffer as
+ * hex text ("xx xx xx ...", one line per command) into a cached string
+ * buffer, then page it out via simple_read_from_buffer(). The cache is
+ * rebuilt on rewind (*ppos == 0) and freed at EOF.
+ *
+ * Fixes: use scnprintf() instead of snprintf() so blen counts bytes
+ * actually stored — snprintf() returns the would-be length, which let
+ * blen exceed bsize on truncation and made buffer[blen] = '\0' write out
+ * of bounds; and mask each byte with 0xff so a negative char does not
+ * sign-extend into an 8-digit "ffffffxx" rendering.
+ */
+static ssize_t mdss_dsi_cmd_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct buf_data *pcmds = file->private_data;
+	char *bp;
+	ssize_t ret = 0;
+
+	mutex_lock(&pcmds->dbg_mutex);
+	if (*ppos == 0) {
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+	}
+
+	if (!pcmds->string_buf) {
+		/*
+		 * Buffer size is the sum of cmd length (3 bytes per number)
+		 * with NULL terminater
+		 */
+		int bsize = ((pcmds->blen)*3 + 1);
+		int blen = 0;
+		char *buffer;
+
+		buffer = kmalloc(bsize, GFP_KERNEL);
+		if (!buffer) {
+			mutex_unlock(&pcmds->dbg_mutex);
+			return -ENOMEM;
+		}
+
+		bp = pcmds->buf;
+		while ((blen < (bsize-1)) &&
+		       (bp < ((pcmds->buf) + (pcmds->blen)))) {
+			struct dsi_ctrl_hdr dchdr =
+				*((struct dsi_ctrl_hdr *)bp);
+			int dhrlen = sizeof(dchdr), dlen;
+			char *tmp = (char *)(&dchdr);
+
+			dlen = dchdr.dlen;
+			/* display dlen in the same byte order flush expects */
+			dchdr.dlen = htons(dchdr.dlen);
+			while (dhrlen--)
+				blen += scnprintf(buffer+blen, bsize-blen,
+					"%02x ", (*tmp++) & 0xff);
+
+			bp += sizeof(dchdr);
+			while (dlen--)
+				blen += scnprintf(buffer+blen, bsize-blen,
+					"%02x ", (*bp++) & 0xff);
+			/* replace the trailing space with a newline */
+			buffer[blen-1] = '\n';
+		}
+		buffer[blen] = '\0';
+		pcmds->string_buf = buffer;
+		pcmds->sblen = blen;
+	}
+
+	/*
+	 * The max value of count is PAGE_SIZE(4096).
+	 * It may need multiple times of reading if string buf is too large
+	 */
+	if (*ppos >= (pcmds->sblen)) {
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+		mutex_unlock(&pcmds->dbg_mutex);
+		return 0; /* the end */
+	}
+	ret = simple_read_from_buffer(buf, count, ppos, pcmds->string_buf,
+				      pcmds->sblen);
+	mutex_unlock(&pcmds->dbg_mutex);
+	return ret;
+}
+
+/*
+ * debugfs write for dsi_*_cmd: accumulate the user's hex-text string in
+ * string_buf (grown with krealloc so writing in batches works); the
+ * parse back to binary happens in mdss_dsi_cmd_flush() on close.
+ *
+ * Fixes: publish the krealloc'd pointer before any error return — the
+ * old code left pcmds->string_buf stale (freed) when krealloc moved the
+ * block and simple_write_to_buffer() then failed; and terminate/account
+ * at blen (total accumulated bytes) instead of at ret/count, which
+ * truncated the buffer mid-way on batched writes.
+ */
+static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p,
+				  size_t count, loff_t *ppos)
+{
+	struct buf_data *pcmds = file->private_data;
+	ssize_t ret = 0;
+	int blen = 0;
+	char *string_buf;
+
+	mutex_lock(&pcmds->dbg_mutex);
+	if (*ppos == 0) {
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+	}
+
+	/* Allocate memory for the received string */
+	blen = count + (pcmds->sblen);
+	string_buf = krealloc(pcmds->string_buf, blen + 1, GFP_KERNEL);
+	if (!string_buf) {
+		pr_err("%s: Failed to allocate memory\n", __func__);
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -ENOMEM;
+	}
+	/* krealloc may have moved the block; never keep the old pointer */
+	pcmds->string_buf = string_buf;
+
+	/* Writing in batches is possible */
+	ret = simple_write_to_buffer(string_buf, blen, ppos, p, count);
+	if (ret < 0) {
+		pr_err("%s: Failed to copy data\n", __func__);
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -EINVAL;
+	}
+
+	/* terminate after all accumulated bytes, not just this batch */
+	string_buf[blen] = '\0';
+	pcmds->sblen = blen;
+	mutex_unlock(&pcmds->dbg_mutex);
+	return ret;
+}
+
+/*
+ * debugfs flush (close) for dsi_*_cmd: parse the accumulated hex string
+ * back into a binary DCS command buffer and validate each embedded
+ * dsi_ctrl_hdr before publishing it to buf/blen. If sync_flag is set the
+ * previous buf is still owned by the panel cmds and must not be freed.
+ *
+ * Fix: the payload-length check must reserve room for the header it
+ * follows. The old check (dlen > len) accepted dlen == len, which drove
+ * 'len' negative after the subtractions; the signed/unsigned comparison
+ * in the loop condition then kept walking 'bp' past the end of the
+ * buffer (out-of-bounds read). Checking against len - sizeof(*dchdr)
+ * keeps 'len' non-negative and the walk in bounds.
+ */
+static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id)
+{
+	struct buf_data *pcmds = file->private_data;
+	int blen, len, i;
+	char *buf, *bufp, *bp;
+	struct dsi_ctrl_hdr *dchdr;
+
+	mutex_lock(&pcmds->dbg_mutex);
+
+	if (!pcmds->string_buf) {
+		mutex_unlock(&pcmds->dbg_mutex);
+		return 0;
+	}
+
+	/*
+	 * Allocate memory for command buffer
+	 * 3 bytes per number, and 2 bytes for the last one
+	 */
+	blen = ((pcmds->sblen) + 2) / 3;
+	buf = kcalloc(1, blen, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: Failed to allocate memory\n", __func__);
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -ENOMEM;
+	}
+
+	/* Translate the input string to command array */
+	bufp = pcmds->string_buf;
+	for (i = 0; i < blen; i++) {
+		uint32_t value = 0;
+		int step = 0;
+
+		if (sscanf(bufp, "%02x%n", &value, &step) > 0) {
+			*(buf+i) = (char)value;
+			bufp += step;
+		}
+	}
+
+	/* Scan dcs commands */
+	bp = buf;
+	len = blen;
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		dchdr->dlen = ntohs(dchdr->dlen);
+		/* payload must fit in what remains AFTER the header */
+		if (dchdr->dlen < 0 ||
+		    dchdr->dlen > (len - (int)sizeof(*dchdr))) {
+			pr_err("%s: dtsi cmd=%x error, len=%d\n",
+				__func__, dchdr->dtype, dchdr->dlen);
+			kfree(buf);
+			mutex_unlock(&pcmds->dbg_mutex);
+			return -EINVAL;
+		}
+		bp += sizeof(*dchdr);
+		len -= sizeof(*dchdr);
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error!\n", __func__,
+				bp[0], len);
+		kfree(buf);
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -EINVAL;
+	}
+
+	if (pcmds->sync_flag) {
+		/* old buf belongs to the panel cmds; just take over */
+		pcmds->buf = buf;
+		pcmds->blen = blen;
+		pcmds->sync_flag = 0;
+	} else {
+		kfree(pcmds->buf);
+		pcmds->buf = buf;
+		pcmds->blen = blen;
+	}
+	mutex_unlock(&pcmds->dbg_mutex);
+	return 0;
+}
+
+/*
+ * File operations for the dsi_on_cmd / dsi_off_cmd nodes. .flush runs on
+ * every close and converts the accumulated text back to binary commands.
+ */
+static const struct file_operations mdss_dsi_cmd_fop = {
+	.open = mdss_dsi_cmd_open,
+	.read = mdss_dsi_cmd_read,
+	.write = mdss_dsi_cmd_write,
+	.flush = mdss_dsi_cmd_flush,
+};
+
+/*
+ * Create a debugfs node that exposes a DCS command sequence. The shadow
+ * buf_data is seeded from the panel's current command set; sync_flag = 1
+ * records that 'buf' is still owned by the panel cmds (flush must not
+ * free it the first time around).
+ */
+struct dentry *dsi_debugfs_create_dcs_cmd(const char *name, umode_t mode,
+		struct dentry *parent, struct buf_data *cmd,
+		struct dsi_panel_cmds ctrl_cmds)
+{
+	mutex_init(&cmd->dbg_mutex);
+	cmd->string_buf = NULL;
+	cmd->sblen = 0;
+	cmd->buf = ctrl_cmds.buf;
+	cmd->blen = ctrl_cmds.blen;
+	cmd->sync_flag = 1;
+
+	return debugfs_create_file(name, mode, parent, cmd,
+				   &mdss_dsi_cmd_fop);
+}
+
+/* Shorthand: create a 0644 DCS-command debugfs node under 'node'. */
+#define DEBUGFS_CREATE_DCS_CMD(name, node, cmd, ctrl_cmd) \
+	dsi_debugfs_create_dcs_cmd(name, 0644, node, cmd, ctrl_cmd)
+
+/*
+ * Build the per-controller debugfs tree ("dsi_ctrl_pdata") under
+ * @parent. All tunables are bound to the SHADOW copy in dfs->ctrl_pdata;
+ * they take effect only when the user sets override_flag and the panel
+ * is next powered on (see mdss_dsi_validate_debugfs_info()).
+ *
+ * Returns 0 on success, -ENOMEM/-ENODEV on allocation or debugfs
+ * failure.
+ */
+static int mdss_dsi_debugfs_setup(struct mdss_panel_data *pdata,
+			struct dentry *parent)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata, *dfs_ctrl;
+	struct mdss_dsi_debugfs_info *dfs;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	dfs = kcalloc(1, sizeof(*dfs), GFP_KERNEL);
+	if (!dfs)
+		return -ENOMEM;
+
+	dfs->root = debugfs_create_dir("dsi_ctrl_pdata", parent);
+	if (IS_ERR_OR_NULL(dfs->root)) {
+		pr_err("%s: debugfs_create_dir dsi fail, error %ld\n",
+			__func__, PTR_ERR(dfs->root));
+		kfree(dfs);
+		return -ENODEV;
+	}
+
+	dfs_ctrl = &dfs->ctrl_pdata;
+	debugfs_create_u32("override_flag", 0644, dfs->root,
+			   &dfs->override_flag);
+
+	/* sync/trigger knobs operate on the shadow controller copy */
+	debugfs_create_bool("cmd_sync_wait_broadcast", 0644, dfs->root,
+			    &dfs_ctrl->cmd_sync_wait_broadcast);
+	debugfs_create_bool("cmd_sync_wait_trigger", 0644, dfs->root,
+			    &dfs_ctrl->cmd_sync_wait_trigger);
+
+	debugfs_create_file("dsi_on_cmd_state", 0644, dfs->root,
+		&dfs_ctrl->on_cmds.link_state, &mdss_dsi_cmd_state_fop);
+	debugfs_create_file("dsi_off_cmd_state", 0644, dfs->root,
+		&dfs_ctrl->off_cmds.link_state, &mdss_dsi_cmd_state_fop);
+
+	/* command buffers are seeded from the LIVE controller's cmds */
+	DEBUGFS_CREATE_DCS_CMD("dsi_on_cmd", dfs->root, &dfs->on_cmd,
+				ctrl_pdata->on_cmds);
+	DEBUGFS_CREATE_DCS_CMD("dsi_off_cmd", dfs->root, &dfs->off_cmd,
+				ctrl_pdata->off_cmds);
+
+	debugfs_create_u32("dsi_err_counter", 0644, dfs->root,
+			   &dfs_ctrl->err_cont.max_err_index);
+	debugfs_create_u32("dsi_err_time_delta", 0644, dfs->root,
+			   &dfs_ctrl->err_cont.err_time_delta);
+
+	dfs->override_flag = 0;
+	/* snapshot the live controller so the shadow starts in sync */
+	dfs->ctrl_pdata = *ctrl_pdata;
+	ctrl_pdata->debugfs_info = dfs;
+	return 0;
+}
+
+/*
+ * Create the debugfs tree for @ctrl_pdata under the panel-info debugfs
+ * root. Returns 0 on success or a negative errno.
+ *
+ * Fix: work through pointers instead of copying the whole (large)
+ * struct mdss_panel_info onto the stack, and drop the dead NULL check
+ * on the address of an embedded member (it can never be NULL).
+ */
+static int mdss_dsi_debugfs_init(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *panel_info;
+
+	if (!ctrl_pdata) {
+		pr_warn_once("%s: Invalid pdata!\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl_pdata->panel_data;
+	panel_info = &pdata->panel_info;
+
+	rc = mdss_dsi_debugfs_setup(pdata, panel_info->debugfs_info->root);
+	if (rc) {
+		pr_err("%s: Error in initilizing dsi ctrl debugfs\n",
+				__func__);
+		return rc;
+	}
+
+	pr_debug("%s: Initialized mdss_dsi_debugfs_init\n", __func__);
+	return 0;
+}
+
+/*
+ * Tear down the debugfs tree of every controller in the panel chain.
+ *
+ * Fix: clear ctrl->debugfs_info after freeing it — mdss_dsi_on() tests
+ * this pointer and calls into it, so leaving it dangling would cause a
+ * use-after-free if the controller is powered on again after cleanup.
+ */
+static void mdss_dsi_debugfs_cleanup(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_panel_data *pdata = &ctrl_pdata->panel_data;
+
+	do {
+		struct mdss_dsi_ctrl_pdata *ctrl = container_of(pdata,
+			struct mdss_dsi_ctrl_pdata, panel_data);
+		struct mdss_dsi_debugfs_info *dfs = ctrl->debugfs_info;
+
+		if (dfs && dfs->root)
+			debugfs_remove_recursive(dfs->root);
+		kfree(dfs);
+		ctrl->debugfs_info = NULL;
+		pdata = pdata->next;
+	} while (pdata);
+	pr_debug("%s: Cleaned up mdss_dsi_debugfs_info\n", __func__);
+}
+
+/*
+ * Rebuild the live dsi_panel_cmds descriptor array from the debugfs
+ * shadow buffer produced by mdss_dsi_cmd_flush(). Ownership of
+ * new_cmds->buf transfers to original_pcmds; sync_flag is set so flush
+ * will not free it later. No-op when sync_flag is already set (nothing
+ * was edited).
+ *
+ * Fix: like mdss_dsi_cmd_flush(), the per-command validation must
+ * reserve room for the header (dlen > len - sizeof(*dchdr)) and reject
+ * negative dlen; the old dlen > len check let 'len' go negative and the
+ * signed/unsigned loop comparison then walked past the buffer.
+ */
+static int _mdss_dsi_refresh_cmd(struct buf_data *new_cmds,
+	struct dsi_panel_cmds *original_pcmds)
+{
+	char *bp;
+	int len, cnt, i;
+	struct dsi_ctrl_hdr *dchdr;
+	struct dsi_cmd_desc *cmds;
+
+	if (new_cmds->sync_flag)
+		return 0;
+
+	bp = new_cmds->buf;
+	len = new_cmds->blen;
+	cnt = 0;
+	/* Scan dcs commands and get dcs command count */
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		if (dchdr->dlen < 0 ||
+		    dchdr->dlen > (len - (int)sizeof(*dchdr))) {
+			pr_err("%s: dtsi cmd=%x error, len=%d\n",
+				__func__, dchdr->dtype, dchdr->dlen);
+			return -EINVAL;
+		}
+		bp += sizeof(*dchdr) + dchdr->dlen;
+		len -= sizeof(*dchdr) + dchdr->dlen;
+		cnt++;
+	}
+
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error!\n", __func__,
+				bp[0], len);
+		return -EINVAL;
+	}
+
+	/* Reallocate space for dcs commands */
+	cmds = kcalloc(cnt, sizeof(struct dsi_cmd_desc), GFP_KERNEL);
+	if (!cmds)
+		return -ENOMEM;
+
+	kfree(original_pcmds->buf);
+	kfree(original_pcmds->cmds);
+	original_pcmds->cmd_cnt = cnt;
+	original_pcmds->cmds = cmds;
+	original_pcmds->buf = new_cmds->buf;
+	original_pcmds->blen = new_cmds->blen;
+
+	/* point each descriptor at its header/payload inside buf */
+	bp = original_pcmds->buf;
+	len = original_pcmds->blen;
+	for (i = 0; i < cnt; i++) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		len -= sizeof(*dchdr);
+		bp += sizeof(*dchdr);
+		original_pcmds->cmds[i].dchdr = *dchdr;
+		original_pcmds->cmds[i].payload = bp;
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+
+	/* buf is now owned by original_pcmds; flush must not free it */
+	new_cmds->sync_flag = 1;
+	return 0;
+}
+
+/*
+ * Copy the user-edited debugfs shadow settings into the live DSI
+ * controller: sync/trigger flags, on/off command buffers, link states,
+ * and the error-containment tunables (clamped to sane ranges; values
+ * outside the range are reset back to the live controller's values).
+ *
+ * NOTE(review): the _mdss_dsi_refresh_cmd() return values are ignored —
+ * a malformed edited command buffer is silently dropped. Confirm this
+ * best-effort behavior is intended.
+ */
+static void mdss_dsi_debugfsinfo_to_dsictrl_info(
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_debugfs_info *dfs = ctrl_pdata->debugfs_info;
+	struct dsi_err_container *dfs_err_cont = &dfs->ctrl_pdata.err_cont;
+	struct dsi_err_container *err_cont = &ctrl_pdata->err_cont;
+
+	ctrl_pdata->cmd_sync_wait_broadcast =
+			dfs->ctrl_pdata.cmd_sync_wait_broadcast;
+	ctrl_pdata->cmd_sync_wait_trigger =
+			dfs->ctrl_pdata.cmd_sync_wait_trigger;
+
+	_mdss_dsi_refresh_cmd(&dfs->on_cmd, &ctrl_pdata->on_cmds);
+	_mdss_dsi_refresh_cmd(&dfs->off_cmd, &ctrl_pdata->off_cmds);
+
+	ctrl_pdata->on_cmds.link_state =
+			dfs->ctrl_pdata.on_cmds.link_state;
+	ctrl_pdata->off_cmds.link_state =
+			dfs->ctrl_pdata.off_cmds.link_state;
+
+	/* keep error counter between 2 to 10 */
+	if (dfs_err_cont->max_err_index >= 2 &&
+		dfs_err_cont->max_err_index <= MAX_ERR_INDEX) {
+		err_cont->max_err_index = dfs_err_cont->max_err_index;
+	} else {
+		dfs_err_cont->max_err_index = err_cont->max_err_index;
+		pr_warn("resetting the dsi error counter to %d\n",
+			err_cont->max_err_index);
+	}
+
+	/* keep error duration between 16 ms to 100 seconds */
+	if (dfs_err_cont->err_time_delta >= 16 &&
+		dfs_err_cont->err_time_delta <= 100000) {
+		err_cont->err_time_delta = dfs_err_cont->err_time_delta;
+	} else {
+		dfs_err_cont->err_time_delta = err_cont->err_time_delta;
+		pr_warn("resetting the dsi error time delta to %d ms\n",
+			err_cont->err_time_delta);
+	}
+}
+
+/*
+ * Apply pending debugfs overrides to the live controller. The override
+ * is one-shot: the flag is consumed here, then the shadow settings are
+ * synced into ctrl_pdata.
+ */
+static void mdss_dsi_validate_debugfs_info(
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_debugfs_info *dfs = ctrl_pdata->debugfs_info;
+
+	if (!dfs->override_flag)
+		return;
+
+	pr_debug("%s: Overriding dsi ctrl_pdata with debugfs data\n",
+		 __func__);
+	dfs->override_flag = 0;
+	mdss_dsi_debugfsinfo_to_dsictrl_info(ctrl_pdata);
+}
+
+/*
+ * mdss_dsi_off() - take the DSI link/controller down and move panel
+ * power to @power_state (one of the MDSS_PANEL_POWER_* states).
+ *
+ * Sequence is order-sensitive: link clocks off -> controller/PHY
+ * disabled -> core clocks off -> panel power rails. When the target
+ * state is still "on" (low-power/always-on), the link/PHY teardown is
+ * skipped and only the panel power state changes.
+ *
+ * Returns 0 on success or the panel power-control error.
+ */
+static int mdss_dsi_off(struct mdss_panel_data *pdata, int power_state)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *panel_info = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	panel_info = &ctrl_pdata->panel_data.panel_info;
+
+	pr_debug("%s+: ctrl=%pK ndx=%d power_state=%d\n",
+		__func__, ctrl_pdata, ctrl_pdata->ndx, power_state);
+
+	if (power_state == panel_info->panel_power_state) {
+		pr_debug("%s: No change in power state %d -> %d\n", __func__,
+			panel_info->panel_power_state, power_state);
+		goto end;
+	}
+
+	if (mdss_panel_is_power_on(power_state)) {
+		pr_debug("%s: dsi_off with panel always on\n", __func__);
+		goto panel_power_ctrl;
+	}
+
+	/*
+	 * Link clocks should be turned off before PHY can be disabled.
+	 * For command mode panels, all clocks are turned off prior to reaching
+	 * here, so core clocks should be turned on before accessing hardware
+	 * registers. For video mode panel, turn off link clocks and then
+	 * disable PHY
+	 */
+	if (pdata->panel_info.type == MIPI_CMD_PANEL)
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+	else
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_LINK_CLK, MDSS_DSI_CLK_OFF);
+
+	if (!pdata->panel_info.ulps_suspend_enabled) {
+		/* disable DSI controller */
+		mdss_dsi_controller_cfg(0, pdata);
+
+		/* disable DSI phy */
+		mdss_dsi_phy_disable(ctrl_pdata);
+	}
+	ctrl_pdata->ctrl_state &= ~CTRL_STATE_DSI_ACTIVE;
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
+
+panel_power_ctrl:
+	ret = mdss_dsi_panel_power_ctrl(pdata, power_state);
+	if (ret) {
+		pr_err("%s: Panel power off failed\n", __func__);
+		goto end;
+	}
+
+	/* deferred frame-rate change takes effect across suspend/resume */
+	if (panel_info->dynamic_fps
+	    && (panel_info->dfps_update == DFPS_SUSPEND_RESUME_MODE)
+	    && (panel_info->new_fps != panel_info->mipi.frame_rate))
+		panel_info->mipi.frame_rate = panel_info->new_fps;
+
+	/* Initialize Max Packet size for DCS reads */
+	ctrl_pdata->cur_max_pkt_size = 0;
+end:
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_switch_mode() - perform an immediate dynamic mode switch.
+ * @mode: MIPI_VIDEO_PANEL / MIPI_CMD_PANEL to flip the operating mode,
+ *        or SWITCH_RESOLUTION for a same-mode timing change.
+ *
+ * Requires the panel to advertise an "immediate" dms_mode; otherwise
+ * returns -EPERM. The controller's switch_mode() hook runs with all DSI
+ * clocks held on, preceded by a ctrl re-setup for resolution switches.
+ */
+int mdss_dsi_switch_mode(struct mdss_panel_data *pdata, int mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mipi_panel_info *pinfo;
+	bool dsi_ctrl_setup_needed = false;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s, start\n", __func__);
+
+	pinfo = &pdata->panel_info.mipi;
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+
+	if ((pinfo->dms_mode != DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE) &&
+			(pinfo->dms_mode != DYNAMIC_MODE_SWITCH_IMMEDIATE)) {
+		pr_debug("%s: Dynamic mode switch not enabled.\n", __func__);
+		return -EPERM;
+	}
+
+	/* translate panel type into the switch_mode() command code */
+	if (mode == MIPI_VIDEO_PANEL) {
+		mode = SWITCH_TO_VIDEO_MODE;
+	} else if (mode == MIPI_CMD_PANEL) {
+		mode = SWITCH_TO_CMD_MODE;
+	} else if (mode == SWITCH_RESOLUTION) {
+		dsi_ctrl_setup_needed = true;
+		pr_debug("Resolution switch mode selected\n");
+	} else {
+		pr_err("Invalid mode selected, mode=%d\n", mode);
+		return -EINVAL;
+	}
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	if (dsi_ctrl_setup_needed)
+		mdss_dsi_ctrl_setup(ctrl_pdata);
+	ctrl_pdata->switch_mode(pdata, mode);
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+	pr_debug("%s, end\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_dsi_reconfig() - re-initialize the DSI controller after an
+ * immediate dynamic mode switch: software reset, ctrl re-setup and
+ * controller re-enable, all with clocks held on. No-op for panels whose
+ * dms_mode is not DYNAMIC_MODE_SWITCH_IMMEDIATE.
+ */
+static int mdss_dsi_reconfig(struct mdss_panel_data *pdata, int mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mipi_panel_info *pinfo;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s, start\n", __func__);
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	pinfo = &pdata->panel_info.mipi;
+
+	if (pinfo->dms_mode == DYNAMIC_MODE_SWITCH_IMMEDIATE) {
+		/* reset DSI */
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+		mdss_dsi_sw_reset(ctrl_pdata, true);
+		mdss_dsi_ctrl_setup(ctrl_pdata);
+		mdss_dsi_controller_cfg(true, pdata);
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	}
+
+	pr_debug("%s, end\n", __func__);
+	return 0;
+}
+/*
+ * Update the cached panel configuration for the requested operating
+ * mode (DSI_CMD_MODE or video mode): panel type, vsync settings,
+ * partial-update capability and the derived destination pixel format.
+ * Always returns 0.
+ */
+static int mdss_dsi_update_panel_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+			int mode)
+{
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	bool cmd_mode = (mode == DSI_CMD_MODE);
+
+	if (cmd_mode) {
+		pinfo->mipi.mode = DSI_CMD_MODE;
+		pinfo->type = MIPI_CMD_PANEL;
+		pinfo->mipi.vsync_enable = 1;
+		pinfo->mipi.hw_vsync_mode = 1;
+		pinfo->partial_update_enabled = pinfo->partial_update_supported;
+	} else { /*video mode*/
+		pinfo->mipi.mode = DSI_VIDEO_MODE;
+		pinfo->type = MIPI_VIDEO_PANEL;
+		pinfo->mipi.vsync_enable = 0;
+		pinfo->mipi.hw_vsync_mode = 0;
+		pinfo->partial_update_enabled = 0;
+	}
+
+	ctrl_pdata->panel_mode = pinfo->mipi.mode;
+	mdss_panel_get_dst_fmt(pinfo->bpp, pinfo->mipi.mode,
+			pinfo->mipi.pixel_packing, &(pinfo->mipi.dst_format));
+	return 0;
+}
+
+/*
+ * mdss_dsi_on() - power up the DSI controller for @pdata.
+ *
+ * Order-sensitive bring-up: panel power rails -> clock source -> core
+ * clocks -> PHY reset/init + ctrl setup -> link clocks -> sw reset ->
+ * optional LP11 panel reset. Early-outs: panel already interactive
+ * (cont-splash path, only restores interrupt masks) or resuming from a
+ * low-power state (rails only).
+ *
+ * Returns 0 on success or a negative errno from power/clock setup.
+ */
+int mdss_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int cur_power_state;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/* apply any pending debugfs overrides before configuring hw */
+	if (ctrl_pdata->debugfs_info)
+		mdss_dsi_validate_debugfs_info(ctrl_pdata);
+
+	cur_power_state = pdata->panel_info.panel_power_state;
+	pr_debug("%s+: ctrl=%pK ndx=%d cur_power_state=%d\n", __func__,
+		ctrl_pdata, ctrl_pdata->ndx, cur_power_state);
+
+	pinfo = &pdata->panel_info;
+	mipi = &pdata->panel_info.mipi;
+
+	if (mdss_dsi_is_panel_on_interactive(pdata)) {
+		/*
+		 * all interrupts are disabled at LK
+		 * for cont_splash case, intr mask bits need
+		 * to be restored to allow dcs command be
+		 * sent to panel
+		 */
+		mdss_dsi_restore_intr_mask(ctrl_pdata);
+		pr_debug("%s: panel already on\n", __func__);
+		goto end;
+	}
+
+	ret = mdss_dsi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_ON);
+	if (ret) {
+		pr_err("%s:Panel power on failed. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	if (mdss_panel_is_power_on(cur_power_state)) {
+		pr_debug("%s: dsi_on from panel low power state\n", __func__);
+		goto end;
+	}
+
+	ret = mdss_dsi_set_clk_src(ctrl_pdata);
+	if (ret) {
+		pr_err("%s: failed to set clk src. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	/*
+	 * Enable DSI core clocks prior to resetting and initializing DSI
+	 * Phy. Phy and ctrl setup need to be done before enabling the link
+	 * clocks.
+	 */
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+
+	/*
+	 * If ULPS during suspend feature is enabled, then DSI PHY was
+	 * left on during suspend. In this case, we do not need to reset/init
+	 * PHY. This would have already been done when the CORE clocks are
+	 * turned on. However, if cont splash is disabled, the first time DSI
+	 * is powered on, phy init needs to be done unconditionally.
+	 */
+	if (!pdata->panel_info.ulps_suspend_enabled || !ctrl_pdata->ulps) {
+		mdss_dsi_phy_sw_reset(ctrl_pdata);
+		mdss_dsi_phy_init(ctrl_pdata);
+		mdss_dsi_ctrl_setup(ctrl_pdata);
+	}
+	ctrl_pdata->ctrl_state |= CTRL_STATE_DSI_ACTIVE;
+
+	/* DSI link clocks need to be on prior to ctrl sw reset */
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_LINK_CLK, MDSS_DSI_CLK_ON);
+	mdss_dsi_sw_reset(ctrl_pdata, true);
+
+	/*
+	 * Issue hardware reset line after enabling the DSI clocks and data
+	 * data lanes for LP11 init
+	 */
+	if (mipi->lp11_init) {
+		if (mdss_dsi_pinctrl_set_state(ctrl_pdata, true))
+			pr_debug("reset enable: pinctrl not enabled\n");
+		mdss_dsi_panel_reset(pdata, 1);
+	}
+
+	if (mipi->init_delay)
+		usleep_range(mipi->init_delay, mipi->init_delay + 10);
+
+	if (mipi->force_clk_lane_hs) {
+		u32 tmp;
+
+		/* bit 28 of LANE_CTRL forces the clock lane into HS */
+		tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
+		tmp |= (1<<28);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
+		wmb(); /* ensure write is finished before progressing */
+	}
+
+	/* command-mode panels keep clocks gated until a frame is pushed */
+	if (pdata->panel_info.type == MIPI_CMD_PANEL)
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+end:
+	pr_debug("%s-:\n", __func__);
+	return ret;
+}
+
+/*
+ * Select the active or suspend pinctrl state for the DSI control GPIOs.
+ * Skipped (returns 0) for the right controller of a split display and
+ * for DBA panels, where the pins are owned elsewhere.
+ *
+ * NOTE(review): if pin_res.pinctrl is NULL, PTR_ERR(NULL) evaluates to
+ * 0 and this reports success without touching any pins — confirm that
+ * is the intended behavior for the no-pinctrl case.
+ */
+static int mdss_dsi_pinctrl_set_state(
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+	bool active)
+{
+	struct pinctrl_state *pin_state;
+	struct mdss_panel_info *pinfo = NULL;
+	int rc = -EFAULT;
+
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl))
+		return PTR_ERR(ctrl_pdata->pin_res.pinctrl);
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+	if ((mdss_dsi_is_right_ctrl(ctrl_pdata) &&
+		mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) ||
+			pinfo->is_dba_panel) {
+		pr_debug("%s:%d, right ctrl pinctrl config not needed\n",
+			__func__, __LINE__);
+		return 0;
+	}
+
+	pin_state = active ? ctrl_pdata->pin_res.gpio_state_active
+			   : ctrl_pdata->pin_res.gpio_state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(ctrl_pdata->pin_res.pinctrl,
+					  pin_state);
+		if (rc)
+			pr_err("%s: can not set %s pins\n", __func__,
+			       active ? MDSS_PINCTRL_STATE_DEFAULT
+				      : MDSS_PINCTRL_STATE_SLEEP);
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+		       active ? MDSS_PINCTRL_STATE_DEFAULT
+			      : MDSS_PINCTRL_STATE_SLEEP);
+	}
+	return rc;
+}
+
+/*
+ * Acquire the DSI pinctrl handle and look up the default/sleep states.
+ * Missing states are tolerated with a warning since not every board
+ * defines both.
+ *
+ * Fix: devm_pinctrl_get() can yield NULL, for which PTR_ERR() evaluates
+ * to 0 — the old code then reported success despite having no pinctrl
+ * handle. Map the NULL case to -EINVAL instead.
+ */
+static int mdss_dsi_pinctrl_init(struct platform_device *pdev)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	ctrl_pdata->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return ctrl_pdata->pin_res.pinctrl ?
+			PTR_ERR(ctrl_pdata->pin_res.pinctrl) : -EINVAL;
+	}
+
+	ctrl_pdata->pin_res.gpio_state_active
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_active))
+		pr_warn("%s: can not get default pinstate\n", __func__);
+
+	ctrl_pdata->pin_res.gpio_state_suspend
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_suspend))
+		pr_warn("%s: can not get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_unblank() - bring the panel to the interactive state: either
+ * exit low-power mode via the panel's low_power_config() hook, or run
+ * the full panel-on command sequence. All DSI clocks (and the clock
+ * slave's, on split displays) are held on for the duration, and PM QoS
+ * power-collapse latency is blocked while commands are in flight.
+ *
+ * Returns 0 on success or the panel/low-power hook error.
+ */
+static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	mipi = &pdata->panel_info.mipi;
+
+	pr_debug("%s+: ctrl=%pK ndx=%d cur_power_state=%d ctrl_state=%x\n",
+			__func__, ctrl_pdata, ctrl_pdata->ndx,
+		pdata->panel_info.panel_power_state, ctrl_pdata->ctrl_state);
+
+	mdss_dsi_pm_qos_update_request(DSI_DISABLE_PC_LATENCY);
+
+	/* on split display, also hold the clock slave's clocks on */
+	if (mdss_dsi_is_ctrl_clk_master(ctrl_pdata))
+		sctrl = mdss_dsi_get_ctrl_clk_slave();
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	if (sctrl)
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_LP) {
+		pr_debug("%s: dsi_unblank with panel always on\n", __func__);
+		if (ctrl_pdata->low_power_config)
+			ret = ctrl_pdata->low_power_config(pdata, false);
+		if (!ret)
+			ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_LP;
+		goto error;
+	}
+
+	if (!(ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)) {
+		if (!pdata->panel_info.dynamic_switch_pending) {
+			ATRACE_BEGIN("dsi_panel_on");
+			ret = ctrl_pdata->on(pdata);
+			if (ret) {
+				pr_err("%s: unable to initialize the panel\n",
+							__func__);
+				goto error;
+			}
+			ATRACE_END("dsi_panel_on");
+		}
+	}
+
+	/* enable TE for hw-vsync command-mode panels */
+	if ((pdata->panel_info.type == MIPI_CMD_PANEL) &&
+		mipi->vsync_enable && mipi->hw_vsync_mode) {
+		mdss_dsi_set_tear_on(ctrl_pdata);
+	}
+
+	ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
+
+error:
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	if (sctrl)
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+	mdss_dsi_pm_qos_update_request(DSI_ENABLE_PC_LATENCY);
+
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_blank() - take the panel out of the interactive state: enter
+ * low power mode when @power_state requests it, otherwise run the panel
+ * off sequence (and the pending dynamic mode switch, if any). All DSI
+ * clocks are held on for the duration.
+ *
+ * Returns 0 on success or the panel/low-power hook error.
+ */
+static int mdss_dsi_blank(struct mdss_panel_data *pdata, int power_state)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	mipi = &pdata->panel_info.mipi;
+
+	pr_debug("%s+: ctrl=%pK ndx=%d power_state=%d\n",
+		__func__, ctrl_pdata, ctrl_pdata->ndx, power_state);
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	if (mdss_panel_is_power_on_lp(power_state)) {
+		pr_debug("%s: low power state requested\n", __func__);
+		if (ctrl_pdata->low_power_config)
+			ret = ctrl_pdata->low_power_config(pdata, true);
+		if (!ret)
+			ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_LP;
+		goto error;
+	}
+
+	/* LP off-commands on video panels need a re-initialized host */
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL &&
+			ctrl_pdata->off_cmds.link_state == DSI_LP_MODE) {
+		mdss_dsi_sw_reset(ctrl_pdata, false);
+		mdss_dsi_host_init(pdata);
+	}
+
+	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
+
+	if (pdata->panel_info.dynamic_switch_pending) {
+		pr_info("%s: switching to %s mode\n", __func__,
+			(pdata->panel_info.mipi.mode ? "video" : "command"));
+		if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+			ctrl_pdata->switch_mode(pdata, SWITCH_TO_VIDEO_MODE);
+		} else if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+			ctrl_pdata->switch_mode(pdata, SWITCH_TO_CMD_MODE);
+			mdss_dsi_set_tear_off(ctrl_pdata);
+		}
+	}
+
+	/* disable TE for hw-vsync command-mode panels */
+	if ((pdata->panel_info.type == MIPI_CMD_PANEL) &&
+		mipi->vsync_enable && mipi->hw_vsync_mode) {
+		mdss_dsi_set_tear_off(ctrl_pdata);
+	}
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+		if (!pdata->panel_info.dynamic_switch_pending) {
+			ATRACE_BEGIN("dsi_panel_off");
+			ret = ctrl_pdata->off(pdata);
+			if (ret) {
+				pr_err("%s: Panel OFF failed\n", __func__);
+				goto error;
+			}
+			ATRACE_END("dsi_panel_off");
+		}
+		ctrl_pdata->ctrl_state &= ~(CTRL_STATE_PANEL_INIT |
+			CTRL_STATE_PANEL_LP);
+	}
+
+error:
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s-:End\n", __func__);
+	return ret;
+}
+
+/*
+ * Run the panel's optional post-panel-on hook with all DSI clocks held
+ * on for the duration of the call. Returns 0, or -EINVAL on NULL pdata.
+ */
+static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata, panel_data);
+
+	pr_debug("%s+: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	if (ctrl->post_panel_on)
+		ctrl->post_panel_on(pdata);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s-:\n", __func__);
+
+	return 0;
+}
+
+/*
+ * IRQ handler used when self-testing the panel TE (hardware vsync)
+ * line: log the event and complete te_done for this panel and, when
+ * present, the next panel in the chain.
+ */
+static irqreturn_t test_hw_vsync_handler(int irq, void *data)
+{
+	struct mdss_panel_data *pdata = data;
+
+	pr_debug("HW VSYNC\n");
+	MDSS_XLOG(0xaaa, irq);
+
+	complete_all(&pdata->te_done);
+	if (pdata->next)
+		complete_all(&pdata->next->te_done);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * mdss_dsi_cont_splash_on() - minimal DSI takeover when the bootloader
+ * left a continuous-splash image on screen: just re-setup the ctrl and
+ * soft-reset it, leaving the already-running panel untouched. Warns if
+ * the panel-init state bit is unexpectedly set.
+ */
+int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pr_info("%s:%d DSI on for continuous splash.\n", __func__, __LINE__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi = &pdata->panel_info.mipi;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s+: ctrl=%pK ndx=%d\n", __func__,
+		 ctrl_pdata, ctrl_pdata->ndx);
+
+	WARN((ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT),
+		"Incorrect Ctrl state=0x%x\n", ctrl_pdata->ctrl_state);
+
+	mdss_dsi_ctrl_setup(ctrl_pdata);
+	mdss_dsi_sw_reset(ctrl_pdata, true);
+	pr_debug("%s-:End\n", __func__);
+	return ret;
+}
+
+/*
+ * Mask (or clear-and-restore) the FIFO-underflow and PLL-unlock error
+ * interrupts around a dynamic fps change, during which such transient
+ * errors are expected. Register offsets: 0x10c interrupt mask, 0x120
+ * clock status, 0x00c FIFO status.
+ */
+static void __mdss_dsi_mask_dfps_errors(struct mdss_dsi_ctrl_pdata *ctrl,
+		bool mask)
+{
+	u32 data = 0;
+
+	/*
+	 * Assumption is that the DSI clocks will be enabled
+	 * when this API is called from dfps thread
+	 */
+	if (mask) {
+		/* mask FIFO underflow and PLL unlock bits */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x7c000000, 0x7c000000);
+	} else {
+		data = MIPI_INP((ctrl->ctrl_base) + 0x0120);
+		if (data & BIT(16)) {
+			pr_debug("pll unlocked: 0x%x\n", data);
+			/* clear PLL unlock bit */
+			MIPI_OUTP((ctrl->ctrl_base) + 0x120, BIT(16));
+		}
+
+		data = MIPI_INP((ctrl->ctrl_base) + 0x00c);
+		if (data & 0x88880000) {
+			pr_debug("dsi fifo underflow: 0x%x\n", data);
+			/* clear DSI FIFO underflow and empty */
+			MIPI_OUTP((ctrl->ctrl_base) + 0x00c, 0x99990000);
+		}
+
+		/* restore FIFO underflow and PLL unlock bits */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x7c000000, 0x0);
+	}
+}
+
+/*
+ * Program the new video-mode total (vtotal-1 << 16 | htotal-1) into the
+ * VIDEO_MODE_ACTIVE register (0x2C) to realize @new_fps. Bit 27 appears
+ * to act as a latch/update strobe: it is first set on the current value,
+ * then handled so the final programmed value keeps its original bit-27
+ * state. With timing double-buffering, the flush register (0x1e4) is
+ * kicked so the change lands on a frame boundary.
+ */
+static void __mdss_dsi_update_video_mode_total(struct mdss_panel_data *pdata,
+		int new_fps)
+{
+	u32 hsync_period, vsync_period;
+	u32 new_dsi_v_total, current_dsi_v_total;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s Invalid pdata\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	if (ctrl_pdata == NULL) {
+		pr_err("%s Invalid ctrl_pdata\n", __func__);
+		return;
+	}
+
+	vsync_period =
+		mdss_panel_get_vtotal(&pdata->panel_info);
+	hsync_period =
+		mdss_panel_get_htotal(&pdata->panel_info, true);
+	current_dsi_v_total =
+		MIPI_INP((ctrl_pdata->ctrl_base) + 0x2C);
+	new_dsi_v_total =
+		((vsync_period - 1) << 16) | (hsync_period - 1);
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+			(current_dsi_v_total | 0x8000000));
+	if (new_dsi_v_total & 0x8000000) {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				new_dsi_v_total);
+	} else {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				(new_dsi_v_total | 0x8000000));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				(new_dsi_v_total & 0x7ffffff));
+	}
+
+	/* with timing double buffering, flush on next frame boundary */
+	if (ctrl_pdata->timing_db_mode)
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
+
+	pr_debug("%s new_fps:%d vsync:%d hsync:%d frame_rate:%d\n",
+			__func__, new_fps, vsync_period, hsync_period,
+			ctrl_pdata->panel_data.panel_info.mipi.frame_rate);
+
+	ctrl_pdata->panel_data.panel_info.current_fps = new_fps;
+	MDSS_XLOG(current_dsi_v_total, new_dsi_v_total, new_fps,
+		ctrl_pdata->timing_db_mode);
+
+}
+
+/*
+ * Program DSI_DYNAMIC_REFRESH_CTRL for the PHY revision in use: rev 1.0
+ * clears bit 12 of the current value, rev 2.0 writes bit 13 outright.
+ * On split displays only the clock master is configured.
+ */
+static void __mdss_dsi_dyn_refresh_config(
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int reg_data = 0;
+	u32 phy_rev = ctrl_pdata->shared_data->phy_rev;
+
+	/* configure only for master control in split display */
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+			mdss_dsi_is_ctrl_clk_slave(ctrl_pdata))
+		return;
+
+	switch (phy_rev) {
+	case DSI_PHY_REV_10:
+		reg_data = MIPI_INP((ctrl_pdata->ctrl_base) +
+				DSI_DYNAMIC_REFRESH_CTRL);
+		reg_data &= ~BIT(12);
+		MIPI_OUTP((ctrl_pdata->ctrl_base)
+				+ DSI_DYNAMIC_REFRESH_CTRL, reg_data);
+		break;
+	case DSI_PHY_REV_20:
+		reg_data = BIT(13);
+		MIPI_OUTP((ctrl_pdata->ctrl_base)
+				+ DSI_DYNAMIC_REFRESH_CTRL, reg_data);
+		break;
+	default:
+		pr_err("Phy rev %d unsupported\n", phy_rev);
+		break;
+	}
+
+	pr_debug("Dynamic fps ctrl = 0x%x\n", reg_data);
+}
+
+/*
+ * Compute and program the pipe/PLL settle delays (in esc-clock cycles)
+ * used by the dynamic-refresh hardware when switching frame rate. The
+ * pipe delay covers one line plus, when BLLP power-stop is disabled,
+ * the clock-lane pre/post and PHY timing terms from the panel's PHY
+ * timing table (pd->timing[]). Split-display clock slaves are skipped.
+ *
+ * NOTE(review): the divisions assume pclk/byte clk rates are at least
+ * XO_CLK_RATE (ratios >= 1); a slower pclk would divide by zero —
+ * confirm this invariant holds for all supported panels.
+ */
+static void __mdss_dsi_calc_dfps_delay(struct mdss_panel_data *pdata)
+{
+	u32 esc_clk_rate = XO_CLK_RATE;
+	u32 pipe_delay, pipe_delay2 = 0, pll_delay;
+	u32 hsync_period = 0;
+	u32 pclk_to_esc_ratio, byte_to_esc_ratio, hr_bit_to_esc_ratio;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	struct mdss_dsi_phy_ctrl *pd = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s Invalid pdata\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	if (ctrl_pdata == NULL) {
+		pr_err("%s Invalid ctrl_pdata\n", __func__);
+		return;
+	}
+
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		mdss_dsi_is_ctrl_clk_slave(ctrl_pdata))
+		return;
+
+	pinfo = &pdata->panel_info;
+	pd = &(pinfo->mipi.dsi_phy_db);
+
+	pclk_to_esc_ratio = (ctrl_pdata->pclk_rate / esc_clk_rate);
+	byte_to_esc_ratio = (ctrl_pdata->byte_clk_rate / esc_clk_rate);
+	hr_bit_to_esc_ratio = ((ctrl_pdata->byte_clk_rate * 4) / esc_clk_rate);
+
+	hsync_period = mdss_panel_get_htotal(pinfo, true);
+	pipe_delay = (hsync_period + 1) / pclk_to_esc_ratio;
+	if (pinfo->mipi.eof_bllp_power_stop == 0)
+		pipe_delay += (17 / pclk_to_esc_ratio) +
+			((21 + (pinfo->mipi.t_clk_pre + 1) +
+				(pinfo->mipi.t_clk_post + 1)) /
+				byte_to_esc_ratio) +
+			((((pd->timing[8] >> 1) + 1) +
+			((pd->timing[6] >> 1) + 1) +
+			((pd->timing[3] * 4) + (pd->timing[5] >> 1) + 1) +
+			((pd->timing[7] >> 1) + 1) +
+			((pd->timing[1] >> 1) + 1) +
+			((pd->timing[4] >> 1) + 1)) / hr_bit_to_esc_ratio);
+
+	if (pinfo->mipi.force_clk_lane_hs)
+		pipe_delay2 = (6 / byte_to_esc_ratio) +
+			((((pd->timing[1] >> 1) + 1) +
+			((pd->timing[4] >> 1) + 1)) / hr_bit_to_esc_ratio);
+
+	/* 130 us pll delay recommended by h/w doc */
+	pll_delay = ((130 * esc_clk_rate) / 1000000) * 2;
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_PIPE_DELAY,
+			pipe_delay);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_PIPE_DELAY2,
+			pipe_delay2);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_PLL_DELAY,
+			pll_delay);
+}
+
/*
 * __mdss_dsi_dfps_calc_clks() - compute and stage clocks for a new fps.
 * @pdata:   panel data of the DSI controller.
 * @new_fps: requested frame rate in fps.
 *
 * Recomputes the clock dividers for @new_fps, arms dynamic refresh, and
 * (for 8996-class PHY rev 2.0) reprograms the DFPS PHY config, then
 * programs the delay registers. Current pclk/byte clk rates are backed up
 * so a failed switch can be reverted by the caller.
 *
 * Return: 0 on success, negative errno from the divider calculation.
 */
static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
		int new_fps)
{
	int rc = 0;
	u64 clk_rate;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdss_panel_info *pinfo;
	u32 phy_rev;

	if (pdata == NULL) {
		pr_err("%s Invalid pdata\n", __func__);
		return -EINVAL;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
			panel_data);
	if (ctrl_pdata == NULL) {
		pr_err("%s Invalid ctrl_pdata\n", __func__);
		return -EINVAL;
	}

	pinfo = &pdata->panel_info;
	phy_rev = ctrl_pdata->shared_data->phy_rev;

	/* recompute byte/pixel clock dividers for the requested fps */
	rc = mdss_dsi_clk_div_config
		(&ctrl_pdata->panel_data.panel_info, new_fps);
	if (rc) {
		pr_err("%s: unable to initialize the clk dividers\n",
				__func__);
		return rc;
	}

	__mdss_dsi_dyn_refresh_config(ctrl_pdata);

	if (phy_rev == DSI_PHY_REV_20)
		mdss_dsi_dfps_config_8996(ctrl_pdata);

	__mdss_dsi_calc_dfps_delay(pdata);

	/* take a backup of current clk rates */
	ctrl_pdata->pclk_rate_bkp = ctrl_pdata->pclk_rate;
	ctrl_pdata->byte_clk_rate_bkp = ctrl_pdata->byte_clk_rate;

	ctrl_pdata->pclk_rate = pinfo->mipi.dsi_pclk_rate;
	/* byte clock = bit clock / 8 */
	clk_rate = pinfo->clk_rate;
	do_div(clk_rate, 8U);
	ctrl_pdata->byte_clk_rate = (u32) clk_rate;

	pr_debug("byte_rate=%i\n", ctrl_pdata->byte_clk_rate);
	pr_debug("pclk_rate=%i\n", ctrl_pdata->pclk_rate);

	return rc;
}
+
+static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata,
+ int new_fps)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
+ struct mdss_panel_info *pinfo, *spinfo = NULL;
+ int rc = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s Invalid pdata\n", __func__);
+ return -EINVAL;
+ }
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+ if (IS_ERR_OR_NULL(ctrl_pdata)) {
+ pr_err("Invalid sctrl_pdata = %lu\n", PTR_ERR(ctrl_pdata));
+ return PTR_ERR(ctrl_pdata);
+ }
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+
+ /*
+ * In split display case, configure and enable dynamic refresh
+ * register only after both the ctrl data is programmed. So,
+ * ignore enabling dynamic refresh for the master control and
+ * configure only when it is slave control.
+ */
+ if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+ mdss_dsi_is_ctrl_clk_master(ctrl_pdata))
+ return 0;
+
+ if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+ mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) {
+ sctrl_pdata = ctrl_pdata;
+ spinfo = pinfo;
+ ctrl_pdata = mdss_dsi_get_ctrl_clk_master();
+ if (IS_ERR_OR_NULL(ctrl_pdata)) {
+ pr_err("Invalid ctrl_pdata = %lu\n",
+ PTR_ERR(ctrl_pdata));
+ return PTR_ERR(ctrl_pdata);
+ }
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+ }
+
+ /*
+ * For programming dynamic refresh registers, we need to change
+ * the parent to shadow clocks for the software byte and pixel mux.
+ * After switching to shadow clocks, if there is no ref count on
+ * main byte and pixel clocks, clock driver may shutdown those
+ * unreferenced byte and pixel clocks. Hence add an extra reference
+ * count to avoid shutting down the main byte and pixel clocks.
+ */
+ rc = clk_prepare_enable(ctrl_pdata->pll_byte_clk);
+ if (rc) {
+ pr_err("Unable to add extra refcnt for byte clock\n");
+ goto error_byte;
+ }
+
+ rc = clk_prepare_enable(ctrl_pdata->pll_pixel_clk);
+ if (rc) {
+ pr_err("Unable to add extra refcnt for pixel clock\n");
+ goto error_pixel;
+ }
+
+ /* change the parent to shadow clocks*/
+ rc = clk_set_parent(ctrl_pdata->mux_byte_clk,
+ ctrl_pdata->shadow_byte_clk);
+ if (rc) {
+ pr_err("Unable to set parent to shadow byte clock\n");
+ goto error_shadow_byte;
+ }
+
+ rc = clk_set_parent(ctrl_pdata->mux_pixel_clk,
+ ctrl_pdata->shadow_pixel_clk);
+ if (rc) {
+ pr_err("Unable to set parent to shadow pixel clock\n");
+ goto error_shadow_pixel;
+ }
+
+ rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_BYTE_CLK, ctrl_pdata->byte_clk_rate, 0);
+ if (rc) {
+ pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
+ __func__);
+ goto error_byte_link;
+ }
+
+ rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_PIX_CLK, ctrl_pdata->pclk_rate, 0);
+ if (rc) {
+ pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
+ __func__);
+ goto error_pixel_link;
+ }
+
+ if (sctrl_pdata) {
+ rc = mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_BYTE_CLK, sctrl_pdata->byte_clk_rate, 0);
+ if (rc) {
+ pr_err("%s: slv dsi_byte_clk - clk_set_rate failed\n",
+ __func__);
+ goto error_sbyte_link;
+ }
+
+ rc = mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_PIX_CLK, sctrl_pdata->pclk_rate, 0);
+ if (rc) {
+ pr_err("%s: slv dsi_pixel_clk - clk_set_rate failed\n",
+ __func__);
+ goto error_spixel_link;
+ }
+ }
+
+ rc = mdss_dsi_en_wait4dynamic_done(ctrl_pdata);
+ if (rc < 0) {
+ pr_err("Unsuccessful dynamic fps change");
+ goto dfps_timeout;
+ }
+
+ MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL, 0x00);
+ if (sctrl_pdata)
+ MIPI_OUTP((sctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
+ 0x00);
+
+ rc = mdss_dsi_phy_pll_reset_status(ctrl_pdata);
+ if (rc) {
+ pr_err("%s: pll cannot be locked reset core ready failed %d\n",
+ __func__, rc);
+ goto dfps_timeout;
+ }
+
+ __mdss_dsi_mask_dfps_errors(ctrl_pdata, false);
+ if (sctrl_pdata)
+ __mdss_dsi_mask_dfps_errors(sctrl_pdata, false);
+
+ /* Move the mux clocks to main byte and pixel clocks */
+ rc = clk_set_parent(ctrl_pdata->mux_byte_clk,
+ ctrl_pdata->pll_byte_clk);
+ if (rc)
+ pr_err("Unable to set parent back to main byte clock\n");
+
+ rc = clk_set_parent(ctrl_pdata->mux_pixel_clk,
+ ctrl_pdata->pll_pixel_clk);
+ if (rc)
+ pr_err("Unable to set parent back to main pixel clock\n");
+
+ /* Remove extra ref count on parent clocks */
+ clk_disable_unprepare(ctrl_pdata->pll_byte_clk);
+ clk_disable_unprepare(ctrl_pdata->pll_pixel_clk);
+
+ /* update new fps that at this point is already updated in hw */
+ pinfo->current_fps = new_fps;
+ if (sctrl_pdata)
+ spinfo->current_fps = new_fps;
+
+ return rc;
+
+dfps_timeout:
+ if (sctrl_pdata)
+ mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_PIX_CLK,
+ sctrl_pdata->pclk_rate_bkp, 0);
+error_spixel_link:
+ if (sctrl_pdata)
+ mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_BYTE_CLK,
+ sctrl_pdata->byte_clk_rate_bkp, 0);
+error_sbyte_link:
+ mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_PIX_CLK, ctrl_pdata->pclk_rate_bkp, 0);
+error_pixel_link:
+ mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_LINK_BYTE_CLK, ctrl_pdata->byte_clk_rate_bkp, 0);
+error_byte_link:
+ clk_set_parent(ctrl_pdata->mux_pixel_clk, ctrl_pdata->pll_pixel_clk);
+error_shadow_pixel:
+ clk_set_parent(ctrl_pdata->mux_byte_clk, ctrl_pdata->pll_byte_clk);
+error_shadow_byte:
+ clk_disable_unprepare(ctrl_pdata->pll_pixel_clk);
+error_pixel:
+ clk_disable_unprepare(ctrl_pdata->pll_byte_clk);
+error_byte:
+ return rc;
+}
+
+static int mdss_dsi_check_params(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
+{
+ struct mdss_panel_info *var_pinfo, *pinfo;
+ int rc = 0;
+
+ if (!ctrl || !arg)
+ return 0;
+
+ pinfo = &ctrl->panel_data.panel_info;
+ if (!pinfo->is_pluggable)
+ return 0;
+
+ var_pinfo = (struct mdss_panel_info *)arg;
+
+ pr_debug("%s: reconfig xres: %d yres: %d, current xres: %d yres: %d\n",
+ __func__, var_pinfo->xres, var_pinfo->yres,
+ pinfo->xres, pinfo->yres);
+ if ((var_pinfo->xres != pinfo->xres) ||
+ (var_pinfo->yres != pinfo->yres) ||
+ (var_pinfo->lcdc.h_back_porch != pinfo->lcdc.h_back_porch) ||
+ (var_pinfo->lcdc.h_front_porch != pinfo->lcdc.h_front_porch) ||
+ (var_pinfo->lcdc.h_pulse_width != pinfo->lcdc.h_pulse_width) ||
+ (var_pinfo->lcdc.v_back_porch != pinfo->lcdc.v_back_porch) ||
+ (var_pinfo->lcdc.v_front_porch != pinfo->lcdc.v_front_porch) ||
+ (var_pinfo->lcdc.v_pulse_width != pinfo->lcdc.v_pulse_width)
+ )
+ rc = 1;
+
+ return rc;
+}
+
#ifdef TARGET_HW_MDSS_HDMI
/*
 * mdss_dsi_update_params() - push updated panel info to the DBA lane config.
 * @ctrl: DSI controller data.
 * @arg:  opaque event payload; only used as a non-NULL sanity check here.
 */
static void mdss_dsi_update_params(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
{
	struct mdss_panel_info *pinfo;

	if (!ctrl || !arg)
		return;

	pinfo = &ctrl->panel_data.panel_info;
	mdss_dba_update_lane_cfg(pinfo);
}
#else
/* Stub when HDMI bridge (DBA) support is compiled out. */
static void mdss_dsi_update_params(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
{
}
#endif
+
/*
 * mdss_dsi_dfps_config() - apply a dynamic frame rate change.
 * @pdata:   panel data of the DSI controller.
 * @new_fps: requested frame rate in fps.
 *
 * Dispatches between the porch-update method (HFP/VFP stretch) and the
 * clock-update method depending on pinfo->dfps_update. For the clock
 * method, PHY timing params (rev 2.0 only), clock calculation and the
 * actual clock switch are performed in order; on failure the previous
 * frame rate is restored via the error labels.
 *
 * Return: 0 on success or no-op, negative errno on failure.
 */
static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
{
	int rc = 0;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdss_panel_info *pinfo;
	u32 phy_rev;
	u32 frame_rate_bkp;

	pr_debug("%s+:\n", __func__);

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
			panel_data);

	if (!ctrl_pdata->panel_data.panel_info.dynamic_fps) {
		pr_err("Dynamic fps not enabled for this panel\n");
		return -EINVAL;
	}

	phy_rev = ctrl_pdata->shared_data->phy_rev;
	pinfo = &pdata->panel_info;

	/* get the fps configured in HW */
	frame_rate_bkp = pinfo->current_fps;

	if (new_fps == pinfo->current_fps) {
		/*
		 * This is unlikely as mdss driver checks for previously
		 * configured frame rate.
		 */
		pr_debug("Panel is already at this FPS\n");
		goto end_update;
	}

	if (pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
		pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
		/* Porch method */
		__mdss_dsi_update_video_mode_total(pdata, new_fps);
	} else if (pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
		/* Clock update method */

		__mdss_dsi_mask_dfps_errors(ctrl_pdata, true);

		if (phy_rev == DSI_PHY_REV_20) {
			rc = mdss_dsi_phy_calc_timing_param(pinfo, phy_rev,
					new_fps);
			if (rc) {
				pr_err("PHY calculations failed-%d\n", new_fps);
				goto end_update;
			}
		}

		rc = __mdss_dsi_dfps_calc_clks(pdata, new_fps);
		if (rc) {
			pr_err("error calculating clocks for %d\n", new_fps);
			goto error_clks;
		}

		rc = __mdss_dsi_dfps_update_clks(pdata, new_fps);
		if (rc) {
			pr_err("Dynamic refresh failed-%d\n", new_fps);
			goto error_dfps;
		}
	}

	return rc;
error_dfps:
	/* revert staged clock calculations back to the previous fps */
	if (__mdss_dsi_dfps_calc_clks(pdata, frame_rate_bkp))
		pr_err("error reverting clock calculations for %d\n",
				frame_rate_bkp);
error_clks:
	/*
	 * NOTE(review): revert runs for all PHY revs although the forward
	 * path computes timing params only for rev 2.0 - presumably the
	 * helper no-ops on other revs; verify.
	 */
	if (mdss_dsi_phy_calc_timing_param(pinfo, phy_rev, frame_rate_bkp))
		pr_err("Unable to revert phy timing-%d\n", frame_rate_bkp);
end_update:
	return rc;
}
+
+static int mdss_dsi_ctl_partial_roi(struct mdss_panel_data *pdata)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ int rc = -EINVAL;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!pdata->panel_info.partial_update_enabled)
+ return 0;
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ if (ctrl_pdata->set_col_page_addr)
+ rc = ctrl_pdata->set_col_page_addr(pdata, false);
+
+ return rc;
+}
+
/*
 * mdss_dsi_set_stream_size() - program the command mode MDP stream size.
 * @pdata: panel data of the DSI controller.
 *
 * Writes DSI_COMMAND_MODE_MDP_STREAM_CTRL/_TOTAL for both streams from
 * either the DSC parameters (compressed panels) or the current ROI, sets
 * the horizontal idle control matching the ROI width, and reprograms the
 * DSC block when compression is enabled.
 *
 * Return: 0 on success, -EINVAL for NULL pdata or when partial update is
 * not supported by the panel.
 */
static int mdss_dsi_set_stream_size(struct mdss_panel_data *pdata)
{
	u32 stream_ctrl, stream_total, idle;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdss_panel_info *pinfo;
	struct dsc_desc *dsc = NULL;
	struct mdss_rect *roi;
	struct panel_horizontal_idle *pidle;
	int i;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
			panel_data);

	pinfo = &pdata->panel_info;

	if (!pinfo->partial_update_supported)
		return -EINVAL;

	if (pinfo->compression_mode == COMPRESSION_DSC)
		dsc = &pinfo->dsc;

	roi = &pinfo->roi;

	/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
	if (dsc) {
		u16 byte_num = dsc->bytes_per_pkt;

		if (pinfo->mipi.insert_dcs_cmd)
			byte_num++;

		/* word count | virtual channel | DCS long write type */
		stream_ctrl = (byte_num << 16) | (pinfo->mipi.vc << 8) |
				DTYPE_DCS_LWRITE;
		stream_total = dsc->pic_height << 16 | dsc->pclk_per_line;
	} else {

		/* uncompressed: 3 bytes per pixel plus the DCS command byte */
		stream_ctrl = (((roi->w * 3) + 1) << 16) |
			(pdata->panel_info.mipi.vc << 8) | DTYPE_DCS_LWRITE;
		stream_total = roi->h << 16 | roi->w;
	}
	/* 0x60/0x58: stream0 and stream1 ctrl - per HW register map */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, stream_ctrl);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x58, stream_ctrl);

	/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, stream_total);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, stream_total);

	/* set idle control -- dsi clk cycle */
	idle = 0;
	pidle = ctrl_pdata->line_idle;
	/* pick the idle value whose (min, max] range covers the ROI width */
	for (i = 0; i < ctrl_pdata->horizontal_idle_cnt; i++) {
		if (roi->w > pidle->min && roi->w <= pidle->max) {
			idle = pidle->idle;
			pr_debug("%s: ndx=%d w=%d range=%d-%d idle=%d\n",
				__func__, ctrl_pdata->ndx, roi->w,
				pidle->min, pidle->max, pidle->idle);
			break;
		}
		pidle++;
	}

	if (idle)
		idle |= BIT(12);	/* enable */

	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x194, idle);

	if (dsc)
		mdss_dsi_dsc_config(ctrl_pdata, dsc);

	return 0;
}
+
#ifdef TARGET_HW_MDSS_HDMI
/*
 * mdss_dsi_dba_work() - deferred registration with the MDSS DBA utils.
 * @work: embedded delayed_work inside struct mdss_dsi_ctrl_pdata.
 *
 * Builds the DBA utils init data from the ctrl (bridge chip name, fb node,
 * kobject, splash status) and registers. If the DBA device is not ready
 * yet, the work re-queues itself after HZ and retries.
 */
static void mdss_dsi_dba_work(struct work_struct *work)
{
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct delayed_work *dw = to_delayed_work(work);
	struct mdss_dba_utils_init_data utils_init_data;
	struct mdss_panel_info *pinfo;

	/* container_of of a valid work item cannot be NULL; check is defensive */
	ctrl_pdata = container_of(dw, struct mdss_dsi_ctrl_pdata, dba_work);
	if (!ctrl_pdata) {
		pr_err("%s: invalid ctrl data\n", __func__);
		return;
	}

	pinfo = &ctrl_pdata->panel_data.panel_info;
	if (!pinfo) {
		pr_err("%s: invalid ctrl data\n", __func__);
		return;
	}

	memset(&utils_init_data, 0, sizeof(utils_init_data));

	utils_init_data.chip_name = ctrl_pdata->bridge_name;
	utils_init_data.client_name = "dsi";
	utils_init_data.instance_id = ctrl_pdata->bridge_index;
	utils_init_data.fb_node = ctrl_pdata->fb_node;
	utils_init_data.kobj = ctrl_pdata->kobj;
	utils_init_data.pinfo = pinfo;
	if (ctrl_pdata->mdss_util)
		utils_init_data.cont_splash_enabled =
			ctrl_pdata->mdss_util->panel_intf_status(
			ctrl_pdata->panel_data.panel_info.pdest,
			MDSS_PANEL_INTF_DSI) ? true : false;
	else
		utils_init_data.cont_splash_enabled = false;

	pinfo->dba_data = mdss_dba_utils_init(&utils_init_data);

	if (!IS_ERR_OR_NULL(pinfo->dba_data)) {
		ctrl_pdata->ds_registered = true;
	} else {
		pr_debug("%s: dba device not ready, queue again\n", __func__);
		queue_delayed_work(ctrl_pdata->workq,
				&ctrl_pdata->dba_work, HZ);
	}
}
#else
/* Stub when HDMI bridge (DBA) support is compiled out. */
static void mdss_dsi_dba_work(struct work_struct *work)
{
	/* fixed: mark parameter unused without dereferencing it */
	(void)work;
}
#endif
+static int mdss_dsi_reset_write_ptr(struct mdss_panel_data *pdata)
+{
+
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mdss_panel_info *pinfo;
+ int rc = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+ mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+ /* Need to reset the DSI core since the pixel stream was stopped. */
+ mdss_dsi_sw_reset(ctrl_pdata, true);
+
+ /*
+ * Reset the partial update co-ordinates to the panel height and
+ * width
+ */
+ if (pinfo->dcs_cmd_by_left && (ctrl_pdata->ndx == 1))
+ goto skip_cmd_send;
+
+ pinfo->roi.x = 0;
+ pinfo->roi.y = 0;
+ pinfo->roi.w = pinfo->xres;
+ if (pinfo->dcs_cmd_by_left)
+ pinfo->roi.w = pinfo->xres;
+ if (pdata->next)
+ pinfo->roi.w += pdata->next->panel_info.xres;
+ pinfo->roi.h = pinfo->yres;
+
+ mdss_dsi_set_stream_size(pdata);
+
+ if (ctrl_pdata->set_col_page_addr)
+ rc = ctrl_pdata->set_col_page_addr(pdata, true);
+
+skip_cmd_send:
+ mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+ pr_debug("%s: DSI%d write ptr reset finished\n", __func__,
+ ctrl_pdata->ndx);
+
+ return rc;
+}
+
/*
 * mdss_dsi_register_recovery_handler() - install the interface recovery
 * callback, serialized against other ctrl mutex users.
 * @ctrl:     DSI controller data.
 * @recovery: recovery handler to store in ctrl->recovery.
 *
 * Return: always 0.
 */
int mdss_dsi_register_recovery_handler(struct mdss_dsi_ctrl_pdata *ctrl,
	struct mdss_intf_recovery *recovery)
{
	mutex_lock(&ctrl->mutex);
	ctrl->recovery = recovery;
	mutex_unlock(&ctrl->mutex);
	return 0;
}
+
/*
 * mdss_dsi_register_mdp_callback() - install the MDP callback, serialized
 * against other ctrl mutex users.
 * @ctrl:         DSI controller data.
 * @mdp_callback: callback to store in ctrl->mdp_callback.
 *
 * Return: always 0.
 */
static int mdss_dsi_register_mdp_callback(struct mdss_dsi_ctrl_pdata *ctrl,
	struct mdss_intf_recovery *mdp_callback)
{
	mutex_lock(&ctrl->mutex);
	ctrl->mdp_callback = mdp_callback;
	mutex_unlock(&ctrl->mutex);
	return 0;
}
+
/*
 * mdss_dsi_get_fb_node_cb() - resolve the fb device-tree node for a DSI
 * controller.
 * @pdev: platform device of the DSI controller.
 *
 * Looks up the DSI master device from the ctrl's parent node and parses
 * the per-ctrl fb phandle property from it.
 *
 * Return: the fb device_node on success, NULL on any lookup failure.
 *
 * NOTE(review): of_find_device_by_node() takes a reference on the returned
 * device that is never dropped here - verify whether put_device() is
 * required on both paths.
 */
static struct device_node *mdss_dsi_get_fb_node_cb(struct platform_device *pdev)
{
	struct device_node *fb_node;
	struct platform_device *dsi_dev;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata;

	if (pdev == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return NULL;
	}

	ctrl_pdata = platform_get_drvdata(pdev);
	dsi_dev = of_find_device_by_node(pdev->dev.of_node->parent);
	if (!dsi_dev) {
		pr_err("Unable to find dsi master device: %s\n",
			pdev->dev.of_node->full_name);
		return NULL;
	}

	fb_node = of_parse_phandle(dsi_dev->dev.of_node,
			mdss_dsi_get_fb_name(ctrl_pdata), 0);
	if (!fb_node) {
		pr_err("Unable to find fb node for device: %s\n", pdev->name);
		return NULL;
	}

	return fb_node;
}
+
/*
 * mdss_dsi_event_handler() - central dispatcher for MDSS panel events.
 * @pdata: panel data of the DSI controller.
 * @event: MDSS_EVENT_* identifier.
 * @arg:   event-specific payload (power state, fps, fb_info, ...), cast
 *         per case below.
 *
 * Return: per-event result; 0 for unhandled events, negative errno on
 * failures, and 1 from MDSS_EVENT_CHECK_PARAMS when timing changed.
 */
static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
				  int event, void *arg)
{
	int rc = 0;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct fb_info *fbi;
	int power_state;
	u32 mode;
	struct mdss_panel_info *pinfo;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}
	pinfo = &pdata->panel_info;
	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);
	pr_debug("%s+: ctrl=%d event=%d\n", __func__, ctrl_pdata->ndx, event);

	MDSS_XLOG(event, arg, ctrl_pdata->ndx, 0x3333);

	switch (event) {
	case MDSS_EVENT_UPDATE_PARAMS:
		pr_debug("%s:Entered Case MDSS_EVENT_UPDATE_PARAMS\n",
				__func__);
		mdss_dsi_update_params(ctrl_pdata, arg);
		break;
	case MDSS_EVENT_CHECK_PARAMS:
		pr_debug("%s:Entered Case MDSS_EVENT_CHECK_PARAMS\n", __func__);
		if (mdss_dsi_check_params(ctrl_pdata, arg)) {
			ctrl_pdata->update_phy_timing = true;
			/*
			 * Call to MDSS_EVENT_CHECK_PARAMS expects
			 * the return value of 1, if there is a change
			 * in panel timing parameters.
			 */
			rc = 1;
		}
		ctrl_pdata->refresh_clk_rate = true;
		break;
	case MDSS_EVENT_LINK_READY:
		/* refresh clocks first if a timing change was flagged */
		if (ctrl_pdata->refresh_clk_rate)
			rc = mdss_dsi_clk_refresh(pdata,
				ctrl_pdata->update_phy_timing);

		rc = mdss_dsi_on(pdata);
		mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
							pdata);
		break;
	case MDSS_EVENT_UNBLANK:
		/* on-cmds in LP mode are sent before the stream starts */
		if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE)
			rc = mdss_dsi_unblank(pdata);
		break;
	case MDSS_EVENT_POST_PANEL_ON:
		rc = mdss_dsi_post_panel_on(pdata);
		break;
	case MDSS_EVENT_PANEL_ON:
		ctrl_pdata->ctrl_state |= CTRL_STATE_MDP_ACTIVE;
		if (ctrl_pdata->on_cmds.link_state == DSI_HS_MODE)
			rc = mdss_dsi_unblank(pdata);
		pdata->panel_info.esd_rdy = true;
		break;
	case MDSS_EVENT_BLANK:
		power_state = (int) (unsigned long) arg;
		if (ctrl_pdata->off_cmds.link_state == DSI_HS_MODE)
			rc = mdss_dsi_blank(pdata, power_state);
		break;
	case MDSS_EVENT_PANEL_OFF:
		power_state = (int) (unsigned long) arg;
		disable_esd_thread();
		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
		if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
			rc = mdss_dsi_blank(pdata, power_state);
		rc = mdss_dsi_off(pdata, power_state);
		break;
	case MDSS_EVENT_CONT_SPLASH_FINISH:
		if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
			rc = mdss_dsi_blank(pdata, MDSS_PANEL_POWER_OFF);
		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
		rc = mdss_dsi_cont_splash_on(pdata);
		break;
	case MDSS_EVENT_PANEL_CLK_CTRL:
		mdss_dsi_clk_req(ctrl_pdata,
			(struct dsi_panel_clk_ctrl *) arg);
		break;
	case MDSS_EVENT_DSI_CMDLIST_KOFF:
		mdss_dsi_cmdlist_commit(ctrl_pdata, 1);
		break;
	case MDSS_EVENT_PANEL_UPDATE_FPS:
		if (arg != NULL) {
			rc = mdss_dsi_dfps_config(pdata,
					(int) (unsigned long) arg);
			if (rc)
				pr_err("unable to change fps-%d, error-%d\n",
						(int) (unsigned long) arg, rc);
			else
				pr_debug("panel frame rate changed to %d\n",
						(int) (unsigned long) arg);
		}
		break;
	case MDSS_EVENT_CONT_SPLASH_BEGIN:
		if (ctrl_pdata->off_cmds.link_state == DSI_HS_MODE) {
			/* Panel is Enabled in Bootloader */
			rc = mdss_dsi_blank(pdata, MDSS_PANEL_POWER_OFF);
		}
		break;
	case MDSS_EVENT_DSC_PPS_SEND:
		if (pinfo->compression_mode == COMPRESSION_DSC)
			mdss_dsi_panel_dsc_pps_send(ctrl_pdata, pinfo);
		break;
	case MDSS_EVENT_ENABLE_PARTIAL_ROI:
		rc = mdss_dsi_ctl_partial_roi(pdata);
		break;
	case MDSS_EVENT_DSI_RESET_WRITE_PTR:
		rc = mdss_dsi_reset_write_ptr(pdata);
		break;
	case MDSS_EVENT_DSI_STREAM_SIZE:
		rc = mdss_dsi_set_stream_size(pdata);
		break;
	case MDSS_EVENT_DSI_UPDATE_PANEL_DATA:
		rc = mdss_dsi_update_panel_config(ctrl_pdata,
					(int)(unsigned long) arg);
		break;
	case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
		rc = mdss_dsi_register_recovery_handler(ctrl_pdata,
			(struct mdss_intf_recovery *)arg);
		break;
	case MDSS_EVENT_REGISTER_MDP_CALLBACK:
		rc = mdss_dsi_register_mdp_callback(ctrl_pdata,
			(struct mdss_intf_recovery *)arg);
		break;
	case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
		mode = (u32)(unsigned long) arg;
		mdss_dsi_switch_mode(pdata, mode);
		break;
	case MDSS_EVENT_DSI_RECONFIG_CMD:
		mode = (u32)(unsigned long) arg;
		rc = mdss_dsi_reconfig(pdata, mode);
		break;
	case MDSS_EVENT_DSI_PANEL_STATUS:
		/* no ESD checker registered means report "alive" */
		if (ctrl_pdata->check_status)
			rc = ctrl_pdata->check_status(ctrl_pdata);
		else
			rc = true;
		break;
	case MDSS_EVENT_PANEL_TIMING_SWITCH:
		rc = mdss_dsi_panel_timing_switch(ctrl_pdata, arg);
		break;
	case MDSS_EVENT_FB_REGISTERED:
		mdss_dsi_debugfs_init(ctrl_pdata);

		fbi = (struct fb_info *)arg;
		if (!fbi || !fbi->dev)
			break;

		/* remember the fb kobject/node for later DBA registration */
		ctrl_pdata->kobj = &fbi->dev->kobj;
		ctrl_pdata->fb_node = fbi->node;

		if (IS_ENABLED(CONFIG_MSM_DBA) &&
			pdata->panel_info.is_dba_panel) {
			queue_delayed_work(ctrl_pdata->workq,
				&ctrl_pdata->dba_work, HZ);
		}
		break;
	default:
		pr_debug("%s: unhandled event=%d\n", __func__, event);
		break;
	}
	pr_debug("%s-:event=%d, rc=%d\n", __func__, event, rc);
	return rc;
}
+
+static int mdss_dsi_set_override_cfg(char *override_cfg,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata, char *panel_cfg)
+{
+ struct mdss_panel_info *pinfo = &ctrl_pdata->panel_data.panel_info;
+ char *token = NULL;
+
+ pr_debug("%s: override config:%s\n", __func__, override_cfg);
+ while ((token = strsep(&override_cfg, ":"))) {
+ if (!strcmp(token, OVERRIDE_CFG)) {
+ continue;
+ } else if (!strcmp(token, SIM_HW_TE_PANEL)) {
+ pinfo->sim_panel_mode = SIM_HW_TE_MODE;
+ } else if (!strcmp(token, SIM_SW_TE_PANEL)) {
+ pinfo->sim_panel_mode = SIM_SW_TE_MODE;
+ } else if (!strcmp(token, SIM_PANEL)) {
+ pinfo->sim_panel_mode = SIM_MODE;
+ } else {
+ pr_err("%s: invalid override_cfg token: %s\n",
+ __func__, token);
+ return -EINVAL;
+ }
+ }
+ pr_debug("%s:sim_panel_mode:%d\n", __func__, pinfo->sim_panel_mode);
+
+ return 0;
+}
+
+static struct device_node *mdss_dsi_pref_prim_panel(
+ struct platform_device *pdev)
+{
+ struct device_node *dsi_pan_node = NULL;
+
+ pr_debug("%s:%d: Select primary panel from dt\n",
+ __func__, __LINE__);
+ dsi_pan_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,dsi-pref-prim-pan", 0);
+ if (!dsi_pan_node)
+ pr_err("%s:can't find panel phandle\n", __func__);
+
+ return dsi_pan_node;
+}
+
+/**
+ * mdss_dsi_find_panel_of_node(): find device node of dsi panel
+ * @pdev: platform_device of the dsi ctrl node
+ * @panel_cfg: string containing intf specific config data
+ *
+ * Function finds the panel device node using the interface
+ * specific configuration data. This configuration data is
+ * could be derived from the result of bootloader's GCDB
+ * panel detection mechanism. If such config data doesn't
+ * exist then this panel returns the default panel configured
+ * in the device tree.
+ *
+ * returns pointer to panel node on success, NULL on error.
+ */
+static struct device_node *mdss_dsi_find_panel_of_node(
+ struct platform_device *pdev, char *panel_cfg)
+{
+ int len, i = 0;
+ int ctrl_id = pdev->id - 1;
+ char panel_name[MDSS_MAX_PANEL_LEN] = "";
+ char ctrl_id_stream[3] = "0:";
+ char *str1 = NULL, *str2 = NULL, *override_cfg = NULL;
+ char cfg_np_name[MDSS_MAX_PANEL_LEN] = "";
+ struct device_node *dsi_pan_node = NULL, *mdss_node = NULL;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+ struct mdss_panel_info *pinfo = &ctrl_pdata->panel_data.panel_info;
+
+ len = strlen(panel_cfg);
+ ctrl_pdata->panel_data.dsc_cfg_np_name[0] = '\0';
+ if (!len) {
+ /* no panel cfg chg, parse dt */
+ pr_debug("%s:%d: no cmd line cfg present\n",
+ __func__, __LINE__);
+ goto end;
+ } else {
+ /* check if any override parameters are set */
+ pinfo->sim_panel_mode = 0;
+ override_cfg = strnstr(panel_cfg, "#" OVERRIDE_CFG, len);
+ if (override_cfg) {
+ *override_cfg = '\0';
+ if (mdss_dsi_set_override_cfg(override_cfg + 1,
+ ctrl_pdata, panel_cfg))
+ return NULL;
+ len = strlen(panel_cfg);
+ }
+
+ if (ctrl_id == 1)
+ strlcpy(ctrl_id_stream, "1:", 3);
+
+ /* get controller number */
+ str1 = strnstr(panel_cfg, ctrl_id_stream, len);
+ if (!str1) {
+ pr_err("%s: controller %s is not present in %s\n",
+ __func__, ctrl_id_stream, panel_cfg);
+ goto end;
+ }
+ if ((str1 != panel_cfg) && (*(str1-1) != ':')) {
+ str1 += CMDLINE_DSI_CTL_NUM_STRING_LEN;
+ pr_debug("false match with config node name in \"%s\". search again in \"%s\"\n",
+ panel_cfg, str1);
+ str1 = strnstr(str1, ctrl_id_stream, len);
+ if (!str1) {
+ pr_err("%s: 2. controller %s is not present in %s\n",
+ __func__, ctrl_id_stream, str1);
+ goto end;
+ }
+ }
+ str1 += CMDLINE_DSI_CTL_NUM_STRING_LEN;
+
+ /* get panel name */
+ str2 = strnchr(str1, strlen(str1), ':');
+ if (!str2) {
+ strlcpy(panel_name, str1, MDSS_MAX_PANEL_LEN);
+ } else {
+ for (i = 0; (str1 + i) < str2; i++)
+ panel_name[i] = *(str1 + i);
+ panel_name[i] = 0;
+ }
+ pr_info("%s: cmdline:%s panel_name:%s\n",
+ __func__, panel_cfg, panel_name);
+ if (!strcmp(panel_name, NONE_PANEL))
+ goto exit;
+
+ mdss_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,mdss-mdp", 0);
+ if (!mdss_node) {
+ pr_err("%s: %d: mdss_node null\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+ dsi_pan_node = of_find_node_by_name(mdss_node, panel_name);
+ if (!dsi_pan_node) {
+ pr_err("%s: invalid pan node \"%s\"\n",
+ __func__, panel_name);
+ goto end;
+ } else {
+ /* extract config node name if present */
+ str1 += i;
+ str2 = strnstr(str1, "config", strlen(str1));
+ if (str2) {
+ str1 = strnchr(str2, strlen(str2), ':');
+ if (str1) {
+ for (i = 0; ((str2 + i) < str1) &&
+ i < (MDSS_MAX_PANEL_LEN - 1); i++)
+ cfg_np_name[i] = *(str2 + i);
+ if ((i >= 0)
+ && (i < MDSS_MAX_PANEL_LEN))
+ cfg_np_name[i] = 0;
+ } else {
+ strlcpy(cfg_np_name, str2,
+ MDSS_MAX_PANEL_LEN);
+ }
+ strlcpy(ctrl_pdata->panel_data.dsc_cfg_np_name,
+ cfg_np_name, MDSS_MAX_PANEL_LEN);
+ }
+ }
+
+ return dsi_pan_node;
+ }
+end:
+ if (strcmp(panel_name, NONE_PANEL))
+ dsi_pan_node = mdss_dsi_pref_prim_panel(pdev);
+exit:
+ return dsi_pan_node;
+}
+
+static struct device_node *mdss_dsi_config_panel(struct platform_device *pdev,
+ int ndx)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+ char panel_cfg[MDSS_MAX_PANEL_LEN];
+ struct device_node *dsi_pan_node = NULL;
+ int rc = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: Unable to get the ctrl_pdata\n", __func__);
+ return NULL;
+ }
+
+ /* DSI panels can be different between controllers */
+ rc = mdss_dsi_get_panel_cfg(panel_cfg, ctrl_pdata);
+ if (!rc)
+ /* dsi panel cfg not present */
+ pr_warn("%s:%d:dsi specific cfg not present\n",
+ __func__, __LINE__);
+
+ /* find panel device node */
+ dsi_pan_node = mdss_dsi_find_panel_of_node(pdev, panel_cfg);
+ if (!dsi_pan_node) {
+ pr_err("%s: can't find panel node %s\n", __func__, panel_cfg);
+ of_node_put(dsi_pan_node);
+ return NULL;
+ }
+
+ rc = mdss_dsi_panel_init(dsi_pan_node, ctrl_pdata, ndx);
+ if (rc) {
+ pr_err("%s: dsi panel init failed\n", __func__);
+ of_node_put(dsi_pan_node);
+ return NULL;
+ }
+
+ return dsi_pan_node;
+}
+
/*
 * mdss_dsi_ctrl_clock_init() - set up link clocks and the clock manager.
 * @ctrl_pdev:  platform device of the DSI controller.
 * @ctrl_pdata: DSI controller data.
 *
 * Initializes the link clocks, creates the per-ctrl clock manager with
 * the pre/post clk on/off callbacks, and registers two clock clients:
 * one for the DSI driver itself and one for MDP-driven clock votes.
 *
 * Return: 0 on success; -EPERM when link clock init fails; otherwise the
 * negative errno from the clock manager, with everything already set up
 * torn down through the goto ladder.
 */
static int mdss_dsi_ctrl_clock_init(struct platform_device *ctrl_pdev,
				    struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
	int rc = 0;
	struct mdss_dsi_clk_info info;
	struct mdss_dsi_clk_client client1 = {"dsi_clk_client"};
	struct mdss_dsi_clk_client client2 = {"mdp_event_client"};
	void *handle;

	if (mdss_dsi_link_clk_init(ctrl_pdev, ctrl_pdata)) {
		pr_err("%s: unable to initialize Dsi ctrl clks\n", __func__);
		return -EPERM;
	}

	memset(&info, 0x0, sizeof(info));

	info.core_clks.mdp_core_clk = ctrl_pdata->shared_data->mdp_core_clk;
	info.core_clks.ahb_clk = ctrl_pdata->shared_data->ahb_clk;
	info.core_clks.axi_clk = ctrl_pdata->shared_data->axi_clk;
	info.core_clks.mmss_misc_ahb_clk =
		ctrl_pdata->shared_data->mmss_misc_ahb_clk;

	info.link_clks.esc_clk = ctrl_pdata->esc_clk;
	info.link_clks.byte_clk = ctrl_pdata->byte_clk;
	info.link_clks.pixel_clk = ctrl_pdata->pixel_clk;

	info.pre_clkoff_cb = mdss_dsi_pre_clkoff_cb;
	info.post_clkon_cb = mdss_dsi_post_clkon_cb;
	info.pre_clkon_cb = mdss_dsi_pre_clkon_cb;
	info.post_clkoff_cb = mdss_dsi_post_clkoff_cb;
	info.priv_data = ctrl_pdata;
	snprintf(info.name, DSI_CLK_NAME_LEN, "DSI%d", ctrl_pdata->ndx);
	ctrl_pdata->clk_mngr = mdss_dsi_clk_init(&info);
	if (IS_ERR_OR_NULL(ctrl_pdata->clk_mngr)) {
		rc = PTR_ERR(ctrl_pdata->clk_mngr);
		ctrl_pdata->clk_mngr = NULL;
		pr_err("dsi clock registration failed, rc = %d\n", rc);
		goto error_link_clk_deinit;
	}

	/*
	 * There are two clients that control dsi clocks. MDP driver controls
	 * the clock through MDSS_PANEL_EVENT_CLK_CTRL event and dsi driver
	 * through clock interface. To differentiate between the votes from the
	 * two clients, dsi driver will use two different handles to vote for
	 * clock states from dsi and mdp driver.
	 */
	handle = mdss_dsi_clk_register(ctrl_pdata->clk_mngr, &client1);
	if (IS_ERR_OR_NULL(handle)) {
		rc = PTR_ERR(handle);
		pr_err("failed to register %s client, rc = %d\n",
		       client1.client_name, rc);
		goto error_clk_deinit;
	} else {
		ctrl_pdata->dsi_clk_handle = handle;
	}

	handle = mdss_dsi_clk_register(ctrl_pdata->clk_mngr, &client2);
	if (IS_ERR_OR_NULL(handle)) {
		rc = PTR_ERR(handle);
		pr_err("failed to register %s client, rc = %d\n",
		       client2.client_name, rc);
		goto error_clk_client_deregister;
	} else {
		ctrl_pdata->mdp_clk_handle = handle;
	}

	return rc;
error_clk_client_deregister:
	mdss_dsi_clk_deregister(ctrl_pdata->dsi_clk_handle);
error_clk_deinit:
	mdss_dsi_clk_deinit(ctrl_pdata->clk_mngr);
error_link_clk_deinit:
	mdss_dsi_link_clk_deinit(&ctrl_pdev->dev, ctrl_pdata);
	return rc;
}
+
+/*
+ * mdss_dsi_set_clk_rates() - program the DSI link clock rates.
+ *
+ * Programs the byte, pixel and escape link clock rates on the DSI client
+ * handle; the rates take effect the next time the clocks are turned on
+ * (MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON).
+ *
+ * Return: 0 on success, the clock manager's error code otherwise.
+ */
+static int mdss_dsi_set_clk_rates(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	/* escape clock rate used by this driver (was a bare magic number) */
+	const u32 esc_clk_rate_hz = 19200000;
+	int rc;
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+					MDSS_DSI_LINK_BYTE_CLK,
+					ctrl_pdata->byte_clk_rate,
+					MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
+			__func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+					MDSS_DSI_LINK_PIX_CLK,
+					ctrl_pdata->pclk_rate,
+					MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
+			__func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+					MDSS_DSI_LINK_ESC_CLK,
+					esc_clk_rate_hz,
+					MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc)
+		pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
+			__func__);
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_cont_splash_config() - handle continuous-splash hand-off at probe.
+ *
+ * When the bootloader left the panel on (cont_splash_enabled), vote the
+ * panel power rails on, mark the controller fully active, and leave ALL
+ * DSI clocks voted on so the splash image is not disturbed; the extra
+ * vote is dropped later (see in-line comment below).  Otherwise, briefly
+ * enable only the core clock to read the DSI/PHY hardware revisions and
+ * record the panel as powered off.
+ *
+ * Return: 0 on success, or the panel power-on error code.
+ */
+static int mdss_dsi_cont_splash_config(struct mdss_panel_info *pinfo,
+				       struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	void *clk_handle;
+	int rc = 0;
+
+	if (pinfo->cont_splash_enabled) {
+		rc = mdss_dsi_panel_power_ctrl(&(ctrl_pdata->panel_data),
+			MDSS_PANEL_POWER_ON);
+		if (rc) {
+			pr_err("%s: Panel power on failed\n", __func__);
+			return rc;
+		}
+		if (ctrl_pdata->bklt_ctrl == BL_PWM)
+			mdss_dsi_panel_pwm_enable(ctrl_pdata);
+		/* splash is live: panel, MDP and DSI are all considered up */
+		ctrl_pdata->ctrl_state |= (CTRL_STATE_PANEL_INIT |
+			CTRL_STATE_MDP_ACTIVE | CTRL_STATE_DSI_ACTIVE);
+
+		/*
+		 * MDP client removes this extra vote during splash reconfigure
+		 * for command mode panel from interface. DSI removes the vote
+		 * during suspend-resume for video mode panel.
+		 */
+		if (ctrl_pdata->panel_data.panel_info.type == MIPI_CMD_PANEL)
+			clk_handle = ctrl_pdata->mdp_clk_handle;
+		else
+			clk_handle = ctrl_pdata->dsi_clk_handle;
+
+		mdss_dsi_clk_ctrl(ctrl_pdata, clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+		mdss_dsi_read_hw_revision(ctrl_pdata);
+		mdss_dsi_read_phy_revision(ctrl_pdata);
+		ctrl_pdata->is_phyreg_enabled = 1;
+		if (pinfo->type == MIPI_CMD_PANEL)
+			mdss_dsi_set_burst_mode(ctrl_pdata);
+	} else {
+		/* Turn on the clocks to read the DSI and PHY revision */
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+		mdss_dsi_read_hw_revision(ctrl_pdata);
+		mdss_dsi_read_phy_revision(ctrl_pdata);
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
+		pinfo->panel_power_state = MDSS_PANEL_POWER_OFF;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_get_bridge_chip_params() - read the DBA bridge index from DT.
+ *
+ * Only applies to DBA (display bridge adapter) panels: reads the
+ * "qcom,bridge-index" property into ctrl_pdata->bridge_index.  For
+ * non-DBA panels this is a no-op returning 0.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, or the
+ * of_property_read_u32() error code.
+ */
+static int mdss_dsi_get_bridge_chip_params(struct mdss_panel_info *pinfo,
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+	struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 temp_val = 0;
+
+	if (!ctrl_pdata || !pdev || !pinfo) {
+		/* pinfo was checked but previously missing from the log */
+		pr_err("%s: Invalid Params ctrl_pdata=%pK, pdev=%pK, pinfo=%pK\n",
+			__func__, ctrl_pdata, pdev, pinfo);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (pinfo->is_dba_panel) {
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,bridge-index", &temp_val);
+		if (rc) {
+			pr_err("%s:%d Unable to read qcom,bridge-index, ret=%d\n",
+				__func__, __LINE__, rc);
+			goto end;
+		}
+		pr_debug("%s: DT property %s is %X\n", __func__,
+			"qcom,bridge-index", temp_val);
+		ctrl_pdata->bridge_index = temp_val;
+	}
+end:
+	return rc;
+}
+
+/*
+ * mdss_dsi_ctrl_probe() - probe one DSI controller instance.
+ *
+ * Resolves the controller index from DT, initializes the clock manager,
+ * parses and registers the attached panel, programs the initial link
+ * clock rates, performs the continuous-splash hand-off, installs the TE
+ * interrupt (for TE-based ESD and command-mode panels), and creates the
+ * DBA workqueue.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 index;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	struct device_node *dsi_pan_node = NULL;
+	const char *ctrl_name;
+	struct mdss_util_intf *util;
+	/* shared across both controller probes: the TE irq is requested once */
+	static int te_irq_registered;
+	struct mdss_panel_data *pdata;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("%s: pdev not found for DSI controller\n", __func__);
+		return -ENODEV;
+	}
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "cell-index", &index);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Cell-index not specified, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (index == 0)
+		pdev->id = 1;
+	else
+		pdev->id = 2;
+
+	ctrl_pdata = mdss_dsi_get_ctrl(index);
+	if (!ctrl_pdata) {
+		pr_err("%s: Unable to get the ctrl_pdata\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, ctrl_pdata);
+
+	util = mdss_get_util_intf();
+	if (util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	ctrl_pdata->mdss_util = util;
+	atomic_set(&ctrl_pdata->te_irq_ready, 0);
+
+	ctrl_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!ctrl_name)
+		pr_info("%s:%d, DSI Ctrl name not specified\n",
+			__func__, __LINE__);
+	else
+		pr_info("%s: DSI Ctrl name = %s\n",
+			__func__, ctrl_name);
+
+	rc = mdss_dsi_pinctrl_init(pdev);
+	if (rc)
+		pr_warn("%s: failed to get pin resources\n", __func__);
+
+	if (index == 0) {
+		ctrl_pdata->panel_data.panel_info.pdest = DISPLAY_1;
+		ctrl_pdata->ndx = DSI_CTRL_0;
+	} else {
+		ctrl_pdata->panel_data.panel_info.pdest = DISPLAY_2;
+		ctrl_pdata->ndx = DSI_CTRL_1;
+	}
+
+	if (mdss_dsi_ctrl_clock_init(pdev, ctrl_pdata)) {
+		pr_err("%s: unable to initialize dsi clk manager\n", __func__);
+		return -EPERM;
+	}
+
+	dsi_pan_node = mdss_dsi_config_panel(pdev, index);
+	if (!dsi_pan_node) {
+		pr_err("%s: panel configuration failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* backlight is parsed only by the controller that owns it
+	 * (DISPLAY_1 in split-DSI mode)
+	 */
+	if (!mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) ||
+		(mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		(ctrl_pdata->panel_data.panel_info.pdest == DISPLAY_1))) {
+		rc = mdss_panel_parse_bl_settings(dsi_pan_node, ctrl_pdata);
+		if (rc) {
+			pr_warn("%s: dsi bl settings parse failed\n", __func__);
+			/* Panels like AMOLED and dsi2hdmi chip
+			 * does not need backlight control.
+			 * So we should not fail probe here.
+			 */
+			ctrl_pdata->bklt_ctrl = UNKNOWN_CTRL;
+		}
+	} else {
+		ctrl_pdata->bklt_ctrl = UNKNOWN_CTRL;
+	}
+
+	rc = dsi_panel_device_register(pdev, dsi_pan_node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: dsi panel dev reg failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	pinfo = &(ctrl_pdata->panel_data.panel_info);
+	if (!(mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) &&
+		pinfo->dynamic_fps) {
+		rc = mdss_dsi_shadow_clk_init(pdev, ctrl_pdata);
+
+		if (rc) {
+			/* NOTE(review): probe continues after this failure
+			 * and rc is overwritten below — confirm intended.
+			 */
+			pr_err("%s: unable to initialize shadow ctrl clks\n",
+				__func__);
+			rc = -EPERM;
+		}
+	}
+
+	rc = mdss_dsi_set_clk_rates(ctrl_pdata);
+	if (rc) {
+		pr_err("%s: Failed to set dsi clk rates\n", __func__);
+		/* was a bare return, skipping shadow-clk/panel cleanup */
+		goto error_shadow_clk_deinit;
+	}
+
+	rc = mdss_dsi_cont_splash_config(pinfo, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: Failed to set dsi splash config\n", __func__);
+		/* was a bare return, skipping shadow-clk/panel cleanup */
+		goto error_shadow_clk_deinit;
+	}
+
+	if (mdss_dsi_is_te_based_esd(ctrl_pdata)) {
+		init_completion(&ctrl_pdata->te_irq_comp);
+		rc = devm_request_irq(&pdev->dev,
+			gpio_to_irq(ctrl_pdata->disp_te_gpio),
+			hw_vsync_handler, IRQF_TRIGGER_FALLING,
+			"VSYNC_GPIO", ctrl_pdata);
+		if (rc) {
+			pr_err("%s: TE request_irq failed for ESD\n", __func__);
+			goto error_shadow_clk_deinit;
+		}
+		te_irq_registered = 1;
+		disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	}
+
+	pdata = &ctrl_pdata->panel_data;
+	init_completion(&pdata->te_done);
+	if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+		if (!te_irq_registered) {
+			rc = devm_request_irq(&pdev->dev,
+				gpio_to_irq(pdata->panel_te_gpio),
+				test_hw_vsync_handler, IRQF_TRIGGER_FALLING,
+				"VSYNC_GPIO", &ctrl_pdata->panel_data);
+			if (rc) {
+				pr_err("%s: TE request_irq failed\n", __func__);
+				goto error_shadow_clk_deinit;
+			}
+			te_irq_registered = 1;
+			disable_irq_nosync(gpio_to_irq(pdata->panel_te_gpio));
+		}
+	}
+
+	rc = mdss_dsi_get_bridge_chip_params(pinfo, ctrl_pdata, pdev);
+	if (rc) {
+		pr_err("%s: Failed to get bridge params\n", __func__);
+		goto error_shadow_clk_deinit;
+	}
+
+	ctrl_pdata->workq = create_workqueue("mdss_dsi_dba");
+	if (!ctrl_pdata->workq) {
+		pr_err("%s: Error creating workqueue\n", __func__);
+		rc = -EPERM;
+		/* previously jumped to error_pan_node, skipping the shadow
+		 * clock deinit performed for every other late failure
+		 */
+		goto error_shadow_clk_deinit;
+	}
+
+	INIT_DELAYED_WORK(&ctrl_pdata->dba_work, mdss_dsi_dba_work);
+
+	pr_info("%s: Dsi Ctrl->%d initialized, DSI rev:0x%x, PHY rev:0x%x\n",
+		__func__, index, ctrl_pdata->shared_data->hw_rev,
+		ctrl_pdata->shared_data->phy_rev);
+	mdss_dsi_pm_qos_add_request(ctrl_pdata);
+
+	if (index == 0)
+		ctrl_pdata->shared_data->dsi0_active = true;
+	else
+		ctrl_pdata->shared_data->dsi1_active = true;
+
+	return 0;
+
+error_shadow_clk_deinit:
+	mdss_dsi_shadow_clk_deinit(&pdev->dev, ctrl_pdata);
+error_pan_node:
+	mdss_dsi_unregister_bl_settings(ctrl_pdata);
+	of_node_put(dsi_pan_node);
+	return rc;
+}
+
+/*
+ * mdss_dsi_bus_scale_init() - set up the DSI bus-bandwidth voting client.
+ *
+ * Parses the msm_bus scaling table from DT and registers a bus client
+ * handle used later for bandwidth votes.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mdss_dsi_bus_scale_init(struct platform_device *pdev,
+			    struct dsi_shared_data *sdata)
+{
+	int rc = 0;
+
+	sdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (IS_ERR_OR_NULL(sdata->bus_scale_table)) {
+		/* PTR_ERR(NULL) would be 0, i.e. a silent false success */
+		rc = sdata->bus_scale_table ?
+			PTR_ERR(sdata->bus_scale_table) : -EINVAL;
+		/* clear the stale error pointer before returning; this
+		 * statement was previously placed after the return and
+		 * therefore unreachable
+		 */
+		sdata->bus_scale_table = NULL;
+		pr_err("%s: msm_bus_cl_get_pdata() failed, rc=%d\n", __func__,
+			rc);
+		return rc;
+	}
+
+	sdata->bus_handle =
+		msm_bus_scale_register_client(sdata->bus_scale_table);
+
+	if (!sdata->bus_handle) {
+		rc = -EINVAL;
+		pr_err("%s: bus_client register failed\n", __func__);
+	}
+
+	return rc;
+}
+
+/* Drop any outstanding bandwidth vote and release the bus client. */
+static void mdss_dsi_bus_scale_deinit(struct dsi_shared_data *sdata)
+{
+	if (!sdata->bus_handle)
+		return;
+
+	/* remove the aggregate vote before unregistering the client */
+	if (sdata->bus_refcount)
+		msm_bus_scale_client_update_request(sdata->bus_handle, 0);
+
+	sdata->bus_refcount = 0;
+	msm_bus_scale_unregister_client(sdata->bus_handle);
+	sdata->bus_handle = 0;
+}
+
+/*
+ * Parse optional shared DSI DT properties (ULPS clamp/phy-reset offsets
+ * and clock-lane recovery flag).  All properties are optional, so this
+ * always returns 0; the phy-reset offset is only looked up when the
+ * clamp offset is present.
+ */
+static int mdss_dsi_parse_dt_params(struct platform_device *pdev,
+		struct dsi_shared_data *sdata)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	if (!of_property_read_u32(np, "qcom,mmss-ulp-clamp-ctrl-offset",
+			&sdata->ulps_clamp_ctrl_off))
+		of_property_read_u32(np, "qcom,mmss-phyreset-ctrl-offset",
+				&sdata->ulps_phyrst_ctrl_off);
+
+	sdata->cmd_clk_ln_recovery_en = of_property_read_bool(np,
+			"qcom,dsi-clk-ln-recovery");
+
+	return 0;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/* Tear down DBA utils for controller 'val' if a downstream device
+ * (dsi-to-hdmi bridge) was registered on it.
+ */
+static void mdss_dsi_res_deinit_hdmi(struct platform_device *pdev, int val)
+{
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+
+	if (dsi_res->ctrl_pdata[val]->ds_registered) {
+		struct mdss_panel_info *pinfo =
+			&dsi_res->ctrl_pdata[val]->panel_data.panel_info;
+
+		/* pinfo is an address-of expression and can never be NULL;
+		 * the previous 'if (pinfo)' guard was always true
+		 */
+		mdss_dba_utils_deinit(pinfo->dba_data);
+	}
+}
+#else
+/* No-op stub when HDMI support is compiled out. */
+static void mdss_dsi_res_deinit_hdmi(struct platform_device *pdev, int val)
+{
+	/* was '(void)(*pdev)': a needless pointer dereference */
+	(void)pdev;
+	(void)val;
+}
+#endif
+
+/*
+ * mdss_dsi_res_deinit() - tear down the per-device DSI resources.
+ *
+ * Frees both controller private structures, de-initializes the shared
+ * regulators (in reverse order of initialization), releases bus-scaling
+ * and core clock resources, then frees the shared and root structures.
+ */
+static void mdss_dsi_res_deinit(struct platform_device *pdev)
+{
+	int i;
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+	struct dsi_shared_data *sdata;
+
+	if (!dsi_res) {
+		pr_err("%s: DSI root device drvdata not found\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < DSI_CTRL_MAX; i++) {
+		if (dsi_res->ctrl_pdata[i]) {
+			mdss_dsi_res_deinit_hdmi(pdev, i);
+			devm_kfree(&pdev->dev, dsi_res->ctrl_pdata[i]);
+		}
+	}
+
+	sdata = dsi_res->shared_data;
+	if (!sdata)
+		goto res_release;
+
+	/* regulators are released in reverse order of initialization */
+	for (i = (DSI_MAX_PM - 1); i >= DSI_CORE_PM; i--) {
+		if (msm_mdss_config_vreg(&pdev->dev,
+			sdata->power_data[i].vreg_config,
+			sdata->power_data[i].num_vreg, 1) < 0)
+			pr_err("%s: failed to de-init vregs for %s\n",
+				__func__, __mdss_dsi_pm_name(i));
+		mdss_dsi_put_dt_vreg_data(&pdev->dev,
+			&sdata->power_data[i]);
+	}
+
+	mdss_dsi_bus_scale_deinit(sdata);
+	mdss_dsi_core_clk_deinit(&pdev->dev, sdata);
+
+	/* sdata is known non-NULL here; redundant re-check removed */
+	devm_kfree(&pdev->dev, sdata);
+
+res_release:
+	/* dsi_res was validated at entry; redundant re-check removed */
+	devm_kfree(&pdev->dev, dsi_res);
+}
+
+/*
+ * mdss_dsi_res_init() - allocate and initialize the shared DSI resources.
+ *
+ * Runs once: on the first call it allocates the mdss_dsi_data root, the
+ * dsi_shared_data block, parses shared DT parameters, initializes core
+ * clocks, regulators and bus scaling, and allocates a ctrl_pdata per
+ * controller.  Subsequent calls only refresh the pdev back-pointer.
+ * On any failure the partially built state is undone via
+ * mdss_dsi_res_deinit().
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mdss_dsi_res_init(struct platform_device *pdev)
+{
+	int rc = 0, i;
+	struct dsi_shared_data *sdata;
+
+	mdss_dsi_res = platform_get_drvdata(pdev);
+	if (!mdss_dsi_res) {
+		mdss_dsi_res = devm_kzalloc(&pdev->dev,
+					  sizeof(struct mdss_dsi_data),
+					  GFP_KERNEL);
+		if (!mdss_dsi_res) {
+			pr_err("%s: FAILED: cannot alloc dsi data\n",
+			       __func__);
+			rc = -ENOMEM;
+			goto mem_fail;
+		}
+
+		mdss_dsi_res->shared_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct dsi_shared_data),
+				GFP_KERNEL);
+		pr_debug("%s Allocated shared_data=%pK\n", __func__,
+				mdss_dsi_res->shared_data);
+		if (!mdss_dsi_res->shared_data) {
+			pr_err("%s Unable to alloc mem for shared_data\n",
+					__func__);
+			rc = -ENOMEM;
+			goto mem_fail;
+		}
+
+		sdata = mdss_dsi_res->shared_data;
+
+		rc = mdss_dsi_parse_dt_params(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to parse mdss dsi DT params\n",
+				__func__);
+			goto mem_fail;
+		}
+
+		rc = mdss_dsi_core_clk_init(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to initialize DSI core clocks\n",
+				__func__);
+			goto mem_fail;
+		}
+
+		/* Parse the regulator information */
+		for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
+			rc = mdss_dsi_get_dt_vreg_data(&pdev->dev,
+				pdev->dev.of_node, &sdata->power_data[i], i);
+			if (rc) {
+				pr_err("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+					__func__, __mdss_dsi_pm_name(i), rc);
+				/* unwind the modules parsed so far;
+				 * NOTE(review): mdss_dsi_res_deinit() (via
+				 * mem_fail) puts the vreg data again —
+				 * confirm the double put is harmless
+				 */
+				i--;
+				for (; i >= DSI_CORE_PM; i--)
+					mdss_dsi_put_dt_vreg_data(&pdev->dev,
+						&sdata->power_data[i]);
+				goto mem_fail;
+			}
+		}
+		rc = mdss_dsi_regulator_init(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to init regulator, rc=%d\n",
+							__func__, rc);
+			goto mem_fail;
+		}
+
+		rc = mdss_dsi_bus_scale_init(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to init bus scale settings, rc=%d\n",
+							__func__, rc);
+			goto mem_fail;
+		}
+
+		mutex_init(&sdata->phy_reg_lock);
+		mutex_init(&sdata->pm_qos_lock);
+
+		/* one private data block per controller, both sharing sdata */
+		for (i = 0; i < DSI_CTRL_MAX; i++) {
+			mdss_dsi_res->ctrl_pdata[i] = devm_kzalloc(&pdev->dev,
+					sizeof(struct mdss_dsi_ctrl_pdata),
+					GFP_KERNEL);
+			if (!mdss_dsi_res->ctrl_pdata[i]) {
+				pr_err("%s Unable to alloc mem for ctrl=%d\n",
+						__func__, i);
+				rc = -ENOMEM;
+				goto mem_fail;
+			}
+			pr_debug("%s Allocated ctrl_pdata[%d]=%pK\n",
+				__func__, i, mdss_dsi_res->ctrl_pdata[i]);
+			mdss_dsi_res->ctrl_pdata[i]->shared_data =
+				mdss_dsi_res->shared_data;
+		}
+
+		platform_set_drvdata(pdev, mdss_dsi_res);
+	}
+
+	mdss_dsi_res->pdev = pdev;
+	pr_debug("%s: Setting up mdss_dsi_res=%pK\n", __func__, mdss_dsi_res);
+
+	return 0;
+
+mem_fail:
+	mdss_dsi_res_deinit(pdev);
+	return rc;
+}
+
+/*
+ * mdss_dsi_parse_hw_cfg() - determine the single/dual/split DSI topology.
+ *
+ * The configuration is taken from the boot-params panel string
+ * ("cfg:<name>[:...][#...]") when present, otherwise from the DT
+ * "hw-config" property.  Defaults to SINGLE_DSI.
+ *
+ * Return: 0 on success, -EINVAL when drvdata/shared data/config is missing.
+ */
+static int mdss_dsi_parse_hw_cfg(struct platform_device *pdev, char *pan_cfg)
+{
+	const char *data;
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+	struct dsi_shared_data *sdata;
+	char dsi_cfg[20];
+	char *cfg_prim = NULL, *cfg_sec = NULL, *ch = NULL;
+	int i = 0;
+
+	if (!dsi_res) {
+		pr_err("%s: DSI root device drvdata not found\n", __func__);
+		return -EINVAL;
+	}
+
+	sdata = mdss_dsi_res->shared_data;
+	if (!sdata) {
+		pr_err("%s: DSI shared data not found\n", __func__);
+		return -EINVAL;
+	}
+
+	sdata->hw_config = SINGLE_DSI;
+
+	if (pan_cfg)
+		cfg_prim = strnstr(pan_cfg, "cfg:", strlen(pan_cfg));
+	if (cfg_prim) {
+		cfg_prim += 4;
+
+		cfg_sec = strnchr(cfg_prim, strlen(cfg_prim), ':');
+		if (!cfg_sec)
+			cfg_sec = cfg_prim + strlen(cfg_prim);
+
+		/*
+		 * Copy the config token bounded by the destination size:
+		 * the loop previously had no bound on dsi_cfg and could
+		 * overflow the stack buffer for a long boot-params string.
+		 */
+		for (i = 0; ((cfg_prim + i) < cfg_sec) &&
+		     (*(cfg_prim + i) != '#') &&
+		     (i < (int)sizeof(dsi_cfg) - 1); i++)
+			dsi_cfg[i] = *(cfg_prim + i);
+
+		dsi_cfg[i] = '\0';
+		data = dsi_cfg;
+	} else {
+		data = of_get_property(pdev->dev.of_node,
+			"hw-config", NULL);
+	}
+
+	if (data) {
+		/*
+		 * Strip the override parameter (#override:sim) passed for
+		 * simulator panels.  NOTE(review): when 'data' comes from
+		 * of_get_property() this write lands in the DT property
+		 * memory (const cast away) — preserved from the original,
+		 * confirm it is intended.
+		 */
+		ch = strnstr(data, "#", strlen(data));
+		if (ch)
+			*ch = '\0';
+
+		if (!strcmp(data, "dual_dsi"))
+			sdata->hw_config = DUAL_DSI;
+		else if (!strcmp(data, "split_dsi"))
+			sdata->hw_config = SPLIT_DSI;
+		else if (!strcmp(data, "single_dsi"))
+			sdata->hw_config = SINGLE_DSI;
+		else
+			pr_err("%s: Incorrect string for DSI config:%s. Setting default as SINGLE_DSI\n",
+				__func__, data);
+	} else {
+		pr_err("%s: Error: No DSI HW config found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: DSI h/w configuration is %d\n", __func__,
+		sdata->hw_config);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_pll_src_cfg() - select the DSI PLL source.
+ *
+ * The boot-params panel string (":pll0"/":pll1") takes precedence over
+ * the DT "pll-src-config" property.  Defaults to PLL_SRC_DEFAULT.
+ */
+static void mdss_dsi_parse_pll_src_cfg(struct platform_device *pdev,
+				       char *pan_cfg)
+{
+	const char *data;
+	char *pll_ptr, pll_cfg[10] = {'\0'};
+	struct dsi_shared_data *sdata = mdss_dsi_res->shared_data;
+
+	sdata->pll_src_config = PLL_SRC_DEFAULT;
+
+	if (pan_cfg) {
+		pll_ptr = strnstr(pan_cfg, ":pll0", strlen(pan_cfg));
+		if (!pll_ptr) {
+			pll_ptr = strnstr(pan_cfg, ":pll1", strlen(pan_cfg));
+			/*
+			 * strlcpy takes the destination size: the previous
+			 * strlen(pll_cfg) was always 0 (zero-initialized
+			 * buffer), so nothing was ever copied and the
+			 * boot-params override silently never applied.
+			 */
+			if (pll_ptr)
+				strlcpy(pll_cfg, "PLL1", sizeof(pll_cfg));
+		} else {
+			strlcpy(pll_cfg, "PLL0", sizeof(pll_cfg));
+		}
+	}
+	data = pll_cfg;
+
+	if (!data || !strcmp(data, ""))
+		data = of_get_property(pdev->dev.of_node,
+			"pll-src-config", NULL);
+	if (data) {
+		if (!strcmp(data, "PLL0"))
+			sdata->pll_src_config = PLL_SRC_0;
+		else if (!strcmp(data, "PLL1"))
+			sdata->pll_src_config = PLL_SRC_1;
+		else
+			pr_err("%s: invalid pll src config %s\n",
+				__func__, data);
+	} else {
+		pr_debug("%s: PLL src config not specified\n", __func__);
+	}
+
+	pr_debug("%s: pll_src_config = %d", __func__, sdata->pll_src_config);
+}
+
+/*
+ * Sanity-check the requested PLL source against the DSI topology.
+ *
+ * DSI PLL1 can only drive DSI PHY1, therefore:
+ * - split-dsi supports PLL0 only (PLL1 is rejected);
+ * - dual-dsi forces DSI0-PLL0/DSI1-PLL1, so any explicit selection is
+ *   reset to PLL_SRC_DEFAULT.
+ *
+ * Return: 0 when valid, -EINVAL for an unsupported combination.
+ */
+static int mdss_dsi_validate_pll_src_config(struct dsi_shared_data *sdata)
+{
+	if (mdss_dsi_is_hw_config_split(sdata) &&
+	    mdss_dsi_is_pll_src_pll1(sdata)) {
+		pr_err("%s: unsupported PLL config: using PLL1 for split-dsi\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (mdss_dsi_is_hw_config_dual(sdata) &&
+	    !mdss_dsi_is_pll_src_default(sdata)) {
+		pr_debug("%s: pll src config not applicable for dual-dsi\n",
+			__func__);
+		sdata->pll_src_config = PLL_SRC_DEFAULT;
+	}
+
+	return 0;
+}
+
+/* Validate the overall DSI hardware configuration (currently only the
+ * PLL source selection).  'pdev' is kept for interface stability.
+ */
+static int mdss_dsi_validate_config(struct platform_device *pdev)
+{
+	return mdss_dsi_validate_pll_src_config(mdss_dsi_res->shared_data);
+}
+
+/* OF match table for the per-controller ("qcom,mdss-dsi-ctrl") driver */
+static const struct of_device_id mdss_dsi_ctrl_dt_match[] = {
+	{.compatible = "qcom,mdss-dsi-ctrl"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_dsi_ctrl_dt_match);
+
+/*
+ * mdss_dsi_probe() - probe the DSI root ("qcom,mdss-dsi") device.
+ *
+ * Defers until MDP has probed, bails out if HDMI is the primary
+ * interface, then allocates the shared DSI resources, parses the h/w
+ * and PLL-source configuration, and populates the child controller
+ * devices from DT.
+ *
+ * Return: 0 on success, -EPROBE_DEFER while MDP is not ready, or a
+ * negative error code.
+ */
+static int mdss_dsi_probe(struct platform_device *pdev)
+{
+	struct mdss_panel_cfg *pan_cfg = NULL;
+	struct mdss_util_intf *util;
+	char *panel_cfg;
+	int rc = 0;
+
+	util = mdss_get_util_intf();
+	if (util == NULL) {
+		pr_err("%s: Failed to get mdss utility functions\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!util->mdp_probe_done) {
+		pr_err("%s: MDP not probed yet!\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("%s: DSI driver only supports device tree probe\n",
+			__func__);
+		return -ENOTSUPP;
+	}
+
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_HDMI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		pr_debug("%s: HDMI is primary\n", __func__);
+		return -ENODEV;
+	}
+
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR_OR_NULL(pan_cfg)) {
+		/* NOTE(review): when pan_cfg is NULL, PTR_ERR(NULL) is 0
+		 * and probe reports success without initializing anything
+		 * — confirm this is the intended "no DSI panel" behavior.
+		 */
+		rc = PTR_ERR(pan_cfg);
+		goto error;
+	} else {
+		panel_cfg = pan_cfg->arg_cfg;
+	}
+
+	rc = mdss_dsi_res_init(pdev);
+	if (rc) {
+		pr_err("%s Unable to set dsi res\n", __func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_parse_hw_cfg(pdev, panel_cfg);
+	if (rc) {
+		pr_err("%s Unable to parse dsi h/w config\n", __func__);
+		mdss_dsi_res_deinit(pdev);
+		return rc;
+	}
+
+	mdss_dsi_parse_pll_src_cfg(pdev, panel_cfg);
+
+	of_platform_populate(pdev->dev.of_node, mdss_dsi_ctrl_dt_match,
+				NULL, &pdev->dev);
+
+	/* NOTE(review): a validate failure below returns without
+	 * res_deinit; the allocated resources persist — confirm.
+	 */
+	rc = mdss_dsi_validate_config(pdev);
+	if (rc) {
+		pr_err("%s: Invalid DSI hw configuration\n", __func__);
+		goto error;
+	}
+
+	mdss_dsi_config_clk_src(pdev);
+
+error:
+	return rc;
+}
+
+/* Platform driver remove for the DSI root device: releases all shared
+ * DSI resources allocated by mdss_dsi_res_init().
+ */
+static int mdss_dsi_remove(struct platform_device *pdev)
+{
+	mdss_dsi_res_deinit(pdev);
+	return 0;
+}
+
+/*
+ * mdss_dsi_ctrl_remove() - tear down one DSI controller instance.
+ *
+ * Releases the PM QoS request, panel regulators, iomapped register
+ * regions, debugfs entries and the DBA workqueue.  devm-managed
+ * resources are released by the driver core.
+ *
+ * Return: 0 on success, -ENODEV when drvdata is missing.
+ */
+static int mdss_dsi_ctrl_remove(struct platform_device *pdev)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+
+	if (!ctrl_pdata) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	mdss_dsi_pm_qos_remove_request(ctrl_pdata->shared_data);
+
+	if (msm_mdss_config_vreg(&pdev->dev,
+			ctrl_pdata->panel_power_data.vreg_config,
+			ctrl_pdata->panel_power_data.num_vreg, 1) < 0)
+		pr_err("%s: failed to de-init vregs for %s\n",
+				__func__, __mdss_dsi_pm_name(DSI_PANEL_PM));
+	mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->panel_power_data);
+
+	/* removed the unused (and wrongly typed) 'mfd' local that re-read
+	 * drvdata: drvdata holds a mdss_dsi_ctrl_pdata, not msm_fb_data_type
+	 */
+	msm_mdss_iounmap(&ctrl_pdata->mmss_misc_io);
+	msm_mdss_iounmap(&ctrl_pdata->phy_io);
+	msm_mdss_iounmap(&ctrl_pdata->ctrl_io);
+	mdss_dsi_debugfs_cleanup(ctrl_pdata);
+
+	if (ctrl_pdata->workq)
+		destroy_workqueue(ctrl_pdata->workq);
+
+	return 0;
+}
+
+struct device dsi_dev;
+
+/*
+ * Map the register spaces (ctrl, phy, optional phy-regulator, optional
+ * mmss_misc) for one DSI controller and sanity-check that the panel
+ * destination matches the controller's cell-index.
+ *
+ * Return: 0 on success, -EPERM on a bad panel->ctrl mapping, or a
+ * negative error from DT/ioremap.
+ */
+int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 index;
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: Cell-index not specified, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* only controllers 0 and 1 exist */
+	if (index > 1) {
+		pr_err("%s:%d Unknown Ctrl mapped to panel\n",
+			__func__, __LINE__);
+		return -EPERM;
+	}
+
+	/* ctrl0 must drive DISPLAY_1, ctrl1 must drive DISPLAY_2 */
+	if ((index == 0 && mode != DISPLAY_1) ||
+	    (index == 1 && mode != DISPLAY_2)) {
+		pr_err("%s:%d Panel->Ctrl mapping is wrong\n",
+			__func__, __LINE__);
+		return -EPERM;
+	}
+
+	rc = msm_mdss_ioremap_byname(pdev, &ctrl->ctrl_io, "dsi_ctrl");
+	if (rc) {
+		pr_err("%s:%d unable to remap dsi ctrl resources\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	ctrl->ctrl_base = ctrl->ctrl_io.base;
+	ctrl->reg_size = ctrl->ctrl_io.len;
+
+	rc = msm_mdss_ioremap_byname(pdev, &ctrl->phy_io, "dsi_phy");
+	if (rc) {
+		pr_err("%s:%d unable to remap dsi phy resources\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	/* the phy regulator region is optional */
+	rc = msm_mdss_ioremap_byname(pdev, &ctrl->phy_regulator_io,
+			"dsi_phy_regulator");
+	if (rc)
+		pr_debug("%s:%d unable to remap dsi phy regulator resources\n",
+			__func__, __LINE__);
+	else
+		pr_info("%s: phy_regulator_base=%pK phy_regulator_size=%x\n",
+			__func__, ctrl->phy_regulator_io.base,
+			ctrl->phy_regulator_io.len);
+
+	pr_info("%s: ctrl_base=%pK ctrl_size=%x phy_base=%pK phy_size=%x\n",
+		__func__, ctrl->ctrl_base, ctrl->reg_size, ctrl->phy_io.base,
+		ctrl->phy_io.len);
+
+	/* mmss_misc is optional as well */
+	rc = msm_mdss_ioremap_byname(pdev, &ctrl->mmss_misc_io,
+		"mmss_misc_phys");
+	if (rc) {
+		pr_debug("%s:%d mmss_misc IO remap failed\n",
+			__func__, __LINE__);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_irq_init() - request the DSI controller IRQ and allocate the
+ * book-keeping used by the irq enable/disable helpers.
+ *
+ * The IRQ is requested via devm (auto-released on driver detach) and
+ * left disabled until explicitly enabled.
+ *
+ * Return: 0 on success, -ENOMEM or the request_irq() error otherwise.
+ */
+static int mdss_dsi_irq_init(struct device *dev, int irq_no,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ret;
+
+	ret = devm_request_irq(dev, irq_no, mdss_dsi_isr,
+				0, "DSI", ctrl);
+	if (ret) {
+		pr_err("msm_dsi_irq_init request_irq() failed!\n");
+		return ret;
+	}
+
+	disable_irq(irq_no);
+	/* single zeroed struct: kzalloc, not kcalloc(1, ...) */
+	ctrl->dsi_hw->irq_info = kzalloc(sizeof(struct irq_info),
+					 GFP_KERNEL);
+	if (!ctrl->dsi_hw->irq_info)
+		return -ENOMEM;
+
+	ctrl->dsi_hw->irq_info->irq = irq_no;
+	ctrl->dsi_hw->irq_info->irq_ena = false;
+
+	return ret;
+}
+
+/*
+ * Parse the "qcom,lane-map" DT property into a DSI_LANE_MAP_* value.
+ * Unknown or missing values fall back to the identity map (0123).
+ */
+static void mdss_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap)
+{
+	static const struct {
+		const char *name;
+		char map;
+	} lane_maps[] = {
+		{ "lane_map_3012", DSI_LANE_MAP_3012 },
+		{ "lane_map_2301", DSI_LANE_MAP_2301 },
+		{ "lane_map_1230", DSI_LANE_MAP_1230 },
+		{ "lane_map_0321", DSI_LANE_MAP_0321 },
+		{ "lane_map_1032", DSI_LANE_MAP_1032 },
+		{ "lane_map_2103", DSI_LANE_MAP_2103 },
+		{ "lane_map_3210", DSI_LANE_MAP_3210 },
+	};
+	const char *data;
+	int i;
+
+	*dlane_swap = DSI_LANE_MAP_0123;
+	data = of_get_property(np, "qcom,lane-map", NULL);
+	if (!data)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(lane_maps); i++) {
+		if (!strcmp(data, lane_maps[i].name)) {
+			*dlane_swap = lane_maps[i].map;
+			break;
+		}
+	}
+}
+
+/*
+ * mdss_dsi_parse_ctrl_params() - parse per-controller DT settings.
+ *
+ * Reads the PHY strength/regulator/BIST/lane configuration byte arrays,
+ * timing-db and sync-wait-broadcast flags, lane map, pluggable flag and
+ * display id.  Strength, regulator and lane configs are mandatory.
+ *
+ * NOTE(review): the strength/regulator/lanecfg copies trust the DT
+ * lengths and are not bounded by the dsi_phy_db array sizes — confirm
+ * the arrays are sized for the maximum DT payload.
+ *
+ * Return: 0 on success, -EINVAL when a mandatory property is missing.
+ */
+static int mdss_dsi_parse_ctrl_params(struct platform_device *ctrl_pdev,
+	struct device_node *pan_node, struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int i, len;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	const char *data;
+
+	ctrl_pdata->null_insert_enabled = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,null-insertion-enabled");
+
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-strength-ctrl", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read Phy Strength ctrl settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pinfo->mipi.dsi_phy_db.strength_len = len;
+	for (i = 0; i < len; i++)
+		pinfo->mipi.dsi_phy_db.strength[i] = data[i];
+
+	pinfo->mipi.dsi_phy_db.reg_ldo_mode = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,regulator-ldo-mode");
+
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-regulator-settings", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read Phy regulator settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pinfo->mipi.dsi_phy_db.regulator_len = len;
+	for (i = 0; i < len; i++)
+		pinfo->mipi.dsi_phy_db.regulator[i] = data[i];
+
+	/* BIST control is optional but must be exactly 6 bytes when set */
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-bist-ctrl", &len);
+	if ((!data) || (len != 6))
+		pr_debug("%s:%d, Unable to read Phy Bist Ctrl settings\n",
+			__func__, __LINE__);
+	else
+		for (i = 0; i < len; i++)
+			pinfo->mipi.dsi_phy_db.bistctrl[i] = data[i];
+
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-lane-config", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read Phy lane configure settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pinfo->mipi.dsi_phy_db.lanecfg_len = len;
+	for (i = 0; i < len; i++)
+		pinfo->mipi.dsi_phy_db.lanecfg[i] = data[i];
+
+	ctrl_pdata->timing_db_mode = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,timing-db-mode");
+
+	ctrl_pdata->cmd_sync_wait_broadcast = of_property_read_bool(
+		pan_node, "qcom,cmd-sync-wait-broadcast");
+
+	/* in split mode, DISPLAY_2 triggers the broadcast sync wait */
+	if (ctrl_pdata->cmd_sync_wait_broadcast &&
+		mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		(pinfo->pdest == DISPLAY_2))
+		ctrl_pdata->cmd_sync_wait_trigger = true;
+
+	pr_debug("%s: cmd_sync_wait_enable=%d trigger=%d\n", __func__,
+		ctrl_pdata->cmd_sync_wait_broadcast,
+		ctrl_pdata->cmd_sync_wait_trigger);
+
+	mdss_dsi_parse_lane_swap(ctrl_pdev->dev.of_node,
+			&(ctrl_pdata->dlane_swap));
+
+	pinfo->is_pluggable = of_property_read_bool(ctrl_pdev->dev.of_node,
+		"qcom,pluggable");
+
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,display-id", &len);
+	if (!data || len <= 0)
+		pr_err("%s:%d Unable to read qcom,display-id, data=%pK,len=%d\n",
+			__func__, __LINE__, data, len);
+	else
+		snprintf(ctrl_pdata->panel_data.panel_info.display_id,
+			MDSS_DISPLAY_ID_MAX_LEN, "%s", data);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_gpio_params() - resolve the controller's GPIOs from DT.
+ *
+ * Looks up display-enable, TE, backlight-enable, reset, mode and
+ * interface-mux GPIOs.  All are optional (absence is only logged); the
+ * TE gpio is mirrored into panel_data for the TE irq handlers.
+ *
+ * Return: always 0.
+ */
+static int mdss_dsi_parse_gpio_params(struct platform_device *ctrl_pdev,
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	struct mdss_panel_data *pdata = &ctrl_pdata->panel_data;
+
+	/*
+	 * If disp_en_gpio has been set previously (disp_en_gpio > 0)
+	 * while parsing the panel node, then do not override it
+	 */
+	if (ctrl_pdata->disp_en_gpio <= 0) {
+		ctrl_pdata->disp_en_gpio = of_get_named_gpio(
+			ctrl_pdev->dev.of_node,
+			"qcom,platform-enable-gpio", 0);
+
+		if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
+			pr_debug("%s:%d, Disp_en gpio not specified\n",
+					__func__, __LINE__);
+	}
+
+	ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-te-gpio", 0);
+
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio))
+		pr_err("%s:%d, TE gpio not specified\n",
+			__func__, __LINE__);
+	/* share the TE gpio with the generic panel layer */
+	pdata->panel_te_gpio = ctrl_pdata->disp_te_gpio;
+
+	ctrl_pdata->bklt_en_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-bklight-en-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+		pr_info("%s: bklt_en gpio not specified\n", __func__);
+
+	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-reset-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
+		pr_err("%s:%d, reset gpio not specified\n",
+			__func__, __LINE__);
+
+	/* the mode gpio only exists for panels that advertise one */
+	if (pinfo->mode_gpio_state != MODE_GPIO_NOT_VALID) {
+
+		ctrl_pdata->mode_gpio = of_get_named_gpio(
+					ctrl_pdev->dev.of_node,
+					"qcom,platform-mode-gpio", 0);
+		if (!gpio_is_valid(ctrl_pdata->mode_gpio))
+			pr_info("%s:%d, mode gpio not specified\n",
+							__func__, __LINE__);
+	} else {
+		ctrl_pdata->mode_gpio = -EINVAL;
+	}
+
+	ctrl_pdata->intf_mux_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-intf-mux-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->intf_mux_gpio))
+		pr_debug("%s:%d, intf mux gpio not specified\n",
+			__func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * Decide whether this controller drives the primary panel.
+ *
+ * For split and single DSI the panel is always primary.  For dual DSI:
+ * the right (second) controller is secondary whenever the other
+ * controller already claimed primary (DSI1, or the bridge-chip side).
+ */
+static void mdss_dsi_set_prim_panel(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_panel_info *pinfo = &ctrl_pdata->panel_data.panel_info;
+	struct mdss_dsi_ctrl_pdata *octrl;
+
+	pinfo->is_prim_panel = true;
+
+	if (!mdss_dsi_is_hw_config_dual(ctrl_pdata->shared_data))
+		return;
+	if (!mdss_dsi_is_right_ctrl(ctrl_pdata))
+		return;
+
+	octrl = mdss_dsi_get_other_ctrl(ctrl_pdata);
+	if (octrl && octrl->panel_data.panel_info.is_prim_panel)
+		pinfo->is_prim_panel = false;
+}
+
+/*
+ * dsi_panel_device_register() - register a parsed panel with MDSS.
+ *
+ * Computes the link clock rates from the panel timing, sets up panel
+ * regulators, parses controller/gpio DT settings, maps the controller
+ * register spaces, wires the event/status callbacks, optionally
+ * installs the dedicated DSI IRQ, applies the ULPS-suspend regulator
+ * vote, detects continuous splash and finally registers the panel and
+ * its debugfs entries.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dsi_panel_device_register(struct platform_device *ctrl_pdev,
+	struct device_node *pan_node, struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mipi_panel_info *mipi;
+	int rc;
+	struct dsi_shared_data *sdata;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	struct resource *res;
+	u64 clk_rate;
+
+	mipi = &(pinfo->mipi);
+
+	pinfo->type =
+		((mipi->mode == DSI_VIDEO_MODE)
+			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+	rc = mdss_dsi_clk_div_config(pinfo, mipi->frame_rate);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n", __func__);
+		return rc;
+	}
+	ctrl_pdata->pclk_rate = mipi->dsi_pclk_rate;
+	/* byte clock = bit clock / 8 */
+	clk_rate = pinfo->clk_rate;
+	do_div(clk_rate, 8U);
+	ctrl_pdata->byte_clk_rate = (u32)clk_rate;
+	pr_debug("%s: pclk=%d, bclk=%d\n", __func__,
+		ctrl_pdata->pclk_rate, ctrl_pdata->byte_clk_rate);
+
+	rc = mdss_dsi_get_dt_vreg_data(&ctrl_pdev->dev, pan_node,
+		&ctrl_pdata->panel_power_data, DSI_PANEL_PM);
+	if (rc) {
+		/* pr_err for consistency with the rest of this file
+		 * (was the file's only DEV_ERR)
+		 */
+		pr_err("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+			__func__, __mdss_dsi_pm_name(DSI_PANEL_PM), rc);
+		return rc;
+	}
+
+	rc = msm_mdss_config_vreg(&ctrl_pdev->dev,
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (rc) {
+		pr_err("%s: failed to init regulator, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = mdss_dsi_parse_ctrl_params(ctrl_pdev, pan_node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse ctrl settings, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo,
+		FPS_RESOLUTION_HZ);
+	pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
+
+	rc = mdss_dsi_parse_gpio_params(ctrl_pdev, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse gpio params, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (mdss_dsi_retrieve_ctrl_resources(ctrl_pdev,
+					     pinfo->pdest,
+					     ctrl_pdata)) {
+		pr_err("%s: unable to get Dsi controller res\n", __func__);
+		return -EPERM;
+	}
+
+	ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
+	ctrl_pdata->panel_data.get_fb_node = mdss_dsi_get_fb_node_cb;
+
+	/* pick the ESD status-check strategy; BTA is the fallback */
+	if (ctrl_pdata->status_mode == ESD_REG ||
+			ctrl_pdata->status_mode == ESD_REG_NT35596)
+		ctrl_pdata->check_status = mdss_dsi_reg_status_check;
+	else if (ctrl_pdata->status_mode == ESD_BTA)
+		ctrl_pdata->check_status = mdss_dsi_bta_status_check;
+
+	if (ctrl_pdata->status_mode == ESD_MAX) {
+		pr_err("%s: Using default BTA for ESD check\n", __func__);
+		ctrl_pdata->check_status = mdss_dsi_bta_status_check;
+	}
+	if (ctrl_pdata->bklt_ctrl == BL_PWM)
+		mdss_dsi_panel_pwm_cfg(ctrl_pdata);
+
+	mdss_dsi_ctrl_init(&ctrl_pdev->dev, ctrl_pdata);
+	mdss_dsi_set_prim_panel(ctrl_pdata);
+
+	ctrl_pdata->dsi_irq_line = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,dsi-irq-line");
+
+	if (ctrl_pdata->dsi_irq_line) {
+		/* DSI has it's own irq line */
+		res = platform_get_resource(ctrl_pdev, IORESOURCE_IRQ, 0);
+		if (!res || res->start == 0) {
+			pr_err("%s:%d unable to get the MDSS irq resources\n",
+				__func__, __LINE__);
+			return -ENODEV;
+		}
+		rc = mdss_dsi_irq_init(&ctrl_pdev->dev, res->start, ctrl_pdata);
+		if (rc) {
+			dev_err(&ctrl_pdev->dev, "%s: failed to init irq\n",
+				__func__);
+			return rc;
+		}
+	}
+	ctrl_pdata->ctrl_state = CTRL_STATE_UNKNOWN;
+
+	/*
+	 * If ULPS during suspend is enabled, add an extra vote for the
+	 * DSI CTRL power module. This keeps the regulator always enabled.
+	 * This is needed for the DSI PHY to maintain ULPS state during
+	 * suspend also.
+	 */
+	sdata = ctrl_pdata->shared_data;
+
+	if (pinfo->ulps_suspend_enabled) {
+		rc = msm_mdss_enable_vreg(
+			sdata->power_data[DSI_PHY_PM].vreg_config,
+			sdata->power_data[DSI_PHY_PM].num_vreg, 1);
+		if (rc) {
+			/* NOTE(review): message says DSI_CTRL_PM but the
+			 * vote above targets DSI_PHY_PM — confirm which
+			 * is intended.
+			 */
+			pr_err("%s: failed to enable vregs for DSI_CTRL_PM\n",
+				__func__);
+			return rc;
+		}
+	}
+
+	pinfo->cont_splash_enabled =
+		ctrl_pdata->mdss_util->panel_intf_status(pinfo->pdest,
+		MDSS_PANEL_INTF_DSI) ? true : false;
+
+	pr_info("%s: Continuous splash %s\n", __func__,
+		pinfo->cont_splash_enabled ? "enabled" : "disabled");
+
+	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
+	if (rc) {
+		pr_err("%s: unable to register MIPI DSI panel\n", __func__);
+		return rc;
+	}
+
+	if (pinfo->pdest == DISPLAY_1) {
+		mdss_debug_register_io("dsi0_ctrl", &ctrl_pdata->ctrl_io, NULL);
+		mdss_debug_register_io("dsi0_phy", &ctrl_pdata->phy_io, NULL);
+		if (ctrl_pdata->phy_regulator_io.len)
+			mdss_debug_register_io("dsi0_phy_regulator",
+				&ctrl_pdata->phy_regulator_io, NULL);
+	} else {
+		mdss_debug_register_io("dsi1_ctrl", &ctrl_pdata->ctrl_io, NULL);
+		mdss_debug_register_io("dsi1_phy", &ctrl_pdata->phy_io, NULL);
+		if (ctrl_pdata->phy_regulator_io.len)
+			mdss_debug_register_io("dsi1_phy_regulator",
+				&ctrl_pdata->phy_regulator_io, NULL);
+	}
+
+	panel_debug_register_base("panel",
+		ctrl_pdata->ctrl_base, ctrl_pdata->reg_size);
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+/* OF match table for the DSI root ("qcom,mdss-dsi") driver */
+static const struct of_device_id mdss_dsi_dt_match[] = {
+	{.compatible = "qcom,mdss-dsi"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_dsi_dt_match);
+
+/* Platform driver for the DSI root device (shared resources) */
+static struct platform_driver mdss_dsi_driver = {
+	.probe = mdss_dsi_probe,
+	.remove = mdss_dsi_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_dsi",
+		.of_match_table = mdss_dsi_dt_match,
+	},
+};
+
+/* Platform driver for the individual DSI controller child devices */
+static struct platform_driver mdss_dsi_ctrl_driver = {
+	.probe = mdss_dsi_ctrl_probe,
+	.remove = mdss_dsi_ctrl_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_dsi_ctrl",
+		.of_match_table = mdss_dsi_ctrl_dt_match,
+	},
+};
+
+/* Register the DSI root platform driver. */
+static int mdss_dsi_register_driver(void)
+{
+	return platform_driver_register(&mdss_dsi_driver);
+}
+
+/* Module init hook for the DSI root driver. */
+static int __init mdss_dsi_driver_init(void)
+{
+	int rc = mdss_dsi_register_driver();
+
+	if (rc)
+		pr_err("mdss_dsi_register_driver() failed!\n");
+
+	return rc;
+}
+module_init(mdss_dsi_driver_init);
+
+
+/* Register the per-controller DSI platform driver. */
+static int mdss_dsi_ctrl_register_driver(void)
+{
+	return platform_driver_register(&mdss_dsi_ctrl_driver);
+}
+
+/* Module init hook for the DSI controller driver. */
+static int __init mdss_dsi_ctrl_driver_init(void)
+{
+	int rc = mdss_dsi_ctrl_register_driver();
+
+	if (rc)
+		pr_err("mdss_dsi_ctrl_register_driver() failed!\n");
+
+	return rc;
+}
+module_init(mdss_dsi_ctrl_driver_init);
+
+/*
+ * Module exit: unregister both platform drivers registered at init.
+ * Previously only the ctrl driver was unregistered, leaving
+ * mdss_dsi_driver registered after module unload.
+ */
+static void __exit mdss_dsi_driver_cleanup(void)
+{
+	platform_driver_unregister(&mdss_dsi_ctrl_driver);
+	platform_driver_unregister(&mdss_dsi_driver);
+}
+module_exit(mdss_dsi_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DSI controller driver");
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
new file mode 100644
index 0000000..60bc455
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -0,0 +1,899 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_H
+#define MDSS_DSI_H
+
+#include <linux/list.h>
+#include <linux/mdss_io_util.h>
+#include <linux/irqreturn.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/gpio.h>
+
+#include "mdss_panel.h"
+#include "mdss_dsi_cmd.h"
+#include "mdss_dsi_clk.h"
+
+#define MMSS_SERDES_BASE_PHY 0x04f01000 /* mmss (De)Serializer CFG */
+
+#define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP(addr) readl_relaxed(addr)
+
+#define MIPI_OUTP_SECURE(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP_SECURE(addr) readl_relaxed(addr)
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA 0
+#define MIPI_DSI_PANEL_WVGA 1
+#define MIPI_DSI_PANEL_WVGA_PT 2
+#define MIPI_DSI_PANEL_FWVGA_PT 3
+#define MIPI_DSI_PANEL_WSVGA_PT 4
+#define MIPI_DSI_PANEL_QHD_PT 5
+#define MIPI_DSI_PANEL_WXGA 6
+#define MIPI_DSI_PANEL_WUXGA 7
+#define MIPI_DSI_PANEL_720P_PT 8
+#define DSI_PANEL_MAX 8
+
+#define MDSS_DSI_HW_REV_100 0x10000000 /* 8974 */
+#define MDSS_DSI_HW_REV_100_1 0x10000001 /* 8x26 */
+#define MDSS_DSI_HW_REV_100_2 0x10000002 /* 8x26v2 */
+#define MDSS_DSI_HW_REV_101 0x10010000 /* 8974v2 */
+#define MDSS_DSI_HW_REV_101_1 0x10010001 /* 8974Pro */
+#define MDSS_DSI_HW_REV_102 0x10020000 /* 8084 */
+#define MDSS_DSI_HW_REV_103 0x10030000 /* 8994 */
+#define MDSS_DSI_HW_REV_103_1 0x10030001 /* 8916/8936 */
+#define MDSS_DSI_HW_REV_104 0x10040000 /* 8996 */
+#define MDSS_DSI_HW_REV_104_1 0x10040001 /* 8996 */
+#define MDSS_DSI_HW_REV_104_2 0x10040002 /* 8937 */
+
+#define MDSS_DSI_HW_REV_STEP_0 0x0
+#define MDSS_DSI_HW_REV_STEP_1 0x1
+#define MDSS_DSI_HW_REV_STEP_2 0x2
+
+#define MDSS_STATUS_TE_WAIT_MAX 3
+#define NONE_PANEL "none"
+
+enum { /* mipi dsi panel */
+ DSI_VIDEO_MODE,
+ DSI_CMD_MODE,
+};
+
+enum {
+ ST_DSI_CLK_OFF,
+ ST_DSI_SUSPEND,
+ ST_DSI_RESUME,
+ ST_DSI_PLAYING,
+ ST_DSI_NUM
+};
+
+enum {
+ EV_DSI_UPDATE,
+ EV_DSI_DONE,
+ EV_DSI_TOUT,
+ EV_DSI_NUM
+};
+
+enum {
+ LANDSCAPE = 1,
+ PORTRAIT = 2,
+};
+
+enum dsi_trigger_type {
+ DSI_CMD_MODE_DMA,
+ DSI_CMD_MODE_MDP,
+};
+
+enum dsi_panel_bl_ctrl {
+ BL_PWM,
+ BL_WLED,
+ BL_DCS_CMD,
+ UNKNOWN_CTRL,
+};
+
+enum dsi_panel_status_mode {
+ ESD_NONE = 0,
+ ESD_BTA,
+ ESD_REG,
+ ESD_REG_NT35596,
+ ESD_TE,
+ ESD_MAX,
+};
+
+enum dsi_ctrl_op_mode {
+ DSI_LP_MODE,
+ DSI_HS_MODE,
+};
+
+enum dsi_lane_map_type {
+ DSI_LANE_MAP_0123,
+ DSI_LANE_MAP_3012,
+ DSI_LANE_MAP_2301,
+ DSI_LANE_MAP_1230,
+ DSI_LANE_MAP_0321,
+ DSI_LANE_MAP_1032,
+ DSI_LANE_MAP_2103,
+ DSI_LANE_MAP_3210,
+};
+
+enum dsi_pm_type {
+ /* PANEL_PM not used as part of power_data in dsi_shared_data */
+ DSI_PANEL_PM,
+ DSI_CORE_PM,
+ DSI_CTRL_PM,
+ DSI_PHY_PM,
+ DSI_MAX_PM
+};
+
+/*
+ * DSI controller states.
+ * CTRL_STATE_UNKNOWN - Unknown state of DSI controller.
+ * CTRL_STATE_PANEL_INIT - State specifies that the panel is initialized.
+ * CTRL_STATE_MDP_ACTIVE - State specifies that MDP is ready to send
+ * data to DSI.
+ * CTRL_STATE_DSI_ACTIVE - State specifies that DSI controller/PHY is
+ * initialized.
+ */
+#define CTRL_STATE_UNKNOWN 0x00
+#define CTRL_STATE_PANEL_INIT BIT(0)
+#define CTRL_STATE_MDP_ACTIVE BIT(1)
+#define CTRL_STATE_DSI_ACTIVE BIT(2)
+#define CTRL_STATE_PANEL_LP BIT(3)
+
+#define DSI_NON_BURST_SYNCH_PULSE 0
+#define DSI_NON_BURST_SYNCH_EVENT 1
+#define DSI_BURST_MODE 2
+
+#define DSI_RGB_SWAP_RGB 0
+#define DSI_RGB_SWAP_RBG 1
+#define DSI_RGB_SWAP_BGR 2
+#define DSI_RGB_SWAP_BRG 3
+#define DSI_RGB_SWAP_GRB 4
+#define DSI_RGB_SWAP_GBR 5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565 0
+#define DSI_VIDEO_DST_FORMAT_RGB666 1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE 2
+#define DSI_VIDEO_DST_FORMAT_RGB888 3
+
+#define DSI_CMD_DST_FORMAT_RGB111 0
+#define DSI_CMD_DST_FORMAT_RGB332 3
+#define DSI_CMD_DST_FORMAT_RGB444 4
+#define DSI_CMD_DST_FORMAT_RGB565 6
+#define DSI_CMD_DST_FORMAT_RGB666 7
+#define DSI_CMD_DST_FORMAT_RGB888 8
+
+#define DSI_INTR_DESJEW_MASK BIT(31)
+#define DSI_INTR_DYNAMIC_REFRESH_MASK BIT(29)
+#define DSI_INTR_DYNAMIC_REFRESH_DONE BIT(28)
+#define DSI_INTR_ERROR_MASK BIT(25)
+#define DSI_INTR_ERROR BIT(24)
+#define DSI_INTR_BTA_DONE_MASK BIT(21)
+#define DSI_INTR_BTA_DONE BIT(20)
+#define DSI_INTR_VIDEO_DONE_MASK BIT(17)
+#define DSI_INTR_VIDEO_DONE BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK BIT(9)
+#define DSI_INTR_CMD_MDP_DONE BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK BIT(1)
+#define DSI_INTR_CMD_DMA_DONE BIT(0)
+/* Update this if more interrupt masks are added in future chipsets */
+#define DSI_INTR_TOTAL_MASK 0x2222AA02
+
+#define DSI_INTR_MASK_ALL \
+ (DSI_INTR_DESJEW_MASK | \
+ DSI_INTR_DYNAMIC_REFRESH_MASK | \
+ DSI_INTR_ERROR_MASK | \
+ DSI_INTR_BTA_DONE_MASK | \
+ DSI_INTR_VIDEO_DONE_MASK | \
+ DSI_INTR_CMD_MDP_DONE_MASK | \
+ DSI_INTR_CMD_DMA_DONE_MASK)
+
+#define DSI_CMD_TRIGGER_NONE 0x0 /* mdp trigger */
+#define DSI_CMD_TRIGGER_TE 0x02
+#define DSI_CMD_TRIGGER_SW 0x04
+#define DSI_CMD_TRIGGER_SW_SEOF 0x05 /* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE 0x06
+
+#define DSI_VIDEO_TERM BIT(16)
+#define DSI_MDP_TERM BIT(8)
+#define DSI_DYNAMIC_TERM BIT(4)
+#define DSI_BTA_TERM BIT(1)
+#define DSI_CMD_TERM BIT(0)
+
+#define DSI_DATA_LANES_STOP_STATE 0xF
+#define DSI_CLK_LANE_STOP_STATE BIT(4)
+#define DSI_DATA_LANES_ENABLED 0xF0
+
+/* offsets for dynamic refresh */
+#define DSI_DYNAMIC_REFRESH_CTRL 0x200
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY 0x204
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 0x208
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY 0x20C
+
+#define MAX_ERR_INDEX 10
+
+extern struct device dsi_dev;
+extern u32 dsi_irq;
+extern struct mdss_dsi_ctrl_pdata *ctrl_list[];
+
+enum {
+ DSI_CTRL_0,
+ DSI_CTRL_1,
+ DSI_CTRL_MAX,
+};
+
+/*
+ * Common DSI properties for each controller. The DSI root probe will create the
+ * shared_data struct which should be accessible to each controller. The goal is
+ * to only access ctrl_pdata and ctrl_pdata->shared_data during the lifetime of
+ * each controller i.e. mdss_dsi_res should not be used directly.
+ */
+struct dsi_shared_data {
+ u32 hw_config; /* DSI setup configuration i.e. single/dual/split */
+ u32 pll_src_config; /* PLL source selection for DSI link clocks */
+ u32 hw_rev; /* DSI h/w revision */
+ u32 phy_rev; /* DSI PHY revision*/
+
+ /* DSI ULPS clamp register offsets */
+ u32 ulps_clamp_ctrl_off;
+ u32 ulps_phyrst_ctrl_off;
+
+ bool cmd_clk_ln_recovery_en;
+ bool dsi0_active;
+ bool dsi1_active;
+
+ /* DSI bus clocks */
+ struct clk *mdp_core_clk;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *mmss_misc_ahb_clk;
+
+ /* Other shared clocks */
+ struct clk *ext_byte0_clk;
+ struct clk *ext_pixel0_clk;
+ struct clk *ext_byte1_clk;
+ struct clk *ext_pixel1_clk;
+
+ /* Clock sources for branch clocks */
+ struct clk *byte0_parent;
+ struct clk *pixel0_parent;
+ struct clk *byte1_parent;
+ struct clk *pixel1_parent;
+
+ /* DSI core regulators */
+ struct mdss_module_power power_data[DSI_MAX_PM];
+
+ /* Shared mutex for DSI PHY regulator */
+ struct mutex phy_reg_lock;
+
+ /* Data bus(AXI) scale settings */
+ struct msm_bus_scale_pdata *bus_scale_table;
+ u32 bus_handle;
+ u32 bus_refcount;
+
+ /* Shared mutex for pm_qos ref count */
+ struct mutex pm_qos_lock;
+ u32 pm_qos_req_cnt;
+};
+
+struct mdss_dsi_data {
+ bool res_init;
+ struct platform_device *pdev;
+ /* List of controller specific struct data */
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata[DSI_CTRL_MAX];
+ /*
+ * This structure should hold common data structures like
+ * mutex, clocks, regulator information, setup information
+ */
+ struct dsi_shared_data *shared_data;
+};
+
+/*
+ * enum mdss_dsi_hw_config - Supported DSI h/w configurations
+ *
+ * @SINGLE_DSI: Single DSI panel driven by either DSI0 or DSI1.
+ * @DUAL_DSI: Two DSI panels driven independently by DSI0 & DSI1.
+ * @SPLIT_DSI: A split DSI panel driven by both the DSI controllers
+ * with the DSI link clocks sourced by a single DSI PLL.
+ */
+enum mdss_dsi_hw_config {
+ SINGLE_DSI,
+ DUAL_DSI,
+ SPLIT_DSI,
+};
+
+/*
+ * enum mdss_dsi_pll_src_config - The PLL source for DSI link clocks
+ *
+ * @PLL_SRC_0: The link clocks are sourced out of PLL0.
+ * @PLL_SRC_1: The link clocks are sourced out of PLL1.
+ */
+enum mdss_dsi_pll_src_config {
+ PLL_SRC_DEFAULT,
+ PLL_SRC_0,
+ PLL_SRC_1,
+};
+
+struct dsi_panel_cmds {
+ char *buf;
+ int blen;
+ struct dsi_cmd_desc *cmds;
+ int cmd_cnt;
+ int link_state;
+};
+
+struct dsi_panel_timing {
+ struct mdss_panel_timing timing;
+ uint32_t phy_timing[12];
+ uint32_t phy_timing_8996[40];
+ /* DSI_CLKOUT_TIMING_CTRL */
+ char t_clk_post;
+ char t_clk_pre;
+ struct dsi_panel_cmds on_cmds;
+ struct dsi_panel_cmds post_panel_on_cmds;
+ struct dsi_panel_cmds switch_cmds;
+};
+
+struct dsi_kickoff_action {
+ struct list_head act_entry;
+ void (*action)(void *);
+ void *data;
+};
+
+struct dsi_pinctrl_res {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+};
+
+struct panel_horizontal_idle {
+ int min;
+ int max;
+ int idle;
+};
+
+struct dsi_err_container {
+ u32 fifo_err_cnt;
+ u32 phy_err_cnt;
+ u32 err_cnt;
+ u32 err_time_delta;
+ u32 max_err_index;
+
+ u32 index;
+ s64 err_time[MAX_ERR_INDEX];
+};
+
+#define DSI_CTRL_LEFT DSI_CTRL_0
+#define DSI_CTRL_RIGHT DSI_CTRL_1
+#define DSI_CTRL_CLK_SLAVE DSI_CTRL_RIGHT
+#define DSI_CTRL_CLK_MASTER DSI_CTRL_LEFT
+
+#define DSI_EV_PLL_UNLOCKED 0x0001
+#define DSI_EV_DLNx_FIFO_UNDERFLOW 0x0002
+#define DSI_EV_DSI_FIFO_EMPTY 0x0004
+#define DSI_EV_DLNx_FIFO_OVERFLOW 0x0008
+#define DSI_EV_LP_RX_TIMEOUT 0x0010
+#define DSI_EV_STOP_HS_CLK_LANE 0x40000000
+#define DSI_EV_MDP_BUSY_RELEASE 0x80000000
+
+#define MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL 0x02a0
+#define MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL2 0x02a4
+#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x02a8
+#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x02ac
+#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL3 0x02b0
+#define MSM_DBA_CHIP_NAME_MAX_LEN 20
+
+struct mdss_dsi_ctrl_pdata {
+ int ndx; /* panel_num */
+ int (*on)(struct mdss_panel_data *pdata);
+ int (*post_panel_on)(struct mdss_panel_data *pdata);
+ int (*off)(struct mdss_panel_data *pdata);
+ int (*low_power_config)(struct mdss_panel_data *pdata, int enable);
+ int (*set_col_page_addr)(struct mdss_panel_data *pdata, bool force);
+ int (*check_status)(struct mdss_dsi_ctrl_pdata *pdata);
+ int (*check_read_status)(struct mdss_dsi_ctrl_pdata *pdata);
+ int (*cmdlist_commit)(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
+ void (*switch_mode)(struct mdss_panel_data *pdata, int mode);
+ struct mdss_panel_data panel_data;
+ unsigned char *ctrl_base;
+ struct mdss_io_data ctrl_io;
+ struct mdss_io_data mmss_misc_io;
+ struct mdss_io_data phy_io;
+ struct mdss_io_data phy_regulator_io;
+ int reg_size;
+ u32 flags;
+ struct clk *byte_clk;
+ struct clk *esc_clk;
+ struct clk *pixel_clk;
+ struct clk *mux_byte_clk;
+ struct clk *mux_pixel_clk;
+ struct clk *pll_byte_clk;
+ struct clk *pll_pixel_clk;
+ struct clk *shadow_byte_clk;
+ struct clk *shadow_pixel_clk;
+ struct clk *byte_clk_rcg;
+ struct clk *pixel_clk_rcg;
+ struct clk *vco_dummy_clk;
+ u8 ctrl_state;
+ int panel_mode;
+ int irq_cnt;
+ int disp_te_gpio;
+ int rst_gpio;
+ int disp_en_gpio;
+ int bklt_en_gpio;
+ int mode_gpio;
+ int intf_mux_gpio;
+ int bklt_ctrl; /* backlight ctrl */
+ bool pwm_pmi;
+ int pwm_period;
+ int pwm_pmic_gpio;
+ int pwm_lpg_chan;
+ int bklt_max;
+ int new_fps;
+ int pwm_enabled;
+ int clk_lane_cnt;
+ bool dmap_iommu_map;
+ bool dsi_irq_line;
+ bool dcs_cmd_insert;
+ atomic_t te_irq_ready;
+ bool idle;
+
+ bool cmd_sync_wait_broadcast;
+ bool cmd_sync_wait_trigger;
+
+ struct mdss_rect roi;
+ struct pwm_device *pwm_bl;
+ u32 pclk_rate;
+ u32 byte_clk_rate;
+ u32 pclk_rate_bkp;
+ u32 byte_clk_rate_bkp;
+ bool refresh_clk_rate; /* flag to recalculate clk_rate */
+ struct mdss_module_power panel_power_data;
+ struct mdss_module_power power_data[DSI_MAX_PM]; /* for 8x10 */
+ u32 dsi_irq_mask;
+ struct mdss_hw *dsi_hw;
+ struct mdss_intf_recovery *recovery;
+ struct mdss_intf_recovery *mdp_callback;
+
+ struct dsi_panel_cmds on_cmds;
+ struct dsi_panel_cmds post_dms_on_cmds;
+ struct dsi_panel_cmds post_panel_on_cmds;
+ struct dsi_panel_cmds off_cmds;
+ struct dsi_panel_cmds lp_on_cmds;
+ struct dsi_panel_cmds lp_off_cmds;
+ struct dsi_panel_cmds status_cmds;
+ struct dsi_panel_cmds idle_on_cmds; /* for lp mode */
+ struct dsi_panel_cmds idle_off_cmds;
+ u32 *status_valid_params;
+ u32 *status_cmds_rlen;
+ u32 *status_value;
+ unsigned char *return_buf;
+ u32 groups; /* several alternative values to compare */
+ u32 status_error_count;
+ u32 max_status_error_count;
+
+ struct dsi_panel_cmds video2cmd;
+ struct dsi_panel_cmds cmd2video;
+
+ char pps_buf[DSC_PPS_LEN]; /* dsc pps */
+
+ struct dcs_cmd_list cmdlist;
+ struct completion dma_comp;
+ struct completion mdp_comp;
+ struct completion video_comp;
+ struct completion dynamic_comp;
+ struct completion bta_comp;
+ struct completion te_irq_comp;
+ spinlock_t irq_lock;
+ spinlock_t mdp_lock;
+ int mdp_busy;
+ struct mutex mutex;
+ struct mutex cmd_mutex;
+ struct mutex cmdlist_mutex;
+ struct regulator *lab; /* vreg handle */
+ struct regulator *ibb; /* vreg handle */
+ struct mutex clk_lane_mutex;
+
+ bool null_insert_enabled;
+ bool ulps;
+ bool core_power;
+ bool mmss_clamp;
+ char dlane_swap; /* data lane swap */
+ bool is_phyreg_enabled;
+ bool burst_mode_enabled;
+
+ struct dsi_buf tx_buf;
+ struct dsi_buf rx_buf;
+ struct dsi_buf status_buf;
+ int status_mode;
+ int rx_len;
+ int cur_max_pkt_size;
+
+ struct dsi_pinctrl_res pin_res;
+
+ unsigned long dma_size;
+ dma_addr_t dma_addr;
+ bool cmd_cfg_restore;
+ bool do_unicast;
+
+ bool idle_enabled;
+ int horizontal_idle_cnt;
+ struct panel_horizontal_idle *line_idle;
+ struct mdss_util_intf *mdss_util;
+ struct dsi_shared_data *shared_data;
+
+ void *clk_mngr;
+ void *dsi_clk_handle;
+ void *mdp_clk_handle;
+ int m_dsi_vote_cnt;
+ int m_mdp_vote_cnt;
+ /* debugfs structure */
+ struct mdss_dsi_debugfs_info *debugfs_info;
+
+ struct dsi_err_container err_cont;
+
+ struct kobject *kobj;
+ int fb_node;
+
+ /* DBA data */
+ struct workqueue_struct *workq;
+ struct delayed_work dba_work;
+ char bridge_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+ uint32_t bridge_index;
+ bool ds_registered;
+
+ bool timing_db_mode;
+ bool update_phy_timing; /* flag to recalculate PHY timings */
+
+ bool phy_power_off;
+};
+
+struct dsi_status_data {
+ struct notifier_block fb_notifier;
+ struct delayed_work check_status;
+ struct msm_fb_data_type *mfd;
+};
+
+void mdss_dsi_read_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl);
+int dsi_panel_device_register(struct platform_device *ctrl_pdev,
+ struct device_node *pan_node, struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+int mdss_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_cmd_desc *cmds, int cnt, int use_dma_tpg);
+
+int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_cmd_desc *cmds, int rlen, int use_dma_tpg);
+
+void mdss_dsi_host_init(struct mdss_panel_data *pdata);
+void mdss_dsi_op_mode_config(int mode,
+ struct mdss_panel_data *pdata);
+void mdss_dsi_restore_intr_mask(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_mode_ctrl(int enable);
+void mdp4_dsi_cmd_trigger(void);
+void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
+bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
+ enum mdss_dsi_clk_type clk_type, enum mdss_dsi_clk_state clk_state);
+void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_panel_clk_ctrl *clk_ctrl);
+void mdss_dsi_controller_cfg(int enable,
+ struct mdss_panel_data *pdata);
+void mdss_dsi_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl_pdata, bool restore);
+int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl);
+
+irqreturn_t mdss_dsi_isr(int irq, void *ptr);
+irqreturn_t hw_vsync_handler(int irq, void *data);
+void disable_esd_thread(void);
+void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
+int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
+ int frame_rate);
+int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy);
+int mdss_dsi_link_clk_init(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_link_clk_deinit(struct device *dev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+int mdss_dsi_core_clk_init(struct platform_device *pdev,
+ struct dsi_shared_data *sdata);
+void mdss_dsi_core_clk_deinit(struct device *dev,
+ struct dsi_shared_data *sdata);
+int mdss_dsi_shadow_clk_init(struct platform_device *pdev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_shadow_clk_deinit(struct device *dev,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+int mdss_dsi_pre_clkoff_cb(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state new_state);
+int mdss_dsi_post_clkoff_cb(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state curr_state);
+int mdss_dsi_post_clkon_cb(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state curr_state);
+int mdss_dsi_pre_clkon_cb(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state new_state);
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
+void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_video_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl);
+bool mdss_dsi_panel_pwm_enable(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_ctrl_phy_restore(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_phy_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_phy_init(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_ctrl_init(struct device *ctrl_dev,
+ struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_en_wait4dynamic_done(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
+void mdss_dsi_cmdlist_kickoff(int intf);
+int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
+bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl, u8 clk_type);
+void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl);
+bool mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en);
+void mdss_dsi_lp_cd_rx(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_read_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_panel_cmd_read(struct mdss_dsi_ctrl_pdata *ctrl, char cmd0,
+ char cmd1, void (*fxn)(int), char *rbuf, int len);
+int mdss_dsi_panel_init(struct device_node *node,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+ int ndx);
+int mdss_dsi_panel_timing_switch(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+ struct mdss_panel_timing *timing);
+
+int mdss_panel_parse_bl_settings(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+int mdss_panel_get_dst_fmt(u32 bpp, char mipi_mode, u32 pixel_packing,
+ char *dst_format);
+
+int mdss_dsi_register_recovery_handler(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct mdss_intf_recovery *recovery);
+void mdss_dsi_unregister_bl_settings(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct mdss_panel_info *pinfo);
+void mdss_dsi_dsc_config(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsc_desc *dsc);
+void mdss_dsi_dfps_config_8996(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off,
+ u32 mask, u32 val);
+int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state);
+
+static inline const char *__mdss_dsi_pm_name(enum dsi_pm_type module)
+{
+ switch (module) {
+ case DSI_CORE_PM: return "DSI_CORE_PM";
+ case DSI_CTRL_PM: return "DSI_CTRL_PM";
+ case DSI_PHY_PM: return "DSI_PHY_PM";
+ case DSI_PANEL_PM: return "PANEL_PM";
+ default: return "???";
+ }
+}
+
+static inline const char *__mdss_dsi_pm_supply_node_name(
+ enum dsi_pm_type module)
+{
+ switch (module) {
+ case DSI_CORE_PM: return "qcom,core-supply-entries";
+ case DSI_CTRL_PM: return "qcom,ctrl-supply-entries";
+ case DSI_PHY_PM: return "qcom,phy-supply-entries";
+ case DSI_PANEL_PM: return "qcom,panel-supply-entries";
+ default: return "???";
+ }
+}
+
+static inline u32 mdss_dsi_get_hw_config(struct dsi_shared_data *sdata)
+{
+ return sdata->hw_config;
+}
+
+static inline bool mdss_dsi_is_hw_config_single(struct dsi_shared_data *sdata)
+{
+ return mdss_dsi_get_hw_config(sdata) == SINGLE_DSI;
+}
+
+static inline bool mdss_dsi_is_hw_config_split(struct dsi_shared_data *sdata)
+{
+ return mdss_dsi_get_hw_config(sdata) == SPLIT_DSI;
+}
+
+static inline bool mdss_dsi_is_hw_config_dual(struct dsi_shared_data *sdata)
+{
+ return mdss_dsi_get_hw_config(sdata) == DUAL_DSI;
+}
+
+static inline bool mdss_dsi_get_pll_src_config(struct dsi_shared_data *sdata)
+{
+ return sdata->pll_src_config;
+}
+
+/*
+ * mdss_dsi_is_pll_src_default: Check if the DSI device uses default PLL src
+ * For single-dsi and dual-dsi configuration, PLL source need not be
+ * explicitly specified. In this case, the default PLL source configuration
+ * is assumed.
+ *
+ * @sdata: pointer to DSI shared data structure
+ */
+static inline bool mdss_dsi_is_pll_src_default(struct dsi_shared_data *sdata)
+{
+ return sdata->pll_src_config == PLL_SRC_DEFAULT;
+}
+
+/*
+ * mdss_dsi_is_pll_src_pll0: Check if the PLL source for a DSI device is PLL0
+ * The function is only valid if the DSI configuration is single/split DSI.
+ * Not valid for dual DSI configuration.
+ *
+ * @sdata: pointer to DSI shared data structure
+ */
+static inline bool mdss_dsi_is_pll_src_pll0(struct dsi_shared_data *sdata)
+{
+ return sdata->pll_src_config == PLL_SRC_0;
+}
+
+/*
+ * mdss_dsi_is_pll_src_pll1: Check if the PLL source for a DSI device is PLL1
+ * The function is only valid if the DSI configuration is single/split DSI.
+ * Not valid for dual DSI configuration.
+ *
+ * @sdata: pointer to DSI shared data structure
+ */
+static inline bool mdss_dsi_is_pll_src_pll1(struct dsi_shared_data *sdata)
+{
+ return sdata->pll_src_config == PLL_SRC_1;
+}
+
+static inline bool mdss_dsi_is_dsi0_active(struct dsi_shared_data *sdata)
+{
+ return sdata->dsi0_active;
+}
+
+static inline bool mdss_dsi_is_dsi1_active(struct dsi_shared_data *sdata)
+{
+ return sdata->dsi1_active;
+}
+
+static inline u32 mdss_dsi_get_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return ctrl->shared_data->phy_rev;
+}
+
+static inline const char *mdss_dsi_get_fb_name(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct mdss_panel_info *pinfo = &(ctrl->panel_data.panel_info);
+
+ if (mdss_dsi_is_hw_config_dual(ctrl->shared_data)) {
+ if (pinfo->is_prim_panel)
+ return "qcom,mdss-fb-map-prim";
+ else
+ return "qcom,mdss-fb-map-sec";
+ } else {
+ return "qcom,mdss-fb-map-prim";
+ }
+}
+
+static inline bool mdss_dsi_sync_wait_enable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return ctrl->cmd_sync_wait_broadcast;
+}
+
+static inline bool mdss_dsi_sync_wait_trigger(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return ctrl->cmd_sync_wait_broadcast &&
+ ctrl->cmd_sync_wait_trigger;
+}
+
+static inline bool mdss_dsi_is_left_ctrl(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return ctrl->ndx == DSI_CTRL_LEFT;
+}
+
+static inline bool mdss_dsi_is_right_ctrl(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return ctrl->ndx == DSI_CTRL_RIGHT;
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_other_ctrl(
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ if (ctrl->ndx == DSI_CTRL_RIGHT)
+ return ctrl_list[DSI_CTRL_LEFT];
+
+ return ctrl_list[DSI_CTRL_RIGHT];
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_by_index(int ndx)
+{
+ if (ndx >= DSI_CTRL_MAX)
+ return NULL;
+
+ return ctrl_list[ndx];
+}
+
+static inline bool mdss_dsi_is_ctrl_clk_master(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return mdss_dsi_is_hw_config_split(ctrl->shared_data) &&
+ (ctrl->ndx == DSI_CTRL_CLK_MASTER);
+}
+
+static inline bool mdss_dsi_is_ctrl_clk_slave(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return mdss_dsi_is_hw_config_split(ctrl->shared_data) &&
+ (ctrl->ndx == DSI_CTRL_CLK_SLAVE);
+}
+
+static inline bool mdss_dsi_is_te_based_esd(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ return (ctrl->status_mode == ESD_TE) &&
+ gpio_is_valid(ctrl->disp_te_gpio) &&
+ mdss_dsi_is_left_ctrl(ctrl);
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_clk_master(void)
+{
+ return ctrl_list[DSI_CTRL_CLK_MASTER];
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_clk_slave(void)
+{
+ return ctrl_list[DSI_CTRL_CLK_SLAVE];
+}
+
+static inline bool mdss_dsi_is_panel_off(struct mdss_panel_data *pdata)
+{
+ return mdss_panel_is_power_off(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on(struct mdss_panel_data *pdata)
+{
+ return mdss_panel_is_power_on(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on_interactive(
+ struct mdss_panel_data *pdata)
+{
+ return mdss_panel_is_power_on_interactive(
+ pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on_lp(struct mdss_panel_data *pdata)
+{
+ return mdss_panel_is_power_on_lp(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on_ulp(struct mdss_panel_data *pdata)
+{
+ return mdss_panel_is_power_on_ulp(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_ulps_feature_enabled(
+ struct mdss_panel_data *pdata)
+{
+ return pdata->panel_info.ulps_feature_enabled;
+}
+
+static inline bool mdss_dsi_cmp_panel_reg(struct dsi_buf status_buf,
+ u32 *status_val, int i)
+{
+ return status_buf.data[i] == status_val[i];
+}
+
+#endif /* MDSS_DSI_H */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_clk.c b/drivers/video/fbdev/msm/mdss_dsi_clk.c
new file mode 100644
index 0000000..372c93e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_clk.c
@@ -0,0 +1,1003 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "mdss-dsi-clk:[%s] " fmt, __func__
+#include <linux/clk/msm-clk.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+
+#include "mdss_dsi_clk.h"
+#include "mdss_dsi.h"
+#include "mdss_debug.h"
+
+#define MAX_CLIENT_NAME_LEN 20
/* Core (bus) clock handles plus the last physical state applied to them. */
struct dsi_core_clks {
	struct mdss_dsi_core_clk_info clks;
	u32 current_clk_state;	/* enum mdss_dsi_clk_state */
};
+
/* Link clock handles, their current state, and cached rates (Hz). */
struct dsi_link_clks {
	struct mdss_dsi_link_clk_info clks;
	u32 current_clk_state;	/* enum mdss_dsi_clk_state */
	u32 byte_clk_rate;	/* applied via clk_set_rate() on start */
	u32 pix_clk_rate;
	u32 esc_clk_rate;
};
+
/*
 * Per-controller clock manager. Aggregates the per-client requested states
 * (client_list) into one physical core/link clock state, invoking the
 * registered callbacks around every transition. clk_mutex serializes all
 * state changes.
 */
struct mdss_dsi_clk_mngr {
	char name[DSI_CLK_NAME_LEN];	/* tag used in debug logs */
	struct dsi_core_clks core_clks;
	struct dsi_link_clks link_clks;

	struct reg_bus_client *reg_bus_clt;	/* register-bus bandwidth vote */

	pre_clockoff_cb pre_clkoff_cb;
	post_clockoff_cb post_clkoff_cb;
	post_clockon_cb post_clkon_cb;
	pre_clockon_cb pre_clkon_cb;

	struct list_head client_list;	/* mdss_dsi_clk_client_info nodes */
	struct mutex clk_mutex;

	void *priv_data;	/* opaque cookie handed to the callbacks */
};
+
/* One registered client's refcounts and requested states, linked on the
 * manager's client_list.
 */
struct mdss_dsi_clk_client_info {
	char name[MAX_CLIENT_NAME_LEN];
	u32 core_refcount;	/* balanced ON vs OFF/EARLY_GATE votes */
	u32 link_refcount;
	u32 core_clk_state;	/* enum mdss_dsi_clk_state */
	u32 link_clk_state;

	struct list_head list;

	struct mdss_dsi_clk_mngr *mngr;	/* owning manager */
};
+
+static int dsi_core_clk_start(struct dsi_core_clks *c_clks)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr;
+
+ mngr = container_of(c_clks, struct mdss_dsi_clk_mngr, core_clks);
+
+ rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
+ if (rc) {
+ pr_err("%s: failed to enable mdp_core_clock. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+
+ rc = clk_prepare_enable(c_clks->clks.ahb_clk);
+ if (rc) {
+ pr_err("%s: failed to enable ahb clock. rc=%d\n", __func__, rc);
+ goto disable_core_clk;
+ }
+
+ rc = clk_prepare_enable(c_clks->clks.axi_clk);
+ if (rc) {
+ pr_err("%s: failed to enable ahb clock. rc=%d\n", __func__, rc);
+ goto disable_ahb_clk;
+ }
+
+ if (c_clks->clks.mmss_misc_ahb_clk) {
+ rc = clk_prepare_enable(c_clks->clks.mmss_misc_ahb_clk);
+ if (rc) {
+ pr_err("%s: failed to enable mmss misc ahb clk.rc=%d\n",
+ __func__, rc);
+ goto disable_axi_clk;
+ }
+ }
+
+ rc = mdss_update_reg_bus_vote(mngr->reg_bus_clt, VOTE_INDEX_LOW);
+ if (rc) {
+ pr_err("failed to vote for reg bus\n");
+ goto disable_mmss_misc_clk;
+ }
+
+ pr_debug("%s:CORE CLOCK IS ON\n", mngr->name);
+ return rc;
+
+disable_mmss_misc_clk:
+ if (c_clks->clks.mmss_misc_ahb_clk)
+ clk_disable_unprepare(c_clks->clks.mmss_misc_ahb_clk);
+disable_axi_clk:
+ clk_disable_unprepare(c_clks->clks.axi_clk);
+disable_ahb_clk:
+ clk_disable_unprepare(c_clks->clks.ahb_clk);
+disable_core_clk:
+ clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+error:
+ pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+ return rc;
+}
+
+static int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr;
+
+ mngr = container_of(c_clks, struct mdss_dsi_clk_mngr, core_clks);
+
+ mdss_update_reg_bus_vote(mngr->reg_bus_clt, VOTE_INDEX_DISABLE);
+ if (c_clks->clks.mmss_misc_ahb_clk)
+ clk_disable_unprepare(c_clks->clks.mmss_misc_ahb_clk);
+ clk_disable_unprepare(c_clks->clks.axi_clk);
+ clk_disable_unprepare(c_clks->clks.ahb_clk);
+ clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+
+ pr_debug("%s: CORE CLOCK IS OFF\n", mngr->name);
+ return rc;
+}
+
+static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr;
+ struct mdss_dsi_ctrl_pdata *ctrl;
+
+ mngr = container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+
+ /*
+ * In an ideal world, cont_splash_enabled should not be required inside
+ * the clock manager. But, in the current driver cont_splash_enabled
+ * flag is set inside mdp driver and there is no interface event
+ * associated with this flag setting. Also, set rate for clock need not
+ * be called for every enable call. It should be done only once when
+ * coming out of suspend.
+ */
+ ctrl = mngr->priv_data;
+ if (ctrl->panel_data.panel_info.cont_splash_enabled)
+ return 0;
+
+ rc = clk_set_rate(l_clks->clks.esc_clk, l_clks->esc_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = clk_set_rate(l_clks->clks.byte_clk, l_clks->byte_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = clk_set_rate(l_clks->clks.pixel_clk, l_clks->pix_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
+ goto error;
+ }
+
+error:
+ return rc;
+}
+
+static int dsi_link_clk_prepare(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+
+ rc = clk_prepare(l_clks->clks.esc_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ rc = clk_prepare(l_clks->clks.byte_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ rc = clk_prepare(l_clks->clks.pixel_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return rc;
+
+pixel_clk_err:
+ clk_unprepare(l_clks->clks.byte_clk);
+byte_clk_err:
+ clk_unprepare(l_clks->clks.esc_clk);
+esc_clk_err:
+ return rc;
+}
+
+static int dsi_link_clk_unprepare(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+
+ clk_unprepare(l_clks->clks.pixel_clk);
+ clk_unprepare(l_clks->clks.byte_clk);
+ clk_unprepare(l_clks->clks.esc_clk);
+
+ return rc;
+}
+
+static int dsi_link_clk_enable(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+
+ rc = clk_enable(l_clks->clks.esc_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ rc = clk_enable(l_clks->clks.byte_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ rc = clk_enable(l_clks->clks.pixel_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return rc;
+
+pixel_clk_err:
+ clk_disable(l_clks->clks.byte_clk);
+byte_clk_err:
+ clk_disable(l_clks->clks.esc_clk);
+esc_clk_err:
+ return rc;
+}
+
+static int dsi_link_clk_disable(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+
+ clk_disable(l_clks->clks.esc_clk);
+ clk_disable(l_clks->clks.pixel_clk);
+ clk_disable(l_clks->clks.byte_clk);
+
+ return rc;
+}
+
+
+static int dsi_link_clk_start(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr;
+
+ mngr = container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+
+ rc = dsi_link_clk_set_rate(l_clks);
+ if (rc) {
+ pr_err("failed to set clk rates, rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_link_clk_prepare(l_clks);
+ if (rc) {
+ pr_err("failed to prepare link clks, rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_link_clk_enable(l_clks);
+ if (rc) {
+ pr_err("failed to enable link clks, rc = %d\n", rc);
+ goto error_unprepare;
+ }
+
+ pr_debug("%s: LINK CLOCK IS ON\n", mngr->name);
+ return rc;
+error_unprepare:
+ dsi_link_clk_unprepare(l_clks);
+error:
+ return rc;
+}
+
+static int dsi_link_clk_stop(struct dsi_link_clks *l_clks)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr;
+
+ mngr = container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+
+ (void)dsi_link_clk_disable(l_clks);
+
+ (void)dsi_link_clk_unprepare(l_clks);
+ pr_debug("%s: LINK CLOCK IS OFF\n", mngr->name);
+
+ return rc;
+}
+
+static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
+ struct dsi_link_clks *l_clks, u32 l_state)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr;
+ bool l_c_on = false;
+
+ if (c_clks) {
+ mngr =
+ container_of(c_clks, struct mdss_dsi_clk_mngr, core_clks);
+ } else if (l_clks) {
+ mngr =
+ container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+ } else {
+ mngr = NULL;
+ }
+
+ if (!mngr)
+ return -EINVAL;
+
+ pr_debug("%s: c_state = %d, l_state = %d\n", mngr ? mngr->name : "NA",
+ c_clks ? c_state : -1, l_clks ? l_state : -1);
+ /*
+ * Clock toggle order:
+ * 1. When turning on, Core clocks before link clocks
+ * 2. When turning off, Link clocks before core clocks.
+ */
+ if (c_clks && (c_state == MDSS_DSI_CLK_ON)) {
+ if (c_clks->current_clk_state == MDSS_DSI_CLK_OFF) {
+ rc = mngr->pre_clkon_cb(mngr->priv_data,
+ MDSS_DSI_CORE_CLK,
+ MDSS_DSI_CLK_ON);
+ if (rc) {
+ pr_err("failed to turn on MDP FS rc= %d\n", rc);
+ goto error;
+ }
+ }
+ rc = dsi_core_clk_start(c_clks);
+ if (rc) {
+ pr_err("failed to turn on core clks rc = %d\n", rc);
+ goto error;
+ }
+
+ if (mngr->post_clkon_cb) {
+ rc = mngr->post_clkon_cb(mngr->priv_data,
+ MDSS_DSI_CORE_CLK,
+ MDSS_DSI_CLK_ON);
+ if (rc)
+ pr_err("post clk on cb failed, rc = %d\n", rc);
+ }
+ c_clks->current_clk_state = MDSS_DSI_CLK_ON;
+ }
+
+ if (l_clks) {
+
+ if (l_state == MDSS_DSI_CLK_ON) {
+ if (mngr->pre_clkon_cb) {
+ rc = mngr->pre_clkon_cb(mngr->priv_data,
+ MDSS_DSI_LINK_CLK, l_state);
+ if (rc)
+ pr_err("pre link clk on cb failed\n");
+ }
+ rc = dsi_link_clk_start(l_clks);
+ if (rc) {
+ pr_err("failed to start link clk rc= %d\n", rc);
+ goto error;
+ }
+ if (mngr->post_clkon_cb) {
+ rc = mngr->post_clkon_cb(mngr->priv_data,
+ MDSS_DSI_LINK_CLK,
+ l_state);
+ if (rc)
+ pr_err("post link clk on cb failed\n");
+ }
+ } else {
+ /*
+ * Two conditions that need to be checked for Link
+ * clocks:
+ * 1. Link clocks need core clocks to be on when
+ * transitioning from EARLY_GATE to OFF state.
+ * 2. ULPS mode might have to be enabled in case of OFF
+ * state. For ULPS, Link clocks should be turned ON
+ * first before they are turned off again.
+ *
+ * If Link is going from EARLY_GATE to OFF state AND
+ * Core clock is already in EARLY_GATE or OFF state,
+ * turn on Core clocks and link clocks.
+ *
+ * ULPS state is managed as part of the pre_clkoff_cb.
+ */
+ if ((l_state == MDSS_DSI_CLK_OFF) &&
+ (l_clks->current_clk_state ==
+ MDSS_DSI_CLK_EARLY_GATE) &&
+ (mngr->core_clks.current_clk_state !=
+ MDSS_DSI_CLK_ON)) {
+ rc = dsi_core_clk_start(&mngr->core_clks);
+ if (rc) {
+ pr_err("core clks did not start\n");
+ goto error;
+ }
+
+ rc = dsi_link_clk_start(l_clks);
+ if (rc) {
+ pr_err("Link clks did not start\n");
+ goto error;
+ }
+ l_c_on = true;
+ pr_debug("ECG: core and Link_on\n");
+ }
+
+ if (mngr->pre_clkoff_cb) {
+ rc = mngr->pre_clkoff_cb(mngr->priv_data,
+ MDSS_DSI_LINK_CLK, l_state);
+ if (rc)
+ pr_err("pre link clk off cb failed\n");
+ }
+
+ rc = dsi_link_clk_stop(l_clks);
+ if (rc) {
+ pr_err("failed to stop link clk, rc = %d\n",
+ rc);
+ goto error;
+ }
+
+ if (mngr->post_clkoff_cb) {
+ rc = mngr->post_clkoff_cb(mngr->priv_data,
+ MDSS_DSI_LINK_CLK, l_state);
+ if (rc)
+ pr_err("post link clk off cb failed\n");
+ }
+ /*
+ * This check is to save unnecessary clock state
+ * change when going from EARLY_GATE to OFF. In the
+ * case where the request happens for both Core and Link
+ * clocks in the same call, core clocks need to be
+ * turned on first before OFF state can be entered.
+ *
+ * Core clocks are turned on here for Link clocks to go
+ * to OFF state. If core clock request is also present,
+ * then core clocks can be turned off Core clocks are
+ * transitioned to OFF state.
+ */
+ if (l_c_on && (!(c_clks && (c_state == MDSS_DSI_CLK_OFF)
+ && (c_clks->current_clk_state ==
+ MDSS_DSI_CLK_EARLY_GATE)))) {
+ rc = dsi_core_clk_stop(&mngr->core_clks);
+ if (rc) {
+ pr_err("core clks did not stop\n");
+ goto error;
+ }
+
+ l_c_on = false;
+ pr_debug("ECG: core off\n");
+ } else
+ pr_debug("ECG: core off skip\n");
+ }
+
+ l_clks->current_clk_state = l_state;
+ }
+
+ if (c_clks && (c_state != MDSS_DSI_CLK_ON)) {
+
+ /*
+ * When going to OFF state from EARLY GATE state, Core clocks
+ * should be turned on first so that the IOs can be clamped.
+ * l_c_on flag is set, then the core clocks were turned before
+ * to the Link clocks go to OFF state. So Core clocks are
+ * already ON and this step can be skipped.
+ *
+ * IOs are clamped in pre_clkoff_cb callback.
+ */
+ if ((c_state == MDSS_DSI_CLK_OFF) &&
+ (c_clks->current_clk_state ==
+ MDSS_DSI_CLK_EARLY_GATE) && !l_c_on) {
+ rc = dsi_core_clk_start(&mngr->core_clks);
+ if (rc) {
+ pr_err("core clks did not start\n");
+ goto error;
+ }
+ pr_debug("ECG: core on\n");
+ } else
+ pr_debug("ECG: core on skip\n");
+
+ if (mngr->pre_clkoff_cb) {
+ rc = mngr->pre_clkoff_cb(mngr->priv_data,
+ MDSS_DSI_CORE_CLK,
+ c_state);
+ if (rc)
+ pr_err("pre core clk off cb failed\n");
+ }
+
+ rc = dsi_core_clk_stop(c_clks);
+ if (rc) {
+ pr_err("failed to turn off core clks rc = %d\n", rc);
+ goto error;
+ }
+
+ if (c_state == MDSS_DSI_CLK_OFF) {
+ if (mngr->post_clkoff_cb) {
+ rc = mngr->post_clkoff_cb(mngr->priv_data,
+ MDSS_DSI_CORE_CLK,
+ MDSS_DSI_CLK_OFF);
+ if (rc)
+ pr_err("post clkoff cb fail, rc = %d\n",
+ rc);
+ }
+ }
+ c_clks->current_clk_state = c_state;
+ }
+
+error:
+ return rc;
+}
+
+static int dsi_recheck_clk_state(struct mdss_dsi_clk_mngr *mngr)
+{
+ int rc = 0;
+ struct list_head *pos = NULL;
+ struct mdss_dsi_clk_client_info *c;
+ u32 new_core_clk_state = MDSS_DSI_CLK_OFF;
+ u32 new_link_clk_state = MDSS_DSI_CLK_OFF;
+ u32 old_c_clk_state = MDSS_DSI_CLK_OFF;
+ u32 old_l_clk_state = MDSS_DSI_CLK_OFF;
+ struct dsi_core_clks *c_clks = NULL;
+ struct dsi_link_clks *l_clks = NULL;
+
+ /*
+ * Rules to maintain clock state:
+ * 1. If any client is in ON state, clocks should be ON.
+ * 2. If any client is in ECG state with rest of them turned OFF,
+ * go to Early gate state.
+ * 3. If all clients are off, then goto OFF state.
+ */
+ list_for_each(pos, &mngr->client_list) {
+ c = list_entry(pos, struct mdss_dsi_clk_client_info, list);
+ if (c->core_clk_state == MDSS_DSI_CLK_ON) {
+ new_core_clk_state = MDSS_DSI_CLK_ON;
+ break;
+ } else if (c->core_clk_state == MDSS_DSI_CLK_EARLY_GATE) {
+ new_core_clk_state = MDSS_DSI_CLK_EARLY_GATE;
+ }
+ }
+
+ list_for_each(pos, &mngr->client_list) {
+ c = list_entry(pos, struct mdss_dsi_clk_client_info, list);
+ if (c->link_clk_state == MDSS_DSI_CLK_ON) {
+ new_link_clk_state = MDSS_DSI_CLK_ON;
+ break;
+ } else if (c->link_clk_state == MDSS_DSI_CLK_EARLY_GATE) {
+ new_link_clk_state = MDSS_DSI_CLK_EARLY_GATE;
+ }
+ }
+
+ if (new_core_clk_state != mngr->core_clks.current_clk_state)
+ c_clks = &mngr->core_clks;
+
+ if (new_link_clk_state != mngr->link_clks.current_clk_state)
+ l_clks = &mngr->link_clks;
+
+ old_c_clk_state = mngr->core_clks.current_clk_state;
+ old_l_clk_state = mngr->link_clks.current_clk_state;
+
+ pr_debug("%s: c_clk_state (%d -> %d)\n", mngr->name,
+ old_c_clk_state, new_core_clk_state);
+ pr_debug("%s: l_clk_state (%d -> %d)\n", mngr->name,
+ old_l_clk_state, new_link_clk_state);
+
+ MDSS_XLOG(old_c_clk_state, new_core_clk_state, old_l_clk_state,
+ new_link_clk_state);
+ if (c_clks || l_clks) {
+ rc = dsi_update_clk_state(c_clks, new_core_clk_state,
+ l_clks, new_link_clk_state);
+ if (rc) {
+ pr_err("failed to update clock state, rc = %d\n", rc);
+ goto error;
+ }
+ }
+
+error:
+ return rc;
+}
+
+static int dsi_set_clk_rate(struct mdss_dsi_clk_mngr *mngr, int clk, u32 rate,
+ u32 flags)
+{
+ int rc = 0;
+
+ pr_debug("%s: clk = %d, rate = %d, flags = %d\n", mngr->name,
+ clk, rate, flags);
+
+ MDSS_XLOG(clk, rate, flags);
+ switch (clk) {
+ case MDSS_DSI_LINK_ESC_CLK:
+ mngr->link_clks.esc_clk_rate = rate;
+ if (!flags) {
+ rc = clk_set_rate(mngr->link_clks.clks.esc_clk, rate);
+ if (rc)
+ pr_err("set rate failed for esc clk rc=%d\n",
+ rc);
+ }
+ break;
+ case MDSS_DSI_LINK_BYTE_CLK:
+ mngr->link_clks.byte_clk_rate = rate;
+ if (!flags) {
+ rc = clk_set_rate(mngr->link_clks.clks.byte_clk, rate);
+ if (rc)
+ pr_err("set rate failed for byte clk rc=%d\n",
+ rc);
+ }
+ break;
+ case MDSS_DSI_LINK_PIX_CLK:
+ mngr->link_clks.pix_clk_rate = rate;
+ if (!flags) {
+ rc = clk_set_rate(mngr->link_clks.clks.pixel_clk, rate);
+ if (rc)
+ pr_err("failed to set rate for pix clk rc=%d\n",
+ rc);
+ }
+ break;
+ default:
+ pr_err("Unsupported clock (%d)\n", clk);
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
+void *mdss_dsi_clk_register(void *clk_mngr, struct mdss_dsi_clk_client *client)
+{
+ void *handle = NULL;
+ struct mdss_dsi_clk_mngr *mngr = clk_mngr;
+ struct mdss_dsi_clk_client_info *c;
+
+ if (!mngr) {
+ pr_err("bad params\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pr_debug("%s: ENTER\n", mngr->name);
+
+ mutex_lock(&mngr->clk_mutex);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ handle = ERR_PTR(-ENOMEM);
+ goto error;
+ }
+
+ strlcpy(c->name, client->client_name, MAX_CLIENT_NAME_LEN);
+ c->mngr = mngr;
+
+ list_add(&c->list, &mngr->client_list);
+
+ pr_debug("%s: Added new client (%s)\n", mngr->name, c->name);
+ handle = c;
+error:
+ mutex_unlock(&mngr->clk_mutex);
+ pr_debug("%s: EXIT, rc = %ld\n", mngr->name, PTR_ERR(handle));
+ return handle;
+}
+
+int mdss_dsi_clk_deregister(void *client)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_client_info *c = client;
+ struct mdss_dsi_clk_mngr *mngr;
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+ struct mdss_dsi_clk_client_info *node;
+
+ if (!client) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mngr = c->mngr;
+ pr_debug("%s: ENTER\n", mngr->name);
+ mutex_lock(&mngr->clk_mutex);
+ c->core_clk_state = MDSS_DSI_CLK_OFF;
+ c->link_clk_state = MDSS_DSI_CLK_OFF;
+
+ rc = dsi_recheck_clk_state(mngr);
+ if (rc) {
+ pr_err("clock state recheck failed rc = %d\n", rc);
+ goto error;
+ }
+
+ list_for_each_safe(pos, tmp, &mngr->client_list) {
+ node = list_entry(pos, struct mdss_dsi_clk_client_info,
+ list);
+ if (node == c) {
+ list_del(&node->list);
+ pr_debug("Removed device (%s)\n", node->name);
+ kfree(node);
+ break;
+ }
+ }
+
+error:
+ mutex_unlock(&mngr->clk_mutex);
+ pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+ return rc;
+}
+
+bool is_dsi_clk_in_ecg_state(void *client)
+{
+ struct mdss_dsi_clk_client_info *c = client;
+ struct mdss_dsi_clk_mngr *mngr;
+ bool is_ecg = false;
+
+
+ if (!client) {
+ pr_err("Invalid client params\n");
+ goto end;
+ }
+
+ mngr = c->mngr;
+
+ mutex_lock(&mngr->clk_mutex);
+ is_ecg = (c->core_clk_state == MDSS_DSI_CLK_EARLY_GATE);
+ mutex_unlock(&mngr->clk_mutex);
+
+end:
+ return is_ecg;
+}
+
/*
 * mdss_dsi_clk_req_state() - vote a new clock state on behalf of one client
 * @client:	handle returned by mdss_dsi_clk_register().
 * @clk:	bitmask of MDSS_DSI_CORE_CLK / MDSS_DSI_LINK_CLK.
 * @state:	requested state (ON, OFF or EARLY_GATE).
 * @index:	controller index, used only for event logging.
 *
 * Updates the client's per-clock refcounts and requested states, then
 * recomputes the aggregate hardware state if anything changed.
 *
 * Return: 0 on success, error code otherwise.
 */
int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
	enum mdss_dsi_clk_state state, u32 index)
{
	int rc = 0;
	struct mdss_dsi_clk_client_info *c = client;
	struct mdss_dsi_clk_mngr *mngr;
	bool changed = false;

	if (!client || !clk || clk > (MDSS_DSI_CORE_CLK | MDSS_DSI_LINK_CLK) ||
		state > MDSS_DSI_CLK_EARLY_GATE) {
		pr_err("Invalid params, client = %pK, clk = 0x%x, state = %d\n",
			client, clk, state);
		return -EINVAL;
	}

	mngr = c->mngr;
	mutex_lock(&mngr->clk_mutex);

	pr_debug("[%s]%s: CLK=%d, new_state=%d, core=%d, linkl=%d\n",
		 c->name, mngr->name, clk, state, c->core_clk_state,
		 c->link_clk_state);

	MDSS_XLOG(index, clk, state, c->core_clk_state, c->link_clk_state);
	/*
	 * Refcount handling rules:
	 *	1. Increment refcount whenever ON is called
	 *	2. Do not decrement when going from EARLY_GATE to OFF.
	 *	3. Decrement refcount when either OFF or EARLY_GATE is called
	 */
	if (state == MDSS_DSI_CLK_ON) {
		if (clk & MDSS_DSI_CORE_CLK) {
			c->core_refcount++;
			if (c->core_clk_state != MDSS_DSI_CLK_ON) {
				c->core_clk_state = MDSS_DSI_CLK_ON;
				changed = true;
			}
		}
		if (clk & MDSS_DSI_LINK_CLK) {
			c->link_refcount++;
			if (c->link_clk_state != MDSS_DSI_CLK_ON) {
				c->link_clk_state = MDSS_DSI_CLK_ON;
				changed = true;
			}
		}
	} else if ((state == MDSS_DSI_CLK_EARLY_GATE) ||
		   (state == MDSS_DSI_CLK_OFF)) {
		if (clk & MDSS_DSI_CORE_CLK) {
			/* rule 2: EARLY_GATE -> OFF keeps refcount at zero */
			if (c->core_refcount == 0) {
				if ((c->core_clk_state ==
				    MDSS_DSI_CLK_EARLY_GATE) &&
				    (state == MDSS_DSI_CLK_OFF)) {
					changed = true;
					c->core_clk_state = MDSS_DSI_CLK_OFF;
				} else {
					pr_warn("Core refcount is zero for %s",
						c->name);
				}
			} else {
				c->core_refcount--;
				if (c->core_refcount == 0) {
					c->core_clk_state = state;
					changed = true;
				}
			}
		}
		if (clk & MDSS_DSI_LINK_CLK) {
			if (c->link_refcount == 0) {
				if ((c->link_clk_state ==
				    MDSS_DSI_CLK_EARLY_GATE) &&
				    (state == MDSS_DSI_CLK_OFF)) {
					changed = true;
					c->link_clk_state = MDSS_DSI_CLK_OFF;
				} else {
					pr_warn("Link refcount is zero for %s",
						c->name);
				}
			} else {
				c->link_refcount--;
				if (c->link_refcount == 0) {
					c->link_clk_state = state;
					changed = true;
				}
			}
		}
	}
	pr_debug("[%s]%s: change=%d, Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n",
		 c->name, mngr->name, changed, c->core_refcount,
		 c->core_clk_state, c->link_refcount, c->link_clk_state);
	MDSS_XLOG(index, clk, state, c->core_clk_state, c->link_clk_state);

	/* only touch the hardware when some client vote actually changed */
	if (changed) {
		rc = dsi_recheck_clk_state(mngr);
		if (rc)
			pr_err("Failed to adjust clock state rc = %d\n", rc);
	}

	mutex_unlock(&mngr->clk_mutex);
	return rc;
}
+
+int mdss_dsi_clk_set_link_rate(void *client, enum mdss_dsi_link_clk_type clk,
+ u32 rate, u32 flags)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_client_info *c = client;
+ struct mdss_dsi_clk_mngr *mngr;
+
+ if (!client || (clk > MDSS_DSI_LINK_CLK_MAX)) {
+ pr_err("Invalid params, client = %pK, clk = 0x%x", client, clk);
+ return -EINVAL;
+ }
+
+ mngr = c->mngr;
+ pr_debug("%s: ENTER\n", mngr->name);
+ mutex_lock(&mngr->clk_mutex);
+
+ rc = dsi_set_clk_rate(mngr, clk, rate, flags);
+ if (rc)
+ pr_err("Failed to set rate for clk %d, rate = %d, rc = %d\n",
+ clk, rate, rc);
+
+ mutex_unlock(&mngr->clk_mutex);
+ pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+ return rc;
+}
+
+void *mdss_dsi_clk_init(struct mdss_dsi_clk_info *info)
+{
+ struct mdss_dsi_clk_mngr *mngr;
+
+ if (!info) {
+ pr_err("Invalid params\n");
+ return ERR_PTR(-EINVAL);
+ }
+ pr_debug("ENTER %s\n", info->name);
+ mngr = kzalloc(sizeof(*mngr), GFP_KERNEL);
+ if (!mngr) {
+ mngr = ERR_PTR(-ENOMEM);
+ goto error;
+ }
+
+ mutex_init(&mngr->clk_mutex);
+ memcpy(&mngr->core_clks.clks, &info->core_clks, sizeof(struct
+ mdss_dsi_core_clk_info));
+ memcpy(&mngr->link_clks.clks, &info->link_clks, sizeof(struct
+ mdss_dsi_link_clk_info));
+
+ INIT_LIST_HEAD(&mngr->client_list);
+ mngr->pre_clkon_cb = info->pre_clkon_cb;
+ mngr->post_clkon_cb = info->post_clkon_cb;
+ mngr->pre_clkoff_cb = info->pre_clkoff_cb;
+ mngr->post_clkoff_cb = info->post_clkoff_cb;
+ mngr->priv_data = info->priv_data;
+ mngr->reg_bus_clt = mdss_reg_bus_vote_client_create(info->name);
+ if (IS_ERR(mngr->reg_bus_clt)) {
+ pr_err("Unable to get handle for reg bus vote\n");
+ kfree(mngr);
+ mngr = ERR_PTR(-EINVAL);
+ goto error;
+ }
+ memcpy(mngr->name, info->name, DSI_CLK_NAME_LEN);
+error:
+ pr_debug("EXIT %s, rc = %ld\n", mngr->name, PTR_ERR(mngr));
+ return mngr;
+}
+
+int mdss_dsi_clk_deinit(void *clk_mngr)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_mngr *mngr = clk_mngr;
+ struct list_head *position = NULL;
+ struct list_head *tmp = NULL;
+ struct mdss_dsi_clk_client_info *node;
+
+ if (!mngr) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: ENTER\n", mngr->name);
+ mutex_lock(&mngr->clk_mutex);
+
+ list_for_each_safe(position, tmp, &mngr->client_list) {
+ node = list_entry(position, struct mdss_dsi_clk_client_info,
+ list);
+ list_del(&node->list);
+ pr_debug("Removed device (%s)\n", node->name);
+ kfree(node);
+ }
+
+ rc = dsi_recheck_clk_state(mngr);
+ if (rc)
+ pr_err("failed to disable all clocks\n");
+ mdss_reg_bus_vote_client_destroy(mngr->reg_bus_clt);
+ mutex_unlock(&mngr->clk_mutex);
+ pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+ kfree(mngr);
+ return rc;
+}
+
+int mdss_dsi_clk_force_toggle(void *client, u32 clk)
+{
+ int rc = 0;
+ struct mdss_dsi_clk_client_info *c = client;
+ struct mdss_dsi_clk_mngr *mngr;
+
+ if (!client || !clk || clk >= MDSS_DSI_CLKS_MAX) {
+ pr_err("Invalid params, client = %pK, clk = 0x%x\n",
+ client, clk);
+ return -EINVAL;
+ }
+
+ mngr = c->mngr;
+ mutex_lock(&mngr->clk_mutex);
+
+ if ((clk & MDSS_DSI_CORE_CLK) &&
+ (mngr->core_clks.current_clk_state == MDSS_DSI_CLK_ON)) {
+
+ rc = dsi_core_clk_stop(&mngr->core_clks);
+ if (rc) {
+ pr_err("failed to stop core clks\n");
+ goto error;
+ }
+
+ rc = dsi_core_clk_start(&mngr->core_clks);
+ if (rc)
+ pr_err("failed to start core clks\n");
+
+ } else if (clk & MDSS_DSI_CORE_CLK) {
+ pr_err("cannot reset, core clock is off\n");
+ rc = -ENOTSUPP;
+ goto error;
+ }
+
+ if ((clk & MDSS_DSI_LINK_CLK) &&
+ (mngr->link_clks.current_clk_state == MDSS_DSI_CLK_ON)) {
+
+ rc = dsi_link_clk_stop(&mngr->link_clks);
+ if (rc) {
+ pr_err("failed to stop link clks\n");
+ goto error;
+ }
+
+ rc = dsi_link_clk_start(&mngr->link_clks);
+ if (rc)
+ pr_err("failed to start link clks\n");
+
+ } else if (clk & MDSS_DSI_LINK_CLK) {
+ pr_err("cannot reset, link clock is off\n");
+ rc = -ENOTSUPP;
+ goto error;
+ }
+
+error:
+ mutex_unlock(&mngr->clk_mutex);
+ return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_clk.h b/drivers/video/fbdev/msm/mdss_dsi_clk.h
new file mode 100644
index 0000000..837f2f6
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_clk.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MDSS_DSI_CLK_H_
+#define _MDSS_DSI_CLK_H_
+
+#include <linux/mdss_io_util.h>
+#include <linux/list.h>
+
+#define DSI_CLK_NAME_LEN 20
+
+#define MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON 0x1
+
/* Logical clock states a client may request through the clock manager. */
enum mdss_dsi_clk_state {
	MDSS_DSI_CLK_OFF,	/* clocks fully off */
	MDSS_DSI_CLK_ON,	/* clocks running */
	MDSS_DSI_CLK_EARLY_GATE,	/* gated; see mdss_dsi_clk_req_state() */
};
+
/* Identifies which driver originated a clock request. */
enum dsi_clk_req_client {
	DSI_CLK_REQ_MDP_CLIENT = 0,
	DSI_CLK_REQ_DSI_CLIENT,
};
+
/* Individual DSI link clocks, used to address rate-change requests. */
enum mdss_dsi_link_clk_type {
	MDSS_DSI_LINK_ESC_CLK,
	MDSS_DSI_LINK_BYTE_CLK,
	MDSS_DSI_LINK_PIX_CLK,
	MDSS_DSI_LINK_CLK_MAX,	/* sentinel, not a clock */
};
+
/* Bitmask of the clock groups managed by the clock manager. */
enum mdss_dsi_clk_type {
	MDSS_DSI_CORE_CLK = BIT(0),
	MDSS_DSI_LINK_CLK = BIT(1),
	MDSS_DSI_ALL_CLKS = (BIT(0) | BIT(1)),
	MDSS_DSI_CLKS_MAX = BIT(2),	/* first invalid bit */
};
+
+/**
+ * typedef *pre_clockoff_cb() - Callback before clock is turned off
+ * @priv: private data pointer.
+ * @clk_type: clock which is being turned off.
+ * @new_state: next state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*pre_clockoff_cb)(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state new_state);
+
+/**
+ * typedef *post_clockoff_cb() - Callback after clock is turned off
+ * @priv: private data pointer.
+ * @clk_type: clock which was turned off.
+ * @curr_state: current state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*post_clockoff_cb)(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state curr_state);
+
+/**
+ * typedef *post_clockon_cb() - Callback after clock is turned on
+ * @priv: private data pointer.
+ * @clk_type: clock which was turned on.
+ * @curr_state: current state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*post_clockon_cb)(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state curr_state);
+
+/**
+ * typedef *pre_clockon_cb() - Callback before clock is turned on
+ * @priv: private data pointer.
+ * @clk_type: clock which is being turned on.
+ * @new_state: next state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*pre_clockon_cb)(void *priv,
+ enum mdss_dsi_clk_type clk_type,
+ enum mdss_dsi_clk_state new_state);
+
/* Clock handles for the DSI core (bus) clocks. */
struct mdss_dsi_core_clk_info {
	struct clk *mdp_core_clk;
	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *mmss_misc_ahb_clk;	/* optional; may be NULL */
};
+
/* Clock handles for the DSI link clocks. */
struct mdss_dsi_link_clk_info {
	struct clk *esc_clk;
	struct clk *byte_clk;
	struct clk *pixel_clk;
};
+
/* A clock state request paired with the requesting client. */
struct dsi_panel_clk_ctrl {
	enum mdss_dsi_clk_state state;
	enum dsi_clk_req_client client;
};
+
+/**
+ * struct mdss_dsi_clk_info - clock information to initialize manager
+ * @name: name for the clocks to identify debug logs.
+ * @core_clks: core clock information.
+ * @link_clks: link clock information.
+ * @pre_clkoff_cb: callback before a clock is turned off.
+ * @post_clkoff_cb: callback after a clock is turned off.
+ * @pre_clkon_cb: callback before a clock is turned on.
+ * @post_clkon_cb: callback after a clock is turned on.
+ * @priv_data: pointer to private data passed to callbacks.
+ */
+struct mdss_dsi_clk_info {
+ char name[DSI_CLK_NAME_LEN];
+ struct mdss_dsi_core_clk_info core_clks;
+ struct mdss_dsi_link_clk_info link_clks;
+ pre_clockoff_cb pre_clkoff_cb;
+ post_clockoff_cb post_clkoff_cb;
+ post_clockon_cb post_clkon_cb;
+ pre_clockon_cb pre_clkon_cb;
+ void *priv_data;
+};
+
+struct mdss_dsi_clk_client {
+ char *client_name;
+};
+
+/**
+ * mdss_dsi_clk_init() - Initializes clock manager
+ * @info: Clock information to be managed by the clock manager.
+ *
+ * The Init API should be called during probe of the dsi driver. DSI driver
+ * provides the clock handles to the core clocks and link clocks that will be
+ * managed by the clock manager.
+ *
+ * returns handle or an error value.
+ */
+void *mdss_dsi_clk_init(struct mdss_dsi_clk_info *info);
+
+/**
+ * mdss_dsi_clk_deinit() - Deinitializes the clock manager
+ * @mngr: handle returned by mdss_dsi_clk_init().
+ *
+ * Deinit will turn off all the clocks and release all the resources acquired
+ * by mdss_dsi_clk_init().
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_deinit(void *mngr);
+
+/**
+ * mdss_dsi_clk_register() - Register a client to control clock state
+ * @mngr: handle returned by mdss_dsi_clk_init().
+ * @client: client information.
+ *
+ * Register allows clients for DSI clock manager to acquire a handle which can
+ * be used to request a specific clock state. The clock manager maintains a
+ * reference count of the clock states requested by each client. Client has to
+ * ensure that ON and OFF/EARLY_GATE calls are balanced properly.
+ *
+ * Requesting a particular clock state does not guarantee that physical clock
+ * state. Physical clock state is determined by the states requested by all
+ * clients.
+ *
+ * @return: handle or error code.
+ */
+void *mdss_dsi_clk_register(void *mngr, struct mdss_dsi_clk_client *client);
+
+/**
+ * mdss_dsi_clk_deregister() - Deregister a registered client.
+ * @client: client handle returned by mdss_dsi_clk_register().
+ *
+ * Deregister releases all resources acquired by mdss_dsi_clk_register().
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_deregister(void *client);
+
+/**
+ * mdss_dsi_clk_req_state() - Request a specific clock state
+ * @client: client handle.
+ * @clk: Type of clock requested (enum mdss_dsi_clk_type).
+ * @state: clock state requested.
+ * @index: controller index.
+ *
+ * This routine is used to request a new clock state for a specific clock. If
+ * turning ON the clocks, this guarantees that clocks will be on before
+ * returning. Valid state transitions are ON -> EARLY GATE, ON -> OFF,
+ * EARLY GATE -> OFF, EARLY GATE -> ON and OFF -> ON.
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
+ enum mdss_dsi_clk_state state, u32 index);
+
+/**
+ * mdss_dsi_clk_set_link_rate() - set clock rate for link clocks
+ * @client: client handle.
+ * @clk: type of clock.
+ * @rate: clock rate in Hz.
+ * @flags: flags.
+ *
+ * This routine is used to request a specific clock rate. It supports an
+ * additional flags argument which can change the behavior of the routine. If
+ * MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON flag is set, the routine caches the new
+ * clock rate and applies it next time when the clock is turned on.
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_set_link_rate(void *client, enum mdss_dsi_link_clk_type clk,
+ u32 rate, u32 flags);
+
+/**
+ * mdss_dsi_clk_force_toggle() - Turn off and turn on clocks
+ * @client: client handle.
+ * @clk: clock type.
+ *
+ * This routine has to be used in cases where clocks have to be toggled
+ * irrespecitive of the refcount. This API bypasses the refcount and turns off
+ * and turns on the clocks. This will fail if the clocks are in OFF state
+ * already.
+ *
+ * @return:error code.
+ */
+int mdss_dsi_clk_force_toggle(void *client, u32 clk);
+
+/**
+ * is_dsi_clk_in_ecg_state() - Checks the current state of clocks
+ * @client: client handle.
+ *
+ * This routine returns checks the clocks status for client and return
+ * success code based on it.
+ *
+ * @return:true: if clocks are in ECG state
+ * false: for all other cases
+ */
+bool is_dsi_clk_in_ecg_state(void *client);
+#endif /* _MDSS_DSI_CLK_H_ */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_cmd.c b/drivers/video/fbdev/msm/mdss_dsi_cmd.c
new file mode 100644
index 0000000..c67fd8a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_cmd.c
@@ -0,0 +1,793 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/kthread.h>
+
+#include "mdss_dsi_cmd.h"
+#include "mdss_dsi.h"
+#include "mdss_smmu.h"
+
+/*
+ * mipi dsi buf mechanism
+ */
+/* Advance the buffer write pointer by @len bytes; returns the new pointer. */
+char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+ dp->data += len;
+ return dp->data;
+}
+
+/* Move the buffer write pointer back by @len bytes; returns the new pointer. */
+char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len)
+{
+ dp->data -= len;
+ return dp->data;
+}
+
+/*
+ * Rewind @data by @len and grow the recorded length: used after writing a
+ * packet header so @data points at the start of the completed packet.
+ */
+char *mdss_dsi_buf_push(struct dsi_buf *dp, int len)
+{
+ dp->data -= len;
+ dp->len += len;
+ return dp->data;
+}
+
+/*
+ * Remember the current position as the DSI host header slot, then reserve
+ * @hlen bytes for it. Returns the payload start (just past the header).
+ */
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+ dp->hdr = (u32 *)dp->data;
+ return mdss_dsi_buf_reserve(dp, hlen);
+}
+
+/*
+ * Reset @dp for a fresh packet: rewind to the start of the backing store,
+ * round the write pointer up to the next 8-byte boundary (DMA alignment),
+ * and clear the length counters. Returns the aligned write pointer.
+ */
+char *mdss_dsi_buf_init(struct dsi_buf *dp)
+{
+ int off;
+
+ dp->data = dp->start;
+ off = (int) (unsigned long) dp->data;
+ /* 8 byte align */
+ off &= 0x07;
+ if (off)
+ off = 8 - off;
+ dp->data += off;
+ dp->len = 0;
+ dp->read_cnt = 0;
+ return dp->data;
+}
+
+/*
+ * Allocate an SMMU-mapped DMA buffer of @size bytes for @dp.
+ * Returns @size on success, -ENOMEM on allocation failure.
+ * The allocator is expected to hand back 8-byte-aligned memory; a
+ * misaligned result is only logged, not treated as fatal.
+ */
+int mdss_dsi_buf_alloc(struct device *ctrl_dev, struct dsi_buf *dp, int size)
+{
+ dp->start = mdss_smmu_dsi_alloc_buf(ctrl_dev, size, &dp->dmap,
+ GFP_KERNEL);
+ if (dp->start == NULL) {
+ pr_err("%s:%u\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ dp->end = dp->start + size;
+ dp->size = size;
+
+ if ((int) (unsigned long) dp->start & 0x07)
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+
+ dp->data = dp->start;
+ dp->len = 0;
+ dp->read_cnt = 0;
+ return size;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+/*
+ * Pack a generic long-write packet into @dp: payload first (padded with
+ * 0xff to a 4-byte boundary), then the 4-byte host header in front of it.
+ * Returns the total number of bytes added to the buffer.
+ */
+static int mdss_dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ char *bp;
+ u32 *hp;
+ int i, len = 0;
+
+ dchdr = &cm->dchdr;
+ bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+ /* fill up payload */
+ if (cm->payload) {
+ len = dchdr->dlen;
+ len += 3;
+ len &= ~0x03; /* round up to a multiple of 4 */
+ for (i = 0; i < dchdr->dlen; i++)
+ *bp++ = cm->payload[i];
+
+ /* pad with 0xff up to the 4-byte boundary */
+ for (; i < len; i++)
+ *bp++ = 0xff;
+
+ dp->len += len;
+ }
+
+ /* fill up header; word count is the unpadded payload length */
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(dchdr->dlen);
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ len += DSI_HOST_HDR_SIZE;
+
+ return len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1 or 2 parameters
+ */
+/*
+ * Pack a generic short-write packet (0, 1 or 2 parameters carried in the
+ * header data bytes) into @dp. Returns DSI_HOST_HDR_SIZE on success.
+ * NOTE(review): returns 0 on missing payload while the DCS variants return
+ * -EINVAL — callers of mdss_dsi_cmd_dma_add sum lengths, so confirm the
+ * inconsistency is intentional.
+ */
+static int mdss_dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+ int len;
+
+ dchdr = &cm->dchdr;
+ if (dchdr->dlen && cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+
+ len = (dchdr->dlen > 2) ? 2 : dchdr->dlen;
+
+ if (len == 1) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(0);
+ } else if (len == 2) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+ } else {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+ *hp |= DSI_HDR_DATA1(0);
+ *hp |= DSI_HDR_DATA2(0);
+ }
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * mipi dsi generic read with 0, 1 or 2 parameters
+ */
+/*
+ * Pack a generic read request (0, 1 or 2 parameters) into @dp. BTA is set
+ * so the panel can turn the bus around and return data.
+ * Returns DSI_HOST_HDR_SIZE on success, 0 when a payload is required but
+ * missing.
+ */
+static int mdss_dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+ int len;
+
+ dchdr = &cm->dchdr;
+ if (dchdr->dlen && cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_BTA;
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ len = (dchdr->dlen > 2) ? 2 : dchdr->dlen;
+
+ if (len == 1) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(0);
+ } else if (len == 2) {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+ } else {
+ *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+ *hp |= DSI_HDR_DATA1(0);
+ *hp |= DSI_HDR_DATA2(0);
+ }
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+/*
+ * Pack a DCS long-write packet into @dp; layout mirrors
+ * mdss_dsi_generic_lwrite() but with the DCS long-write data type.
+ * Returns the total number of bytes added to the buffer.
+ */
+static int mdss_dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ char *bp;
+ u32 *hp;
+ int i, len = 0;
+
+ dchdr = &cm->dchdr;
+ bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+ /*
+ * fill up payload
+ * dcs command byte (first byte) followed by payload
+ */
+ if (cm->payload) {
+ len = dchdr->dlen;
+ len += 3;
+ len &= ~0x03; /* round up to a multiple of 4 */
+ for (i = 0; i < dchdr->dlen; i++)
+ *bp++ = cm->payload[i];
+
+ /* pad with 0xff up to the 4-byte boundary */
+ for (; i < len; i++)
+ *bp++ = 0xff;
+
+ dp->len += len;
+ }
+
+ /* fill up header; word count is the unpadded payload length */
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(dchdr->dlen);
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ len += DSI_HOST_HDR_SIZE;
+ return len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+/*
+ * Pack a DCS short write with 0 parameters into @dp. The DCS command byte
+ * itself is carried in payload[0].
+ * Returns DSI_HOST_HDR_SIZE on success, -EINVAL when no payload is given.
+ */
+static int mdss_dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return -EINVAL;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ if (dchdr->ack) /* ask ACK trigger msg from peripheral */
+ *hp |= DSI_HDR_BTA;
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ /*
+ * Dead code removed: "len" was computed from dchdr->dlen but never
+ * used; a 0-parameter short write always encodes just the command
+ * byte below.
+ */
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+ *hp |= DSI_HDR_DATA2(0);
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+/*
+ * Pack a DCS short write with 1 parameter into @dp: payload[0] is the DCS
+ * command byte and payload[1] the parameter, so dlen must be >= 2.
+ * Returns DSI_HOST_HDR_SIZE on success, -EINVAL on bad input.
+ */
+static int mdss_dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ if (dchdr->dlen < 2 || cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return -EINVAL;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ if (dchdr->ack) /* ask ACK trigger msg from peripheral */
+ *hp |= DSI_HDR_BTA;
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+ *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+ *hp |= DSI_HDR_DATA2(cm->payload[1]); /* parameter */
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+
+/*
+ * Pack a DCS read request (0 parameters) into @dp; payload[0] is the DCS
+ * register to read and BTA is set for the turnaround.
+ * Returns DSI_HOST_HDR_SIZE on success, -EINVAL when no payload is given.
+ */
+static int mdss_dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return -EINVAL;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_BTA;
+ *hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+ *hp |= DSI_HDR_DATA2(0);
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/* Pack a color-mode trigger packet (DTYPE_CM_ON) into @dp; returns 4. */
+static int mdss_dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * Pack a DSC PPS (picture parameter set) long packet into @dp; same
+ * payload/padding layout as the other long writes, with DTYPE_PPS.
+ * Returns the total number of bytes added to the buffer.
+ */
+static int mdss_dsi_dsc_pps(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ char *bp;
+ u32 *hp;
+ int i, len = 0;
+
+ dchdr = &cm->dchdr;
+ bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+ /*
+ * fill up payload
+ * dcs command byte (first byte) followed by payload
+ */
+ if (cm->payload) {
+ len = dchdr->dlen;
+ len += 3;
+ len &= ~0x03; /* round up to a multiple of 4 */
+ for (i = 0; i < dchdr->dlen; i++)
+ *bp++ = cm->payload[i];
+
+ /* pad with 0xff up to the 4-byte boundary */
+ for (; i < len; i++)
+ *bp++ = 0xff;
+
+ dp->len += len;
+ }
+
+ /* fill up header; word count is the unpadded payload length */
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(dchdr->dlen);
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_DTYPE(DTYPE_PPS);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+ len += DSI_HOST_HDR_SIZE;
+ return len;
+}
+
+/* Pack a color-mode trigger packet (DTYPE_CM_OFF) into @dp; returns 4. */
+static int mdss_dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/* Pack a Turn On Peripheral trigger packet into @dp; returns 4. */
+static int mdss_dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/* Pack a Shut Down Peripheral trigger packet into @dp; returns 4. */
+static int mdss_dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * Pack a Set Maximum Return Packet Size packet into @dp; the 16-bit size
+ * comes from payload[0] (LSB) and payload[1] (MSB).
+ * Returns DSI_HOST_HDR_SIZE on success, 0 when no payload is given.
+ * NOTE(review): payload[1] is read but only payload != NULL is checked —
+ * callers must always supply a 2-byte payload; confirm dlen at call sites.
+ */
+static int mdss_dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * Pack a Compression Mode Command packet into @dp; the two parameter bytes
+ * come from payload[0] and payload[1].
+ * Returns DSI_HOST_HDR_SIZE on success, 0 when no payload is given.
+ * NOTE(review): payload[1] is read with only a NULL check on payload —
+ * callers must supply a 2-byte payload; confirm dlen at call sites.
+ */
+static int mdss_dsi_compression_mode(struct dsi_buf *dp,
+ struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ if (cm->payload == 0) {
+ pr_err("%s: NO payload error\n", __func__);
+ return 0;
+ }
+
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_COMPRESSION_MODE);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ *hp |= DSI_HDR_DATA1(cm->payload[0]);
+ *hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * Pack a null-packet header into @dp (long packet, no payload written here;
+ * dlen only sets the word count). Returns DSI_HOST_HDR_SIZE.
+ */
+static int mdss_dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(dchdr->dlen);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * Pack a blanking-packet header into @dp (long packet, no payload written
+ * here; dlen only sets the word count). Returns DSI_HOST_HDR_SIZE.
+ */
+static int mdss_dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ u32 *hp;
+
+ dchdr = &cm->dchdr;
+ mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+ hp = dp->hdr;
+ *hp = 0;
+ *hp = DSI_HDR_WC(dchdr->dlen);
+ *hp |= DSI_HDR_LONG_PKT;
+ *hp |= DSI_HDR_VC(dchdr->vc);
+ *hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+ if (dchdr->last)
+ *hp |= DSI_HDR_LAST;
+
+ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+ return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+/*
+ * Dispatch @cm to the packer for its data type and append the packed
+ * packet to @dp. Returns the number of bytes added (0 for unsupported
+ * types).
+ * NOTE(review): some packers return -EINVAL on bad input while others
+ * return 0, so "len" can be negative here — callers summing lengths
+ * should check; confirm intended.
+ */
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+ struct dsi_ctrl_hdr *dchdr;
+ int len = 0;
+
+ dchdr = &cm->dchdr;
+
+ switch (dchdr->dtype) {
+ case DTYPE_GEN_WRITE:
+ case DTYPE_GEN_WRITE1:
+ case DTYPE_GEN_WRITE2:
+ len = mdss_dsi_generic_swrite(dp, cm);
+ break;
+ case DTYPE_GEN_LWRITE:
+ len = mdss_dsi_generic_lwrite(dp, cm);
+ break;
+ case DTYPE_GEN_READ:
+ case DTYPE_GEN_READ1:
+ case DTYPE_GEN_READ2:
+ len = mdss_dsi_generic_read(dp, cm);
+ break;
+ case DTYPE_DCS_LWRITE:
+ len = mdss_dsi_dcs_lwrite(dp, cm);
+ break;
+ case DTYPE_DCS_WRITE:
+ len = mdss_dsi_dcs_swrite(dp, cm);
+ break;
+ case DTYPE_DCS_WRITE1:
+ len = mdss_dsi_dcs_swrite1(dp, cm);
+ break;
+ case DTYPE_DCS_READ:
+ len = mdss_dsi_dcs_read(dp, cm);
+ break;
+ case DTYPE_MAX_PKTSIZE:
+ len = mdss_dsi_set_max_pktsize(dp, cm);
+ break;
+ case DTYPE_PPS:
+ len = mdss_dsi_dsc_pps(dp, cm);
+ break;
+ case DTYPE_COMPRESSION_MODE:
+ len = mdss_dsi_compression_mode(dp, cm);
+ break;
+ case DTYPE_NULL_PKT:
+ len = mdss_dsi_null_pkt(dp, cm);
+ break;
+ case DTYPE_BLANK_PKT:
+ len = mdss_dsi_blank_pkt(dp, cm);
+ break;
+ case DTYPE_CM_ON:
+ len = mdss_dsi_cm_on(dp, cm);
+ break;
+ case DTYPE_CM_OFF:
+ len = mdss_dsi_cm_off(dp, cm);
+ break;
+ case DTYPE_PERIPHERAL_ON:
+ len = mdss_dsi_peripheral_on(dp, cm);
+ break;
+ case DTYPE_PERIPHERAL_OFF:
+ len = mdss_dsi_peripheral_off(dp, cm);
+ break;
+ default:
+ pr_debug("%s: dtype=%x NOT supported\n",
+ __func__, dchdr->dtype);
+ break;
+
+ }
+
+ return len;
+}
+
+/*
+ * mdss_dsi_short_read1_resp: 1 parameter
+ */
+/*
+ * Trim a 1-parameter short-read response in place: skip the dcs type byte
+ * and account for the unused trailer. Returns the data length (1).
+ */
+int mdss_dsi_short_read1_resp(struct dsi_buf *rp)
+{
+ /* strip out dcs type */
+ rp->data++;
+ rp->len = 1;
+ /* 1 byte for dcs type + 1 byte for ECC + 1 byte for 2nd data byte */
+ rp->read_cnt -= 3;
+ return rp->len;
+}
+
+/*
+ * mdss_dsi_short_read2_resp: 2 parameter
+ */
+/*
+ * Trim a 2-parameter short-read response in place: skip the dcs type byte
+ * and account for the unused trailer. Returns the data length (2).
+ */
+int mdss_dsi_short_read2_resp(struct dsi_buf *rp)
+{
+ /* strip out dcs type */
+ rp->data++;
+ rp->len = 2;
+ rp->read_cnt -= 2; /* 1 byte for dcs type + 1 byte for ECC */
+ return rp->len;
+}
+
+/*
+ * Trim a long-read response in place: drop the 4-byte dcs header from the
+ * front and the 2-byte CRC from the count. Returns the payload length.
+ */
+int mdss_dsi_long_read_resp(struct dsi_buf *rp)
+{
+ /* strip out dcs header */
+ rp->data += 4;
+ rp->len -= 4;
+ rp->read_cnt -= 6; /* 4 bytes for dcs header + 2 bytes for CRC */
+ return rp->len;
+}
+
+/* DCS set_tear_on (0x35); second byte 0x00 selects v-blank-only TE mode */
+static char set_tear_on[2] = {0x35, 0x00};
+static struct dsi_cmd_desc dsi_tear_on_cmd = {
+ {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_tear_on)}, set_tear_on};
+
+/* DCS set_tear_off (0x34); takes no parameter */
+static char set_tear_off[2] = {0x34, 0x00};
+static struct dsi_cmd_desc dsi_tear_off_cmd = {
+ {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(set_tear_off)}, set_tear_off};
+
+/*
+ * Queue and commit the DCS set_tear_on command on @ctrl. When broadcast
+ * commands go through the left controller only, non-left controllers skip.
+ */
+void mdss_dsi_set_tear_on(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct dcs_cmd_req cmdreq;
+ struct mdss_panel_info *pinfo;
+
+ pinfo = &(ctrl->panel_data.panel_info);
+ if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+ return;
+
+ cmdreq.cmds = &dsi_tear_on_cmd;
+ cmdreq.cmds_cnt = 1;
+ cmdreq.flags = CMD_REQ_COMMIT;
+ cmdreq.rlen = 0;
+ cmdreq.cb = NULL;
+
+ mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * Queue and commit the DCS set_tear_off command on @ctrl; mirrors
+ * mdss_dsi_set_tear_on() including the left-controller-only broadcast rule.
+ */
+void mdss_dsi_set_tear_off(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct dcs_cmd_req cmdreq;
+ struct mdss_panel_info *pinfo;
+
+ pinfo = &(ctrl->panel_data.panel_info);
+ if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+ return;
+
+ cmdreq.cmds = &dsi_tear_off_cmd;
+ cmdreq.cmds_cnt = 1;
+ cmdreq.flags = CMD_REQ_COMMIT;
+ cmdreq.rlen = 0;
+ cmdreq.cb = NULL;
+
+ mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_cmd_get: ctrl->cmd_mutex acquired by caller
+ */
+/*
+ * Pop the next request from the controller's circular command list.
+ * Returns NULL when the list is empty, or when @from_mdp would dequeue a
+ * CMD_REQ_COMMIT request (those are reserved for the commit path).
+ * cmdlist_mutex protects the get/put indices; ctrl->cmd_mutex is expected
+ * to be held by the caller.
+ */
+struct dcs_cmd_req *mdss_dsi_cmdlist_get(struct mdss_dsi_ctrl_pdata *ctrl,
+ int from_mdp)
+{
+ struct dcs_cmd_list *clist;
+ struct dcs_cmd_req *req = NULL;
+
+ mutex_lock(&ctrl->cmdlist_mutex);
+ clist = &ctrl->cmdlist;
+ if (clist->get != clist->put) {
+ req = &clist->list[clist->get];
+ /*dont let commit thread steal ESD thread's
+ * command
+ */
+ if (from_mdp && (req->flags & CMD_REQ_COMMIT)) {
+ mutex_unlock(&ctrl->cmdlist_mutex);
+ return NULL;
+ }
+ clist->get++;
+ clist->get %= CMD_REQ_MAX;
+ clist->tot--;
+ pr_debug("%s: tot=%d put=%d get=%d\n", __func__,
+ clist->tot, clist->put, clist->get);
+ }
+ mutex_unlock(&ctrl->cmdlist_mutex);
+ return req;
+}
+
+/*
+ * Copy @cmdreq into the controller's circular command list (dropping the
+ * oldest entry when full), and, for CMD_REQ_COMMIT requests, synchronously
+ * run the controller's cmdlist_commit hook. Returns the commit result or 0.
+ * Lock order: cmd_mutex outer, cmdlist_mutex inner; cmdlist_mutex is
+ * released before committing so the commit path can dequeue.
+ */
+int mdss_dsi_cmdlist_put(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dcs_cmd_req *cmdreq)
+{
+ struct dcs_cmd_req *req;
+ struct dcs_cmd_list *clist;
+ int ret = 0;
+
+ mutex_lock(&ctrl->cmd_mutex);
+ mutex_lock(&ctrl->cmdlist_mutex);
+ clist = &ctrl->cmdlist;
+ req = &clist->list[clist->put];
+ *req = *cmdreq;
+ clist->put++;
+ clist->put %= CMD_REQ_MAX;
+ clist->tot++;
+ if (clist->put == clist->get) {
+ /* drop the oldest one */
+ pr_debug("%s: DROP, tot=%d put=%d get=%d\n", __func__,
+ clist->tot, clist->put, clist->get);
+ clist->get++;
+ clist->get %= CMD_REQ_MAX;
+ clist->tot--;
+ }
+
+ pr_debug("%s: tot=%d put=%d get=%d\n", __func__,
+ clist->tot, clist->put, clist->get);
+
+ mutex_unlock(&ctrl->cmdlist_mutex);
+
+ if (req->flags & CMD_REQ_COMMIT) {
+ if (!ctrl->cmdlist_commit)
+ pr_err("cmdlist_commit not implemented!\n");
+ else
+ ret = ctrl->cmdlist_commit(ctrl, 0);
+ }
+ mutex_unlock(&ctrl->cmd_mutex);
+
+ return ret;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_dsi_cmd.h b/drivers/video/fbdev/msm/mdss_dsi_cmd.h
new file mode 100644
index 0000000..0ec96ec
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_cmd.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_CMD_H
+#define MDSS_DSI_CMD_H
+
+#include "mdss.h"
+
+struct mdss_dsi_ctrl_pdata;
+
+/* Bit layout of the 32-bit DSI host command header word */
+#define DSI_HOST_HDR_SIZE 4
+#define DSI_HDR_LAST BIT(31)
+#define DSI_HDR_LONG_PKT BIT(30)
+#define DSI_HDR_BTA BIT(29)
+#define DSI_HDR_VC(vc) (((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype) (((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data) (((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data) ((data) & 0x0ff)
+#define DSI_HDR_WC(wc) ((wc) & 0x0ffff)
+
+#define MDSS_DSI_MRPS 0x04 /* Maximum Return Packet Size */
+
+#define MDSS_DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align */
+
+/* Working buffer for building/receiving DSI packets (DMA mapped). */
+struct dsi_buf {
+ u32 *hdr; /* dsi host header */
+ char *start; /* buffer start addr */
+ char *end; /* buffer end addr */
+ int size; /* size of buffer */
+ char *data; /* buffer */
+ int len; /* data length */
+ int read_cnt; /* DSI read count */
+ dma_addr_t dmap; /* mapped dma addr */
+};
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE 0x05 /* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1 0x15 /* short write, 1 parameter */
+#define DTYPE_DCS_READ 0x06 /* read */
+#define DTYPE_DCS_LWRITE 0x39 /* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE 0x03 /* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1 0x13 /* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2 0x23 /* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE 0x29 /* long write */
+#define DTYPE_GEN_READ 0x04 /* long read, 0 parameter */
+#define DTYPE_GEN_READ1 0x14 /* long read, 1 parameter */
+#define DTYPE_GEN_READ2 0x24 /* long read, 2 parameter */
+
+#define DTYPE_COMPRESSION_MODE 0x07 /* compression mode */
+#define DTYPE_PPS 0x0a /* pps */
+#define DTYPE_MAX_PKTSIZE 0x37 /* set max packet size */
+#define DTYPE_NULL_PKT 0x09 /* null packet, no data */
+#define DTYPE_BLANK_PKT 0x19 /* blanking packet, no data */
+
+/*
+ * NOTE(review): the CM_ON/CM_OFF macro names disagree with their comments
+ * (per the MIPI DSI spec, data type 0x02 is Color Mode Off and 0x12 is
+ * Color Mode On) — confirm which of name or comment callers rely on.
+ */
+#define DTYPE_CM_ON 0x02 /* color mode off */
+#define DTYPE_CM_OFF 0x12 /* color mode on */
+#define DTYPE_PERIPHERAL_OFF 0x22
+#define DTYPE_PERIPHERAL_ON 0x32
+
+/*
+ * dcs response
+ */
+#define DTYPE_ACK_ERR_RESP 0x02
+#define DTYPE_EOT_RESP 0x08 /* end of tx */
+#define DTYPE_GEN_READ1_RESP 0x11 /* 1 parameter, short */
+#define DTYPE_GEN_READ2_RESP 0x12 /* 2 parameter, short */
+#define DTYPE_GEN_LREAD_RESP 0x1a
+#define DTYPE_DCS_LREAD_RESP 0x1c
+#define DTYPE_DCS_READ1_RESP 0x21 /* 1 parameter, short */
+#define DTYPE_DCS_READ2_RESP 0x22 /* 2 parameter, short */
+
+/* Per-command control header carried with every dsi_cmd_desc. */
+struct dsi_ctrl_hdr {
+ char dtype; /* data type */
+ char last; /* last in chain */
+ char vc; /* virtual chan */
+ char ack; /* ask ACK from peripheral */
+ char wait; /* ms */
+ short dlen; /* 16 bits */
+} __packed;
+
+struct dsi_cmd_desc {
+ struct dsi_ctrl_hdr dchdr;
+ char *payload;
+};
+
+#define CMD_REQ_MAX 4
+#define CMD_REQ_RX 0x0001
+#define CMD_REQ_COMMIT 0x0002
+#define CMD_REQ_UNICAST 0x0008
+#define CMD_REQ_DMA_TPG 0x0040
+/*
+ * NOTE(review): CMD_REQ_NO_MAX_PKT_SIZE shares value 0x0008 with
+ * CMD_REQ_UNICAST, so the two flags are indistinguishable at runtime —
+ * confirm whether this aliasing is intentional.
+ */
+#define CMD_REQ_NO_MAX_PKT_SIZE 0x0008
+#define CMD_REQ_LP_MODE 0x0010
+#define CMD_REQ_HS_MODE 0x0020
+
+/* One queued DCS command transaction. */
+struct dcs_cmd_req {
+ struct dsi_cmd_desc *cmds;
+ int cmds_cnt;
+ u32 flags;
+ int rlen; /* rx length */
+ char *rbuf; /* rx buf */
+ void (*cb)(int data);
+};
+
+/* Fixed-size circular queue of pending command requests. */
+struct dcs_cmd_list {
+ int put;
+ int get;
+ int tot;
+ struct dcs_cmd_req list[CMD_REQ_MAX];
+};
+
+char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len);
+char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len);
+char *mdss_dsi_buf_push(struct dsi_buf *dp, int len);
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen);
+char *mdss_dsi_buf_init(struct dsi_buf *dp);
+int mdss_dsi_buf_alloc(struct device *ctrl_dev, struct dsi_buf *dp, int size);
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+int mdss_dsi_short_read1_resp(struct dsi_buf *rp);
+int mdss_dsi_short_read2_resp(struct dsi_buf *rp);
+int mdss_dsi_long_read_resp(struct dsi_buf *rp);
+void mdss_dsi_set_tear_on(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_tear_off(struct mdss_dsi_ctrl_pdata *ctrl);
+struct dcs_cmd_req *mdss_dsi_cmdlist_get(struct mdss_dsi_ctrl_pdata *ctrl,
+ int from_mdp);
+int mdss_dsi_cmdlist_put(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dcs_cmd_req *cmdreq);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
new file mode 100644
index 0000000..988c7a9
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -0,0 +1,3239 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/kthread.h>
+
+#include <linux/msm-bus.h>
+
+#include "mdss.h"
+#include "mdss_dsi.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss_dsi_phy.h"
+
+#define VSYNC_PERIOD 17 /* ms, roughly one 60 Hz frame */
+#define DMA_TX_TIMEOUT 200 /* ms, cmd DMA completion wait */
+#define DMA_TPG_FIFO_LEN 64
+
+/* DSI controller register offsets */
+#define FIFO_STATUS 0x0C
+#define LANE_STATUS 0xA8
+
+#define MDSS_DSI_INT_CTRL 0x0110
+
+#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
+
+/* Registered controllers, indexed by ctrl->ndx */
+struct mdss_dsi_ctrl_pdata *ctrl_list[DSI_CTRL_MAX];
+
+struct mdss_hw mdss_dsi0_hw = {
+ .hw_ndx = MDSS_HW_DSI0,
+ .ptr = NULL,
+ .irq_handler = mdss_dsi_isr,
+};
+
+struct mdss_hw mdss_dsi1_hw = {
+ .hw_ndx = MDSS_HW_DSI1,
+ .ptr = NULL,
+ .irq_handler = mdss_dsi_isr,
+};
+
+
+#define DSI_EVENT_Q_MAX 4
+
+#define DSI_BTA_EVENT_TIMEOUT (HZ / 10)
+
+/* Mutex common for both the controllers */
+static struct mutex dsi_mtx;
+
+/* event: one queued work item for the dsi_event_thread */
+struct dsi_event_q {
+ struct mdss_dsi_ctrl_pdata *ctrl;
+ u32 arg;
+ u32 todo;
+};
+
+/* Circular event queue shared by both controllers, drained by the
+ * dsi_event_thread kthread; event_lock guards the put/get indices.
+ */
+struct mdss_dsi_event {
+ int inited;
+ wait_queue_head_t event_q;
+ u32 event_pndx;
+ u32 event_gndx;
+ struct dsi_event_q todo_list[DSI_EVENT_Q_MAX];
+ spinlock_t event_lock;
+};
+
+static struct mdss_dsi_event dsi_event;
+
+static int dsi_event_thread(void *data);
+
+/*
+ * One-time per-controller setup: bind the controller to its IRQ descriptor
+ * based on display destination, register the IRQ, initialize completions,
+ * locks and DMA buffers, and start the shared event thread on first call.
+ */
+void mdss_dsi_ctrl_init(struct device *ctrl_dev,
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ if (ctrl->panel_data.panel_info.pdest == DISPLAY_1) {
+ mdss_dsi0_hw.ptr = (void *)(ctrl);
+ ctrl->dsi_hw = &mdss_dsi0_hw;
+ ctrl->ndx = DSI_CTRL_0;
+ } else {
+ mdss_dsi1_hw.ptr = (void *)(ctrl);
+ ctrl->dsi_hw = &mdss_dsi1_hw;
+ ctrl->ndx = DSI_CTRL_1;
+ }
+
+ /* no dedicated irq line: share the MDSS interrupt line */
+ if (!(ctrl->dsi_irq_line))
+ ctrl->dsi_hw->irq_info = mdss_intr_line();
+
+ ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode;
+
+ ctrl_list[ctrl->ndx] = ctrl; /* keep it */
+
+ if (ctrl->mdss_util->register_irq(ctrl->dsi_hw))
+ pr_err("%s: mdss_register_irq failed.\n", __func__);
+
+ pr_debug("%s: ndx=%d base=%pK\n", __func__, ctrl->ndx, ctrl->ctrl_base);
+
+ init_completion(&ctrl->dma_comp);
+ init_completion(&ctrl->mdp_comp);
+ init_completion(&ctrl->video_comp);
+ init_completion(&ctrl->dynamic_comp);
+ init_completion(&ctrl->bta_comp);
+ spin_lock_init(&ctrl->irq_lock);
+ spin_lock_init(&ctrl->mdp_lock);
+ mutex_init(&ctrl->mutex);
+ mutex_init(&ctrl->cmd_mutex);
+ mutex_init(&ctrl->clk_lane_mutex);
+ mutex_init(&ctrl->cmdlist_mutex);
+ mdss_dsi_buf_alloc(ctrl_dev, &ctrl->tx_buf, SZ_4K);
+ mdss_dsi_buf_alloc(ctrl_dev, &ctrl->rx_buf, SZ_4K);
+ mdss_dsi_buf_alloc(ctrl_dev, &ctrl->status_buf, SZ_4K);
+ ctrl->cmdlist_commit = mdss_dsi_cmdlist_commit;
+ ctrl->err_cont.err_time_delta = 100;
+ ctrl->err_cont.max_err_index = MAX_ERR_INDEX;
+
+ /* single event thread shared by both controllers */
+ if (dsi_event.inited == 0) {
+ kthread_run(dsi_event_thread, (void *)&dsi_event,
+ "mdss_dsi_event");
+ mutex_init(&dsi_mtx);
+ dsi_event.inited = 1;
+ }
+}
+
+/*
+ * Read-modify-write of the controller register at @off: only the bits
+ * selected by @mask are replaced with the corresponding bits of @val.
+ * @off is forced to 4-byte alignment.
+ */
+void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off,
+ u32 mask, u32 val)
+{
+ u32 data;
+
+ off &= ~0x03;
+ val &= mask; /* set bits indicated at mask only */
+ data = MIPI_INP(ctrl->ctrl_base + off);
+ data &= ~mask;
+ data |= val;
+ pr_debug("%s: ndx=%d off=%x data=%x\n", __func__,
+ ctrl->ndx, off, data);
+ MIPI_OUTP(ctrl->ctrl_base + off, data);
+}
+
+/*
+ * Apply a clock on/off request from either the MDP or DSI client. Before
+ * disabling, waits for any in-flight MDP command transfer to finish so
+ * clocks are not pulled from under an active DMA.
+ */
+void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_panel_clk_ctrl *clk_ctrl)
+{
+ enum dsi_clk_req_client client = clk_ctrl->client;
+ int enable = clk_ctrl->state;
+ void *clk_handle = ctrl->mdp_clk_handle;
+
+ if (clk_ctrl->client == DSI_CLK_REQ_DSI_CLIENT)
+ clk_handle = ctrl->dsi_clk_handle;
+
+ MDSS_XLOG(ctrl->ndx, enable, ctrl->mdp_busy, current->pid,
+ client);
+ if (enable == 0) {
+ /* need wait before disable */
+ mutex_lock(&ctrl->cmd_mutex);
+ mdss_dsi_cmd_mdp_busy(ctrl);
+ mutex_unlock(&ctrl->cmd_mutex);
+ }
+
+ MDSS_XLOG(ctrl->ndx, enable, ctrl->mdp_busy, current->pid,
+ client);
+ mdss_dsi_clk_ctrl(ctrl, clk_handle,
+ MDSS_DSI_ALL_CLKS, enable);
+}
+
+/*
+ * Force the DSI PLL to relock by toggling the link clocks off and on,
+ * bypassing the refcount. Failure is only logged.
+ */
+void mdss_dsi_pll_relock(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int rc;
+
+ /*
+ * todo: this code does not work very well with dual
+ * dsi use cases. Need to fix this eventually.
+ */
+
+ rc = mdss_dsi_clk_force_toggle(ctrl->dsi_clk_handle, MDSS_DSI_LINK_CLK);
+ if (rc)
+ pr_err("clock toggle failed, rc = %d\n", rc);
+}
+
+/*
+ * Add @term to the controller's IRQ-source mask; the hardware interrupt is
+ * actually enabled only on the 0 -> non-zero mask transition. Safe from
+ * process context (irqsave spinlock).
+ */
+void mdss_dsi_enable_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->irq_lock, flags);
+ if (ctrl->dsi_irq_mask & term) {
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+ return;
+ }
+ if (ctrl->dsi_irq_mask == 0) {
+ MDSS_XLOG(ctrl->ndx, term);
+ ctrl->mdss_util->enable_irq(ctrl->dsi_hw);
+ pr_debug("%s: IRQ Enable, ndx=%d mask=%x term=%x\n", __func__,
+ ctrl->ndx, (int)ctrl->dsi_irq_mask, (int)term);
+ }
+ ctrl->dsi_irq_mask |= term;
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+/*
+ * Remove @term from the controller's IRQ-source mask; the hardware
+ * interrupt is actually disabled only when the mask reaches zero.
+ * Process-context counterpart of mdss_dsi_disable_irq_nosync().
+ */
+void mdss_dsi_disable_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->irq_lock, flags);
+ if (!(ctrl->dsi_irq_mask & term)) {
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+ return;
+ }
+ ctrl->dsi_irq_mask &= ~term;
+ if (ctrl->dsi_irq_mask == 0) {
+ MDSS_XLOG(ctrl->ndx, term);
+ ctrl->mdss_util->disable_irq(ctrl->dsi_hw);
+ pr_debug("%s: IRQ Disable, ndx=%d mask=%x term=%x\n", __func__,
+ ctrl->ndx, (int)ctrl->dsi_irq_mask, (int)term);
+ }
+ spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+/*
+ * mdss_dsi_disable_irq_nosync() should be called
+ * from interrupt context
+ */
+/* Interrupt-context variant: plain spin_lock, disable_irq_nosync hook. */
+void mdss_dsi_disable_irq_nosync(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
+{
+ spin_lock(&ctrl->irq_lock);
+ if (!(ctrl->dsi_irq_mask & term)) {
+ spin_unlock(&ctrl->irq_lock);
+ return;
+ }
+ ctrl->dsi_irq_mask &= ~term;
+ if (ctrl->dsi_irq_mask == 0) {
+ MDSS_XLOG(ctrl->ndx, term);
+ ctrl->mdss_util->disable_irq_nosync(ctrl->dsi_hw);
+ pr_debug("%s: IRQ Disable, ndx=%d mask=%x term=%x\n", __func__,
+ ctrl->ndx, (int)ctrl->dsi_irq_mask, (int)term);
+ }
+ spin_unlock(&ctrl->irq_lock);
+}
+
+/*
+ * Drive the controller's video-mode test pattern generator: solid red for
+ * 50 software-triggered frames at ~50 fps, then turn the generator off.
+ */
+void mdss_dsi_video_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int i;
+
+ MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x021);
+ MIPI_OUTP((ctrl->ctrl_base) + 0x0164, 0xff0000); /* red */
+ i = 0;
+ while (i++ < 50) {
+ MIPI_OUTP((ctrl->ctrl_base) + 0x0180, 0x1);
+ /* Add sleep to get ~50 fps frame rate*/
+ msleep(20);
+ }
+ MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x0);
+}
+
+/*
+ * Drive the controller's command-mode test pattern generator: solid red
+ * for 50 software-triggered frames at ~50 fps, then turn it off.
+ */
+void mdss_dsi_cmd_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int i;
+
+ MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x201);
+ MIPI_OUTP((ctrl->ctrl_base) + 0x016c, 0xff0000); /* red */
+ i = 0;
+ while (i++ < 50) {
+ MIPI_OUTP((ctrl->ctrl_base) + 0x0184, 0x1);
+ /* Add sleep to get ~50 fps frame rate*/
+ msleep(20);
+ }
+ MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x0);
+}
+
+/*
+ * Cache the controller hardware revision (register offset 0) in shared
+ * data; no-op once already read. Caller must have clocks on.
+ */
+void mdss_dsi_read_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ if (ctrl->shared_data->hw_rev)
+ return;
+
+ /* clock must be on */
+ ctrl->shared_data->hw_rev = MIPI_INP(ctrl->ctrl_base);
+}
+
+/*
+ * Detect and cache the DSI PHY revision. Reads the PHY base register
+ * first; when that reads zero, falls back to the 1.0-PHY revision-ID
+ * register and takes its major nibble. No-op once a revision is known.
+ */
+void mdss_dsi_read_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ u32 reg_val;
+
+ if (ctrl->shared_data->phy_rev > DSI_PHY_REV_UNKNOWN)
+ return;
+
+ reg_val = MIPI_INP(ctrl->phy_io.base);
+ if (!reg_val) {
+ /*
+ * DSI_0_PHY_DSIPHY_REVISION_ID3 for phy 1.0
+ * reset value = 0x10
+ * 7:4 Major
+ * 3:0 Minor
+ */
+ reg_val = MIPI_INP(ctrl->phy_io.base + 0x20c);
+ reg_val = reg_val >> 4;
+ }
+
+ if (reg_val == DSI_PHY_REV_20)
+ ctrl->shared_data->phy_rev = DSI_PHY_REV_20;
+ else if (reg_val == DSI_PHY_REV_10)
+ ctrl->shared_data->phy_rev = DSI_PHY_REV_10;
+ else
+ ctrl->shared_data->phy_rev = DSI_PHY_REV_UNKNOWN;
+}
+
+/*
+ * mdss_dsi_host_init() - program the DSI host controller for the panel.
+ * @pdata: panel data; must be embedded in a mdss_dsi_ctrl_pdata.
+ *
+ * Writes the mode-specific packet/format registers (video vs command),
+ * trigger control, lane swap, clock timing, EOT packet control, HS
+ * timeout, error/interrupt masks and clock enables, then enables the
+ * controller. Assumes the DSI bus clocks are already on. The register
+ * write order is kept as-is; only the final wmb() fences the sequence.
+ */
+void mdss_dsi_host_init(struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl, intr_ctrl;
+	u32 data;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mipi_panel_info *pinfo = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &pdata->panel_info.mipi;
+
+	if (pinfo->mode == DSI_VIDEO_MODE) {
+		/* VIDEO_MODE_CTRL (0x10): porch power modes, traffic mode,
+		 * destination format and virtual channel.
+		 */
+		data = 0;
+		if (pinfo->last_line_interleave_en)
+			data |= BIT(31);
+		if (pinfo->pulse_mode_hsa_he)
+			data |= BIT(28);
+		if (pinfo->hfp_power_stop)
+			data |= BIT(24);
+		if (pinfo->hbp_power_stop)
+			data |= BIT(20);
+		if (pinfo->hsa_power_stop)
+			data |= BIT(16);
+		if (pinfo->eof_bllp_power_stop)
+			data |= BIT(15);
+		if (pinfo->bllp_power_stop)
+			data |= BIT(12);
+		data |= ((pinfo->traffic_mode & 0x03) << 8);
+		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+		data |= (pinfo->vc & 0x03);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0010, data);
+
+		/* RGB swap / component select for video mode (0x20). */
+		data = 0;
+		data |= ((pinfo->rgb_swap & 0x07) << 12);
+		if (pinfo->b_sel)
+			data |= BIT(8);
+		if (pinfo->g_sel)
+			data |= BIT(4);
+		if (pinfo->r_sel)
+			data |= BIT(0);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0020, data);
+	} else if (pinfo->mode == DSI_CMD_MODE) {
+		/* COMMAND_MODE_MDP_CTRL (0x40): interleave, swap, format. */
+		data = 0;
+		data |= ((pinfo->interleave_max & 0x0f) << 20);
+		data |= ((pinfo->rgb_swap & 0x07) << 16);
+		if (pinfo->b_sel)
+			data |= BIT(12);
+		if (pinfo->g_sel)
+			data |= BIT(8);
+		if (pinfo->r_sel)
+			data |= BIT(4);
+		data |= (pinfo->dst_format & 0x0f); /* 4 bits */
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0040, data);
+
+		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+		data = pinfo->wr_mem_continue & 0x0ff;
+		data <<= 8;
+		data |= (pinfo->wr_mem_start & 0x0ff);
+		if (pinfo->insert_dcs_cmd)
+			data |= BIT(16);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0044, data);
+	} else
+		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+	dsi_ctrl = BIT(8) | BIT(2); /* clock enable & cmd mode */
+	intr_ctrl = 0;
+	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);
+
+	/* DSI_CTRL: CRC/ECC checks and per-data-lane enables. */
+	if (pinfo->crc_check)
+		dsi_ctrl |= BIT(24);
+	if (pinfo->ecc_check)
+		dsi_ctrl |= BIT(20);
+	if (pinfo->data_lane3)
+		dsi_ctrl |= BIT(7);
+	if (pinfo->data_lane2)
+		dsi_ctrl |= BIT(6);
+	if (pinfo->data_lane1)
+		dsi_ctrl |= BIT(5);
+	if (pinfo->data_lane0)
+		dsi_ctrl |= BIT(4);
+
+
+	data = 0;
+	if (pinfo->te_sel)
+		data |= BIT(31);
+	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+	data |= pinfo->dma_trigger;	/* cmd dma trigger */
+	data |= (pinfo->stream & 0x01) << 8;
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0084,
+				data); /* DSI_TRIG_CTRL */
+
+	/* DSI_LAN_SWAP_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00b0, ctrl_pdata->dlane_swap);
+
+	/* clock out ctrl */
+	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
+	data <<= 8;
+	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
+	/* DSI_CLKOUT_TIMING_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xc4, data);
+
+	data = 0;
+	if (pinfo->rx_eot_ignore)
+		data |= BIT(4);
+	if (pinfo->tx_eot_append)
+		data |= BIT(0);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00cc,
+				data); /* DSI_EOT_PACKET_CTRL */
+	/*
+	 * DSI_HS_TIMER_CTRL -> timer resolution = 8 esc clk
+	 * HS TX timeout - 16136 (0x3f08) esc clk
+	 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00bc, 0x3fd08);
+
+
+	/* allow only ack-err-status to generate interrupt */
+	/* DSI_ERR_INT_MASK0 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x010c, 0x03f03fc0);
+
+	intr_ctrl |= DSI_INTR_ERROR_MASK;
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110,
+				intr_ctrl); /* DSI_INTL_CTRL */
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x11c,
+					0x23f); /* DSI_CLK_CTRL */
+
+	/* Reset DSI_LANE_CTRL */
+	if (!ctrl_pdata->mmss_clamp)
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00ac, 0x0);
+
+	dsi_ctrl |= BIT(0);	/* enable dsi */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
+
+	/* enable contention detection for receiving */
+	mdss_dsi_lp_cd_rx(ctrl_pdata);
+
+	/* set DMA FIFO read watermark to 15/16 full */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x50, 0x30);
+
+	wmb(); /* ensure all DSI host configuration write are finished */
+}
+
+void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 data;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/*
+	 * Read-modify-write DMA_CTRL (0x3c) bit 26; per the dma_ctrl
+	 * comment elsewhere in this file, bit 26 set selects LP mode,
+	 * clear selects HS.
+	 */
+	data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x3c);
+	data = mode ? (data | BIT(26)) : (data & ~BIT(26));
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x3c, data);
+}
+
+/*
+ * mdss_dsi_sw_reset() - software-reset the DSI controller.
+ * @ctrl:    DSI controller data.
+ * @restore: when true, restore the saved DSI_CTRL value afterwards.
+ *
+ * Order-critical sequence, each step fenced with wmb():
+ * disable controller -> force clocks on -> pulse DSI_SW_RESET (0x118)
+ * -> optionally re-enable. Finally clears mdp_busy and wakes waiters,
+ * which is safe because the reset aborted any outstanding transfer.
+ */
+void mdss_dsi_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl, bool restore)
+{
+	u32 data0;
+	unsigned long flag;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	/* Save DSI_CTRL so it can be restored after the reset. */
+	data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
+	MIPI_OUTP(ctrl->ctrl_base + 0x0004, (data0 & ~BIT(0)));
+	/*
+	 * dsi controller need to be disabled before
+	 * clocks turned on
+	 */
+	wmb();	/* make sure dsi contoller is disabled */
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP(ctrl->ctrl_base + 0x11c, 0x23f); /* DSI_CLK_CTRL */
+	wmb();	/* make sure clocks enabled */
+
+	/* dsi controller can only be reset while clocks are running */
+	MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x01);
+	wmb();	/* make sure reset happen */
+	MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x00);
+	wmb();	/* controller out of reset */
+
+	if (restore) {
+		MIPI_OUTP(ctrl->ctrl_base + 0x0004, data0);
+		wmb();	/* make sure dsi controller enabled again */
+	}
+
+	/* It is safe to clear mdp_busy as reset is happening */
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	ctrl->mdp_busy = false;
+	complete_all(&ctrl->mdp_comp);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+}
+
+/**
+ * mdss_dsi_wait_for_lane_idle() - Wait for DSI lanes to be idle
+ * @ctrl: pointer to DSI controller structure
+ *
+ * This function waits for all the active DSI lanes to be idle by polling all
+ * the *FIFO_EMPTY bits and polling the lane status to ensure that all the lanes
+ * are in stop state. This function assumes that the bus clocks required to
+ * access the registers are already turned on.
+ *
+ * Return: 0 on success, -EINVAL on bad input, otherwise the
+ * readl_poll_timeout() error (-ETIMEDOUT) when the FIFOs never drain or
+ * the lanes never reach stop state.
+ */
+int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	u32 val;
+	u32 fifo_empty_mask = 0;
+	u32 stop_state_mask = 0;
+	struct mipi_panel_info *mipi;
+	u32 const sleep_us = 10;
+	u32 const timeout_us = 100;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi = &ctrl->panel_data.panel_info.mipi;
+
+	/* Build the masks only for the lanes that are actually enabled. */
+	if (mipi->data_lane0) {
+		stop_state_mask |= BIT(0);
+		fifo_empty_mask |= (BIT(12) | BIT(16));
+	}
+	if (mipi->data_lane1) {
+		stop_state_mask |= BIT(1);
+		fifo_empty_mask |= BIT(20);
+	}
+	if (mipi->data_lane2) {
+		stop_state_mask |= BIT(2);
+		fifo_empty_mask |= BIT(24);
+	}
+	if (mipi->data_lane3) {
+		stop_state_mask |= BIT(3);
+		fifo_empty_mask |= BIT(28);
+	}
+
+	pr_debug("%s: polling for fifo empty, mask=0x%08x\n", __func__,
+		fifo_empty_mask);
+	/*
+	 * Require ALL enabled-lane FIFO-empty bits to be set. The previous
+	 * condition, (val & fifo_empty_mask), succeeded as soon as any one
+	 * lane's FIFO drained, contradicting the contract documented above
+	 * (compare the full-match polls in mdss_dsi_stop_hs_clk_lane()).
+	 */
+	rc = readl_poll_timeout(ctrl->ctrl_base + FIFO_STATUS, val,
+		((val & fifo_empty_mask) == fifo_empty_mask),
+		sleep_us, timeout_us);
+	if (rc) {
+		pr_err("%s: fifo not empty, FIFO_STATUS=0x%08x\n",
+			__func__, val);
+		goto error;
+	}
+
+	pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
+		__func__, stop_state_mask);
+	/* Likewise, every enabled lane must report stop state. */
+	rc = readl_poll_timeout(ctrl->ctrl_base + LANE_STATUS, val,
+		((val & stop_state_mask) == stop_state_mask),
+		sleep_us, timeout_us);
+	if (rc) {
+		pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
+			__func__, val);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static void mdss_dsi_cfg_lane_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
+						u32 bits, int set)
+{
+	/* Read-modify-write DSI_LANE_CTRL (0xac): set or clear @bits. */
+	u32 val = MIPI_INP(ctrl->ctrl_base + 0x00ac);
+
+	val = set ? (val | bits) : (val & ~bits);
+	MIPI_OUTP(ctrl->ctrl_base + 0x0ac, val);
+}
+
+
+static inline bool mdss_dsi_poll_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 clk = 0;
+	int rc;
+
+	/* Poll DSI_LANE_STATUS (0xa8) bit 4: clock lane in stop state. */
+	rc = readl_poll_timeout(((ctrl->ctrl_base) + 0x00a8),
+				clk, (clk & 0x0010), 10, 1000);
+	if (rc) {
+		pr_err("%s: ndx=%d clk lane NOT stopped, clk=%x\n",
+				__func__, ctrl->ndx, clk);
+		return false;
+	}
+
+	return true;
+}
+
+static void mdss_dsi_wait_clk_lane_to_stop(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/* Fast path: clock lane is already in stop state. */
+	if (mdss_dsi_poll_clk_lane(ctrl))
+		return;
+
+	/*
+	 * Clock lane appears stuck at HS. Recover by forcing clk lane
+	 * TX stop (LANE_CTRL bit 20), re-polling, then clearing the
+	 * forced stop again.
+	 */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(20), 1);
+
+	if (!mdss_dsi_poll_clk_lane(ctrl))
+		pr_err("%s: clk lane recovery failed\n", __func__);
+
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(20), 0);
+}
+
+static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl);
+
+/*
+ * mdss_dsi_start_hs_clk_lane:
+ * this function is work around solution for 8994 dsi clk lane
+ * may stuck at HS problem
+ *
+ * Turns DSI clocks on, waits for a previously-forced clock lane to
+ * reach stop state if clk_lane_cnt is still non-zero, then forces the
+ * clock lane into HS (LANE_CTRL bit 28) for the next DMA or MDP stream.
+ * Serialized against the stop path by clk_lane_mutex.
+ */
+static void mdss_dsi_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+
+	/* make sure clk lane is stopped */
+	mdss_dsi_stop_hs_clk_lane(ctrl);
+
+	mutex_lock(&ctrl->clk_lane_mutex);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+	if (ctrl->clk_lane_cnt) {
+		/* A previous start was not balanced by a stop yet. */
+		pr_err("%s: ndx=%d do-wait, cnt=%d\n",
+				__func__, ctrl->ndx, ctrl->clk_lane_cnt);
+		mdss_dsi_wait_clk_lane_to_stop(ctrl);
+	}
+
+	/* force clk lane hs for next dma or mdp stream */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 1);
+	ctrl->clk_lane_cnt++;
+	pr_debug("%s: ndx=%d, set_hs, cnt=%d\n", __func__,
+				ctrl->ndx, ctrl->clk_lane_cnt);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_OFF);
+	mutex_unlock(&ctrl->clk_lane_mutex);
+}
+
+/*
+ * mdss_dsi_stop_hs_clk_lane:
+ * this function is work around solution for 8994 dsi clk lane
+ * may stuck at HS problem
+ *
+ * Undoes mdss_dsi_start_hs_clk_lane(): waits for the lane FIFOs to
+ * drain and the data lanes to reach stop state, clears the forced
+ * clock-lane HS (LANE_CTRL bit 28) and resets clk_lane_cnt. A FIFO
+ * poll timeout skips the data-lane poll (goto end) but still clears
+ * the forced HS state. Serialized by clk_lane_mutex.
+ */
+static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 fifo = 0;
+	u32 lane = 0;
+
+	mutex_lock(&ctrl->clk_lane_mutex);
+	if (ctrl->clk_lane_cnt == 0)	/* stopped already */
+		goto release;
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+	/* fifo: wait until all four lane HS FIFOs report empty */
+	if (readl_poll_timeout(((ctrl->ctrl_base) + 0x000c),
+			   fifo,
+			   ((fifo & 0x11110000) == 0x11110000),
+			       10, 1000)) {
+		pr_err("%s: fifo NOT empty, fifo=%x\n",
+					__func__, fifo);
+		goto end;
+	}
+
+	/* data lane status: all four lanes in stop state */
+	if (readl_poll_timeout(((ctrl->ctrl_base) + 0x00a8),
+			   lane,
+			   ((lane & 0x000f) == 0x000f),
+			       100, 2000)) {
+		pr_err("%s: datalane NOT stopped, lane=%x\n",
+					__func__, lane);
+	}
+end:
+	/* stop force clk lane hs */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0);
+
+	mdss_dsi_wait_clk_lane_to_stop(ctrl);
+
+	ctrl->clk_lane_cnt = 0;
+release:
+	pr_debug("%s: ndx=%d, cnt=%d\n", __func__,
+			ctrl->ndx, ctrl->clk_lane_cnt);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_OFF);
+	mutex_unlock(&ctrl->clk_lane_mutex);
+}
+
+static void mdss_dsi_cmd_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/*
+	 * With sync-wait enabled only the trigger controller proceeds,
+	 * and it starts the HS clock lane of its peer controller first.
+	 */
+	if (mdss_dsi_sync_wait_enable(ctrl)) {
+		struct mdss_dsi_ctrl_pdata *mctrl;
+
+		if (!mdss_dsi_sync_wait_trigger(ctrl))
+			return;
+
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (mctrl)
+			mdss_dsi_start_hs_clk_lane(mctrl);
+	}
+
+	mdss_dsi_start_hs_clk_lane(ctrl);
+}
+
+static void mdss_dsi_cmd_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/*
+	 * With sync-wait enabled only the trigger controller proceeds,
+	 * and it stops the HS clock lane of its peer controller first.
+	 */
+	if (mdss_dsi_sync_wait_enable(ctrl)) {
+		struct mdss_dsi_ctrl_pdata *mctrl;
+
+		if (!mdss_dsi_sync_wait_trigger(ctrl))
+			return;
+
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (mctrl)
+			mdss_dsi_stop_hs_clk_lane(mctrl);
+	}
+
+	mdss_dsi_stop_hs_clk_lane(ctrl);
+}
+
+/*
+ * mdss_dsi_ctl_phy_reset() - recover the DSI controller(s) from FIFO
+ * overflow or LP-RX timeout errors.
+ * @ctrl:  controller that reported the error.
+ * @event: DSI_EV_DLNx_FIFO_OVERFLOW or DSI_EV_LP_RX_TIMEOUT; selects
+ *         which lanes get a forced TX stop during recovery.
+ *
+ * Order-critical recovery: notify MDP via the recovery callback,
+ * disable PHY contention detection, disable the controller, force all
+ * dynamic clocks on, pulse DSI_SW_RESET, then toggle a forced lane TX
+ * stop until LANE_STATUS reads 0x1f1f before restoring the original
+ * DSI_CTRL value and contention detection. In split-display
+ * configuration the whole sequence is applied to both controllers in
+ * lockstep.
+ *
+ * NOTE(review): the "Clock lane still in stop state" error strings
+ * below fire when the lanes FAILED to reach stop state (0x1f1f) within
+ * the timeout — the wording looks inverted; confirm before relying on
+ * these log messages.
+ */
+static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event)
+{
+	u32 data0, data1, mask = 0, data_lane_en = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl0, *ctrl1;
+	u32 ln0, ln1, ln_ctrl0, ln_ctrl1, i;
+	int rc = 0;
+
+	/*
+	 * Add 2 ms delay suggested by HW team.
+	 * Check clk lane stop state after every 200 us
+	 */
+	u32 loop = 10, u_dly = 200;
+
+	pr_debug("%s: MDSS DSI CTRL and PHY reset. ctrl-num = %d\n",
+							__func__, ctrl->ndx);
+	if (event == DSI_EV_DLNx_FIFO_OVERFLOW) {
+		mask = BIT(20); /* clock lane only for overflow recovery */
+	} else if (event == DSI_EV_LP_RX_TIMEOUT) {
+		data_lane_en = (MIPI_INP(ctrl->ctrl_base + 0x0004) &
+				DSI_DATA_LANES_ENABLED) >> 4;
+		/* clock and data lanes for LP_RX_TO recovery */
+		mask = BIT(20) | (data_lane_en << 16);
+	}
+
+	if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
+		pr_debug("%s: Split display enabled\n", __func__);
+		ctrl0 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_0);
+		ctrl1 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_1);
+
+		/* Let MDP prepare for the reset; abort if suspending. */
+		if (ctrl0->recovery) {
+			rc = ctrl0->recovery->fxn(ctrl0->recovery->data,
+					MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW);
+			if (rc < 0) {
+				pr_debug("%s: Target is in suspend/shutdown\n",
+								__func__);
+				return;
+			}
+		}
+		/*
+		 * Disable PHY contention detection and receive.
+		 * Configure the strength ctrl 1 register.
+		 */
+		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
+		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+
+		data0 = MIPI_INP(ctrl0->ctrl_base + 0x0004);
+		data1 = MIPI_INP(ctrl1->ctrl_base + 0x0004);
+		/* Disable DSI video mode */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x004, (data0 & ~BIT(1)));
+		MIPI_OUTP(ctrl1->ctrl_base + 0x004, (data1 & ~BIT(1)));
+		/* Disable DSI controller */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x004,
+					(data0 & ~(BIT(0) | BIT(1))));
+		MIPI_OUTP(ctrl1->ctrl_base + 0x004,
+					(data1 & ~(BIT(0) | BIT(1))));
+		/* "Force On" all dynamic clocks */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x11c, 0x100a00);
+		MIPI_OUTP(ctrl1->ctrl_base + 0x11c, 0x100a00);
+
+		/* DSI_SW_RESET */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x118, 0x1);
+		MIPI_OUTP(ctrl1->ctrl_base + 0x118, 0x1);
+		wmb(); /* ensure write is finished before progressing */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x118, 0x0);
+		MIPI_OUTP(ctrl1->ctrl_base + 0x118, 0x0);
+		wmb(); /* ensure write is finished before progressing */
+
+		/* Remove "Force On" all dynamic clocks */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x11c, 0x00); /* DSI_CLK_CTRL */
+		MIPI_OUTP(ctrl1->ctrl_base + 0x11c, 0x00); /* DSI_CLK_CTRL */
+
+		/* Enable DSI controller */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x004, (data0 & ~BIT(1)));
+		MIPI_OUTP(ctrl1->ctrl_base + 0x004, (data1 & ~BIT(1)));
+
+		/*
+		 * Toggle Clk lane Force TX stop so that
+		 * clk lane status is no more in stop state
+		 */
+		ln0 = MIPI_INP(ctrl0->ctrl_base + 0x00a8);
+		ln1 = MIPI_INP(ctrl1->ctrl_base + 0x00a8);
+		pr_debug("%s: lane status, ctrl0 = 0x%x, ctrl1 = 0x%x\n",
+						 __func__, ln0, ln1);
+		ln_ctrl0 = MIPI_INP(ctrl0->ctrl_base + 0x00ac);
+		ln_ctrl1 = MIPI_INP(ctrl1->ctrl_base + 0x00ac);
+		MIPI_OUTP(ctrl0->ctrl_base + 0x0ac, ln_ctrl0 | mask);
+		MIPI_OUTP(ctrl1->ctrl_base + 0x0ac, ln_ctrl1 | mask);
+		ln_ctrl0 = MIPI_INP(ctrl0->ctrl_base + 0x00ac);
+		ln_ctrl1 = MIPI_INP(ctrl1->ctrl_base + 0x00ac);
+		/* Poll both controllers until all lanes report 0x1f1f. */
+		for (i = 0; i < loop; i++) {
+			ln0 = MIPI_INP(ctrl0->ctrl_base + 0x00a8);
+			ln1 = MIPI_INP(ctrl1->ctrl_base + 0x00a8);
+			if ((ln0 == 0x1f1f) && (ln1 == 0x1f1f))
+				break;
+			/* Check clk lane stopState for every 200us */
+			udelay(u_dly);
+		}
+		if (i == loop) {
+			MDSS_XLOG(ctrl0->ndx, ln0, 0x1f1f);
+			MDSS_XLOG(ctrl1->ndx, ln1, 0x1f1f);
+			pr_err("%s: Clock lane still in stop state\n",
+								__func__);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+						"dsi1_ctrl", "dsi1_phy", "panic");
+		}
+		pr_debug("%s: lane ctrl, ctrl0 = 0x%x, ctrl1 = 0x%x\n",
+						 __func__, ln0, ln1);
+		MIPI_OUTP(ctrl0->ctrl_base + 0x0ac, ln_ctrl0 & ~mask);
+		MIPI_OUTP(ctrl1->ctrl_base + 0x0ac, ln_ctrl1 & ~mask);
+
+		/* Enable Video mode for DSI controller */
+		MIPI_OUTP(ctrl0->ctrl_base + 0x004, data0);
+		MIPI_OUTP(ctrl1->ctrl_base + 0x004, data1);
+
+		/*
+		 * Enable PHY contention detection and receive.
+		 * Configure the strength ctrl 1 register.
+		 */
+		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
+		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+		/*
+		 * Add sufficient delay to make sure
+		 * pixel transmission as started
+		 */
+		udelay(200);
+	} else {
+		/* Single-controller variant of the same sequence. */
+		if (ctrl->recovery) {
+			rc = ctrl->recovery->fxn(ctrl->recovery->data,
+					MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW);
+			if (rc < 0) {
+				pr_debug("%s: Target is in suspend/shutdown\n",
+								__func__);
+				return;
+			}
+		}
+		/* Disable PHY contention detection and receive */
+		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
+
+		data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
+		/* Disable DSI video mode */
+		MIPI_OUTP(ctrl->ctrl_base + 0x004, (data0 & ~BIT(1)));
+		/* Disable DSI controller */
+		MIPI_OUTP(ctrl->ctrl_base + 0x004,
+					(data0 & ~(BIT(0) | BIT(1))));
+		/* "Force On" all dynamic clocks */
+		MIPI_OUTP(ctrl->ctrl_base + 0x11c, 0x100a00);
+
+		/* DSI_SW_RESET */
+		MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x1);
+		wmb(); /* ensure write is finished before progressing */
+
+		MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x0);
+		wmb(); /* ensure write is finished before progressing */
+
+		/* Remove "Force On" all dynamic clocks */
+		MIPI_OUTP(ctrl->ctrl_base + 0x11c, 0x00);
+		/* Enable DSI controller */
+		MIPI_OUTP(ctrl->ctrl_base + 0x004, (data0 & ~BIT(1)));
+
+		/*
+		 * Toggle Clk lane Force TX stop so that
+		 * clk lane status is no more in stop state
+		 */
+		ln0 = MIPI_INP(ctrl->ctrl_base + 0x00a8);
+		pr_debug("%s: lane status, ctrl = 0x%x\n",
+						 __func__, ln0);
+		ln_ctrl0 = MIPI_INP(ctrl->ctrl_base + 0x00ac);
+		MIPI_OUTP(ctrl->ctrl_base + 0x0ac, ln_ctrl0 | mask);
+		ln_ctrl0 = MIPI_INP(ctrl->ctrl_base + 0x00ac);
+		for (i = 0; i < loop; i++) {
+			ln0 = MIPI_INP(ctrl->ctrl_base + 0x00a8);
+			if (ln0 == 0x1f1f)
+				break;
+			/* Check clk lane stopState for every 200us */
+			udelay(u_dly);
+		}
+		if (i == loop) {
+			MDSS_XLOG(ctrl->ndx, ln0, 0x1f1f);
+			pr_err("%s: Clock lane still in stop state\n",
+								__func__);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+						"dsi1_ctrl", "dsi1_phy", "panic");
+		}
+		pr_debug("%s: lane status = 0x%x\n",
+						 __func__, ln0);
+		MIPI_OUTP(ctrl->ctrl_base + 0x0ac, ln_ctrl0 & ~mask);
+
+		/* Enable Video mode for DSI controller */
+		MIPI_OUTP(ctrl->ctrl_base + 0x004, data0);
+		/* Enable PHY contention detection and receiver */
+		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
+		/*
+		 * Add sufficient delay to make sure
+		 * pixel transmission as started
+		 */
+		udelay(200);
+	}
+	pr_debug("Recovery done\n");
+}
+
+void mdss_dsi_err_intr_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask,
+					int enable)
+{
+	/* RMW DSI_INT_CTRL (0x110), touching only the valid mask bits. */
+	u32 intr = MIPI_INP(ctrl->ctrl_base + 0x0110) & DSI_INTR_TOTAL_MASK;
+
+	if (enable)
+		intr |= mask;
+	else
+		intr &= ~mask;
+
+	pr_debug("%s: intr=%x enable=%d\n", __func__, intr, enable);
+
+	MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr); /* DSI_INTL_CTRL */
+}
+
+/*
+ * mdss_dsi_controller_cfg() - enable or disable the DSI controller.
+ * @enable: non-zero to set DSI_CTRL bit 0, zero to clear it.
+ * @pdata:  panel data; must be embedded in a mdss_dsi_ctrl_pdata.
+ *
+ * Before flipping the enable bit, waits for the command DMA engine to
+ * go idle and the HS FIFOs to drain (failures are only logged), and for
+ * the video engine to go idle — a video-engine timeout triggers a full
+ * software reset via mdss_dsi_sw_reset(..., false).
+ */
+void mdss_dsi_controller_cfg(int enable,
+			     struct mdss_panel_data *pdata)
+{
+
+	u32 dsi_ctrl;
+	u32 status;
+	u32 sleep_us = 1000;
+	u32 timeout_us = 16000;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
+			   status,
+			   ((status & 0x02) == 0),
+			       sleep_us, timeout_us))
+		pr_info("%s: DSI status=%x failed\n", __func__, status);
+
+	/* Check for x_HS_FIFO_EMPTY */
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x000c),
+			   status,
+			   ((status & 0x11111000) == 0x11111000),
+			       sleep_us, timeout_us))
+		pr_info("%s: FIFO status=%x failed\n", __func__, status);
+
+	/* Check for VIDEO_MODE_ENGINE_BUSY */
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
+			   status,
+			   ((status & 0x08) == 0),
+			       sleep_us, timeout_us)) {
+		pr_debug("%s: DSI status=%x\n", __func__, status);
+		pr_debug("%s: Doing sw reset\n", __func__);
+		mdss_dsi_sw_reset(ctrl_pdata, false);
+	}
+
+	/* Finally toggle only the controller-enable bit of DSI_CTRL. */
+	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
+	if (enable)
+		dsi_ctrl |= 0x01;
+	else
+		dsi_ctrl &= ~0x01;
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
+	wmb(); /* ensure write is finished before progressing */
+}
+
+void mdss_dsi_restore_intr_mask(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/*
+	 * Re-arm the always-on interrupt sources (command DMA done,
+	 * error, BTA done) on top of the currently programmed mask.
+	 */
+	u32 mask = MIPI_INP((ctrl->ctrl_base) + 0x0110) & DSI_INTR_TOTAL_MASK;
+
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, mask |
+			DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+			DSI_INTR_BTA_DONE_MASK);
+}
+
+/*
+ * mdss_dsi_op_mode_config() - switch the controller between video and
+ * command operation.
+ * @mode:  DSI_VIDEO_MODE or command mode (any other value).
+ * @pdata: panel data; must be embedded in a mdss_dsi_ctrl_pdata.
+ *
+ * Rewrites DSI_CTRL engine-enable bits, the interrupt mask and the
+ * command DMA control register. If the video engine is already running
+ * it is kept enabled while switching.
+ */
+void mdss_dsi_op_mode_config(int mode,
+			     struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl, intr_ctrl, dma_ctrl;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
+	/*If Video enabled, Keep Video and Cmd mode ON */
+	if (dsi_ctrl & 0x02)
+		dsi_ctrl &= ~0x05;
+	else
+		dsi_ctrl &= ~0x07;
+
+	if (mode == DSI_VIDEO_MODE) {
+		dsi_ctrl |= 0x03;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_BTA_DONE_MASK
+			| DSI_INTR_ERROR_MASK;
+	} else {		/* command mode */
+		dsi_ctrl |= 0x05;
+		if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+			dsi_ctrl |= 0x02;
+
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+			DSI_INTR_CMD_MDP_DONE_MASK | DSI_INTR_BTA_DONE_MASK;
+	}
+
+	dma_ctrl = BIT(28) | BIT(26); /* embedded mode & LP mode */
+	if (mdss_dsi_sync_wait_enable(ctrl_pdata))
+		dma_ctrl |= BIT(31);
+
+	pr_debug("%s: configuring ctrl%d\n", __func__, ctrl_pdata->ndx);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110, intr_ctrl);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x003c, dma_ctrl);
+	wmb(); /* ensure dsi op mode config is finished */
+}
+
+/*
+ * mdss_dsi_cmd_bta_sw_trigger() - issue a BTA (bus turn-around) via the
+ * software trigger and busy-wait for completion.
+ * @pdata: panel data; must be embedded in a mdss_dsi_ctrl_pdata.
+ *
+ * Writes the BTA_SW_TRIGGER register (0x98), polls DSI_STATUS (0x08)
+ * until the BTA-busy bit (0x10) clears, then drains/acknowledges any
+ * latched ack-error status. Assumes clocks are on; timeout is 10 ms.
+ */
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata)
+{
+	u32 status;
+	int timeout_us = 10000;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x098, 0x01);	/* trigger */
+	wmb(); /* ensure write is finished before progressing */
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
+				status, ((status & 0x0010) == 0),
+				0, timeout_us))
+		pr_info("%s: DSI status=%x failed\n", __func__, status);
+
+	mdss_dsi_ack_err_status(ctrl_pdata);
+
+	pr_debug("%s: BTA done, status = %d\n", __func__, status);
+}
+
+/*
+ * mdss_dsi_read_status() - send the panel's ESD status-read commands
+ * and collect the responses.
+ * @ctrl: DSI controller data.
+ *
+ * Sends each command in ctrl->status_cmds individually via the cmdlist,
+ * honoring the configured LP/HS link state, and concatenates the read
+ * bytes into ctrl->return_buf (lenp[i] bytes per command).
+ *
+ * Returns the (positive) result of the last mdss_dsi_cmdlist_put() on
+ * success, or its <= 0 error result on the first failure. Note: no
+ * bounds check is performed on return_buf — the sizes are assumed to
+ * have been validated when the status command table was parsed.
+ */
+static int mdss_dsi_read_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int i, rc, *lenp;
+	int start = 0;
+	struct dcs_cmd_req cmdreq;
+
+	rc = 1;
+	/* Per-command read lengths: explicit valid-params list if set. */
+	lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+
+	for (i = 0; i < ctrl->status_cmds.cmd_cnt; ++i) {
+		memset(&cmdreq, 0, sizeof(cmdreq));
+		cmdreq.cmds = ctrl->status_cmds.cmds + i;
+		cmdreq.cmds_cnt = 1;
+		cmdreq.flags = CMD_REQ_COMMIT | CMD_REQ_RX;
+		cmdreq.rlen = ctrl->status_cmds_rlen[i];
+		cmdreq.cb = NULL;
+		cmdreq.rbuf = ctrl->status_buf.data;
+
+		if (ctrl->status_cmds.link_state == DSI_LP_MODE)
+			cmdreq.flags |= CMD_REQ_LP_MODE;
+		else if (ctrl->status_cmds.link_state == DSI_HS_MODE)
+			cmdreq.flags |= CMD_REQ_HS_MODE;
+
+		rc = mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+		if (rc <= 0) {
+			pr_err("%s: get status: fail\n", __func__);
+			return rc;
+		}
+
+		memcpy(ctrl->return_buf + start,
+			ctrl->status_buf.data, lenp[i]);
+		start += lenp[i];
+	}
+
+	return rc;
+}
+
+
+/**
+ * mdss_dsi_reg_status_check() - Check dsi panel status through reg read
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check the panel status through reading the
+ * status register from the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+int mdss_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: Checking Register status\n", __func__);
+
+	/* Clocks must be on for the DCS read transactions below. */
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	sctrl_pdata = mdss_dsi_get_other_ctrl(ctrl_pdata);
+	if (!mdss_dsi_sync_wait_enable(ctrl_pdata)) {
+		ret = mdss_dsi_read_status(ctrl_pdata);
+	} else {
+		/*
+		 * Read commands to check ESD status are usually sent at
+		 * the same time to both the controllers. However, if
+		 * sync_wait is enabled, we need to ensure that the
+		 * dcs commands are first sent to the non-trigger
+		 * controller so that when the commands are triggered,
+		 * both controllers receive it at the same time.
+		 */
+		if (mdss_dsi_sync_wait_trigger(ctrl_pdata)) {
+			if (sctrl_pdata)
+				ret = mdss_dsi_read_status(sctrl_pdata);
+			ret = mdss_dsi_read_status(ctrl_pdata);
+		} else {
+			ret = mdss_dsi_read_status(ctrl_pdata);
+			if (sctrl_pdata)
+				ret = mdss_dsi_read_status(sctrl_pdata);
+		}
+	}
+
+	/*
+	 * mdss_dsi_read_status returns the number of bytes returned
+	 * by the panel. Success value is greater than zero and failure
+	 * case returns zero.
+	 */
+	if (ret > 0) {
+		/* Validate the bytes read via the panel-specific hook. */
+		if (!mdss_dsi_sync_wait_enable(ctrl_pdata) ||
+			mdss_dsi_sync_wait_trigger(ctrl_pdata))
+			ret = ctrl_pdata->check_read_status(ctrl_pdata);
+		else if (sctrl_pdata)
+			ret = ctrl_pdata->check_read_status(sctrl_pdata);
+	} else {
+		pr_err("%s: Read status register returned error\n", __func__);
+	}
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s: Read register done with ret: %d\n", __func__, ret);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_dsc_config() - program the DSI compression-mode registers
+ * for a DSC-compressed panel.
+ * @ctrl: DSI controller data.
+ * @dsc:  DSC descriptor (packet sizing, slice info, eol byte count).
+ *
+ * Selects the video- or command-mode compression register bank based
+ * on ctrl->panel_mode and writes packet size, datatype, packets per
+ * line, end-of-line byte count and the enable bit.
+ *
+ * NOTE(review): pkt_per_line is encoded as 0->1 pkt, 1->2 pkt,
+ * 2->4 pkt; only inputs 1, 2 and 4 map onto valid encodings (3 is
+ * explicitly unsupported per the comment below, and other values would
+ * produce an undefined field) — callers are assumed to pass valid
+ * values.
+ */
+void mdss_dsi_dsc_config(struct mdss_dsi_ctrl_pdata *ctrl, struct dsc_desc *dsc)
+{
+	u32 data, offset;
+
+	if (dsc->pkt_per_line <= 0) {
+		pr_err("%s: Error: pkt_per_line cannot be negative or 0\n",
+				__func__);
+		return;
+	}
+
+	if (ctrl->panel_mode == DSI_VIDEO_MODE) {
+		MIPI_OUTP((ctrl->ctrl_base) +
+			MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL2, 0);
+		data = dsc->bytes_per_pkt << 16;
+		data |= (0x0b << 8);	/*  dtype of compressed image */
+		offset = MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL;
+	} else {
+		/* strem 0 */
+		MIPI_OUTP((ctrl->ctrl_base) +
+			MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL3, 0);
+
+		MIPI_OUTP((ctrl->ctrl_base) +
+			MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL2,
+			dsc->bytes_in_slice);
+
+		data = DTYPE_DCS_LWRITE << 8;
+		offset = MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL;
+	}
+
+	/*
+	 * pkt_per_line:
+	 * 0 == 1 pkt
+	 * 1 == 2 pkt
+	 * 2 == 4 pkt
+	 * 3 pkt is not support
+	 */
+	if (dsc->pkt_per_line == 4)
+		data |= (dsc->pkt_per_line - 2) << 6;
+	else
+		data |= (dsc->pkt_per_line - 1) << 6;
+	data |= dsc->eol_byte_num << 4;
+	data |= 1;	/* enable */
+	MIPI_OUTP((ctrl->ctrl_base) + offset, data);
+}
+
+void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 data;
+	bool burst;
+
+	/* Burst mode exists only on DSI HW revision 1.3.0 and later. */
+	if (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103)
+		return;
+
+	/*
+	 * idle and burst mode are mutually exclusive features,
+	 * so disable burst mode if idle has been configured for
+	 * the panel, otherwise enable the feature.
+	 */
+	burst = !ctrl->idle_enabled;
+
+	data = MIPI_INP(ctrl->ctrl_base + 0x1b8);
+	if (burst)
+		data |= BIT(16); /* enable burst mode */
+	else
+		data &= ~BIT(16); /* disable burst mode */
+
+	ctrl->burst_mode_enabled = burst;
+
+	MIPI_OUTP((ctrl->ctrl_base + 0x1b8), data);
+	pr_debug("%s: burst=%d\n", __func__, ctrl->burst_mode_enabled);
+
+}
+
+/*
+ * mdss_dsi_mode_setup() - program the per-mode timing/stream registers.
+ * @pdata: panel data; must be embedded in a mdss_dsi_ctrl_pdata.
+ *
+ * Video mode: writes the active-h/active-v/total, HSYNC/VSYNC and
+ * border registers (optionally double-buffered via timing_db_mode).
+ * Command mode: computes the MDP stream control/total words (DSC,
+ * partial-update ROI, or full frame), optionally enables NULL-packet
+ * insertion on HW rev >= 1.4.0, and configures burst mode. Finally
+ * programs the DSC compression registers when DSC is in use.
+ */
+static void mdss_dsi_mode_setup(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	struct dsc_desc *dsc = NULL;
+	u32 data = 0;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	u32 ystride, bpp, dst_bpp, byte_num;
+	u32 stream_ctrl, stream_total;
+	u32 dummy_xres = 0, dummy_yres = 0;
+	u32 hsync_period, vsync_period;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &pdata->panel_info;
+	if (pinfo->compression_mode == COMPRESSION_DSC)
+		dsc = &pinfo->dsc;
+
+	/* With FBC the horizontal timings scale by the compressed bpp. */
+	dst_bpp = pdata->panel_info.fbc.enabled ?
+		(pdata->panel_info.fbc.target_bpp) : (pinfo->bpp);
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = mult_frac(pdata->panel_info.xres, dst_bpp,
+			pdata->panel_info.bpp);
+	height = pdata->panel_info.yres;
+	pr_debug("%s: fbc=%d width=%d height=%d dst_bpp=%d\n", __func__,
+		pdata->panel_info.fbc.enabled, width, height, dst_bpp);
+
+	if (dsc)	/* compressed */
+		width = dsc->pclk_per_line;
+
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		dummy_xres = mult_frac((pdata->panel_info.lcdc.border_left +
+				pdata->panel_info.lcdc.border_right),
+				dst_bpp, pdata->panel_info.bpp);
+		dummy_yres = pdata->panel_info.lcdc.border_top +
+				pdata->panel_info.lcdc.border_bottom;
+	}
+
+	mipi  = &pdata->panel_info.mipi;
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		vsync_period = vspw + vbp + height + dummy_yres + vfp;
+		hsync_period = hspw + hbp + width + dummy_xres + hfp;
+
+		/* Enable the timing double buffer before programming. */
+		if (ctrl_pdata->timing_db_mode)
+			MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e8, 0x1);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x24,
+			((hspw + hbp + width + dummy_xres) << 16 |
+			(hspw + hbp)));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x28,
+			((vspw + vbp + height + dummy_yres) << 16 |
+			(vspw + vbp)));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				((vsync_period - 1) << 16)
+				| (hsync_period - 1));
+
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x30, (hspw << 16));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x34, 0);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x38, (vspw << 16));
+		/* Latch the double-buffered timing values. */
+		if (ctrl_pdata->timing_db_mode)
+			MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
+	} else {		/* command mode */
+		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+			bpp = 2;
+		else
+			bpp = 3;	/* Default format set to RGB888 */
+
+		/* +1 accounts for the DCS command byte in the packet. */
+		ystride = width * bpp + 1;
+
+		if (dsc) {
+			byte_num =  dsc->bytes_per_pkt;
+			if (pinfo->mipi.insert_dcs_cmd)
+				byte_num++;
+
+			stream_ctrl = (byte_num << 16) |
+				(mipi->vc << 8) | DTYPE_DCS_LWRITE;
+			stream_total = dsc->pic_height << 16 |
+				dsc->pclk_per_line;
+		} else if (pinfo->partial_update_enabled &&
+			mdss_dsi_is_panel_on(pdata) && pinfo->roi.w &&
+			pinfo->roi.h) {
+			stream_ctrl = (((pinfo->roi.w * bpp) + 1) << 16) |
+					(mipi->vc << 8) | DTYPE_DCS_LWRITE;
+			stream_total = pinfo->roi.h << 16 | pinfo->roi.w;
+		} else {
+			stream_ctrl = (ystride << 16) | (mipi->vc << 8) |
+					DTYPE_DCS_LWRITE;
+			stream_total = height << 16 | width;
+		}
+
+		/* DSI_COMMAND_MODE_NULL_INSERTION_CTRL */
+		if ((ctrl_pdata->shared_data->hw_rev >= MDSS_DSI_HW_REV_104)
+				&& ctrl_pdata->null_insert_enabled) {
+			data = (mipi->vc << 1); /* Virtual channel ID */
+			data |= 0 << 16; /* Word count of the NULL packet */
+			data |= 0x1; /* Enable Null insertion */
+			MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2b4, data);
+		}
+
+		mdss_dsi_set_burst_mode(ctrl_pdata);
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, stream_ctrl);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x58, stream_ctrl);
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, stream_total);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, stream_total);
+	}
+
+	if (dsc)	/* compressed */
+		mdss_dsi_dsc_config(ctrl_pdata, dsc);
+}
+
+void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/* Full reprogram: timing/stream setup, host init, op mode. */
+	pr_debug("%s: called for ctrl%d\n", __func__, ctrl->ndx);
+
+	mdss_dsi_mode_setup(&ctrl->panel_data);
+	mdss_dsi_host_init(&ctrl->panel_data);
+	mdss_dsi_op_mode_config(ctrl->panel_data.panel_info.mipi.mode,
+				&ctrl->panel_data);
+}
+
+/**
+ * mdss_dsi_bta_status_check() - Check dsi panel status through bta check
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check status of the panel using bta check
+ * for the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int ret = 0;
+	unsigned long flag;
+	int ignore_underflow = 0;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+
+		/*
+		 * This should not return error otherwise
+		 * BTA status thread will treat it as dead panel scenario
+		 * and request for blank/unblank
+		 */
+		return 0;
+	}
+
+	mutex_lock(&ctrl_pdata->cmd_mutex);
+
+	/* In video mode the BTA can race the stream; mask underflows. */
+	if (ctrl_pdata->panel_mode == DSI_VIDEO_MODE)
+		ignore_underflow = 1;
+
+	pr_debug("%s: Checking BTA status\n", __func__);
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	/* Arm the BTA completion/irq before triggering. */
+	spin_lock_irqsave(&ctrl_pdata->mdp_lock, flag);
+	reinit_completion(&ctrl_pdata->bta_comp);
+	mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
+	spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
+	/* mask out overflow errors */
+	if (ignore_underflow)
+		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0x0f0000);
+	MIPI_OUTP(ctrl_pdata->ctrl_base + 0x098, 0x01); /* trigger */
+	wmb(); /* ensure write is finished before progressing */
+
+	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
+						DSI_BTA_EVENT_TIMEOUT);
+	if (ret <= 0) {
+		/* Timeout or kill: tear the irq back down ourselves. */
+		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
+		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+	}
+
+	if (ignore_underflow) {
+		/* clear pending overflow status */
+		mdss_dsi_set_reg(ctrl_pdata, 0xc, 0xffffffff, 0x44440000);
+		/* restore overflow isr */
+		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
+	}
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);
+
+	mutex_unlock(&ctrl_pdata->cmd_mutex);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_reg_tx() - transmit a single 4-byte command via registers
+ * @data: raw 32-bit command word to send
+ * @ctrl_base: mapped base address of the DSI controller
+ *
+ * Writes the command word directly to the controller and software-triggers
+ * the transfer; no DMA and no completion interrupt are involved, a fixed
+ * udelay lets the transfer drain. Always returns 4 (bytes written).
+ */
+int mdss_dsi_cmd_reg_tx(u32 data,
+			unsigned char *ctrl_base)
+{
+	int i;
+	char *bp;
+
+	/* dump the command word byte by byte for debugging */
+	bp = (char *)&data;
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < 4; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+
+	MIPI_OUTP(ctrl_base + 0x0084, 0x04);/* sw trigger */
+	MIPI_OUTP(ctrl_base + 0x0004, 0x135);
+
+	wmb(); /* ensure write is finished before progressing */
+
+	MIPI_OUTP(ctrl_base + 0x03c, data);
+	wmb(); /* ensure write is finished before progressing */
+	MIPI_OUTP(ctrl_base + 0x090, 0x01); /* trigger */
+	wmb(); /* ensure write is finished before progressing */
+
+	udelay(300);
+
+	return 4;
+}
+
+static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl);
+
+static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_buf *tp);
+
+static int mdss_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct dsi_buf *rp, int rlen);
+
+/*
+ * mdss_dsi_cmd_dma_tpg_tx() - send a packed command via the DMA TPG FIFO
+ * @ctrl: DSI controller to transmit on
+ * @tp: tx buffer holding the packed command (at most DMA_TPG_FIFO_LEN bytes)
+ *
+ * Pushes the command through the command-DMA test pattern generator FIFO
+ * (DSI HW rev >= 1.3.0 only) instead of fetching it over AXI — used e.g.
+ * for secure display sessions (see mdss_dsi_cmdlist_commit). When this
+ * ctrl is the broadcast trigger, the data is mirrored to the other
+ * controller as well.
+ *
+ * Return: bytes sent on success, -EINVAL for unsupported length/hw rev,
+ * or -ETIMEDOUT when the DMA done completion never arrives.
+ */
+static int mdss_dsi_cmd_dma_tpg_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_buf *tp)
+{
+	int len, i, ret = 0, data = 0;
+	u32 *bp;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+	if (tp->len > DMA_TPG_FIFO_LEN) {
+		pr_debug("command length more than FIFO length\n");
+		return -EINVAL;
+	}
+
+	if (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103) {
+		pr_err("CMD DMA TPG not supported for this DSI version\n");
+		return -EINVAL;
+	}
+
+	bp = (u32 *)tp->data;
+	len = ALIGN(tp->len, 4);
+
+	reinit_completion(&ctrl->dma_comp);
+
+	/* broadcast: mirror every write to the other controller too */
+	if (mdss_dsi_sync_wait_trigger(ctrl))
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+
+	data = BIT(16) | BIT(17); /* select CMD_DMA_PATTERN_SEL to 3 */
+	data |= BIT(2); /* select CMD_DMA_FIFO_MODE to 1 */
+	data |= BIT(1); /* enable CMD_DMA_TPG */
+
+	MIPI_OUTP(ctrl->ctrl_base + 0x15c, data);
+	if (mctrl)
+		MIPI_OUTP(mctrl->ctrl_base + 0x15c, data);
+
+	/*
+	 * The DMA command parameters need to be programmed to the DMA_INIT_VAL
+	 * register in the proper order. The 'len' value will be a multiple
+	 * of 4, the padding bytes to make sure of this will be taken care of in
+	 * mdss_dsi_cmd_dma_add API.
+	 */
+	for (i = 0; i < len; i += 4) {
+		MIPI_OUTP(ctrl->ctrl_base + 0x17c, *bp);
+		if (mctrl)
+			MIPI_OUTP(mctrl->ctrl_base + 0x17c, *bp);
+		wmb(); /* make sure write happens before writing next command */
+		bp++;
+	}
+
+	/*
+	 * The number of writes to the DMA_INIT_VAL register should be an even
+	 * number of dwords (32 bits). In case 'len' is not a multiple of 8,
+	 * we need to do make an extra write to the register with 0x00 to
+	 * satisfy this condition.
+	 */
+	if ((len % 8) != 0) {
+		MIPI_OUTP(ctrl->ctrl_base + 0x17c, 0x00);
+		if (mctrl)
+			MIPI_OUTP(mctrl->ctrl_base + 0x17c, 0x00);
+	}
+
+	if (mctrl) {
+		MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
+		MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01); /* trigger */
+	}
+	MIPI_OUTP(ctrl->ctrl_base + 0x04c, len);
+	wmb(); /* make sure DMA length is programmed */
+
+	MIPI_OUTP(ctrl->ctrl_base + 0x090, 0x01); /* trigger */
+	wmb(); /* make sure DMA trigger happens */
+
+	/* dma_comp is completed by the CMD DMA done interrupt */
+	ret = wait_for_completion_timeout(&ctrl->dma_comp,
+				msecs_to_jiffies(DMA_TX_TIMEOUT));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else
+		ret = tp->len;
+
+	/* Reset the DMA TPG FIFO */
+	MIPI_OUTP(ctrl->ctrl_base + 0x1ec, 0x1);
+	wmb(); /* make sure FIFO reset happens */
+	MIPI_OUTP(ctrl->ctrl_base + 0x1ec, 0x0);
+	wmb(); /* make sure FIFO reset happens */
+	/* Disable CMD_DMA_TPG */
+	MIPI_OUTP(ctrl->ctrl_base + 0x15c, 0x0);
+
+	if (mctrl) {
+		/* Reset the DMA TPG FIFO */
+		MIPI_OUTP(mctrl->ctrl_base + 0x1ec, 0x1);
+		wmb(); /* make sure FIFO reset happens */
+		MIPI_OUTP(mctrl->ctrl_base + 0x1ec, 0x0);
+		wmb(); /* make sure FIFO reset happens */
+		/* Disable CMD_DMA_TPG */
+		MIPI_OUTP(mctrl->ctrl_base + 0x15c, 0x0);
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmds2buf_tx() - pack DSI commands into the tx buffer and DMA them
+ * @ctrl: DSI controller to transmit on
+ * @cmds: array of command descriptors
+ * @cnt: number of descriptors in @cmds
+ * @use_dma_tpg: non-zero to transmit through the DMA TPG FIFO path
+ *		 instead of the regular command DMA path
+ *
+ * Commands are accumulated in ctrl->tx_buf until a descriptor marked
+ * "last" is seen, at which point the accumulated batch is flushed to
+ * the hardware and any post-command delay is applied.
+ *
+ * Return: total number of bytes queued, or 0 on failure.
+ */
+static int mdss_dsi_cmds2buf_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int cnt, int use_dma_tpg)
+{
+	struct dsi_buf *tp;
+	struct dsi_cmd_desc *cm;
+	struct dsi_ctrl_hdr *dchdr;
+	int len, wait, tot = 0;
+
+	tp = &ctrl->tx_buf;
+	mdss_dsi_buf_init(tp);
+	cm = cmds;
+	len = 0;
+	while (cnt--) {
+		dchdr = &cm->dchdr;
+		mdss_dsi_buf_reserve(tp, len);
+		len = mdss_dsi_cmd_dma_add(tp, cm);
+		if (!len) {
+			pr_err("%s: failed to add cmd = 0x%x\n",
+				__func__, cm->payload[0]);
+			return 0;
+		}
+		tot += len;
+		if (dchdr->last) {
+			tp->data = tp->start; /* begin of buf */
+
+			wait = mdss_dsi_wait4video_eng_busy(ctrl);
+
+			mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+			if (use_dma_tpg)
+				len = mdss_dsi_cmd_dma_tpg_tx(ctrl, tp);
+			else
+				len = mdss_dsi_cmd_dma_tx(ctrl, tp);
+			if (IS_ERR_VALUE((unsigned long)len)) {
+				mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+				pr_err("%s: failed to call cmd_dma_tx for cmd = 0x%x\n",
+					__func__, cm->payload[0]);
+				return 0;
+			}
+			pr_debug("%s: cmd_dma_tx for cmd = 0x%x, len = %d\n",
+				__func__, cm->payload[0], len);
+
+			/*
+			 * Give usleep_range() a non-zero slack window
+			 * (min + 10us) instead of min == max, consistent
+			 * with the other delays in this driver and with
+			 * the usleep_range() API guidance.
+			 */
+			if (!wait || dchdr->wait > VSYNC_PERIOD)
+				usleep_range(dchdr->wait * 1000,
+					(dchdr->wait * 1000) + 10);
+
+			mdss_dsi_buf_init(tp);
+			len = 0;
+		}
+		cm++;
+	}
+	return tot;
+}
+
+/**
+ * __mdss_dsi_cmd_mode_config() - Enable/disable command mode engine
+ * @ctrl: pointer to the dsi controller structure
+ * @enable: true to enable command mode, false to disable command mode
+ *
+ * Temporarily turns on the command mode engine (needed so video mode
+ * panels can accept DMA commands) or turns it back off when it is no
+ * longer required.
+ *
+ * Return: true, if there was a mode switch to command mode for video mode
+ * panels.
+ */
+static inline bool __mdss_dsi_cmd_mode_config(
+	struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
+{
+	u32 reg;
+
+	reg = MIPI_INP((ctrl->ctrl_base) + 0x0004);
+
+	if (!enable) {
+		/* clear the command engine bit, keep everything else */
+		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, reg & ~BIT(2));
+		return false;
+	}
+
+	/* only controllers currently in video mode need the extra enable */
+	if (!(reg & BIT(1)))
+		return false;
+
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0004, reg | BIT(2));
+	return true;
+}
+
+/*
+ * mdss_dsi_cmds_tx() - transmit an array of DSI commands
+ * @ctrl: DSI controller to transmit on
+ * @cmds: command descriptors to send
+ * @cnt: number of descriptors
+ * @use_dma_tpg: non-zero to send through the DMA TPG FIFO path
+ *
+ * Thread context only. Temporarily enables the command engine (also on
+ * the other controller for split-DSI broadcast), pushes the commands
+ * out and restores the previous engine state. Returns bytes sent, or 0
+ * on failure / when deferring to the broadcast trigger.
+ */
+int mdss_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+		struct dsi_cmd_desc *cmds, int cnt, int use_dma_tpg)
+{
+	int len = 0;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+	/*
+	 * Turn on cmd mode in order to transmit the commands.
+	 * For video mode, do not send cmds more than one pixel line,
+	 * since it only transmit it during BLLP.
+	 */
+
+	if (mdss_dsi_sync_wait_enable(ctrl)) {
+		if (mdss_dsi_sync_wait_trigger(ctrl)) {
+			/* trigger ctrl: also switch the other ctrl */
+			mctrl = mdss_dsi_get_other_ctrl(ctrl);
+			if (!mctrl) {
+				pr_warn("%s: sync_wait, NULL at other control\n",
+					__func__);
+				goto do_send;
+			}
+
+			mctrl->cmd_cfg_restore =
+				__mdss_dsi_cmd_mode_config(mctrl, 1);
+		} else if (!ctrl->do_unicast) {
+			/* broadcast cmds, let cmd_trigger do it */
+			return 0;
+
+		}
+	}
+
+	pr_debug("%s: ctrl=%d do_unicast=%d\n", __func__,
+		ctrl->ndx, ctrl->do_unicast);
+
+do_send:
+	ctrl->cmd_cfg_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
+
+	len = mdss_dsi_cmds2buf_tx(ctrl, cmds, cnt, use_dma_tpg);
+	if (!len)
+		pr_err("%s: failed to call\n", __func__);
+
+	/* restore the previous engine state on both controllers */
+	if (!ctrl->do_unicast) {
+		if (mctrl && mctrl->cmd_cfg_restore) {
+			__mdss_dsi_cmd_mode_config(mctrl, 0);
+			mctrl->cmd_cfg_restore = false;
+		}
+
+		if (ctrl->cmd_cfg_restore) {
+			__mdss_dsi_cmd_mode_config(ctrl, 0);
+			ctrl->cmd_cfg_restore = false;
+		}
+	}
+
+	return len;
+}
+
+/* MIPI_DSI_MRPS, Maximum Return Packet Size */
+/*
+ * max_pktsize[0] is patched at runtime (see mdss_dsi_cmds_rx) with the
+ * desired maximum return packet size before pkt_size_cmd is sent.
+ */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+/* descriptor for the DCS "set maximum return packet size" command */
+static struct dsi_cmd_desc pkt_size_cmd = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)},
+	max_pktsize,
+};
+
+/*
+ * mdss_dsi_cmds_rx() - dcs read from panel
+ * @ctrl: dsi controller
+ * @cmds: read command descriptor
+ * @rlen: number of bytes to read back
+ * @use_dma_tpg: non-zero to transmit through the DMA TPG FIFO path
+ *
+ * controller have 4 registers can hold 16 bytes of rxed data
+ * dcs packet: 4 bytes header + payload + 2 bytes crc
+ * 1st read: 4 bytes header + 10 bytes payload + 2 crc
+ * 2nd read: 14 bytes payload + 2 crc
+ * 3rd read: 14 bytes payload + 2 crc
+ *
+ * Returns the number of bytes actually read back (rp->read_cnt).
+ */
+int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int rlen, int use_dma_tpg)
+{
+	int data_byte, rx_byte, dlen, end;
+	int short_response, diff, pkt_size, ret = 0;
+	struct dsi_buf *tp, *rp;
+	char cmd;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+
+	if (ctrl->panel_data.panel_info.panel_ack_disabled) {
+		pr_err("%s: ACK from Client not supported\n", __func__);
+		return rlen;
+	}
+
+	if (rlen == 0) {
+		pr_debug("%s: Minimum MRPS value should be 1\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * Turn on cmd mode in order to transmit the commands.
+	 * For video mode, do not send cmds more than one pixel line,
+	 * since it only transmit it during BLLP.
+	 */
+	if (mdss_dsi_sync_wait_enable(ctrl)) {
+		if (mdss_dsi_sync_wait_trigger(ctrl)) {
+			mctrl = mdss_dsi_get_other_ctrl(ctrl);
+			if (!mctrl) {
+				pr_warn("%s: sync_wait, NULL at other control\n",
+					__func__);
+				goto do_send;
+			}
+
+			mctrl->cmd_cfg_restore =
+				__mdss_dsi_cmd_mode_config(mctrl, 1);
+		} else {
+			/* skip cmds, let cmd_trigger do it */
+			return 0;
+
+		}
+	}
+
+do_send:
+	ctrl->cmd_cfg_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
+
+	/* reads of 1-2 bytes come back as a short response packet */
+	if (rlen <= 2) {
+		short_response = 1;
+		pkt_size = rlen;
+		rx_byte = 4;
+	} else {
+		short_response = 0;
+		data_byte = 10; /* first read */
+		if (rlen < data_byte)
+			pkt_size = rlen;
+		else
+			pkt_size = data_byte;
+		rx_byte = data_byte + 6; /* 4 header + 2 crc */
+	}
+
+
+	tp = &ctrl->tx_buf;
+	rp = &ctrl->rx_buf;
+
+	end = 0;
+	mdss_dsi_buf_init(rp);
+	while (!end) {
+		pr_debug("%s: rlen=%d pkt_size=%d rx_byte=%d\n",
+			__func__, rlen, pkt_size, rx_byte);
+		/*
+		 * Skip max_pkt_size dcs cmd if
+		 * its already been configured
+		 * for the requested pkt_size
+		 */
+		if (pkt_size == ctrl->cur_max_pkt_size)
+			goto skip_max_pkt_size;
+
+		max_pktsize[0] = pkt_size;
+		mdss_dsi_buf_init(tp);
+		ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+		if (!ret) {
+			pr_err("%s: failed to add max_pkt_size\n",
+				__func__);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+
+		mdss_dsi_wait4video_eng_busy(ctrl);
+
+		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+		if (use_dma_tpg)
+			ret = mdss_dsi_cmd_dma_tpg_tx(ctrl, tp);
+		else
+			ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+			pr_err("%s: failed to tx max_pkt_size\n",
+				__func__);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+		ctrl->cur_max_pkt_size = pkt_size;
+		pr_debug("%s: max_pkt_size=%d sent\n",
+			__func__, pkt_size);
+
+skip_max_pkt_size:
+		mdss_dsi_buf_init(tp);
+		ret = mdss_dsi_cmd_dma_add(tp, cmds);
+		if (!ret) {
+			pr_err("%s: failed to add cmd = 0x%x\n",
+				__func__, cmds->payload[0]);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+
+		if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_101) {
+			/* clear the RDBK_DATA registers */
+			MIPI_OUTP(ctrl->ctrl_base + 0x01d4, 0x1);
+			wmb(); /* make sure the RDBK registers are cleared */
+			MIPI_OUTP(ctrl->ctrl_base + 0x01d4, 0x0);
+			wmb(); /* make sure the RDBK registers are cleared */
+		}
+
+		mdss_dsi_wait4video_eng_busy(ctrl); /* video mode only */
+		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+		/* transmit read command to client */
+		if (use_dma_tpg)
+			ret = mdss_dsi_cmd_dma_tpg_tx(ctrl, tp);
+		else
+			ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+			pr_err("%s: failed to tx cmd = 0x%x\n",
+				__func__, cmds->payload[0]);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+
+		/*
+		 * once cmd_dma_done interrupt received,
+		 * return data from client is ready and stored
+		 * at RDBK_DATA register already
+		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
+		 * after that dcs header lost during shift into registers
+		 */
+		dlen = mdss_dsi_cmd_dma_rx(ctrl, rp, rx_byte);
+
+		if (!dlen)
+			goto end;
+
+		if (short_response)
+			break;
+
+		/* on the final pass 'diff' padding bytes are discarded */
+		if (rlen <= data_byte) {
+			diff = data_byte - rlen;
+			end = 1;
+		} else {
+			diff = 0;
+			rlen -= data_byte;
+		}
+
+		dlen -= 2; /* 2 crc */
+		dlen -= diff;
+		rp->data += dlen; /* next start position */
+		rp->len += dlen;
+		if (!end) {
+			data_byte = 14; /* NOT first read */
+			if (rlen < data_byte)
+				pkt_size += rlen;
+			else
+				pkt_size += data_byte;
+		}
+		pr_debug("%s: rp data=%x len=%d dlen=%d diff=%d\n",
+			__func__, (int) (unsigned long) rp->data,
+			rp->len, dlen, diff);
+	}
+
+	/*
+	 * For single Long read, if the requested rlen < 10,
+	 * we need to shift the start position of rx
+	 * data buffer to skip the bytes which are not
+	 * updated.
+	 */
+	if (rp->read_cnt < 16 && !short_response)
+		rp->data = rp->start + (16 - rp->read_cnt);
+	else
+		rp->data = rp->start;
+	/* dispatch on the response packet's data type */
+	cmd = rp->data[0];
+	switch (cmd) {
+	case DTYPE_ACK_ERR_RESP:
+		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
+		rp->len = 0;
+		rp->read_cnt = 0;
+		/* fall-through */
+	case DTYPE_GEN_READ1_RESP:
+	case DTYPE_DCS_READ1_RESP:
+		mdss_dsi_short_read1_resp(rp);
+		break;
+	case DTYPE_GEN_READ2_RESP:
+	case DTYPE_DCS_READ2_RESP:
+		mdss_dsi_short_read2_resp(rp);
+		break;
+	case DTYPE_GEN_LREAD_RESP:
+	case DTYPE_DCS_LREAD_RESP:
+		mdss_dsi_long_read_resp(rp);
+		break;
+	default:
+		pr_warn("%s:Invalid response cmd\n", __func__);
+		rp->len = 0;
+		rp->read_cnt = 0;
+	}
+end:
+
+	/* restore the command engine state on both controllers */
+	if (mctrl && mctrl->cmd_cfg_restore) {
+		__mdss_dsi_cmd_mode_config(mctrl, 0);
+		mctrl->cmd_cfg_restore = false;
+	}
+
+	if (ctrl->cmd_cfg_restore) {
+		__mdss_dsi_cmd_mode_config(ctrl, 0);
+		ctrl->cmd_cfg_restore = false;
+	}
+
+	if (rp->len && (rp->len != rp->read_cnt))
+		pr_err("Bytes read: %d requested:%d mismatch\n",
+			rp->read_cnt, rp->len);
+
+	return rp->read_cnt;
+}
+
+/*
+ * mdss_dsi_cmd_dma_tx() - DMA a packed command buffer out over DSI
+ * @ctrl: DSI controller to transmit on
+ * @tp: tx buffer holding the packed command stream
+ *
+ * Maps the buffer for the controller (through the SMMU when attached),
+ * programs the DMA address/length, triggers the transfer and waits for
+ * the CMD DMA done interrupt, with a fallback status-register poll in
+ * case the irq was missed. When this ctrl is the broadcast trigger, the
+ * same buffer is queued on the other controller first. In video mode,
+ * transient overflow errors around the DMA are masked and cleared.
+ *
+ * Note: the unused local 'bp' (assigned from tp->data but never read)
+ * has been removed.
+ *
+ * Return: bytes sent, -ENOMEM on iommu mapping failure, or -ETIMEDOUT.
+ */
+static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_buf *tp)
+{
+	int len, ret = 0;
+	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+	int ignored = 0; /* overflow ignored */
+
+	len = ALIGN(tp->len, 4);
+	ctrl->dma_size = ALIGN(tp->len, SZ_4K);
+
+	ctrl->mdss_util->iommu_lock();
+	if (ctrl->mdss_util->iommu_attached()) {
+		ret = mdss_smmu_dsi_map_buffer(tp->dmap, domain, ctrl->dma_size,
+			&(ctrl->dma_addr), tp->start, DMA_TO_DEVICE);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_err("unable to map dma memory to iommu(%d)\n", ret);
+			ctrl->mdss_util->iommu_unlock();
+			return -ENOMEM;
+		}
+		ctrl->dmap_iommu_map = true;
+	} else {
+		ctrl->dma_addr = tp->dmap;
+	}
+
+	reinit_completion(&ctrl->dma_comp);
+
+	/* video mode keeps streaming; overflow around the DMA is expected */
+	if (ctrl->panel_mode == DSI_VIDEO_MODE)
+		ignored = 1;
+
+	if (mdss_dsi_sync_wait_trigger(ctrl)) {
+		/* broadcast same cmd to other panel */
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (mctrl && mctrl->dma_addr == 0) {
+			if (ignored) {
+				/* mask out overflow isr */
+				mdss_dsi_set_reg(mctrl, 0x10c,
+						0x0f0000, 0x0f0000);
+			}
+			MIPI_OUTP(mctrl->ctrl_base + 0x048, ctrl->dma_addr);
+			MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
+			MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01); /* trigger */
+		}
+	}
+
+	if (ignored) {
+		/* mask out overflow isr */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0x0f0000);
+	}
+
+	/* send cmd to its panel */
+	MIPI_OUTP((ctrl->ctrl_base) + 0x048, ctrl->dma_addr);
+	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
+	wmb(); /* ensure write is finished before progressing */
+
+	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
+	wmb(); /* ensure write is finished before progressing */
+
+	if (ctrl->do_unicast) {
+		/* let cmd_trigger to kickoff later */
+		pr_debug("%s: SKIP, ndx=%d do_unicast=%d\n", __func__,
+			ctrl->ndx, ctrl->do_unicast);
+		ret = tp->len;
+		goto end;
+	}
+
+	ret = wait_for_completion_timeout(&ctrl->dma_comp,
+				msecs_to_jiffies(DMA_TX_TIMEOUT));
+	if (ret == 0) {
+		u32 reg_val, status;
+
+		/* check whether dma finished but the isr was missed */
+		reg_val = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
+		status = reg_val & DSI_INTR_CMD_DMA_DONE;
+		if (status) {
+			reg_val &= DSI_INTR_MASK_ALL;
+			/* clear CMD DMA and BTA_DONE isr only */
+			reg_val |= (DSI_INTR_CMD_DMA_DONE | DSI_INTR_BTA_DONE);
+			MIPI_OUTP(ctrl->ctrl_base + 0x0110, reg_val);
+			mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
+			complete(&ctrl->dma_comp);
+
+			pr_warn("%s: dma tx done but irq not triggered\n",
+				__func__);
+		} else {
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	if (!IS_ERR_VALUE((unsigned long)ret))
+		ret = tp->len;
+
+	if (mctrl && mctrl->dma_addr) {
+		if (ignored) {
+			/* clear pending overflow status */
+			mdss_dsi_set_reg(mctrl, 0xc, 0xffffffff, 0x44440000);
+			/* restore overflow isr */
+			mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
+		}
+		if (mctrl->dmap_iommu_map) {
+			mdss_smmu_dsi_unmap_buffer(mctrl->dma_addr, domain,
+				mctrl->dma_size, DMA_TO_DEVICE);
+			mctrl->dmap_iommu_map = false;
+		}
+		mctrl->dma_addr = 0;
+		mctrl->dma_size = 0;
+	}
+
+	if (ctrl->dmap_iommu_map) {
+		mdss_smmu_dsi_unmap_buffer(ctrl->dma_addr, domain,
+			ctrl->dma_size, DMA_TO_DEVICE);
+		ctrl->dmap_iommu_map = false;
+	}
+
+	if (ignored) {
+		/* clear pending overflow status */
+		mdss_dsi_set_reg(ctrl, 0xc, 0xffffffff, 0x44440000);
+		/* restore overflow isr */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
+	}
+	ctrl->dma_addr = 0;
+	ctrl->dma_size = 0;
+end:
+	ctrl->mdss_util->iommu_unlock();
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_dma_rx() - collect read-back data from the RDBK registers
+ * @ctrl: DSI controller the read was issued on
+ * @rp: rx buffer the payload is appended to
+ * @rx_byte: number of bytes expected in this pass (4, or payload + 6)
+ *
+ * The controller exposes at most 16 bytes (4 x 32-bit RDBK_DATA
+ * registers) per pass; longer reads are split into multiple passes by
+ * the caller, and bytes repeated between passes are skipped here.
+ *
+ * Return: rx_byte, or 0 when errors were detected and nothing was read.
+ */
+static int mdss_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_buf *rp, int rx_byte)
+
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, off, cnt;
+	bool ack_error = false;
+	char reg[16];
+	int repeated_bytes = 0;
+
+	lp = (u32 *)rp->data;
+	temp = (u32 *)reg;
+	/* number of 32-bit RDBK registers to read, capped at 4 */
+	cnt = rx_byte;
+	cnt += 3;
+	cnt >>= 2;
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_101) {
+		/* hw reports the actual byte count in the upper half */
+		rp->read_cnt = (MIPI_INP((ctrl->ctrl_base) + 0x01d4) >> 16);
+		pr_debug("%s: bytes read:%d\n", __func__, rp->read_cnt);
+
+		/* an extra 4-byte error report indicates a panel ACK error */
+		ack_error = (rx_byte == 4) ? (rp->read_cnt == 8) :
+			((rp->read_cnt - 4) == (max_pktsize[0] + 6));
+
+		if (ack_error)
+			rp->read_cnt -= 4; /* 4 byte read err report */
+		if (!rp->read_cnt) {
+			pr_err("%s: Errors detected, no data rxed\n", __func__);
+			return 0;
+		}
+	} else if (rx_byte == 4) {
+		rp->read_cnt = 4;
+	} else {
+		rp->read_cnt = (max_pktsize[0] + 6);
+	}
+
+	/*
+	 * In case of multiple reads from the panel, after the first read, there
+	 * is possibility that there are some bytes in the payload repeating in
+	 * the RDBK_DATA registers. Since we read all the parameters from the
+	 * panel right from the first byte for every pass. We need to skip the
+	 * repeating bytes and then append the new parameters to the rx buffer.
+	 */
+	if (rp->read_cnt > 16) {
+		int bytes_shifted, data_lost = 0, rem_header_bytes = 0;
+		/* Any data more than 16 bytes will be shifted out */
+		bytes_shifted = rp->read_cnt - rx_byte;
+		if (bytes_shifted >= 4)
+			data_lost = bytes_shifted - 4; /* remove dcs header */
+		else
+			rem_header_bytes = 4 - bytes_shifted; /* rem header */
+		/*
+		 * (rp->len - 4) -> current rx buffer data length.
+		 * If data_lost > 0, then ((rp->len - 4) - data_lost) will be
+		 * the number of repeating bytes.
+		 * If data_lost == 0, then ((rp->len - 4) + rem_header_bytes)
+		 * will be the number of bytes repeating in between rx buffer
+		 * and the current RDBK_DATA registers. We need to skip the
+		 * repeating bytes.
+		 */
+		repeated_bytes = (rp->len - 4) - data_lost + rem_header_bytes;
+	}
+
+	off = 0x06c; /* DSI_RDBK_DATA0 */
+	off += ((cnt - 1) * 4);
+
+	/* read the registers highest-first; data arrives big-endian */
+	for (i = 0; i < cnt; i++) {
+		data = (u32)MIPI_INP((ctrl->ctrl_base) + off);
+		/* to network byte order */
+		if (!repeated_bytes)
+			*lp++ = ntohl(data);
+		else
+			*temp++ = ntohl(data);
+		pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+			__func__, data, ntohl(data));
+		off -= 4;
+	}
+
+	/* Skip duplicates and append other data to the rx buffer */
+	if (repeated_bytes) {
+		for (i = repeated_bytes; i < 16; i++)
+			rp->data[j++] = reg[i];
+	}
+
+	return rx_byte;
+}
+
+/*
+ * mdss_dsi_bus_bandwidth_vote() - reference-counted AXI bus bandwidth vote
+ * @sdata: shared DSI data holding the bus client handle and refcount
+ * @on: true to add a vote, false to drop one
+ *
+ * Only the 0->1 and 1->0 refcount transitions reach the bus scaling
+ * driver; nested votes are absorbed by the counter. An unbalanced "off"
+ * vote is warned about and otherwise ignored.
+ *
+ * Return: 0 on success, or the error from the bus scaling request.
+ */
+static int mdss_dsi_bus_bandwidth_vote(struct dsi_shared_data *sdata, bool on)
+{
+	bool transition = false;
+	int rc = 0;
+
+	if (on) {
+		transition = (sdata->bus_refcount == 0);
+		sdata->bus_refcount++;
+	} else if (sdata->bus_refcount == 0) {
+		pr_warn("%s: bus bw votes are not balanced\n",
+			__func__);
+	} else {
+		sdata->bus_refcount--;
+		transition = (sdata->bus_refcount == 0);
+	}
+
+	if (!transition)
+		return 0;
+
+	rc = msm_bus_scale_client_update_request(sdata->bus_handle,
+						on ? 1 : 0);
+	if (rc)
+		pr_err("%s: Bus bandwidth vote failed\n", __func__);
+
+	return rc;
+}
+
+
+/*
+ * mdss_dsi_en_wait4dynamic_done() - trigger dynamic refresh and wait
+ * @ctrl: DSI controller (clock master for split DSI)
+ *
+ * Unmasks the DYNAMIC_REFRESH interrupt, kicks the dynamic refresh
+ * state machine (also on the clock-slave controller for split DSI) and
+ * waits up to four vsync periods for the done interrupt, falling back
+ * to a status-register poll if the irq was missed.
+ *
+ * Return: 0 on success, -ETIMEDOUT on genuine timeout.
+ */
+int mdss_dsi_en_wait4dynamic_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+	u32 data;
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
+
+	/* DSI_INTL_CTRL */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	data &= DSI_INTR_TOTAL_MASK;
+	data |= DSI_INTR_DYNAMIC_REFRESH_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->dynamic_comp);
+	mdss_dsi_enable_irq(ctrl, DSI_DYNAMIC_TERM);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	/*
+	 * Ensure that registers are updated before triggering
+	 * dynamic refresh
+	 */
+	wmb();
+
+	MIPI_OUTP((ctrl->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
+			(BIT(13) | BIT(8) | BIT(0)));
+
+	/*
+	 * Configure DYNAMIC_REFRESH_CTRL for second controller only
+	 * for split DSI cases.
+	 */
+	if (mdss_dsi_is_ctrl_clk_master(ctrl))
+		sctrl_pdata = mdss_dsi_get_ctrl_clk_slave();
+
+	if (sctrl_pdata)
+		MIPI_OUTP((sctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
+				(BIT(13) | BIT(8) | BIT(0)));
+
+	rc = wait_for_completion_timeout(&ctrl->dynamic_comp,
+				msecs_to_jiffies(VSYNC_PERIOD * 4));
+	if (rc == 0) {
+		u32 reg_val, status;
+
+		/* check whether refresh completed but the isr was missed */
+		reg_val = MIPI_INP(ctrl->ctrl_base + MDSS_DSI_INT_CTRL);
+		status = reg_val & DSI_INTR_DYNAMIC_REFRESH_DONE;
+		if (status) {
+			reg_val &= DSI_INTR_MASK_ALL;
+			/* clear dfps DONE isr only */
+			reg_val |= DSI_INTR_DYNAMIC_REFRESH_DONE;
+			MIPI_OUTP(ctrl->ctrl_base + MDSS_DSI_INT_CTRL, reg_val);
+			mdss_dsi_disable_irq(ctrl, DSI_DYNAMIC_TERM);
+			pr_warn_ratelimited("%s: dfps done but irq not triggered\n",
+				__func__);
+		} else {
+			pr_err("Dynamic interrupt timedout\n");
+			rc = -ETIMEDOUT;
+		}
+	}
+
+	/* mask the dynamic refresh interrupt again */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	data &= DSI_INTR_TOTAL_MASK;
+	data &= ~DSI_INTR_DYNAMIC_REFRESH_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_wait4video_done() - wait for the next VIDEO_DONE interrupt
+ * @ctrl: DSI controller (video mode)
+ *
+ * Clears any stale VIDEO_DONE status, unmasks the interrupt, waits up
+ * to four vsync periods for it to fire, then masks it again. Used by
+ * mdss_dsi_wait4video_eng_busy() to align command DMA with video timing.
+ */
+void mdss_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+	u32 data;
+
+	/* DSI_INTL_CTRL */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	/* clear previous VIDEO_DONE interrupt first */
+	data &= DSI_INTR_TOTAL_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, (data | DSI_INTR_VIDEO_DONE));
+	wmb(); /* make sure write happened */
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->video_comp);
+	mdss_dsi_enable_irq(ctrl, DSI_VIDEO_TERM);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	/* set interrupt enable bit for VIDEO_DONE */
+	data |= DSI_INTR_VIDEO_DONE_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+	wmb(); /* make sure write happened */
+
+	wait_for_completion_timeout(&ctrl->video_comp,
+			msecs_to_jiffies(VSYNC_PERIOD * 4));
+
+	/* mask VIDEO_DONE again */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	data &= DSI_INTR_TOTAL_MASK;
+	data &= ~DSI_INTR_VIDEO_DONE_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+}
+
+/*
+ * mdss_dsi_wait4video_eng_busy() - delay a command transfer past the BLLP
+ * @ctrl: DSI controller
+ *
+ * For video mode panels with MDP active: waits for VIDEO_DONE and then
+ * sleeps through the vertical blanking so that the following command
+ * DMA skips the BLLP window.
+ *
+ * Return: 1 when such a wait was performed, 0 otherwise (e.g. command
+ * mode panels).
+ */
+static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ret = 0;
+	u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	if (ctrl->panel_mode == DSI_CMD_MODE)
+		return ret;
+
+	if (ctrl->ctrl_state & CTRL_STATE_MDP_ACTIVE) {
+		mdss_dsi_wait4video_done(ctrl);
+		v_total = mdss_panel_get_vtotal(pinfo);
+		v_blank = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+		/* prefer the current dynamic fps over the nominal rate */
+		if (pinfo->dynamic_fps && pinfo->current_fps)
+			fps = pinfo->current_fps;
+		else
+			fps = pinfo->mipi.frame_rate;
+
+		sleep_ms = CEIL((v_blank * 1000), (v_total * fps));
+		/* delay sleep_ms to skip BLLP */
+		if (sleep_ms)
+			usleep_range((sleep_ms * 1000), (sleep_ms * 1000) + 10);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_mdp_start() - mark the start of an MDP transfer
+ * @ctrl: DSI controller being kicked off
+ *
+ * Under mdp_lock: arms the MDP-done interrupt, flags the controller
+ * busy and resets mdp_comp so mdss_dsi_cmd_mdp_busy() can wait on it.
+ */
+void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	mdss_dsi_enable_irq(ctrl, DSI_MDP_TERM);
+	ctrl->mdp_busy = true;
+	reinit_completion(&ctrl->mdp_comp);
+	MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+}
+
+/*
+ * mdss_dsi_mdp_busy_tout_check() - recover from a missed MDP-done irq
+ * @ctrl: DSI controller whose mdp_comp wait timed out
+ *
+ * If the MDP_DONE status bit is set but the isr never ran (or the event
+ * thread never woke), clear the status manually, mark the controller
+ * idle and complete all waiters.
+ *
+ * Return: 0 when recovered this way, 1 for a genuine timeout.
+ */
+static int mdss_dsi_mdp_busy_tout_check(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+	u32 isr;
+	bool stop_hs_clk = false;
+	int tout = 1;
+
+	/*
+	 * two possible scenario:
+	 * 1) DSI_INTR_CMD_MDP_DONE set but isr not fired
+	 * 2) DSI_INTR_CMD_MDP_DONE set and cleared (isr fired)
+	 * but event_thread not wakeup
+	 */
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+		  MDSS_DSI_CLK_ON);
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+
+	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		pr_warn("INTR_CMD_MDP_DONE set but isr not fired\n");
+		isr &= DSI_INTR_MASK_ALL;
+		isr |= DSI_INTR_CMD_MDP_DONE; /* clear this isr only */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
+		ctrl->mdp_busy = false;
+		if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+			ctrl->panel_mode == DSI_CMD_MODE) {
+			/* has hs_lane_recovery do the work */
+			stop_hs_clk = true;
+		}
+		tout = 0;	/* recovered */
+	}
+
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	/* must be done outside the spinlock */
+	if (stop_hs_clk)
+		mdss_dsi_stop_hs_clk_lane(ctrl);
+
+	complete_all(&ctrl->mdp_comp);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+		  MDSS_DSI_CLK_OFF);
+
+	return tout;
+}
+
+/*
+ * mdss_dsi_cmd_mdp_busy() - wait for any in-flight MDP transfer to end
+ * @ctrl: DSI controller
+ *
+ * Blocks until mdp_busy (set by mdss_dsi_cmd_mdp_start()) is cleared by
+ * the MDP-done isr, or until DMA_TX_TIMEOUT elapses, in which case the
+ * missed-irq recovery path decides whether it was a real timeout.
+ */
+void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flags;
+	int need_wait = 0;
+	int rc;
+
+	pr_debug("%s: start pid=%d\n",
+				__func__, current->pid);
+
+	MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid, XLOG_FUNC_ENTRY);
+	spin_lock_irqsave(&ctrl->mdp_lock, flags);
+	if (ctrl->mdp_busy == true)
+		need_wait++;
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+
+	if (need_wait) {
+		/* wait until DMA finishes the current job */
+		pr_debug("%s: pending pid=%d\n",
+				__func__, current->pid);
+		rc = wait_for_completion_timeout(&ctrl->mdp_comp,
+					msecs_to_jiffies(DMA_TX_TIMEOUT));
+		/* re-check under the lock in case the isr just cleared it */
+		spin_lock_irqsave(&ctrl->mdp_lock, flags);
+		if (!ctrl->mdp_busy)
+			rc = 1;
+		spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+		if (!rc && mdss_dsi_mdp_busy_tout_check(ctrl))
+			pr_err("%s: timeout error\n", __func__);
+	}
+	pr_debug("%s: done pid=%d\n", __func__, current->pid);
+	MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid, XLOG_FUNC_EXIT);
+}
+
+/*
+ * mdss_dsi_cmdlist_tx() - transmit a queued DCS command request
+ * @ctrl: DSI controller to transmit on
+ * @req: command request taken from the command list
+ *
+ * Resolves unicast vs. broadcast handling for split DSI, pushes the
+ * commands out and invokes the request callback (if any) with the byte
+ * count. Returns that byte count (0 on failure).
+ */
+int mdss_dsi_cmdlist_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *req)
+{
+	int bytes;
+
+	/* unicast applies only when this ctrl is not the broadcast trigger */
+	if (mdss_dsi_sync_wait_enable(ctrl))
+		ctrl->do_unicast = !ctrl->cmd_sync_wait_trigger &&
+					!!(req->flags & CMD_REQ_UNICAST);
+
+	bytes = mdss_dsi_cmds_tx(ctrl, req->cmds, req->cmds_cnt,
+			(req->flags & CMD_REQ_DMA_TPG));
+
+	if (req->cb)
+		req->cb(bytes);
+
+	return bytes;
+}
+
+/*
+ * mdss_dsi_cmdlist_rx() - execute a queued DCS read request
+ * @ctrl: DSI controller to read on
+ * @req: read request; req->rbuf receives the payload
+ *
+ * Performs the read, copies the received payload into the caller's
+ * buffer and invokes the request callback (if any) with the byte count.
+ * Returns the number of bytes read (0 when no rx buffer was provided).
+ */
+int mdss_dsi_cmdlist_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *req)
+{
+	struct dsi_buf *rx;
+	int bytes_read = 0;
+
+	if (!req->rbuf) {
+		pr_err("%s: No rx buffer provided\n", __func__);
+	} else {
+		rx = &ctrl->rx_buf;
+		bytes_read = mdss_dsi_cmds_rx(ctrl, req->cmds, req->rlen,
+				(req->flags & CMD_REQ_DMA_TPG));
+		memcpy(req->rbuf, rx->data, rx->len);
+		ctrl->rx_len = bytes_read;
+	}
+
+	if (req->cb)
+		req->cb(bytes_read);
+
+	return bytes_read;
+}
+
+/*
+ * mdss_dsi_delay_cmd() - decide whether a non-MDP command must wait
+ * @ctrl: DSI controller
+ * @from_mdp: true when the request comes from the mdp kickoff path
+ *
+ * Only relevant for split DSI, command mode panels with burst mode
+ * enabled, after panel init. Returns true when an MDP kickoff is
+ * scheduled in software (mdp_busy set) but the hardware MDP_BUSY bit
+ * (status reg bit 2) is not yet set, i.e. the caller should invoke the
+ * mdp callback to wait before sending the command.
+ */
+static inline bool mdss_dsi_delay_cmd(struct mdss_dsi_ctrl_pdata *ctrl,
+		bool from_mdp)
+{
+	unsigned long flags;
+	bool mdp_busy = false;
+	bool need_wait = false;
+
+	if (!ctrl->mdp_callback)
+		goto exit;
+
+	/* delay only for split dsi, cmd mode and burst mode enabled cases */
+	if (!mdss_dsi_is_hw_config_split(ctrl->shared_data) ||
+			!(ctrl->panel_mode == DSI_CMD_MODE) ||
+			!ctrl->burst_mode_enabled)
+		goto exit;
+
+	/* delay only if cmd is not from mdp and panel has been initialized */
+	if (from_mdp || !(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT))
+		goto exit;
+
+	/* if broadcast enabled, apply delay only if this is the ctrl trigger */
+	if (mdss_dsi_sync_wait_enable(ctrl) &&
+			!mdss_dsi_sync_wait_trigger(ctrl))
+		goto exit;
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flags);
+	if (ctrl->mdp_busy == true)
+		mdp_busy = true;
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+
+	/*
+	 * apply delay only if:
+	 * mdp_busy bool is set - kickoff is being scheduled by sw
+	 * MDP_BUSY bit is not set - transfer is not on-going in hw yet
+	 */
+	if (mdp_busy && !(MIPI_INP(ctrl->ctrl_base + 0x008) & BIT(2)))
+		need_wait = true;
+
+exit:
+	MDSS_XLOG(need_wait, from_mdp, mdp_busy);
+	return need_wait;
+}
+
+int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
+{
+ struct dcs_cmd_req *req;
+ struct mdss_panel_info *pinfo;
+ struct mdss_rect *roi = NULL;
+ bool use_iommu = false;
+ int ret = -EINVAL;
+ int rc = 0;
+ bool hs_req = false;
+ bool cmd_mutex_acquired = false;
+
+ if (from_mdp) { /* from mdp kickoff */
+ if (!ctrl->burst_mode_enabled) {
+ mutex_lock(&ctrl->cmd_mutex);
+ cmd_mutex_acquired = true;
+ }
+ pinfo = &ctrl->panel_data.panel_info;
+ if (pinfo->partial_update_enabled)
+ roi = &pinfo->roi;
+ }
+
+ req = mdss_dsi_cmdlist_get(ctrl, from_mdp);
+ if (req && from_mdp && ctrl->burst_mode_enabled) {
+ mutex_lock(&ctrl->cmd_mutex);
+ cmd_mutex_acquired = true;
+ }
+
+ MDSS_XLOG(ctrl->ndx, from_mdp, ctrl->mdp_busy, current->pid,
+ XLOG_FUNC_ENTRY);
+
+ if (req && (req->flags & CMD_REQ_HS_MODE))
+ hs_req = true;
+
+ if ((!ctrl->burst_mode_enabled) || from_mdp) {
+ /* make sure dsi_cmd_mdp is idle */
+ mdss_dsi_cmd_mdp_busy(ctrl);
+ }
+
+ /*
+ * if secure display session is enabled
+ * and DSI controller version is above 1.3.0,
+ * then send DSI commands using TPG FIFO.
+ */
+ if (mdss_get_sd_client_cnt() && req) {
+ if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_103) {
+ req->flags |= CMD_REQ_DMA_TPG;
+ } else {
+ if (cmd_mutex_acquired)
+ mutex_unlock(&ctrl->cmd_mutex);
+ return -EPERM;
+ }
+ }
+
+ /* For DSI versions less than 1.3.0, CMD DMA TPG is not supported */
+ if (req && (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103))
+ req->flags &= ~CMD_REQ_DMA_TPG;
+
+ pr_debug("%s: ctrl=%d from_mdp=%d pid=%d\n", __func__,
+ ctrl->ndx, from_mdp, current->pid);
+
+ if (from_mdp) { /* from mdp kickoff */
+ /*
+ * when partial update enabled, the roi of pinfo
+ * is updated before mdp kickoff. Either width or
+ * height of roi is non zero, then really kickoff
+ * will followed.
+ */
+ if (!roi || (roi->w != 0 || roi->h != 0)) {
+ if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+ ctrl->panel_mode == DSI_CMD_MODE)
+ mdss_dsi_start_hs_clk_lane(ctrl);
+ }
+ } else { /* from dcs send */
+ if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+ ctrl->panel_mode == DSI_CMD_MODE && hs_req)
+ mdss_dsi_cmd_start_hs_clk_lane(ctrl);
+ }
+
+ if (!req)
+ goto need_lock;
+
+ MDSS_XLOG(ctrl->ndx, req->flags, req->cmds_cnt, from_mdp, current->pid);
+
+ pr_debug("%s: from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
+
+ if (!(req->flags & CMD_REQ_DMA_TPG)) {
+ /*
+ * mdss interrupt is generated in mdp core clock domain
+ * mdp clock need to be enabled to receive dsi interrupt
+ * also, axi bus bandwidth need since dsi controller will
+ * fetch dcs commands from axi bus
+ */
+ rc = mdss_dsi_bus_bandwidth_vote(ctrl->shared_data, true);
+ if (rc) {
+ pr_err("%s: Bus bw vote failed\n", __func__);
+ if (from_mdp)
+ mutex_unlock(&ctrl->cmd_mutex);
+ return rc;
+ }
+
+ if (ctrl->mdss_util->iommu_ctrl) {
+ rc = ctrl->mdss_util->iommu_ctrl(1);
+ if (IS_ERR_VALUE((unsigned long)rc)) {
+ pr_err("IOMMU attach failed\n");
+ mutex_unlock(&ctrl->cmd_mutex);
+ return rc;
+ }
+ use_iommu = true;
+ }
+ }
+
+ mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+ MDSS_DSI_CLK_ON);
+
+ /*
+ * In ping pong split cases, check if we need to apply a
+ * delay for any commands that are not coming from
+ * mdp path
+ */
+ mutex_lock(&ctrl->mutex);
+ if (mdss_dsi_delay_cmd(ctrl, from_mdp))
+ ctrl->mdp_callback->fxn(ctrl->mdp_callback->data,
+ MDP_INTF_CALLBACK_DSI_WAIT);
+ mutex_unlock(&ctrl->mutex);
+
+ if (req->flags & CMD_REQ_HS_MODE)
+ mdss_dsi_set_tx_power_mode(0, &ctrl->panel_data);
+
+ if (req->flags & CMD_REQ_RX)
+ ret = mdss_dsi_cmdlist_rx(ctrl, req);
+ else
+ ret = mdss_dsi_cmdlist_tx(ctrl, req);
+
+ if (req->flags & CMD_REQ_HS_MODE)
+ mdss_dsi_set_tx_power_mode(1, &ctrl->panel_data);
+
+ if (!(req->flags & CMD_REQ_DMA_TPG)) {
+ if (use_iommu)
+ ctrl->mdss_util->iommu_ctrl(0);
+
+ (void)mdss_dsi_bus_bandwidth_vote(ctrl->shared_data, false);
+ }
+
+ mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+ MDSS_DSI_CLK_OFF);
+need_lock:
+
+ MDSS_XLOG(ctrl->ndx, from_mdp, ctrl->mdp_busy, current->pid,
+ XLOG_FUNC_EXIT);
+
+ if (from_mdp) { /* from mdp kickoff */
+ /*
+ * when partial update enabled, the roi of pinfo
+ * is updated before mdp kickoff. Either width or
+ * height of roi is 0, then it is false kickoff so
+ * no mdp_busy flag set needed.
+ * when partial update disabled, mdp_busy flag
+ * alway set.
+ */
+ if (!roi || (roi->w != 0 || roi->h != 0))
+ mdss_dsi_cmd_mdp_start(ctrl);
+ if (cmd_mutex_acquired)
+ mutex_unlock(&ctrl->cmd_mutex);
+ } else { /* from dcs send */
+ if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+ ctrl->panel_mode == DSI_CMD_MODE &&
+ (req && (req->flags & CMD_REQ_HS_MODE)))
+ mdss_dsi_cmd_stop_hs_clk_lane(ctrl);
+ }
+
+ return ret;
+}
+
+/*
+ * __dsi_fifo_error_handler() - recover the DSI controller after a FIFO error.
+ * Turns all DSI clocks on around a software reset of the controller, then
+ * optionally invokes the MDP recovery callback for an underflow. Under
+ * ping-pong split, the slave controller is soft-reset the same way.
+ * Caller context: dsi_event_thread (may hold ctrl->mutex for underflow path).
+ */
+static void __dsi_fifo_error_handler(struct mdss_dsi_ctrl_pdata *ctrl,
+	bool recovery_needed)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl;
+	bool use_pp_split = false;
+
+	use_pp_split = ctrl->panel_data.panel_info.use_pingpong_split;
+
+	/* clocks must be running for the register-level sw reset */
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+	mdss_dsi_sw_reset(ctrl, true);
+	if (recovery_needed)
+		ctrl->recovery->fxn(ctrl->recovery->data,
+			MDP_INTF_DSI_CMD_FIFO_UNDERFLOW);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_OFF);
+
+	/* in ping-pong split both controllers drive one panel: reset peer too */
+	sctrl = mdss_dsi_get_other_ctrl(ctrl);
+	if (sctrl && use_pp_split) {
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+		mdss_dsi_sw_reset(sctrl, true);
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	}
+}
+
+/*
+ * dsi_send_events() - enqueue error/recovery event(s) for dsi_event_thread.
+ * @events: bitmask of DSI_EV_* flags to service.
+ * @arg:    event-specific argument (e.g. the DSI term for STOP_HS_CLK_LANE).
+ * Safe from atomic context (spinlock + wake_up, no sleeping).
+ * NOTE(review): the producer index wraps modulo DSI_EVENT_Q_MAX with no
+ * full-queue check, so a burst of more than DSI_EVENT_Q_MAX unconsumed
+ * events would overwrite pending entries — presumably acceptable for this
+ * error path; confirm against consumer rate.
+ */
+static void dsi_send_events(struct mdss_dsi_ctrl_pdata *ctrl,
+					u32 events, u32 arg)
+{
+	struct dsi_event_q *evq;
+
+	if (!dsi_event.inited)
+		return;
+
+	pr_debug("%s: ev=%x\n", __func__, events);
+
+	spin_lock(&dsi_event.event_lock);
+	evq = &dsi_event.todo_list[dsi_event.event_pndx++];
+	evq->todo = events;
+	evq->arg = arg;
+	evq->ctrl = ctrl;
+	dsi_event.event_pndx %= DSI_EVENT_Q_MAX;
+	wake_up(&dsi_event.event_q);
+	spin_unlock(&dsi_event.event_lock);
+}
+
+/*
+ * dsi_event_thread() - kthread servicing deferred DSI error/recovery events.
+ * @data: pointer to the global struct mdss_dsi_event queue.
+ *
+ * Runs as a SCHED_FIFO task; dequeues events posted by dsi_send_events()
+ * (typically from the DSI error ISR) and performs the recovery work that
+ * cannot be done in interrupt context: PLL relock, FIFO underflow/overflow
+ * recovery, MDP-busy release, HS clock-lane stop and LP RX timeout reset.
+ * Never returns in practice (infinite service loop).
+ */
+static int dsi_event_thread(void *data)
+{
+	struct mdss_dsi_event *ev;
+	struct dsi_event_q *evq;
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	unsigned long flag;
+	struct sched_param param;
+	u32 todo = 0, ln_status, force_clk_ln_hs;
+	u32 arg;
+	int ret;
+
+	param.sched_priority = 16;
+	/* fix: was mis-encoded "¶m" (mojibake for "&param") */
+	ret = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	if (ret)
+		pr_err("%s: set priority failed\n", __func__);
+
+	ev = (struct mdss_dsi_event *)data;
+	/* event */
+	init_waitqueue_head(&ev->event_q);
+	spin_lock_init(&ev->event_lock);
+
+	while (1) {
+		wait_event(ev->event_q, (ev->event_pndx != ev->event_gndx));
+		/* pop one entry under the queue lock, then process unlocked */
+		spin_lock_irqsave(&ev->event_lock, flag);
+		evq = &ev->todo_list[ev->event_gndx++];
+		todo = evq->todo;
+		ctrl = evq->ctrl;
+		arg = evq->arg;
+		evq->todo = 0;
+		ev->event_gndx %= DSI_EVENT_Q_MAX;
+		spin_unlock_irqrestore(&ev->event_lock, flag);
+
+		pr_debug("%s: ev=%x\n", __func__, todo);
+
+		if (todo & DSI_EV_PLL_UNLOCKED)
+			mdss_dsi_pll_relock(ctrl);
+
+		if (todo & DSI_EV_DLNx_FIFO_UNDERFLOW) {
+			mutex_lock(&ctrl->mutex);
+			if (ctrl->recovery) {
+				pr_debug("%s: Handling underflow event\n",
+							__func__);
+				__dsi_fifo_error_handler(ctrl, true);
+			}
+			mutex_unlock(&ctrl->mutex);
+		}
+
+		if (todo & DSI_EV_DSI_FIFO_EMPTY)
+			__dsi_fifo_error_handler(ctrl, false);
+
+		if (todo & DSI_EV_DLNx_FIFO_OVERFLOW) {
+			mutex_lock(&dsi_mtx);
+			/*
+			 * For targets other than msm8994,
+			 * run the overflow recovery sequence only when
+			 * data lanes are in stop state and
+			 * clock lane is not in Stop State.
+			 */
+			ln_status = MIPI_INP(ctrl->ctrl_base + 0x00a8);
+			force_clk_ln_hs = (MIPI_INP(ctrl->ctrl_base + 0x00ac)
+					& BIT(28));
+			pr_debug("%s: lane_status: 0x%x\n",
+				       __func__, ln_status);
+			if (ctrl->recovery
+					&& (ctrl->shared_data->hw_rev
+					!= MDSS_DSI_HW_REV_103)
+					&& !(force_clk_ln_hs)
+					&& (ln_status
+					& DSI_DATA_LANES_STOP_STATE)
+					&& !(ln_status
+					& DSI_CLK_LANE_STOP_STATE)) {
+				pr_debug("%s: Handling overflow event.\n",
+								__func__);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_ON);
+				mdss_dsi_ctl_phy_reset(ctrl,
+						DSI_EV_DLNx_FIFO_OVERFLOW);
+				/* re-arm error interrupts after the PHY reset */
+				mdss_dsi_err_intr_ctrl(ctrl,
+						DSI_INTR_ERROR_MASK, 1);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_OFF);
+			} else if (ctrl->recovery
+					&& (ctrl->shared_data->hw_rev
+					== MDSS_DSI_HW_REV_103)) {
+				/* msm8994 (rev 1.3.0): recover unconditionally */
+				pr_debug("%s: Handle overflow->Rev_103\n",
+								__func__);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_ON);
+				mdss_dsi_ctl_phy_reset(ctrl,
+						DSI_EV_DLNx_FIFO_OVERFLOW);
+				mdss_dsi_err_intr_ctrl(ctrl,
+						DSI_INTR_ERROR_MASK, 1);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_OFF);
+			}
+			mutex_unlock(&dsi_mtx);
+		}
+
+		if (todo & DSI_EV_MDP_BUSY_RELEASE) {
+			pr_debug("%s: Handling MDP_BUSY_RELEASE event\n",
+							__func__);
+			spin_lock_irqsave(&ctrl->mdp_lock, flag);
+			ctrl->mdp_busy = false;
+			mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
+			complete(&ctrl->mdp_comp);
+			spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+			/* enable dsi error interrupt */
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+			mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+		}
+
+		if (todo & DSI_EV_STOP_HS_CLK_LANE)
+			mdss_dsi_stop_hs_clk_lane(ctrl);
+
+		if (todo & DSI_EV_LP_RX_TIMEOUT) {
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+			mdss_dsi_ctl_phy_reset(ctrl, DSI_EV_LP_RX_TIMEOUT);
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_ack_err_status() - read and clear DSI_ACK_ERR_STATUS (0x0068).
+ * Returns true when a real ACK error was seen and cleared; false when the
+ * register was clean or the error is a known-false alarm after a BTA-based
+ * ESD check (bits 0x1008000).
+ */
+bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x0068);/* DSI_ACK_ERR_STATUS */
+
+	if (status) {
+		MIPI_OUTP(base + 0x0068, status);
+		/* Writing of an extra 0 needed to clear error bits */
+		MIPI_OUTP(base + 0x0068, 0);
+		/*
+		 * After bta done, h/w may have a fake overflow and
+		 * that overflow may further cause ack_err about 3 ms
+		 * later which is another false alarm. Here the
+		 * warning message is ignored.
+		 */
+		if (ctrl->panel_data.panel_info.esd_check_enabled &&
+			(ctrl->status_mode == ESD_BTA) && (status & 0x1008000))
+			return false;
+
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_timeout_status() - read and clear DSI_TIMEOUT_STATUS (0x00c0).
+ * On an LP RX/BTA timeout (bits 0x0110) a DSI_EV_LP_RX_TIMEOUT event is
+ * queued so the PHY reset happens in thread context, not in the ISR.
+ * Returns true when any timeout bit (0x0111) was set and cleared.
+ */
+static bool mdss_dsi_timeout_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x00c0);/* DSI_TIMEOUT_STATUS */
+
+	if (status & 0x0111) {
+		MIPI_OUTP(base + 0x00c0, status);
+		if (status & 0x0110)
+			dsi_send_events(ctrl, DSI_EV_LP_RX_TIMEOUT, 0);
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_dln0_phy_err() - read and clear DSI_DLN0_PHY_ERR (0x00b4).
+ * @print_en: suppress the error log when false (caller may poll silently).
+ * Increments the per-controller PHY error counter and returns true when
+ * any lane-0 PHY error bit (0x011111) was set and cleared.
+ */
+bool mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x00b4);/* DSI_DLN0_PHY_ERR */
+
+	if (status & 0x011111) {
+		MIPI_OUTP(base + 0x00b4, status);
+		if (print_en)
+			pr_err("%s: status=%x\n", __func__, status);
+		ctrl->err_cont.phy_err_cnt++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_fifo_status() - read and clear DSI_FIFO_STATUS (0x000c).
+ * Decodes lane FIFO overflow/underflow/empty bits and queues the matching
+ * DSI_EV_* events for thread-context recovery. Overflow reporting is
+ * suppressed when masked in the error-mask register (0x10c), and FIFO-empty
+ * is ignored when it coincides with an overflow (hardware side effect).
+ * Returns true when any monitored bit (0xcccc4409) was set and cleared.
+ */
+static bool mdss_dsi_fifo_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x000c);/* DSI_FIFO_STATUS */
+
+	/* fifo underflow, overflow and empty*/
+	if (status & 0xcccc4409) {
+		MIPI_OUTP(base + 0x000c, status);
+
+		pr_err("%s: status=%x\n", __func__, status);
+
+		/*
+		 * if DSI FIFO overflow is masked,
+		 * do not report overflow error
+		 */
+		if (MIPI_INP(base + 0x10c) & 0xf0000)
+			status = status & 0xaaaaffff;
+
+		if (status & 0x44440000) {/* DLNx_HS_FIFO_OVERFLOW */
+			dsi_send_events(ctrl, DSI_EV_DLNx_FIFO_OVERFLOW, 0);
+			/* Ignore FIFO EMPTY when overflow happens */
+			status = status & 0xeeeeffff;
+		}
+		if (status & 0x88880000) /* DLNx_HS_FIFO_UNDERFLOW */
+			dsi_send_events(ctrl, DSI_EV_DLNx_FIFO_UNDERFLOW, 0);
+		if (status & 0x11110000) /* DLN_FIFO_EMPTY */
+			dsi_send_events(ctrl, DSI_EV_DSI_FIFO_EMPTY, 0);
+		ctrl->err_cont.fifo_err_cnt++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_status() - read and clear DSI_STATUS (0x0008).
+ * Only INTERLEAVE_OP_CONTENTION (bit 31) is treated as an error here.
+ * Returns true when that bit was set and cleared.
+ */
+static bool mdss_dsi_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x0008);/* DSI_STATUS */
+
+	if (status & 0x80000000) { /* INTERLEAVE_OP_CONTENTION */
+		MIPI_OUTP(base + 0x0008, status);
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_clk_status() - read and clear DSI_CLK_STATUS (0x0120).
+ * On DSI_CLK_PLL_UNLOCKED (bit 16) queues DSI_EV_PLL_UNLOCKED for a
+ * thread-context PLL relock — unless PLL-unlock is masked in the error
+ * mask register (0x10c bit 28), in which case it is silently cleared.
+ * Returns true when the unlock was reported.
+ */
+static bool mdss_dsi_clk_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+	status = MIPI_INP(base + 0x0120);/* DSI_CLK_STATUS */
+
+	if (status & 0x10000) { /* DSI_CLK_PLL_UNLOCKED */
+		MIPI_OUTP(base + 0x0120, status);
+		/* If PLL unlock is masked, do not report error */
+		if (MIPI_INP(base + 0x10c) & BIT(28))
+			return false;
+
+		dsi_send_events(ctrl, DSI_EV_PLL_UNLOCKED, 0);
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * __dsi_error_counter() - track DSI error timestamps in a ring buffer and
+ * trigger a deferred crash-dump when errors storm.
+ * Records the current time at the new ring index; the slot at index+1 is
+ * the oldest surviving entry (max_err_index entries ago). If that many
+ * errors occurred within err_time_delta ms, the XLOG timeout handler is
+ * scheduled on a workqueue to capture register dumps / panic.
+ */
+static void __dsi_error_counter(struct dsi_err_container *err_container)
+{
+	s64 prev_time, curr_time;
+	int prev_index;
+
+	err_container->err_cnt++;
+
+	err_container->index = (err_container->index + 1) %
+		err_container->max_err_index;
+	curr_time = ktime_to_ms(ktime_get());
+	err_container->err_time[err_container->index] = curr_time;
+
+	/* index+1 is the oldest recorded timestamp in the ring */
+	prev_index = (err_container->index + 1) % err_container->max_err_index;
+	prev_time = err_container->err_time[prev_index];
+
+	/* prev_time == 0 means the ring has not wrapped yet */
+	if (prev_time &&
+		((curr_time - prev_time) < err_container->err_time_delta)) {
+		pr_err("%s: panic in WQ as dsi error intrs within:%dms\n",
+			__func__, err_container->err_time_delta);
+		MDSS_XLOG_TOUT_HANDLER_WQ("mdp", "dsi0_ctrl", "dsi0_phy",
+			"dsi1_ctrl", "dsi1_phy");
+	}
+}
+
+/*
+ * mdss_dsi_error() - top-level DSI error handler, called from the ISR.
+ * Bails out if error interrupts are masked (spurious). Otherwise disables
+ * error interrupts, runs every status-register checker (each reads and
+ * clears its own register), acknowledges the error interrupt bit, feeds
+ * the error-storm counter, and queues DSI_EV_MDP_BUSY_RELEASE which also
+ * re-enables error interrupts from thread context.
+ * Context: hard IRQ.
+ */
+void mdss_dsi_error(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 intr, mask;
+	bool err_handled = false;
+
+	/* Ignore the interrupt if the error intr mask is not set */
+	mask = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	if (!(mask & DSI_INTR_ERROR_MASK)) {
+		pr_debug("%s: Ignore interrupt as error mask not set, 0x%x\n",
+			__func__, mask);
+		return;
+	}
+
+	/* disable dsi error interrupt */
+	mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 0);
+
+	/* DSI_ERR_INT_MASK0 */
+	err_handled |= mdss_dsi_clk_status(ctrl);	/* Mask0, 0x10000000 */
+	err_handled |= mdss_dsi_fifo_status(ctrl);	/* mask0, 0x133d00 */
+	err_handled |= mdss_dsi_ack_err_status(ctrl);	/* mask0, 0x01f */
+	err_handled |= mdss_dsi_timeout_status(ctrl);	/* mask0, 0x0e0 */
+	err_handled |= mdss_dsi_status(ctrl);		/* mask0, 0xc0100 */
+	err_handled |= mdss_dsi_dln0_phy_err(ctrl, true);/* mask0, 0x3e00000 */
+
+	/* clear dsi error interrupt */
+	intr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	intr &= DSI_INTR_TOTAL_MASK;
+	intr |= DSI_INTR_ERROR;
+	MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr);
+
+	if (err_handled)
+		__dsi_error_counter(&ctrl->err_cont);
+
+	dsi_send_events(ctrl, DSI_EV_MDP_BUSY_RELEASE, 0);
+}
+
+/*
+ * mdss_dsi_isr() - DSI controller interrupt service routine.
+ * Reads DSI_INTR_CTRL (0x0110), acknowledges all pending bits except
+ * DSI_INTR_ERROR (which mdss_dsi_error() acknowledges itself after the
+ * status registers are drained), then completes the waiter for each
+ * done-type interrupt: BTA, video, CMD DMA, CMD MDP and dynamic refresh.
+ * Context: hard IRQ; only spinlocks taken.
+ */
+irqreturn_t mdss_dsi_isr(int irq, void *ptr)
+{
+	u32 isr;
+	u32 intr;
+	struct mdss_dsi_ctrl_pdata *ctrl =
+		(struct mdss_dsi_ctrl_pdata *)ptr;
+
+	if (!ctrl->ctrl_base) {
+		pr_err("%s:%d DSI base adr no Initialized",
+		       __func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
+	/* ack everything except ERROR, which is acked in mdss_dsi_error() */
+	MIPI_OUTP(ctrl->ctrl_base + 0x0110, (isr & ~DSI_INTR_ERROR));
+
+	pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
+
+	if (isr & DSI_INTR_ERROR) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x97);
+		mdss_dsi_error(ctrl);
+	}
+
+	if (isr & DSI_INTR_BTA_DONE) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x96);
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
+		complete(&ctrl->bta_comp);
+		/*
+		 * When bta done happens, the panel should be in good
+		 * state. However, bta could cause the fake overflow
+		 * error for video mode. The similar issue happens when
+		 * sending dcs cmd. This overflow further causes
+		 * flicking because of phy reset which is unncessary,
+		 * so here overflow error is ignored, and errors are
+		 * cleared.
+		 */
+		if (ctrl->panel_data.panel_info.esd_check_enabled &&
+				(ctrl->status_mode == ESD_BTA) &&
+				(ctrl->panel_mode == DSI_VIDEO_MODE)) {
+			isr &= ~DSI_INTR_ERROR;
+			/* clear only overflow */
+			mdss_dsi_set_reg(ctrl, 0x0c, 0x44440000, 0x44440000);
+		}
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_VIDEO_DONE) {
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
+		complete(&ctrl->video_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_CMD_DMA_DONE) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x98);
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
+		complete(&ctrl->dma_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x99);
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
+		if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+			ctrl->panel_mode == DSI_CMD_MODE) {
+			/* stop force clk lane hs */
+			mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0);
+			dsi_send_events(ctrl, DSI_EV_STOP_HS_CLK_LANE,
+					DSI_MDP_TERM);
+		}
+		ctrl->mdp_busy = false;
+		complete_all(&ctrl->mdp_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_DYNAMIC_REFRESH_DONE) {
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_DYNAMIC_TERM);
+
+		/* clear dfps interrupt */
+		intr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+		intr |= DSI_INTR_DYNAMIC_REFRESH_DONE;
+		MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr);
+
+		complete(&ctrl->dynamic_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
new file mode 100644
index 0000000..d84cf5e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -0,0 +1,2883 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "mdss_dsi.h"
+#ifdef TARGET_HW_MDSS_HDMI
+#include "mdss_dba_utils.h"
+#endif
+#define DT_CMD_HDR 6
+#define MIN_REFRESH_RATE 48
+#define DEFAULT_MDP_TRANSFER_TIME 14000
+
+#define VSYNC_DELAY msecs_to_jiffies(17)
+
+DEFINE_LED_TRIGGER(bl_led_trigger);
+
+/*
+ * mdss_dsi_panel_pwm_cfg() - request the LPG PWM channel used for the
+ * panel backlight. No-op when the PMI-side PWM is used instead.
+ * On failure only logs; ctrl->pwm_bl is left NULL/ERR for callers to check.
+ */
+void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->pwm_pmi)
+		return;
+
+	ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt");
+	if (ctrl->pwm_bl == NULL || IS_ERR(ctrl->pwm_bl)) {
+		pr_err("%s: Error: lpg_chan=%d pwm request failed",
+				__func__, ctrl->pwm_lpg_chan);
+	}
+	ctrl->pwm_enabled = 0;
+}
+
+/*
+ * mdss_dsi_panel_pwm_enable() - re-assert the backlight PWM hardware when
+ * the driver-side flag says it should be running (e.g. after resume).
+ * Returns true on success or when the PWM was not flagged enabled.
+ * NOTE(review): a PWM with pwm_enabled == 0 is deliberately left untouched
+ * here; this only re-arms an already-enabled channel — confirm against
+ * callers if the guard polarity ever looks suspicious.
+ */
+bool mdss_dsi_panel_pwm_enable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	bool status = true;
+
+	if (!ctrl->pwm_enabled)
+		goto end;
+
+	if (pwm_enable(ctrl->pwm_bl)) {
+		pr_err("%s: pwm_enable() failed\n", __func__);
+		status = false;
+	}
+
+	ctrl->pwm_enabled = 1;
+
+end:
+	return status;
+}
+
+/*
+ * mdss_dsi_panel_bklt_pwm() - set backlight brightness through the PWM.
+ * @level: 0 disables the PWM; otherwise scaled against bklt_max to a duty
+ *         cycle over pwm_period (microseconds).
+ * Uses pwm_config_us() for periods >= 1s and nanosecond pwm_config()
+ * otherwise, and enables the PWM on the first nonzero level.
+ */
+static void mdss_dsi_panel_bklt_pwm(struct mdss_dsi_ctrl_pdata *ctrl, int level)
+{
+	int ret;
+	u32 duty;
+	u32 period_ns;
+
+	if (ctrl->pwm_bl == NULL) {
+		pr_err("%s: no PWM\n", __func__);
+		return;
+	}
+
+	if (level == 0) {
+		/* zero duty first, then disable, and clear the sw flag */
+		if (ctrl->pwm_enabled) {
+			ret = pwm_config_us(ctrl->pwm_bl, level,
+					ctrl->pwm_period);
+			if (ret)
+				pr_err("%s: pwm_config_us() failed err=%d.\n",
+						__func__, ret);
+			pwm_disable(ctrl->pwm_bl);
+		}
+		ctrl->pwm_enabled = 0;
+		return;
+	}
+
+	duty = level * ctrl->pwm_period;
+	duty /= ctrl->bklt_max;
+
+	pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n",
+			__func__, ctrl->bklt_ctrl, ctrl->pwm_period,
+				ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan);
+
+	pr_debug("%s: ndx=%d level=%d duty=%d\n", __func__,
+					ctrl->ndx, level, duty);
+
+	if (ctrl->pwm_period >= USEC_PER_SEC) {
+		ret = pwm_config_us(ctrl->pwm_bl, duty, ctrl->pwm_period);
+		if (ret) {
+			pr_err("%s: pwm_config_us() failed err=%d.\n",
+					__func__, ret);
+			return;
+		}
+	} else {
+		/* short periods: use the ns-resolution API to avoid rounding */
+		period_ns = ctrl->pwm_period * NSEC_PER_USEC;
+		ret = pwm_config(ctrl->pwm_bl,
+				level * period_ns / ctrl->bklt_max,
+				period_ns);
+		if (ret) {
+			pr_err("%s: pwm_config() failed err=%d.\n",
+					__func__, ret);
+			return;
+		}
+	}
+
+	if (!ctrl->pwm_enabled) {
+		ret = pwm_enable(ctrl->pwm_bl);
+		if (ret)
+			pr_err("%s: pwm_enable() failed err=%d\n", __func__,
+				ret);
+		ctrl->pwm_enabled = 1;
+	}
+}
+
+/* Reusable 2-byte DCS read command; dcs_cmd[0..1] are patched per call by
+ * mdss_dsi_panel_cmd_read() before the descriptor is queued. */
+static char dcs_cmd[2] = {0x54, 0x00}; /* DTYPE_DCS_READ */
+static struct dsi_cmd_desc dcs_read_cmd = {
+	{DTYPE_DCS_READ, 1, 0, 1, 5, sizeof(dcs_cmd)},
+	dcs_cmd
+};
+
+/*
+ * mdss_dsi_panel_cmd_read() - issue a DCS read to the panel.
+ * @cmd0/@cmd1: bytes patched into the shared dcs_cmd payload (not
+ *              re-entrant — the payload buffer is a file-scope static).
+ * @fxn:  completion callback invoked with the read result.
+ * @rbuf/@len: caller-provided buffer and expected read length.
+ * When dcs_cmd_by_left is set, only the left controller issues the read.
+ * Returns the result of mdss_dsi_cmdlist_put() (blocks until callback).
+ */
+int mdss_dsi_panel_cmd_read(struct mdss_dsi_ctrl_pdata *ctrl, char cmd0,
+		char cmd1, void (*fxn)(int), char *rbuf, int len)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			return -EINVAL;
+	}
+
+	dcs_cmd[0] = cmd0;
+	dcs_cmd[1] = cmd1;
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &dcs_read_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
+	cmdreq.rlen = len;
+	cmdreq.rbuf = rbuf;
+	cmdreq.cb = fxn; /* call back */
+	/*
+	 * blocked here, until call back called
+	 */
+
+	return mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_panel_apply_settings() - send a plain command set to the panel
+ * (used for low-persistence mode on/off). Skipped on the right controller
+ * when dcs_cmd_by_left is set. Fire-and-forget: no link-state flags, no
+ * read-back, return value of cmdlist_put is intentionally ignored.
+ */
+static void mdss_dsi_panel_apply_settings(struct mdss_dsi_ctrl_pdata *ctrl,
+		struct dsi_panel_cmds *pcmds)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if ((pinfo->dcs_cmd_by_left) && (ctrl->ndx != DSI_CTRL_LEFT))
+		return;
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = pcmds->cmds;
+	cmdreq.cmds_cnt = pcmds->cmd_cnt;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_panel_cmds_send() - queue a panel command set with caller flags,
+ * adding the LP/HS link-state flag recorded in the command set itself.
+ * Skipped on the right controller when dcs_cmd_by_left is set.
+ */
+static void mdss_dsi_panel_cmds_send(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_panel_cmds *pcmds, u32 flags)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			return;
+	}
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = pcmds->cmds;
+	cmdreq.cmds_cnt = pcmds->cmd_cnt;
+	cmdreq.flags = flags;
+
+	/*Panel ON/Off commands should be sent in DSI Low Power Mode*/
+	if (pcmds->link_state == DSI_LP_MODE)
+		cmdreq.flags  |= CMD_REQ_LP_MODE;
+	else if (pcmds->link_state == DSI_HS_MODE)
+		cmdreq.flags |= CMD_REQ_HS_MODE;
+
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/* DCS 0x51 (set display brightness); led_pwm1[1] is patched with the
+ * requested level by mdss_dsi_panel_bklt_dcs() before sending. */
+static char led_pwm1[2] = {0x51, 0x0};	/* DTYPE_DCS_WRITE1 */
+static struct dsi_cmd_desc backlight_cmd = {
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 1, sizeof(led_pwm1)},
+	led_pwm1
+};
+
+/*
+ * mdss_dsi_panel_bklt_dcs() - set backlight brightness via DCS command
+ * 0x51. Level is truncated to one byte (shared led_pwm1 payload — not
+ * re-entrant). Skipped on the right controller when dcs_cmd_by_left.
+ */
+static void mdss_dsi_panel_bklt_dcs(struct mdss_dsi_ctrl_pdata *ctrl, int level)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			return;
+	}
+
+	pr_debug("%s: level=%d\n", __func__, level);
+
+	led_pwm1[1] = (unsigned char)level;
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &backlight_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_panel_set_idle_mode() - enter/exit panel idle mode by sending
+ * the DT-provided idle-on/idle-off command sets. The cached ctrl->idle
+ * flag is only updated when the matching command set is non-empty, so a
+ * panel without idle commands never toggles state.
+ */
+static void mdss_dsi_panel_set_idle_mode(struct mdss_panel_data *pdata,
+							bool enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
+
+	if (ctrl->idle == enable)
+		return;
+
+	if (enable) {
+		if (ctrl->idle_on_cmds.cmd_cnt) {
+			mdss_dsi_panel_cmds_send(ctrl, &ctrl->idle_on_cmds,
+					CMD_REQ_COMMIT);
+			ctrl->idle = true;
+			pr_debug("Idle on\n");
+		}
+	} else {
+		if (ctrl->idle_off_cmds.cmd_cnt) {
+			mdss_dsi_panel_cmds_send(ctrl, &ctrl->idle_off_cmds,
+					CMD_REQ_COMMIT);
+			ctrl->idle = false;
+			pr_debug("Idle off\n");
+		}
+	}
+}
+
+/*
+ * mdss_dsi_panel_get_idle_mode() - report the cached panel idle state.
+ * Returns false (0) on a NULL pdata as a defensive default.
+ */
+static bool mdss_dsi_panel_get_idle_mode(struct mdss_panel_data *pdata)
+
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return 0;
+	}
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	return ctrl->idle;
+}
+
+/*
+ * mdss_dsi_request_gpios() - claim the panel control GPIOs (display enable,
+ * reset, backlight enable, mode select). Reset is mandatory; the others
+ * are requested only when their GPIO number is valid. On any failure, all
+ * previously requested GPIOs are released via the unwind labels below and
+ * the gpio_request() error code is returned.
+ */
+static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc = 0;
+
+	if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		rc = gpio_request(ctrl_pdata->disp_en_gpio,
+						"disp_enable");
+		if (rc) {
+			pr_err("request disp_en gpio failed, rc=%d\n",
+				       rc);
+			goto disp_en_gpio_err;
+		}
+	}
+	rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+	if (rc) {
+		pr_err("request reset gpio failed, rc=%d\n",
+			rc);
+		goto rst_gpio_err;
+	}
+	if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
+		rc = gpio_request(ctrl_pdata->bklt_en_gpio,
+						"bklt_enable");
+		if (rc) {
+			pr_err("request bklt gpio failed, rc=%d\n",
+				       rc);
+			goto bklt_en_gpio_err;
+		}
+	}
+	if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
+		rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
+		if (rc) {
+			pr_err("request panel mode gpio failed,rc=%d\n",
+								rc);
+			goto mode_gpio_err;
+		}
+	}
+	return rc;
+
+/* unwind in reverse order of acquisition */
+mode_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+		gpio_free(ctrl_pdata->bklt_en_gpio);
+bklt_en_gpio_err:
+	gpio_free(ctrl_pdata->rst_gpio);
+rst_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+		gpio_free(ctrl_pdata->disp_en_gpio);
+disp_en_gpio_err:
+	return rc;
+}
+
+/*
+ * mdss_dsi_panel_reset() - assert/deassert the panel hardware control lines.
+ * @enable: 1 to request GPIOs and run the DT reset sequence (skipped under
+ *          continuous splash), 0 to drive lines low and free the GPIOs.
+ * For DBA (external bridge) panels only the interface mux GPIO is handled.
+ * Returns 0 on success or a negative errno from GPIO setup.
+ */
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	int i, rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	/* need to configure intf mux only for external interface */
+	if (pinfo->is_dba_panel) {
+		if (enable) {
+			if (gpio_is_valid(ctrl_pdata->intf_mux_gpio)) {
+				rc = gpio_request(ctrl_pdata->intf_mux_gpio,
+						"intf_mux");
+				if (rc) {
+					pr_err("request mux gpio failed, rc=%d\n",
+									rc);
+					return rc;
+				}
+				rc = gpio_direction_output(
+					ctrl_pdata->intf_mux_gpio, 0);
+				if (rc) {
+					pr_err("%s: unable to set dir for intf mux gpio\n",
+								__func__);
+					goto exit;
+				}
+				gpio_set_value(ctrl_pdata->intf_mux_gpio, 0);
+			} else {
+				pr_debug("%s:%d, intf mux gpio not specified\n",
+							__func__, __LINE__);
+			}
+		} else {
+			if (gpio_is_valid(ctrl_pdata->intf_mux_gpio))
+				gpio_free(ctrl_pdata->intf_mux_gpio);
+		}
+	}
+
+	/* split-DSI right controller shares the left's GPIOs: nothing to do */
+	if ((mdss_dsi_is_right_ctrl(ctrl_pdata) &&
+		mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) ||
+			pinfo->is_dba_panel) {
+		pr_debug("%s:%d, right ctrl gpio configuration not needed\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+	}
+
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+		return rc;
+	}
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+
+	if (enable) {
+		rc = mdss_dsi_request_gpios(ctrl_pdata);
+		if (rc) {
+			pr_err("gpio request failed\n");
+			return rc;
+		}
+		if (!pinfo->cont_splash_enabled) {
+			if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+				rc = gpio_direction_output(
+					ctrl_pdata->disp_en_gpio, 1);
+				if (rc) {
+					pr_err("%s: unable to set dir for en gpio\n",
+						__func__);
+					goto exit;
+				}
+			}
+
+			if (pdata->panel_info.rst_seq_len) {
+				rc = gpio_direction_output(ctrl_pdata->rst_gpio,
+					pdata->panel_info.rst_seq[0]);
+				if (rc) {
+					pr_err("%s: unable to set dir for rst gpio\n",
+						__func__);
+					goto exit;
+				}
+			}
+
+			/* rst_seq is (level, delay-ms) pairs: i advances by 2 */
+			for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
+				gpio_set_value((ctrl_pdata->rst_gpio),
+					pdata->panel_info.rst_seq[i]);
+				if (pdata->panel_info.rst_seq[++i])
+					usleep_range(pinfo->rst_seq[i] * 1000,
+						pinfo->rst_seq[i] * 1000);
+			}
+
+			if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
+				rc = gpio_direction_output(
+					ctrl_pdata->bklt_en_gpio, 1);
+				if (rc) {
+					pr_err("%s: unable to set dir for bklt gpio\n",
+						__func__);
+					goto exit;
+				}
+			}
+		}
+
+		if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
+			bool out = false;
+
+			if (pinfo->mode_gpio_state == MODE_GPIO_HIGH)
+				out = true;
+			else if (pinfo->mode_gpio_state == MODE_GPIO_LOW)
+				out = false;
+
+			rc = gpio_direction_output(ctrl_pdata->mode_gpio, out);
+			if (rc) {
+				pr_err("%s: unable to set dir for mode gpio\n",
+					__func__);
+				goto exit;
+			}
+		}
+		if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+			pr_debug("%s: Panel Not properly turned OFF\n",
+						__func__);
+			ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT;
+			pr_debug("%s: Reset panel done\n", __func__);
+		}
+	} else {
+		if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
+			gpio_set_value((ctrl_pdata->bklt_en_gpio), 0);
+			gpio_free(ctrl_pdata->bklt_en_gpio);
+		}
+		if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+			gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
+			gpio_free(ctrl_pdata->disp_en_gpio);
+		}
+		gpio_set_value((ctrl_pdata->rst_gpio), 0);
+		gpio_free(ctrl_pdata->rst_gpio);
+		if (gpio_is_valid(ctrl_pdata->mode_gpio))
+			gpio_free(ctrl_pdata->mode_gpio);
+	}
+
+exit:
+	return rc;
+}
+
+/**
+ * mdss_dsi_roi_merge() - merge two roi into single roi
+ *
+ * Function used by partial update with only one dsi intf take 2A/2B
+ * (column/page) dcs commands.
+ */
+/*
+ * Merge left/right controller ROIs into one panel-wide ROI.
+ * Returns 1 when the left ROI contributed (left-only or left+right),
+ * 0 when the merged ROI is right-only or the peer controller is missing.
+ */
+static int mdss_dsi_roi_merge(struct mdss_dsi_ctrl_pdata *ctrl,
+					struct mdss_rect *roi)
+{
+	struct mdss_panel_info *l_pinfo;
+	struct mdss_rect *l_roi;
+	struct mdss_rect *r_roi;
+	struct mdss_dsi_ctrl_pdata *other = NULL;
+	int ans = 0;
+
+	/* resolve which ctrl is left vs right regardless of the caller */
+	if (ctrl->ndx == DSI_CTRL_LEFT) {
+		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_RIGHT);
+		if (!other)
+			return ans;
+		l_pinfo = &(ctrl->panel_data.panel_info);
+		l_roi = &(ctrl->panel_data.panel_info.roi);
+		r_roi = &(other->panel_data.panel_info.roi);
+	} else  {
+		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
+		if (!other)
+			return ans;
+		l_pinfo = &(other->panel_data.panel_info);
+		l_roi = &(other->panel_data.panel_info.roi);
+		r_roi = &(ctrl->panel_data.panel_info.roi);
+	}
+
+	if (l_roi->w == 0 && l_roi->h == 0) {
+		/* right only */
+		*roi = *r_roi;
+		roi->x += l_pinfo->xres;/* add left full width to x-offset */
+	} else {
+		/* left only and left+righ */
+		*roi = *l_roi;
+		roi->w +=  r_roi->w; /* add right width */
+		ans = 1;
+	}
+
+	return ans;
+}
+
+/* DCS 2A (column address set) / 2B (page address set) payloads; the four
+ * coordinate bytes are patched per call by mdss_dsi_send_col_page_addr(). */
+static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00};	/* DTYPE_DCS_LWRITE */
+static char paset[] = {0x2b, 0x00, 0x00, 0x05, 0x00};	/* DTYPE_DCS_LWRITE */
+
+/* pack into one frame before sent */
+static struct dsi_cmd_desc set_col_page_addr_cmd[] = {
+	{{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset)}, caset},	/* packed */
+	{{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset)}, paset},
+};
+
+/*
+ * mdss_dsi_send_col_page_addr() - program the panel's column/page address
+ * window (DCS 2A/2B) from @roi. End coordinates are inclusive, hence the
+ * "x - 1 + w" / "y - 1 + h" arithmetic. @unicast restricts the command to
+ * this controller in broadcast setups. Payload buffers are shared statics
+ * — callers serialize via the cmdlist path.
+ */
+static void mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct mdss_rect *roi, int unicast)
+{
+	struct dcs_cmd_req cmdreq;
+
+	caset[1] = (((roi->x) & 0xFF00) >> 8);
+	caset[2] = (((roi->x) & 0xFF));
+	caset[3] = (((roi->x - 1 + roi->w) & 0xFF00) >> 8);
+	caset[4] = (((roi->x - 1 + roi->w) & 0xFF));
+	set_col_page_addr_cmd[0].payload = caset;
+
+	paset[1] = (((roi->y) & 0xFF00) >> 8);
+	paset[2] = (((roi->y) & 0xFF));
+	paset[3] = (((roi->y - 1 + roi->h) & 0xFF00) >> 8);
+	paset[4] = (((roi->y - 1 + roi->h) & 0xFF));
+	set_col_page_addr_cmd[1].payload = paset;
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds_cnt = 2;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	if (unicast)
+		cmdreq.flags |= CMD_REQ_UNICAST;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	cmdreq.cmds = set_col_page_addr_cmd;
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_set_col_page_addr() - update the panel 2A/2B address window for
+ * partial update, sending commands only when the ROI actually changed (or
+ * @force_send). Handles roi-merge, dcs_cmd_by_left and sync-wait-broadcast
+ * topologies, picking which controller(s) transmit and in what order.
+ * Returns 0; a zero-sized ROI is a "false kickoff" and sends nothing.
+ */
+static int mdss_dsi_set_col_page_addr(struct mdss_panel_data *pdata,
+		bool force_send)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_rect roi = {0};
+	struct mdss_rect *p_roi;
+	struct mdss_rect *c_roi;
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mdss_dsi_ctrl_pdata *other = NULL;
+	int left_or_both = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &pdata->panel_info;
+	p_roi = &pinfo->roi;
+
+	/*
+	 * to avoid keep sending same col_page info to panel,
+	 * if roi_merge enabled, the roi of left ctrl is used
+	 * to compare against new merged roi and saved new
+	 * merged roi to it after comparing.
+	 * if roi_merge disabled, then the calling ctrl's roi
+	 * and pinfo's roi are used to compare.
+	 */
+	if (pinfo->partial_update_roi_merge) {
+		left_or_both = mdss_dsi_roi_merge(ctrl, &roi);
+		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
+		c_roi = &other->roi;
+	} else {
+		c_roi = &ctrl->roi;
+		roi = *p_roi;
+	}
+
+	/* roi had changed, do col_page update */
+	if (force_send || !mdss_rect_cmp(c_roi, &roi)) {
+		pr_debug("%s: ndx=%d x=%d y=%d w=%d h=%d\n",
+				__func__, ctrl->ndx, p_roi->x,
+				p_roi->y, p_roi->w, p_roi->h);
+
+		*c_roi = roi; /* keep to ctrl */
+		if (c_roi->w == 0 || c_roi->h == 0) {
+			/* no new frame update */
+			pr_debug("%s: ctrl=%d, no partial roi set\n",
+						__func__, ctrl->ndx);
+			return 0;
+		}
+
+		if (pinfo->dcs_cmd_by_left) {
+			if (left_or_both && ctrl->ndx == DSI_CTRL_RIGHT) {
+				/* 2A/2B sent by left already */
+				return 0;
+			}
+		}
+
+		if (!mdss_dsi_sync_wait_enable(ctrl)) {
+			if (pinfo->dcs_cmd_by_left)
+				ctrl = mdss_dsi_get_ctrl_by_index(
+							DSI_CTRL_LEFT);
+			mdss_dsi_send_col_page_addr(ctrl, &roi, 0);
+		} else {
+			/*
+			 * when sync_wait_broadcast enabled,
+			 * need trigger at right ctrl to
+			 * start both dcs cmd transmission
+			 */
+			other = mdss_dsi_get_other_ctrl(ctrl);
+			if (!other)
+				goto end;
+
+			if (mdss_dsi_is_left_ctrl(ctrl)) {
+				if (pinfo->partial_update_roi_merge) {
+					/*
+					 * roi is the one after merged
+					 * to dsi-1 only
+					 */
+					mdss_dsi_send_col_page_addr(other,
+							&roi, 0);
+				} else {
+					mdss_dsi_send_col_page_addr(ctrl,
+							&ctrl->roi, 1);
+					mdss_dsi_send_col_page_addr(other,
+							&other->roi, 1);
+				}
+			} else {
+				if (pinfo->partial_update_roi_merge) {
+					/*
+					 * roi is the one after merged
+					 * to dsi-1 only
+					 */
+					mdss_dsi_send_col_page_addr(ctrl,
+							&roi, 0);
+				} else {
+					mdss_dsi_send_col_page_addr(other,
+							&other->roi, 1);
+					mdss_dsi_send_col_page_addr(ctrl,
+							&ctrl->roi, 1);
+				}
+			}
+		}
+	}
+
+end:
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_apply_display_setting() - apply low-persistence panel cmds
+ * @pdata:	panel data
+ * @mode:	MDSS_PANEL_LOW_PERSIST_MODE_ON or _OFF
+ *
+ * Sends the panel's lp_on/lp_off command set for the requested persistence
+ * mode. Returns 0 on success, -EINVAL when @pdata is NULL, the mode is
+ * unknown, or no commands are defined for that mode.
+ */
+static int mdss_dsi_panel_apply_display_setting(struct mdss_panel_data *pdata,
+	u32 mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	struct dsi_panel_cmds *cmds = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+
+	/* Select the command set that matches the requested mode */
+	if ((mode == MDSS_PANEL_LOW_PERSIST_MODE_ON) &&
+			ctrl->lp_on_cmds.cmd_cnt)
+		cmds = &ctrl->lp_on_cmds;
+	else if ((mode == MDSS_PANEL_LOW_PERSIST_MODE_OFF) &&
+			ctrl->lp_off_cmds.cmd_cnt)
+		cmds = &ctrl->lp_off_cmds;
+
+	if (!cmds)
+		return -EINVAL;
+
+	mdss_dsi_panel_apply_settings(ctrl, cmds);
+	pr_debug("%s: Persistence mode %d applied\n", __func__, mode);
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_switch_mode() - send dynamic mode-switch commands
+ * @pdata:	panel data
+ * @mode:	target mode (e.g. SWITCH_TO_CMD_MODE)
+ *
+ * For a video<->command switch, sends the video2cmd/cmd2video command set;
+ * for an immediate resolution switch, sends the current timing's switch
+ * commands. When DSC compression is active the PPS is sent before or after
+ * the switch commands depending on send_pps_before_switch.
+ */
+static void mdss_dsi_panel_switch_mode(struct mdss_panel_data *pdata,
+ int mode)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mipi_panel_info *mipi;
+ struct dsi_panel_cmds *pcmds;
+ u32 flags = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return;
+ }
+
+ mipi = &pdata->panel_info.mipi;
+
+ /* no-op when dynamic mode switching is disabled */
+ if (!mipi->dms_mode)
+ return;
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ if (mipi->dms_mode != DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE) {
+ flags |= CMD_REQ_COMMIT;
+ if (mode == SWITCH_TO_CMD_MODE)
+ pcmds = &ctrl_pdata->video2cmd;
+ else
+ pcmds = &ctrl_pdata->cmd2video;
+ } else if ((mipi->dms_mode ==
+ DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE)
+ && pdata->current_timing
+ && !list_empty(&pdata->timings_list)) {
+ struct dsi_panel_timing *pt;
+
+ pt = container_of(pdata->current_timing,
+ struct dsi_panel_timing, timing);
+
+ pr_debug("%s: sending switch commands\n", __func__);
+ pcmds = &pt->switch_cmds;
+ flags |= CMD_REQ_DMA_TPG;
+ flags |= CMD_REQ_COMMIT;
+ } else {
+ pr_warn("%s: Invalid mode switch attempted\n", __func__);
+ return;
+ }
+
+ /* PPS must reach the panel before/after switch per DT configuration */
+ if ((pdata->panel_info.compression_mode == COMPRESSION_DSC) &&
+ (pdata->panel_info.send_pps_before_switch))
+ mdss_dsi_panel_dsc_pps_send(ctrl_pdata, &pdata->panel_info);
+
+ mdss_dsi_panel_cmds_send(ctrl_pdata, pcmds, flags);
+
+ if ((pdata->panel_info.compression_mode == COMPRESSION_DSC) &&
+ (!pdata->panel_info.send_pps_before_switch))
+ mdss_dsi_panel_dsc_pps_send(ctrl_pdata, &pdata->panel_info);
+}
+
+/*
+ * mdss_dsi_panel_bl_ctrl() - set panel backlight brightness
+ * @pdata:	panel data
+ * @bl_level:	requested level; raised to bl_min when non-zero and below it
+ *
+ * Dispatches the request to the configured backlight controller: WLED LED
+ * trigger, PWM, or DCS commands (with split-DSI sync-wait ordering).
+ */
+static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+ u32 bl_level)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return;
+ }
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ /*
+ * Some backlight controllers specify a minimum duty cycle
+ * for the backlight brightness. If the brightness is less
+ * than it, the controller can malfunction.
+ */
+
+ if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
+ bl_level = pdata->panel_info.bl_min;
+
+ switch (ctrl_pdata->bklt_ctrl) {
+ case BL_WLED:
+ led_trigger_event(bl_led_trigger, bl_level);
+ break;
+ case BL_PWM:
+ mdss_dsi_panel_bklt_pwm(ctrl_pdata, bl_level);
+ break;
+ case BL_DCS_CMD:
+ /* simple case: no broadcast sync, send on this ctrl only */
+ if (!mdss_dsi_sync_wait_enable(ctrl_pdata)) {
+ mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+ break;
+ }
+ /*
+ * DCS commands to update backlight are usually sent at
+ * the same time to both the controllers. However, if
+ * sync_wait is enabled, we need to ensure that the
+ * dcs commands are first sent to the non-trigger
+ * controller so that when the commands are triggered,
+ * both controllers receive it at the same time.
+ */
+ sctrl = mdss_dsi_get_other_ctrl(ctrl_pdata)
+ if (mdss_dsi_sync_wait_trigger(ctrl_pdata)) {
+ if (sctrl)
+ mdss_dsi_panel_bklt_dcs(sctrl, bl_level);
+ mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+ } else {
+ mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+ if (sctrl)
+ mdss_dsi_panel_bklt_dcs(sctrl, bl_level);
+ }
+ break;
+ default:
+ pr_err("%s: Unknown bl_ctrl configuration\n",
+ __func__);
+ break;
+ }
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/* Notify the registered DBA (display bridge) that video is on. */
+static void mdss_dsi_panel_on_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct mdss_panel_info *pinfo)
+{
+	if (ctrl->ds_registered)
+		mdss_dba_utils_video_on(pinfo->dba_data, pinfo);
+}
+#else
+/* Stub when HDMI downstream support is compiled out. */
+static void mdss_dsi_panel_on_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct mdss_panel_info *pinfo)
+{
+	/*
+	 * (void)ptr is the idiomatic unused-parameter cast; the original
+	 * (void)(*ptr) form pointlessly dereferenced the pointers.
+	 */
+	(void)ctrl;
+	(void)pinfo;
+}
+#endif
+
+/*
+ * mdss_dsi_panel_on() - send the panel's power-on command sequence
+ * @pdata:	panel data
+ *
+ * Sends the on-command set (or the post-DMS on-command set after an
+ * immediate dynamic mode switch), the DSC PPS when compression is active,
+ * notifies the HDMI/DBA path, and reapplies the saved persistence mode.
+ * Returns 0, or -EINVAL when @pdata is NULL.
+ */
+static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+ struct mdss_panel_info *pinfo;
+ struct dsi_panel_cmds *on_cmds;
+ int ret = 0;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ pinfo = &pdata->panel_info;
+ ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pr_debug("%s: ndx=%d\n", __func__, ctrl->ndx);
+
+ /* when cmds go through the left ctrl only, right ctrl skips sending */
+ if (pinfo->dcs_cmd_by_left) {
+ if (ctrl->ndx != DSI_CTRL_LEFT)
+ goto end;
+ }
+
+ on_cmds = &ctrl->on_cmds;
+
+ /* after an immediate mode switch, boot-mode on-cmds no longer apply */
+ if ((pinfo->mipi.dms_mode == DYNAMIC_MODE_SWITCH_IMMEDIATE) &&
+ (pinfo->mipi.boot_mode != pinfo->mipi.mode))
+ on_cmds = &ctrl->post_dms_on_cmds;
+
+ pr_debug("%s: ndx=%d cmd_cnt=%d\n", __func__,
+ ctrl->ndx, on_cmds->cmd_cnt);
+
+ if (on_cmds->cmd_cnt)
+ mdss_dsi_panel_cmds_send(ctrl, on_cmds, CMD_REQ_COMMIT);
+
+ if (pinfo->compression_mode == COMPRESSION_DSC)
+ mdss_dsi_panel_dsc_pps_send(ctrl, pinfo);
+
+ mdss_dsi_panel_on_hdmi(ctrl, pinfo);
+
+ /* Ensure low persistence mode is set as before */
+ mdss_dsi_panel_apply_display_setting(pdata, pinfo->persist_mode);
+
+end:
+ pr_debug("%s:-\n", __func__);
+ return ret;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/* Enable HDCP on a pluggable DBA panel after at least one frame is sent. */
+static void mdss_dsi_post_panel_on_hdmi(struct mdss_panel_info *pinfo)
+{
+	u32 vsync_period = 0;
+
+	if (pinfo->is_dba_panel && pinfo->is_pluggable) {
+		/* ensure at least 1 frame transfers to down stream device */
+		vsync_period = (MSEC_PER_SEC / pinfo->mipi.frame_rate) + 1;
+		msleep(vsync_period);
+		mdss_dba_utils_hdcp_enable(pinfo->dba_data, true);
+	}
+}
+#else
+/* Stub when HDMI downstream support is compiled out. */
+static void mdss_dsi_post_panel_on_hdmi(struct mdss_panel_info *pinfo)
+{
+	/*
+	 * (void)ptr is the idiomatic unused-parameter cast; the original
+	 * (void)(*ptr) form pointlessly dereferenced the pointer.
+	 */
+	(void)pinfo;
+}
+#endif
+
+/*
+ * mdss_dsi_post_panel_on() - send commands that must follow the first frame
+ * @pdata:	panel data
+ *
+ * Waits one vsync, sends the post-panel-on command set (if any), then lets
+ * the HDMI/DBA path enable HDCP. Returns 0, or -EINVAL when @pdata is NULL.
+ */
+static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+ struct mdss_panel_info *pinfo;
+ struct dsi_panel_cmds *cmds;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pr_debug("%s: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
+
+ pinfo = &pdata->panel_info;
+ /* when cmds go through the left ctrl only, right ctrl skips sending */
+ if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+ goto end;
+
+ cmds = &ctrl->post_panel_on_cmds;
+ if (cmds->cmd_cnt) {
+ msleep(VSYNC_DELAY); /* wait for a vsync passed */
+ mdss_dsi_panel_cmds_send(ctrl, cmds, CMD_REQ_COMMIT);
+ }
+
+ mdss_dsi_post_panel_on_hdmi(pinfo);
+
+end:
+ pr_debug("%s:-\n", __func__);
+ return 0;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/* Disable video and HDCP on a registered, pluggable DBA panel. */
+static void mdss_dsi_panel_off_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct mdss_panel_info *pinfo)
+{
+	if (ctrl->ds_registered && pinfo->is_pluggable) {
+		mdss_dba_utils_video_off(pinfo->dba_data);
+		mdss_dba_utils_hdcp_enable(pinfo->dba_data, false);
+	}
+}
+#else
+/* Stub when HDMI downstream support is compiled out. */
+static void mdss_dsi_panel_off_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct mdss_panel_info *pinfo)
+{
+	/*
+	 * (void)ptr is the idiomatic unused-parameter cast; the original
+	 * (void)(*ptr) form pointlessly dereferenced the pointers.
+	 */
+	(void)ctrl;
+	(void)pinfo;
+}
+#endif
+
+/*
+ * mdss_dsi_panel_off() - send the panel's power-off command sequence
+ * @pdata:	panel data
+ *
+ * Sends the off-command set, notifies the HDMI/DBA path, and clears the
+ * panel idle flag (even when the right ctrl skips sending under
+ * dcs_cmd_by_left). Returns 0, or -EINVAL when @pdata is NULL.
+ */
+static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+ struct mdss_panel_info *pinfo;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ pinfo = &pdata->panel_info;
+ ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pr_debug("%s: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
+
+ if (pinfo->dcs_cmd_by_left) {
+ if (ctrl->ndx != DSI_CTRL_LEFT)
+ goto end;
+ }
+
+ if (ctrl->off_cmds.cmd_cnt)
+ mdss_dsi_panel_cmds_send(ctrl, &ctrl->off_cmds, CMD_REQ_COMMIT);
+
+ mdss_dsi_panel_off_hdmi(ctrl, pinfo);
+
+end:
+ /* clear idle state */
+ ctrl->idle = false;
+ pr_debug("%s:-\n", __func__);
+ return 0;
+}
+
+/*
+ * mdss_dsi_panel_low_power_config() - enter/exit panel low power state
+ * @pdata:	panel data
+ * @enable:	non-zero to enter low power (idle) mode, zero to exit
+ *
+ * Currently the only panel-specific low power action is toggling the
+ * panel idle mode. Returns 0, or -EINVAL when @pdata is NULL.
+ */
+static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
+	int enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	struct mdss_panel_info *pinfo;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+
+	pr_debug("%s: ctrl=%pK ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx,
+		enable);
+
+	/* Any panel specific low power commands/config */
+	/* Control idle mode for panel */
+	mdss_dsi_panel_set_idle_mode(pdata, enable ? true : false);
+
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_trigger() - read a DSI command trigger type from DT
+ * @np:		device node to read from
+ * @trigger:	out: trigger id; defaults to DSI_CMD_TRIGGER_SW when the
+ *		property is absent or holds an unknown value
+ * @trigger_key: DT property name to look up
+ */
+static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
+	char *trigger_key)
+{
+	static const struct {
+		const char *name;
+		char value;
+	} map[] = {
+		{ "none", DSI_CMD_TRIGGER_NONE },
+		{ "trigger_te", DSI_CMD_TRIGGER_TE },
+		{ "trigger_sw_seof", DSI_CMD_TRIGGER_SW_SEOF },
+		{ "trigger_sw_te", DSI_CMD_TRIGGER_SW_TE },
+	};
+	const char *data;
+	unsigned int i;
+
+	*trigger = DSI_CMD_TRIGGER_SW;
+	data = of_get_property(np, trigger_key, NULL);
+	if (!data)
+		return;
+
+	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
+		if (!strcmp(data, map[i].name)) {
+			*trigger = map[i].value;
+			break;
+		}
+	}
+}
+
+
+/*
+ * mdss_dsi_parse_dcs_cmds() - parse a DT property into DSI command descs
+ * @np:		device node holding the command blob
+ * @pcmds:	out: parsed command array, backing buffer, and link state
+ * @cmd_key:	DT property containing the packed dchdr+payload stream
+ * @link_key:	optional DT property selecting HS vs LP link state
+ *
+ * Copies the raw property, validates each dsi_ctrl_hdr/payload pair, then
+ * builds the dsi_cmd_desc array pointing into the copied buffer (owned by
+ * @pcmds). Returns 0 on success, -ENOMEM on allocation or parse failure
+ * (historical: a missing property also returns -ENOMEM).
+ */
+static int mdss_dsi_parse_dcs_cmds(struct device_node *np,
+		struct dsi_panel_cmds *pcmds, char *cmd_key, char *link_key)
+{
+	const char *data;
+	int blen = 0, len;
+	char *buf, *bp;
+	struct dsi_ctrl_hdr *dchdr;
+	int i, cnt;
+
+	data = of_get_property(np, cmd_key, &blen);
+	if (!data) {
+		pr_err("%s: failed, key=%s\n", __func__, cmd_key);
+		return -ENOMEM;
+	}
+
+	buf = kcalloc(blen, sizeof(char), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf, data, blen);
+
+	/* scan dcs commands */
+	bp = buf;
+	len = blen;
+	cnt = 0;
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		dchdr->dlen = ntohs(dchdr->dlen);
+		bp += sizeof(*dchdr);
+		len -= sizeof(*dchdr);
+		/*
+		 * Fix: validate dlen against the bytes remaining AFTER the
+		 * header. The original compared dlen with the pre-header
+		 * length, so a corrupt dlen equal to the remaining total
+		 * drove len negative, and the signed-int vs size_t loop
+		 * condition then kept reading past the buffer. A negative
+		 * dlen (ntohs result stored in a signed field) is rejected
+		 * for the same reason.
+		 */
+		if (dchdr->dlen < 0 || dchdr->dlen > len) {
+			pr_err("%s: dtsi cmd=%x error, len=%d",
+				__func__, dchdr->dtype, dchdr->dlen);
+			goto exit_free;
+		}
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+		cnt++;
+	}
+
+	/* trailing bytes that do not form a full header are an error */
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error!",
+				__func__, buf[0], blen);
+		goto exit_free;
+	}
+
+	pcmds->cmds = kcalloc(cnt, sizeof(struct dsi_cmd_desc),
+						GFP_KERNEL);
+	if (!pcmds->cmds)
+		goto exit_free;
+
+	pcmds->cmd_cnt = cnt;
+	pcmds->buf = buf;
+	pcmds->blen = blen;
+
+	/* second pass: point each descriptor at its header and payload */
+	bp = buf;
+	len = blen;
+	for (i = 0; i < cnt; i++) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		len -= sizeof(*dchdr);
+		bp += sizeof(*dchdr);
+		pcmds->cmds[i].dchdr = *dchdr;
+		pcmds->cmds[i].payload = bp;
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+
+	/*Set default link state to LP Mode*/
+	pcmds->link_state = DSI_LP_MODE;
+
+	if (link_key) {
+		data = of_get_property(np, link_key, NULL);
+		if (data && !strcmp(data, "dsi_hs_mode"))
+			pcmds->link_state = DSI_HS_MODE;
+		else
+			pcmds->link_state = DSI_LP_MODE;
+	}
+
+	pr_debug("%s: dcs_cmd=%x len=%d, cmd_cnt=%d link_state=%d\n", __func__,
+		pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt, pcmds->link_state);
+
+	return 0;
+
+exit_free:
+	kfree(buf);
+	return -ENOMEM;
+}
+
+
+/*
+ * mdss_panel_get_dst_fmt() - map bits-per-pixel to a DSI destination format
+ * @bpp:		panel bits per pixel (3/8/12/16/18/24)
+ * @mipi_mode:		DSI_VIDEO_MODE or DSI_CMD_MODE
+ * @pixel_packing:	for 18bpp video: 0 = packed, else loose
+ * @dst_format:		out: selected DSI destination format
+ *
+ * Returns 0 on success, -EINVAL for an unsupported @bpp. Any mode other
+ * than DSI_CMD_MODE falls back to the video-mode format.
+ */
+int mdss_panel_get_dst_fmt(u32 bpp, char mipi_mode, u32 pixel_packing,
+	char *dst_format)
+{
+	switch (bpp) {
+	case 3:
+		*dst_format = DSI_CMD_DST_FORMAT_RGB111;
+		return 0;
+	case 8:
+		*dst_format = DSI_CMD_DST_FORMAT_RGB332;
+		return 0;
+	case 12:
+		*dst_format = DSI_CMD_DST_FORMAT_RGB444;
+		return 0;
+	case 16:
+		*dst_format = (mipi_mode == DSI_CMD_MODE) ?
+			DSI_CMD_DST_FORMAT_RGB565 :
+			DSI_VIDEO_DST_FORMAT_RGB565;
+		return 0;
+	case 18:
+		if (mipi_mode == DSI_CMD_MODE)
+			*dst_format = DSI_CMD_DST_FORMAT_RGB666;
+		else
+			*dst_format = (pixel_packing == 0) ?
+				DSI_VIDEO_DST_FORMAT_RGB666 :
+				DSI_VIDEO_DST_FORMAT_RGB666_LOOSE;
+		return 0;
+	case 24:
+		*dst_format = (mipi_mode == DSI_CMD_MODE) ?
+			DSI_CMD_DST_FORMAT_RGB888 :
+			DSI_VIDEO_DST_FORMAT_RGB888;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mdss_dsi_parse_fbc_params() - parse FBC compression settings from DT
+ * @np:		device node to read from
+ * @timing:	timing whose fbc struct and compression_mode are filled in
+ *
+ * When qcom,mdss-dsi-fbc-enable is present, reads each FBC tunable (every
+ * missing numeric property falls back to the default shown inline) and
+ * marks the timing as COMPRESSION_FBC; otherwise disables FBC. Always
+ * returns 0.
+ */
+static int mdss_dsi_parse_fbc_params(struct device_node *np,
+ struct mdss_panel_timing *timing)
+{
+ int rc, fbc_enabled = 0;
+ u32 tmp;
+ struct fbc_panel_info *fbc = &timing->fbc;
+
+ fbc_enabled = of_property_read_bool(np, "qcom,mdss-dsi-fbc-enable");
+ if (fbc_enabled) {
+ pr_debug("%s:%d FBC panel enabled.\n", __func__, __LINE__);
+ fbc->enabled = 1;
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-bpp", &tmp);
+ fbc->target_bpp = (!rc ? tmp : 24);
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-packing",
+ &tmp);
+ fbc->comp_mode = (!rc ? tmp : 0);
+ fbc->qerr_enable = of_property_read_bool(np,
+ "qcom,mdss-dsi-fbc-quant-error");
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-bias", &tmp);
+ fbc->cd_bias = (!rc ? tmp : 0);
+ fbc->pat_enable = of_property_read_bool(np,
+ "qcom,mdss-dsi-fbc-pat-mode");
+ fbc->vlc_enable = of_property_read_bool(np,
+ "qcom,mdss-dsi-fbc-vlc-mode");
+ fbc->bflc_enable = of_property_read_bool(np,
+ "qcom,mdss-dsi-fbc-bflc-mode");
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-h-line-budget",
+ &tmp);
+ fbc->line_x_budget = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-budget-ctrl",
+ &tmp);
+ fbc->block_x_budget = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-block-budget",
+ &tmp);
+ fbc->block_budget = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-fbc-lossless-threshold", &tmp);
+ fbc->lossless_mode_thd = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-fbc-lossy-threshold", &tmp);
+ fbc->lossy_mode_thd = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-rgb-threshold",
+ &tmp);
+ fbc->lossy_rgb_thd = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-fbc-lossy-mode-idx", &tmp);
+ fbc->lossy_mode_idx = (!rc ? tmp : 0);
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-fbc-slice-height", &tmp);
+ fbc->slice_height = (!rc ? tmp : 0);
+ fbc->pred_mode = of_property_read_bool(np,
+ "qcom,mdss-dsi-fbc-2d-pred-mode");
+ fbc->enc_mode = of_property_read_bool(np,
+ "qcom,mdss-dsi-fbc-ver2-mode");
+ rc = of_property_read_u32(np,
+ "qcom,mdss-dsi-fbc-max-pred-err", &tmp);
+ fbc->max_pred_err = (!rc ? tmp : 0);
+
+ timing->compression_mode = COMPRESSION_FBC;
+ } else {
+ pr_debug("%s:%d Panel does not support FBC.\n",
+ __func__, __LINE__);
+ fbc->enabled = 0;
+ fbc->target_bpp = 24;
+ }
+ return 0;
+}
+
+/*
+ * mdss_dsi_panel_dsc_pps_send() - send the DSC Picture Parameter Set
+ * @ctrl:	DSI controller to transmit on
+ * @pinfo:	panel info; must have COMPRESSION_DSC active, else no-op
+ *
+ * Builds the PPS into ctrl->pps_buf and sends it as a single DTYPE_PPS
+ * command in LP mode. The command descriptor lives on the stack; only
+ * the payload buffer persists in @ctrl.
+ */
+void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct mdss_panel_info *pinfo)
+{
+ struct dsi_panel_cmds pcmds;
+ struct dsi_cmd_desc cmd;
+
+ if (!pinfo || (pinfo->compression_mode != COMPRESSION_DSC))
+ return;
+
+ memset(&pcmds, 0, sizeof(pcmds));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dchdr.dlen = mdss_panel_dsc_prepare_pps_buf(&pinfo->dsc,
+ ctrl->pps_buf, 0);
+ cmd.dchdr.dtype = DTYPE_PPS;
+ cmd.dchdr.last = 1;
+ cmd.dchdr.wait = 10;
+ cmd.dchdr.vc = 0;
+ cmd.dchdr.ack = 0;
+ cmd.payload = ctrl->pps_buf;
+
+ pcmds.cmd_cnt = 1;
+ pcmds.cmds = &cmd;
+ pcmds.link_state = DSI_LP_MODE;
+
+ mdss_dsi_panel_cmds_send(ctrl, &pcmds, CMD_REQ_COMMIT);
+}
+
+/*
+ * mdss_dsi_parse_hdr_settings() - parse HDR panel properties from DT
+ * @np:		device node to read from
+ * @pinfo:	panel info whose hdr_properties are filled in
+ *
+ * HDR is disabled again if any required property (color primaries, peak
+ * brightness, blackness level) cannot be read. Returns 0, or -EINVAL on
+ * NULL arguments.
+ */
+static int mdss_dsi_parse_hdr_settings(struct device_node *np,
+		struct mdss_panel_info *pinfo)
+{
+	int rc = 0;
+	struct mdss_panel_hdr_properties *hdr_prop;
+
+	if (!np) {
+		pr_err("%s: device node pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!pinfo) {
+		pr_err("%s: panel info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr_prop = &pinfo->hdr_properties;
+	hdr_prop->hdr_enabled = of_property_read_bool(np,
+		"qcom,mdss-dsi-panel-hdr-enabled");
+
+	if (hdr_prop->hdr_enabled) {
+		rc = of_property_read_u32_array(np,
+				"qcom,mdss-dsi-panel-hdr-color-primaries",
+				hdr_prop->display_primaries,
+				DISPLAY_PRIMARIES_COUNT);
+		if (rc) {
+			/*
+			 * Fix: the original passed the assignment
+			 * "hdr_prop->hdr_enabled = false" as the printf
+			 * argument, so rc was never logged and the disable
+			 * was buried inside the argument list.
+			 */
+			pr_info("%s:%d, Unable to read color primaries,rc:%d",
+					__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-peak-brightness",
+			&(hdr_prop->peak_brightness));
+		if (rc) {
+			pr_info("%s:%d, Unable to read hdr brightness, rc:%d",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-blackness-level",
+			&(hdr_prop->blackness_level));
+		if (rc) {
+			/* Fix: message was copy-pasted from the brightness
+			 * read above; report the property actually read.
+			 */
+			pr_info("%s:%d, Unable to read hdr blackness level, rc:%d",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+	}
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_dsc_version() - read and validate the DSC version from DT
+ * @np:		device node to read from
+ * @timing:	timing whose dsc.version / dsc.scr_rev are set
+ *
+ * Defaults to DSC 1.1 (version 0x11) and scr rev 0 when the properties
+ * are absent. Returns 0 on success, -EINVAL for an unsupported version
+ * or scr revision.
+ */
+static int mdss_dsi_parse_dsc_version(struct device_node *np,
+		struct mdss_panel_timing *timing)
+{
+	struct dsc_desc *dsc = &timing->dsc;
+	u32 data;
+	int rc;
+
+	/* default when the property is absent */
+	dsc->version = 0x11;
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-version", &data);
+	if (!rc) {
+		dsc->version = data & 0xff;
+		/* only support DSC 1.1 rev */
+		if (dsc->version != 0x11) {
+			pr_err("%s: DSC version:%d not supported\n", __func__,
+				dsc->version);
+			return -EINVAL;
+		}
+	}
+
+	/* default when the property is absent */
+	dsc->scr_rev = 0x0;
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-scr-version", &data);
+	if (!rc) {
+		dsc->scr_rev = data & 0xff;
+		/* only one scr rev supported */
+		if (dsc->scr_rev > 0x1) {
+			pr_err("%s: DSC scr version:%d not supported\n",
+				__func__, dsc->scr_rev);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_dsc_params() - parse DSC slice/encoder settings from DT
+ * @np:			dsc config node to read from
+ * @timing:		timing whose dsc struct is filled in
+ * @is_split_display:	true when the panel is driven as a split display
+ *
+ * Validates encoder count, slice geometry against the panel width, and
+ * slice-per-packet, then derives the remaining DSC parameters. Returns 0
+ * on success, a negative errno on a missing property or invalid config.
+ */
+static int mdss_dsi_parse_dsc_params(struct device_node *np,
+		struct mdss_panel_timing *timing, bool is_split_display)
+{
+	u32 data, intf_width;
+	int rc = 0;
+	struct dsc_desc *dsc = &timing->dsc;
+
+	if (!np) {
+		pr_err("%s: device node pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-encoders", &data);
+	if (rc) {
+		if (!of_find_property(np, "qcom,mdss-dsc-encoders", NULL)) {
+			/*
+			 * property is not defined, default to 1.
+			 * Fix: clear rc here — the original kept the read
+			 * error and could return it from a later pr_err/goto
+			 * path even though defaulting is not an error.
+			 */
+			data = 1;
+			rc = 0;
+		} else {
+			pr_err("%s: Error parsing qcom,mdss-dsc-encoders\n",
+				__func__);
+			goto end;
+		}
+	}
+
+	timing->dsc_enc_total = data;
+
+	if (is_split_display && (timing->dsc_enc_total > 1)) {
+		pr_err("%s: Error: for split displays, more than 1 dsc encoder per panel is not allowed.\n",
+			__func__);
+		/* Fix: original fell through with rc possibly 0 */
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-slice-height", &data);
+	if (rc)
+		goto end;
+	dsc->slice_height = data;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-slice-width", &data);
+	if (rc)
+		goto end;
+	dsc->slice_width = data;
+	intf_width = timing->xres;
+
+	if (intf_width % dsc->slice_width) {
+		pr_err("%s: Error: multiple of slice-width:%d should match panel-width:%d\n",
+			__func__, dsc->slice_width, intf_width);
+		/* Fix: original returned rc == 0 despite the error */
+		rc = -EINVAL;
+		goto end;
+	}
+
+	data = intf_width / dsc->slice_width;
+	if (((timing->dsc_enc_total > 1) && ((data != 2) && (data != 4))) ||
+	    ((timing->dsc_enc_total == 1) && (data > 2))) {
+		pr_err("%s: Error: max 2 slice per encoder. slice-width:%d should match panel-width:%d dsc_enc_total:%d\n",
+			__func__, dsc->slice_width,
+			intf_width, timing->dsc_enc_total);
+		/* Fix: original returned rc == 0 despite the error */
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-slice-per-pkt", &data);
+	if (rc)
+		goto end;
+	dsc->slice_per_pkt = data;
+
+	/*
+	 * slice_per_pkt can be either 1 or all slices_per_intf
+	 */
+	if ((dsc->slice_per_pkt > 1) && (dsc->slice_per_pkt !=
+			DIV_ROUND_UP(intf_width, dsc->slice_width))) {
+		pr_err("Error: slice_per_pkt can be either 1 or all slices_per_intf\n");
+		pr_err("%s: slice_per_pkt=%d, slice_width=%d intf_width=%d\n",
+			__func__,
+			dsc->slice_per_pkt, dsc->slice_width, intf_width);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("%s: num_enc:%d :slice h=%d w=%d s_pkt=%d\n", __func__,
+		timing->dsc_enc_total, dsc->slice_height,
+		dsc->slice_width, dsc->slice_per_pkt);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-bit-per-component", &data);
+	if (rc)
+		goto end;
+	dsc->bpc = data;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-bit-per-pixel", &data);
+	if (rc)
+		goto end;
+	dsc->bpp = data;
+
+	pr_debug("%s: bpc=%d bpp=%d\n", __func__,
+		dsc->bpc, dsc->bpp);
+
+	dsc->block_pred_enable = of_property_read_bool(np,
+			"qcom,mdss-dsc-block-prediction-enable");
+
+	dsc->enable_422 = 0;
+	dsc->convert_rgb = 1;
+	dsc->vbr_enable = 0;
+
+	dsc->config_by_manufacture_cmd = of_property_read_bool(np,
+		"qcom,mdss-dsc-config-by-manufacture-cmd");
+
+	mdss_panel_dsc_parameters_calc(&timing->dsc);
+	mdss_panel_dsc_pclk_param_calc(&timing->dsc, intf_width);
+
+	timing->dsc.full_frame_slices =
+		DIV_ROUND_UP(intf_width, timing->dsc.slice_width);
+
+	timing->compression_mode = COMPRESSION_DSC;
+
+end:
+	return rc;
+}
+
+/*
+ * mdss_dsi_panel_get_dsc_cfg_np() - locate the DSC config device node
+ * @np:			panel device node
+ * @panel_data:		panel data carrying the command-line node name
+ * @default_timing:	true when the default timing is in use
+ *
+ * Prefers the child node named on the command line (default timing only);
+ * otherwise resolves the qcom,config-select phandle. Returns the node (the
+ * caller owns the reference and must of_node_put() it) or NULL.
+ */
+static struct device_node *mdss_dsi_panel_get_dsc_cfg_np(
+		struct device_node *np, struct mdss_panel_data *panel_data,
+		bool default_timing)
+{
+	struct device_node *dsc_cfg_np = NULL;
+
+	/* First preference: the dsc config node named on the command line */
+	if (default_timing) {
+		dsc_cfg_np = of_get_child_by_name(np,
+				panel_data->dsc_cfg_np_name);
+		if (!dsc_cfg_np)
+			pr_warn_once("%s: cannot find dsc config node:%s\n",
+					__func__, panel_data->dsc_cfg_np_name);
+	}
+
+	if (dsc_cfg_np)
+		return dsc_cfg_np;
+
+	/* Fall back to the qcom,config-select phandle from DT */
+	if (of_find_property(np, "qcom,config-select", NULL)) {
+		dsc_cfg_np = of_parse_phandle(np, "qcom,config-select", 0);
+		if (!dsc_cfg_np)
+			pr_warn_once("%s:err parsing qcom,config-select\n",
+					__func__);
+	}
+
+	return dsc_cfg_np;
+}
+
+/*
+ * mdss_dsi_parse_topology_config() - parse layer-mixer split / compression
+ * @np:			panel device node
+ * @pt:			timing being parsed
+ * @panel_data:		panel data (split-display info, dsc cfg node name)
+ * @default_timing:	true when @pt is the default timing
+ *
+ * Reads lm-split and split-mode from the selected dsc/topology config
+ * node, then parses DSC or FBC parameters per qcom,compression-mode.
+ * Returns 0 on success or a negative errno.
+ */
+static int mdss_dsi_parse_topology_config(struct device_node *np,
+	struct dsi_panel_timing *pt, struct mdss_panel_data *panel_data,
+	bool default_timing)
+{
+	int rc = 0;
+	bool is_split_display = panel_data->panel_info.is_split_display;
+	const char *data;
+	struct mdss_panel_timing *timing = &pt->timing;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct device_node *cfg_np = NULL;
+
+	ctrl_pdata = container_of(panel_data, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	cfg_np = mdss_dsi_panel_get_dsc_cfg_np(np,
+				&ctrl_pdata->panel_data, default_timing);
+
+	if (cfg_np) {
+		if (!of_property_read_u32_array(cfg_np, "qcom,lm-split",
+		    timing->lm_widths, 2)) {
+			if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)
+				&& (timing->lm_widths[1] != 0)) {
+				pr_err("%s: lm-split not allowed with split display\n",
+					__func__);
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+		rc = of_property_read_string(cfg_np, "qcom,split-mode", &data);
+		if (!rc && !strcmp(data, "pingpong-split"))
+			pinfo->use_pingpong_split = true;
+		/*
+		 * Fix: a missing qcom,split-mode property is not an error,
+		 * but the original let that read's rc leak to the return
+		 * value when qcom,compression-mode was absent.
+		 */
+		rc = 0;
+
+		if (((timing->lm_widths[0]) || (timing->lm_widths[1])) &&
+		    pinfo->use_pingpong_split) {
+			pr_err("%s: pingpong_split cannot be used when lm-split[%d,%d] is specified\n",
+				__func__,
+				timing->lm_widths[0], timing->lm_widths[1]);
+			/*
+			 * Fix: goto end so the cfg_np reference taken above
+			 * is released via of_node_put(); the original early
+			 * return leaked it.
+			 */
+			rc = -EINVAL;
+			goto end;
+		}
+
+		pr_info("%s: cfg_node name %s lm_split:%dx%d pp_split:%s\n",
+			__func__, cfg_np->name,
+			timing->lm_widths[0], timing->lm_widths[1],
+			pinfo->use_pingpong_split ? "yes" : "no");
+	}
+
+	/* no explicit split: one mixer spans the full panel width */
+	if (!pinfo->use_pingpong_split &&
+	    (timing->lm_widths[0] == 0) && (timing->lm_widths[1] == 0))
+		timing->lm_widths[0] = pt->timing.xres;
+
+	data = of_get_property(np, "qcom,compression-mode", NULL);
+	if (data) {
+		if (cfg_np && !strcmp(data, "dsc")) {
+			rc = mdss_dsi_parse_dsc_version(np, &pt->timing);
+			if (rc)
+				goto end;
+
+			pinfo->send_pps_before_switch =
+				of_property_read_bool(np,
+				"qcom,mdss-dsi-send-pps-before-switch");
+
+			rc = mdss_dsi_parse_dsc_params(cfg_np, &pt->timing,
+					is_split_display);
+		} else if (!strcmp(data, "fbc")) {
+			rc = mdss_dsi_parse_fbc_params(np, &pt->timing);
+		}
+	}
+
+end:
+	of_node_put(cfg_np);
+	return rc;
+}
+
+/*
+ * mdss_panel_parse_te_params() - parse tear-check settings from DT
+ * @np:		device node to read from
+ * @timing:	timing whose te struct is filled in
+ *
+ * Each property falls back to the default noted inline when absent.
+ */
+static void mdss_panel_parse_te_params(struct device_node *np,
+		struct mdss_panel_timing *timing)
+{
+	struct mdss_mdp_pp_tear_check *te = &timing->te;
+	u32 val;
+	int ret;
+
+	/*
+	 * TE default: dsi byte clock calculated base on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if te disabled;
+	 * vclk_line base on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 */
+	te->tear_check_en =
+		!of_property_read_bool(np, "qcom,mdss-tear-check-disable");
+
+	ret = of_property_read_u32(np,
+			"qcom,mdss-tear-check-sync-cfg-height", &val);
+	te->sync_cfg_height = ret ? 0xfff0 : val;
+
+	ret = of_property_read_u32(np,
+			"qcom,mdss-tear-check-sync-init-val", &val);
+	te->vsync_init_val = ret ? timing->yres : val;
+
+	ret = of_property_read_u32(np,
+			"qcom,mdss-tear-check-sync-threshold-start", &val);
+	te->sync_threshold_start = ret ? 4 : val;
+
+	ret = of_property_read_u32(np,
+			"qcom,mdss-tear-check-sync-threshold-continue", &val);
+	te->sync_threshold_continue = ret ? 4 : val;
+
+	ret = of_property_read_u32(np, "qcom,mdss-tear-check-frame-rate",
+			&val);
+	te->refx100 = ret ? 6000 : val;
+
+	ret = of_property_read_u32(np,
+			"qcom,mdss-tear-check-start-pos", &val);
+	te->start_pos = ret ? timing->yres : val;
+
+	ret = of_property_read_u32(np,
+			"qcom,mdss-tear-check-rd-ptr-trigger-intr", &val);
+	te->rd_ptr_irq = ret ? timing->yres + 1 : val;
+
+	te->wr_ptr_irq = 0;
+}
+
+
+/*
+ * mdss_dsi_parse_reset_seq() - parse a GPIO reset sequence from DT
+ * @np:		device node to read from
+ * @rst_seq:	out: level/delay pairs
+ * @rst_len:	out: number of entries copied (0 on any parse problem)
+ * @name:	DT property name
+ *
+ * The property must hold an even number of u32s, at most
+ * MDSS_DSI_RST_SEQ_LEN. Parse problems are logged at debug level and
+ * leave *rst_len at 0; the function always returns 0.
+ */
+static int mdss_dsi_parse_reset_seq(struct device_node *np,
+		u32 rst_seq[MDSS_DSI_RST_SEQ_LEN], u32 *rst_len,
+		const char *name)
+{
+	struct property *prop;
+	u32 tmp[MDSS_DSI_RST_SEQ_LEN];
+	int num = 0, i, rc;
+
+	*rst_len = 0;
+	prop = of_find_property(np, name, &num);
+	num /= sizeof(u32);
+	if (!prop || !num || num > MDSS_DSI_RST_SEQ_LEN || num % 2) {
+		pr_debug("%s:%d, error reading %s, length found = %d\n",
+			__func__, __LINE__, name, num);
+		return 0;
+	}
+
+	rc = of_property_read_u32_array(np, name, tmp, num);
+	if (rc) {
+		pr_debug("%s:%d, error reading %s, rc = %d\n",
+			__func__, __LINE__, name, rc);
+		return 0;
+	}
+
+	for (i = 0; i < num; ++i)
+		rst_seq[i] = tmp[i];
+	*rst_len = num;
+	return 0;
+}
+
+/*
+ * mdss_dsi_cmp_panel_reg_v2() - compare ESD read-back against expectations
+ * @ctrl:	controller holding return_buf and the expected status groups
+ *
+ * The expected values are organised in ctrl->groups consecutive groups;
+ * the panel is considered healthy when the read-back matches ANY group.
+ * Per-command lengths come from status_valid_params when set, else from
+ * status_cmds_rlen. Returns true on a full-group match, false otherwise.
+ */
+static bool mdss_dsi_cmp_panel_reg_v2(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int *lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+	int total = 0;
+	int base = 0;
+	int i, j;
+
+	/* total bytes expected across all status commands */
+	for (i = 0; i < ctrl->status_cmds.cmd_cnt; i++)
+		total += lenp[i];
+
+	for (j = 0; j < ctrl->groups; ++j, base += total) {
+		bool match = true;
+
+		for (i = 0; i < total; ++i) {
+			if (ctrl->return_buf[i] !=
+					ctrl->status_value[base + i]) {
+				match = false;
+				break;
+			}
+		}
+		if (match)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * mdss_dsi_gen_read_status() - generic ESD register read-back check
+ * @ctrl_pdata:	controller whose status read-back is validated
+ *
+ * Returns 1 when the read-back matches an expected value group,
+ * -EINVAL (triggering ESD recovery) otherwise.
+ */
+static int mdss_dsi_gen_read_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	if (mdss_dsi_cmp_panel_reg_v2(ctrl_pdata))
+		return 1;
+
+	pr_err("%s: Read back value from panel is incorrect\n", __func__);
+	return -EINVAL;
+}
+
+/*
+ * mdss_dsi_nt35596_read_status() - NT35596-specific ESD status check
+ * @ctrl_pdata:	controller whose status read-back is validated
+ *
+ * Register index 0 must always match. Index 3 matching clears the error
+ * count; otherwise indexes 4/5 matching also clears it, and a full
+ * mismatch increments it. Once the count reaches the configured maximum
+ * the panel is declared bad. Returns 1 when healthy, -EINVAL otherwise.
+ */
+static int mdss_dsi_nt35596_read_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+		ctrl_pdata->status_value, 0)) {
+		ctrl_pdata->status_error_count = 0;
+		pr_err("%s: Read back value from panel is incorrect\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+		ctrl_pdata->status_value, 3)) {
+		ctrl_pdata->status_error_count = 0;
+	} else {
+		if (mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+			ctrl_pdata->status_value, 4) ||
+			mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+			ctrl_pdata->status_value, 5))
+			ctrl_pdata->status_error_count = 0;
+		else
+			ctrl_pdata->status_error_count++;
+		if (ctrl_pdata->status_error_count >=
+				ctrl_pdata->max_status_error_count) {
+			/*
+			 * Fix: log the count BEFORE clearing it; the
+			 * original zeroed status_error_count first and then
+			 * printed it, so the log always reported 0.
+			 */
+			pr_err("%s: Read value bad. Error_cnt = %i\n",
+				__func__,
+				ctrl_pdata->status_error_count);
+			ctrl_pdata->status_error_count = 0;
+			return -EINVAL;
+		}
+	}
+	return 1;
+}
+
+/*
+ * mdss_dsi_parse_roi_alignment() - parse partial-update ROI constraints
+ * @np:	device node to read from
+ * @pt:	timing whose roi_alignment is filled in
+ *
+ * Reads the six-element qcom,panel-roi-alignment array in the order
+ * [xstart, ystart, width, height, min_width, min_height] alignment.
+ * Missing/short properties leave the alignment untouched.
+ */
+static void mdss_dsi_parse_roi_alignment(struct device_node *np,
+ struct dsi_panel_timing *pt)
+{
+ int len = 0;
+ u32 value[6];
+ struct property *data;
+ struct mdss_panel_timing *timing = &pt->timing;
+
+ data = of_find_property(np, "qcom,panel-roi-alignment", &len);
+ len /= sizeof(u32);
+ if (!data || (len != 6)) {
+ pr_debug("%s: Panel roi alignment not found", __func__);
+ } else {
+ int rc = of_property_read_u32_array(np,
+ "qcom,panel-roi-alignment", value, len);
+ if (rc)
+ pr_debug("%s: Error reading panel roi alignment values",
+ __func__);
+ else {
+ timing->roi_alignment.xstart_pix_align = value[0];
+ timing->roi_alignment.ystart_pix_align = value[1];
+ timing->roi_alignment.width_pix_align = value[2];
+ timing->roi_alignment.height_pix_align = value[3];
+ timing->roi_alignment.min_width = value[4];
+ timing->roi_alignment.min_height = value[5];
+ }
+
+ /*
+ * NOTE(review): the debug print lists width before ystart,
+ * i.e. a different order than the assignments above —
+ * confirm whether this ordering is intended.
+ */
+ pr_debug("%s: ROI alignment: [%d, %d, %d, %d, %d, %d]",
+ __func__, timing->roi_alignment.xstart_pix_align,
+ timing->roi_alignment.width_pix_align,
+ timing->roi_alignment.ystart_pix_align,
+ timing->roi_alignment.height_pix_align,
+ timing->roi_alignment.min_width,
+ timing->roi_alignment.min_height);
+ }
+}
+
+/*
+ * mdss_dsi_parse_dms_config() - parse dynamic mode switch settings from DT
+ * @np:		device node to read from
+ * @ctrl:	controller whose dms_mode and switch command sets are set
+ *
+ * Selects the DMS mode (disabled / suspend-resume / immediate /
+ * resolution-switch-immediate) and parses the associated switch command
+ * sets. DMS is disabled again when the required commands are missing.
+ */
+static void mdss_dsi_parse_dms_config(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+ const char *data;
+ bool dms_enabled;
+
+ dms_enabled = of_property_read_bool(np,
+ "qcom,dynamic-mode-switch-enabled");
+
+ if (!dms_enabled) {
+ pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_DISABLED;
+ goto exit;
+ }
+
+ /* default mode is suspend_resume */
+ pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_SUSPEND_RESUME;
+ data = of_get_property(np, "qcom,dynamic-mode-switch-type", NULL);
+ /* resolution switch needs per-timing switch cmds in timings_list */
+ if (data && !strcmp(data, "dynamic-resolution-switch-immediate")) {
+ if (!list_empty(&ctrl->panel_data.timings_list))
+ pinfo->mipi.dms_mode =
+ DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE;
+ else
+ pinfo->mipi.dms_mode =
+ DYNAMIC_MODE_SWITCH_DISABLED;
+ goto exit;
+ }
+
+ if (data && !strcmp(data, "dynamic-switch-immediate"))
+ pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_IMMEDIATE;
+ else
+ pr_debug("%s: default dms suspend/resume\n", __func__);
+
+ mdss_dsi_parse_dcs_cmds(np, &ctrl->video2cmd,
+ "qcom,video-to-cmd-mode-switch-commands", NULL);
+
+ mdss_dsi_parse_dcs_cmds(np, &ctrl->cmd2video,
+ "qcom,cmd-to-video-mode-switch-commands", NULL);
+
+ mdss_dsi_parse_dcs_cmds(np, &ctrl->post_dms_on_cmds,
+ "qcom,mdss-dsi-post-mode-switch-on-command",
+ "qcom,mdss-dsi-post-mode-switch-on-command-state");
+
+ /* immediate switch is unusable without the required command sets */
+ if (pinfo->mipi.dms_mode == DYNAMIC_MODE_SWITCH_IMMEDIATE &&
+ !ctrl->post_dms_on_cmds.cmd_cnt) {
+ pr_warn("%s: No post dms on cmd specified\n", __func__);
+ pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_DISABLED;
+ }
+
+ if (!ctrl->video2cmd.cmd_cnt || !ctrl->cmd2video.cmd_cnt) {
+ pr_warn("%s: No commands specified for dynamic switch\n",
+ __func__);
+ pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_DISABLED;
+ }
+exit:
+ pr_info("%s: dynamic switch feature enabled: %d\n", __func__,
+ pinfo->mipi.dms_mode);
+}
+
+/*
+ * mdss_dsi_parse_esd_check_valid_params() - sanity-check valid-params list
+ * @ctrl:	controller holding status_valid_params and status_cmds_rlen
+ *
+ * Each status command's valid-params length must not exceed the number of
+ * bytes actually read back for that command; returns false (caller then
+ * ignores the valid-params list) when any entry is too long.
+ */
+static bool
+mdss_dsi_parse_esd_check_valid_params(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int i = 0;
+
+	while (i < ctrl->status_cmds.cmd_cnt) {
+		if (ctrl->status_valid_params[i] > ctrl->status_cmds_rlen[i]) {
+			pr_debug("%s: ignore valid params!\n", __func__);
+			return false;
+		}
+		++i;
+	}
+
+	return true;
+}
+
+/*
+ * mdss_dsi_parse_esd_status_len() - read a u32-array ESD property
+ * @np:       panel device-tree node
+ * @prop_key: property name to read
+ * @target:   out: kcalloc()'d array of the property values (caller frees)
+ * @cmd_cnt:  number of entries the property must contain
+ *
+ * The property length must match @cmd_cnt exactly.  Returns true on
+ * success; on any failure *target is left/reset to NULL and false is
+ * returned.
+ */
+static bool mdss_dsi_parse_esd_status_len(struct device_node *np,
+	char *prop_key, u32 **target, u32 cmd_cnt)
+{
+	int tmp;
+
+	/* of_find_property() returns the property size in bytes via tmp */
+	if (!of_find_property(np, prop_key, &tmp))
+		return false;
+
+	tmp /= sizeof(u32);
+	if (tmp != cmd_cnt) {
+		pr_err("%s: request property number(%d) not match command count(%d)\n",
+			__func__, tmp, cmd_cnt);
+		return false;
+	}
+
+	*target = kcalloc(tmp, sizeof(u32), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(*target)) {
+		pr_err("%s: Error allocating memory for property\n",
+				__func__);
+		return false;
+	}
+
+	if (of_property_read_u32_array(np, prop_key, *target, tmp)) {
+		pr_err("%s: cannot get values from dts\n", __func__);
+		kfree(*target);
+		*target = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * mdss_dsi_parse_esd_params() - parse ESD (panel status check) DT settings
+ * @np:   panel device-tree node
+ * @ctrl: DSI controller platform data
+ *
+ * When "qcom,esd-check-enabled" is set, selects the check method (BTA,
+ * register read, NT35596-style register read, or TE signal).  For the
+ * register-read methods it additionally loads the status commands, their
+ * expected read lengths, optional valid-params counts and expected value
+ * groups, and allocates the compare/return buffers used at check time.
+ * Any parse failure disables ESD checking and frees what was allocated.
+ *
+ * Fix vs. original: @tmp was divided and tested after
+ * of_find_property("qcom,mdss-dsi-panel-status-value") even when the
+ * property was absent — of_find_property() only writes @tmp on success,
+ * so the original read an uninitialized variable.  The property pointer
+ * is now checked before @tmp is used.
+ */
+static void mdss_dsi_parse_esd_params(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 tmp;
+	u32 i, status_len, *lenp;
+	int rc;
+	struct property *data;
+	const char *string;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	pinfo->esd_check_enabled = of_property_read_bool(np,
+		"qcom,esd-check-enabled");
+
+	if (!pinfo->esd_check_enabled)
+		return;
+
+	ctrl->status_mode = ESD_MAX;
+	rc = of_property_read_string(np,
+			"qcom,mdss-dsi-panel-status-check-mode", &string);
+	if (!rc) {
+		if (!strcmp(string, "bta_check")) {
+			ctrl->status_mode = ESD_BTA;
+		} else if (!strcmp(string, "reg_read")) {
+			ctrl->status_mode = ESD_REG;
+			ctrl->check_read_status =
+				mdss_dsi_gen_read_status;
+		} else if (!strcmp(string, "reg_read_nt35596")) {
+			ctrl->status_mode = ESD_REG_NT35596;
+			ctrl->status_error_count = 0;
+			ctrl->check_read_status =
+				mdss_dsi_nt35596_read_status;
+		} else if (!strcmp(string, "te_signal_check")) {
+			/* TE-based checking only exists for command mode */
+			if (pinfo->mipi.mode == DSI_CMD_MODE) {
+				ctrl->status_mode = ESD_TE;
+			} else {
+				pr_err("TE-ESD not valid for video mode\n");
+				goto error;
+			}
+		} else {
+			pr_err("No valid panel-status-check-mode string\n");
+			goto error;
+		}
+	}
+
+	/* only the register-read methods need the command/value setup below */
+	if ((ctrl->status_mode == ESD_BTA) || (ctrl->status_mode == ESD_TE) ||
+			(ctrl->status_mode == ESD_MAX))
+		return;
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->status_cmds,
+			"qcom,mdss-dsi-panel-status-command",
+				"qcom,mdss-dsi-panel-status-command-state");
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-max-error-count",
+		&tmp);
+	ctrl->max_status_error_count = (!rc ? tmp : 0);
+
+	if (!mdss_dsi_parse_esd_status_len(np,
+		"qcom,mdss-dsi-panel-status-read-length",
+		&ctrl->status_cmds_rlen, ctrl->status_cmds.cmd_cnt)) {
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	/* valid-params is optional; reject it if inconsistent with rlen */
+	if (mdss_dsi_parse_esd_status_len(np,
+		"qcom,mdss-dsi-panel-status-valid-params",
+		&ctrl->status_valid_params, ctrl->status_cmds.cmd_cnt)) {
+		if (!mdss_dsi_parse_esd_check_valid_params(ctrl))
+			goto error1;
+	}
+
+	/* total bytes compared per group (valid params preferred over rlen) */
+	status_len = 0;
+	lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+	for (i = 0; i < ctrl->status_cmds.cmd_cnt; ++i)
+		status_len += lenp[i];
+
+	/*
+	 * tmp is only written by of_find_property() when the property
+	 * exists, so check the returned pointer before using it.
+	 */
+	data = of_find_property(np, "qcom,mdss-dsi-panel-status-value", &tmp);
+	if (IS_ERR_OR_NULL(data)) {
+		pr_err("%s: Error parse panel-status-value\n", __func__);
+		goto error1;
+	}
+	tmp /= sizeof(u32);
+	if (tmp != 0 && (tmp % status_len) == 0) {
+		ctrl->groups = tmp / status_len;
+	} else {
+		pr_err("%s: Error parse panel-status-value\n", __func__);
+		goto error1;
+	}
+
+	ctrl->status_value = kcalloc(status_len * ctrl->groups, sizeof(u32),
+				GFP_KERNEL);
+	if (!ctrl->status_value)
+		goto error1;
+
+	ctrl->return_buf = kcalloc(status_len * ctrl->groups,
+			sizeof(unsigned char), GFP_KERNEL);
+	if (!ctrl->return_buf)
+		goto error2;
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-dsi-panel-status-value",
+		ctrl->status_value, ctrl->groups * status_len);
+	if (rc) {
+		pr_debug("%s: Error reading panel status values\n",
+				__func__);
+		/* best effort: compare against all-zero expected values */
+		memset(ctrl->status_value, 0, ctrl->groups * status_len);
+	}
+
+	return;
+
+error2:
+	kfree(ctrl->status_value);
+error1:
+	kfree(ctrl->status_valid_params);
+	kfree(ctrl->status_cmds_rlen);
+error:
+	pinfo->esd_check_enabled = false;
+}
+
+/*
+ * mdss_dsi_parse_panel_features() - parse optional panel feature flags
+ * @np:   panel device-tree node
+ * @ctrl: DSI controller platform data
+ *
+ * Parses partial update, ULPS, dynamic mode switch, panel-ACK, ESD and
+ * low-power command properties, plus the optional 5V boost GPIO fallback
+ * for the display-enable line.  Returns 0 on success or -ENODEV for bad
+ * arguments.
+ */
+static int mdss_dsi_parse_panel_features(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!np || !ctrl) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl->panel_data.panel_info;
+
+	/* partial update only takes effect in command mode */
+	pinfo->partial_update_supported = of_property_read_bool(np,
+		"qcom,partial-update-enabled");
+	if (pinfo->mipi.mode == DSI_CMD_MODE) {
+		pinfo->partial_update_enabled = pinfo->partial_update_supported;
+		pr_info("%s: partial_update_enabled=%d\n", __func__,
+					pinfo->partial_update_enabled);
+		ctrl->set_col_page_addr = mdss_dsi_set_col_page_addr;
+		if (pinfo->partial_update_enabled) {
+			pinfo->partial_update_roi_merge =
+					of_property_read_bool(np,
+					"qcom,partial-update-roi-merge");
+		}
+	}
+
+	pinfo->dcs_cmd_by_left = of_property_read_bool(np,
+		"qcom,dcs-cmd-by-left");
+
+	pinfo->ulps_feature_enabled = of_property_read_bool(np,
+		"qcom,ulps-enabled");
+	pr_info("%s: ulps feature %s\n", __func__,
+		(pinfo->ulps_feature_enabled ? "enabled" : "disabled"));
+
+	pinfo->ulps_suspend_enabled = of_property_read_bool(np,
+		"qcom,suspend-ulps-enabled");
+	pr_info("%s: ulps during suspend feature %s", __func__,
+		(pinfo->ulps_suspend_enabled ? "enabled" : "disabled"));
+
+	mdss_dsi_parse_dms_config(np, ctrl);
+
+	/* simulator panels never ACK, regardless of the DT property */
+	pinfo->panel_ack_disabled = pinfo->sim_panel_mode ?
+		1 : of_property_read_bool(np, "qcom,panel-ack-disabled");
+
+	pinfo->allow_phy_power_off = of_property_read_bool(np,
+		"qcom,panel-allow-phy-poweroff");
+
+	mdss_dsi_parse_esd_params(np, ctrl);
+
+	/* register-read ESD needs panel ACKs; the two are incompatible */
+	if (pinfo->panel_ack_disabled && pinfo->esd_check_enabled) {
+		pr_warn("ESD should not be enabled if panel ACK is disabled\n");
+		pinfo->esd_check_enabled = false;
+	}
+
+	/* fall back to the 5V boost GPIO when no enable GPIO was set yet */
+	if (ctrl->disp_en_gpio <= 0) {
+		ctrl->disp_en_gpio = of_get_named_gpio(
+			np,
+			"qcom,5v-boost-gpio", 0);
+
+		if (!gpio_is_valid(ctrl->disp_en_gpio))
+			pr_debug("%s:%d, Disp_en gpio not specified\n",
+					__func__, __LINE__);
+	}
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->lp_on_cmds,
+			"qcom,mdss-dsi-lp-mode-on", NULL);
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->lp_off_cmds,
+			"qcom,mdss-dsi-lp-mode-off", NULL);
+
+	return 0;
+}
+
+/*
+ * Parse "qcom,mdss-dsi-hor-line-idle": an array of {min, max, idle}
+ * u32 triplets describing per-line idle time for horizontal ranges.
+ * Populates ctrl->line_idle / ctrl->horizontal_idle_cnt and sets
+ * ctrl->idle_enabled so burst mode can be disabled (the two features
+ * are mutually exclusive).
+ */
+static void mdss_dsi_parse_panel_horizintal_line_idle(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	const u32 *src;
+	int i, len, cnt;
+	struct panel_horizontal_idle *kp;
+
+	if (!np || !ctrl) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return;
+	}
+
+	src = of_get_property(np, "qcom,mdss-dsi-hor-line-idle", &len);
+	if (!src || len == 0)
+		return;
+
+	/*
+	 * len is in bytes but entries are 3 u32s (12 bytes) each; since
+	 * len = 4k and 4k mod 3 == k mod 3, this byte-count test is
+	 * equivalent to checking that the u32 count is a multiple of 3.
+	 */
+	cnt = len % 3; /* 3 fields per entry */
+	if (cnt) {
+		pr_err("%s: invalid horizontal idle len=%d\n", __func__, len);
+		return;
+	}
+
+	cnt = len / sizeof(u32);
+
+	kp = kcalloc((cnt / 3), sizeof(*kp), GFP_KERNEL);
+	if (kp == NULL)
+		return;
+
+	ctrl->line_idle = kp;
+	/* DT cells are big-endian; convert each field on the way in */
+	for (i = 0; i < cnt; i += 3) {
+		kp->min = be32_to_cpu(src[i]);
+		kp->max = be32_to_cpu(src[i+1]);
+		kp->idle = be32_to_cpu(src[i+2]);
+		kp++;
+		ctrl->horizontal_idle_cnt++;
+	}
+
+	/*
+	 * idle is enabled for this controller, this will be used to
+	 * enable/disable burst mode since both features are mutually
+	 * exclusive.
+	 */
+	ctrl->idle_enabled = true;
+
+	pr_debug("%s: horizontal_idle_cnt=%d\n", __func__,
+			ctrl->horizontal_idle_cnt);
+}
+
+/*
+ * Read the dynamic-fps refresh-rate range from DT.  Missing properties
+ * are not errors: min defaults to MIN_REFRESH_RATE and max defaults to
+ * the panel's nominal frame rate.  Always returns 0 in practice.
+ */
+static int mdss_dsi_set_refresh_rate_range(struct device_node *pan_node,
+		struct mdss_panel_info *pinfo)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(pan_node,
+			"qcom,mdss-dsi-min-refresh-rate",
+			&pinfo->min_fps);
+	if (rc) {
+		pr_warn("%s:%d, Unable to read min refresh rate\n",
+				__func__, __LINE__);
+
+		/*
+		 * Since min refresh rate is not specified when dynamic
+		 * fps is enabled, using minimum as 30
+		 */
+		pinfo->min_fps = MIN_REFRESH_RATE;
+		rc = 0;
+	}
+
+	rc = of_property_read_u32(pan_node,
+			"qcom,mdss-dsi-max-refresh-rate",
+			&pinfo->max_fps);
+	if (rc) {
+		pr_warn("%s:%d, Unable to read max refresh rate\n",
+				__func__, __LINE__);
+
+		/*
+		 * Since max refresh rate was not specified when dynamic
+		 * fps is enabled, using the default panel refresh rate
+		 * as max refresh rate supported.
+		 */
+		pinfo->max_fps = pinfo->mipi.frame_rate;
+		rc = 0;
+	}
+
+	pr_info("dyn_fps: min = %d, max = %d\n",
+			pinfo->min_fps, pinfo->max_fps);
+	return rc;
+}
+
+/*
+ * Parse dynamic frame rate (DFPS) configuration.  When
+ * "qcom,mdss-dsi-pan-enable-dynamic-fps" is set, selects the update
+ * strategy from "qcom,mdss-dsi-pan-fps-update" (suspend/resume is the
+ * default) and loads the allowed refresh-rate range.  DFPS is disabled
+ * again if no update mode string is present at all.
+ */
+static void mdss_dsi_parse_dfps_config(struct device_node *pan_node,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	const char *data;
+	bool dynamic_fps;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	dynamic_fps = of_property_read_bool(pan_node,
+			"qcom,mdss-dsi-pan-enable-dynamic-fps");
+
+	if (!dynamic_fps)
+		return;
+
+	pinfo->dynamic_fps = true;
+	data = of_get_property(pan_node, "qcom,mdss-dsi-pan-fps-update", NULL);
+	if (data) {
+		if (!strcmp(data, "dfps_suspend_resume_mode")) {
+			pinfo->dfps_update = DFPS_SUSPEND_RESUME_MODE;
+			pr_debug("dfps mode: suspend/resume\n");
+		} else if (!strcmp(data, "dfps_immediate_clk_mode")) {
+			pinfo->dfps_update = DFPS_IMMEDIATE_CLK_UPDATE_MODE;
+			pr_debug("dfps mode: Immediate clk\n");
+		} else if (!strcmp(data, "dfps_immediate_porch_mode_hfp")) {
+			pinfo->dfps_update =
+				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP;
+			pr_debug("dfps mode: Immediate porch HFP\n");
+		} else if (!strcmp(data, "dfps_immediate_porch_mode_vfp")) {
+			pinfo->dfps_update =
+				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP;
+			pr_debug("dfps mode: Immediate porch VFP\n");
+		} else {
+			/* unrecognized string: fall back to suspend/resume */
+			pinfo->dfps_update = DFPS_SUSPEND_RESUME_MODE;
+			pr_debug("default dfps mode: suspend/resume\n");
+		}
+		mdss_dsi_set_refresh_rate_range(pan_node, pinfo);
+	} else {
+		pinfo->dynamic_fps = false;
+		pr_debug("dfps update mode not configured: disable\n");
+	}
+	pinfo->new_fps = pinfo->mipi.frame_rate;
+	pinfo->current_fps = pinfo->mipi.frame_rate;
+}
+
+/*
+ * mdss_panel_parse_bl_settings() - parse backlight control configuration
+ * @np:         panel device-tree node
+ * @ctrl_pdata: DSI controller platform data
+ *
+ * Selects the backlight control type from
+ * "qcom,mdss-dsi-bl-pmic-control-type": WLED (registers an LED trigger),
+ * PWM (PMI-based via of_pwm_get() or LPG bank + GPIO), or DCS commands.
+ * Defaults to UNKNOWN_CTRL when the property is absent.  Returns 0 on
+ * success, -EINVAL when a required PWM sub-property is missing.
+ */
+int mdss_panel_parse_bl_settings(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	const char *data;
+	int rc = 0;
+	u32 tmp;
+
+	ctrl_pdata->bklt_ctrl = UNKNOWN_CTRL;
+	data = of_get_property(np, "qcom,mdss-dsi-bl-pmic-control-type", NULL);
+	if (data) {
+		if (!strcmp(data, "bl_ctrl_wled")) {
+			led_trigger_register_simple("bkl-trigger",
+				&bl_led_trigger);
+			pr_debug("%s: SUCCESS-> WLED TRIGGER register\n",
+				__func__);
+			ctrl_pdata->bklt_ctrl = BL_WLED;
+		} else if (!strcmp(data, "bl_ctrl_pwm")) {
+			ctrl_pdata->bklt_ctrl = BL_PWM;
+			ctrl_pdata->pwm_pmi = of_property_read_bool(np,
+					"qcom,mdss-dsi-bl-pwm-pmi");
+			rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-bl-pmic-pwm-frequency", &tmp);
+			if (rc) {
+				pr_err("%s:%d, Error, panel pwm_period\n",
+						__func__, __LINE__);
+				return -EINVAL;
+			}
+			ctrl_pdata->pwm_period = tmp;
+			if (ctrl_pdata->pwm_pmi) {
+				/* PMI route: PWM device comes from DT */
+				ctrl_pdata->pwm_bl = of_pwm_get(np, NULL);
+				if (IS_ERR(ctrl_pdata->pwm_bl)) {
+					pr_err("%s: Error, pwm device\n",
+						__func__);
+					ctrl_pdata->pwm_bl = NULL;
+					return -EINVAL;
+				}
+			} else {
+				/* legacy route: LPG channel + PWM GPIO */
+				rc = of_property_read_u32(np,
+					"qcom,mdss-dsi-bl-pmic-bank-select",
+					&tmp);
+				if (rc) {
+					pr_err("%s:%d, Error, lpg channel\n",
+							__func__, __LINE__);
+					return -EINVAL;
+				}
+				ctrl_pdata->pwm_lpg_chan = tmp;
+				tmp = of_get_named_gpio(np,
+					"qcom,mdss-dsi-pwm-gpio", 0);
+				ctrl_pdata->pwm_pmic_gpio = tmp;
+				pr_debug("%s: Configured PWM bklt ctrl\n",
+						__func__);
+			}
+		} else if (!strcmp(data, "bl_ctrl_dcs")) {
+			ctrl_pdata->bklt_ctrl = BL_DCS_CMD;
+			pr_debug("%s: Configured DCS_CMD bklt ctrl\n",
+					__func__);
+		}
+	}
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_timing_switch() - make @timing the active panel timing
+ * @ctrl:   DSI controller platform data
+ * @timing: timing to activate (must wrap a struct dsi_panel_timing)
+ *
+ * Copies the timing's parameters (including DSI PHY timings and on/post-on
+ * command sets) into the live panel info, records it as current_timing and
+ * triggers a clock refresh.  A no-op if @timing is already current.
+ * Returns 0 on success, -EINVAL for a NULL timing.
+ */
+int mdss_dsi_panel_timing_switch(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct mdss_panel_timing *timing)
+{
+	struct dsi_panel_timing *pt;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+	int i;
+
+	if (!timing)
+		return -EINVAL;
+
+	if (timing == ctrl->panel_data.current_timing) {
+		pr_warn("%s: panel timing \"%s\" already set\n", __func__,
+				timing->name);
+		return 0; /* nothing to do */
+	}
+
+	pr_debug("%s: ndx=%d switching to panel timing \"%s\"\n", __func__,
+			ctrl->ndx, timing->name);
+
+	mdss_panel_info_from_timing(timing, pinfo);
+
+	/* recover the DSI-specific wrapper to reach the PHY timing data */
+	pt = container_of(timing, struct dsi_panel_timing, timing);
+	pinfo->mipi.t_clk_pre = pt->t_clk_pre;
+	pinfo->mipi.t_clk_post = pt->t_clk_post;
+
+	for (i = 0; i < ARRAY_SIZE(pt->phy_timing); i++)
+		pinfo->mipi.dsi_phy_db.timing[i] = pt->phy_timing[i];
+
+	for (i = 0; i < ARRAY_SIZE(pt->phy_timing_8996); i++)
+		pinfo->mipi.dsi_phy_db.timing_8996[i] = pt->phy_timing_8996[i];
+
+	ctrl->on_cmds = pt->on_cmds;
+	ctrl->post_panel_on_cmds = pt->post_panel_on_cmds;
+
+	ctrl->panel_data.current_timing = timing;
+	/* without an explicit clock rate the link clock must be recomputed */
+	if (!timing->clk_rate)
+		ctrl->refresh_clk_rate = true;
+	mdss_dsi_clk_refresh(&ctrl->panel_data, ctrl->update_phy_timing);
+
+	return 0;
+}
+
+/*
+ * Undo mdss_panel_parse_bl_settings(): only the WLED path registered an
+ * LED trigger, so only that path needs teardown.
+ */
+void mdss_dsi_unregister_bl_settings(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	if (ctrl_pdata->bklt_ctrl == BL_WLED)
+		led_trigger_unregister_simple(bl_led_trigger);
+}
+
+/*
+ * mdss_dsi_panel_timing_from_dt() - fill one dsi_panel_timing from DT
+ * @np:         timing device-tree node
+ * @pt:         timing structure to populate
+ * @panel_data: owning panel data (for split-display and panel info)
+ *
+ * Reads resolution, porches, borders, frame rate, clock rate and the DSI
+ * PHY timing tables.  Width, height and at least one PHY timing table are
+ * mandatory; everything else falls back to defaults.  The timing's name
+ * is kstrdup()'d from the node name.  Returns 0 on success or -EINVAL.
+ */
+static int mdss_dsi_panel_timing_from_dt(struct device_node *np,
+	struct dsi_panel_timing *pt,
+	struct mdss_panel_data *panel_data)
+{
+	u32 tmp;
+	u64 tmp64;
+	int rc, i, len;
+	const char *data;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+	struct mdss_panel_info *pinfo;
+	bool phy_timings_present = false;
+
+	pinfo = &panel_data->panel_info;
+
+	ctrl_pdata = container_of(panel_data, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/* mandatory: active area */
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-width", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel width not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	pt->timing.xres = tmp;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-height", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel height not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	pt->timing.yres = tmp;
+
+	/* optional porch/sync values, with conventional defaults */
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-front-porch", &tmp);
+	pt->timing.h_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-back-porch", &tmp);
+	pt->timing.h_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-pulse-width", &tmp);
+	pt->timing.h_pulse_width = (!rc ? tmp : 2);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-sync-skew", &tmp);
+	pt->timing.hsync_skew = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-back-porch", &tmp);
+	pt->timing.v_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-front-porch", &tmp);
+	pt->timing.v_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-pulse-width", &tmp);
+	pt->timing.v_pulse_width = (!rc ? tmp : 2);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-left-border", &tmp);
+	pt->timing.border_left = !rc ? tmp : 0;
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-right-border", &tmp);
+	pt->timing.border_right = !rc ? tmp : 0;
+
+	/* overriding left/right borders for split display cases */
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
+		if (panel_data->next)
+			pt->timing.border_right = 0;
+		else
+			pt->timing.border_left = 0;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-top-border", &tmp);
+	pt->timing.border_top = !rc ? tmp : 0;
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-bottom-border", &tmp);
+	pt->timing.border_bottom = !rc ? tmp : 0;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-framerate", &tmp);
+	pt->timing.frame_rate = !rc ? tmp : DEFAULT_FRAME_RATE;
+	/*
+	 * Clock rate may be stored as u64 or legacy u32 in DT; retry as
+	 * u32 when the u64 read reports -EOVERFLOW (cell-count mismatch).
+	 * NOTE(review): the (u32 *)&tmp64 cast fills the low word only on
+	 * little-endian targets — presumably fine for this SoC family.
+	 */
+	rc = of_property_read_u64(np, "qcom,mdss-dsi-panel-clockrate", &tmp64);
+	if (rc == -EOVERFLOW) {
+		tmp64 = 0;
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-clockrate", (u32 *)&tmp64);
+	}
+	pt->timing.clk_rate = !rc ? tmp64 : 0;
+
+	/* legacy 12-byte PHY timing table */
+	data = of_get_property(np, "qcom,mdss-dsi-panel-timings", &len);
+	if ((!data) || (len != 12)) {
+		pr_debug("%s:%d, Unable to read Phy timing settings",
+		       __func__, __LINE__);
+	} else {
+		for (i = 0; i < len; i++)
+			pt->phy_timing[i] = data[i];
+		phy_timings_present = true;
+	}
+
+	/* 40-byte PHY rev-2 (8996-style) per-lane timing table */
+	data = of_get_property(np, "qcom,mdss-dsi-panel-timings-phy-v2", &len);
+	if ((!data) || (len != 40)) {
+		pr_debug("%s:%d, Unable to read 8996 Phy lane timing settings",
+		       __func__, __LINE__);
+	} else {
+		for (i = 0; i < len; i++)
+			pt->phy_timing_8996[i] = data[i];
+		phy_timings_present = true;
+	}
+	/* at least one PHY table is required to drive the link */
+	if (!phy_timings_present) {
+		pr_err("%s: phy timing settings not present\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-pre", &tmp);
+	pt->t_clk_pre = (!rc ? tmp : 0x24);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-post", &tmp);
+	pt->t_clk_post = (!rc ? tmp : 0x03);
+
+	if (np->name) {
+		pt->timing.name = kstrdup(np->name, GFP_KERNEL);
+		pr_info("%s: found new timing \"%s\" (%pK)\n", __func__,
+				np->name, &pt->timing);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the per-timing resolution-dependent properties: ROI alignment,
+ * on / post-on / timing-switch command sets, compression topology and TE
+ * parameters.  Returns the topology parse result (0 on success).
+ */
+static int mdss_dsi_panel_config_res_properties(struct device_node *np,
+		struct dsi_panel_timing *pt,
+		struct mdss_panel_data *panel_data,
+		bool default_timing)
+{
+	int rc = 0;
+
+	mdss_dsi_parse_roi_alignment(np, pt);
+
+	mdss_dsi_parse_dcs_cmds(np, &pt->on_cmds,
+		"qcom,mdss-dsi-on-command",
+		"qcom,mdss-dsi-on-command-state");
+
+	mdss_dsi_parse_dcs_cmds(np, &pt->post_panel_on_cmds,
+		"qcom,mdss-dsi-post-panel-on-command", NULL);
+
+	mdss_dsi_parse_dcs_cmds(np, &pt->switch_cmds,
+		"qcom,mdss-dsi-timing-switch-command",
+		"qcom,mdss-dsi-timing-switch-command-state");
+
+	rc = mdss_dsi_parse_topology_config(np, pt, panel_data, default_timing);
+	if (rc) {
+		pr_err("%s: parsing compression params failed. rc:%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	mdss_panel_parse_te_params(np, &pt->timing);
+	return rc;
+}
+
+/*
+ * mdss_panel_parse_display_timings() - build the panel timing mode list
+ * @np:         panel device-tree node
+ * @panel_data: panel data whose timings_list is populated
+ *
+ * Parses every child of "qcom,mdss-dsi-display-timings" into a kcalloc()'d
+ * modedb, links each entry into panel_data->timings_list, and activates
+ * the entry marked "qcom,mdss-dsi-timing-default" (or entry 0).  When the
+ * timings node is absent, falls back to reading a single timing directly
+ * from the panel node.  Returns 0 on success or a negative errno.
+ */
+static int mdss_panel_parse_display_timings(struct device_node *np,
+		struct mdss_panel_data *panel_data)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	struct dsi_panel_timing *modedb;
+	struct device_node *timings_np;
+	struct device_node *entry;
+	int num_timings, rc;
+	int i = 0, active_ndx = 0;
+	bool default_timing = false;
+
+	ctrl = container_of(panel_data, struct mdss_dsi_ctrl_pdata, panel_data);
+
+	INIT_LIST_HEAD(&panel_data->timings_list);
+
+	timings_np = of_get_child_by_name(np, "qcom,mdss-dsi-display-timings");
+	if (!timings_np) {
+		struct dsi_panel_timing pt;
+
+		memset(&pt, 0, sizeof(struct dsi_panel_timing));
+
+		/*
+		 * display timings node is not available, fallback to reading
+		 * timings directly from root node instead
+		 *
+		 * NOTE(review): pt lives on the stack here, yet
+		 * mdss_dsi_panel_timing_switch() records a pointer to
+		 * pt.timing as current_timing — presumably the values are
+		 * fully copied into pinfo before return; confirm no later
+		 * dereference of current_timing in this configuration.
+		 */
+		pr_debug("reading display-timings from panel node\n");
+		rc = mdss_dsi_panel_timing_from_dt(np, &pt, panel_data);
+		if (!rc) {
+			mdss_dsi_panel_config_res_properties(np, &pt,
+					panel_data, true);
+			rc = mdss_dsi_panel_timing_switch(ctrl, &pt.timing);
+		}
+		return rc;
+	}
+
+	num_timings = of_get_child_count(timings_np);
+	if (num_timings == 0) {
+		pr_err("no timings found within display-timings\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* modedb stays allocated for the panel's lifetime: timings_list
+	 * holds pointers into it
+	 */
+	modedb = kcalloc(num_timings, sizeof(*modedb), GFP_KERNEL);
+	if (!modedb) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for_each_child_of_node(timings_np, entry) {
+		rc = mdss_dsi_panel_timing_from_dt(entry, (modedb + i),
+				panel_data);
+		if (rc) {
+			kfree(modedb);
+			goto exit;
+		}
+
+		default_timing = of_property_read_bool(entry,
+				"qcom,mdss-dsi-timing-default");
+		if (default_timing)
+			active_ndx = i;
+
+		mdss_dsi_panel_config_res_properties(entry, (modedb + i),
+				panel_data, default_timing);
+
+		list_add(&modedb[i].timing.list,
+				&panel_data->timings_list);
+		i++;
+	}
+
+	/* Configure default timing settings */
+	rc = mdss_dsi_panel_timing_switch(ctrl, &modedb[active_ndx].timing);
+	if (rc)
+		pr_err("unable to configure default timing settings\n");
+
+exit:
+	of_node_put(timings_np);
+
+	return rc;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/*
+ * mdss_panel_parse_dt_hdmi() - parse DBA (bridge) panel properties
+ * @np:         panel device-tree node
+ * @ctrl_pdata: DSI controller platform data
+ *
+ * Reads "qcom,dba-panel" and, for DBA panels, the mandatory
+ * "qcom,bridge-name" chip name.  Returns 0 on success or -EINVAL when
+ * the bridge name is missing.
+ */
+static int mdss_panel_parse_dt_hdmi(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int len = 0;
+	const char *bridge_chip_name;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	pinfo->is_dba_panel = of_property_read_bool(np,
+			"qcom,dba-panel");
+
+	if (pinfo->is_dba_panel) {
+		bridge_chip_name = of_get_property(np,
+			"qcom,bridge-name", &len);
+		if (!bridge_chip_name || len <= 0) {
+			pr_err("%s:%d Unable to read qcom,bridge_name, data=%pK,len=%d\n",
+				__func__, __LINE__, bridge_chip_name, len);
+			return -EINVAL;
+		}
+		strlcpy(ctrl_pdata->bridge_name, bridge_chip_name,
+			MSM_DBA_CHIP_NAME_MAX_LEN);
+	}
+	return 0;
+}
+#else
+/* Stub for targets built without MDSS HDMI support. */
+static int mdss_panel_parse_dt_hdmi(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	/*
+	 * Silence unused-parameter warnings.  The previous "(void)(*np)"
+	 * form formally dereferenced the pointers, which is needless and
+	 * invalid if the struct types are only forward-declared here;
+	 * casting the pointers themselves is the conventional idiom.
+	 */
+	(void)np;
+	(void)ctrl_pdata;
+	return 0;
+}
+#endif
+/*
+ * mdss_panel_parse_dt() - parse the full panel device-tree node
+ * @np:         panel device-tree node
+ * @ctrl_pdata: DSI controller platform data
+ *
+ * Top-level panel parser: physical dimensions, pixel format, operating
+ * mode, power modes, lane configuration, display timings, HDR, triggers,
+ * reset sequence, command sets, panel features, horizontal line idle,
+ * dynamic fps and HDMI/DBA properties.  Returns 0 on success or a
+ * negative errno from the first failing mandatory step.
+ */
+static int mdss_panel_parse_dt(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	u32 tmp;
+	u8 lanes = 0;
+	int rc = 0;
+	const char *data;
+	/*
+	 * NOTE(review): pdest is assigned below but never read in this
+	 * function; it appears to be a write-only leftover — confirm no
+	 * other use before removing.
+	 */
+	static const char *pdest;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data))
+		pinfo->is_split_display = true;
+
+	/* physical panel dimensions (optional, default 0) */
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-width-dimension", &tmp);
+	pinfo->physical_width = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-height-dimension", &tmp);
+	pinfo->physical_height = (!rc ? tmp : 0);
+
+	/* bits per pixel is mandatory */
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, bpp not specified\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	/* the ": 24" arm is unreachable: rc != 0 already returned above */
+	pinfo->bpp = (!rc ? tmp : 24);
+	pinfo->mipi.mode = DSI_VIDEO_MODE;
+	data = of_get_property(np, "qcom,mdss-dsi-panel-type", NULL);
+	if (data && !strcmp(data, "dsi_cmd_mode"))
+		pinfo->mipi.mode = DSI_CMD_MODE;
+	pinfo->mipi.boot_mode = pinfo->mipi.mode;
+	tmp = 0;
+	data = of_get_property(np, "qcom,mdss-dsi-pixel-packing", NULL);
+	if (data && !strcmp(data, "loose"))
+		pinfo->mipi.pixel_packing = 1;
+	else
+		pinfo->mipi.pixel_packing = 0;
+	rc = mdss_panel_get_dst_fmt(pinfo->bpp,
+		pinfo->mipi.mode, pinfo->mipi.pixel_packing,
+		&(pinfo->mipi.dst_format));
+	if (rc) {
+		pr_debug("%s: problem determining dst format. Set Default\n",
+			__func__);
+		pinfo->mipi.dst_format =
+			DSI_VIDEO_DST_FORMAT_RGB888;
+	}
+	pdest = of_get_property(np,
+		"qcom,mdss-dsi-panel-destination", NULL);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-underflow-color", &tmp);
+	pinfo->lcdc.underflow_clr = (!rc ? tmp : 0xff);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-border-color", &tmp);
+	pinfo->lcdc.border_clr = (!rc ? tmp : 0);
+	data = of_get_property(np, "qcom,mdss-dsi-panel-orientation", NULL);
+	if (data) {
+		pr_debug("panel orientation is %s\n", data);
+		if (!strcmp(data, "180"))
+			pinfo->panel_orientation = MDP_ROT_180;
+		else if (!strcmp(data, "hflip"))
+			pinfo->panel_orientation = MDP_FLIP_LR;
+		else if (!strcmp(data, "vflip"))
+			pinfo->panel_orientation = MDP_FLIP_UD;
+	}
+
+	/* backlight/brightness level ranges */
+	rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp);
+	pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-min-level", &tmp);
+	pinfo->bl_min = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-max-level", &tmp);
+	pinfo->bl_max = (!rc ? tmp : 255);
+	ctrl_pdata->bklt_max = pinfo->bl_max;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-interleave-mode", &tmp);
+	pinfo->mipi.interleave_mode = (!rc ? tmp : 0);
+
+	pinfo->mipi.vsync_enable = of_property_read_bool(np,
+		"qcom,mdss-dsi-te-check-enable");
+
+	/* simulated SW TE never uses the hardware TE pin */
+	if (pinfo->sim_panel_mode == SIM_SW_TE_MODE)
+		pinfo->mipi.hw_vsync_mode = false;
+	else
+		pinfo->mipi.hw_vsync_mode = of_property_read_bool(np,
+			"qcom,mdss-dsi-te-using-te-pin");
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-h-sync-pulse", &tmp);
+	pinfo->mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
+
+	/* low-power states during blanking intervals */
+	pinfo->mipi.hfp_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-hfp-power-mode");
+	pinfo->mipi.hsa_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-hsa-power-mode");
+	pinfo->mipi.hbp_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-hbp-power-mode");
+	pinfo->mipi.last_line_interleave_en = of_property_read_bool(np,
+		"qcom,mdss-dsi-last-line-interleave");
+	pinfo->mipi.bllp_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-bllp-power-mode");
+	pinfo->mipi.eof_bllp_power_stop = of_property_read_bool(
+		np, "qcom,mdss-dsi-bllp-eof-power-mode");
+	pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+	data = of_get_property(np, "qcom,mdss-dsi-traffic-mode", NULL);
+	if (data) {
+		if (!strcmp(data, "non_burst_sync_event"))
+			pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT;
+		else if (!strcmp(data, "burst_mode"))
+			pinfo->mipi.traffic_mode = DSI_BURST_MODE;
+	}
+	/* command-mode TE / DCS write settings */
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-te-dcs-command", &tmp);
+	pinfo->mipi.insert_dcs_cmd =
+			(!rc ? tmp : 1);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-wr-mem-continue", &tmp);
+	pinfo->mipi.wr_mem_continue =
+			(!rc ? tmp : 0x3c);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-wr-mem-start", &tmp);
+	pinfo->mipi.wr_mem_start =
+			(!rc ? tmp : 0x2c);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-te-pin-select", &tmp);
+	pinfo->mipi.te_sel =
+			(!rc ? tmp : 1);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-virtual-channel-id", &tmp);
+	pinfo->mipi.vc = (!rc ? tmp : 0);
+	pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	data = of_get_property(np, "qcom,mdss-dsi-color-order", NULL);
+	if (data) {
+		if (!strcmp(data, "rgb_swap_rbg"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RBG;
+		else if (!strcmp(data, "rgb_swap_bgr"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+		else if (!strcmp(data, "rgb_swap_brg"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BRG;
+		else if (!strcmp(data, "rgb_swap_grb"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GRB;
+		else if (!strcmp(data, "rgb_swap_gbr"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GBR;
+	}
+	/* individual data lane enables */
+	pinfo->mipi.data_lane0 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-0-state");
+	pinfo->mipi.data_lane1 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-1-state");
+	pinfo->mipi.data_lane2 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-2-state");
+	pinfo->mipi.data_lane3 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-3-state");
+
+	if (pinfo->mipi.data_lane0)
+		lanes++;
+	if (pinfo->mipi.data_lane1)
+		lanes++;
+	if (pinfo->mipi.data_lane2)
+		lanes++;
+	if (pinfo->mipi.data_lane3)
+		lanes++;
+	/*
+	 * needed to set default lanes during
+	 * resolution switch for bridge chips
+	 */
+	pinfo->mipi.default_lanes = lanes;
+
+	rc = mdss_panel_parse_display_timings(np, &ctrl_pdata->panel_data);
+	if (rc)
+		return rc;
+	rc = mdss_dsi_parse_hdr_settings(np, pinfo);
+	if (rc)
+		return rc;
+
+	pinfo->mipi.rx_eot_ignore = of_property_read_bool(np,
+		"qcom,mdss-dsi-rx-eot-ignore");
+	pinfo->mipi.tx_eot_append = of_property_read_bool(np,
+		"qcom,mdss-dsi-tx-eot-append");
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-stream", &tmp);
+	pinfo->mipi.stream = (!rc ? tmp : 0);
+
+	data = of_get_property(np, "qcom,mdss-dsi-panel-mode-gpio-state", NULL);
+	if (data) {
+		if (!strcmp(data, "high"))
+			pinfo->mode_gpio_state = MODE_GPIO_HIGH;
+		else if (!strcmp(data, "low"))
+			pinfo->mode_gpio_state = MODE_GPIO_LOW;
+	} else {
+		pinfo->mode_gpio_state = MODE_GPIO_NOT_VALID;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
+	pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
+
+	pinfo->mipi.lp11_init = of_property_read_bool(np,
+		"qcom,mdss-dsi-lp11-init");
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
+	pinfo->mipi.init_delay = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-post-init-delay", &tmp);
+	pinfo->mipi.post_init_delay = (!rc ? tmp : 0);
+
+	mdss_dsi_parse_trigger(np, &(pinfo->mipi.mdp_trigger),
+		"qcom,mdss-dsi-mdp-trigger");
+
+	mdss_dsi_parse_trigger(np, &(pinfo->mipi.dma_trigger),
+		"qcom,mdss-dsi-dma-trigger");
+
+	mdss_dsi_parse_reset_seq(np, pinfo->rst_seq, &(pinfo->rst_seq_len),
+		"qcom,mdss-dsi-reset-sequence");
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->off_cmds,
+		"qcom,mdss-dsi-off-command", "qcom,mdss-dsi-off-command-state");
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->idle_on_cmds,
+		"qcom,mdss-dsi-idle-on-command",
+		"qcom,mdss-dsi-idle-on-command-state");
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->idle_off_cmds,
+		"qcom,mdss-dsi-idle-off-command",
+		"qcom,mdss-dsi-idle-off-command-state");
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-idle-fps", &tmp);
+	pinfo->mipi.frame_rate_idle = (!rc ? tmp : 60);
+
+	rc = of_property_read_u32(np, "qcom,adjust-timer-wakeup-ms", &tmp);
+	pinfo->adjust_timer_delay_ms = (!rc ? tmp : 0);
+
+	pinfo->mipi.force_clk_lane_hs = of_property_read_bool(np,
+		"qcom,mdss-dsi-force-clock-lane-hs");
+
+	rc = mdss_dsi_parse_panel_features(np, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse panel features\n", __func__);
+		goto error;
+	}
+
+	mdss_dsi_parse_panel_horizintal_line_idle(np, ctrl_pdata);
+
+	mdss_dsi_parse_dfps_config(np, ctrl_pdata);
+
+	rc = mdss_panel_parse_dt_hdmi(np, ctrl_pdata);
+	if (rc)
+		goto error;
+
+	return 0;
+
+error:
+	return -EINVAL;
+}
+
+/*
+ * mdss_dsi_panel_init() - entry point for DSI panel setup from DT
+ * @node:       panel device-tree node
+ * @ctrl_pdata: DSI controller platform data
+ * @ndx:        controller index (currently unused in this function)
+ *
+ * Records the panel name, runs the full DT parse, initializes runtime
+ * state flags and installs the panel operation callbacks on @ctrl_pdata.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_dsi_panel_init(struct device_node *node,
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+	int ndx)
+{
+	int rc = 0;
+	static const char *panel_name;
+	struct mdss_panel_info *pinfo;
+
+	if (!node || !ctrl_pdata) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+	pinfo->panel_name[0] = '\0';
+	panel_name = of_get_property(node, "qcom,mdss-dsi-panel-name", NULL);
+	if (!panel_name) {
+		pr_info("%s:%d, Panel name not specified\n",
+						__func__, __LINE__);
+	} else {
+		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+		strlcpy(&pinfo->panel_name[0], panel_name, MDSS_MAX_PANEL_LEN);
+	}
+	rc = mdss_panel_parse_dt(node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s:%d panel dt parse failed\n", __func__, __LINE__);
+		return rc;
+	}
+
+	/* reset runtime state before handing the panel to the framework */
+	pinfo->dynamic_switch_pending = false;
+	pinfo->is_lpm_mode = false;
+	pinfo->esd_rdy = false;
+	pinfo->persist_mode = false;
+
+	/* install panel operation callbacks */
+	ctrl_pdata->on = mdss_dsi_panel_on;
+	ctrl_pdata->post_panel_on = mdss_dsi_post_panel_on;
+	ctrl_pdata->off = mdss_dsi_panel_off;
+	ctrl_pdata->low_power_config = mdss_dsi_panel_low_power_config;
+	ctrl_pdata->panel_data.set_backlight = mdss_dsi_panel_bl_ctrl;
+	ctrl_pdata->panel_data.apply_display_setting =
+			mdss_dsi_panel_apply_display_setting;
+	ctrl_pdata->switch_mode = mdss_dsi_panel_switch_mode;
+	ctrl_pdata->panel_data.get_idle = mdss_dsi_panel_get_idle_mode;
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy.c b/drivers/video/fbdev/msm/mdss_dsi_phy.c
new file mode 100644
index 0000000..456a8eb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy.c
@@ -0,0 +1,904 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdss_dsi_phy.h"
+
+/* Escape clock source rate and the MMSS CC pre-divider applied to it. */
+#define ESC_CLK_MHZ 192
+#define ESCCLK_MMSS_CC_PREDIV 10
+
+/* TLPX numerator (ns scale) and D-PHY analog delay constants. */
+#define TLPX_NUMER 1000
+#define TR_EOT 20
+#define TA_GO 3
+#define TA_SURE 0
+#define TA_GET 4
+
+/* MIPI D-PHY spec limits (ns) used to seed the timing recommendations. */
+#define CLK_PREPARE_SPEC_MIN 38
+#define CLK_PREPARE_SPEC_MAX 95
+#define CLK_TRAIL_SPEC_MIN 60
+#define HS_EXIT_SPEC_MIN 100
+#define HS_EXIT_RECO_MAX 255
+#define HS_RQST_SPEC_MIN 50
+#define CLK_ZERO_RECO_MAX1 511
+#define CLK_ZERO_RECO_MAX2 255
+
+/* No. of timing params for phy rev 2.0 */
+#define TIMING_PARAM_DLANE_COUNT 32
+#define TIMING_PARAM_CLK_COUNT 8
+
+/*
+ * struct timing_entry - one D-PHY timing parameter being computed.
+ * @mipi_min/@mipi_max: MIPI spec bounds for the parameter (ns).
+ * @rec_min/@rec_max: recommended register-value bounds after conversion.
+ * @rec: the chosen recommended value (may be out of range until validated).
+ * @program_value: the final 8-bit value written to the PHY registers,
+ *                 set by the validate-and-set helpers.
+ */
+struct timing_entry {
+	int32_t mipi_min;
+	int32_t mipi_max;
+	int32_t rec_min;
+	int32_t rec_max;
+	int32_t rec;
+	char program_value;
+};
+
+/*
+ * struct dsi_phy_timing - full set of D-PHY timing parameters computed for
+ * the clock lane (clk_*), data lanes (hs_*), bus turnaround (ta_*) and the
+ * clk pre/post counts consumed by the DSI controller.
+ */
+struct dsi_phy_timing {
+	struct timing_entry clk_prepare;
+	struct timing_entry clk_zero;
+	struct timing_entry clk_trail;
+	struct timing_entry hs_prepare;
+	struct timing_entry hs_zero;
+	struct timing_entry hs_trail;
+	struct timing_entry hs_rqst;
+	struct timing_entry hs_rqst_clk;
+	struct timing_entry hs_exit;
+	struct timing_entry ta_go;
+	struct timing_entry ta_sure;
+	struct timing_entry ta_get;
+	struct timing_entry clk_post;
+	struct timing_entry clk_pre;
+};
+
+/*
+ * struct dsi_phy_t_clk_param - clock inputs to the timing calculation.
+ * @bitclk_mbps: DSI bit clock in Mbps, derived from the panel timings.
+ * @escclk_numer/@escclk_denom: escape clock numerator/denominator.
+ * @tlpx_numer_ns: TLPX numerator in ns (TLPX_NUMER).
+ * @treot_ns: T_REOT allowance in ns (TR_EOT).
+ */
+struct dsi_phy_t_clk_param {
+	uint32_t bitclk_mbps;
+	uint32_t escclk_numer;
+	uint32_t escclk_denom;
+	uint32_t tlpx_numer_ns;
+	uint32_t treot_ns;
+};
+
+/*
+ * Commit a computed recommendation into the 8-bit program value, rejecting
+ * anything that does not fit in 8 bits (including negative values, whose
+ * sign bits trip the mask).
+ */
+static int mdss_dsi_phy_common_validate_and_set(struct timing_entry *te,
+	char const *te_name)
+{
+	if (!(te->rec & 0xffffff00)) {
+		pr_debug("%s program value=%d\n", te_name, te->rec);
+		te->program_value = te->rec;
+		return 0;
+	}
+
+	/* Output value can only be 8 bits */
+	pr_err("Incorrect %s calculations - %d\n", te_name, te->rec);
+	return -EINVAL;
+}
+
+/*
+ * Like the common helper, but a negative recommendation is clamped to a
+ * program value of zero instead of being treated as an error.
+ */
+static int mdss_dsi_phy_validate_and_set(struct timing_entry *te,
+	char const *te_name)
+{
+	if (te->rec >= 0)
+		return mdss_dsi_phy_common_validate_and_set(te, te_name);
+
+	te->program_value = 0;
+	return 0;
+}
+
+/*
+ * mdss_dsi_phy_initialize_defaults() - seed the timing entries with MIPI
+ * spec limits and the phy-revision-specific clk_prepare recommendation
+ * bounds derived from the bit clock.
+ * @t_clk: clock inputs (bit clock, TLPX numerator, ...).
+ * @t_param: timing table to initialize.
+ * @phy_rev: DSI_PHY_REV_10 or DSI_PHY_REV_20.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range phy revision.
+ */
+static int mdss_dsi_phy_initialize_defaults(struct dsi_phy_t_clk_param *t_clk,
+	struct dsi_phy_timing *t_param, u32 phy_rev)
+{
+
+	if (phy_rev <= DSI_PHY_REV_UNKNOWN || phy_rev >= DSI_PHY_REV_MAX) {
+		pr_err("Invalid PHY %d revision\n", phy_rev);
+		return -EINVAL;
+	}
+
+	/* MIPI spec minimums/maximums common to both revisions (ns). */
+	t_param->clk_prepare.mipi_min = CLK_PREPARE_SPEC_MIN;
+	t_param->clk_prepare.mipi_max = CLK_PREPARE_SPEC_MAX;
+	t_param->clk_trail.mipi_min = CLK_TRAIL_SPEC_MIN;
+	t_param->hs_exit.mipi_min = HS_EXIT_SPEC_MIN;
+	t_param->hs_exit.rec_max = HS_EXIT_RECO_MAX;
+
+	if (phy_rev == DSI_PHY_REV_20) {
+		/* Rev 2.0 scales by bitclk over 8*TLPX. */
+		t_param->clk_prepare.rec_min =
+			DIV_ROUND_UP((t_param->clk_prepare.mipi_min
+				* t_clk->bitclk_mbps),
+				(8 * t_clk->tlpx_numer_ns));
+		t_param->clk_prepare.rec_max =
+			rounddown(mult_frac(t_param->clk_prepare.mipi_max
+				* t_clk->bitclk_mbps, 1,
+				(8 * t_clk->tlpx_numer_ns)), 1);
+		t_param->hs_rqst.mipi_min = HS_RQST_SPEC_MIN;
+		t_param->hs_rqst_clk.mipi_min = HS_RQST_SPEC_MIN;
+	} else if (phy_rev == DSI_PHY_REV_10) {
+		/* Rev 1.0 scales by bitclk over TLPX with a -2 offset. */
+		t_param->clk_prepare.rec_min =
+			(DIV_ROUND_UP(t_param->clk_prepare.mipi_min *
+			t_clk->bitclk_mbps,
+			t_clk->tlpx_numer_ns)) - 2;
+		t_param->clk_prepare.rec_max =
+			(DIV_ROUND_UP(t_param->clk_prepare.mipi_max *
+			t_clk->bitclk_mbps,
+			t_clk->tlpx_numer_ns)) - 2;
+	}
+
+	pr_debug("clk_prepare: min=%d, max=%d\n", t_param->clk_prepare.rec_min,
+		t_param->clk_prepare.rec_max);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_phy_calc_param_phy_rev_2() - compute all PHY rev 2.0 (14nm)
+ * clock-lane and data-lane timing values from the bit clock.
+ * @t_clk: clock inputs.
+ * @t_param: timing table, pre-seeded by mdss_dsi_phy_initialize_defaults().
+ *
+ * Intermediate math is done in Q20 fixed point (multiplier = BIT(20)) so
+ * that sub-nanosecond fractions survive the divisions; each parameter is
+ * then validated into its 8-bit program value.
+ *
+ * Return: 0 on success, -EINVAL if any value fails 8-bit validation.
+ */
+static int mdss_dsi_phy_calc_param_phy_rev_2(struct dsi_phy_t_clk_param *t_clk,
+	struct dsi_phy_timing *t_param)
+{
+	/* recommended fraction for PHY REV 2.0 */
+	u32 const min_prepare_frac = 50;
+	u32 const hs_exit_min_frac = 10;
+	u32 const phy_timing_frac = 30;
+	u32 const hs_zero_min_frac = 10;
+	u32 const clk_zero_min_frac = 2;
+	int tmp;
+	int t_hs_prep_actual;
+	int teot_clk_lane, teot_data_lane;
+	u64 dividend;
+	u64 temp, rc = 0;
+	u64 multiplier = BIT(20);
+	u64 temp_multiple;
+	s64 mipi_min, mipi_max, mipi_max_tr, rec_min, rec_prog;
+	s64 clk_prep_actual;
+	s64 actual_intermediate;
+	s32 actual_frac;
+	s64 rec_temp1, rec_temp2, rec_temp3;
+	int tclk_prepare_program, dsiphy_halfbyteclk_en, tclk_zero_program;
+	int ths_request_clk_prepare, hstx_prepare_delay, temp_rec_min;
+	s64 tclk_prepare_theoretical, tclk_zero_theoretical;
+	s64 ths_request_theoretical;
+
+	/* clk_prepare calculations */
+	dividend = ((t_param->clk_prepare.rec_max
+			- t_param->clk_prepare.rec_min)
+			* min_prepare_frac * multiplier);
+	temp = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t_param->clk_prepare.rec_min * multiplier);
+	t_param->clk_prepare.rec = div_s64(temp, multiplier);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_prepare,
+		"clk prepare");
+	if (rc)
+		goto error;
+
+	/* clk_prepare theoretical value */
+	temp_multiple = (8 * t_param->clk_prepare.program_value
+			* t_clk->tlpx_numer_ns * multiplier);
+	actual_intermediate = div_s64(temp_multiple, t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, t_clk->bitclk_mbps, &actual_frac);
+	clk_prep_actual =
+		div_s64((actual_intermediate + actual_frac), multiplier);
+
+	pr_debug("CLK PREPARE: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d",
+		t_param->clk_prepare.mipi_min,
+		t_param->clk_prepare.mipi_max,
+		t_param->clk_prepare.rec_min,
+		t_param->clk_prepare.rec_max);
+	pr_debug("prog value = %d, actual=%lld\n",
+		t_param->clk_prepare.rec, clk_prep_actual);
+
+	/* clk zero calculations */
+	/* Mipi spec min */
+	mipi_min = (300 * multiplier) - (actual_intermediate + actual_frac);
+	t_param->clk_zero.mipi_min = div_s64(mipi_min, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64(mipi_min * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+	rec_min = div_s64(rec_temp3, multiplier) - 3;
+	t_param->clk_zero.rec_min = rec_min;
+
+	/* recommended max: 9-bit range when the min overflows 8 bits */
+	t_param->clk_zero.rec_max =
+		((t_param->clk_zero.rec_min > 255) ? 511 : 255);
+
+	/* Programmed value */
+	t_param->clk_zero.rec = DIV_ROUND_UP(
+		(t_param->clk_zero.rec_max - t_param->clk_zero.rec_min)
+		* clk_zero_min_frac
+		+ (t_param->clk_zero.rec_min * 100), 100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_zero,
+		"clk zero");
+	if (rc)
+		goto error;
+
+	pr_debug("CLK ZERO: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->clk_zero.mipi_min, t_param->clk_zero.mipi_max,
+		t_param->clk_zero.rec_min, t_param->clk_zero.rec_max,
+		t_param->clk_zero.rec);
+
+	/* clk trail calculations */
+	temp_multiple = div_s64(12 * multiplier * t_clk->tlpx_numer_ns,
+		t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+
+	mipi_max_tr = 105 * multiplier + (temp_multiple + actual_frac);
+	teot_clk_lane = div_s64(mipi_max_tr, multiplier);
+
+	mipi_max = mipi_max_tr - (t_clk->treot_ns * multiplier);
+
+	t_param->clk_trail.mipi_max = div_s64(mipi_max, multiplier);
+
+	/* recommended min */
+	temp_multiple = div_s64(t_param->clk_trail.mipi_min * multiplier *
+		t_clk->bitclk_mbps, t_clk->tlpx_numer_ns);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	rec_temp1 = temp_multiple + actual_frac + 3 * multiplier;
+	rec_temp2 = div_s64(rec_temp1, 8);
+	rec_temp3 = roundup(rec_temp2, multiplier);
+
+	t_param->clk_trail.rec_min = div_s64(rec_temp3, multiplier);
+
+	/* recommended max */
+	rec_temp1 = div_s64(mipi_max * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 + 3 * multiplier;
+	rec_temp3 = rec_temp2 / 8;
+	t_param->clk_trail.rec_max = div_s64(rec_temp3, multiplier);
+
+	/* Programmed value */
+	t_param->clk_trail.rec = DIV_ROUND_UP(
+		(t_param->clk_trail.rec_max - t_param->clk_trail.rec_min)
+		* phy_timing_frac
+		+ (t_param->clk_trail.rec_min * 100), 100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_trail,
+		"clk trail");
+	if (rc)
+		goto error;
+
+	pr_debug("CLK TRAIL: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->clk_trail.mipi_min,
+		t_param->clk_trail.mipi_max,
+		t_param->clk_trail.rec_min,
+		t_param->clk_trail.rec_max,
+		t_param->clk_trail.rec);
+
+	/* hs prepare calculations */
+	/* mipi min */
+	temp_multiple = div_s64(4 * t_clk->tlpx_numer_ns * multiplier,
+		t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	mipi_min = 40 * multiplier + (temp_multiple + actual_frac);
+	t_param->hs_prepare.mipi_min = div_s64(mipi_min, multiplier);
+
+	/* mipi max */
+	temp_multiple = div_s64(6 * t_clk->tlpx_numer_ns * multiplier,
+		t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	mipi_max = 85 * multiplier + temp_multiple;
+	t_param->hs_prepare.mipi_max = div_s64(mipi_max, multiplier);
+
+	/* recommended min */
+	temp_multiple = div_s64(mipi_min * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	rec_temp1 = roundup((temp_multiple + actual_frac)/8, multiplier);
+	t_param->hs_prepare.rec_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended max */
+	temp_multiple = div_s64(mipi_max * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	rec_temp2 = rounddown((temp_multiple + actual_frac)/8, multiplier);
+	t_param->hs_prepare.rec_max = div_s64(rec_temp2, multiplier);
+
+	/* prog value */
+	dividend = (rec_temp2 - rec_temp1) * min_prepare_frac;
+	temp = roundup(div_u64(dividend, 100), multiplier);
+	rec_prog = temp + rec_temp1;
+	t_param->hs_prepare.rec = div_s64(rec_prog, multiplier);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_prepare,
+		"HS prepare");
+	if (rc)
+		goto error;
+
+	/* theoretical Value */
+	temp_multiple = div_s64(8 * rec_prog * t_clk->tlpx_numer_ns,
+		t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	t_hs_prep_actual = div_s64(temp_multiple, multiplier);
+	pr_debug("HS PREPARE: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d, actual=%d\n",
+		t_param->hs_prepare.mipi_min,
+		t_param->hs_prepare.mipi_max,
+		t_param->hs_prepare.rec_min,
+		t_param->hs_prepare.rec_max,
+		t_param->hs_prepare.rec, t_hs_prep_actual);
+
+	/* hs zero calculations */
+	/* mipi min */
+	mipi_min = div_s64(10 * t_clk->tlpx_numer_ns * multiplier,
+		t_clk->bitclk_mbps);
+	rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+	t_param->hs_zero.mipi_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64(rec_temp1 * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup((rec_temp2/8), multiplier);
+	rec_min = rec_temp3 - (3 * multiplier);
+	t_param->hs_zero.rec_min = div_s64(rec_min, multiplier);
+
+	t_param->hs_zero.rec_max =
+		((t_param->hs_zero.rec_min > 255) ? 511 : 255);
+
+	/* prog value */
+	t_param->hs_zero.rec = DIV_ROUND_UP(
+		(t_param->hs_zero.rec_max - t_param->hs_zero.rec_min)
+		* hs_zero_min_frac + (t_param->hs_zero.rec_min * 100),
+		100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_zero, "HS zero");
+	if (rc)
+		goto error;
+
+	pr_debug("HS ZERO: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->hs_zero.mipi_min, t_param->hs_zero.mipi_max,
+		t_param->hs_zero.rec_min, t_param->hs_zero.rec_max,
+		t_param->hs_zero.rec);
+
+	/* hs_trail calculations */
+	teot_data_lane = teot_clk_lane;
+	t_param->hs_trail.mipi_min = 60 +
+		mult_frac(t_clk->tlpx_numer_ns, 4, t_clk->bitclk_mbps);
+	t_param->hs_trail.mipi_max = teot_clk_lane - t_clk->treot_ns;
+	t_param->hs_trail.rec_min = DIV_ROUND_UP(
+		((t_param->hs_trail.mipi_min * t_clk->bitclk_mbps)
+		+ 3 * t_clk->tlpx_numer_ns), (8 * t_clk->tlpx_numer_ns));
+	tmp = ((t_param->hs_trail.mipi_max * t_clk->bitclk_mbps)
+		+ (3 * t_clk->tlpx_numer_ns));
+	t_param->hs_trail.rec_max = tmp/(8 * t_clk->tlpx_numer_ns);
+	tmp = DIV_ROUND_UP((t_param->hs_trail.rec_max
+		- t_param->hs_trail.rec_min) * phy_timing_frac,
+		100);
+	t_param->hs_trail.rec = tmp + t_param->hs_trail.rec_min;
+
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_trail,
+		"HS trail");
+	if (rc)
+		goto error;
+
+	pr_debug("HS TRAIL: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->hs_trail.mipi_min, t_param->hs_trail.mipi_max,
+		t_param->hs_trail.rec_min, t_param->hs_trail.rec_max,
+		t_param->hs_trail.rec);
+
+	/* hs rqst calculations for Data lane */
+	t_param->hs_rqst.rec = DIV_ROUND_UP(
+		(t_param->hs_rqst.mipi_min * t_clk->bitclk_mbps)
+		- (8 * t_clk->tlpx_numer_ns), (8 * t_clk->tlpx_numer_ns));
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_rqst, "HS rqst");
+	if (rc)
+		goto error;
+
+	pr_debug("HS RQST-DATA: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->hs_rqst.mipi_min, t_param->hs_rqst.mipi_max,
+		t_param->hs_rqst.rec_min, t_param->hs_rqst.rec_max,
+		t_param->hs_rqst.rec);
+
+	/* hs exit calculations */
+	t_param->hs_exit.rec_min = DIV_ROUND_UP(
+		(t_param->hs_exit.mipi_min * t_clk->bitclk_mbps),
+		(8 * t_clk->tlpx_numer_ns)) - 1;
+	t_param->hs_exit.rec = DIV_ROUND_UP(
+		(t_param->hs_exit.rec_max - t_param->hs_exit.rec_min)
+		* hs_exit_min_frac
+		+ (t_param->hs_exit.rec_min * 100), 100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_exit, "HS exit");
+	if (rc)
+		goto error;
+
+	pr_debug("HS EXIT: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->hs_exit.mipi_min, t_param->hs_exit.mipi_max,
+		t_param->hs_exit.rec_min, t_param->hs_exit.rec_max,
+		t_param->hs_exit.rec);
+
+	/* hs rqst calculations for Clock lane */
+	t_param->hs_rqst_clk.rec = DIV_ROUND_UP(
+		(t_param->hs_rqst_clk.mipi_min * t_clk->bitclk_mbps)
+		- (8 * t_clk->tlpx_numer_ns), (8 * t_clk->tlpx_numer_ns));
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_rqst_clk,
+		"HS rqst clk");
+	if (rc)
+		goto error;
+
+	pr_debug("HS RQST-CLK: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+		t_param->hs_rqst_clk.mipi_min,
+		t_param->hs_rqst_clk.mipi_max,
+		t_param->hs_rqst_clk.rec_min,
+		t_param->hs_rqst_clk.rec_max,
+		t_param->hs_rqst_clk.rec);
+
+	/* clk post and pre value calculation */
+	tmp = ((60 * (int)t_clk->bitclk_mbps) + (52 * 1000) - (43 * 1000));
+
+	/* clk_post minimum value can be a negative number */
+	if (tmp % (8 * 1000) != 0) {
+		if (tmp < 0)
+			tmp = (tmp / (8 * 1000)) - 1;
+		else
+			tmp = (tmp / (8 * 1000)) + 1;
+	} else {
+		tmp = tmp / (8 * 1000);
+	}
+	tmp = tmp - 1;
+
+	t_param->clk_post.program_value =
+		DIV_ROUND_UP((63 - tmp) * hs_exit_min_frac, 100);
+	t_param->clk_post.program_value += tmp;
+
+	if (t_param->clk_post.program_value & 0xffffff00) {
+		pr_err("Invalid clk post calculations - %d\n",
+			t_param->clk_post.program_value);
+		goto error;
+	}
+
+	t_param->clk_post.rec_min = tmp;
+
+	if (t_param->hs_rqst_clk.rec < 0)
+		ths_request_clk_prepare = 0;
+	else
+		ths_request_clk_prepare = t_param->hs_rqst_clk.program_value;
+
+	ths_request_theoretical = (ths_request_clk_prepare + 1);
+
+	tclk_prepare_program = t_param->clk_prepare.program_value;
+
+	dsiphy_halfbyteclk_en = 0;
+
+	/* Low bit clocks need extra HSTX prepare delay. */
+	if (t_clk->bitclk_mbps > 100)
+		hstx_prepare_delay = 0;
+	else
+		hstx_prepare_delay = 3;
+
+	tclk_prepare_theoretical = ((tclk_prepare_program * 8)
+		+ (dsiphy_halfbyteclk_en * 4)
+		+ (hstx_prepare_delay * 2));
+
+	tclk_zero_program = t_param->clk_zero.program_value;
+
+	tclk_zero_theoretical = ((tclk_zero_program + 3) * 8) + 11
+		- (hstx_prepare_delay * 2);
+
+	temp_rec_min = (8 * 1000) + (tclk_prepare_theoretical * 1000)
+		+ (tclk_zero_theoretical * 1000)
+		+ (ths_request_theoretical * 8 * 1000);
+
+	t_param->clk_pre.rec_min = DIV_ROUND_UP(temp_rec_min, 8 * 1000) - 1;
+
+	if (t_param->clk_pre.rec_min > 63) {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((2 * 63 - t_param->clk_pre.rec_min)
+			* hs_exit_min_frac, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	} else {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((63 - t_param->clk_pre.rec_min)
+			* hs_exit_min_frac, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	}
+
+	if (t_param->clk_pre.program_value & 0xffffff00) {
+		pr_err("Invalid clk pre calculations - %d\n",
+			t_param->clk_pre.program_value);
+		goto error;
+	}
+	pr_debug("t_clk_post: %d t_clk_pre: %d\n",
+		t_param->clk_post.program_value,
+		t_param->clk_pre.program_value);
+
+	pr_debug("teot_clk=%d, data=%d\n", teot_clk_lane, teot_data_lane);
+	return 0;
+
+error:
+	return -EINVAL;
+}
+
+/*
+ * mdss_dsi_phy_calc_hs_param_phy_rev_1() - compute PHY rev 1.0 data-lane
+ * (HS) timings and the clk pre/post counts.
+ * @t_clk: clock inputs.
+ * @t_param: timing table; the clk_* entries must already be computed by
+ *           mdss_dsi_phy_calc_param_phy_rev_1().
+ *
+ * Return: 0 on success, -EINVAL if any computed value fails validation.
+ */
+static int mdss_dsi_phy_calc_hs_param_phy_rev_1(
+		struct dsi_phy_t_clk_param *t_clk,
+		struct dsi_phy_timing *t_param)
+{
+	int percent_min = 10;
+	int percent_allowable_phy = 0;
+	int percent_min_ths;
+	int tmp, rc = 0;
+	int tclk_prepare_theoretical, tclk_zero_theoretical;
+	int tlpx, ths_exit_theoretical;
+
+	/* Margin fractions depend on how fast the link runs. */
+	if (t_clk->bitclk_mbps > 1200)
+		percent_min_ths = 15;
+	else
+		percent_min_ths = 10;
+
+	if (t_clk->bitclk_mbps > 180)
+		percent_allowable_phy = 10;
+	else
+		percent_allowable_phy = 40;
+
+	t_param->hs_prepare.rec_min =
+		DIV_ROUND_UP((40 * t_clk->bitclk_mbps)
+		+ (4 * t_clk->tlpx_numer_ns), t_clk->tlpx_numer_ns) - 2;
+	t_param->hs_prepare.rec_max =
+		DIV_ROUND_UP((85 * t_clk->bitclk_mbps)
+		+ (6 * t_clk->tlpx_numer_ns), t_clk->tlpx_numer_ns) - 2;
+	tmp = DIV_ROUND_UP((t_param->hs_prepare.rec_max
+		- t_param->hs_prepare.rec_min) * percent_min_ths, 100);
+	tmp += t_param->hs_prepare.rec_min;
+	/* Rev 1.0 register values must be even, hence the & ~0x1. */
+	t_param->hs_prepare.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_prepare, "HS prepare");
+	if (rc)
+		goto error;
+
+	tmp = (t_param->hs_prepare.program_value / 2) + 1;
+	t_param->hs_zero.rec_min = DIV_ROUND_UP((145 * t_clk->bitclk_mbps)
+		+ ((10 - (2 * (tmp + 1))) * 1000), 1000) - 2;
+	t_param->hs_zero.rec_max = 255;
+	tmp = DIV_ROUND_UP((t_param->hs_zero.rec_max
+		- t_param->hs_zero.rec_min) * percent_min, 100);
+	tmp += t_param->hs_zero.rec_min;
+	t_param->hs_zero.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_zero, "HS zero");
+	if (rc)
+		goto error;
+
+	t_param->hs_trail.rec_min = DIV_ROUND_UP((60 * t_clk->bitclk_mbps)
+		+ 4000, 1000) - 2;
+	t_param->hs_trail.rec_max = DIV_ROUND_UP((105 - t_clk->treot_ns)
+		* t_clk->bitclk_mbps + 12000, 1000) - 2;
+	tmp = DIV_ROUND_UP((t_param->hs_trail.rec_max
+		- t_param->hs_trail.rec_min) * percent_allowable_phy, 100);
+	tmp += t_param->hs_trail.rec_min;
+	t_param->hs_trail.rec = tmp & ~0x1;
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_trail, "HS trail");
+	if (rc)
+		goto error;
+
+	t_param->hs_exit.rec_min = DIV_ROUND_UP(100 * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns) - 2;
+	t_param->hs_exit.rec_max = 255;
+	tmp = DIV_ROUND_UP((t_param->hs_exit.rec_max
+		- t_param->hs_exit.rec_min) * percent_min, 100);
+	tmp += t_param->hs_exit.rec_min;
+	t_param->hs_exit.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_exit, "HS exit");
+	if (rc)
+		goto error;
+
+	/* clk post and pre value calculation */
+	ths_exit_theoretical = (t_param->hs_exit.program_value / 2) + 1;
+	tmp = ((60 * (int)t_clk->bitclk_mbps) + (52 * 1000)
+		- (24 * 1000) - (ths_exit_theoretical * 2 * 1000));
+	/* clk_post minimum value can be a negative number */
+	if (tmp % (8 * 1000) != 0) {
+		if (tmp < 0)
+			tmp = (tmp / (8 * 1000)) - 1;
+		else
+			tmp = (tmp / (8 * 1000)) + 1;
+	} else {
+		tmp = tmp / (8 * 1000);
+	}
+	tmp = tmp - 1;
+
+	t_param->clk_post.program_value =
+		DIV_ROUND_UP((63 - tmp) * percent_min, 100);
+	t_param->clk_post.program_value += tmp;
+
+	if (t_param->clk_post.program_value & 0xffffff00) {
+		pr_err("Invalid clk post calculations - %d\n",
+			t_param->clk_post.program_value);
+		goto error;
+	}
+
+	t_param->clk_post.rec_min = tmp;
+
+	tclk_prepare_theoretical = (t_param->clk_prepare.program_value / 2) + 1;
+	tclk_zero_theoretical = (t_param->clk_zero.program_value / 2) + 1;
+	tlpx = 10000/t_clk->escclk_numer;
+
+	t_param->clk_pre.rec_min =
+		DIV_ROUND_UP((tlpx * t_clk->bitclk_mbps) + (8 * 1000)
+		+ (tclk_prepare_theoretical * 2 * 1000)
+		+ (tclk_zero_theoretical * 2 * 1000), 8 * 1000) - 1;
+	if (t_param->clk_pre.rec_min > 63) {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((2 * 63 - t_param->clk_pre.rec_min)
+			* percent_min, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	} else {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((63 - t_param->clk_pre.rec_min)
+			* percent_min, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	}
+
+	if (t_param->clk_pre.program_value & 0xffffff00) {
+		pr_err("Invalid clk pre calculations - %d\n",
+			t_param->clk_pre.program_value);
+		goto error;
+	}
+	pr_debug("t_clk_post: %d t_clk_pre: %d\n",
+		t_param->clk_post.program_value,
+		t_param->clk_pre.program_value);
+
+	return 0;
+
+error:
+	return -EINVAL;
+
+}
+
+/*
+ * mdss_dsi_phy_calc_param_phy_rev_1() - compute PHY rev 1.0 (20nm/28nm)
+ * clock-lane timings, then chain into the HS (data-lane) calculation.
+ * @t_clk: clock inputs.
+ * @t_param: timing table, pre-seeded by mdss_dsi_phy_initialize_defaults().
+ *
+ * Return: 0 on success, negative error code from validation or from the
+ * HS-parameter calculation (note that the error label falls through, so
+ * rc is returned in all cases).
+ */
+static int mdss_dsi_phy_calc_param_phy_rev_1(struct dsi_phy_t_clk_param *t_clk,
+	struct dsi_phy_timing *t_param)
+{
+	int percent_allowable_phy = 0;
+	int percent_min_t_clk = 10;
+	int tmp, rc = 0;
+	int clk_prep_actual;
+	int teot_clk_lane;
+	u32 temp = 0;
+
+	if (t_clk->bitclk_mbps > 180)
+		percent_allowable_phy = 10;
+	else
+		percent_allowable_phy = 40;
+
+	tmp = DIV_ROUND_UP((t_param->clk_prepare.rec_max -
+		t_param->clk_prepare.rec_min) * percent_min_t_clk, 100);
+	tmp += t_param->clk_prepare.rec_min;
+
+	/* Rev 1.0 register values must be even, hence the & ~0x1. */
+	t_param->clk_prepare.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_prepare,
+		"clk prepare");
+	if (rc)
+		goto error;
+
+	/* Actual clk_prepare time (ns) implied by the programmed value. */
+	clk_prep_actual = 2 * ((t_param->clk_prepare.program_value
+		/ 2) + 1) * t_clk->tlpx_numer_ns;
+	clk_prep_actual /= t_clk->bitclk_mbps;
+
+	tmp = t_clk->bitclk_mbps * t_clk->escclk_denom
+		/ t_clk->escclk_numer;
+	t_param->hs_rqst.rec = tmp;
+	if (!(tmp & 0x1))
+		t_param->hs_rqst.rec -= 2;
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_rqst, "HS rqst");
+	if (rc)
+		goto error;
+
+	/*
+	 * NOTE(review): a negative rec already fails the common validation
+	 * above (sign bits trip the 8-bit mask), so this clamp looks
+	 * unreachable — confirm before relying on it.
+	 */
+	if (t_param->hs_rqst.program_value < 0)
+		t_param->hs_rqst.program_value = 0;
+
+	/* t_clk_zero calculation */
+	t_param->clk_zero.mipi_min = (300 - clk_prep_actual);
+	t_param->clk_zero.rec_min = (DIV_ROUND_UP(t_param->clk_zero.mipi_min
+		* t_clk->bitclk_mbps, t_clk->tlpx_numer_ns)) - 2;
+
+	if (t_param->clk_zero.rec_min > 255) {
+		t_param->clk_zero.rec_max = CLK_ZERO_RECO_MAX1;
+		t_param->clk_zero.rec =
+			DIV_ROUND_UP(t_param->clk_zero.rec_min * 10
+			+ (t_param->clk_zero.rec_min * 100), 100);
+	} else {
+		t_param->clk_zero.rec_max = CLK_ZERO_RECO_MAX2;
+		temp = t_param->clk_zero.rec_max - t_param->clk_zero.rec_min;
+		t_param->clk_zero.rec = DIV_ROUND_UP(temp * 10
+			+ (t_param->clk_zero.rec_min * 100), 100);
+	}
+
+	t_param->clk_zero.rec &= ~0x1;
+
+	/* Pad clk_zero so rqst + zero + prepare is a multiple of 8. */
+	if (((t_param->hs_rqst.rec + t_param->clk_zero.rec +
+		t_param->clk_prepare.rec) % 8) != 0)
+		t_param->clk_zero.rec +=
+			(8 - ((t_param->hs_rqst.rec + t_param->clk_zero.rec +
+			t_param->clk_prepare.rec) % 8));
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_zero,
+		"clk zero");
+	if (rc)
+		goto error;
+
+	pr_debug("hs_rqst.rec: %d clk_zero.rec: %d clk_prepare.rec: %d\n",
+		t_param->hs_rqst.rec, t_param->clk_zero.rec,
+		t_param->clk_prepare.rec);
+	teot_clk_lane = 105 + (12 * t_clk->tlpx_numer_ns
+		/ t_clk->bitclk_mbps);
+	t_param->clk_trail.mipi_max = teot_clk_lane - t_clk->treot_ns;
+	t_param->clk_trail.rec_min = DIV_ROUND_UP(t_param->clk_trail.mipi_min *
+		t_clk->bitclk_mbps, t_clk->tlpx_numer_ns) - 2;
+	t_param->clk_trail.rec_max = DIV_ROUND_UP(t_param->clk_trail.mipi_max *
+		t_clk->bitclk_mbps, t_clk->tlpx_numer_ns) - 2;
+
+	tmp = DIV_ROUND_UP((t_param->clk_trail.rec_max -
+		t_param->clk_trail.rec_min) * percent_allowable_phy, 100);
+	tmp += t_param->clk_trail.rec_min;
+	t_param->clk_trail.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->clk_trail, "clk trail");
+	if (rc)
+		goto error;
+
+	rc = mdss_dsi_phy_calc_hs_param_phy_rev_1(t_clk, t_param);
+	if (rc)
+		pr_err("Invalid HS param calculations\n");
+
+error:
+	return rc;
+}
+
+/*
+ * mdss_dsi_phy_update_timing_param() - copy rev 1.0 program values into the
+ * 12-entry PHY timing register table and the t_clk_pre/post fields.
+ * @pinfo: panel info whose mipi.dsi_phy_db timings are written.
+ * @t_param: fully computed timing table.
+ */
+static void mdss_dsi_phy_update_timing_param(struct mdss_panel_info *pinfo,
+	struct dsi_phy_timing *t_param)
+{
+	struct mdss_dsi_phy_ctrl *reg;
+
+	reg = &(pinfo->mipi.dsi_phy_db);
+
+	pinfo->mipi.t_clk_post = t_param->clk_post.program_value;
+	pinfo->mipi.t_clk_pre = t_param->clk_pre.program_value;
+
+	/* clk_zero over 255 spills its 9th bit into timing[3]. */
+	if (t_param->clk_zero.rec > 255) {
+		reg->timing[0] = t_param->clk_zero.program_value - 255;
+		reg->timing[3] = 1;
+	} else {
+		reg->timing[0] = t_param->clk_zero.program_value;
+		reg->timing[3] = 0;
+	}
+	reg->timing[1] = t_param->clk_trail.program_value;
+	reg->timing[2] = t_param->clk_prepare.program_value;
+	reg->timing[4] = t_param->hs_exit.program_value;
+	reg->timing[5] = t_param->hs_zero.program_value;
+	reg->timing[6] = t_param->hs_prepare.program_value;
+	reg->timing[7] = t_param->hs_trail.program_value;
+	reg->timing[8] = t_param->hs_rqst.program_value;
+	/* Fixed turnaround values: TA_SURE in the high half-word, TA_GO low. */
+	reg->timing[9] = (TA_SURE << 16) + TA_GO;
+	reg->timing[10] = TA_GET;
+	reg->timing[11] = 0;
+
+	pr_debug("[%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x]\n",
+		reg->timing[0], reg->timing[1], reg->timing[2], reg->timing[3],
+		reg->timing[4], reg->timing[5], reg->timing[6], reg->timing[7],
+		reg->timing[8], reg->timing[9], reg->timing[10],
+		reg->timing[11]);
+}
+
+/*
+ * mdss_dsi_phy_update_timing_param_rev_2() - fill the rev 2.0 (8996-style)
+ * per-lane timing table: 8 registers for each of the 4 data lanes, then 8
+ * for the clock lane.
+ * @pinfo: panel info whose mipi.dsi_phy_db timing_8996 table is written.
+ * @t_param: fully computed timing table.
+ */
+static void mdss_dsi_phy_update_timing_param_rev_2(
+		struct mdss_panel_info *pinfo,
+		struct dsi_phy_timing *t_param)
+{
+	struct mdss_dsi_phy_ctrl *reg;
+	int i = 0;
+
+	reg = &(pinfo->mipi.dsi_phy_db);
+
+	pinfo->mipi.t_clk_post = t_param->clk_post.program_value;
+	pinfo->mipi.t_clk_pre = t_param->clk_pre.program_value;
+
+	/* Data lanes: identical 8-register group repeated per lane. */
+	for (i = 0; i < TIMING_PARAM_DLANE_COUNT; i += 8) {
+		reg->timing_8996[i] = t_param->hs_exit.program_value;
+		reg->timing_8996[i + 1] = t_param->hs_zero.program_value;
+		reg->timing_8996[i + 2] = t_param->hs_prepare.program_value;
+		reg->timing_8996[i + 3] = t_param->hs_trail.program_value;
+		reg->timing_8996[i + 4] = t_param->hs_rqst.program_value;
+		/* Fixed TA/GET/misc values for this PHY revision. */
+		reg->timing_8996[i + 5] = 0x3;
+		reg->timing_8996[i + 6] = 0x4;
+		reg->timing_8996[i + 7] = 0xA0;
+	}
+
+	/* Clock lane: same layout but with the clk_* program values. */
+	for (i = TIMING_PARAM_DLANE_COUNT;
+		i < TIMING_PARAM_DLANE_COUNT + TIMING_PARAM_CLK_COUNT;
+		i += 8) {
+		reg->timing_8996[i] = t_param->hs_exit.program_value;
+		reg->timing_8996[i + 1] = t_param->clk_zero.program_value;
+		reg->timing_8996[i + 2] = t_param->clk_prepare.program_value;
+		reg->timing_8996[i + 3] = t_param->clk_trail.program_value;
+		reg->timing_8996[i + 4] = t_param->hs_rqst_clk.program_value;
+		reg->timing_8996[i + 5] = 0x3;
+		reg->timing_8996[i + 6] = 0x4;
+		reg->timing_8996[i + 7] = 0xA0;
+	}
+}
+
+/*
+ * mdss_dsi_phy_calc_timing_param() - derive the DSI bit clock from the
+ * panel timings and compute/program the PHY timing table for the given
+ * PHY revision.
+ * @pinfo: panel info; provides timings and receives the computed values.
+ * @phy_rev: DSI_PHY_REV_10 or DSI_PHY_REV_20.
+ * @frate_hz: frame rate to calculate the timings for.
+ *
+ * Return: 0 on success, -EINVAL on bad input or unsupported revision, or
+ * the error from the per-revision calculation.
+ */
+int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
+	u32 frate_hz)
+{
+	struct dsi_phy_t_clk_param t_clk;
+	struct dsi_phy_timing t_param;
+	int hsync_period;
+	int vsync_period;
+	unsigned long inter_num;
+	uint32_t lane_config = 0;
+	unsigned long x, y;
+	int rc = 0;
+
+	if (!pinfo) {
+		pr_err("invalid panel info\n");
+		return -EINVAL;
+	}
+
+	hsync_period = mdss_panel_get_htotal(pinfo, true);
+	vsync_period = mdss_panel_get_vtotal(pinfo);
+
+	inter_num = pinfo->bpp * frate_hz;
+
+	if (pinfo->mipi.data_lane0)
+		lane_config++;
+	if (pinfo->mipi.data_lane1)
+		lane_config++;
+	if (pinfo->mipi.data_lane2)
+		lane_config++;
+	if (pinfo->mipi.data_lane3)
+		lane_config++;
+
+	/* lane_config is the divisor below; zero lanes would divide by 0. */
+	if (!lane_config) {
+		pr_err("%s: no data lanes enabled\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Bit clock (Mbps) = pixels/frame * bpp * fps / lanes, scaled down. */
+	x = mult_frac(vsync_period * hsync_period, inter_num, lane_config);
+	y = rounddown(x, 1);
+	t_clk.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+	t_clk.escclk_numer = ESC_CLK_MHZ;
+	t_clk.escclk_denom = ESCCLK_MMSS_CC_PREDIV;
+	t_clk.tlpx_numer_ns = TLPX_NUMER;
+	t_clk.treot_ns = TR_EOT;
+	pr_debug("hperiod=%d, vperiod=%d, inter_num=%lu, lane_cfg=%d\n",
+		hsync_period, vsync_period, inter_num, lane_config);
+	pr_debug("x=%lu, y=%lu, bitrate=%d\n", x, y, t_clk.bitclk_mbps);
+
+	switch (phy_rev) {
+	case DSI_PHY_REV_10:
+		rc = mdss_dsi_phy_initialize_defaults(&t_clk, &t_param,
+				phy_rev);
+		if (rc) {
+			pr_err("phy%d initialization failed\n", phy_rev);
+			goto timing_calc_end;
+		}
+		/*
+		 * Propagate calculation failures instead of silently
+		 * programming a partially computed timing table (matches
+		 * the REV_20 path below).
+		 */
+		rc = mdss_dsi_phy_calc_param_phy_rev_1(&t_clk, &t_param);
+		if (rc) {
+			pr_err("Phy timing calculations failed\n");
+			goto timing_calc_end;
+		}
+		mdss_dsi_phy_update_timing_param(pinfo, &t_param);
+		break;
+	case DSI_PHY_REV_20:
+		rc = mdss_dsi_phy_initialize_defaults(&t_clk, &t_param,
+				phy_rev);
+		if (rc) {
+			pr_err("phy%d initialization failed\n", phy_rev);
+			goto timing_calc_end;
+		}
+
+		rc = mdss_dsi_phy_calc_param_phy_rev_2(&t_clk, &t_param);
+		if (rc) {
+			pr_err("Phy timing calculations failed\n");
+			goto timing_calc_end;
+		}
+		mdss_dsi_phy_update_timing_param_rev_2(pinfo, &t_param);
+		break;
+	default:
+		pr_err("phy rev %d not supported\n", phy_rev);
+		return -EINVAL;
+	}
+
+timing_calc_end:
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy.h b/drivers/video/fbdev/msm/mdss_dsi_phy.h
new file mode 100644
index 0000000..aea42e8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDSS_DSI_PHY_H
+#define MDSS_DSI_PHY_H
+
+#include <linux/types.h>
+
+#include "mdss_panel.h"
+
+/* DSI PHY hardware revisions supported by the timing calculator. */
+enum phy_rev {
+	DSI_PHY_REV_UNKNOWN = 0x00,	/* not a valid revision */
+	DSI_PHY_REV_10 = 0x01, /* REV 1.0 - 20nm, 28nm */
+	DSI_PHY_REV_20 = 0x02, /* REV 2.0 - 14nm */
+	DSI_PHY_REV_MAX,	/* upper bound for range checks */
+};
+
+/*
+ * mdss_dsi_phy_calc_timing_param() - calculates clock timing and hs timing
+ * parameters for the given phy revision.
+ *
+ * @pinfo - structure containing panel specific information which will be
+ * used in calculating the phy timing parameters.
+ * @phy_rev - phy revision for which phy timings need to be calculated.
+ * @frate_hz - Frame rate for which phy timing parameters are to be calculated.
+ */
+int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
+ u32 frate_hz);
+
+#endif /* MDSS_DSI_PHY_H */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c
new file mode 100644
index 0000000..992d687
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_status.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fb.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/iopoll.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+
+#include "mdss_fb.h"
+#include "mdss_dsi.h"
+#include "mdss_panel.h"
+#include "mdss_mdp.h"
+
+#define STATUS_CHECK_INTERVAL_MS 5000
+#define STATUS_CHECK_INTERVAL_MIN_MS 50
+#define DSI_STATUS_CHECK_INIT -1
+#define DSI_STATUS_CHECK_DISABLE 1
+
+static uint32_t interval = STATUS_CHECK_INTERVAL_MS;
+static int32_t dsi_status_disable = DSI_STATUS_CHECK_INIT;
+struct dsi_status_data *pstatus_data;
+
+/*
+ * check_dsi_ctrl_status() - Reads MFD structure and
+ * calls platform specific DSI ctrl Status function.
+ * @work : dsi controller status data
+ */
+static void check_dsi_ctrl_status(struct work_struct *work)
+{
+ struct dsi_status_data *pdsi_status = NULL;
+
+ pdsi_status = container_of(to_delayed_work(work),
+ struct dsi_status_data, check_status);
+
+ if (!pdsi_status) {
+ pr_err("%s: DSI status data not available\n", __func__);
+ return;
+ }
+
+ if (!pdsi_status->mfd) {
+ pr_err("%s: FB data not available\n", __func__);
+ return;
+ }
+
+ if (mdss_panel_is_power_off(pdsi_status->mfd->panel_power_state) ||
+ pdsi_status->mfd->shutdown_pending) {
+ pr_debug("%s: panel off\n", __func__);
+ return;
+ }
+
+ pdsi_status->mfd->mdp.check_dsi_status(work, interval);
+}
+
+/*
+ * hw_vsync_handler() - Interrupt handler for HW VSYNC signal.
+ * @irq : irq line number
+ * @data : Pointer to the device structure.
+ *
+ * This function is called whenever a HW vsync signal is received from the
+ * panel. This resets the timer of ESD delayed workqueue back to initial
+ * value.
+ */
+irqreturn_t hw_vsync_handler(int irq, void *data)
+{
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata =
+ (struct mdss_dsi_ctrl_pdata *)data;
+ if (!ctrl_pdata) {
+ pr_err("%s: DSI ctrl not available\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ if (pstatus_data)
+ mod_delayed_work(system_wq, &pstatus_data->check_status,
+ msecs_to_jiffies(interval));
+ else
+ pr_err("Pstatus data is NULL\n");
+
+ if (!atomic_read(&ctrl_pdata->te_irq_ready)) {
+ complete_all(&ctrl_pdata->te_irq_comp);
+ atomic_inc(&ctrl_pdata->te_irq_ready);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * disable_esd_thread() - Cancels work item for the esd check.
+ */
+void disable_esd_thread(void)
+{
+ if (pstatus_data &&
+ cancel_delayed_work_sync(&pstatus_data->check_status))
+ pr_debug("esd thread killed\n");
+}
+
+/*
+ * fb_event_callback() - Call back function for the fb_register_client()
+ * notifying events
+ * @self : notifier block
+ * @event : The event that was triggered
+ * @data : Of type struct fb_event
+ *
+ * This function listens for FB_BLANK_UNBLANK and FB_BLANK_POWERDOWN events
+ * from frame buffer. DSI status check work is either scheduled again after
+ * PANEL_STATUS_CHECK_INTERVAL or cancelled based on the event.
+ */
+static int fb_event_callback(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct fb_event *evdata = data;
+ struct dsi_status_data *pdata = container_of(self,
+ struct dsi_status_data, fb_notifier);
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mdss_panel_info *pinfo;
+ struct msm_fb_data_type *mfd;
+
+ if (!evdata) {
+ pr_err("%s: event data not available\n", __func__);
+ return NOTIFY_BAD;
+ }
+
+ /* handle only mdss fb device */
+ if (strcmp("mdssfb", evdata->info->fix.id))
+ return NOTIFY_DONE;
+
+ mfd = evdata->info->par;
+ ctrl_pdata = container_of(dev_get_platdata(&mfd->pdev->dev),
+ struct mdss_dsi_ctrl_pdata, panel_data);
+ if (!ctrl_pdata) {
+ pr_err("%s: DSI ctrl not available\n", __func__);
+ return NOTIFY_BAD;
+ }
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+
+ if ((!(pinfo->esd_check_enabled) &&
+ dsi_status_disable) ||
+ (dsi_status_disable == DSI_STATUS_CHECK_DISABLE)) {
+ pr_debug("ESD check is disabled.\n");
+ cancel_delayed_work(&pdata->check_status);
+ return NOTIFY_DONE;
+ }
+
+ pdata->mfd = evdata->info->par;
+ if (event == FB_EVENT_BLANK) {
+ int *blank = evdata->data;
+ struct dsi_status_data *pdata = container_of(self,
+ struct dsi_status_data, fb_notifier);
+ pdata->mfd = evdata->info->par;
+
+ switch (*blank) {
+ case FB_BLANK_UNBLANK:
+ schedule_delayed_work(&pdata->check_status,
+ msecs_to_jiffies(interval));
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ pr_debug("%s : ESD thread running\n", __func__);
+ break;
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_HSYNC_SUSPEND:
+ cancel_delayed_work(&pdata->check_status);
+ break;
+ default:
+ pr_err("Unknown case in FB_EVENT_BLANK event\n");
+ break;
+ }
+ }
+ return 0;
+}
+
+static int param_dsi_status_disable(const char *val, struct kernel_param *kp)
+{
+ int ret = 0;
+ int int_val;
+
+ ret = kstrtos32(val, 0, &int_val);
+ if (ret)
+ return ret;
+
+ pr_info("%s: Set DSI status disable to %d\n",
+ __func__, int_val);
+ *((int *)kp->arg) = int_val;
+ return ret;
+}
+
+static int param_set_interval(const char *val, struct kernel_param *kp)
+{
+ int ret = 0;
+ int int_val;
+
+ ret = kstrtos32(val, 0, &int_val);
+ if (ret)
+ return ret;
+ if (int_val < STATUS_CHECK_INTERVAL_MIN_MS) {
+ pr_err("%s: Invalid value %d used, ignoring\n",
+ __func__, int_val);
+ ret = -EINVAL;
+ } else {
+ pr_info("%s: Set check interval to %d msecs\n",
+ __func__, int_val);
+ *((int *)kp->arg) = int_val;
+ }
+ return ret;
+}
+
+int __init mdss_dsi_status_init(void)
+{
+ int rc = 0;
+
+ pstatus_data = kzalloc(sizeof(struct dsi_status_data), GFP_KERNEL);
+ if (!pstatus_data)
+ return -ENOMEM;
+
+ pstatus_data->fb_notifier.notifier_call = fb_event_callback;
+
+ rc = fb_register_client(&pstatus_data->fb_notifier);
+ if (rc < 0) {
+ pr_err("%s: fb_register_client failed, returned with rc=%d\n",
+ __func__, rc);
+ kfree(pstatus_data);
+ return -EPERM;
+ }
+
+ pr_info("%s: DSI status check interval:%d\n", __func__, interval);
+
+ INIT_DELAYED_WORK(&pstatus_data->check_status, check_dsi_ctrl_status);
+
+ pr_debug("%s: DSI ctrl status work queue initialized\n", __func__);
+
+ return rc;
+}
+
+void __exit mdss_dsi_status_exit(void)
+{
+ fb_unregister_client(&pstatus_data->fb_notifier);
+ cancel_delayed_work_sync(&pstatus_data->check_status);
+ kfree(pstatus_data);
+ pr_debug("%s: DSI ctrl status work queue removed\n", __func__);
+}
+
+module_param_call(interval, param_set_interval, param_get_uint,
+ &interval, 0644);
+MODULE_PARM_DESC(interval,
+ "Duration in milliseconds to send BTA command for DSI status check");
+
+module_param_call(dsi_status_disable, param_dsi_status_disable, param_get_uint,
+ &dsi_status_disable, 0644);
+MODULE_PARM_DESC(dsi_status_disable,
+ "Disable DSI status check");
+
+module_init(mdss_dsi_status_init);
+module_exit(mdss_dsi_status_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/msm/mdss_edp.c b/drivers/video/fbdev/msm/mdss_edp.c
new file mode 100644
index 0000000..79eae8b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_edp.c
@@ -0,0 +1,1268 @@
+/* Copyright (c) 2012-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <linux/kthread.h>
+
+#include "mdss.h"
+#include "mdss_edp.h"
+
+#define RGB_COMPONENTS 3
+#define VDDA_MIN_UV 1800000 /* uV units */
+#define VDDA_MAX_UV 1800000 /* uV units */
+#define VDDA_UA_ON_LOAD 100000 /* uA units */
+#define VDDA_UA_OFF_LOAD 100 /* uA units */
+
+static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv);
+/*
+ * Init regulator needed for edp, 8974_l12
+ */
+static int mdss_edp_regulator_init(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret;
+
+ edp_drv->vdda_vreg = devm_regulator_get(&(edp_drv->pdev->dev), "vdda");
+ if (IS_ERR(edp_drv->vdda_vreg)) {
+ pr_err("%s: Could not get 8941_l12, ret = %ld\n", __func__,
+ PTR_ERR(edp_drv->vdda_vreg));
+ return -ENODEV;
+ }
+
+ ret = regulator_set_voltage(edp_drv->vdda_vreg,
+ VDDA_MIN_UV, VDDA_MAX_UV);
+ if (ret) {
+ pr_err("%s: vdda_vreg set_voltage failed, ret=%d\n", __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ ret = mdss_edp_regulator_on(edp_drv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Set uA and enable vdda
+ */
+static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret;
+
+ ret = regulator_set_optimum_mode(edp_drv->vdda_vreg, VDDA_UA_ON_LOAD);
+ if (ret < 0) {
+ pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
+ return ret;
+ }
+
+ ret = regulator_enable(edp_drv->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Disable vdda and set uA
+ */
+static int mdss_edp_regulator_off(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret;
+
+ ret = regulator_disable(edp_drv->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Failed to disable vdda_vreg regulator.\n",
+ __func__);
+ return ret;
+ }
+
+ ret = regulator_set_optimum_mode(edp_drv->vdda_vreg, VDDA_UA_OFF_LOAD);
+ if (ret < 0) {
+ pr_err("%s: vdda_vreg set regulator mode failed.\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Enables the gpio that supply power to the panel and enable the backlight
+ */
+static int mdss_edp_gpio_panel_en(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret = 0;
+
+ edp_drv->gpio_panel_en = of_get_named_gpio(edp_drv->pdev->dev.of_node,
+ "gpio-panel-en", 0);
+ if (!gpio_is_valid(edp_drv->gpio_panel_en)) {
+ pr_err("%s: gpio_panel_en=%d not specified\n", __func__,
+ edp_drv->gpio_panel_en);
+ goto gpio_err;
+ }
+
+ ret = gpio_request(edp_drv->gpio_panel_en, "disp_enable");
+ if (ret) {
+ pr_err("%s: Request reset gpio_panel_en failed, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = gpio_direction_output(edp_drv->gpio_panel_en, 1);
+ if (ret) {
+ pr_err("%s: Set direction for gpio_panel_en failed, ret=%d\n",
+ __func__, ret);
+ goto gpio_free;
+ }
+
+ return 0;
+
+gpio_free:
+ gpio_free(edp_drv->gpio_panel_en);
+gpio_err:
+ return -ENODEV;
+}
+
+static int mdss_edp_gpio_lvl_en(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret = 0;
+
+ edp_drv->gpio_lvl_en = of_get_named_gpio(edp_drv->pdev->dev.of_node,
+ "gpio-lvl-en", 0);
+ if (!gpio_is_valid(edp_drv->gpio_lvl_en)) {
+ pr_err("%s: gpio_lvl_en=%d not specified\n", __func__,
+ edp_drv->gpio_lvl_en);
+ ret = -ENODEV;
+ goto gpio_err;
+ }
+
+ ret = gpio_request(edp_drv->gpio_lvl_en, "lvl_enable");
+ if (ret) {
+ pr_err("%s: Request reset gpio_lvl_en failed, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = gpio_direction_output(edp_drv->gpio_lvl_en, 1);
+ if (ret) {
+ pr_err("%s: Set direction for gpio_lvl_en failed, ret=%d\n",
+ __func__, ret);
+ goto gpio_free;
+ }
+
+ return ret;
+
+gpio_free:
+ gpio_free(edp_drv->gpio_lvl_en);
+gpio_err:
+ return ret;
+}
+
+static int mdss_edp_pwm_config(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret = 0;
+
+ ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+ "qcom,panel-pwm-period", &edp_drv->pwm_period);
+ if (ret) {
+ pr_warn("%s: panel pwm period is not specified, %d", __func__,
+ edp_drv->pwm_period);
+ edp_drv->pwm_period = -EINVAL;
+ }
+
+ ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+ "qcom,panel-lpg-channel", &edp_drv->lpg_channel);
+ if (ret) {
+ pr_warn("%s: panel lpg channel is not specified, %d", __func__,
+ edp_drv->lpg_channel);
+ edp_drv->lpg_channel = -EINVAL;
+ }
+
+ if (edp_drv->pwm_period != -EINVAL &&
+ edp_drv->lpg_channel != -EINVAL) {
+ edp_drv->bl_pwm = pwm_request(edp_drv->lpg_channel,
+ "lcd-backlight");
+ if (edp_drv->bl_pwm == NULL || IS_ERR(edp_drv->bl_pwm)) {
+ pr_err("%s: pwm request failed", __func__);
+ edp_drv->bl_pwm = NULL;
+ return -EIO;
+ }
+ } else {
+ edp_drv->bl_pwm = NULL;
+ }
+
+ return 0;
+}
+
+void mdss_edp_set_backlight(struct mdss_panel_data *pdata, u32 bl_level)
+{
+ int ret = 0;
+ struct mdss_edp_drv_pdata *edp_drv = NULL;
+ int bl_max;
+ int period_ns;
+
+ edp_drv = container_of(pdata, struct mdss_edp_drv_pdata, panel_data);
+ if (!edp_drv) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return;
+ }
+
+ if (edp_drv->bl_pwm != NULL) {
+ bl_max = edp_drv->panel_data.panel_info.bl_max;
+ if (bl_level > bl_max)
+ bl_level = bl_max;
+
+ /* In order to avoid overflow, use the microsecond version
+ * of pwm_config if the pwm_period is greater than or equal
+ * to 1 second.
+ */
+ if (edp_drv->pwm_period >= USEC_PER_SEC) {
+ ret = pwm_config_us(edp_drv->bl_pwm,
+ bl_level * edp_drv->pwm_period / bl_max,
+ edp_drv->pwm_period);
+ if (ret) {
+ pr_err("%s: pwm_config_us() failed err=%d.\n",
+ __func__, ret);
+ return;
+ }
+ } else {
+ period_ns = edp_drv->pwm_period * NSEC_PER_USEC;
+ ret = pwm_config(edp_drv->bl_pwm,
+ bl_level * period_ns / bl_max,
+ period_ns);
+ if (ret) {
+ pr_err("%s: pwm_config() failed err=%d.\n",
+ __func__, ret);
+ return;
+ }
+ }
+
+ if (edp_drv->is_pwm_enabled) {
+ pwm_disable(edp_drv->bl_pwm);
+ edp_drv->is_pwm_enabled = 0;
+ }
+
+ ret = pwm_enable(edp_drv->bl_pwm);
+ if (ret) {
+ pr_err("%s: pwm_enable() failed err=%d\n", __func__,
+ ret);
+ return;
+ }
+ edp_drv->is_pwm_enabled = 1;
+ }
+}
+
+int mdss_edp_mainlink_ready(struct mdss_edp_drv_pdata *ep, u32 which)
+{
+ u32 data;
+ int cnt = 10;
+
+ while (--cnt) {
+ data = edp_read(ep->base + 0x84); /* EDP_MAINLINK_READY */
+ if (data & which) {
+ pr_debug("%s: which=%x ready\n", __func__, which);
+ return 1;
+ }
+ usleep_range(1000, 1100);
+ }
+ pr_err("%s: which=%x NOT ready\n", __func__, which);
+
+ return 0;
+}
+
+void mdss_edp_mainlink_reset(struct mdss_edp_drv_pdata *ep)
+{
+ edp_write(ep->base + 0x04, 0x02); /* EDP_MAINLINK_CTRL */
+ usleep_range(1000, 1100);
+ edp_write(ep->base + 0x04, 0); /* EDP_MAINLINK_CTRL */
+}
+
+void mdss_edp_mainlink_ctrl(struct mdss_edp_drv_pdata *ep, int enable)
+{
+ u32 data;
+
+ data = edp_read(ep->base + 0x04);
+ data &= ~BIT(0);
+
+ if (enable)
+ data |= 0x1;
+
+ edp_write(ep->base + 0x04, data);
+}
+
+void mdss_edp_state_ctrl(struct mdss_edp_drv_pdata *ep, u32 state)
+{
+ edp_write(ep->base + EDP_STATE_CTRL, state);
+}
+
+void mdss_edp_aux_reset(struct mdss_edp_drv_pdata *ep)
+{
+ /* reset AUX */
+ edp_write(ep->base + 0x300, BIT(1)); /* EDP_AUX_CTRL */
+ usleep_range(1000, 1100);
+ edp_write(ep->base + 0x300, 0); /* EDP_AUX_CTRL */
+}
+
+void mdss_edp_aux_ctrl(struct mdss_edp_drv_pdata *ep, int enable)
+{
+ u32 data;
+
+ data = edp_read(ep->base + 0x300);
+ if (enable)
+ data |= 0x01;
+ else
+ data |= ~0x01;
+ edp_write(ep->base + 0x300, data); /* EDP_AUX_CTRL */
+}
+
+void mdss_edp_phy_pll_reset(struct mdss_edp_drv_pdata *ep)
+{
+ /* EDP_PHY_CTRL */
+ edp_write(ep->base + 0x74, 0x005); /* bit 0, 2 */
+ usleep_range(1000, 1100);
+ edp_write(ep->base + 0x74, 0x000); /* EDP_PHY_CTRL */
+}
+
+int mdss_edp_phy_pll_ready(struct mdss_edp_drv_pdata *ep)
+{
+ int cnt;
+ u32 status = 0;
+
+ cnt = 100;
+ while (--cnt) {
+ status = edp_read(ep->base + 0x6c0);
+ if (status & 0x01)
+ break;
+ usleep_range(100, 110);
+ }
+
+ pr_debug("%s: PLL cnt=%d status=%x\n", __func__, cnt, (int)status);
+
+ if (cnt <= 0) {
+ pr_err("%s: PLL NOT ready\n", __func__);
+ return 0;
+ } else
+ return 1;
+}
+
+int mdss_edp_phy_ready(struct mdss_edp_drv_pdata *ep)
+{
+ u32 status;
+
+ status = edp_read(ep->base + 0x598);
+ status &= 0x01;
+
+ return status;
+}
+
+void mdss_edp_phy_power_ctrl(struct mdss_edp_drv_pdata *ep, int enable)
+{
+ if (enable) {
+ /* EDP_PHY_EDPPHY_GLB_PD_CTL */
+ edp_write(ep->base + 0x52c, 0x3f);
+ /* EDP_PHY_EDPPHY_GLB_CFG */
+ edp_write(ep->base + 0x528, 0x1);
+ /* EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG */
+ edp_write(ep->base + 0x620, 0xf);
+ } else {
+ /* EDP_PHY_EDPPHY_GLB_PD_CTL */
+ edp_write(ep->base + 0x52c, 0xc0);
+ }
+}
+
+void mdss_edp_lane_power_ctrl(struct mdss_edp_drv_pdata *ep, int up)
+{
+ int i, off, max_lane;
+ u32 data;
+
+ max_lane = ep->lane_cnt;
+
+ if (up)
+ data = 0; /* power up */
+ else
+ data = 0x7; /* power down */
+
+ /* EDP_PHY_EDPPHY_LNn_PD_CTL */
+ for (i = 0; i < max_lane; i++) {
+ off = 0x40 * i;
+ edp_write(ep->base + 0x404 + off, data);
+ }
+
+ /* power down un used lane */
+ data = 0x7; /* power down */
+ for (i = max_lane; i < EDP_MAX_LANE; i++) {
+ off = 0x40 * i;
+ edp_write(ep->base + 0x404 + off, data);
+ }
+}
+
+void mdss_edp_clock_synchrous(struct mdss_edp_drv_pdata *ep, int sync)
+{
+ u32 data;
+ u32 color;
+
+ /* EDP_MISC1_MISC0 */
+ data = edp_read(ep->base + 0x02c);
+
+ if (sync)
+ data |= 0x01;
+ else
+ data &= ~0x01;
+
+ /* only legacy rgb mode supported */
+ color = 0; /* 6 bits */
+ if (ep->edid.color_depth == 8)
+ color = 0x01;
+ else if (ep->edid.color_depth == 10)
+ color = 0x02;
+ else if (ep->edid.color_depth == 12)
+ color = 0x03;
+ else if (ep->edid.color_depth == 16)
+ color = 0x04;
+
+ color <<= 5; /* bit 5 to bit 7 */
+
+ data |= color;
+ /* EDP_MISC1_MISC0 */
+ edp_write(ep->base + 0x2c, data);
+}
+
+/* voltage mode and pre emphasis cfg */
+void mdss_edp_phy_vm_pe_init(struct mdss_edp_drv_pdata *ep)
+{
+ /* EDP_PHY_EDPPHY_GLB_VM_CFG0 */
+ edp_write(ep->base + 0x510, 0x3); /* vm only */
+ /* EDP_PHY_EDPPHY_GLB_VM_CFG1 */
+ edp_write(ep->base + 0x514, 0x64);
+ /* EDP_PHY_EDPPHY_GLB_MISC9 */
+ edp_write(ep->base + 0x518, 0x6c);
+}
+
+void mdss_edp_config_ctrl(struct mdss_edp_drv_pdata *ep)
+{
+ struct dpcd_cap *cap;
+ struct display_timing_desc *dp;
+ u32 data = 0;
+
+ dp = &ep->edid.timing[0];
+
+ cap = &ep->dpcd;
+
+ data = ep->lane_cnt - 1;
+ data <<= 4;
+
+ if (cap->enhanced_frame)
+ data |= 0x40;
+
+ if (ep->edid.color_depth == 8) {
+ /* 0 == 6 bits, 1 == 8 bits */
+ data |= 0x100; /* bit 8 */
+ }
+
+ if (!dp->interlaced) /* progressive */
+ data |= 0x04;
+
+ data |= 0x03; /* sycn clock & static Mvid */
+
+ edp_write(ep->base + 0xc, data); /* EDP_CONFIGURATION_CTRL */
+}
+
+static void mdss_edp_sw_mvid_nvid(struct mdss_edp_drv_pdata *ep)
+{
+ edp_write(ep->base + 0x14, 0x13b); /* EDP_SOFTWARE_MVID */
+ edp_write(ep->base + 0x18, 0x266); /* EDP_SOFTWARE_NVID */
+}
+
+static void mdss_edp_timing_cfg(struct mdss_edp_drv_pdata *ep)
+{
+ struct mdss_panel_info *pinfo;
+ u32 total_ver, total_hor;
+ u32 data;
+
+ pinfo = &ep->panel_data.panel_info;
+
+ pr_debug("%s: width=%d hporch= %d %d %d\n", __func__,
+ pinfo->xres, pinfo->lcdc.h_back_porch,
+ pinfo->lcdc.h_front_porch, pinfo->lcdc.h_pulse_width);
+
+ pr_debug("%s: height=%d vporch= %d %d %d\n", __func__,
+ pinfo->yres, pinfo->lcdc.v_back_porch,
+ pinfo->lcdc.v_front_porch, pinfo->lcdc.v_pulse_width);
+
+ total_hor = pinfo->xres + pinfo->lcdc.h_back_porch +
+ pinfo->lcdc.h_front_porch + pinfo->lcdc.h_pulse_width;
+
+ total_ver = pinfo->yres + pinfo->lcdc.v_back_porch +
+ pinfo->lcdc.v_front_porch + pinfo->lcdc.v_pulse_width;
+
+ data = total_ver;
+ data <<= 16;
+ data |= total_hor;
+ edp_write(ep->base + 0x1c, data); /* EDP_TOTAL_HOR_VER */
+
+ data = (pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width);
+ data <<= 16;
+ data |= (pinfo->lcdc.h_back_porch + pinfo->lcdc.h_pulse_width);
+ edp_write(ep->base + 0x20, data); /* EDP_START_HOR_VER_FROM_SYNC */
+
+ data = pinfo->lcdc.v_pulse_width;
+ data <<= 16;
+ data |= pinfo->lcdc.h_pulse_width;
+ edp_write(ep->base + 0x24, data); /* EDP_HSYNC_VSYNC_WIDTH_POLARITY */
+
+ data = pinfo->yres;
+ data <<= 16;
+ data |= pinfo->xres;
+ edp_write(ep->base + 0x28, data); /* EDP_ACTIVE_HOR_VER */
+}
+
+int mdss_edp_wait4train(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret = 0;
+
+ if (edp_drv->cont_splash)
+ return ret;
+
+ ret = wait_for_completion_timeout(&edp_drv->video_comp, 30);
+ if (ret <= 0) {
+ pr_err("%s: Link Train timedout\n", __func__);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+ pr_debug("%s:\n", __func__);
+
+ return ret;
+}
+
+static void mdss_edp_irq_enable(struct mdss_edp_drv_pdata *edp_drv);
+static void mdss_edp_irq_disable(struct mdss_edp_drv_pdata *edp_drv);
+
+int mdss_edp_on(struct mdss_panel_data *pdata)
+{
+ struct mdss_edp_drv_pdata *edp_drv = NULL;
+ int ret = 0;
+
+ if (!pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+ panel_data);
+
+ pr_debug("%s:+, cont_splash=%d\n", __func__, edp_drv->cont_splash);
+
+ if (!edp_drv->cont_splash) { /* vote for clocks */
+ mdss_edp_phy_pll_reset(edp_drv);
+ mdss_edp_aux_reset(edp_drv);
+ mdss_edp_mainlink_reset(edp_drv);
+ mdss_edp_aux_ctrl(edp_drv, 1);
+
+ ret = mdss_edp_prepare_clocks(edp_drv);
+ if (ret)
+ return ret;
+
+ mdss_edp_phy_power_ctrl(edp_drv, 1);
+
+ ret = mdss_edp_clk_enable(edp_drv);
+ if (ret) {
+ mdss_edp_unprepare_clocks(edp_drv);
+ return ret;
+ }
+
+ mdss_edp_phy_pll_ready(edp_drv);
+
+ mdss_edp_lane_power_ctrl(edp_drv, 1);
+
+ mdss_edp_clock_synchrous(edp_drv, 1);
+ mdss_edp_phy_vm_pe_init(edp_drv);
+ mdss_edp_config_ctrl(edp_drv);
+ mdss_edp_sw_mvid_nvid(edp_drv);
+ mdss_edp_timing_cfg(edp_drv);
+
+ gpio_set_value(edp_drv->gpio_panel_en, 1);
+ if (gpio_is_valid(edp_drv->gpio_lvl_en))
+ gpio_set_value(edp_drv->gpio_lvl_en, 1);
+
+ reinit_completion(&edp_drv->idle_comp);
+ mdss_edp_mainlink_ctrl(edp_drv, 1);
+ } else {
+ mdss_edp_aux_ctrl(edp_drv, 1);
+ }
+
+ mdss_edp_irq_enable(edp_drv);
+
+ if (edp_drv->delay_link_train) {
+ mdss_edp_link_train(edp_drv);
+ edp_drv->delay_link_train = 0;
+ }
+
+ mdss_edp_wait4train(edp_drv);
+
+ edp_drv->cont_splash = 0;
+
+ pr_debug("%s:-\n", __func__);
+ return ret;
+}
+
+int mdss_edp_off(struct mdss_panel_data *pdata)
+{
+ struct mdss_edp_drv_pdata *edp_drv = NULL;
+ int ret = 0;
+
+ edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+ panel_data);
+ if (!edp_drv) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s:+, cont_splash=%d\n", __func__, edp_drv->cont_splash);
+
+ /* wait until link training is completed */
+ mutex_lock(&edp_drv->train_mutex);
+
+ reinit_completion(&edp_drv->idle_comp);
+ mdss_edp_state_ctrl(edp_drv, ST_PUSH_IDLE);
+
+ ret = wait_for_completion_timeout(&edp_drv->idle_comp,
+ msecs_to_jiffies(100));
+ if (ret == 0)
+ pr_err("%s: idle pattern timedout\n", __func__);
+
+ mdss_edp_state_ctrl(edp_drv, 0);
+
+ mdss_edp_sink_power_state(edp_drv, SINK_POWER_OFF);
+
+ mdss_edp_irq_disable(edp_drv);
+
+ gpio_set_value(edp_drv->gpio_panel_en, 0);
+ if (gpio_is_valid(edp_drv->gpio_lvl_en))
+ gpio_set_value(edp_drv->gpio_lvl_en, 0);
+ if (edp_drv->bl_pwm != NULL)
+ pwm_disable(edp_drv->bl_pwm);
+ edp_drv->is_pwm_enabled = 0;
+
+ mdss_edp_mainlink_reset(edp_drv);
+ mdss_edp_mainlink_ctrl(edp_drv, 0);
+
+ mdss_edp_lane_power_ctrl(edp_drv, 0);
+ mdss_edp_phy_power_ctrl(edp_drv, 0);
+
+ mdss_edp_clk_disable(edp_drv);
+ mdss_edp_unprepare_clocks(edp_drv);
+
+ mdss_edp_aux_ctrl(edp_drv, 0);
+
+ pr_debug("%s-: state_ctrl=%x\n", __func__,
+ edp_read(edp_drv->base + 0x8));
+
+ mutex_unlock(&edp_drv->train_mutex);
+ return 0;
+}
+
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ pr_debug("%s: event=%d\n", __func__, event);
+ switch (event) {
+ case MDSS_EVENT_UNBLANK:
+ rc = mdss_edp_on(pdata);
+ break;
+ case MDSS_EVENT_PANEL_OFF:
+ rc = mdss_edp_off(pdata);
+ break;
+ }
+ return rc;
+}
+
+/*
+ * Converts from EDID struct to mdss_panel_info
+ */
+static void mdss_edp_edid2pinfo(struct mdss_edp_drv_pdata *edp_drv)
+{
+ struct display_timing_desc *dp;
+ struct mdss_panel_info *pinfo;
+
+ dp = &edp_drv->edid.timing[0];
+ pinfo = &edp_drv->panel_data.panel_info;
+
+ pinfo->clk_rate = dp->pclk;
+ pr_debug("%s: pclk=%d\n", __func__, pinfo->clk_rate);
+
+ pinfo->xres = dp->h_addressable + dp->h_border * 2;
+ pinfo->yres = dp->v_addressable + dp->v_border * 2;
+
+ pr_debug("%s: x=%d y=%d\n", __func__, pinfo->xres, pinfo->yres);
+
+ pinfo->lcdc.h_back_porch = dp->h_blank - dp->h_fporch -
+ dp->h_sync_pulse;
+ pinfo->lcdc.h_front_porch = dp->h_fporch;
+ pinfo->lcdc.h_pulse_width = dp->h_sync_pulse;
+
+ pr_debug("%s: hporch= %d %d %d\n", __func__,
+ pinfo->lcdc.h_back_porch, pinfo->lcdc.h_front_porch,
+ pinfo->lcdc.h_pulse_width);
+
+ pinfo->lcdc.v_back_porch = dp->v_blank - dp->v_fporch
+ - dp->v_sync_pulse;
+ pinfo->lcdc.v_front_porch = dp->v_fporch;
+ pinfo->lcdc.v_pulse_width = dp->v_sync_pulse;
+
+ pr_debug("%s: vporch= %d %d %d\n", __func__,
+ pinfo->lcdc.v_back_porch, pinfo->lcdc.v_front_porch,
+ pinfo->lcdc.v_pulse_width);
+
+ pinfo->type = EDP_PANEL;
+ pinfo->pdest = DISPLAY_1;
+ pinfo->wait_cycle = 0;
+ pinfo->bpp = edp_drv->edid.color_depth * RGB_COMPONENTS;
+ pinfo->fb_num = 2;
+
+ pinfo->lcdc.border_clr = 0; /* black */
+ pinfo->lcdc.underflow_clr = 0xff; /* blue */
+ pinfo->lcdc.hsync_skew = 0;
+}
+
+static int mdss_edp_remove(struct platform_device *pdev)
+{
+ struct mdss_edp_drv_pdata *edp_drv = NULL;
+
+ edp_drv = platform_get_drvdata(pdev);
+
+ gpio_free(edp_drv->gpio_panel_en);
+ if (gpio_is_valid(edp_drv->gpio_lvl_en))
+ gpio_free(edp_drv->gpio_lvl_en);
+ mdss_edp_regulator_off(edp_drv);
+ iounmap(edp_drv->base);
+ iounmap(edp_drv->mmss_cc_base);
+ edp_drv->base = NULL;
+
+ return 0;
+}
+
+static int mdss_edp_device_register(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret;
+ u32 tmp;
+
+ mdss_edp_edid2pinfo(edp_drv);
+ edp_drv->panel_data.panel_info.bl_min = 1;
+ edp_drv->panel_data.panel_info.bl_max = 255;
+ ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+ "qcom,mdss-brightness-max-level", &tmp);
+ edp_drv->panel_data.panel_info.brightness_max =
+ (!ret ? tmp : MDSS_MAX_BL_BRIGHTNESS);
+
+ edp_drv->panel_data.panel_info.edp.frame_rate =
+ DEFAULT_FRAME_RATE;/* 60 fps */
+
+ edp_drv->panel_data.event_handler = mdss_edp_event_handler;
+ edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
+
+ edp_drv->panel_data.panel_info.cont_splash_enabled =
+ edp_drv->cont_splash;
+
+ ret = mdss_register_panel(edp_drv->pdev, &edp_drv->panel_data);
+ if (ret) {
+ dev_err(&(edp_drv->pdev->dev), "unable to register eDP\n");
+ return ret;
+ }
+
+ pr_info("%s: eDP initialized\n", __func__);
+
+ return 0;
+}
+
+/*
+ * Retrieve edp base address
+ */
+static int mdss_edp_get_base_address(struct mdss_edp_drv_pdata *edp_drv)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(edp_drv->pdev, IORESOURCE_MEM,
+ "edp_base");
+ if (!res) {
+ pr_err("%s: Unable to get the MDSS EDP resources", __func__);
+ return -ENOMEM;
+ }
+
+ edp_drv->base_size = resource_size(res);
+ edp_drv->base = ioremap(res->start, resource_size(res));
+ if (!edp_drv->base) {
+ pr_err("%s: Unable to remap EDP resources", __func__);
+ return -ENOMEM;
+ }
+
+ pr_debug("%s: drv=%x base=%x size=%x\n", __func__,
+ (int)edp_drv, (int)edp_drv->base, edp_drv->base_size);
+
+ mdss_debug_register_base("edp",
+ edp_drv->base, edp_drv->base_size, NULL);
+
+ return 0;
+}
+
+static int mdss_edp_get_mmss_cc_base_address(struct mdss_edp_drv_pdata
+ *edp_drv)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(edp_drv->pdev, IORESOURCE_MEM,
+ "mmss_cc_base");
+ if (!res) {
+ pr_err("%s: Unable to get the MMSS_CC resources", __func__);
+ return -ENOMEM;
+ }
+
+ edp_drv->mmss_cc_base = ioremap(res->start, resource_size(res));
+ if (!edp_drv->mmss_cc_base) {
+ pr_err("%s: Unable to remap MMSS_CC resources", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mdss_edp_video_ready(struct mdss_edp_drv_pdata *ep)
+{
+ pr_debug("%s: edp_video_ready\n", __func__);
+ complete(&ep->video_comp);
+}
+
+static void mdss_edp_idle_patterns_sent(struct mdss_edp_drv_pdata *ep)
+{
+ pr_debug("%s: idle_patterns_sent\n", __func__);
+ complete(&ep->idle_comp);
+}
+
+static void mdss_edp_do_link_train(struct mdss_edp_drv_pdata *ep)
+{
+ if (ep->cont_splash)
+ return;
+
+ if (!ep->inited) {
+ ep->delay_link_train++;
+ return;
+ }
+
+ mdss_edp_link_train(ep);
+}
+
+static int edp_event_thread(void *data)
+{
+ struct mdss_edp_drv_pdata *ep;
+ unsigned long flag;
+ u32 todo = 0;
+
+ ep = (struct mdss_edp_drv_pdata *)data;
+
+ while (1) {
+ wait_event(ep->event_q, (ep->event_pndx != ep->event_gndx));
+ spin_lock_irqsave(&ep->event_lock, flag);
+ if (ep->event_pndx == ep->event_gndx) {
+ spin_unlock_irqrestore(&ep->event_lock, flag);
+ break;
+ }
+ todo = ep->event_todo_list[ep->event_gndx];
+ ep->event_todo_list[ep->event_gndx++] = 0;
+ ep->event_gndx %= HPD_EVENT_MAX;
+ spin_unlock_irqrestore(&ep->event_lock, flag);
+
+ pr_debug("%s: todo=%x\n", __func__, todo);
+
+ if (todo == 0)
+ continue;
+
+ if (todo & EV_EDID_READ)
+ mdss_edp_edid_read(ep, 0);
+
+ if (todo & EV_DPCD_CAP_READ)
+ mdss_edp_dpcd_cap_read(ep);
+
+ if (todo & EV_DPCD_STATUS_READ)
+ mdss_edp_dpcd_status_read(ep);
+
+ if (todo & EV_LINK_TRAIN)
+ mdss_edp_do_link_train(ep);
+
+ if (todo & EV_VIDEO_READY)
+ mdss_edp_video_ready(ep);
+
+ if (todo & EV_IDLE_PATTERNS_SENT)
+ mdss_edp_idle_patterns_sent(ep);
+ }
+
+ return 0;
+}
+
+static void edp_send_events(struct mdss_edp_drv_pdata *ep, u32 events)
+{
+ spin_lock(&ep->event_lock);
+ ep->event_todo_list[ep->event_pndx++] = events;
+ ep->event_pndx %= HPD_EVENT_MAX;
+ wake_up(&ep->event_q);
+ spin_unlock(&ep->event_lock);
+}
+
+irqreturn_t edp_isr(int irq, void *ptr)
+{
+ struct mdss_edp_drv_pdata *ep = (struct mdss_edp_drv_pdata *)ptr;
+ unsigned char *base = ep->base;
+ u32 isr1, isr2, mask1, mask2;
+ u32 ack;
+
+ spin_lock(&ep->lock);
+ isr1 = edp_read(base + 0x308);
+ isr2 = edp_read(base + 0x30c);
+
+ mask1 = isr1 & ep->mask1;
+ mask2 = isr2 & ep->mask2;
+
+ isr1 &= ~mask1; /* remove masks bit */
+ isr2 &= ~mask2;
+
+ pr_debug("%s: isr=%x mask=%x isr2=%x mask2=%x\n",
+ __func__, isr1, mask1, isr2, mask2);
+
+ ack = isr1 & EDP_INTR_STATUS1;
+ ack <<= 1; /* ack bits */
+ ack |= mask1;
+ edp_write(base + 0x308, ack);
+
+ ack = isr2 & EDP_INTR_STATUS2;
+ ack <<= 1; /* ack bits */
+ ack |= mask2;
+ edp_write(base + 0x30c, ack);
+ spin_unlock(&ep->lock);
+
+ if (isr1 & EDP_INTR_HPD) {
+ isr1 &= ~EDP_INTR_HPD; /* clear */
+ edp_send_events(ep, EV_LINK_TRAIN);
+ }
+
+ if (isr2 & EDP_INTR_READY_FOR_VIDEO)
+ edp_send_events(ep, EV_VIDEO_READY);
+
+ if (isr2 & EDP_INTR_IDLE_PATTERNs_SENT)
+ edp_send_events(ep, EV_IDLE_PATTERNS_SENT);
+
+ if (isr1 && ep->aux_cmd_busy) {
+ /* clear EDP_AUX_TRANS_CTRL */
+ edp_write(base + 0x318, 0);
+ /* read EDP_INTERRUPT_TRANS_NUM */
+ ep->aux_trans_num = edp_read(base + 0x310);
+
+ if (ep->aux_cmd_i2c)
+ edp_aux_i2c_handler(ep, isr1);
+ else
+ edp_aux_native_handler(ep, isr1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+struct mdss_hw mdss_edp_hw = {
+ .hw_ndx = MDSS_HW_EDP,
+ .ptr = NULL,
+ .irq_handler = edp_isr,
+};
+
+static void mdss_edp_irq_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edp_drv->lock, flags);
+ edp_write(edp_drv->base + 0x308, edp_drv->mask1);
+ edp_write(edp_drv->base + 0x30c, edp_drv->mask2);
+ spin_unlock_irqrestore(&edp_drv->lock, flags);
+
+ edp_drv->mdss_util->enable_irq(&mdss_edp_hw);
+}
+
+static void mdss_edp_irq_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edp_drv->lock, flags);
+ edp_write(edp_drv->base + 0x308, 0x0);
+ edp_write(edp_drv->base + 0x30c, 0x0);
+ spin_unlock_irqrestore(&edp_drv->lock, flags);
+
+ edp_drv->mdss_util->disable_irq(&mdss_edp_hw);
+}
+
+static int mdss_edp_irq_setup(struct mdss_edp_drv_pdata *edp_drv)
+{
+ int ret = 0;
+
+ edp_drv->gpio_panel_hpd = of_get_named_gpio_flags(
+ edp_drv->pdev->dev.of_node, "gpio-panel-hpd", 0,
+ &edp_drv->hpd_flags);
+
+ if (!gpio_is_valid(edp_drv->gpio_panel_hpd)) {
+ pr_err("%s gpio_panel_hpd %d is not valid ", __func__,
+ edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ ret = gpio_request(edp_drv->gpio_panel_hpd, "edp_hpd_irq_gpio");
+ if (ret) {
+ pr_err("%s unable to request gpio_panel_hpd %d", __func__,
+ edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ ret = gpio_tlmm_config(GPIO_CFG(
+ edp_drv->gpio_panel_hpd,
+ 1,
+ GPIO_CFG_INPUT,
+ GPIO_CFG_NO_PULL,
+ GPIO_CFG_2MA),
+ GPIO_CFG_ENABLE);
+ if (ret) {
+ pr_err("%s: unable to config tlmm = %d\n", __func__,
+ edp_drv->gpio_panel_hpd);
+ gpio_free(edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ ret = gpio_direction_input(edp_drv->gpio_panel_hpd);
+ if (ret) {
+ pr_err("%s unable to set direction for gpio_panel_hpd %d",
+ __func__, edp_drv->gpio_panel_hpd);
+ return -ENODEV;
+ }
+
+ mdss_edp_hw.ptr = (void *)(edp_drv);
+
+ if (edp_drv->mdss_util->register_irq(&mdss_edp_hw))
+ pr_err("%s: mdss_register_irq failed.\n", __func__);
+
+
+ return 0;
+}
+
+
+static void mdss_edp_event_setup(struct mdss_edp_drv_pdata *ep)
+{
+ init_waitqueue_head(&ep->event_q);
+ spin_lock_init(&ep->event_lock);
+
+ kthread_run(edp_event_thread, (void *)ep, "mdss_edp_hpd");
+}
+
+static int mdss_edp_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mdss_edp_drv_pdata *edp_drv;
+ struct mdss_panel_cfg *pan_cfg = NULL;
+
+ if (!mdss_is_ready()) {
+ pr_err("%s: MDP not probed yet!\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ pan_cfg = mdss_panel_intf_type(MDSS_PANEL_INTF_EDP);
+ if (IS_ERR(pan_cfg)) {
+ return PTR_ERR(pan_cfg);
+ } else if (!pan_cfg) {
+ pr_debug("%s: not configured as prim\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!pdev->dev.of_node) {
+ pr_err("%s: Failed\n", __func__);
+ return -EPERM;
+ }
+
+ edp_drv = devm_kzalloc(&pdev->dev, sizeof(*edp_drv), GFP_KERNEL);
+ if (edp_drv == NULL)
+ return -ENOMEM;
+
+ edp_drv->mdss_util = mdss_get_util_intf();
+ if (edp_drv->mdss_util == NULL) {
+ pr_err("Failed to get mdss utility functions\n");
+ return -ENODEV;
+ }
+ edp_drv->panel_data.panel_info.is_prim_panel = true;
+
+ mdss_edp_hw.irq_info = mdss_intr_line();
+ if (mdss_edp_hw.irq_info == NULL) {
+ pr_err("Failed to get mdss irq information\n");
+ return -ENODEV;
+ }
+
+ edp_drv->pdev = pdev;
+ edp_drv->pdev->id = 1;
+ edp_drv->clk_on = 0;
+ edp_drv->aux_rate = 19200000;
+ edp_drv->mask1 = EDP_INTR_MASK1;
+ edp_drv->mask2 = EDP_INTR_MASK2;
+ mutex_init(&edp_drv->emutex);
+ spin_lock_init(&edp_drv->lock);
+
+ ret = mdss_edp_get_base_address(edp_drv);
+ if (ret)
+ goto probe_err;
+
+ ret = mdss_edp_get_mmss_cc_base_address(edp_drv);
+ if (ret)
+ goto edp_base_unmap;
+
+ ret = mdss_edp_regulator_init(edp_drv);
+ if (ret)
+ goto mmss_cc_base_unmap;
+
+ ret = mdss_edp_clk_init(edp_drv);
+ if (ret)
+ goto edp_clk_deinit;
+
+ ret = mdss_edp_gpio_panel_en(edp_drv);
+ if (ret)
+ goto edp_clk_deinit;
+
+ ret = mdss_edp_gpio_lvl_en(edp_drv);
+ if (ret)
+ pr_err("%s: No gpio_lvl_en detected\n", __func__);
+
+ ret = mdss_edp_pwm_config(edp_drv);
+ if (ret)
+ goto edp_free_gpio_panel_en;
+
+ mdss_edp_irq_setup(edp_drv);
+
+ mdss_edp_aux_init(edp_drv);
+
+ mdss_edp_event_setup(edp_drv);
+
+ edp_drv->cont_splash = edp_drv->mdss_util->panel_intf_status(DISPLAY_1,
+ MDSS_PANEL_INTF_EDP) ? true : false;
+
+ /* only need aux and ahb clock for aux channel */
+ mdss_edp_prepare_aux_clocks(edp_drv);
+ mdss_edp_aux_clk_enable(edp_drv);
+
+ if (!edp_drv->cont_splash) {
+ mdss_edp_phy_pll_reset(edp_drv);
+ mdss_edp_aux_reset(edp_drv);
+ mdss_edp_mainlink_reset(edp_drv);
+ mdss_edp_phy_power_ctrl(edp_drv, 1);
+ mdss_edp_aux_ctrl(edp_drv, 1);
+ }
+
+ mdss_edp_irq_enable(edp_drv);
+
+ mdss_edp_edid_read(edp_drv, 0);
+ mdss_edp_dpcd_cap_read(edp_drv);
+ mdss_edp_fill_link_cfg(edp_drv);
+
+ mdss_edp_irq_disable(edp_drv);
+
+ if (!edp_drv->cont_splash) {
+ mdss_edp_aux_ctrl(edp_drv, 0);
+ mdss_edp_phy_power_ctrl(edp_drv, 0);
+ }
+
+ mdss_edp_aux_clk_disable(edp_drv);
+ mdss_edp_unprepare_aux_clocks(edp_drv);
+
+ if (edp_drv->cont_splash) { /* vote for clocks */
+ mdss_edp_prepare_clocks(edp_drv);
+ mdss_edp_clk_enable(edp_drv);
+ }
+
+ mdss_edp_device_register(edp_drv);
+
+ edp_drv->inited = true;
+
+ pr_debug("%s: done\n", __func__);
+
+ return 0;
+
+
+edp_free_gpio_panel_en:
+ gpio_free(edp_drv->gpio_panel_en);
+ if (gpio_is_valid(edp_drv->gpio_lvl_en))
+ gpio_free(edp_drv->gpio_lvl_en);
+edp_clk_deinit:
+ mdss_edp_clk_deinit(edp_drv);
+ mdss_edp_regulator_off(edp_drv);
+mmss_cc_base_unmap:
+ iounmap(edp_drv->mmss_cc_base);
+edp_base_unmap:
+ iounmap(edp_drv->base);
+probe_err:
+ return ret;
+
+}
+
+static const struct of_device_id msm_mdss_edp_dt_match[] = {
+ {.compatible = "qcom,mdss-edp"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_mdss_edp_dt_match);
+
+static struct platform_driver mdss_edp_driver = {
+ .probe = mdss_edp_probe,
+ .remove = mdss_edp_remove,
+ .shutdown = NULL,
+ .driver = {
+ .name = "mdss_edp",
+ .of_match_table = msm_mdss_edp_dt_match,
+ },
+};
+
+static int __init mdss_edp_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mdss_edp_driver);
+ if (ret) {
+ pr_err("%s driver register failed", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+module_init(mdss_edp_init);
+
+static void __exit mdss_edp_driver_cleanup(void)
+{
+ platform_driver_unregister(&mdss_edp_driver);
+}
+module_exit(mdss_edp_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("eDP controller driver");
diff --git a/drivers/video/fbdev/msm/mdss_edp.h b/drivers/video/fbdev/msm/mdss_edp.h
new file mode 100644
index 0000000..2477f36
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_edp.h
@@ -0,0 +1,380 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_EDP_H
+#define MDSS_EDP_H
+
+#include <linux/of_gpio.h>
+
+#define edp_read(offset) readl_relaxed((offset))
+#define edp_write(offset, data) writel_relaxed((data), (offset))
+
+#define AUX_CMD_FIFO_LEN 144
+#define AUX_CMD_MAX 16
+#define AUX_CMD_I2C_MAX 128
+
+#define EDP_PORT_MAX 1
+#define EDP_SINK_CAP_LEN 16
+
+#define EDP_AUX_ERR_NONE 0
+#define EDP_AUX_ERR_ADDR -1
+#define EDP_AUX_ERR_TOUT -2
+#define EDP_AUX_ERR_NACK -3
+
+/* 4 bits of aux command */
+#define EDP_CMD_AUX_WRITE 0x8
+#define EDP_CMD_AUX_READ 0x9
+
+/* 4 bits of i2c command */
+#define EDP_CMD_I2C_MOT 0x4 /* i2c middle of transaction */
+#define EDP_CMD_I2C_WRITE 0x0
+#define EDP_CMD_I2C_READ 0x1
+#define EDP_CMD_I2C_STATUS 0x2 /* i2c write status request */
+
+/* cmd reply: bit 0, 1 for aux */
+#define EDP_AUX_ACK 0x0
+#define EDP_AUX_NACK 0x1
+#define EDP_AUX_DEFER 0x2
+
+/* cmd reply: bit 2, 3 for i2c */
+#define EDP_I2C_ACK 0x0
+#define EDP_I2C_NACK 0x4
+#define EDP_I2C_DEFER 0x8
+
+#define EDP_CMD_TIMEOUT 400 /* us */
+#define EDP_CMD_LEN 16
+
+#define EDP_INTR_ACK_SHIFT 1
+#define EDP_INTR_MASK_SHIFT 2
+
+#define EDP_MAX_LANE 4
+
+/* isr */
+#define EDP_INTR_HPD BIT(0)
+#define EDP_INTR_AUX_I2C_DONE BIT(3)
+#define EDP_INTR_WRONG_ADDR BIT(6)
+#define EDP_INTR_TIMEOUT BIT(9)
+#define EDP_INTR_NACK_DEFER BIT(12)
+#define EDP_INTR_WRONG_DATA_CNT BIT(15)
+#define EDP_INTR_I2C_NACK BIT(18)
+#define EDP_INTR_I2C_DEFER BIT(21)
+#define EDP_INTR_PLL_UNLOCKED BIT(24)
+#define EDP_INTR_AUX_ERROR BIT(27)
+
+
+#define EDP_INTR_STATUS1 \
+ (EDP_INTR_HPD | EDP_INTR_AUX_I2C_DONE| \
+ EDP_INTR_WRONG_ADDR | EDP_INTR_TIMEOUT | \
+ EDP_INTR_NACK_DEFER | EDP_INTR_WRONG_DATA_CNT | \
+ EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER | \
+ EDP_INTR_PLL_UNLOCKED | EDP_INTR_AUX_ERROR)
+
+#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
+
+
+#define EDP_INTR_READY_FOR_VIDEO BIT(0)
+#define EDP_INTR_IDLE_PATTERNs_SENT BIT(3)
+#define EDP_INTR_FRAME_END BIT(6)
+#define EDP_INTR_CRC_UPDATED BIT(9)
+
+#define EDP_INTR_STATUS2 \
+ (EDP_INTR_READY_FOR_VIDEO | EDP_INTR_IDLE_PATTERNs_SENT | \
+ EDP_INTR_FRAME_END | EDP_INTR_CRC_UPDATED)
+
+#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
+
+
+#define EDP_MAINLINK_CTRL 0x004
+#define EDP_STATE_CTRL 0x008
+#define EDP_MAINLINK_READY 0x084
+
+#define EDP_AUX_CTRL 0x300
+#define EDP_INTERRUPT_STATUS 0x308
+#define EDP_INTERRUPT_STATUS_2 0x30c
+#define EDP_AUX_DATA 0x314
+#define EDP_AUX_TRANS_CTRL 0x318
+#define EDP_AUX_STATUS 0x324
+
+#define EDP_PHY_EDPPHY_GLB_VM_CFG0 0x510
+#define EDP_PHY_EDPPHY_GLB_VM_CFG1 0x514
+
+struct edp_cmd {
+ char read; /* 1 == read, 0 == write */
+ char i2c; /* 1 == i2c cmd, 0 == native cmd */
+ u32 addr; /* 20 bits */
+ char *datap;
+ int len; /* len to be tx OR len to be rx for read */
+ char next; /* next command */
+};
+
+struct edp_buf {
+ char *start; /* buffer start addr */
+ char *end; /* buffer end addr */
+ int size; /* size of buffer */
+ char *data; /* data pointer */
+ int len; /* dara length */
+ char trans_num; /* transaction number */
+ char i2c; /* 1 == i2c cmd, 0 == native cmd */
+};
+
+#define DPCD_ENHANCED_FRAME BIT(0)
+#define DPCD_TPS3 BIT(1)
+#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
+#define DPCD_NO_AUX_HANDSHAKE BIT(3)
+#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
+
+/* event */
+#define EV_EDP_AUX_SETUP BIT(0)
+#define EV_EDID_READ BIT(1)
+#define EV_DPCD_CAP_READ BIT(2)
+#define EV_DPCD_STATUS_READ BIT(3)
+#define EV_LINK_TRAIN BIT(4)
+#define EV_IDLE_PATTERNS_SENT BIT(30)
+#define EV_VIDEO_READY BIT(31)
+
+/* edp state ctrl */
+#define ST_TRAIN_PATTERN_1 BIT(0)
+#define ST_TRAIN_PATTERN_2 BIT(1)
+#define ST_TRAIN_PATTERN_3 BIT(2)
+#define ST_SYMBOL_ERR_RATE_MEASUREMENT BIT(3)
+#define ST_PRBS7 BIT(4)
+#define ST_CUSTOM_80_BIT_PATTERN BIT(5)
+#define ST_SEND_VIDEO BIT(6)
+#define ST_PUSH_IDLE BIT(7)
+
+/* sink power state */
+#define SINK_POWER_ON 1
+#define SINK_POWER_OFF 2
+
+#define EDP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */
+#define EDP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */
+#define EDP_LINK_RATE_MAX EDP_LINK_RATE_270
+
+struct dpcd_cap {
+ char major;
+ char minor;
+ char max_lane_count;
+ char num_rx_port;
+ char i2c_speed_ctrl;
+ char scrambler_reset;
+ char enhanced_frame;
+ u32 max_link_rate; /* 162, 270 and 540 Mb, divided by 10 */
+ u32 flags;
+ u32 rx_port0_buf_size;
+ u32 training_read_interval;/* us */
+};
+
+struct dpcd_link_status {
+ char lane_01_status;
+ char lane_23_status;
+ char interlane_align_done;
+ char downstream_port_status_changed;
+ char link_status_updated;
+ char port_0_in_sync;
+ char port_1_in_sync;
+ char req_voltage_swing[4];
+ char req_pre_emphasis[4];
+};
+
+struct display_timing_desc {
+ u32 pclk;
+ u32 h_addressable; /* addressable + boder = active */
+ u32 h_border;
+ u32 h_blank; /* fporch + bporch + sync_pulse = blank */
+ u32 h_fporch;
+ u32 h_sync_pulse;
+ u32 v_addressable; /* addressable + boder = active */
+ u32 v_border;
+ u32 v_blank; /* fporch + bporch + sync_pulse = blank */
+ u32 v_fporch;
+ u32 v_sync_pulse;
+ u32 width_mm;
+ u32 height_mm;
+ u32 interlaced;
+ u32 stereo;
+ u32 sync_type;
+ u32 sync_separate;
+ u32 vsync_pol;
+ u32 hsync_pol;
+};
+
+#define EDID_DISPLAY_PORT_SUPPORT 0x05
+
+struct edp_edid {
+ char id_name[4];
+ short id_product;
+ char version;
+ char revision;
+ char video_intf; /* edp == 0x5 */
+ char color_depth; /* 6, 8, 10, 12 and 14 bits */
+ char color_format; /* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
+ char dpm; /* display power management */
+ char sync_digital; /* 1 = digital */
+ char sync_separate; /* 1 = separate */
+ char vsync_pol; /* 0 = negative, 1 = positive */
+ char hsync_pol; /* 0 = negative, 1 = positive */
+ char ext_block_cnt;
+ struct display_timing_desc timing[4];
+};
+
+struct edp_statistic {
+ u32 intr_hpd;
+ u32 intr_aux_i2c_done;
+ u32 intr_wrong_addr;
+ u32 intr_tout;
+ u32 intr_nack_defer;
+ u32 intr_wrong_data_cnt;
+ u32 intr_i2c_nack;
+ u32 intr_i2c_defer;
+ u32 intr_pll_unlock;
+ u32 intr_crc_update;
+ u32 intr_frame_end;
+ u32 intr_idle_pattern_sent;
+ u32 intr_ready_for_video;
+ u32 aux_i2c_tx;
+ u32 aux_i2c_rx;
+ u32 aux_native_tx;
+ u32 aux_native_rx;
+};
+
+
+#define DPCD_LINK_VOLTAGE_MAX 4
+#define DPCD_LINK_PRE_EMPHASIS_MAX 4
+
+#define HPD_EVENT_MAX 8
+
+struct mdss_edp_drv_pdata {
+ /* device driver */
+ int (*on)(struct mdss_panel_data *pdata);
+ int (*off)(struct mdss_panel_data *pdata);
+ struct platform_device *pdev;
+
+ struct mutex emutex;
+ int clk_cnt;
+ int cont_splash;
+ bool inited;
+ int delay_link_train;
+
+ /* edp specific */
+ unsigned char *base;
+ int base_size;
+ unsigned char *mmss_cc_base;
+ u32 mask1;
+ u32 mask2;
+
+ struct mdss_panel_data panel_data;
+ struct mdss_util_intf *mdss_util;
+
+ int edp_on_cnt;
+ int edp_off_cnt;
+
+ u32 pixel_rate;
+ u32 aux_rate;
+ char link_rate; /* X 27000000 for real rate */
+ char lane_cnt;
+ char train_link_rate; /* X 27000000 for real rate */
+ char train_lane_cnt;
+
+ struct edp_edid edid;
+ struct dpcd_cap dpcd;
+
+ /* regulators */
+ struct regulator *vdda_vreg;
+
+ /* clocks */
+ struct clk *aux_clk;
+ struct clk *pixel_clk;
+ struct clk *ahb_clk;
+ struct clk *link_clk;
+ struct clk *mdp_core_clk;
+ int clk_on;
+
+ /* gpios */
+ int gpio_panel_en;
+ int gpio_lvl_en;
+
+ /* backlight */
+ struct pwm_device *bl_pwm;
+ bool is_pwm_enabled;
+ int lpg_channel;
+ int pwm_period;
+
+ /* hpd */
+ int gpio_panel_hpd;
+ enum of_gpio_flags hpd_flags;
+ int hpd_irq;
+
+ /* aux */
+ struct completion aux_comp;
+ struct completion train_comp;
+ struct completion idle_comp;
+ struct completion video_comp;
+ struct mutex aux_mutex;
+ struct mutex train_mutex;
+ u32 aux_cmd_busy;
+ u32 aux_cmd_i2c;
+ int aux_trans_num;
+ int aux_error_num;
+ u32 aux_ctrl_reg;
+ struct edp_buf txp;
+ struct edp_buf rxp;
+ char txbuf[256];
+ char rxbuf[256];
+ struct dpcd_link_status link_status;
+ char v_level;
+ char p_level;
+ /* transfer unit */
+ char tu_desired;
+ char valid_boundary;
+ char delay_start;
+ u32 bpp;
+ struct edp_statistic edp_stat;
+
+ /* event */
+ wait_queue_head_t event_q;
+ u32 event_pndx;
+ u32 event_gndx;
+ u32 event_todo_list[HPD_EVENT_MAX];
+ spinlock_t event_lock;
+ spinlock_t lock;
+};
+
+int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+
+void mdss_edp_dpcd_cap_read(struct mdss_edp_drv_pdata *edp);
+int mdss_edp_dpcd_status_read(struct mdss_edp_drv_pdata *edp);
+void mdss_edp_edid_read(struct mdss_edp_drv_pdata *edp, int block);
+int mdss_edp_link_train(struct mdss_edp_drv_pdata *edp);
+void edp_aux_i2c_handler(struct mdss_edp_drv_pdata *edp, u32 isr);
+void edp_aux_native_handler(struct mdss_edp_drv_pdata *edp, u32 isr);
+void mdss_edp_aux_init(struct mdss_edp_drv_pdata *ep);
+
+void mdss_edp_fill_link_cfg(struct mdss_edp_drv_pdata *ep);
+void mdss_edp_sink_power_down(struct mdss_edp_drv_pdata *ep);
+void mdss_edp_state_ctrl(struct mdss_edp_drv_pdata *ep, u32 state);
+int mdss_edp_sink_power_state(struct mdss_edp_drv_pdata *ep, char state);
+void mdss_edp_lane_power_ctrl(struct mdss_edp_drv_pdata *ep, int up);
+void mdss_edp_config_ctrl(struct mdss_edp_drv_pdata *ep);
+
+void mdss_edp_clk_debug(unsigned char *edp_base, unsigned char *mmss_cc_base);
+
+#endif /* MDSS_EDP_H */
diff --git a/drivers/video/fbdev/msm/mdss_edp_aux.c b/drivers/video/fbdev/msm/mdss_edp_aux.c
new file mode 100644
index 0000000..8ba715d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_edp_aux.c
@@ -0,0 +1,1338 @@
+/* Copyright (c) 2013-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/bug.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/msm-clk.h>
+
+#include "mdss_panel.h"
+#include "mdss_edp.h"
+
+/*
+ * edp buffer operation
+ */
+static char *edp_buf_init(struct edp_buf *eb, char *buf, int size)
+{
+ eb->start = buf;
+ eb->size = size;
+ eb->data = eb->start;
+ eb->end = eb->start + eb->size;
+ eb->len = 0;
+ eb->trans_num = 0;
+ eb->i2c = 0;
+ return eb->data;
+}
+
+static char *edp_buf_reset(struct edp_buf *eb)
+{
+ eb->data = eb->start;
+ eb->len = 0;
+ eb->trans_num = 0;
+ eb->i2c = 0;
+ return eb->data;
+}
+
+static char *edp_buf_push(struct edp_buf *eb, int len)
+{
+ eb->data += len;
+ eb->len += len;
+ return eb->data;
+}
+
+static int edp_buf_trailing(struct edp_buf *eb)
+{
+ return (int)(eb->end - eb->data);
+}
+
+/*
+ * edp aux edp_buf_add_cmd:
+ * NO native and i2c command mix allowed
+ */
+static int edp_buf_add_cmd(struct edp_buf *eb, struct edp_cmd *cmd)
+{
+ char data;
+ char *bp, *cp;
+ int i, len;
+
+ if (cmd->read) /* read */
+ len = 4;
+ else
+ len = cmd->len + 4;
+
+ if (edp_buf_trailing(eb) < len)
+ return 0;
+
+ /*
+ * cmd fifo only has depth of 144 bytes
+ * limit buf length to 128 bytes here
+ */
+ if ((eb->len + len) > 128)
+ return 0;
+
+ bp = eb->data;
+ data = cmd->addr >> 16;
+ data &= 0x0f; /* 4 addr bits */
+ if (cmd->read)
+ data |= BIT(4);
+ *bp++ = data;
+ *bp++ = cmd->addr >> 8;
+ *bp++ = cmd->addr;
+ *bp++ = cmd->len - 1;
+
+ if (!cmd->read) { /* write */
+ cp = cmd->datap;
+ for (i = 0; i < cmd->len; i++)
+ *bp++ = *cp++;
+ }
+ edp_buf_push(eb, len);
+
+ if (cmd->i2c)
+ eb->i2c++;
+
+ eb->trans_num++; /* Increase transaction number */
+
+ return cmd->len - 1;
+}
+
+static int edp_cmd_fifo_tx(struct edp_buf *tp, unsigned char *base)
+{
+ u32 data;
+ char *dp;
+ int len, cnt;
+
+ len = tp->len; /* total byte to cmd fifo */
+ if (len == 0)
+ return 0;
+
+ cnt = 0;
+ dp = tp->start;
+
+ while (cnt < len) {
+ data = *dp; /* data byte */
+ data <<= 8;
+ data &= 0x00ff00; /* index = 0, write */
+ if (cnt == 0)
+ data |= BIT(31); /* INDEX_WRITE */
+ pr_debug("%s: data=%x\n", __func__, data);
+ edp_write(base + EDP_AUX_DATA, data);
+ cnt++;
+ dp++;
+ }
+
+ data = (tp->trans_num - 1);
+ if (tp->i2c)
+ data |= BIT(8); /* I2C */
+
+ data |= BIT(9); /* GO */
+ pr_debug("%s: data=%x\n", __func__, data);
+ edp_write(base + EDP_AUX_TRANS_CTRL, data);
+
+ return tp->len;
+}
+
+static int edp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base)
+{
+ u32 data;
+ char *dp;
+ int i;
+
+ data = 0; /* index = 0 */
+ data |= BIT(31); /* INDEX_WRITE */
+ data |= BIT(0); /* read */
+ edp_write(base + EDP_AUX_DATA, data);
+
+ dp = rp->data;
+
+ /* discard first byte */
+ data = edp_read(base + EDP_AUX_DATA);
+ for (i = 0; i < len; i++) {
+ data = edp_read(base + EDP_AUX_DATA);
+ pr_debug("%s: data=%x\n", __func__, data);
+ *dp++ = (char)((data >> 8) & 0xff);
+ }
+
+ rp->len = len;
+ return len;
+}
+
+static int edp_aux_write_cmds(struct mdss_edp_drv_pdata *ep,
+ struct edp_cmd *cmd)
+{
+ struct edp_cmd *cm;
+ struct edp_buf *tp;
+ int len, ret;
+
+ mutex_lock(&ep->aux_mutex);
+ ep->aux_cmd_busy = 1;
+
+ tp = &ep->txp;
+ edp_buf_reset(tp);
+
+ cm = cmd;
+ while (cm) {
+ pr_debug("%s: i2c=%d read=%d addr=%x len=%d next=%d\n",
+ __func__, cm->i2c, cm->read, cm->addr, cm->len,
+ cm->next);
+ ret = edp_buf_add_cmd(tp, cm);
+ if (ret <= 0)
+ break;
+ if (cm->next == 0)
+ break;
+ cm++;
+ }
+
+ if (tp->i2c)
+ ep->aux_cmd_i2c = 1;
+ else
+ ep->aux_cmd_i2c = 0;
+
+ reinit_completion(&ep->aux_comp);
+
+ len = edp_cmd_fifo_tx(&ep->txp, ep->base);
+
+ wait_for_completion(&ep->aux_comp);
+
+ if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+ ret = len;
+ else
+ ret = ep->aux_error_num;
+
+ ep->aux_cmd_busy = 0;
+ mutex_unlock(&ep->aux_mutex);
+ return ret;
+}
+
+static int edp_aux_read_cmds(struct mdss_edp_drv_pdata *ep,
+ struct edp_cmd *cmds)
+{
+ struct edp_cmd *cm;
+ struct edp_buf *tp;
+ struct edp_buf *rp;
+ int len, ret;
+
+ mutex_lock(&ep->aux_mutex);
+ ep->aux_cmd_busy = 1;
+
+ tp = &ep->txp;
+ rp = &ep->rxp;
+ edp_buf_reset(tp);
+ edp_buf_reset(rp);
+
+ cm = cmds;
+ len = 0;
+ while (cm) {
+ pr_debug("%s: i2c=%d read=%d addr=%x len=%d next=%d\n",
+ __func__, cm->i2c, cm->read, cm->addr, cm->len,
+ cm->next);
+ ret = edp_buf_add_cmd(tp, cm);
+ len += cm->len;
+ if (ret <= 0)
+ break;
+ if (cm->next == 0)
+ break;
+ cm++;
+ }
+
+ if (tp->i2c)
+ ep->aux_cmd_i2c = 1;
+ else
+ ep->aux_cmd_i2c = 0;
+
+ reinit_completion(&ep->aux_comp);
+
+ edp_cmd_fifo_tx(tp, ep->base);
+
+ wait_for_completion(&ep->aux_comp);
+
+ if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+ ret = edp_cmd_fifo_rx(rp, len, ep->base);
+ else
+ ret = ep->aux_error_num;
+
+ ep->aux_cmd_busy = 0;
+ mutex_unlock(&ep->aux_mutex);
+
+ return ret;
+}
+
+void edp_aux_native_handler(struct mdss_edp_drv_pdata *ep, u32 isr)
+{
+
+ pr_debug("%s: isr=%x\n", __func__, isr);
+
+ if (isr & EDP_INTR_AUX_I2C_DONE)
+ ep->aux_error_num = EDP_AUX_ERR_NONE;
+ else if (isr & EDP_INTR_WRONG_ADDR)
+ ep->aux_error_num = EDP_AUX_ERR_ADDR;
+ else if (isr & EDP_INTR_TIMEOUT)
+ ep->aux_error_num = EDP_AUX_ERR_TOUT;
+ if (isr & EDP_INTR_NACK_DEFER)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+
+ complete(&ep->aux_comp);
+}
+
+void edp_aux_i2c_handler(struct mdss_edp_drv_pdata *ep, u32 isr)
+{
+
+ pr_debug("%s: isr=%x\n", __func__, isr);
+
+ if (isr & EDP_INTR_AUX_I2C_DONE) {
+ if (isr & (EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER))
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ else
+ ep->aux_error_num = EDP_AUX_ERR_NONE;
+ } else {
+ if (isr & EDP_INTR_WRONG_ADDR)
+ ep->aux_error_num = EDP_AUX_ERR_ADDR;
+ else if (isr & EDP_INTR_TIMEOUT)
+ ep->aux_error_num = EDP_AUX_ERR_TOUT;
+ if (isr & EDP_INTR_NACK_DEFER)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ if (isr & EDP_INTR_I2C_NACK)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ if (isr & EDP_INTR_I2C_DEFER)
+ ep->aux_error_num = EDP_AUX_ERR_NACK;
+ }
+
+ complete(&ep->aux_comp);
+}
+
+static int edp_aux_write_buf(struct mdss_edp_drv_pdata *ep, u32 addr,
+ char *buf, int len, int i2c)
+{
+ struct edp_cmd cmd;
+
+ cmd.read = 0;
+ cmd.i2c = i2c;
+ cmd.addr = addr;
+ cmd.datap = buf;
+ cmd.len = len & 0x0ff;
+ cmd.next = 0;
+
+ return edp_aux_write_cmds(ep, &cmd);
+}
+
+static int edp_aux_read_buf(struct mdss_edp_drv_pdata *ep, u32 addr,
+ int len, int i2c)
+{
+ struct edp_cmd cmd;
+
+ cmd.read = 1;
+ cmd.i2c = i2c;
+ cmd.addr = addr;
+ cmd.datap = NULL;
+ cmd.len = len & 0x0ff;
+ cmd.next = 0;
+
+ return edp_aux_read_cmds(ep, &cmd);
+}
+
+/*
+ * edid standard header bytes
+ */
+static char edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+int edp_edid_buf_error(char *buf, int len)
+{
+ char *bp;
+ int i;
+ char csum = 0;
+
+ bp = buf;
+ if (len < 128) {
+ pr_err("%s: Error: len=%x\n", __func__, len);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 128; i++)
+ csum += *bp++;
+
+ if (csum != 0) {
+ pr_err("%s: Error: csum=%x\n", __func__, csum);
+ return -EINVAL;
+ }
+
+ if (strcmp(buf, edid_hdr)) {
+ pr_err("%s: Error: header\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+void edp_extract_edid_manufacturer(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+ char data;
+
+ bp = &buf[8];
+ data = *bp & 0x7f;
+ data >>= 2;
+ edid->id_name[0] = 'A' + data - 1;
+ data = *bp & 0x03;
+ data <<= 3;
+ bp++;
+ data |= (*bp >> 5);
+ edid->id_name[1] = 'A' + data - 1;
+ data = *bp & 0x1f;
+ edid->id_name[2] = 'A' + data - 1;
+ edid->id_name[3] = 0;
+
+ pr_debug("%s: edid manufacturer = %s\n", __func__, edid->id_name);
+}
+
+void edp_extract_edid_product(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+ u32 data;
+
+ bp = &buf[0x0a];
+ data = *bp;
+ edid->id_product = *bp++;
+ edid->id_product &= 0x0ff;
+ data = *bp & 0x0ff;
+ data <<= 8;
+ edid->id_product |= data;
+
+ pr_debug("%s: edid product = 0x%x\n", __func__, edid->id_product);
+};
+
+void edp_extract_edid_version(struct edp_edid *edid, char *buf)
+{
+ edid->version = buf[0x12];
+ edid->revision = buf[0x13];
+ pr_debug("%s: edid version = %d.%d\n", __func__, edid->version,
+ edid->revision);
+};
+
+void edp_extract_edid_ext_block_cnt(struct edp_edid *edid, char *buf)
+{
+ edid->ext_block_cnt = buf[0x7e];
+ pr_debug("%s: edid extension = %d\n", __func__,
+ edid->ext_block_cnt);
+};
+
+void edp_extract_edid_video_support(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+
+ bp = &buf[0x14];
+ if (*bp & 0x80) {
+ edid->video_intf = *bp & 0x0f;
+ /* 6, 8, 10, 12, 14 and 16 bit per component */
+ edid->color_depth = ((*bp & 0x70) >> 4); /* color bit depth */
+ if (edid->color_depth) {
+ edid->color_depth *= 2;
+ edid->color_depth += 4;
+ }
+ pr_debug("%s: Digital Video intf=%d color_depth=%d\n",
+ __func__, edid->video_intf, edid->color_depth);
+ } else {
+ pr_err("%s: Error, Analog video interface\n", __func__);
+ }
+};
+
+void edp_extract_edid_feature(struct edp_edid *edid, char *buf)
+{
+ char *bp;
+ char data;
+
+ bp = &buf[0x18];
+ data = *bp;
+ data &= 0xe0;
+ data >>= 5;
+ if (data == 0x01)
+ edid->dpm = 1; /* display power management */
+
+ if (edid->video_intf) {
+ if (*bp & 0x80) {
+ /* RGB 4:4:4, YcrCb 4:4:4 and YCrCb 4:2:2 */
+ edid->color_format = *bp & 0x18;
+ edid->color_format >>= 3;
+ }
+ }
+
+ pr_debug("%s: edid dpm=%d color_format=%d\n", __func__,
+ edid->dpm, edid->color_format);
+};
+
+void edp_extract_edid_detailed_timing_description(struct edp_edid *edid,
+ char *buf)
+{
+ char *bp;
+ u32 data;
+ struct display_timing_desc *dp;
+
+ dp = &edid->timing[0];
+
+ bp = &buf[0x36];
+ dp->pclk = 0;
+ dp->pclk = *bp++; /* byte 0x36 */
+ dp->pclk |= (*bp++ << 8); /* byte 0x37 */
+
+ dp->h_addressable = *bp++; /* byte 0x38 */
+
+ if (dp->pclk == 0 && dp->h_addressable == 0)
+ return; /* Not detailed timing definition */
+
+ dp->pclk *= 10000;
+
+ dp->h_blank = *bp++;/* byte 0x39 */
+ data = *bp & 0xf0; /* byte 0x3A */
+ data <<= 4;
+ dp->h_addressable |= data;
+
+ data = *bp++ & 0x0f;
+ data <<= 8;
+ dp->h_blank |= data;
+
+ dp->v_addressable = *bp++; /* byte 0x3B */
+ dp->v_blank = *bp++; /* byte 0x3C */
+ data = *bp & 0xf0; /* byte 0x3D */
+ data <<= 4;
+ dp->v_addressable |= data;
+
+ data = *bp++ & 0x0f;
+ data <<= 8;
+ dp->v_blank |= data;
+
+ dp->h_fporch = *bp++; /* byte 0x3E */
+ dp->h_sync_pulse = *bp++; /* byte 0x3F */
+
+ dp->v_fporch = *bp & 0x0f0; /* byte 0x40 */
+ dp->v_fporch >>= 4;
+ dp->v_sync_pulse = *bp & 0x0f;
+
+ bp++;
+ data = *bp & 0xc0; /* byte 0x41 */
+ data <<= 2;
+ dp->h_fporch |= data;
+
+ data = *bp & 0x30;
+ data <<= 4;
+ dp->h_sync_pulse |= data;
+
+ data = *bp & 0x0c;
+ data <<= 2;
+ dp->v_fporch |= data;
+
+ data = *bp & 0x03;
+ data <<= 4;
+ dp->v_sync_pulse |= data;
+
+ bp++;
+ dp->width_mm = *bp++; /* byte 0x42 */
+ dp->height_mm = *bp++; /* byte 0x43 */
+ data = *bp & 0x0f0; /* byte 0x44 */
+ data <<= 4;
+ dp->width_mm |= data;
+ data = *bp & 0x0f;
+ data <<= 8;
+ dp->height_mm |= data;
+
+ bp++;
+ dp->h_border = *bp++; /* byte 0x45 */
+ dp->v_border = *bp++; /* byte 0x46 */
+
+ /* progressive or interlaved */
+ dp->interlaced = *bp & 0x80; /* byte 0x47 */
+
+ dp->stereo = *bp & 0x60;
+ dp->stereo >>= 5;
+
+ data = *bp & 0x1e; /* bit 4,3,2 1*/
+ data >>= 1;
+ dp->sync_type = data & 0x08;
+ dp->sync_type >>= 3; /* analog or digital */
+ if (dp->sync_type) {
+ dp->sync_separate = data & 0x04;
+ dp->sync_separate >>= 2;
+ if (dp->sync_separate) {
+ if (data & 0x02)
+ dp->vsync_pol = 1; /* positive */
+ else
+ dp->vsync_pol = 0;/* negative */
+
+ if (data & 0x01)
+ dp->hsync_pol = 1; /* positive */
+ else
+ dp->hsync_pol = 0; /* negative */
+ }
+ }
+
+ pr_debug("%s: pixel_clock = %d\n", __func__, dp->pclk);
+
+ pr_debug("%s: horizontal=%d, blank=%d, porch=%d, sync=%d\n"
+ , __func__, dp->h_addressable, dp->h_blank,
+ dp->h_fporch, dp->h_sync_pulse);
+ pr_debug("%s: vertical=%d, blank=%d, porch=%d, vsync=%d\n"
+ , __func__, dp->v_addressable, dp->v_blank,
+ dp->v_fporch, dp->v_sync_pulse);
+ pr_debug("%s: panel size in mm, width=%d height=%d\n", __func__,
+ dp->width_mm, dp->height_mm);
+ pr_debug("%s: panel border horizontal=%d vertical=%d\n", __func__,
+ dp->h_border, dp->v_border);
+ pr_debug("%s: flags: interlaced=%d stereo=%d sync_type=%d sync_sep=%d\n"
+ , __func__, dp->interlaced, dp->stereo,
+ dp->sync_type, dp->sync_separate);
+ pr_debug("%s: polarity vsync=%d, hsync=%d", __func__,
+ dp->vsync_pol, dp->hsync_pol);
+}
+
+
+/*
+ * EDID structure can be found in VESA standard here:
+ * http://read.pudn.com/downloads110/ebook/456020/E-EDID%20Standard.pdf
+ *
+ * following table contains default edid
+ * static char edid_raw_data[128] = {
+ * 0, 255, 255, 255, 255, 255, 255, 0,
+ * 6, 175, 93, 48, 0, 0, 0, 0, 0, 22,
+ * 1, 4,
+ * 149, 26, 14, 120, 2,
+ * 164, 21,158, 85, 78, 155, 38, 15, 80, 84,
+ * 0, 0, 0,
+ * 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ * 29, 54, 128, 160, 112, 56, 30, 64, 48, 32, 142, 0, 0, 144, 16,0,0,24,
+ * 19, 36, 128, 160, 112, 56, 30, 64, 48, 32, 142, 0, 0, 144, 16,0,0,24,
+ * 0, 0, 0, 254, 0, 65, 85, 79, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+ * 0, 0, 0, 254, 0, 66, 49, 49, 54, 72, 65, 78, 48, 51, 46, 48, 32, 10,
+ * 0, 75 };
+ */
+
+static int edp_aux_chan_ready(struct mdss_edp_drv_pdata *ep)
+{
+ int cnt, ret;
+ char data = 0;
+
+ for (cnt = 5; cnt; cnt--) {
+ ret = edp_aux_write_buf(ep, 0x50, &data, 1, 1);
+ pr_debug("%s: ret=%d\n", __func__, ret);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+
+ if (cnt <= 0) {
+ pr_err("%s: aux chan NOT ready\n", __func__);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Read the sink's 128-byte base EDID block over I2C-on-AUX (address
+ * 0x50) and extract the fields this driver needs into ep->edid.
+ * Retries up to 5 times until a read passes the EDID sanity check.
+ *
+ * NOTE(review): the @block argument is currently unused — only the
+ * first 128-byte EDID block is ever fetched.
+ *
+ * Returns 128 on success, 0 when the AUX channel is not ready,
+ * -EINVAL when no valid EDID could be read.
+ */
+static int edp_sink_edid_read(struct mdss_edp_drv_pdata *ep, int block)
+{
+	struct edp_buf *rp;
+	int cnt, rlen;
+	int ret = 0;
+
+	ret = edp_aux_chan_ready(ep);
+	if (ret == 0) {
+		pr_err("%s: aux chan NOT ready\n", __func__);
+		return ret;
+	}
+
+	for (cnt = 5; cnt; cnt--) {
+		rlen = edp_aux_read_buf(ep, 0x50, 128, 1);
+		if (rlen > 0) {
+			pr_debug("%s: rlen=%d\n", __func__, rlen);
+
+			/* rp is only valid (assigned) when rlen > 0 */
+			rp = &ep->rxp;
+			if (!edp_edid_buf_error(rp->data, rp->len))
+				break;
+		}
+	}
+
+	if (cnt <= 0) {
+		pr_err("%s: Failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* decode the raw EDID bytes into the driver's edid struct */
+	edp_extract_edid_manufacturer(&ep->edid, rp->data);
+	edp_extract_edid_product(&ep->edid, rp->data);
+	edp_extract_edid_version(&ep->edid, rp->data);
+	edp_extract_edid_ext_block_cnt(&ep->edid, rp->data);
+	edp_extract_edid_video_support(&ep->edid, rp->data);
+	edp_extract_edid_feature(&ep->edid, rp->data);
+	edp_extract_edid_detailed_timing_description(&ep->edid, rp->data);
+
+	return 128;
+}
+
+/*
+ * Read the first @len bytes of the sink's DPCD receiver-capability
+ * field (DPCD address 0x000) and decode them into ep->dpcd.
+ *
+ * Parsing is strictly sequential: each `if (--rlen <= 0) return;`
+ * guard stops the decode once the requested/received byte count is
+ * exhausted, so callers may ask for fewer than 15 bytes.
+ */
+static void edp_sink_capability_read(struct mdss_edp_drv_pdata *ep,
+				int len)
+{
+	char *bp;
+	char data;
+	struct dpcd_cap *cap;
+	struct edp_buf *rp;
+	int rlen;
+
+	rlen = edp_aux_read_buf(ep, 0, len, 0);
+	if (rlen <= 0) {
+		pr_err("%s: edp aux read failed\n", __func__);
+		return;
+	}
+	rp = &ep->rxp;
+	cap = &ep->dpcd;
+	bp = rp->data;
+
+	data = *bp++; /* byte 0: DPCD revision (major.minor) */
+	cap->major = (data >> 4) & 0x0f;
+	cap->minor = data & 0x0f;
+	if (--rlen <= 0)
+		return;
+	pr_debug("%s: version: %d.%d\n", __func__, cap->major, cap->minor);
+
+	data = *bp++; /* byte 1: MAX_LINK_RATE */
+	/* 162, 270 and 540 MB, symbol rate, NOT bit rate */
+	cap->max_link_rate = data;
+	if (--rlen <= 0)
+		return;
+	pr_debug("%s: link_rate=%d\n", __func__, cap->max_link_rate);
+
+	data = *bp++; /* byte 2: MAX_LANE_COUNT + feature bits */
+	if (data & BIT(7))
+		cap->enhanced_frame++;
+
+	if (data & 0x40)
+		cap->flags |= DPCD_TPS3;
+	data &= 0x0f;
+	cap->max_lane_count = data;
+	if (--rlen <= 0)
+		return;
+	pr_debug("%s: lane_count=%d\n", __func__, cap->max_lane_count);
+
+	data = *bp++; /* byte 3: MAX_DOWNSPREAD */
+	if (data & BIT(0)) {
+		cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
+		pr_debug("%s: max_downspread\n", __func__);
+	}
+
+	if (data & BIT(6)) {
+		cap->flags |= DPCD_NO_AUX_HANDSHAKE;
+		pr_debug("%s: NO Link Training\n", __func__);
+	}
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 4: NORP (number of receiver ports) */
+	cap->num_rx_port = (data & BIT(0)) + 1;
+	pr_debug("%s: rx_ports=%d", __func__, cap->num_rx_port);
+	if (--rlen <= 0)
+		return;
+
+	bp += 3; /* skip 5, 6 and 7 */
+	rlen -= 3;
+	if (rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 8: RECEIVE_PORT0_CAP_0 */
+	if (data & BIT(1)) {
+		cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
+		pr_debug("%s: edid presented\n", __func__);
+	}
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 9: port 0 buffer size, in 32-byte units */
+	cap->rx_port0_buf_size = (data + 1) * 32;
+	pr_debug("%s: lane_buf_size=%d", __func__, cap->rx_port0_buf_size);
+	if (--rlen <= 0)
+		return;
+
+	bp += 2; /* skip 10, 11 port1 capability */
+	rlen -= 2;
+	if (rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 12: I2C speed control capability */
+	cap->i2c_speed_ctrl = data;
+	if (cap->i2c_speed_ctrl > 0)
+		pr_debug("%s: i2c_rate=%d", __func__, cap->i2c_speed_ctrl);
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 13: eDP configuration capability */
+	cap->scrambler_reset = data & BIT(0);
+	pr_debug("%s: scrambler_reset=%d\n", __func__,
+					cap->scrambler_reset);
+
+	if (data & BIT(1))
+		cap->enhanced_frame++;
+
+	pr_debug("%s: enhanced_framing=%d\n", __func__,
+					cap->enhanced_frame);
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 14: TRAINING_AUX_RD_INTERVAL */
+	if (data == 0)
+		cap->training_read_interval = 4000; /* us */
+	else
+		cap->training_read_interval = 4000 * data; /* us */
+	pr_debug("%s: training_interval=%d\n", __func__,
+			 cap->training_read_interval);
+}
+
+/*
+ * Read @len bytes of link/sink status starting at DPCD 0x202 into
+ * ep->link_status (lane status, interlane align, sink sync status and
+ * the per-lane voltage-swing / pre-emphasis adjustment requests).
+ *
+ * Returns @len on success, 0 when the AUX read came back short.
+ */
+static int edp_link_status_read(struct mdss_edp_drv_pdata *ep, int len)
+{
+	char *bp;
+	char data;
+	struct dpcd_link_status *sp;
+	struct edp_buf *rp;
+	int rlen;
+
+	pr_debug("%s: len=%d", __func__, len);
+	/* skip byte 0x200 and 0x201 */
+	rlen = edp_aux_read_buf(ep, 0x202, len, 0);
+	if (rlen < len) {
+		pr_err("%s: edp aux read failed\n", __func__);
+		return 0;
+	}
+	rp = &ep->rxp;
+	bp = rp->data;
+	sp = &ep->link_status;
+
+	data = *bp++; /* byte 0x202: LANE0_1_STATUS */
+	sp->lane_01_status = data; /* lane 0, 1 */
+
+	data = *bp++; /* byte 0x203: LANE2_3_STATUS */
+	sp->lane_23_status = data; /* lane 2, 3 */
+
+	data = *bp++; /* byte 0x204: LANE_ALIGN_STATUS_UPDATED */
+	sp->interlane_align_done = (data & BIT(0));
+	sp->downstream_port_status_changed = (data & BIT(6));
+	sp->link_status_updated = (data & BIT(7));
+
+	data = *bp++; /* byte 0x205: SINK_STATUS */
+	sp->port_0_in_sync = (data & BIT(0));
+	sp->port_1_in_sync = (data & BIT(1));
+
+	data = *bp++; /* byte 0x206: ADJUST_REQUEST_LANE0_1 (2 bits each) */
+	sp->req_voltage_swing[0] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[0] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[1] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[1] = data & 0x03;
+
+	data = *bp++; /* byte 0x207: ADJUST_REQUEST_LANE2_3 */
+	sp->req_voltage_swing[2] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[2] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[3] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[3] = data & 0x03;
+
+	return len;
+}
+
+/*
+ * Program the sink's LINK_BW_SET (DPCD 0x100) and LANE_COUNT_SET
+ * (DPCD 0x101) from the currently selected link rate / lane count,
+ * setting the enhanced-framing bit when the sink advertised it.
+ * Returns the number of bytes written (AUX write result).
+ */
+static int edp_cap_lane_rate_set(struct mdss_edp_drv_pdata *ep)
+{
+	char buf[4];
+	int len = 0;
+	struct dpcd_cap *cap;
+
+	cap = &ep->dpcd;
+
+	pr_debug("%s: bw=%x lane=%d\n", __func__, ep->link_rate, ep->lane_cnt);
+	buf[0] = ep->link_rate;
+	buf[1] = ep->lane_cnt;
+	if (cap->enhanced_frame)
+		buf[1] |= 0x80; /* ENHANCED_FRAME_EN */
+	len = edp_aux_write_buf(ep, 0x100, buf, 2, 0);
+
+	return len;
+}
+
+/*
+ * Write TRAINING_LANEx_SET (DPCD 0x103..0x106) for all four lanes with
+ * the same voltage-swing and pre-emphasis level. When a level is at
+ * its ceiling the corresponding MAX_*_REACHED bit (0x04 before the
+ * pre-emphasis field is shifted into bits 3..5) is also set.
+ */
+static int edp_lane_set_write(struct mdss_edp_drv_pdata *ep, int voltage_level,
+		int pre_emphasis_level)
+{
+	int i;
+	char buf[4];
+
+	if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
+		voltage_level |= 0x04; /* MAX_SWING_REACHED */
+
+	if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
+		pre_emphasis_level |= 0x04; /* MAX_PRE_EMPHASIS_REACHED */
+
+	pre_emphasis_level <<= 3; /* pre-emphasis occupies bits 3..5 */
+
+	for (i = 0; i < 4; i++)
+		buf[i] = voltage_level | pre_emphasis_level;
+
+	pr_debug("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
+	return edp_aux_write_buf(ep, 0x103, buf, 4, 0);
+}
+
+/*
+ * Write TRAINING_PATTERN_SET (DPCD 0x102) on the sink; @pattern 0
+ * disables training. Returns the AUX write result.
+ */
+static int edp_train_pattern_set_write(struct mdss_edp_drv_pdata *ep,
+						int pattern)
+{
+	char buf[4];
+
+	pr_debug("%s: pattern=%x\n", __func__, pattern);
+	buf[0] = pattern;
+	return edp_aux_write_buf(ep, 0x102, buf, 1, 0);
+}
+
+/*
+ * Check the cached lane status (from the last edp_link_status_read)
+ * for CR_DONE on every active lane. Each lane owns a nibble in the
+ * status bytes, so the mask selects bit 0 of each active nibble.
+ * Returns 1 when clock recovery completed on all lanes, else 0.
+ */
+static int edp_sink_clock_recovery_done(struct mdss_edp_drv_pdata *ep)
+{
+	u32 mask;
+	u32 data;
+
+	if (ep->lane_cnt == 1) {
+		mask = 0x01; /* lane 0 */
+		data = ep->link_status.lane_01_status;
+	} else if (ep->lane_cnt == 2) {
+		mask = 0x011; /* lanes 0 and 1 */
+		data = ep->link_status.lane_01_status;
+	} else {
+		mask = 0x01111; /* lanes 0..3 */
+		data = ep->link_status.lane_23_status;
+		data <<= 8;
+		data |= ep->link_status.lane_01_status;
+	}
+
+	pr_debug("%s: data=%x mask=%x\n", __func__, data, mask);
+	data &= mask;
+	if (data == mask) /* all done */
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Check the cached lane status for channel-equalization completion:
+ * interlane alignment must be done, and each active lane's nibble
+ * must have all three of CR_DONE | EQ_DONE | SYMBOL_LOCKED (0x7).
+ * Returns 1 when equalization completed on all lanes, else 0.
+ */
+static int edp_sink_channel_eq_done(struct mdss_edp_drv_pdata *ep)
+{
+	u32 mask;
+	u32 data;
+
+	pr_debug("%s:\n", __func__);
+
+	if (!ep->link_status.interlane_align_done) { /* not align */
+		pr_err("%s: interlane align failed\n", __func__);
+		return 0;
+	}
+
+	if (ep->lane_cnt == 1) {
+		mask = 0x7;
+		data = ep->link_status.lane_01_status;
+	} else if (ep->lane_cnt == 2) {
+		mask = 0x77;
+		data = ep->link_status.lane_01_status;
+	} else {
+		mask = 0x7777;
+		data = ep->link_status.lane_23_status;
+		data <<= 8;
+		data |= ep->link_status.lane_01_status;
+	}
+
+	pr_debug("%s: data=%x mask=%x\n", __func__, data, mask);
+
+	data &= mask;
+	if (data == mask)/* all done */
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Fold the per-lane voltage-swing / pre-emphasis adjustment requests
+ * from the last link-status read into single drive levels: the maximum
+ * requested value across all active lanes is stored in ep->v_level and
+ * ep->p_level for the next training iteration.
+ */
+void edp_sink_train_set_adjust(struct mdss_edp_drv_pdata *ep)
+{
+	int i;
+	int max = 0;
+
+
+	/* use the max level across lanes */
+	for (i = 0; i < ep->lane_cnt; i++) {
+		pr_debug("%s: lane=%d req_voltage_swing=%d",
+			__func__, i, ep->link_status.req_voltage_swing[i]);
+		if (max < ep->link_status.req_voltage_swing[i])
+			max = ep->link_status.req_voltage_swing[i];
+	}
+
+	ep->v_level = max;
+
+	/* use the max level across lanes */
+	max = 0;
+	for (i = 0; i < ep->lane_cnt; i++) {
+		pr_debug(" %s: lane=%d req_pre_emphasis=%d",
+			__func__, i, ep->link_status.req_pre_emphasis[i]);
+		if (max < ep->link_status.req_pre_emphasis[i])
+			max = ep->link_status.req_pre_emphasis[i];
+	}
+
+	ep->p_level = max;
+	pr_debug("%s: v_level=%d, p_level=%d", __func__,
+					ep->v_level, ep->p_level);
+}
+
+/*
+ * Ask the host controller to emit training pattern @train (1-based)
+ * via EDP_STATE_CTRL, then poll EDP_MAINLINK_READY for the matching
+ * ready bit (bit 3 onward, one per pattern).
+ *
+ * Fix: the loop `while (cnt--)` leaves cnt == -1 when it exhausts all
+ * 10 polls, so the old `if (cnt == 0)` check never reported a timeout
+ * and falsely reported failure when the ready bit appeared on the
+ * final iteration (cnt == 0 after a successful break). Use cnt < 0 to
+ * flag only a genuine timeout.
+ */
+static void edp_host_train_set(struct mdss_edp_drv_pdata *ep, int train)
+{
+	int bit, cnt;
+	u32 data;
+
+
+	bit = 1;
+	bit <<= (train - 1);
+	pr_debug("%s: bit=%d train=%d\n", __func__, bit, train);
+	edp_write(ep->base + EDP_STATE_CTRL, bit);
+
+	bit = 8;
+	bit <<= (train - 1);
+	cnt = 10;
+	while (cnt--) {
+		data = edp_read(ep->base + EDP_MAINLINK_READY);
+		if (data & bit)
+			break;
+	}
+
+	if (cnt < 0)
+		pr_err("%s: set link_train=%d failed\n", __func__, train);
+}
+
+/*
+ * PHY drive tables indexed as [voltage_swing][pre_emphasis].
+ * 0xFF marks a swing/pre-emphasis combination that is not allowed;
+ * edp_voltage_pre_emphasise_set() skips those entries.
+ */
+char vm_pre_emphasis[4][4] = {
+	{0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
+	{0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
+	{0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
+	{0x03, 0xFF, 0xFF, 0xFF}  /* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+char vm_voltage_swing[4][4] = {
+	{0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
+	{0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
+	{0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
+	{0x1E, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
+};
+
+/*
+ * Apply the current ep->v_level / ep->p_level drive settings: program
+ * the host PHY (GLB_VM_CFG0/1) and mirror the levels to the sink via
+ * edp_lane_set_write(). Table entries of 0xFF mark an unsupported
+ * swing/pre-emphasis combination, in which case nothing is written.
+ */
+static void edp_voltage_pre_emphasise_set(struct mdss_edp_drv_pdata *ep)
+{
+	u32 value0 = 0;
+	u32 value1 = 0;
+
+	pr_debug("%s: v=%d p=%d\n", __func__, ep->v_level, ep->p_level);
+
+	value0 = vm_pre_emphasis[(int)(ep->v_level)][(int)(ep->p_level)];
+	value1 = vm_voltage_swing[(int)(ep->v_level)][(int)(ep->p_level)];
+
+	/* Configure host and panel only if both values are allowed */
+	if (value0 != 0xFF && value1 != 0xFF) {
+		edp_write(ep->base + EDP_PHY_EDPPHY_GLB_VM_CFG0, value0);
+		edp_write(ep->base + EDP_PHY_EDPPHY_GLB_VM_CFG1, value1);
+		pr_debug("%s: value0=0x%x value1=0x%x", __func__,
+						value0, value1);
+		edp_lane_set_write(ep, ep->v_level, ep->p_level);
+	}
+
+}
+
+/*
+ * Link-training phase 1 (clock recovery): send TPS1 and loop reading
+ * the sink's link status, adjusting drive levels per its requests.
+ * Gives up when the voltage swing hits DPCD_LINK_VOLTAGE_MAX or the
+ * sink requests the same swing 5 times in a row (per DP spec policy).
+ * Returns 0 on success, -1 on failure (caller may down-shift the rate).
+ */
+static int edp_start_link_train_1(struct mdss_edp_drv_pdata *ep)
+{
+	int tries, old_v_level;
+	int ret = 0;
+	int usleep_time;
+
+	pr_debug("%s:", __func__);
+
+	edp_host_train_set(ep, 0x01); /* train_1 */
+	edp_voltage_pre_emphasise_set(ep);
+	edp_train_pattern_set_write(ep, 0x21); /* train_1 */
+
+	tries = 0;
+	old_v_level = ep->v_level;
+	while (1) {
+		/* wait the sink's advertised TRAINING_AUX_RD_INTERVAL */
+		usleep_time = ep->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time + 10);
+
+		edp_link_status_read(ep, 6);
+		if (edp_sink_clock_recovery_done(ep)) {
+			ret = 0;
+			break;
+		}
+
+		if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
+			ret = -1;
+			break;	/* quit */
+		}
+
+		if (old_v_level == ep->v_level) {
+			tries++;
+			if (tries >= 5) {
+				ret = -1;
+				break;	/* quit */
+			}
+		} else {
+			tries = 0;
+			old_v_level = ep->v_level;
+		}
+
+		edp_sink_train_set_adjust(ep);
+		edp_voltage_pre_emphasise_set(ep);
+	}
+
+	return ret;
+}
+
+/*
+ * Link-training phase 2 (channel equalization): send TPS3 when the
+ * sink supports it, otherwise TPS2, then loop on link status with
+ * drive-level adjustments. Gives up after 5 failed iterations.
+ * Returns 0 on success, -1 on failure.
+ */
+static int edp_start_link_train_2(struct mdss_edp_drv_pdata *ep)
+{
+	int tries;
+	int ret = 0;
+	int usleep_time;
+	char pattern;
+
+	pr_debug("%s:", __func__);
+
+	if (ep->dpcd.flags & DPCD_TPS3)
+		pattern = 0x03;
+	else
+		pattern = 0x02;
+
+	edp_host_train_set(ep, pattern); /* train_2 */
+	edp_voltage_pre_emphasise_set(ep);
+	edp_train_pattern_set_write(ep, pattern | 0x20);/* train_2 */
+
+	tries = 0;
+	while (1) {
+		usleep_time = ep->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time + 10);
+
+		edp_link_status_read(ep, 6);
+
+		if (edp_sink_channel_eq_done(ep)) {
+			ret = 0;
+			break;
+		}
+
+		tries++;
+		if (tries > 5) {
+			ret = -1;
+			break;
+		}
+
+		edp_sink_train_set_adjust(ep);
+		edp_voltage_pre_emphasise_set(ep);
+	}
+
+	return ret;
+}
+
+/*
+ * After a training failure, try a lower link rate (one 4-step drop in
+ * 270 Mbps/lane units) while doubling the lane count if the sink
+ * allows it. The new configuration is accepted only if its aggregate
+ * bandwidth still exceeds the panel's pixel-data bandwidth.
+ * Returns 0 when ep->link_rate/lane_cnt were updated, -EINVAL when no
+ * workable lower configuration exists.
+ */
+static int edp_link_rate_down_shift(struct mdss_edp_drv_pdata *ep)
+{
+	u32 prate, lrate;
+	int rate, lane, max_lane;
+	int changed = 0;
+
+	rate = ep->link_rate;
+	lane = ep->lane_cnt;
+	max_lane = ep->dpcd.max_lane_count;
+
+	/* pixel bandwidth in kB/s */
+	prate = ep->pixel_rate;
+	prate /= 1000;	/* avoid using 64 bits */
+	prate *= ep->bpp;
+	prate /= 8;	/* byte */
+
+	if (rate > EDP_LINK_RATE_162 && rate <= EDP_LINK_RATE_MAX) {
+		rate -= 4;	/* reduce rate */
+		changed++;
+	}
+
+	if (changed) {
+		if (lane >= 1 && lane < max_lane)
+			lane <<= 1;	/* increase lane */
+
+		/* link bandwidth in kB/s: 270 Mbps per rate unit, 8b/10b */
+		lrate = 270000000; /* 270M */
+		lrate /= 1000;	/* avoid using 64 bits */
+		lrate *= rate;
+		lrate /= 10;	/* byte, 10 bits --> 8 bits */
+		lrate *= lane;
+
+		pr_debug("%s: new lrate=%u prate=%u rate=%d lane=%d p=%d b=%d\n",
+		__func__, lrate, prate, rate, lane, ep->pixel_rate, ep->bpp);
+
+		if (lrate > prate) {
+			ep->link_rate = rate;
+			ep->lane_cnt = lane;
+			pr_debug("%s: new rate=%d %d\n", __func__, rate, lane);
+			return 0;
+		}
+	}
+
+	/* add calculation later */
+	return -EINVAL;
+}
+
+/*
+ * Clear TRAINING_PATTERN_SET on the sink (write 0 to DPCD 0x102) and
+ * wait one training-read interval before the next link operation.
+ */
+static void edp_clear_training_pattern(struct mdss_edp_drv_pdata *ep)
+{
+	int usleep_time;
+
+	pr_debug("%s:\n", __func__);
+	edp_train_pattern_set_write(ep, 0);
+	usleep_time = ep->dpcd.training_read_interval;
+	usleep_range(usleep_time, usleep_time + 10);
+}
+
+/*
+ * Full link-training sequence: verify the AUX channel, power the sink
+ * on, then run phase 1 (clock recovery) and phase 2 (equalization).
+ * On a phase failure the link rate is down-shifted and the whole
+ * sequence restarts from `train_start`; otherwise video is enabled.
+ * ep->train_comp is always completed so waiters are never left blocked.
+ * Returns 0 on success, negative/-1 on failure.
+ */
+static int edp_aux_link_train(struct mdss_edp_drv_pdata *ep)
+{
+	int ret = 0;
+	int usleep_time;
+
+	ret = edp_aux_chan_ready(ep);
+	if (ret == 0) {
+		pr_err("%s: LINK Train failed: aux chan NOT ready\n", __func__);
+		complete(&ep->train_comp);
+		return ret;
+	}
+
+	/* enable the mainlink before training */
+	edp_write(ep->base + EDP_MAINLINK_CTRL, 0x1);
+
+	mdss_edp_sink_power_state(ep, SINK_POWER_ON);
+
+train_start:
+	ep->v_level = 0; /* start from default level */
+	ep->p_level = 0;
+	edp_cap_lane_rate_set(ep);
+	mdss_edp_config_ctrl(ep);
+	mdss_edp_lane_power_ctrl(ep, 1);
+
+	mdss_edp_state_ctrl(ep, 0);
+	edp_clear_training_pattern(ep);
+	usleep_time = ep->dpcd.training_read_interval;
+	usleep_range(usleep_time, usleep_time + 10);
+
+	ret = edp_start_link_train_1(ep);
+	if (ret < 0) {
+		if (edp_link_rate_down_shift(ep) == 0) {
+			goto train_start; /* retry at a lower rate */
+		} else {
+			pr_err("%s: Training 1 failed\n", __func__);
+			ret = -1;
+			goto clear;
+		}
+	}
+
+	pr_debug("%s: Training 1 completed successfully\n", __func__);
+
+	mdss_edp_state_ctrl(ep, 0);
+	edp_clear_training_pattern(ep);
+	ret = edp_start_link_train_2(ep);
+	if (ret < 0) {
+		if (edp_link_rate_down_shift(ep) == 0) {
+			goto train_start; /* retry at a lower rate */
+		} else {
+			pr_err("%s: Training 2 failed\n", __func__);
+			ret = -1;
+			goto clear;
+		}
+	}
+
+	pr_debug("%s: Training 2 completed successfully\n", __func__);
+
+	mdss_edp_state_ctrl(ep, ST_SEND_VIDEO);
+clear:
+	edp_clear_training_pattern(ep);
+
+	complete(&ep->train_comp);
+	return ret;
+}
+
+/* Read and cache the first 16 bytes of the sink's DPCD capabilities. */
+void mdss_edp_dpcd_cap_read(struct mdss_edp_drv_pdata *ep)
+{
+	edp_sink_capability_read(ep, 16);
+}
+
+/*
+ * Refresh the link status and report whether sink port 0 is in sync.
+ * Returns nonzero (the PORT_0_IN_SYNC bit) when in sync, 0 otherwise
+ * (including when the status read itself failed).
+ */
+int mdss_edp_dpcd_status_read(struct mdss_edp_drv_pdata *ep)
+{
+	struct dpcd_link_status *sp;
+	int ret = 0; /* not sync */
+
+	ret = edp_link_status_read(ep, 6);
+
+	if (ret) {
+		sp = &ep->link_status;
+		ret = sp->port_0_in_sync; /* 1 == sync */
+	}
+
+	return ret;
+}
+
+/*
+ * Seed the link configuration from the panel's first EDID timing and
+ * the sink's maximum advertised lane count / link rate; training may
+ * later down-shift these values.
+ */
+void mdss_edp_fill_link_cfg(struct mdss_edp_drv_pdata *ep)
+{
+	struct display_timing_desc *dp;
+
+	dp = &ep->edid.timing[0];
+	ep->pixel_rate = dp->pclk;
+	ep->lane_cnt = ep->dpcd.max_lane_count;
+	ep->link_rate = ep->dpcd.max_link_rate;
+
+	pr_debug("%s: pclk=%d rate=%d lane=%d\n", __func__,
+		ep->pixel_rate, ep->link_rate, ep->lane_cnt);
+
+}
+
+/* Public wrapper around the EDID read (see edp_sink_edid_read). */
+void mdss_edp_edid_read(struct mdss_edp_drv_pdata *ep, int block)
+{
+	edp_sink_edid_read(ep, block);
+}
+
+/*
+ * Write the sink's SET_POWER register (DPCD 0x600) with @state.
+ * Returns the AUX write result.
+ */
+int mdss_edp_sink_power_state(struct mdss_edp_drv_pdata *ep, char state)
+{
+	int ret;
+
+	ret = edp_aux_write_buf(ep, 0x600, &state, 1, 0);
+	pr_debug("%s: state=%d ret=%d\n", __func__, state, ret);
+	return ret;
+}
+
+/* Serialized entry point for link training (one trainer at a time). */
+int mdss_edp_link_train(struct mdss_edp_drv_pdata *ep)
+{
+	int ret;
+
+	mutex_lock(&ep->train_mutex);
+	ret = edp_aux_link_train(ep);
+	mutex_unlock(&ep->train_mutex);
+	return ret;
+}
+
+/*
+ * One-time init of AUX-channel state: mutexes, completions and the
+ * tx/rx buffers. train_comp/video_comp are pre-completed so the first
+ * waiter does not block before any training/video has happened.
+ */
+void mdss_edp_aux_init(struct mdss_edp_drv_pdata *ep)
+{
+	mutex_init(&ep->aux_mutex);
+	mutex_init(&ep->train_mutex);
+	init_completion(&ep->aux_comp);
+	init_completion(&ep->train_comp);
+	init_completion(&ep->idle_comp);
+	init_completion(&ep->video_comp);
+	complete(&ep->train_comp); /* make non block at first time */
+	complete(&ep->video_comp); /* make non block at first time */
+
+	edp_buf_init(&ep->txp, ep->txbuf, sizeof(ep->txbuf));
+	edp_buf_init(&ep->rxp, ep->rxbuf, sizeof(ep->rxbuf));
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
new file mode 100644
index 0000000..65b689f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -0,0 +1,5119 @@
+/*
+ * Core MDSS framebuffer driver.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/videodev2.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/msm_mdp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/proc_fs.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <linux/kthread.h>
+#include <linux/dma-buf.h>
+#include "mdss_fb.h"
+#include "mdss_mdp_splash_logo.h"
+#define CREATE_TRACE_POINTS
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss_mdp.h"
+#include "mdp3_ctrl.h"
+#include "mdss_sync.h"
+
+#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
+#define MDSS_FB_NUM 3
+#else
+#define MDSS_FB_NUM 2
+#endif
+
+#ifndef EXPORT_COMPAT
+#define EXPORT_COMPAT(x)
+#endif
+
+#define MAX_FBI_LIST 32
+
+#ifndef TARGET_HW_MDSS_MDP3
+#define BLANK_FLAG_LP FB_BLANK_NORMAL
+#define BLANK_FLAG_ULP FB_BLANK_VSYNC_SUSPEND
+#else
+#define BLANK_FLAG_LP FB_BLANK_VSYNC_SUSPEND
+#define BLANK_FLAG_ULP FB_BLANK_NORMAL
+#endif
+
+/*
+ * Time period for fps calculation in microseconds.
+ * Default value is set to 1 sec.
+ */
+#define MDP_TIME_PERIOD_CALC_FPS_US 1000000
+
+static struct fb_info *fbi_list[MAX_FBI_LIST];
+static int fbi_list_index;
+
+static u32 mdss_fb_pseudo_palette[16] = {
+ 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+};
+
+static struct msm_mdp_interface *mdp_instance;
+
+static int mdss_fb_register(struct msm_fb_data_type *mfd);
+static int mdss_fb_open(struct fb_info *info, int user);
+static int mdss_fb_release(struct fb_info *info, int user);
+static int mdss_fb_release_all(struct fb_info *info, bool release_all);
+static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+static int mdss_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+static int mdss_fb_set_par(struct fb_info *info);
+static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
+ int op_enable);
+static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd);
+static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg, struct file *file);
+static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
+ struct vm_area_struct *vma);
+static int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd,
+ size_t size);
+static void mdss_fb_release_fences(struct msm_fb_data_type *mfd);
+static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p,
+ unsigned long val, void *data);
+
+static int __mdss_fb_display_thread(void *data);
+static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
+static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
+ int event, void *arg);
+static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd,
+ int type);
+/*
+ * Timer callback fired when no frame update arrived within the
+ * configured window: records NOTIFY_TYPE_NO_UPDATE and wakes any
+ * waiter blocked in the MSMFB_NOTIFY_UPDATE ioctl.
+ */
+void mdss_fb_no_update_notify_timer_cb(unsigned long data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+
+	if (!mfd) {
+		pr_err("%s mfd NULL\n", __func__);
+		return;
+	}
+	mfd->no_update.value = NOTIFY_TYPE_NO_UPDATE;
+	complete(&mfd->no_update.comp);
+}
+
+/*
+ * Broadcast a backlight-related notification: wakes waiters on both
+ * the update and no-update completions (only when someone is actually
+ * waiting, i.e. ref_count > 0) and pokes the MDP5/MDP3 sysfs nodes so
+ * user space sees the backlight/AD event.
+ *
+ * NOTE(review): mfd->update.value / no_update.value are written after
+ * dropping the corresponding lock — presumably tolerated by the single
+ * waiter pattern; verify against the ioctl reader.
+ */
+void mdss_fb_bl_update_notify(struct msm_fb_data_type *mfd,
+		uint32_t notification_type)
+{
+#ifndef TARGET_HW_MDSS_MDP3
+	struct mdss_overlay_private *mdp5_data = NULL;
+#endif
+#ifdef TARGET_HW_MDSS_MDP3
+	struct mdp3_session_data *mdp3_session = NULL;
+#endif
+	if (!mfd) {
+		pr_err("%s mfd NULL\n", __func__);
+		return;
+	}
+	mutex_lock(&mfd->update.lock);
+	if (mfd->update.is_suspend) {
+		mutex_unlock(&mfd->update.lock);
+		return;
+	}
+	if (mfd->update.ref_count > 0) {
+		mutex_unlock(&mfd->update.lock);
+		mfd->update.value = notification_type;
+		complete(&mfd->update.comp);
+		mutex_lock(&mfd->update.lock);
+	}
+	mutex_unlock(&mfd->update.lock);
+
+	mutex_lock(&mfd->no_update.lock);
+	if (mfd->no_update.ref_count > 0) {
+		mutex_unlock(&mfd->no_update.lock);
+		mfd->no_update.value = notification_type;
+		complete(&mfd->no_update.comp);
+		mutex_lock(&mfd->no_update.lock);
+	}
+	mutex_unlock(&mfd->no_update.lock);
+#ifndef TARGET_HW_MDSS_MDP3
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (mdp5_data) {
+		if (notification_type == NOTIFY_TYPE_BL_AD_ATTEN_UPDATE) {
+			mdp5_data->ad_bl_events++;
+			sysfs_notify_dirent(mdp5_data->ad_bl_event_sd);
+		} else if (notification_type == NOTIFY_TYPE_BL_UPDATE) {
+			mdp5_data->bl_events++;
+			sysfs_notify_dirent(mdp5_data->bl_event_sd);
+		}
+	}
+#endif
+#ifdef TARGET_HW_MDSS_MDP3
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (mdp3_session) {
+		mdp3_session->bl_events++;
+		sysfs_notify_dirent(mdp3_session->bl_event_sd);
+		pr_debug("bl_event = %u\n", mdp3_session->bl_events);
+	}
+#endif
+}
+
+/*
+ * MSMFB_NOTIFY_UPDATE ioctl backend: blocks the caller until an
+ * update / no-update / power event occurs and copies the notification
+ * type back to user space.
+ *
+ * Fixes:
+ *  - `&notify` was garbled to a mojibake character in copy_from_user,
+ *    which would not compile.
+ *  - The NOTIFY_UPDATE_STOP path took mfd->update.lock but then
+ *    incremented no_update.ref_count and unlocked mfd->no_update.lock:
+ *    update.lock was leaked (held forever) and no_update.lock was
+ *    unlocked without ever being locked. Now update.lock is dropped
+ *    and no_update.lock taken before touching no_update state,
+ *    mirroring the NOTIFY_UPDATE_START path.
+ *
+ * Returns -ETIMEDOUT on wait timeout, -EINVAL on a bad request,
+ * otherwise the copy_to_user result (0 on success).
+ */
+static int mdss_fb_notify_update(struct msm_fb_data_type *mfd,
+						unsigned long *argp)
+{
+	int ret;
+	unsigned int notify = 0x0, to_user = 0x0;
+
+	ret = copy_from_user(&notify, argp, sizeof(unsigned int));
+	if (ret) {
+		pr_err("%s:ioctl failed\n", __func__);
+		return ret;
+	}
+
+	if (notify > NOTIFY_UPDATE_POWER_OFF)
+		return -EINVAL;
+
+	if (notify == NOTIFY_UPDATE_INIT) {
+		mutex_lock(&mfd->update.lock);
+		mfd->update.init_done = true;
+		mutex_unlock(&mfd->update.lock);
+		ret = 1;
+	} else if (notify == NOTIFY_UPDATE_DEINIT) {
+		mutex_lock(&mfd->update.lock);
+		mfd->update.init_done = false;
+		mutex_unlock(&mfd->update.lock);
+		complete(&mfd->update.comp);
+		complete(&mfd->no_update.comp);
+		ret = 1;
+	} else if (mfd->update.is_suspend) {
+		to_user = NOTIFY_TYPE_SUSPEND;
+		mfd->update.is_suspend = 0;
+		ret = 1;
+	} else if (notify == NOTIFY_UPDATE_START) {
+		mutex_lock(&mfd->update.lock);
+		if (mfd->update.init_done)
+			reinit_completion(&mfd->update.comp);
+		else {
+			mutex_unlock(&mfd->update.lock);
+			pr_err("notify update start called without init\n");
+			return -EINVAL;
+		}
+		mfd->update.ref_count++;
+		mutex_unlock(&mfd->update.lock);
+		ret = wait_for_completion_interruptible_timeout(
+						&mfd->update.comp, 4 * HZ);
+		mutex_lock(&mfd->update.lock);
+		mfd->update.ref_count--;
+		mutex_unlock(&mfd->update.lock);
+		to_user = (unsigned int)mfd->update.value;
+		if (mfd->update.type == NOTIFY_TYPE_SUSPEND) {
+			to_user = (unsigned int)mfd->update.type;
+			ret = 1;
+		}
+	} else if (notify == NOTIFY_UPDATE_STOP) {
+		mutex_lock(&mfd->update.lock);
+		if (mfd->update.init_done) {
+			/* hand over from update.lock to no_update.lock */
+			mutex_unlock(&mfd->update.lock);
+			mutex_lock(&mfd->no_update.lock);
+			reinit_completion(&mfd->no_update.comp);
+		} else {
+			mutex_unlock(&mfd->update.lock);
+			pr_err("notify update stop called without init\n");
+			return -EINVAL;
+		}
+		mfd->no_update.ref_count++;
+		mutex_unlock(&mfd->no_update.lock);
+		ret = wait_for_completion_interruptible_timeout(
+						&mfd->no_update.comp, 4 * HZ);
+		mutex_lock(&mfd->no_update.lock);
+		mfd->no_update.ref_count--;
+		mutex_unlock(&mfd->no_update.lock);
+		to_user = (unsigned int)mfd->no_update.value;
+	} else {
+		/* NOTIFY_UPDATE_POWER_OFF: wait for the panel to power off */
+		if (mdss_fb_is_power_on(mfd)) {
+			reinit_completion(&mfd->power_off_comp);
+			ret = wait_for_completion_interruptible_timeout(
+						&mfd->power_off_comp, 1 * HZ);
+		}
+	}
+
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else if (ret > 0)
+		ret = copy_to_user(argp, &to_user, sizeof(unsigned int));
+	return ret;
+}
+
+static int lcd_backlight_registered;
+
+/*
+ * LED-class brightness_set hook: maps the Android brightness range
+ * (0..brightness_max) onto the panel's backlight range (0..bl_max)
+ * with rounding, clamping any nonzero request to at least level 1,
+ * and applies it unless calibration mode or external BL control
+ * says otherwise.
+ */
+static void mdss_fb_set_bl_brightness(struct led_classdev *led_cdev,
+				      enum led_brightness value)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
+	int bl_lvl;
+
+	/* first brightness write ends the boot notification blink */
+	if (mfd->boot_notification_led) {
+		led_trigger_event(mfd->boot_notification_led, 0);
+		mfd->boot_notification_led = NULL;
+	}
+
+	if (value > mfd->panel_info->brightness_max)
+		value = mfd->panel_info->brightness_max;
+
+	/* This maps android backlight level 0 to 255 into
+	 * driver backlight level 0 to bl_max with rounding
+	 */
+	MDSS_BRIGHT_TO_BL(bl_lvl, value, mfd->panel_info->bl_max,
+				mfd->panel_info->brightness_max);
+
+	if (!bl_lvl && value)
+		bl_lvl = 1;
+
+	if (!IS_CALIB_MODE_BL(mfd) && (!mfd->ext_bl_ctrl || !value ||
+							!mfd->bl_level)) {
+		mutex_lock(&mfd->bl_lock);
+		mdss_fb_set_backlight(mfd, bl_lvl);
+		mutex_unlock(&mfd->bl_lock);
+	}
+}
+
+/* Exposes the LCD backlight as /sys/class/leds/lcd-backlight. */
+static struct led_classdev backlight_led = {
+	.name = "lcd-backlight",
+	.brightness = MDSS_MAX_BL_BRIGHTNESS / 2,
+	.brightness_set = mdss_fb_set_bl_brightness,
+	.max_brightness = MDSS_MAX_BL_BRIGHTNESS,
+};
+
+/* sysfs show: human-readable panel type (e.g. "mipi dsi cmd panel"). */
+static ssize_t mdss_fb_get_type(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+	switch (mfd->panel.type) {
+	case NO_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "no panel\n");
+		break;
+	case HDMI_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "hdmi panel\n");
+		break;
+	case LVDS_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "lvds panel\n");
+		break;
+	case DTV_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "dtv panel\n");
+		break;
+	case MIPI_VIDEO_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "mipi dsi video panel\n");
+		break;
+	case MIPI_CMD_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "mipi dsi cmd panel\n");
+		break;
+	case WRITEBACK_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "writeback panel\n");
+		break;
+	case EDP_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "edp panel\n");
+		break;
+	default:
+		ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Total horizontal resolution of a panel chain: this panel's xres plus
+ * (recursively) every chained-and-active next panel, e.g. for split
+ * dual-DSI displays.
+ */
+static int mdss_fb_get_panel_xres(struct mdss_panel_info *pinfo)
+{
+	struct mdss_panel_data *pdata;
+	int xres;
+
+	pdata = container_of(pinfo, struct mdss_panel_data, panel_info);
+
+	xres = pinfo->xres;
+	if (pdata->next && pdata->next->active)
+		xres += mdss_fb_get_panel_xres(&pdata->next->panel_info);
+
+	return xres;
+}
+
+/*
+ * Validate and record a left/right framebuffer split. Explicit values
+ * must sum to the full panel width; with no explicit values, dual-LM
+ * dual-display mode derives the split from the panel widths, otherwise
+ * the split is cleared. Returns 0 when a split was accepted, -EINVAL
+ * otherwise.
+ */
+static inline int mdss_fb_validate_split(int left, int right,
+			struct msm_fb_data_type *mfd)
+{
+	int rc = -EINVAL;
+	u32 panel_xres = mdss_fb_get_panel_xres(mfd->panel_info);
+
+	pr_debug("%pS: split_mode = %d left=%d right=%d panel_xres=%d\n",
+		__builtin_return_address(0), mfd->split_mode,
+		left, right, panel_xres);
+
+	/* more validate condition could be added if needed */
+	if (left && right) {
+		if (panel_xres == left + right) {
+			mfd->split_fb_left = left;
+			mfd->split_fb_right = right;
+			rc = 0;
+		}
+	} else {
+		if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+			mfd->split_fb_left = mfd->panel_info->xres;
+			mfd->split_fb_right = panel_xres - mfd->split_fb_left;
+			rc = 0;
+		} else {
+			mfd->split_fb_left = mfd->split_fb_right = 0;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * sysfs store: parse "left right" and apply it via
+ * mdss_fb_validate_split(). Invalid input is logged and ignored;
+ * the write always "succeeds" (returns @len).
+ */
+static ssize_t mdss_fb_store_split(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	int data[2] = {0};
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+	if (sscanf(buf, "%d %d", &data[0], &data[1]) != 2)
+		pr_debug("Not able to read split values\n");
+	else if (!mdss_fb_validate_split(data[0], data[1], mfd))
+		pr_debug("split left=%d right=%d\n", data[0], data[1]);
+
+	return len;
+}
+
+/* sysfs show: current "left right" framebuffer split widths. */
+static ssize_t mdss_fb_show_split(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d %d\n",
+		       mfd->split_fb_left, mfd->split_fb_right);
+	return ret;
+}
+
+/*
+ * Promote a device with explicit left/right split widths but no split
+ * mode to dual-LM single-display mode, then log the final split state.
+ */
+static void mdss_fb_get_split(struct msm_fb_data_type *mfd)
+{
+	if ((mfd->split_mode == MDP_SPLIT_MODE_NONE) &&
+	    (mfd->split_fb_left && mfd->split_fb_right))
+		mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+
+	pr_debug("split fb%d left=%d right=%d mode=%d\n", mfd->index,
+		mfd->split_fb_left, mfd->split_fb_right, mfd->split_mode);
+}
+
+/*
+ * sysfs show: reports "src_split_always" when the device uses split
+ * layer mixers in a portrait (yres > xres) layout; empty otherwise.
+ */
+static ssize_t mdss_fb_get_src_split_info(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+
+	if (is_split_lm(mfd) && (fbi->var.yres > fbi->var.xres)) {
+		pr_debug("always split mode enabled\n");
+		ret = scnprintf(buf, PAGE_SIZE,
+			"src_split_always\n");
+	}
+
+	return ret;
+}
+
+/* sysfs show: current thermal throttle level for this framebuffer. */
+static ssize_t mdss_fb_get_thermal_level(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE, "thermal_level=%d\n",
+						mfd->thermal_level);
+
+	return ret;
+}
+
+/*
+ * sysfs store: set the thermal throttle level and notify listeners
+ * polling the msm_fb_thermal_level attribute.
+ */
+static ssize_t mdss_fb_set_thermal_level(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int rc = 0;
+	int thermal_level = 0;
+
+	rc = kstrtoint(buf, 10, &thermal_level);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("Thermal level set to %d\n", thermal_level);
+	mfd->thermal_level = thermal_level;
+	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "msm_fb_thermal_level");
+
+	return count;
+}
+
+/* sysfs show: current panel power state (raw panel_power_state value). */
+static ssize_t mdss_mdp_show_blank_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	int ret;
+
+	pr_debug("fb%d panel_power_state = %d\n", mfd->index,
+		mfd->panel_power_state);
+	ret = scnprintf(buf, PAGE_SIZE, "panel_power_on = %d\n",
+						mfd->panel_power_state);
+
+	return ret;
+}
+
+/*
+ * Delayed work fired when the display has been idle for mfd->idle_time
+ * ms: notifies the "idle_notify" sysfs attribute (if a timeout is
+ * configured) and marks the fb idle.
+ */
+static void __mdss_fb_idle_notify_work(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct msm_fb_data_type *mfd = container_of(dw, struct msm_fb_data_type,
+		idle_notify_work);
+
+	/* Notify idle-ness here */
+	pr_debug("Idle timeout %dms expired!\n", mfd->idle_time);
+	if (mfd->idle_time)
+		sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_notify");
+	mfd->idle_state = MDSS_FB_IDLE;
+}
+
+
+/*
+ * sysfs show: measured FPS as "int.frac" (measured_fps is stored in
+ * tenths of a frame per second); reports 0 while the panel is off.
+ *
+ * NOTE(review): do_div() expects a 64-bit dividend lvalue but fps_int
+ * is unsigned int here — confirm this builds/behaves as intended on
+ * this tree before relying on the fractional digit.
+ */
+static ssize_t mdss_fb_get_fps_info(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	unsigned int fps_int, fps_float;
+
+	if (mfd->panel_power_state != MDSS_PANEL_POWER_ON)
+		mfd->fps_info.measured_fps = 0;
+	fps_int = (unsigned int) mfd->fps_info.measured_fps;
+	fps_float = do_div(fps_int, 10);
+	return scnprintf(buf, PAGE_SIZE, "%d.%d\n", fps_int, fps_float);
+
+}
+
+/* sysfs show: configured idle timeout in milliseconds. */
+static ssize_t mdss_fb_get_idle_time(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d", mfd->idle_time);
+
+	return ret;
+}
+
+/* sysfs store: set the idle timeout (ms) used to arm idle_notify_work. */
+static ssize_t mdss_fb_set_idle_time(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int rc = 0;
+	int idle_time = 0;
+
+	rc = kstrtoint(buf, 10, &idle_time);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("Idle time = %d\n", idle_time);
+	mfd->idle_time = idle_time;
+
+	return count;
+}
+
+/*
+ * sysfs show: "yes" when the display is idle (the idle-notify work is
+ * not pending/running), "no" otherwise.
+ */
+static ssize_t mdss_fb_get_idle_notify(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%s",
+		work_busy(&mfd->idle_notify_work.work) ? "no" : "yes");
+
+	return ret;
+}
+
+/*
+ * sysfs show: dump of panel capabilities as "key=value" lines —
+ * partial-update/ROI alignment constraints, dynamic-fps range, panel
+ * identity, pingpong-split, and the HDR property block (peak
+ * brightness, blackness level, and the 8 display-primary coordinates).
+ */
+static ssize_t mdss_fb_get_panel_info(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_panel_info *pinfo = mfd->panel_info;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE,
+			"pu_en=%d\nxstart=%d\nwalign=%d\nystart=%d\nhalign=%d\n"
+			"min_w=%d\nmin_h=%d\nroi_merge=%d\ndyn_fps_en=%d\n"
+			"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
+			"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
+			"is_cec_supported=%d\nis_pingpong_split=%d\n"
+			"is_hdr_enabled=%d\n"
+			"peak_brightness=%d\nblackness_level=%d\n"
+			"white_chromaticity_x=%d\nwhite_chromaticity_y=%d\n"
+			"red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
+			"green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
+			"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n",
+			pinfo->partial_update_enabled,
+			pinfo->roi_alignment.xstart_pix_align,
+			pinfo->roi_alignment.width_pix_align,
+			pinfo->roi_alignment.ystart_pix_align,
+			pinfo->roi_alignment.height_pix_align,
+			pinfo->roi_alignment.min_width,
+			pinfo->roi_alignment.min_height,
+			pinfo->partial_update_roi_merge,
+			pinfo->dynamic_fps, pinfo->min_fps, pinfo->max_fps,
+			pinfo->panel_name, pinfo->is_prim_panel,
+			pinfo->is_pluggable, pinfo->display_id,
+			pinfo->is_cec_supported, is_pingpong_split(mfd),
+			pinfo->hdr_properties.hdr_enabled,
+			pinfo->hdr_properties.peak_brightness,
+			pinfo->hdr_properties.blackness_level,
+			pinfo->hdr_properties.display_primaries[0],
+			pinfo->hdr_properties.display_primaries[1],
+			pinfo->hdr_properties.display_primaries[2],
+			pinfo->hdr_properties.display_primaries[3],
+			pinfo->hdr_properties.display_primaries[4],
+			pinfo->hdr_properties.display_primaries[5],
+			pinfo->hdr_properties.display_primaries[6],
+			pinfo->hdr_properties.display_primaries[7]);
+
+	return ret;
+}
+
+/*
+ * sysfs "msm_fb_panel_status" show: report "suspend" when the panel is
+ * powered off; otherwise query the panel via MDSS_EVENT_DSI_PANEL_STATUS
+ * and report "alive" (positive return) or "dead".
+ */
+static ssize_t mdss_fb_get_panel_status(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+	int panel_status;
+
+	if (mdss_panel_is_power_off(mfd->panel_power_state)) {
+		ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n", "suspend");
+	} else {
+		panel_status = mdss_fb_send_panel_event(mfd,
+				MDSS_EVENT_DSI_PANEL_STATUS, NULL);
+		ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n",
+			panel_status > 0 ? "alive" : "dead");
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs "msm_fb_panel_status" store: write a non-zero value to force the
+ * panel into the "dead" state (panel_force_dead), used for recovery
+ * testing. Errors are logged but 'len' is still returned so userspace
+ * writes do not loop retrying.
+ */
+static ssize_t mdss_fb_force_panel_dead(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return len;
+	}
+
+	if (kstrtouint(buf, 0, &pdata->panel_info.panel_force_dead))
+		pr_err("kstrtouint buf error!\n");
+
+	return len;
+}
+
+/*
+ * mdss_fb_blanking_mode_switch() - Function triggers dynamic mode switch
+ * @mfd: Framebuffer data structure for display
+ * @mode: Enabled/Disable LowPowerMode
+ *	1: Enter into LowPowerMode
+ *	0: Exit from LowPowerMode
+ *
+ * This function dynamically switches to and from video mode. This
+ * switch involves the panel turning off backlight during transition.
+ */
+static int mdss_fb_blanking_mode_switch(struct msm_fb_data_type *mfd, int mode)
+{
+	int ret = 0;
+	u32 bl_lvl = 0;
+	struct mdss_panel_info *pinfo = NULL;
+	struct mdss_panel_data *pdata;
+
+	if (!mfd || !mfd->panel_info)
+		return -EINVAL;
+
+	pinfo = mfd->panel_info;
+
+	if (!pinfo->mipi.dms_mode) {
+		pr_warn("Panel does not support dynamic switch!\n");
+		return 0;
+	}
+
+	if (mode == pinfo->mipi.mode) {
+		pr_debug("Already in requested mode!\n");
+		return 0;
+	}
+	pr_debug("Enter mode: %d\n", mode);
+
+	/* NOTE(review): pdata is dereferenced below without a NULL check,
+	 * unlike the sysfs handlers in this file — confirm it cannot be
+	 * NULL on this path.
+	 */
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+
+	pdata->panel_info.dynamic_switch_pending = true;
+	ret = mdss_fb_pan_idle(mfd);
+	if (ret) {
+		pr_err("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
+			mfd->index, ret);
+		pdata->panel_info.dynamic_switch_pending = false;
+		return ret;
+	}
+
+	/* Save and zero the backlight so the switch is not visible. */
+	mutex_lock(&mfd->bl_lock);
+	bl_lvl = mfd->bl_level;
+	mdss_fb_set_backlight(mfd, 0);
+	mutex_unlock(&mfd->bl_lock);
+
+	lock_fb_info(mfd->fbi);
+	ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
+						mfd->op_enable);
+	if (ret) {
+		pr_err("can't turn off display!\n");
+		unlock_fb_info(mfd->fbi);
+		return ret;
+	}
+
+	mfd->op_enable = false;
+
+	/* NOTE(review): this return value is overwritten by the blank_sub
+	 * call below, so a configure_panel failure is silently dropped.
+	 */
+	ret = mfd->mdp.configure_panel(mfd, mode, 1);
+	mdss_fb_set_mdp_sync_pt_threshold(mfd, mfd->panel.type);
+
+	mfd->op_enable = true;
+
+	ret = mdss_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
+					mfd->op_enable);
+	if (ret) {
+		pr_err("can't turn on display!\n");
+		unlock_fb_info(mfd->fbi);
+		return ret;
+	}
+	unlock_fb_info(mfd->fbi);
+
+	/* Restore the saved backlight level. */
+	mutex_lock(&mfd->bl_lock);
+	mfd->allow_bl_update = true;
+	mdss_fb_set_backlight(mfd, bl_lvl);
+	mutex_unlock(&mfd->bl_lock);
+
+	pdata->panel_info.dynamic_switch_pending = false;
+	pdata->panel_info.is_lpm_mode = mode ? 1 : 0;
+
+	/* NOTE(review): dead code — every failing assignment of 'ret'
+	 * above already returned, so 'ret' is always 0 here.
+	 */
+	if (ret) {
+		pr_err("can't turn on display!\n");
+		return ret;
+	}
+
+	pr_debug("Exit mode: %d\n", mode);
+
+	return 0;
+}
+
+/*
+ * sysfs "msm_fb_dfps_mode" store: select the dynamic-fps update mode.
+ * A value >= DFPS_MODE_MAX disables dynamic fps. The change is rejected
+ * (with 'len' still returned) while idle timeout is active or while the
+ * panel is not running at its default fps. The mode is propagated to the
+ * split-display peer when present.
+ */
+static ssize_t mdss_fb_change_dfps_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	u32 dfps_mode;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return len;
+	}
+	pinfo = &pdata->panel_info;
+
+	if (kstrtouint(buf, 0, &dfps_mode)) {
+		pr_err("kstrtouint buf error!\n");
+		return len;
+	}
+
+	if (dfps_mode >= DFPS_MODE_MAX) {
+		pinfo->dynamic_fps = false;
+		return len;
+	}
+
+	if (mfd->idle_time != 0) {
+		pr_err("ERROR: Idle time is not disabled.\n");
+		return len;
+	}
+
+	if (pinfo->current_fps != pinfo->default_fps) {
+		pr_err("ERROR: panel not configured to default fps\n");
+		return len;
+	}
+
+	pinfo->dynamic_fps = true;
+	pinfo->dfps_update = dfps_mode;
+
+	if (pdata->next)
+		pdata->next->panel_info.dfps_update = dfps_mode;
+
+	return len;
+}
+
+/* sysfs "msm_fb_dfps_mode" show: report dynamic-fps enable flag and mode. */
+static ssize_t mdss_fb_get_dfps_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	int ret;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+
+	ret = scnprintf(buf, PAGE_SIZE, "dfps enabled=%d mode=%d\n",
+		pinfo->dynamic_fps, pinfo->dfps_update);
+
+	return ret;
+}
+
+/*
+ * sysfs "msm_fb_persist_mode" store: apply a display persistence mode.
+ * While the panel is off only the cached value is updated; otherwise the
+ * mode is applied through pdata->apply_display_setting() under bl_lock
+ * and cached only on success. Serialized by mdss_sysfs_lock.
+ */
+static ssize_t mdss_fb_change_persist_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_info *pinfo = NULL;
+	struct mdss_panel_data *pdata;
+	int ret = 0;
+	u32 persist_mode;
+
+	if (!mfd || !mfd->panel_info) {
+		pr_err("%s: Panel info is NULL!\n", __func__);
+		return len;
+	}
+
+	pinfo = mfd->panel_info;
+
+	if (kstrtouint(buf, 0, &persist_mode)) {
+		pr_err("kstrtouint buf error!\n");
+		return len;
+	}
+
+	mutex_lock(&mfd->mdss_sysfs_lock);
+	if (mdss_panel_is_power_off(mfd->panel_power_state)) {
+		pinfo->persist_mode = persist_mode;
+		goto end;
+	}
+
+	mutex_lock(&mfd->bl_lock);
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if ((pdata) && (pdata->apply_display_setting))
+		ret = pdata->apply_display_setting(pdata, persist_mode);
+
+	mutex_unlock(&mfd->bl_lock);
+
+	if (!ret) {
+		pr_debug("%s: Persist mode %d\n", __func__, persist_mode);
+		pinfo->persist_mode = persist_mode;
+	}
+
+end:
+	mutex_unlock(&mfd->mdss_sysfs_lock);
+	return len;
+}
+
+/* sysfs "msm_fb_persist_mode" show: report the cached persistence mode. */
+static ssize_t mdss_fb_get_persist_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	int ret;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", pinfo->persist_mode);
+
+	return ret;
+}
+
+/* sysfs attributes exposed under the fbN device directory. */
+static DEVICE_ATTR(msm_fb_type, 0444, mdss_fb_get_type, NULL);
+static DEVICE_ATTR(msm_fb_split, 0644, mdss_fb_show_split,
+					mdss_fb_store_split);
+static DEVICE_ATTR(show_blank_event, 0444, mdss_mdp_show_blank_event, NULL);
+static DEVICE_ATTR(idle_time, 0644,
+	mdss_fb_get_idle_time, mdss_fb_set_idle_time);
+static DEVICE_ATTR(idle_notify, 0444, mdss_fb_get_idle_notify, NULL);
+static DEVICE_ATTR(msm_fb_panel_info, 0444, mdss_fb_get_panel_info, NULL);
+static DEVICE_ATTR(msm_fb_src_split_info, 0444, mdss_fb_get_src_split_info,
+	NULL);
+static DEVICE_ATTR(msm_fb_thermal_level, 0644,
+	mdss_fb_get_thermal_level, mdss_fb_set_thermal_level);
+static DEVICE_ATTR(msm_fb_panel_status, 0644,
+	mdss_fb_get_panel_status, mdss_fb_force_panel_dead);
+static DEVICE_ATTR(msm_fb_dfps_mode, 0644,
+	mdss_fb_get_dfps_mode, mdss_fb_change_dfps_mode);
+/*
+ * NOTE(review): mode 0664 grants write permission but no store callback
+ * is provided — writes will fail; 0444 would match the behavior.
+ */
+static DEVICE_ATTR(measured_fps, 0664,
+	mdss_fb_get_fps_info, NULL);
+static DEVICE_ATTR(msm_fb_persist_mode, 0644,
+	mdss_fb_get_persist_mode, mdss_fb_change_persist_mode);
+static struct attribute *mdss_fb_attrs[] = {
+	&dev_attr_msm_fb_type.attr,
+	&dev_attr_msm_fb_split.attr,
+	&dev_attr_show_blank_event.attr,
+	&dev_attr_idle_time.attr,
+	&dev_attr_idle_notify.attr,
+	&dev_attr_msm_fb_panel_info.attr,
+	&dev_attr_msm_fb_src_split_info.attr,
+	&dev_attr_msm_fb_thermal_level.attr,
+	&dev_attr_msm_fb_panel_status.attr,
+	&dev_attr_msm_fb_dfps_mode.attr,
+	&dev_attr_measured_fps.attr,
+	&dev_attr_msm_fb_persist_mode.attr,
+	NULL,
+};
+
+static struct attribute_group mdss_fb_attr_group = {
+	.attrs = mdss_fb_attrs,
+};
+
+/* Create the fb sysfs attribute group; returns 0 or sysfs error code. */
+static int mdss_fb_create_sysfs(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	rc = sysfs_create_group(&mfd->fbi->dev->kobj, &mdss_fb_attr_group);
+	if (rc)
+		pr_err("sysfs group creation failed, rc=%d\n", rc);
+	return rc;
+}
+
+/* Remove the fb sysfs attribute group created by mdss_fb_create_sysfs(). */
+static void mdss_fb_remove_sysfs(struct msm_fb_data_type *mfd)
+{
+	sysfs_remove_group(&mfd->fbi->dev->kobj, &mdss_fb_attr_group);
+}
+
+/*
+ * Platform shutdown: mark shutdown pending, wake all waiters on the idle
+ * and kickoff queues, force-release every fb file reference and notify
+ * userspace via "show_blank_event".
+ */
+static void mdss_fb_shutdown(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	mfd->shutdown_pending = true;
+
+	/* wake up threads waiting on idle or kickoff queues */
+	wake_up_all(&mfd->idle_wait_q);
+	wake_up_all(&mfd->kickoff_wait_q);
+
+	lock_fb_info(mfd->fbi);
+	mdss_fb_release_all(mfd->fbi, true);
+	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
+	unlock_fb_info(mfd->fbi);
+}
+
+/*
+ * Input-core event callback: forward touch (EV_ABS) events to the mdp
+ * input handler while the panel is powered on; everything else is
+ * ignored.
+ */
+static void mdss_fb_input_event_handler(struct input_handle *handle,
+					    unsigned int type,
+					    unsigned int code,
+					    int value)
+{
+	struct msm_fb_data_type *mfd = handle->handler->private;
+	int rc;
+
+	if ((type != EV_ABS) || !mdss_fb_is_power_on(mfd))
+		return;
+
+	if (mfd->mdp.input_event_handler) {
+		rc = mfd->mdp.input_event_handler(mfd);
+		if (rc)
+			pr_err("mdp input event handler failed\n");
+	}
+}
+
+/*
+ * Input-core connect callback: allocate, register and open a handle for
+ * a matching input device; unwinds via goto on failure.
+ */
+static int mdss_fb_input_connect(struct input_handler *handler,
+				 struct input_dev *dev,
+				 const struct input_device_id *id)
+{
+	int rc;
+	struct input_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("failed to register input handle, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("failed to open input device, rc = %d\n", rc);
+		goto error_unregister;
+	}
+
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+error:
+	kfree(handle);
+	return rc;
+}
+
+/* Input-core disconnect callback: close, unregister and free the handle. */
+static void mdss_fb_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/*
+ * Structure for specifying event parameters on which to receive callbacks.
+ * This structure will trigger a callback in case of a touch event (specified
+ * by EV_ABS) where there is a change in X and Y coordinates. Only devices
+ * reporting ABS_MT_POSITION_X/Y (multi-touch panels) are matched.
+ */
+static const struct input_device_id mdss_fb_input_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+				BIT_MASK(ABS_MT_POSITION_X) |
+				BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{ },
+};
+
+/*
+ * mdss_fb_register_input_handler() - register an input handler so touch
+ * activity can be used to reduce first-frame latency (see probe).
+ *
+ * Returns 0 on success, -EINVAL if a handler is already registered,
+ * -ENOMEM on allocation failure, or the input core's error code.
+ */
+static int mdss_fb_register_input_handler(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct input_handler *handler;
+
+	if (mfd->input_handler)
+		return -EINVAL;
+
+	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+	if (!handler)
+		return -ENOMEM;
+
+	handler->event = mdss_fb_input_event_handler;
+	handler->connect = mdss_fb_input_connect;
+	handler->disconnect = mdss_fb_input_disconnect; /* was ',' comma op */
+	handler->name = "mdss_fb"; /* was ',' comma op */
+	handler->id_table = mdss_fb_input_ids;
+	handler->private = mfd;
+
+	rc = input_register_handler(handler);
+	if (rc) {
+		pr_err("Unable to register the input handler\n");
+		kfree(handler);
+	} else {
+		mfd->input_handler = handler;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_fb_unregister_input_handler() - undo
+ * mdss_fb_register_input_handler(). No-op when nothing is registered.
+ */
+static void mdss_fb_unregister_input_handler(struct msm_fb_data_type *mfd)
+{
+	if (!mfd->input_handler)
+		return;
+
+	input_unregister_handler(mfd->input_handler);
+	kfree(mfd->input_handler);
+	/*
+	 * Clear the pointer so a later register call does not bail with
+	 * -EINVAL on (or dereference) the freed handler.
+	 */
+	mfd->input_handler = NULL;
+}
+
+/*
+ * Convert an mdss_panel_timing into an fb_videomode. The pixel clock is
+ * derived from total H/V dimensions and refresh rate when a refresh rate
+ * is known, otherwise from the timing's raw clk_rate.
+ */
+static void mdss_fb_videomode_from_panel_timing(struct fb_videomode *videomode,
+		struct mdss_panel_timing *pt)
+{
+	videomode->name = pt->name;
+	videomode->xres = pt->xres;
+	videomode->yres = pt->yres;
+	videomode->left_margin = pt->h_back_porch;
+	videomode->right_margin = pt->h_front_porch;
+	videomode->hsync_len = pt->h_pulse_width;
+	videomode->upper_margin = pt->v_back_porch;
+	videomode->lower_margin = pt->v_front_porch;
+	videomode->vsync_len = pt->v_pulse_width;
+	videomode->refresh = pt->frame_rate;
+	videomode->flag = 0;
+	videomode->vmode = 0;
+	videomode->sync = 0;
+
+	if (videomode->refresh) {
+		unsigned long clk_rate, h_total, v_total;
+
+		h_total = videomode->xres + videomode->left_margin
+			+ videomode->right_margin + videomode->hsync_len;
+		v_total = videomode->yres + videomode->lower_margin
+			+ videomode->upper_margin + videomode->vsync_len;
+		clk_rate = h_total * v_total * videomode->refresh;
+		videomode->pixclock =
+			KHZ2PICOS(clk_rate / 1000);
+	} else {
+		videomode->pixclock =
+			KHZ2PICOS((unsigned long)pt->clk_rate / 1000);
+	}
+}
+
+/*
+ * Derive the framebuffer split configuration from panel data:
+ * split-display panels become pingpong or dual-LM dual-display split;
+ * a single panel with two layer-mixer widths becomes dual-LM
+ * single-display; otherwise no split.
+ */
+static void mdss_fb_set_split_mode(struct msm_fb_data_type *mfd,
+		struct mdss_panel_data *pdata)
+{
+	if (pdata->panel_info.is_split_display) {
+		struct mdss_panel_data *pnext = pdata->next;
+
+		mfd->split_fb_left = pdata->panel_info.lm_widths[0];
+		if (pnext)
+			mfd->split_fb_right = pnext->panel_info.lm_widths[0];
+
+		if (pdata->panel_info.use_pingpong_split)
+			mfd->split_mode = MDP_PINGPONG_SPLIT;
+		else
+			mfd->split_mode = MDP_DUAL_LM_DUAL_DISPLAY;
+	} else if ((pdata->panel_info.lm_widths[0] != 0)
+			&& (pdata->panel_info.lm_widths[1] != 0)) {
+		mfd->split_fb_left = pdata->panel_info.lm_widths[0];
+		mfd->split_fb_right = pdata->panel_info.lm_widths[1];
+		mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+	} else {
+		mfd->split_mode = MDP_SPLIT_MODE_NONE;
+	}
+}
+
+/*
+ * Build the fb modedb/modelist from the panel's timings list. For split
+ * displays the peer's matching timing widens xres, and the peer is
+ * activated only if it has a timing matching the current one. Returns 0
+ * (also when there is nothing to do) or -ENOMEM.
+ */
+static int mdss_fb_init_panel_modes(struct msm_fb_data_type *mfd,
+		struct mdss_panel_data *pdata)
+{
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_videomode *modedb;
+	struct mdss_panel_timing *pt;
+	struct list_head *pos;
+	int num_timings = 0;
+	int i = 0;
+
+	/* check if multiple modes are supported */
+	if (!pdata->timings_list.prev || !pdata->timings_list.next)
+		INIT_LIST_HEAD(&pdata->timings_list);
+
+	if (!fbi || !pdata->current_timing || list_empty(&pdata->timings_list))
+		return 0;
+
+	list_for_each(pos, &pdata->timings_list)
+		num_timings++;
+
+	modedb = devm_kzalloc(fbi->dev, num_timings * sizeof(*modedb),
+			GFP_KERNEL);
+	if (!modedb)
+		return -ENOMEM;
+
+	list_for_each_entry(pt, &pdata->timings_list, list) {
+		struct mdss_panel_timing *spt = NULL;
+
+		mdss_fb_videomode_from_panel_timing(modedb + i, pt);
+		if (pdata->next) {
+			spt = mdss_panel_get_timing_by_name(pdata->next,
+					modedb[i].name);
+			/* Combined width across both halves of a split panel. */
+			if (!IS_ERR_OR_NULL(spt))
+				modedb[i].xres += spt->xres;
+			else
+				pr_debug("no matching split config for %s\n",
+						modedb[i].name);
+
+			/*
+			 * if no panel timing found for current, need to
+			 * disable it otherwise mark it as active
+			 */
+			if (pt == pdata->current_timing)
+				pdata->next->active = !IS_ERR_OR_NULL(spt);
+		}
+
+		if (pt == pdata->current_timing) {
+			pr_debug("found current mode: %s\n", pt->name);
+			fbi->mode = modedb + i;
+		}
+		i++;
+	}
+
+	fbi->monspecs.modedb = modedb;
+	fbi->monspecs.modedb_len = num_timings;
+
+	/* destroy and recreate modelist */
+	fb_destroy_modelist(&fbi->modelist);
+
+	if (fbi->mode)
+		fb_videomode_to_var(&fbi->var, fbi->mode);
+	fb_videomode_to_modelist(modedb, num_timings, &fbi->modelist);
+
+	return 0;
+}
+
+/*
+ * mdss_fb_probe() - framebuffer platform device probe.
+ *
+ * Allocates the fb_info/msm_fb_data_type pair, initializes backlight,
+ * split-display and sync-point timeline state, registers the
+ * framebuffer, sysfs group, runtime PM, the LED backlight class device
+ * and (for MIPI panels) an input handler used to cut first-frame
+ * latency. Returns 0 on success or a negative errno.
+ */
+static int mdss_fb_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd = NULL;
+	struct mdss_panel_data *pdata;
+	struct fb_info *fbi;
+	int rc;
+
+	if (fbi_list_index >= MAX_FBI_LIST)
+		return -ENOMEM;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata)
+		return -EPROBE_DEFER;
+
+	if (!mdp_instance) {
+		pr_err("mdss mdp resource not initialized yet\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * alloc framebuffer info + par data
+	 */
+	fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
+	if (fbi == NULL) {
+		pr_err("can't allocate framebuffer info data!\n");
+		return -ENOMEM;
+	}
+
+	mfd = (struct msm_fb_data_type *)fbi->par;
+	mfd->key = MFD_KEY;
+	mfd->fbi = fbi;
+	mfd->panel_info = &pdata->panel_info;
+	mfd->panel.type = pdata->panel_info.type;
+	/*
+	 * NOTE(review): mfd->index is still 0 (zeroed alloc) here, so
+	 * panel.id is always 0; it is assigned two lines below — confirm
+	 * this ordering is intended.
+	 */
+	mfd->panel.id = mfd->index;
+	mfd->fb_page = MDSS_FB_NUM;
+	mfd->index = fbi_list_index;
+	mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
+
+	mfd->ext_ad_ctrl = -1;
+	if (mfd->panel_info && mfd->panel_info->brightness_max > 0)
+		MDSS_BRIGHT_TO_BL(mfd->bl_level, backlight_led.brightness,
+		mfd->panel_info->bl_max, mfd->panel_info->brightness_max);
+	else
+		mfd->bl_level = 0;
+
+	mfd->bl_scale = 1024;
+	mfd->bl_min_lvl = 30;
+	mfd->ad_bl_level = 0;
+	mfd->fb_imgType = MDP_RGBA_8888;
+	mfd->calib_mode_bl = 0;
+	mfd->unset_bl_level = U32_MAX;
+
+	mfd->pdev = pdev;
+
+	mfd->split_fb_left = mfd->split_fb_right = 0;
+
+	mdss_fb_set_split_mode(mfd, pdata);
+	pr_info("fb%d: split_mode:%d left:%d right:%d\n", mfd->index,
+		mfd->split_mode, mfd->split_fb_left, mfd->split_fb_right);
+
+	mfd->mdp = *mdp_instance;
+
+	rc = of_property_read_bool(pdev->dev.of_node,
+		"qcom,boot-indication-enabled");
+
+	if (rc) {
+		led_trigger_register_simple("boot-indication",
+			&(mfd->boot_notification_led));
+	}
+
+	INIT_LIST_HEAD(&mfd->file_list);
+
+	mutex_init(&mfd->bl_lock);
+	mutex_init(&mfd->mdss_sysfs_lock);
+	mutex_init(&mfd->switch_lock);
+
+	fbi_list[fbi_list_index++] = fbi;
+
+	platform_set_drvdata(pdev, mfd);
+
+	rc = mdss_fb_register(mfd);
+	if (rc)
+		return rc;
+
+	mdss_fb_create_sysfs(mfd);
+	mdss_fb_send_panel_event(mfd, MDSS_EVENT_FB_REGISTERED, fbi);
+
+	if (mfd->mdp.init_fnc) {
+		rc = mfd->mdp.init_fnc(mfd);
+		if (rc) {
+			pr_err("init_fnc failed\n");
+			return rc;
+		}
+	}
+	mdss_fb_init_fps_info(mfd);
+
+	rc = pm_runtime_set_active(mfd->fbi->dev);
+	if (rc < 0)
+		pr_err("pm_runtime: fail to set active.\n");
+	pm_runtime_enable(mfd->fbi->dev);
+
+	/* android supports only one lcd-backlight/lcd for now */
+	if (!lcd_backlight_registered) {
+		backlight_led.brightness = mfd->panel_info->brightness_max;
+		backlight_led.max_brightness = mfd->panel_info->brightness_max;
+		if (led_classdev_register(&pdev->dev, &backlight_led))
+			pr_err("led_classdev_register failed\n");
+		else
+			lcd_backlight_registered = 1;
+	}
+
+	mdss_fb_init_panel_modes(mfd, pdata);
+
+	mfd->mdp_sync_pt_data.fence_name = "mdp-fence";
+	if (mfd->mdp_sync_pt_data.timeline == NULL) {
+		char timeline_name[32];
+
+		snprintf(timeline_name, sizeof(timeline_name),
+			"mdss_fb_%d", mfd->index);
+		mfd->mdp_sync_pt_data.timeline =
+			mdss_create_timeline(timeline_name);
+		if (mfd->mdp_sync_pt_data.timeline == NULL) {
+			pr_err("cannot create release fence time line\n");
+			return -ENOMEM;
+		}
+		snprintf(timeline_name, sizeof(timeline_name),
+			"mdss_fb_%d_retire", mfd->index);
+		mfd->mdp_sync_pt_data.timeline_retire =
+			mdss_create_timeline(timeline_name);
+		/*
+		 * Check the retire timeline just created (the original
+		 * re-tested 'timeline', masking a failed allocation).
+		 */
+		if (mfd->mdp_sync_pt_data.timeline_retire == NULL) {
+			pr_err("cannot create retire fence time line\n");
+			return -ENOMEM;
+		}
+		mfd->mdp_sync_pt_data.notifier.notifier_call =
+			__mdss_fb_sync_buf_done_callback;
+	}
+
+	mdss_fb_set_mdp_sync_pt_threshold(mfd, mfd->panel.type);
+
+	if (mfd->mdp.splash_init_fnc)
+		mfd->mdp.splash_init_fnc(mfd);
+
+	/*
+	 * Register with input driver for a callback for command mode panels.
+	 * When there is an input event, mdp clocks will be turned on to reduce
+	 * latency when a frame update happens.
+	 * For video mode panels, idle timeout will be delayed so that userspace
+	 * does not get an idle event while new frames are expected. In case of
+	 * an idle event, user space tries to fall back to GPU composition which
+	 * can lead to increased load when there are new frames.
+	 */
+	if (mfd->mdp.input_event_handler &&
+		((mfd->panel_info->type == MIPI_CMD_PANEL) ||
+		(mfd->panel_info->type == MIPI_VIDEO_PANEL)))
+		if (mdss_fb_register_input_handler(mfd))
+			pr_err("failed to register input handler\n");
+
+	INIT_DELAYED_WORK(&mfd->idle_notify_work, __mdss_fb_idle_notify_work);
+
+	return rc;
+}
+
+/*
+ * Set sync-point fence thresholds per panel type: writeback and command
+ * panels signal after 1 commit (command panels also retire at 1),
+ * everything else after 2 with no retire threshold.
+ */
+static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd,
+		int type)
+{
+	if (!mfd)
+		return;
+
+	switch (type) {
+	case WRITEBACK_PANEL:
+		mfd->mdp_sync_pt_data.threshold = 1;
+		mfd->mdp_sync_pt_data.retire_threshold = 0;
+		break;
+	case MIPI_CMD_PANEL:
+		mfd->mdp_sync_pt_data.threshold = 1;
+		mfd->mdp_sync_pt_data.retire_threshold = 1;
+		break;
+	default:
+		mfd->mdp_sync_pt_data.threshold = 2;
+		mfd->mdp_sync_pt_data.retire_threshold = 0;
+		break;
+	}
+}
+
+/*
+ * Platform remove: tear down sysfs, runtime PM, input handler, debugfs,
+ * suspend the panel and unregister the framebuffer and LED backlight.
+ */
+static int mdss_fb_remove(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	mdss_fb_remove_sysfs(mfd);
+
+	pm_runtime_disable(mfd->fbi->dev);
+
+	/*
+	 * NOTE(review): the MFD_KEY sanity check runs after sysfs and PM
+	 * teardown have already touched mfd — confirm ordering is intended.
+	 */
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdss_fb_unregister_input_handler(mfd);
+	mdss_panel_debugfs_cleanup(mfd->panel_info);
+
+	if (mdss_fb_suspend_sub(mfd))
+		pr_err("msm_fb_remove: can't stop the device %d\n",
+			    mfd->index);
+
+	/* remove /dev/fb* */
+	unregister_framebuffer(mfd->fbi);
+
+	if (lcd_backlight_registered) {
+		lcd_backlight_registered = 0;
+		led_classdev_unregister(&backlight_led);
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a panel event down the pdata chain (main panel, then
+ * split/next panels), stopping at the first non-zero handler return.
+ * Returns 0, the first handler error, or -ENODEV with no panel.
+ */
+static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
+					int event, void *arg)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	pr_debug("sending event=%d for fb%d\n", event, mfd->index);
+
+	do {
+		if (pdata->event_handler)
+			ret = pdata->event_handler(pdata, event, arg);
+
+		pdata = pdata->next;
+	} while (!ret && pdata);
+
+	return ret;
+}
+
+/*
+ * Common suspend path: wait for pan idle, notify the panel, snapshot
+ * op_enable/power state for resume, and if the display is still on,
+ * drop it to ULP (interface clocks off, panel left on) before marking
+ * the fb suspended.
+ */
+static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	if ((!mfd) || (mfd->key != MFD_KEY))
+		return 0;
+
+	pr_debug("mdss_fb suspend index=%d\n", mfd->index);
+
+	ret = mdss_fb_pan_idle(mfd);
+	if (ret) {
+		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
+			mfd->index, ret);
+		goto exit;
+	}
+
+	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND, NULL);
+	if (ret) {
+		pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
+		goto exit;
+	}
+
+	mfd->suspend.op_enable = mfd->op_enable;
+	mfd->suspend.panel_power_state = mfd->panel_power_state;
+
+	if (mfd->op_enable) {
+		/*
+		 * Ideally, display should have either been blanked by now, or
+		 * should have transitioned to a low power state. If not, then
+		 * as a fall back option, enter ulp state to leave the display
+		 * on, but turn off all interface clocks.
+		 */
+		if (mdss_fb_is_power_on(mfd)) {
+			ret = mdss_fb_blank_sub(BLANK_FLAG_ULP, mfd->fbi,
+					mfd->suspend.op_enable);
+			if (ret) {
+				pr_err("can't turn off display!\n");
+				goto exit;
+			}
+		}
+		mfd->op_enable = false;
+		fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
+	}
+exit:
+	return ret;
+}
+
+/*
+ * Common resume path: notify the panel, restore op_enable, and unblank
+ * with the flag matching the pre-suspend power state (interactive →
+ * UNBLANK, low-power → LP; ULP needs no action). Signals
+ * power_set_comp for waiters gated on is_power_setting.
+ */
+static int mdss_fb_resume_sub(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	if ((!mfd) || (mfd->key != MFD_KEY))
+		return 0;
+
+	reinit_completion(&mfd->power_set_comp);
+	mfd->is_power_setting = true;
+	pr_debug("mdss_fb resume index=%d\n", mfd->index);
+
+	ret = mdss_fb_pan_idle(mfd);
+	if (ret) {
+		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
+			mfd->index, ret);
+		return ret;
+	}
+
+	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME, NULL);
+	if (ret) {
+		pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
+		return ret;
+	}
+
+	/* resume state var recover */
+	mfd->op_enable = mfd->suspend.op_enable;
+
+	/*
+	 * If the fb was explicitly blanked or transitioned to ulp during
+	 * suspend, then undo it during resume with the appropriate unblank
+	 * flag. If fb was in ulp state when entering suspend, then nothing
+	 * needs to be done.
+	 */
+	if (mdss_panel_is_power_on(mfd->suspend.panel_power_state) &&
+		!mdss_panel_is_power_on_ulp(mfd->suspend.panel_power_state)) {
+		int unblank_flag = mdss_panel_is_power_on_interactive(
+			mfd->suspend.panel_power_state) ? FB_BLANK_UNBLANK :
+			BLANK_FLAG_LP;
+
+		ret = mdss_fb_blank_sub(unblank_flag, mfd->fbi, mfd->op_enable);
+		if (ret)
+			pr_warn("can't turn on display!\n");
+		else
+			fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
+	}
+	mfd->is_power_setting = false;
+	complete_all(&mfd->power_set_comp);
+
+	return ret;
+}
+
+/*
+ * Legacy (non-PM_SLEEP) platform suspend/resume hooks; thin wrappers
+ * around the common *_sub helpers. Compiled out in favor of dev_pm_ops
+ * when CONFIG_PM_SLEEP is set.
+ */
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdss_fb_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "display suspend\n");
+
+	return mdss_fb_suspend_sub(mfd);
+}
+
+static int mdss_fb_resume(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "display resume\n");
+
+	return mdss_fb_resume_sub(mfd);
+}
+#else
+#define mdss_fb_suspend NULL
+#define mdss_fb_resume NULL
+#endif
+
+/* System sleep PM callbacks (dev_pm_ops path). */
+#ifdef CONFIG_PM_SLEEP
+static int mdss_fb_pm_suspend(struct device *dev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm suspend\n");
+
+	return mdss_fb_suspend_sub(mfd);
+}
+
+static int mdss_fb_pm_resume(struct device *dev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm resume\n");
+
+	/*
+	 * It is possible that the runtime status of the fb device may
+	 * have been active when the system was suspended. Reset the runtime
+	 * status to suspended state after a complete system resume.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
+	return mdss_fb_resume_sub(mfd);
+}
+#endif
+
+/* PM ops, DT match table and platform driver registration data. */
+static const struct dev_pm_ops mdss_fb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdss_fb_pm_suspend, mdss_fb_pm_resume)
+};
+
+static const struct of_device_id mdss_fb_dt_match[] = {
+	{ .compatible = "qcom,mdss-fb",},
+	{}
+};
+EXPORT_COMPAT("qcom,mdss-fb");
+
+static struct platform_driver mdss_fb_driver = {
+	.probe = mdss_fb_probe,
+	.remove = mdss_fb_remove,
+	.suspend = mdss_fb_suspend,
+	.resume = mdss_fb_resume,
+	.shutdown = mdss_fb_shutdown,
+	.driver = {
+		.name = "mdss_fb",
+		.of_match_table = mdss_fb_dt_match,
+		.pm = &mdss_fb_pm_ops,
+	},
+};
+
+/*
+ * Scale a backlight level in place by bl_scale/1024, clamping to
+ * [bl_min_lvl, bl_max]. Levels already below bl_min_lvl pass through
+ * unscaled.
+ */
+static void mdss_fb_scale_bl(struct msm_fb_data_type *mfd, u32 *bl_lvl)
+{
+	u32 temp = *bl_lvl;
+
+	pr_debug("input = %d, scale = %d\n", temp, mfd->bl_scale);
+	if (temp >= mfd->bl_min_lvl) {
+		if (temp > mfd->panel_info->bl_max) {
+			pr_warn("%s: invalid bl level\n",
+				__func__);
+			temp = mfd->panel_info->bl_max;
+		}
+		if (mfd->bl_scale > 1024) {
+			pr_warn("%s: invalid bl scale\n",
+				__func__);
+			mfd->bl_scale = 1024;
+		}
+		/*
+		 * bl_scale is the numerator of
+		 * scaling fraction (x/1024)
+		 */
+		temp = (temp * mfd->bl_scale) / 1024;
+
+		/*if less than minimum level, use min level*/
+		if (temp < mfd->bl_min_lvl)
+			temp = mfd->bl_min_lvl;
+	}
+	pr_debug("output = %d\n", temp);
+
+	(*bl_lvl) = temp;
+}
+
+/* must call this function from within mfd->bl_lock */
+void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl)
+{
+	struct mdss_panel_data *pdata;
+	u32 temp = bkl_lvl;
+	bool ad_bl_notify_needed = false;
+	bool bl_notify_needed = false;
+
+	/*
+	 * Defer the update (stash in unset_bl_level for
+	 * mdss_fb_update_backlight) when the panel is off, updates are
+	 * disallowed, or cont-splash is still active; calibration mode
+	 * bypasses these gates.
+	 */
+	if ((((mdss_fb_is_power_off(mfd) && mfd->dcm_state != DCM_ENTER)
+		|| !mfd->allow_bl_update) && !IS_CALIB_MODE_BL(mfd)) ||
+		mfd->panel_info->cont_splash_enabled) {
+		mfd->unset_bl_level = bkl_lvl;
+		return;
+	} else if (mdss_fb_is_power_on(mfd) && mfd->panel_info->panel_dead) {
+		mfd->unset_bl_level = mfd->bl_level;
+	} else {
+		mfd->unset_bl_level = U32_MAX;
+	}
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+
+	if ((pdata) && (pdata->set_backlight)) {
+		if (mfd->mdp.ad_calc_bl)
+			(*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
+							&ad_bl_notify_needed);
+		if (!IS_CALIB_MODE_BL(mfd))
+			mdss_fb_scale_bl(mfd, &temp);
+		/*
+		 * Even though backlight has been scaled, want to show that
+		 * backlight has been set to bkl_lvl to those that read from
+		 * sysfs node. Thus, need to set bl_level even if it appears
+		 * the backlight has already been set to the level it is at,
+		 * as well as setting bl_level to bkl_lvl even though the
+		 * backlight has been set to the scaled value.
+		 */
+		if (mfd->bl_level_scaled == temp) {
+			mfd->bl_level = bkl_lvl;
+		} else {
+			if (mfd->bl_level != bkl_lvl)
+				bl_notify_needed = true;
+			pr_debug("backlight sent to panel :%d\n", temp);
+			pdata->set_backlight(pdata, temp);
+			mfd->bl_level = bkl_lvl;
+			mfd->bl_level_scaled = temp;
+		}
+		if (ad_bl_notify_needed)
+			mdss_fb_bl_update_notify(mfd,
+				NOTIFY_TYPE_BL_AD_ATTEN_UPDATE);
+		if (bl_notify_needed)
+			mdss_fb_bl_update_notify(mfd,
+				NOTIFY_TYPE_BL_UPDATE);
+	}
+}
+
+/*
+ * Push a deferred backlight level (unset_bl_level stashed while updates
+ * were blocked) to the panel once updates were disallowed, then
+ * re-enable backlight updates. No-op when nothing is pending.
+ */
+void mdss_fb_update_backlight(struct msm_fb_data_type *mfd)
+{
+	struct mdss_panel_data *pdata;
+	u32 temp;
+	bool bl_notify = false;
+
+	if (mfd->unset_bl_level == U32_MAX)
+		return;
+	mutex_lock(&mfd->bl_lock);
+	if (!mfd->allow_bl_update) {
+		pdata = dev_get_platdata(&mfd->pdev->dev);
+		if ((pdata) && (pdata->set_backlight)) {
+			mfd->bl_level = mfd->unset_bl_level;
+			temp = mfd->bl_level;
+			if (mfd->mdp.ad_calc_bl)
+				(*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
+								&bl_notify);
+			if (bl_notify)
+				mdss_fb_bl_update_notify(mfd,
+					NOTIFY_TYPE_BL_AD_ATTEN_UPDATE);
+			mdss_fb_bl_update_notify(mfd, NOTIFY_TYPE_BL_UPDATE);
+			pdata->set_backlight(pdata, temp);
+			mfd->bl_level_scaled = mfd->unset_bl_level;
+			mfd->allow_bl_update = true;
+		}
+	}
+	mutex_unlock(&mfd->bl_lock);
+}
+
+/*
+ * Start the per-fb display kthread that drains committed frames;
+ * refreshes split config first. Returns 0 or the kthread error, leaving
+ * disp_thread NULL on failure.
+ */
+static int mdss_fb_start_disp_thread(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	pr_debug("%pS: start display thread fb%d\n",
+		__builtin_return_address(0), mfd->index);
+
+	/* this is needed for new split request from debugfs */
+	mdss_fb_get_split(mfd);
+
+	atomic_set(&mfd->commits_pending, 0);
+	mfd->disp_thread = kthread_run(__mdss_fb_display_thread,
+				mfd, "mdss_fb%d", mfd->index);
+
+	if (IS_ERR(mfd->disp_thread)) {
+		pr_err("ERROR: unable to start display thread %d\n",
+				mfd->index);
+		ret = PTR_ERR(mfd->disp_thread);
+		mfd->disp_thread = NULL;
+	}
+
+	return ret;
+}
+
+/* Stop the display kthread and clear the handle; caller must ensure it runs. */
+static void mdss_fb_stop_disp_thread(struct msm_fb_data_type *mfd)
+{
+	pr_debug("%pS: stop display thread fb%d\n",
+		__builtin_return_address(0), mfd->index);
+
+	kthread_stop(mfd->disp_thread);
+	mfd->disp_thread = NULL;
+}
+
+/*
+ * If debugfs overrides are flagged, turn the pipe off, fold the debugfs
+ * values into panel_info, re-validate split config and fb var, and ask
+ * the panel to re-check parameters via MDSS_EVENT_CHECK_PARAMS.
+ */
+static void mdss_panel_validate_debugfs_info(struct msm_fb_data_type *mfd)
+{
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct mdss_panel_data *pdata = container_of(panel_info,
+				struct mdss_panel_data, panel_info);
+
+	if (panel_info->debugfs_info->override_flag) {
+		if (mfd->mdp.off_fnc) {
+			mfd->panel_reconfig = true;
+			mfd->mdp.off_fnc(mfd);
+			mfd->panel_reconfig = false;
+		}
+
+		pr_debug("Overriding panel_info with debugfs_info\n");
+		panel_info->debugfs_info->override_flag = 0;
+		mdss_panel_debugfsinfo_to_panelinfo(panel_info);
+		if (is_panel_split(mfd) && pdata->next)
+			mdss_fb_validate_split(pdata->panel_info.xres,
+					pdata->next->panel_info.xres, mfd);
+		mdss_panelinfo_to_fb_var(panel_info, var);
+		if (mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
+							panel_info))
+			pr_err("Failed to send panel event CHECK_PARAMS\n");
+	}
+}
+
+/*
+ * mdss_fb_blank_blank() - transition the panel to a lower power state
+ * @mfd: framebuffer data for this display
+ * @req_power_state: target MDSS_PANEL_POWER_* state
+ *
+ * No-op if the panel is already off, there is no off handler, or the
+ * panel is already in the requested state. Wakes any update waiters
+ * with NOTIFY_TYPE_SUSPEND, and for a full power-off stops the display
+ * thread and drives the backlight to 0 while stashing the current level
+ * in unset_bl_level so the next unblank can restore it.
+ *
+ * Return: 0 on success or the off_fnc error (in which case the previous
+ * power state is restored).
+ */
+static int mdss_fb_blank_blank(struct msm_fb_data_type *mfd,
+	int req_power_state)
+{
+	int ret = 0;
+	int cur_power_state, current_bl;
+
+	if (!mfd)
+		return -EINVAL;
+
+	if (!mdss_fb_is_power_on(mfd) || !mfd->mdp.off_fnc)
+		return 0;
+
+	cur_power_state = mfd->panel_power_state;
+
+	pr_debug("Transitioning from %d --> %d\n", cur_power_state,
+		req_power_state);
+
+	if (cur_power_state == req_power_state) {
+		pr_debug("No change in power state\n");
+		return 0;
+	}
+
+	/* wake any waiters so they observe the suspend transition */
+	mutex_lock(&mfd->update.lock);
+	mfd->update.type = NOTIFY_TYPE_SUSPEND;
+	mfd->update.is_suspend = 1;
+	mutex_unlock(&mfd->update.lock);
+	complete(&mfd->update.comp);
+	del_timer(&mfd->no_update.timer);
+	mfd->no_update.value = NOTIFY_TYPE_SUSPEND;
+	complete(&mfd->no_update.comp);
+
+	mfd->op_enable = false;
+	if (mdss_panel_is_power_off(req_power_state)) {
+		/* Stop Display thread */
+		if (mfd->disp_thread)
+			mdss_fb_stop_disp_thread(mfd);
+		mutex_lock(&mfd->bl_lock);
+		current_bl = mfd->bl_level;
+		/* temporarily allow the update so 0 actually reaches the
+		 * panel, then re-block and remember the old level for the
+		 * next unblank
+		 */
+		mfd->allow_bl_update = true;
+		mdss_fb_set_backlight(mfd, 0);
+		mfd->allow_bl_update = false;
+		mfd->unset_bl_level = current_bl;
+		mutex_unlock(&mfd->bl_lock);
+	}
+	mfd->panel_power_state = req_power_state;
+
+	ret = mfd->mdp.off_fnc(mfd);
+	if (ret)
+		mfd->panel_power_state = cur_power_state;
+	else if (mdss_panel_is_power_off(req_power_state))
+		mdss_fb_release_fences(mfd);
+	mfd->op_enable = true;
+	complete(&mfd->power_off_comp);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_blank_unblank() - bring the panel to full interactive power
+ * @mfd: framebuffer data for this display
+ *
+ * Applies any pending debugfs panel overrides, starts the display
+ * thread if needed, powers the pipe on via on_fnc, refreshes the fb var
+ * from panel_info, and conditionally restores the backlight when coming
+ * from a full power-off.
+ *
+ * Return: 0 on success (including "already interactive"), negative
+ * errno otherwise.
+ */
+static int mdss_fb_blank_unblank(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+	int cur_power_state;
+
+	if (!mfd)
+		return -EINVAL;
+
+	if (mfd->panel_info->debugfs_info)
+		mdss_panel_validate_debugfs_info(mfd);
+
+	/* Start Display thread */
+	if (mfd->disp_thread == NULL) {
+		ret = mdss_fb_start_disp_thread(mfd);
+		if (IS_ERR_VALUE((unsigned long)ret))
+			return ret;
+	}
+
+	cur_power_state = mfd->panel_power_state;
+	pr_debug("Transitioning from %d --> %d\n", cur_power_state,
+		MDSS_PANEL_POWER_ON);
+
+	if (mdss_panel_is_power_on_interactive(cur_power_state)) {
+		pr_debug("No change in power state\n");
+		return 0;
+	}
+
+	if (mfd->mdp.on_fnc) {
+		struct mdss_panel_info *panel_info = mfd->panel_info;
+		struct fb_var_screeninfo *var = &mfd->fbi->var;
+
+		ret = mfd->mdp.on_fnc(mfd);
+		if (ret) {
+			/* undo the thread start from above */
+			mdss_fb_stop_disp_thread(mfd);
+			goto error;
+		}
+
+		mfd->panel_power_state = MDSS_PANEL_POWER_ON;
+		mfd->panel_info->panel_dead = false;
+		mutex_lock(&mfd->update.lock);
+		mfd->update.type = NOTIFY_TYPE_UPDATE;
+		mfd->update.is_suspend = 0;
+		mutex_unlock(&mfd->update.lock);
+
+		/*
+		 * Panel info can change depending in the information
+		 * programmed in the controller.
+		 * Update this info in the upstream structs.
+		 */
+		mdss_panelinfo_to_fb_var(panel_info, var);
+
+		/* Start the work thread to signal idle time */
+		if (mfd->idle_time)
+			schedule_delayed_work(&mfd->idle_notify_work,
+				msecs_to_jiffies(mfd->idle_time));
+	}
+
+	/* Reset the backlight only if the panel was off */
+	if (mdss_panel_is_power_off(cur_power_state)) {
+		mutex_lock(&mfd->bl_lock);
+		if (!mfd->allow_bl_update) {
+			mfd->allow_bl_update = true;
+			/*
+			 * If in AD calibration mode then frameworks would not
+			 * be allowed to update backlight hence post unblank
+			 * the backlight would remain 0 (0 is set in blank).
+			 * Hence resetting back to calibration mode value
+			 */
+			if (IS_CALIB_MODE_BL(mfd))
+				mdss_fb_set_backlight(mfd, mfd->calib_mode_bl);
+			else if ((!mfd->panel_info->mipi.post_init_delay) &&
+				(mfd->unset_bl_level != U32_MAX))
+				mdss_fb_set_backlight(mfd, mfd->unset_bl_level);
+
+			/*
+			 * it blocks the backlight update between unblank and
+			 * first kickoff to avoid backlight turn on before black
+			 * frame is transferred to panel through unblank call.
+			 */
+			mfd->allow_bl_update = false;
+		}
+		mutex_unlock(&mfd->bl_lock);
+	}
+
+error:
+	return ret;
+}
+
+/*
+ * mdss_fb_blank_sub() - core blank/unblank state machine
+ * @blank_mode: FB_BLANK_* or MDSS BLANK_FLAG_LP/BLANK_FLAG_ULP request
+ * @info: fbdev framebuffer info
+ * @op_enable: operations-enabled gate; 0 rejects the request
+ *
+ * Maps the requested blank mode onto a panel power state and invokes
+ * the matching blank/unblank helper. LP/ULP modes are honoured only for
+ * command mode panels; for all other panel types they degrade to a full
+ * unblank/blank respectively.
+ *
+ * Fix: every exit after ATRACE_BEGIN() now funnels through the "end"
+ * label so each trace begin is balanced by ATRACE_END(); the previous
+ * early returns in the LP/ULP handling leaked an open trace marker.
+ *
+ * Return: 0 on success or a no-op transition, negative errno otherwise.
+ */
+static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
+	int op_enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int ret = 0;
+	int cur_power_state, req_power_state = MDSS_PANEL_POWER_OFF;
+	char trace_buffer[32];
+
+	if (!mfd || !op_enable)
+		return -EPERM;
+
+	if (mfd->dcm_state == DCM_ENTER)
+		return -EPERM;
+
+	pr_debug("%pS mode:%d\n", __builtin_return_address(0),
+		blank_mode);
+
+	snprintf(trace_buffer, sizeof(trace_buffer), "fb%d blank %d",
+		mfd->index, blank_mode);
+	ATRACE_BEGIN(trace_buffer);
+
+	cur_power_state = mfd->panel_power_state;
+
+	/*
+	 * Low power (lp) and ultra low power (ulp) modes are currently only
+	 * supported for command mode panels. For all other panel, treat lp
+	 * mode as full unblank and ulp mode as full blank.
+	 */
+	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
+		if (blank_mode == BLANK_FLAG_LP) {
+			pr_debug("lp mode only valid for cmd mode panels\n");
+			if (mdss_fb_is_power_on_interactive(mfd))
+				goto end;
+			blank_mode = FB_BLANK_UNBLANK;
+		} else if (blank_mode == BLANK_FLAG_ULP) {
+			pr_debug("ulp mode valid for cmd mode panels\n");
+			if (mdss_fb_is_power_off(mfd))
+				goto end;
+			blank_mode = FB_BLANK_POWERDOWN;
+		}
+	}
+
+	switch (blank_mode) {
+	case FB_BLANK_UNBLANK:
+		pr_debug("unblank called. cur pwr state=%d\n", cur_power_state);
+		ret = mdss_fb_blank_unblank(mfd);
+		break;
+	case BLANK_FLAG_ULP:
+		req_power_state = MDSS_PANEL_POWER_LP2;
+		pr_debug("ultra low power mode requested\n");
+		if (mdss_fb_is_power_off(mfd)) {
+			pr_debug("Unsupp transition: off --> ulp\n");
+			goto end;
+		}
+
+		ret = mdss_fb_blank_blank(mfd, req_power_state);
+		break;
+	case BLANK_FLAG_LP:
+		req_power_state = MDSS_PANEL_POWER_LP1;
+		pr_debug(" power mode requested\n");
+
+		/*
+		 * If low power mode is requested when panel is already off,
+		 * then first unblank the panel before entering low power mode
+		 */
+		if (mdss_fb_is_power_off(mfd) && mfd->mdp.on_fnc) {
+			pr_debug("off --> lp. switch to on first\n");
+			ret = mdss_fb_blank_unblank(mfd);
+			if (ret)
+				break;
+		}
+
+		ret = mdss_fb_blank_blank(mfd, req_power_state);
+		break;
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_POWERDOWN:
+	default:
+		req_power_state = MDSS_PANEL_POWER_OFF;
+		pr_debug("blank powerdown called\n");
+		ret = mdss_fb_blank_blank(mfd, req_power_state);
+		break;
+	}
+
+	/* Notify listeners */
+	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
+
+end:
+	ATRACE_END(trace_buffer);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_blank() - fb_ops blank entry point
+ * @blank_mode: FB_BLANK_* or MDSS BLANK_FLAG_LP/ULP request
+ * @info: fbdev framebuffer info
+ *
+ * Waits for any pending pan to finish, then either records the desired
+ * power state for resume (when operations are disabled) or performs the
+ * transition through mdss_fb_blank_sub() under mdss_sysfs_lock. An
+ * unblank while the panel is in LPM first reconfigures the panel.
+ */
+static int mdss_fb_blank(int blank_mode, struct fb_info *info)
+{
+	int ret;
+	struct mdss_panel_data *pdata;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	ret = mdss_fb_pan_idle(mfd);
+	if (ret) {
+		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
+			mfd->index, ret);
+		return ret;
+	}
+	mutex_lock(&mfd->mdss_sysfs_lock);
+	if (mfd->op_enable == 0) {
+		/* suspended: just remember the state to restore on resume */
+		if (blank_mode == FB_BLANK_UNBLANK)
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_ON;
+		else if (blank_mode == BLANK_FLAG_ULP)
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_LP2;
+		else if (blank_mode == BLANK_FLAG_LP)
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_LP1;
+		else
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_OFF;
+		ret = 0;
+		goto end;
+	}
+	pr_debug("mode: %d\n", blank_mode);
+
+	/* NOTE(review): pdata is dereferenced below without a NULL check —
+	 * confirm platform data is always attached by this point
+	 */
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+
+	if (pdata->panel_info.is_lpm_mode &&
+			blank_mode == FB_BLANK_UNBLANK) {
+		pr_debug("panel is in lpm mode\n");
+		mfd->mdp.configure_panel(mfd, 0, 1);
+		mdss_fb_set_mdp_sync_pt_threshold(mfd, mfd->panel.type);
+		pdata->panel_info.is_lpm_mode = false;
+	}
+
+	ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
+
+end:
+	mutex_unlock(&mfd->mdss_sysfs_lock);
+	return ret;
+}
+
+/*
+ * mdss_fb_create_ion_client() - create the ION client used for fb memory
+ * @mfd: framebuffer data for this display
+ *
+ * Fix: the original cleared mfd->fb_ion_client to NULL *before*
+ * evaluating PTR_RET(mfd->fb_ion_client), so it always returned 0 even
+ * on failure (and PTR_RET(NULL) is 0 anyway); callers would then pass a
+ * NULL client to ion_alloc(). Capture the error code first and report
+ * a NULL client as -ENOMEM.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static inline int mdss_fb_create_ion_client(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	mfd->fb_ion_client = msm_ion_client_create("mdss_fb_iclient");
+	if (IS_ERR_OR_NULL(mfd->fb_ion_client)) {
+		rc = mfd->fb_ion_client ?
+			PTR_RET(mfd->fb_ion_client) : -ENOMEM;
+		pr_err("Err:client not created, val %d\n", rc);
+		mfd->fb_ion_client = NULL;
+		return rc;
+	}
+	return 0;
+}
+
+/*
+ * mdss_fb_free_fb_ion_memory() - tear down the ION-backed framebuffer
+ * @mfd: framebuffer data for this display
+ *
+ * Reverses mdss_fb_alloc_fb_ion_memory(): unmaps the kernel mapping,
+ * detaches/unmaps the dma-buf attachment (only when an IOMMU domain
+ * provider exists and the attachment looks valid) and frees the ION
+ * handle. Safe no-op when nothing was mapped (screen_base NULL).
+ */
+void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd)
+{
+	if (!mfd) {
+		pr_err("no mfd\n");
+		return;
+	}
+
+	if (!mfd->fbi->screen_base)
+		return;
+
+	if (!mfd->fb_ion_client || !mfd->fb_ion_handle) {
+		pr_err("invalid input parameters for fb%d\n", mfd->index);
+		return;
+	}
+
+	mfd->fbi->screen_base = NULL;
+	mfd->fbi->fix.smem_start = 0;
+
+	ion_unmap_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
+
+	/* only detach when the attachment chain is fully populated */
+	if (mfd->mdp.fb_mem_get_iommu_domain && !(!mfd->fb_attachment ||
+		!mfd->fb_attachment->dmabuf ||
+		!mfd->fb_attachment->dmabuf->ops)) {
+		dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
+				DMA_BIDIRECTIONAL);
+		dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
+		dma_buf_put(mfd->fbmem_buf);
+	}
+
+	ion_free(mfd->fb_ion_client, mfd->fb_ion_handle);
+	mfd->fb_ion_handle = NULL;
+	mfd->fbmem_buf = NULL;
+}
+
+/*
+ * mdss_fb_alloc_fb_ion_memory() - allocate and map the fb from ION
+ * @mfd: framebuffer data for this display
+ * @fb_size: bytes to allocate for the framebuffer
+ *
+ * Allocates fb_size bytes from the ION system heap, shares it as a
+ * dma-buf, attaches/maps it into the display SMMU domain and maps it
+ * into the kernel. On any failure all partial state is rolled back.
+ *
+ * Fix: PTR_ERR(NULL) evaluates to 0, so a NULL return from ion_alloc()
+ * or ion_map_kernel() previously made this function report success
+ * while leaving the framebuffer unmapped. NULL is now mapped to
+ * -ENOMEM explicitly.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
+{
+	int rc = 0;
+	void *vaddr;
+	int domain;
+
+	if (!mfd) {
+		pr_err("Invalid input param - no mfd\n");
+		return -EINVAL;
+	}
+
+	if (!mfd->fb_ion_client) {
+		rc = mdss_fb_create_ion_client(mfd);
+		if (rc < 0) {
+			pr_err("fb ion client couldn't be created - %d\n", rc);
+			return rc;
+		}
+	}
+
+	pr_debug("size for mmap = %zu\n", fb_size);
+	mfd->fb_ion_handle = ion_alloc(mfd->fb_ion_client, fb_size, SZ_4K,
+			ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(mfd->fb_ion_handle)) {
+		/* PTR_ERR(NULL) would be 0: never report NULL as success */
+		rc = mfd->fb_ion_handle ?
+			(int)PTR_ERR(mfd->fb_ion_handle) : -ENOMEM;
+		pr_err("unable to alloc fbmem from ion - %d\n", rc);
+		mfd->fb_ion_handle = NULL;
+		return rc;
+	}
+
+	if (mfd->mdp.fb_mem_get_iommu_domain) {
+		mfd->fbmem_buf = ion_share_dma_buf(mfd->fb_ion_client,
+				mfd->fb_ion_handle);
+		if (IS_ERR(mfd->fbmem_buf)) {
+			rc = PTR_ERR(mfd->fbmem_buf);
+			goto fb_mmap_failed;
+		}
+
+		domain = mfd->mdp.fb_mem_get_iommu_domain();
+
+		mfd->fb_attachment = mdss_smmu_dma_buf_attach(mfd->fbmem_buf,
+				&mfd->pdev->dev, domain);
+		if (IS_ERR(mfd->fb_attachment)) {
+			rc = PTR_ERR(mfd->fb_attachment);
+			goto err_put;
+		}
+
+		mfd->fb_table = dma_buf_map_attachment(mfd->fb_attachment,
+				DMA_BIDIRECTIONAL);
+		if (IS_ERR(mfd->fb_table)) {
+			rc = PTR_ERR(mfd->fb_table);
+			goto err_detach;
+		}
+	} else {
+		pr_err("No IOMMU Domain\n");
+		rc = -EINVAL;
+		goto fb_mmap_failed;
+	}
+
+	vaddr = ion_map_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
+	if (IS_ERR_OR_NULL(vaddr)) {
+		/* same PTR_ERR(NULL)==0 hazard as above */
+		rc = vaddr ? (int)PTR_ERR(vaddr) : -ENOMEM;
+		pr_err("ION memory mapping failed - %d\n", rc);
+		goto err_unmap;
+	}
+	pr_debug("alloc 0x%zxB vaddr = %pK for fb%d\n", fb_size,
+			vaddr, mfd->index);
+
+	mfd->fbi->screen_base = (char *) vaddr;
+	mfd->fbi->fix.smem_len = fb_size;
+
+	return rc;
+
+err_unmap:
+	dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
+			DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
+err_put:
+	dma_buf_put(mfd->fbmem_buf);
+fb_mmap_failed:
+	ion_free(mfd->fb_ion_client, mfd->fb_ion_handle);
+	mfd->fb_attachment = NULL;
+	mfd->fb_table = NULL;
+	mfd->fb_ion_handle = NULL;
+	mfd->fbmem_buf = NULL;
+	return rc;
+}
+
+/**
+ * mdss_fb_fbmem_ion_mmap() - Custom fb mmap() function for MSM driver.
+ *
+ * @info - Framebuffer info.
+ * @vma - VM area which is part of the process virtual memory.
+ *
+ * This framebuffer mmap function differs from standard mmap() function by
+ * allowing for customized page-protection and dynamically allocate framebuffer
+ * memory from system heap and map to iommu virtual address.
+ *
+ * The framebuffer is lazily allocated from ION on the first mmap, then
+ * each scatterlist segment is remapped into the user VMA, honouring the
+ * requested page offset and write-combine protection.
+ *
+ * Fix: the return value of io_remap_pfn_range() was previously ignored;
+ * a remap failure now aborts the mmap with the error code, matching how
+ * mdss_fb_physical_mmap() treats a failed remap.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
+		struct vm_area_struct *vma)
+{
+	int rc = 0;
+	size_t req_size, fb_size;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct sg_table *table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	unsigned int i;
+	struct page *page;
+
+	if (!mfd || !mfd->pdev || !mfd->pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	req_size = vma->vm_end - vma->vm_start;
+	fb_size = mfd->fbi->fix.smem_len;
+	if (req_size > fb_size) {
+		pr_warn("requested map is greater than framebuffer\n");
+		return -EOVERFLOW;
+	}
+
+	/* first mmap triggers the actual ION allocation */
+	if (!mfd->fbi->screen_base) {
+		rc = mdss_fb_alloc_fb_ion_memory(mfd, fb_size);
+		if (rc < 0) {
+			pr_err("fb mmap failed!!!!\n");
+			return rc;
+		}
+	}
+
+	table = mfd->fb_table;
+	if (IS_ERR(table)) {
+		pr_err("Unable to get sg_table from ion:%ld\n", PTR_ERR(table));
+		mfd->fbi->screen_base = NULL;
+		return PTR_ERR(table);
+	} else if (!table) {
+		pr_err("sg_list is NULL\n");
+		mfd->fbi->screen_base = NULL;
+		return -EINVAL;
+	}
+
+	page = sg_page(table->sgl);
+	if (page) {
+		/* walk the scatterlist, skipping up to vm_pgoff bytes,
+		 * and map each segment into the user VMA
+		 */
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			unsigned long remainder = vma->vm_end - addr;
+			unsigned long len = sg->length;
+
+			page = sg_page(sg);
+
+			if (offset >= sg->length) {
+				offset -= sg->length;
+				continue;
+			} else if (offset) {
+				page += offset / PAGE_SIZE;
+				len = sg->length - offset;
+				offset = 0;
+			}
+			len = min(len, remainder);
+
+			if (mfd->mdp_fb_page_protection ==
+					MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
+				vma->vm_page_prot =
+					pgprot_writecombine(vma->vm_page_prot);
+
+			pr_debug("vma=%pK, addr=%x len=%ld\n",
+					vma, (unsigned int)addr, len);
+			pr_debug("vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+					(unsigned int)vma->vm_start,
+					(unsigned int)vma->vm_end,
+					(unsigned long int)vma->vm_page_prot.pgprot);
+
+			rc = io_remap_pfn_range(vma, addr, page_to_pfn(page),
+					len, vma->vm_page_prot);
+			if (rc) {
+				pr_err("io_remap_pfn_range failed, rc=%d\n",
+						rc);
+				return rc;
+			}
+			addr += len;
+			if (addr >= vma->vm_end)
+				break;
+		}
+	} else {
+		pr_err("PAGE is null\n");
+		mdss_fb_free_fb_ion_memory(mfd);
+		return -ENOMEM;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_fb_physical_mmap() - Custom fb mmap() function for MSM driver.
+ *
+ * @info - Framebuffer info.
+ * @vma - VM area which is part of the process virtual memory.
+ *
+ * This framebuffer mmap function differs from standard mmap() function as
+ * map to framebuffer memory from the CMA memory which is allocated during
+ * bootup.
+ *
+ * Return: virtual address is returned through vma
+ */
+static int mdss_fb_physical_mmap(struct fb_info *info,
+		struct vm_area_struct *vma)
+{
+	/* Get frame buffer memory range. */
+	unsigned long start = info->fix.smem_start;
+	u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (!start) {
+		pr_warn("No framebuffer memory is allocated\n");
+		return -ENOMEM;
+	}
+
+	/* Set VM flags. */
+	start &= PAGE_MASK;
+	/* reject empty, out-of-range, or overflowing map requests */
+	if ((vma->vm_end <= vma->vm_start) ||
+			(off >= len) ||
+			((vma->vm_end - vma->vm_start) > (len - off)))
+		return -EINVAL;
+	off += start;
+	/* off < start means the addition above wrapped around */
+	if (off < start)
+		return -EINVAL;
+	vma->vm_pgoff = off >> PAGE_SHIFT;
+	/* This is an IO map - tell maydump to skip this VMA */
+	vma->vm_flags |= VM_IO;
+
+	if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	/* Remap the frame buffer I/O range */
+	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+				vma->vm_end - vma->vm_start,
+				vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * mdss_fb_mmap() - fb_ops mmap dispatcher
+ * @info: fbdev framebuffer info
+ * @vma: user VMA to map the framebuffer into
+ *
+ * Routes to the ION-backed or physically-contiguous mmap path according
+ * to fb_mmap_type. When the type is still unset, picks ION if no
+ * physical memory was reserved and no ION handle exists yet, otherwise
+ * physical, and records the decision for subsequent mmaps.
+ */
+static int mdss_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int rc = -EINVAL;
+
+	if (mfd->fb_mmap_type == MDP_FB_MMAP_ION_ALLOC) {
+		rc = mdss_fb_fbmem_ion_mmap(info, vma);
+	} else if (mfd->fb_mmap_type == MDP_FB_MMAP_PHYSICAL_ALLOC) {
+		rc = mdss_fb_physical_mmap(info, vma);
+	} else {
+		/* first mmap decides and latches the mapping type */
+		if (!info->fix.smem_start && !mfd->fb_ion_handle) {
+			rc = mdss_fb_fbmem_ion_mmap(info, vma);
+			mfd->fb_mmap_type = MDP_FB_MMAP_ION_ALLOC;
+		} else {
+			rc = mdss_fb_physical_mmap(info, vma);
+			mfd->fb_mmap_type = MDP_FB_MMAP_PHYSICAL_ALLOC;
+		}
+	}
+	if (rc < 0)
+		pr_err("fb mmap failed with rc = %d\n", rc);
+
+	return rc;
+}
+
+/* fbdev operation table for MDSS framebuffer devices */
+static struct fb_ops mdss_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_open = mdss_fb_open,
+	.fb_release = mdss_fb_release,
+	.fb_check_var = mdss_fb_check_var,	/* vinfo check */
+	.fb_set_par = mdss_fb_set_par,	/* set the video mode */
+	.fb_blank = mdss_fb_blank,	/* blank display */
+	.fb_pan_display = mdss_fb_pan_display,	/* pan display */
+	.fb_ioctl_v2 = mdss_fb_ioctl,	/* perform fb specific ioctl */
+#ifdef CONFIG_COMPAT
+	.fb_compat_ioctl_v2 = mdss_fb_compat_ioctl,
+#endif
+	.fb_mmap = mdss_fb_mmap,
+};
+
+/*
+ * mdss_fb_alloc_fbmem_iommu() - allocate fb memory via the SMMU
+ * @mfd: framebuffer data for this display
+ * @dom: IOMMU domain to map the allocation into
+ *
+ * Reads the reserved-memory size from the "linux,contiguous-region"
+ * phandle in DT, allocates coherent memory of that size mapped into the
+ * given domain, and publishes the virtual/physical addresses in the fb
+ * info. Returns 0 (with screen_base NULL) when no region is reserved.
+ *
+ * Return: 0 on success or no reservation; negative errno on failure.
+ */
+static int mdss_fb_alloc_fbmem_iommu(struct msm_fb_data_type *mfd, int dom)
+{
+	void *virt = NULL;
+	phys_addr_t phys = 0;
+	size_t size = 0;
+	struct platform_device *pdev = mfd->pdev;
+	int rc = 0;
+	struct device_node *fbmem_pnode = NULL;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	fbmem_pnode = of_parse_phandle(pdev->dev.of_node,
+		"linux,contiguous-region", 0);
+	if (!fbmem_pnode) {
+		/* no reservation: not an error, just leave fb unbacked */
+		pr_debug("fbmem is not reserved for %s\n", pdev->name);
+		mfd->fbi->screen_base = NULL;
+		mfd->fbi->fix.smem_start = 0;
+		return 0;
+	}
+	{
+		const u32 *addr;
+		u64 len;
+
+		addr = of_get_address(fbmem_pnode, 0, &len, NULL);
+		if (!addr) {
+			pr_err("fbmem size is not specified\n");
+			of_node_put(fbmem_pnode);
+			return -EINVAL;
+		}
+		size = (size_t)len;
+		of_node_put(fbmem_pnode);
+	}
+
+	pr_debug("%s frame buffer reserve_size=0x%zx\n", __func__, size);
+
+	if (size < PAGE_ALIGN(mfd->fbi->fix.line_length *
+			      mfd->fbi->var.yres_virtual))
+		pr_warn("reserve size is smaller than framebuffer size\n");
+
+	rc = mdss_smmu_dma_alloc_coherent(&pdev->dev, size, &phys, &mfd->iova,
+			&virt, GFP_KERNEL, dom);
+	if (rc) {
+		pr_err("unable to alloc fbmem size=%zx\n", size);
+		return -ENOMEM;
+	}
+
+	/* physical addresses above 4GB are rejected on this hardware */
+	if (MDSS_LPAE_CHECK(phys)) {
+		pr_warn("fb mem phys %pa > 4GB is not supported.\n", &phys);
+		mdss_smmu_dma_free_coherent(&pdev->dev, size, &virt,
+				phys, mfd->iova, dom);
+		return -ERANGE;
+	}
+
+	pr_debug("alloc 0x%zxB @ (%pa phys) (0x%pK virt) (%pa iova) for fb%d\n",
+		 size, &phys, virt, &mfd->iova, mfd->index);
+
+	mfd->fbi->screen_base = virt;
+	mfd->fbi->fix.smem_start = phys;
+	mfd->fbi->fix.smem_len = size;
+
+	return 0;
+}
+
+/*
+ * mdss_fb_alloc_fbmem() - dispatch framebuffer memory allocation
+ * @mfd: framebuffer data for this display
+ *
+ * Prefers a custom allocator callback when the MDP registered one,
+ * otherwise allocates through the IOMMU domain provider. Fails with
+ * -ENOMEM when neither path is available or the domain is invalid.
+ */
+static int mdss_fb_alloc_fbmem(struct msm_fb_data_type *mfd)
+{
+	int dom;
+
+	if (mfd->mdp.fb_mem_alloc_fnc)
+		return mfd->mdp.fb_mem_alloc_fnc(mfd);
+
+	if (!mfd->mdp.fb_mem_get_iommu_domain) {
+		pr_err("no fb memory allocator function defined\n");
+		return -ENOMEM;
+	}
+
+	dom = mfd->mdp.fb_mem_get_iommu_domain();
+	if (dom < 0)
+		return -ENOMEM;
+
+	return mdss_fb_alloc_fbmem_iommu(mfd, dom);
+}
+
+/*
+ * mdss_fb_register() - initialize and register the fbdev device
+ * @mfd: framebuffer data for this display
+ *
+ * Fills in the fix/var screeninfo for the panel's pixel format, sizes
+ * the framebuffer (line_length * yres, page-aligned, times fb_page),
+ * initializes the locks, timers, completions and wait queues used by
+ * the rest of the driver, allocates fb memory and registers with the
+ * fbdev core.
+ *
+ * Return: 0 on success, -ENODEV for an unknown image type, -EPERM if
+ * register_framebuffer() fails.
+ */
+static int mdss_fb_register(struct msm_fb_data_type *mfd)
+{
+	int ret = -ENODEV;
+	int bpp;
+	char panel_name[20];
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_fix_screeninfo *fix;
+	struct fb_var_screeninfo *var;
+	int *id;
+
+	/*
+	 * fb info initialization
+	 */
+	fix = &fbi->fix;
+	var = &fbi->var;
+
+	fix->type_aux = 0;	/* if type == FB_TYPE_INTERLEAVED_PLANES */
+	fix->visual = FB_VISUAL_TRUECOLOR;	/* True Color */
+	fix->ywrapstep = 0;	/* No support */
+	fix->mmio_start = 0;	/* No MMIO Address */
+	fix->mmio_len = 0;	/* No MMIO Address */
+	fix->accel = FB_ACCEL_NONE;/* FB_ACCEL_MSM needes to be added in fb.h */
+
+	/* note: comma operators below chain these into one statement */
+	var->xoffset = 0,	/* Offset from virtual to visible */
+	var->yoffset = 0,	/* resolution */
+	var->grayscale = 0,	/* No graylevels */
+	var->nonstd = 0,	/* standard pixel format */
+	var->activate = FB_ACTIVATE_VBL,	/* activate it at vsync */
+	var->height = -1,	/* height of picture in mm */
+	var->width = -1,	/* width of picture in mm */
+	var->accel_flags = 0,	/* acceleration flags */
+	var->sync = 0,	/* see FB_SYNC_* */
+	var->rotate = 0,	/* angle we rotate counter clockwise */
+	mfd->op_enable = false;
+
+	/* per-format channel layout and bytes-per-pixel */
+	switch (mfd->fb_imgType) {
+	case MDP_RGB_565:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 5;
+		var->red.offset = 11;
+		var->blue.length = 5;
+		var->green.length = 6;
+		var->red.length = 5;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 2;
+		break;
+
+	case MDP_RGB_888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 8;
+		var->red.offset = 16;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 3;
+		break;
+
+	case MDP_ARGB_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 24;
+		var->green.offset = 16;
+		var->red.offset = 8;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+	case MDP_RGBA_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 16;
+		var->green.offset = 8;
+		var->red.offset = 0;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+	case MDP_YCRYCB_H2V1:
+		fix->type = FB_TYPE_INTERLEAVED_PLANES;
+		fix->xpanstep = 2;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+
+		/* how about R/G/B offset? */
+		var->blue.offset = 0;
+		var->green.offset = 5;
+		var->red.offset = 11;
+		var->blue.length = 5;
+		var->green.length = 6;
+		var->red.length = 5;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 2;
+		break;
+
+	default:
+		pr_err("msm_fb_init: fb %d unknown image type!\n",
+			    mfd->index);
+		return ret;
+	}
+
+	mdss_panelinfo_to_fb_var(panel_info, var);
+
+	/* NOTE(review): this overwrites the FB_TYPE_* set per format
+	 * above with the is_3d_panel value — confirm this is intended
+	 */
+	fix->type = panel_info->is_3d_panel;
+	if (mfd->mdp.fb_stride)
+		fix->line_length = mfd->mdp.fb_stride(mfd->index, var->xres,
+							bpp);
+	else
+		fix->line_length = var->xres * bpp;
+
+	var->xres_virtual = var->xres;
+	var->yres_virtual = panel_info->yres * mfd->fb_page;
+	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
+
+	/*
+	 * Populate smem length here for uspace to get the
+	 * Framebuffer size when FBIO_FSCREENINFO ioctl is called.
+	 */
+	fix->smem_len = PAGE_ALIGN(fix->line_length * var->yres) * mfd->fb_page;
+
+	/* id field for fb app */
+	id = (int *)&mfd->panel;
+
+	snprintf(fix->id, sizeof(fix->id), "mdssfb_%x", (u32) *id);
+
+	fbi->fbops = &mdss_fb_ops;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->pseudo_palette = mdss_fb_pseudo_palette;
+
+	mfd->ref_cnt = 0;
+	mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
+	mfd->dcm_state = DCM_UNINIT;
+
+	if (mdss_fb_alloc_fbmem(mfd))
+		pr_warn("unable to allocate fb memory in fb register\n");
+
+	mfd->op_enable = true;
+
+	mutex_init(&mfd->update.lock);
+	mutex_init(&mfd->no_update.lock);
+	mutex_init(&mfd->mdp_sync_pt_data.sync_mutex);
+	atomic_set(&mfd->mdp_sync_pt_data.commit_cnt, 0);
+	atomic_set(&mfd->commits_pending, 0);
+	atomic_set(&mfd->ioctl_ref_cnt, 0);
+	atomic_set(&mfd->kickoff_pending, 0);
+
+	init_timer(&mfd->no_update.timer);
+	mfd->no_update.timer.function = mdss_fb_no_update_notify_timer_cb;
+	mfd->no_update.timer.data = (unsigned long)mfd;
+	mfd->update.ref_count = 0;
+	mfd->no_update.ref_count = 0;
+	mfd->update.init_done = false;
+	init_completion(&mfd->update.comp);
+	init_completion(&mfd->no_update.comp);
+	init_completion(&mfd->power_off_comp);
+	init_completion(&mfd->power_set_comp);
+	init_waitqueue_head(&mfd->commit_wait_q);
+	init_waitqueue_head(&mfd->idle_wait_q);
+	init_waitqueue_head(&mfd->ioctl_q);
+	init_waitqueue_head(&mfd->kickoff_wait_q);
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret)
+		pr_err("fb_alloc_cmap() failed!\n");
+
+	if (register_framebuffer(fbi) < 0) {
+		fb_dealloc_cmap(&fbi->cmap);
+
+		mfd->op_enable = false;
+		return -EPERM;
+	}
+
+	snprintf(panel_name, ARRAY_SIZE(panel_name), "mdss_panel_fb%d",
+		mfd->index);
+	mdss_panel_debugfs_init(panel_info, panel_name);
+	pr_info("FrameBuffer[%d] %dx%d registered successfully!\n", mfd->index,
+					fbi->var.xres, fbi->var.yres);
+
+	return 0;
+}
+
+/*
+ * mdss_fb_open() - fb_ops open entry point
+ * @info: fbdev framebuffer info
+ * @user: fbdev user flag (unused here)
+ *
+ * Rejects opens while shutdown is pending. Tracks the opening file in
+ * mfd->file_list, takes a pm_runtime reference, and unblanks the panel
+ * on the first open (ref_cnt 0). On any failure the file node and pm
+ * reference are rolled back.
+ */
+static int mdss_fb_open(struct fb_info *info, int user)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdss_fb_file_info *file_info = NULL;
+	int result;
+	struct task_struct *task = current->group_leader;
+
+	if (mfd->shutdown_pending) {
+		pr_err_once("Shutdown pending. Aborting operation. Request from pid:%d name=%s\n",
+			current->tgid, task->comm);
+		sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
+		return -ESHUTDOWN;
+	}
+
+	file_info = kmalloc(sizeof(*file_info), GFP_KERNEL);
+	if (!file_info)
+		return -ENOMEM;
+
+	file_info->file = info->file;
+	list_add(&file_info->list, &mfd->file_list);
+
+	result = pm_runtime_get_sync(info->dev);
+
+	if (result < 0) {
+		pr_err("pm_runtime: fail to wake up\n");
+		goto pm_error;
+	}
+
+	/* first opener powers the panel on */
+	if (!mfd->ref_cnt) {
+		result = mdss_fb_blank_sub(FB_BLANK_UNBLANK, info,
+					   mfd->op_enable);
+		if (result) {
+			pr_err("can't turn on fb%d! rc=%d\n", mfd->index,
+				result);
+			goto blank_error;
+		}
+	}
+
+	mfd->ref_cnt++;
+	pr_debug("mfd refcount:%d file:%pK\n", mfd->ref_cnt, info->file);
+
+	return 0;
+
+blank_error:
+	pm_runtime_put(info->dev);
+pm_error:
+	list_del(&file_info->list);
+	kfree(file_info);
+	return result;
+}
+
+/*
+ * mdss_fb_release_all() - drop one or all references to the fb device
+ * @info: fbdev framebuffer info
+ * @release_all: true to force-release every tracked file reference
+ *
+ * Drops the file node(s) from mfd->file_list along with their
+ * pm_runtime references. When the last reference goes away (or
+ * release_all is set) it releases MDP/PP resources, zeroes the
+ * backlight ahead of the blank, powers the panel down and frees the
+ * ION framebuffer memory. Otherwise only the per-file resources are
+ * released, with a pan to flush them if needed.
+ */
+static int mdss_fb_release_all(struct fb_info *info, bool release_all)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdss_fb_file_info *file_info = NULL, *temp_file_info = NULL;
+	struct file *file = info->file;
+	int ret = 0;
+	bool node_found = false;
+	struct task_struct *task = current->group_leader;
+
+	if (!mfd->ref_cnt) {
+		pr_info("try to close unopened fb %d! from pid:%d name:%s\n",
+			mfd->index, current->tgid, task->comm);
+		return -EINVAL;
+	}
+
+	/* give in-flight ioctls up to 1s to drain before tearing down */
+	if (!wait_event_timeout(mfd->ioctl_q,
+		!atomic_read(&mfd->ioctl_ref_cnt) || !release_all,
+		msecs_to_jiffies(1000)))
+		pr_warn("fb%d ioctl could not finish. waited 1 sec.\n",
+			mfd->index);
+
+	/* wait only for the last release */
+	if (release_all || (mfd->ref_cnt == 1)) {
+		ret = mdss_fb_pan_idle(mfd);
+		if (ret && (ret != -ESHUTDOWN))
+			pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d ignoring.\n",
+				mfd->index, ret);
+	}
+
+	pr_debug("release_all = %s\n", release_all ? "true" : "false");
+
+	list_for_each_entry_safe(file_info, temp_file_info, &mfd->file_list,
+		list) {
+		if (!release_all && file_info->file != file)
+			continue;
+
+		pr_debug("found file node mfd->ref=%d\n", mfd->ref_cnt);
+		list_del(&file_info->list);
+		kfree(file_info);
+
+		mfd->ref_cnt--;
+		pm_runtime_put(info->dev);
+
+		node_found = true;
+
+		if (!release_all)
+			break;
+	}
+
+	if (!node_found || (release_all && mfd->ref_cnt))
+		pr_warn("file node not found or wrong ref cnt: release all:%d refcnt:%d\n",
+			release_all, mfd->ref_cnt);
+
+	pr_debug("current process=%s pid=%d mfd->ref=%d file:%pK\n",
+		task->comm, current->tgid, mfd->ref_cnt, info->file);
+
+	if (!mfd->ref_cnt || release_all) {
+		/* resources (if any) will be released during blank */
+		if (mfd->mdp.release_fnc)
+			mfd->mdp.release_fnc(mfd, NULL);
+
+		if (mfd->mdp.pp_release_fnc) {
+			ret = (*mfd->mdp.pp_release_fnc)(mfd);
+			if (ret)
+				pr_err("PP release failed ret %d\n", ret);
+		}
+
+		/* reset backlight before blank to prevent backlight from
+		 * enabling ahead of unblank. for some special cases like
+		 * adb shell stop/start.
+		 */
+		mdss_fb_set_backlight(mfd, 0);
+
+		ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
+			mfd->op_enable);
+		if (ret) {
+			pr_err("can't turn off fb%d! rc=%d current process=%s pid=%d\n",
+			      mfd->index, ret, task->comm, current->tgid);
+			return ret;
+		}
+		if (mfd->fb_ion_handle)
+			mdss_fb_free_fb_ion_memory(mfd);
+
+		atomic_set(&mfd->ioctl_ref_cnt, 0);
+	} else {
+		if (mfd->mdp.release_fnc)
+			ret = mfd->mdp.release_fnc(mfd, file);
+
+		/* display commit is needed to release resources */
+		if (ret)
+			mdss_fb_pan_display(&mfd->fbi->var, mfd->fbi);
+	}
+
+	return ret;
+}
+
+/* fb_ops release entry point: drop only the calling file's reference */
+static int mdss_fb_release(struct fb_info *info, int user)
+{
+	return mdss_fb_release_all(info, false);
+}
+
+/*
+ * mdss_fb_power_setting_idle() - wait for an in-flight power setting
+ * @mfd: framebuffer data for this display
+ *
+ * If a power-state change is in progress, wait up to
+ * WAIT_DISP_OP_TIMEOUT for power_set_comp. On timeout or interruption
+ * the flag is force-cleared and the completion signalled so waiters do
+ * not stall forever.
+ */
+static void mdss_fb_power_setting_idle(struct msm_fb_data_type *mfd)
+{
+	int ret;
+
+	if (mfd->is_power_setting) {
+		ret = wait_for_completion_timeout(
+				&mfd->power_set_comp,
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+		if (ret < 0)
+			ret = -ERESTARTSYS;
+		else if (!ret)
+			pr_err("%s wait for power_set_comp timeout %d %d",
+				__func__, ret, mfd->is_power_setting);
+		if (ret <= 0) {
+			/* recover: clear the flag and release any waiters */
+			mfd->is_power_setting = false;
+			complete_all(&mfd->power_set_comp);
+		}
+	}
+}
+
+/*
+ * __mdss_fb_copy_fence() - snapshot and clear the pending acquire fences
+ * @sync_pt_data: sync point data owning the acquire fence array
+ * @fences: destination array (at least MDP_MAX_FENCE_FD entries)
+ * @fence_cnt: out: number of fences copied
+ *
+ * Copies the acquire fences out under sync_mutex and resets the count
+ * so the fences can be waited on without holding the lock.
+ */
+static void __mdss_fb_copy_fence(struct msm_sync_pt_data *sync_pt_data,
+	struct mdss_fence **fences, u32 *fence_cnt)
+{
+	pr_debug("%s: wait for fences\n", sync_pt_data->fence_name);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	/*
+	 * Assuming that acq_fen_cnt is sanitized in bufsync ioctl
+	 * to check for sync_pt_data->acq_fen_cnt <= MDP_MAX_FENCE_FD
+	 */
+	*fence_cnt = sync_pt_data->acq_fen_cnt;
+	sync_pt_data->acq_fen_cnt = 0;
+	if (*fence_cnt)
+		memcpy(fences, sync_pt_data->acq_fen,
+				*fence_cnt * sizeof(struct mdss_fence *));
+	mutex_unlock(&sync_pt_data->sync_mutex);
+}
+
+/*
+ * __mdss_fb_wait_for_fence_sub() - wait on a list of acquire fences
+ * @sync_pt_data: sync point data (used for logging/XLOG on timeout)
+ * @fences: fences to wait on; each is released as it is consumed
+ * @fence_cnt: number of entries in @fences
+ *
+ * Waits on each fence with a short first timeout and, if that expires,
+ * one longer final retry, all bounded by an overall budget of
+ * WAIT_MAX_FENCE_TIMEOUT across the whole list. On failure the
+ * remaining fences are still released before returning the error.
+ */
+static int __mdss_fb_wait_for_fence_sub(struct msm_sync_pt_data *sync_pt_data,
+	struct mdss_fence **fences, int fence_cnt)
+{
+	int i, ret = 0;
+	unsigned long max_wait = msecs_to_jiffies(WAIT_MAX_FENCE_TIMEOUT);
+	unsigned long timeout = jiffies + max_wait;
+	long wait_ms, wait_jf;
+
+	/* buf sync */
+	for (i = 0; i < fence_cnt && !ret; i++) {
+		wait_jf = timeout - jiffies;
+		wait_ms = jiffies_to_msecs(wait_jf);
+
+		/*
+		 * In this loop, if one of the previous fence took long
+		 * time, give a chance for the next fence to check if
+		 * fence is already signalled. If not signalled it breaks
+		 * in the final wait timeout.
+		 */
+		if (wait_jf < 0)
+			wait_ms = WAIT_MIN_FENCE_TIMEOUT;
+		else
+			wait_ms = min_t(long, WAIT_FENCE_FIRST_TIMEOUT,
+					wait_ms);
+
+		ret = mdss_wait_sync_fence(fences[i], wait_ms);
+
+		if (ret == -ETIME) {
+			/* one longer retry within the remaining budget */
+			wait_jf = timeout - jiffies;
+			wait_ms = jiffies_to_msecs(wait_jf);
+			if (wait_jf < 0)
+				break;
+
+			wait_ms = min_t(long, WAIT_FENCE_FINAL_TIMEOUT,
+					wait_ms);
+
+			pr_warn("%s: sync_fence_wait timed out! ",
+					mdss_get_sync_fence_name(fences[i]));
+			pr_cont("Waiting %ld.%ld more seconds\n",
+				(wait_ms/MSEC_PER_SEC), (wait_ms%MSEC_PER_SEC));
+			MDSS_XLOG(sync_pt_data->timeline_value);
+			MDSS_XLOG_TOUT_HANDLER("mdp");
+			ret = mdss_wait_sync_fence(fences[i], wait_ms);
+
+			if (ret == -ETIME)
+				break;
+		}
+		mdss_put_sync_fence(fences[i]);
+	}
+
+	if (ret < 0) {
+		pr_err("%s: sync_fence_wait failed! ret = %x\n",
+				sync_pt_data->fence_name, ret);
+		/* drop references on the fences we did not get to */
+		for (; i < fence_cnt; i++)
+			mdss_put_sync_fence(fences[i]);
+	}
+	return ret;
+}
+
+/*
+ * mdss_fb_wait_for_fence() - wait for all pending acquire fences
+ * @sync_pt_data: sync point data holding the acquire fences
+ *
+ * Snapshots the pending fences, waits on them, and returns the number
+ * of fences that were pending (not the wait result — wait errors are
+ * handled and logged inside the sub function).
+ */
+int mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data)
+{
+	struct mdss_fence *fences[MDP_MAX_FENCE_FD];
+	int fence_cnt = 0;
+
+	__mdss_fb_copy_fence(sync_pt_data, fences, &fence_cnt);
+
+	if (fence_cnt)
+		__mdss_fb_wait_for_fence_sub(sync_pt_data,
+			fences, fence_cnt);
+
+	return fence_cnt;
+}
+
+/**
+ * mdss_fb_signal_timeline() - signal a single release fence
+ * @sync_pt_data: Sync point data structure for the timeline which
+ * should be signaled.
+ *
+ * This is called after a frame has been pushed to display. This signals the
+ * timeline to release the fences associated with this frame.
+ */
+void mdss_fb_signal_timeline(struct msm_sync_pt_data *sync_pt_data)
+{
+ mutex_lock(&sync_pt_data->sync_mutex);
+ /* only advance when a commit is actually outstanding */
+ if (atomic_add_unless(&sync_pt_data->commit_cnt, -1, 0) &&
+ sync_pt_data->timeline) {
+ /* advance release and retire timelines by one point each */
+ mdss_inc_timeline(sync_pt_data->timeline, 1);
+ mdss_inc_timeline(sync_pt_data->timeline_retire, 1);
+ MDSS_XLOG(sync_pt_data->timeline_value);
+ sync_pt_data->timeline_value++;
+
+ pr_debug("%s: buffer signaled! timeline val=%d remaining=%d\n",
+ sync_pt_data->fence_name, sync_pt_data->timeline_value,
+ atomic_read(&sync_pt_data->commit_cnt));
+ } else {
+ pr_debug("%s timeline signaled without commits val=%d\n",
+ sync_pt_data->fence_name, sync_pt_data->timeline_value);
+ }
+ mutex_unlock(&sync_pt_data->sync_mutex);
+}
+
+/**
+ * mdss_fb_release_fences() - signal all pending release fences
+ * @mfd: Framebuffer data structure for display
+ *
+ * Release all currently pending release fences, including those that are in
+ * the process to be committed.
+ *
+ * Note: this should only be called during close or suspend sequence.
+ */
+static void mdss_fb_release_fences(struct msm_fb_data_type *mfd)
+{
+ struct msm_sync_pt_data *sync_pt_data = &mfd->mdp_sync_pt_data;
+ int val;
+
+ mutex_lock(&sync_pt_data->sync_mutex);
+ if (sync_pt_data->timeline) {
+ /*
+ * Advance the timeline far enough to cover every queued
+ * commit plus the threshold, releasing all pending fences.
+ */
+ val = sync_pt_data->threshold +
+ atomic_read(&sync_pt_data->commit_cnt);
+ mdss_inc_timeline(sync_pt_data->timeline, val);
+ sync_pt_data->timeline_value += val;
+ atomic_set(&sync_pt_data->commit_cnt, 0);
+ }
+ mutex_unlock(&sync_pt_data->sync_mutex);
+}
+
+/* Unblock anyone waiting on kickoff completion for this display. */
+static void mdss_fb_release_kickoff(struct msm_fb_data_type *mfd)
+{
+	/* nothing to release unless this target gates on kickoff */
+	if (!mfd->wait_for_kickoff)
+		return;
+
+	atomic_set(&mfd->kickoff_pending, 0);
+	wake_up_all(&mfd->kickoff_wait_q);
+}
+
+/**
+ * __mdss_fb_sync_buf_done_callback() - process async display events
+ * @p: Notifier block registered for async events.
+ * @event: Event enum to identify the event.
+ * @data: Optional argument provided with the event.
+ *
+ * See enum mdp_notify_event for events handled.
+ */
+static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p,
+ unsigned long event, void *data)
+{
+ struct msm_sync_pt_data *sync_pt_data;
+ struct msm_fb_data_type *mfd;
+ int fence_cnt;
+ int ret = NOTIFY_OK;
+
+ sync_pt_data = container_of(p, struct msm_sync_pt_data, notifier);
+ mfd = container_of(sync_pt_data, struct msm_fb_data_type,
+ mdp_sync_pt_data);
+
+ switch (event) {
+ case MDP_NOTIFY_FRAME_BEGIN:
+ /* hold off the idle notifier while a frame is in flight */
+ if (mfd->idle_time && !mod_delayed_work(system_wq,
+ &mfd->idle_notify_work,
+ msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT)))
+ pr_debug("fb%d: start idle delayed work\n",
+ mfd->index);
+
+ mfd->idle_state = MDSS_FB_NOT_IDLE;
+ break;
+ case MDP_NOTIFY_FRAME_READY:
+ /*
+ * In async mode, wait here on the acquire fences that were
+ * stashed at FRAME_CFG_DONE, just before hardware kickoff.
+ */
+ if (sync_pt_data->async_wait_fences &&
+ sync_pt_data->temp_fen_cnt) {
+ fence_cnt = sync_pt_data->temp_fen_cnt;
+ sync_pt_data->temp_fen_cnt = 0;
+ ret = __mdss_fb_wait_for_fence_sub(sync_pt_data,
+ sync_pt_data->temp_fen, fence_cnt);
+ }
+ if (mfd->idle_time && !mod_delayed_work(system_wq,
+ &mfd->idle_notify_work,
+ msecs_to_jiffies(mfd->idle_time)))
+ pr_debug("fb%d: restarted idle work\n",
+ mfd->index);
+ /* a fence timeout aborts this frame */
+ if (ret == -ETIME)
+ ret = NOTIFY_BAD;
+ mfd->idle_state = MDSS_FB_IDLE_TIMER_RUNNING;
+ break;
+ case MDP_NOTIFY_FRAME_FLUSHED:
+ pr_debug("%s: frame flushed\n", sync_pt_data->fence_name);
+ sync_pt_data->flushed = true;
+ break;
+ case MDP_NOTIFY_FRAME_TIMEOUT:
+ pr_err("%s: frame timeout\n", sync_pt_data->fence_name);
+ /* still signal so waiting clients do not hang forever */
+ mdss_fb_signal_timeline(sync_pt_data);
+ break;
+ case MDP_NOTIFY_FRAME_DONE:
+ pr_debug("%s: frame done\n", sync_pt_data->fence_name);
+ mdss_fb_signal_timeline(sync_pt_data);
+ mdss_fb_calc_fps(mfd);
+ break;
+ case MDP_NOTIFY_FRAME_CFG_DONE:
+ /* stash acquire fences to be waited on at FRAME_READY */
+ if (sync_pt_data->async_wait_fences)
+ __mdss_fb_copy_fence(sync_pt_data,
+ sync_pt_data->temp_fen,
+ &sync_pt_data->temp_fen_cnt);
+ break;
+ case MDP_NOTIFY_FRAME_CTX_DONE:
+ mdss_fb_release_kickoff(mfd);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * mdss_fb_pan_idle() - wait for panel programming to be idle
+ * @mfd: Framebuffer data structure for display
+ *
+ * Wait for any pending programming to be done if in the process of programming
+ * hardware configuration. After this function returns it is safe to perform
+ * software updates for next frame.
+ */
+static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd)
+{
+ int ret = 0;
+
+ /* woken by the display thread once commits_pending drains to zero */
+ ret = wait_event_timeout(mfd->idle_wait_q,
+ (!atomic_read(&mfd->commits_pending) ||
+ mfd->shutdown_pending),
+ msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+ if (!ret) {
+ pr_err("%pS: wait for idle timeout commits=%d\n",
+ __builtin_return_address(0),
+ atomic_read(&mfd->commits_pending));
+ /* dump hardware/debug-bus state for postmortem */
+ MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+ "dbg_bus", "vbif_dbg_bus");
+ ret = -ETIMEDOUT;
+ } else if (mfd->shutdown_pending) {
+ pr_debug("Shutdown signalled\n");
+ ret = -ESHUTDOWN;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * mdss_fb_wait_for_kickoff() - block until the pending kickoff is consumed
+ * @mfd: framebuffer data structure for display
+ *
+ * Targets that do not gate on kickoff fall back to waiting for pan idle.
+ * Returns 0 on success, -ETIMEDOUT on timeout, -ESHUTDOWN while shutting
+ * down.
+ */
+static int mdss_fb_wait_for_kickoff(struct msm_fb_data_type *mfd)
+{
+ int ret = 0;
+
+ if (!mfd->wait_for_kickoff)
+ return mdss_fb_pan_idle(mfd);
+
+ ret = wait_event_timeout(mfd->kickoff_wait_q,
+ (!atomic_read(&mfd->kickoff_pending) ||
+ mfd->shutdown_pending),
+ msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+ if (!ret) {
+ pr_err("%pS: wait for kickoff timeout koff=%d commits=%d\n",
+ __builtin_return_address(0),
+ atomic_read(&mfd->kickoff_pending),
+ atomic_read(&mfd->commits_pending));
+ /* dump hardware/debug-bus state for postmortem */
+ MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+ "dbg_bus", "vbif_dbg_bus");
+ ret = -ETIMEDOUT;
+ } else if (mfd->shutdown_pending) {
+ pr_debug("Shutdown signalled\n");
+ ret = -ESHUTDOWN;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * mdss_fb_pan_display_ex() - queue a pan/commit for the display thread
+ * @info: framebuffer info
+ * @disp_commit: commit description (var screeninfo, flags, wait flag)
+ *
+ * Validates panel power state and pan offsets, waits for the previous
+ * kickoff to be consumed, snapshots @info and @disp_commit into
+ * mfd->msm_fb_backup, then wakes the display thread. When
+ * wait_for_finish is set, also blocks until the commit is processed.
+ * Returns 0 or a negative error code.
+ */
+static int mdss_fb_pan_display_ex(struct fb_info *info,
+ struct mdp_display_commit *disp_commit)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ struct fb_var_screeninfo *var = &disp_commit->var;
+ u32 wait_for_finish = disp_commit->wait_for_finish;
+ int ret = 0;
+
+ if (!mfd || (!mfd->op_enable))
+ return -EPERM;
+
+ /* commits while powered off are allowed only in DCM on cmd panels */
+ if ((mdss_fb_is_power_off(mfd)) &&
+ !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL)))
+ return -EPERM;
+
+ if (var->xoffset > (info->var.xres_virtual - info->var.xres))
+ return -EINVAL;
+
+ if (var->yoffset > (info->var.yres_virtual - info->var.yres))
+ return -EINVAL;
+
+ ret = mdss_fb_wait_for_kickoff(mfd);
+ if (ret) {
+ pr_err("wait_for_kick failed. rc=%d\n", ret);
+ return ret;
+ }
+
+ if (mfd->mdp.pre_commit_fnc) {
+ ret = mfd->mdp.pre_commit_fnc(mfd);
+ if (ret) {
+ pr_err("fb%d: pre commit failed %d\n",
+ mfd->index, ret);
+ return ret;
+ }
+ }
+
+ mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+ /* snap the requested offsets to the panning step granularity */
+ if (info->fix.xpanstep)
+ info->var.xoffset =
+ (var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
+
+ if (info->fix.ypanstep)
+ info->var.yoffset =
+ (var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
+
+ /* snapshot the frame state for the display thread to consume */
+ mfd->msm_fb_backup.info = *info;
+ mfd->msm_fb_backup.disp_commit = *disp_commit;
+
+ atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
+ atomic_inc(&mfd->commits_pending);
+ atomic_inc(&mfd->kickoff_pending);
+ wake_up_all(&mfd->commit_wait_q);
+ mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+ if (wait_for_finish) {
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret)
+ pr_err("mdss_fb_pan_idle failed. rc=%d\n", ret);
+ }
+ return ret;
+}
+
+/*
+ * mdss_fb_get_mode_switch() - report the mode a switch is pending into
+ * @mfd: framebuffer data for the display (may be NULL)
+ *
+ * Returns the target mode of a pending switch, or 0 when no mfd is
+ * attached or no switch is in flight.
+ */
+u32 mdss_fb_get_mode_switch(struct msm_fb_data_type *mfd)
+{
+	return (mfd && mfd->pending_switch) ? mfd->switch_new_mode : 0;
+}
+
+/*
+ * __ioctl_transition_dyn_mode_state() - State machine for mode switch
+ * @mfd: Framebuffer data structure for display
+ * @cmd: ioctl that was called
+ * @validate: used with atomic commit when doing validate layers
+ *
+ * This function assists with dynamic mode switch of DSI panel. States
+ * are used to make sure that panel mode switch occurs on next
+ * prepare/sync/commit (for legacy) and validate/pre_commit (for
+ * atomic commit) pairing. This state machine ensures that calculations
+ * and return values (such as buffer release fences) are based on the
+ * panel mode being switched into.
+ */
+static int __ioctl_transition_dyn_mode_state(struct msm_fb_data_type *mfd,
+ unsigned int cmd, bool validate, bool null_commit)
+{
+ /* fast path: no mode switch requested */
+ if (mfd->switch_state == MDSS_MDP_NO_UPDATE_REQUESTED)
+ return 0;
+
+ mutex_lock(&mfd->switch_lock);
+ switch (cmd) {
+ case MSMFB_ATOMIC_COMMIT:
+ if ((mfd->switch_state == MDSS_MDP_WAIT_FOR_VALIDATE)
+ && validate) {
+ /* resolution-only switches set no pending flag */
+ if (mfd->switch_new_mode != SWITCH_RESOLUTION)
+ mfd->pending_switch = true;
+ mfd->switch_state = MDSS_MDP_WAIT_FOR_COMMIT;
+ } else if (mfd->switch_state == MDSS_MDP_WAIT_FOR_COMMIT) {
+ if (mfd->switch_new_mode != SWITCH_RESOLUTION)
+ mdss_fb_set_mdp_sync_pt_threshold(mfd,
+ mfd->switch_new_mode);
+ mfd->switch_state = MDSS_MDP_WAIT_FOR_KICKOFF;
+ } else if ((mfd->switch_state == MDSS_MDP_WAIT_FOR_VALIDATE)
+ && null_commit) {
+ /* a commit with no layers jumps straight to kickoff */
+ mfd->switch_state = MDSS_MDP_WAIT_FOR_KICKOFF;
+ }
+ break;
+ }
+ mutex_unlock(&mfd->switch_lock);
+ return 0;
+}
+
+/*
+ * mdss_fb_is_wb_config_same() - does the new output layer match the
+ * current writeback configuration?
+ * @mfd: framebuffer data for the display
+ * @output_layer: proposed writeback output layer
+ *
+ * Returns false when there is no writeback session, true when no
+ * comparator callback is registered, otherwise the comparator's verdict.
+ */
+static inline bool mdss_fb_is_wb_config_same(struct msm_fb_data_type *mfd,
+	struct mdp_output_layer *output_layer)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct msm_mdp_interface *iface = &mfd->mdp;
+
+	if (!mdp5_data->wfd)
+		return false;
+
+	if (!iface->is_config_same)
+		return true;
+
+	return iface->is_config_same(mfd, output_layer);
+}
+
+/* update pinfo and var for WB on config change */
+static void mdss_fb_update_resolution(struct msm_fb_data_type *mfd,
+ u32 xres, u32 yres, u32 format)
+{
+ struct mdss_panel_info *pinfo = mfd->panel_info;
+ struct fb_var_screeninfo *var = &mfd->fbi->var;
+ struct fb_fix_screeninfo *fix = &mfd->fbi->fix;
+ struct mdss_mdp_format_params *fmt = NULL;
+
+ pinfo->xres = xres;
+ pinfo->yres = yres;
+ mfd->fb_imgType = format;
+ /* refresh bpp and stride from the new pixel format, if resolvable */
+ if (mfd->mdp.get_format_params) {
+ fmt = mfd->mdp.get_format_params(format);
+ if (fmt) {
+ pinfo->bpp = fmt->bpp;
+ var->bits_per_pixel = fmt->bpp * 8;
+ }
+ /*
+ * NOTE(review): stride is computed from var->xres, which
+ * still holds the previous width at this point (var is only
+ * refreshed by mdss_panelinfo_to_fb_var() below) -- confirm
+ * this ordering is intentional.
+ */
+ if (mfd->mdp.fb_stride)
+ fix->line_length = mfd->mdp.fb_stride(mfd->index,
+ var->xres,
+ var->bits_per_pixel / 8);
+ else
+ fix->line_length = var->xres * var->bits_per_pixel / 8;
+
+ }
+ var->xres_virtual = var->xres;
+ var->yres_virtual = pinfo->yres * mfd->fb_page;
+ /* propagate the new panel timing/geometry into the fb var */
+ mdss_panelinfo_to_fb_var(pinfo, var);
+}
+
+/*
+ * mdss_fb_atomic_commit() - handle MSMFB_ATOMIC_COMMIT (validate or commit)
+ * @info: framebuffer info
+ * @commit: commit payload from userspace (only version 1.0 is supported)
+ * @file: owning file, passed through to the validate/pre-commit callbacks
+ *
+ * With MDP_VALIDATE_LAYER set, only validates the layer list; for
+ * writeback panels this may temporarily switch the resolution, which is
+ * rolled back on failure. Otherwise the frame is queued for the display
+ * thread, optionally blocking until it is consumed. Returns 0 or -errno.
+ */
+int mdss_fb_atomic_commit(struct fb_info *info,
+ struct mdp_layer_commit *commit, struct file *file)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ struct mdp_layer_commit_v1 *commit_v1;
+ struct mdp_output_layer *output_layer;
+ struct mdss_panel_info *pinfo;
+ bool wait_for_finish, wb_change = false;
+ int ret = -EPERM;
+ u32 old_xres, old_yres, old_format;
+
+ if (!mfd || (!mfd->op_enable)) {
+ pr_err("mfd is NULL or operation not permitted\n");
+ return -EPERM;
+ }
+
+ /* commits while powered off are allowed only in DCM on cmd panels */
+ if ((mdss_fb_is_power_off(mfd)) &&
+ !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL))) {
+ pr_err("commit is not supported when interface is in off state\n");
+ goto end;
+ }
+ pinfo = mfd->panel_info;
+
+ /* only supports version 1.0 */
+ if (commit->version != MDP_COMMIT_VERSION_1_0) {
+ pr_err("commit version is not supported\n");
+ goto end;
+ }
+
+ if (!mfd->mdp.pre_commit || !mfd->mdp.atomic_validate) {
+ pr_err("commit callback is not registered\n");
+ goto end;
+ }
+
+ commit_v1 = &commit->commit_v1;
+ if (commit_v1->flags & MDP_VALIDATE_LAYER) {
+ ret = mdss_fb_wait_for_kickoff(mfd);
+ if (ret) {
+ pr_err("wait for kickoff failed\n");
+ } else {
+ __ioctl_transition_dyn_mode_state(mfd,
+ MSMFB_ATOMIC_COMMIT, true, false);
+ if (mfd->panel.type == WRITEBACK_PANEL) {
+ output_layer = commit_v1->output_layer;
+ if (!output_layer) {
+ pr_err("Output layer is null\n");
+ goto end;
+ }
+ /* resize WB to the new output buffer if needed;
+ * old geometry is kept for rollback at "end" */
+ wb_change = !mdss_fb_is_wb_config_same(mfd,
+ commit_v1->output_layer);
+ if (wb_change) {
+ old_xres = pinfo->xres;
+ old_yres = pinfo->yres;
+ old_format = mfd->fb_imgType;
+ mdss_fb_update_resolution(mfd,
+ output_layer->buffer.width,
+ output_layer->buffer.height,
+ output_layer->buffer.format);
+ }
+ }
+ ret = mfd->mdp.atomic_validate(mfd, file, commit_v1);
+ if (!ret)
+ mfd->atomic_commit_pending = true;
+ }
+ /* validate-only requests never queue a frame */
+ goto end;
+ } else {
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret) {
+ pr_err("pan display idle call failed\n");
+ goto end;
+ }
+ __ioctl_transition_dyn_mode_state(mfd,
+ MSMFB_ATOMIC_COMMIT, false,
+ (commit_v1->input_layer_cnt ? 0 : 1));
+
+ ret = mfd->mdp.pre_commit(mfd, file, commit_v1);
+ if (ret) {
+ pr_err("atomic pre commit failed\n");
+ goto end;
+ }
+ }
+
+ wait_for_finish = commit_v1->flags & MDP_COMMIT_WAIT_FOR_FINISH;
+ mfd->msm_fb_backup.atomic_commit = true;
+ mfd->msm_fb_backup.disp_commit.l_roi = commit_v1->left_roi;
+ mfd->msm_fb_backup.disp_commit.r_roi = commit_v1->right_roi;
+
+ /* hand the frame to the display thread */
+ mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+ atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
+ atomic_inc(&mfd->commits_pending);
+ atomic_inc(&mfd->kickoff_pending);
+ wake_up_all(&mfd->commit_wait_q);
+ mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+
+ if (wait_for_finish)
+ ret = mdss_fb_pan_idle(mfd);
+
+end:
+ /* roll back a speculative writeback resolution change on failure */
+ if (ret && (mfd->panel.type == WRITEBACK_PANEL) && wb_change)
+ mdss_fb_update_resolution(mfd, old_xres, old_yres, old_format);
+ return ret;
+}
+
+/* fb_ops pan_display entry point: wraps the request into a synchronous
+ * display commit and hands it to mdss_fb_pan_display_ex(). */
+static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
+	struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdp_display_commit disp_commit;
+
+	/*
+	 * during mode switch through mode sysfs node, it will trigger a
+	 * pan_display after switch. This assumes that fb has been adjusted,
+	 * however when using overlays we may not have the right size at this
+	 * point, so it needs to go through PREPARE first. Abort pan_display
+	 * operations until that happens
+	 */
+	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		pr_debug("fb%d: pan_display skipped during switch\n",
+				mfd->index);
+		return 0;
+	}
+
+	memset(&disp_commit, 0, sizeof(disp_commit));
+	disp_commit.var = *var;
+	disp_commit.wait_for_finish = true;
+
+	return mdss_fb_pan_display_ex(info, &disp_commit);
+}
+
+/*
+ * mdss_fb_pan_display_sub() - apply a pan directly via the DMA callback
+ * @var: requested screen offsets/geometry
+ * @info: framebuffer info
+ *
+ * Legacy (non-overlay) pan path, run from the display thread: validates
+ * power state and offsets, snaps them to the panning step, then triggers
+ * the target's dma_fnc. Returns 0 or a negative error code.
+ */
+static int mdss_fb_pan_display_sub(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+ if (!mfd->op_enable)
+ return -EPERM;
+
+ /* pans while powered off are allowed only in DCM on cmd panels */
+ if ((mdss_fb_is_power_off(mfd)) &&
+ !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL)))
+ return -EPERM;
+
+ if (var->xoffset > (info->var.xres_virtual - info->var.xres))
+ return -EINVAL;
+
+ if (var->yoffset > (info->var.yres_virtual - info->var.yres))
+ return -EINVAL;
+
+ /* snap offsets to the panning step granularity */
+ if (info->fix.xpanstep)
+ info->var.xoffset =
+ (var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
+
+ if (info->fix.ypanstep)
+ info->var.yoffset =
+ (var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
+
+ if (mfd->mdp.dma_fnc)
+ mfd->mdp.dma_fnc(mfd);
+ else
+ pr_warn("dma function not set for panel type=%d\n",
+ mfd->panel.type);
+
+ return 0;
+}
+
+/*
+ * mdss_grayscale_to_mdp_format() - map a V4L2 pixel format code (carried
+ * in fb var "grayscale") to the matching MDP format, or -EINVAL.
+ */
+static int mdss_grayscale_to_mdp_format(u32 grayscale)
+{
+	if (grayscale == V4L2_PIX_FMT_RGB24)
+		return MDP_RGB_888;
+
+	if (grayscale == V4L2_PIX_FMT_NV12)
+		return MDP_Y_CBCR_H2V2;
+
+	return -EINVAL;
+}
+
+/*
+ * mdss_fb_var_to_panelinfo() - fold fb var geometry/timing into panel info
+ * @var: source fb var screeninfo
+ * @pinfo: destination panel info
+ *
+ * Copies resolution, porches, and sync widths; optionally maps the
+ * var->grayscale V4L2 code to an MDP output format; and derives the
+ * pixel clock rate from var->pixclock.
+ */
+static void mdss_fb_var_to_panelinfo(struct fb_var_screeninfo *var,
+ struct mdss_panel_info *pinfo)
+{
+ int format = -EINVAL;
+
+ pinfo->xres = var->xres;
+ pinfo->yres = var->yres;
+ pinfo->lcdc.v_front_porch = var->lower_margin;
+ pinfo->lcdc.v_back_porch = var->upper_margin;
+ pinfo->lcdc.v_pulse_width = var->vsync_len;
+ pinfo->lcdc.h_front_porch = var->right_margin;
+ pinfo->lcdc.h_back_porch = var->left_margin;
+ pinfo->lcdc.h_pulse_width = var->hsync_len;
+
+ /* grayscale values > 1 carry a V4L2 pixel format code */
+ if (var->grayscale > 1) {
+ format = mdss_grayscale_to_mdp_format(var->grayscale);
+ if (!IS_ERR_VALUE((unsigned long)format))
+ pinfo->out_format = format;
+ else
+ pr_warn("Failed to map grayscale value (%d) to an MDP format\n",
+ var->grayscale);
+ }
+
+ /*
+ * if greater than 1M, then rate would fall below 1mhz which is not
+ * even supported. In this case it means clock rate is actually
+ * passed directly in hz.
+ */
+ if (var->pixclock > SZ_1M)
+ pinfo->clk_rate = var->pixclock;
+ else
+ pinfo->clk_rate = PICOS2KHZ(var->pixclock) * 1000;
+
+ /*
+ * if it is a DBA panel i.e. HDMI TV connected through
+ * DSI interface, then store the pixel clock value in
+ * DSI specific variable.
+ */
+ if (pinfo->is_dba_panel)
+ pinfo->mipi.dsi_pclk_rate = pinfo->clk_rate;
+}
+
+/*
+ * mdss_panelinfo_to_fb_var() - derive fb var geometry/timing from panel info
+ * @pinfo: source panel info
+ * @var: destination fb var screeninfo
+ *
+ * Fills in resolution, porches, sync widths, pixel clock (derived from
+ * the frame rate when available), and physical dimensions.
+ */
+void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
+ struct fb_var_screeninfo *var)
+{
+ u32 frame_rate;
+
+ var->xres = mdss_fb_get_panel_xres(pinfo);
+ var->yres = pinfo->yres;
+ /*
+ * NOTE(review): prg_fet lines are shifted from the front porch to
+ * the back porch here -- presumably to account for programmable
+ * fetch start; confirm against the panel timing code.
+ */
+ var->lower_margin = pinfo->lcdc.v_front_porch -
+ pinfo->prg_fet;
+ var->upper_margin = pinfo->lcdc.v_back_porch +
+ pinfo->prg_fet;
+ var->vsync_len = pinfo->lcdc.v_pulse_width;
+ var->right_margin = pinfo->lcdc.h_front_porch;
+ var->left_margin = pinfo->lcdc.h_back_porch;
+ var->hsync_len = pinfo->lcdc.h_pulse_width;
+
+ frame_rate = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ if (frame_rate) {
+ unsigned long clk_rate, h_total, v_total;
+
+ /* prefer a pixclock derived from total timing x frame rate */
+ h_total = var->xres + var->left_margin
+ + var->right_margin + var->hsync_len;
+ v_total = var->yres + var->lower_margin
+ + var->upper_margin + var->vsync_len;
+ clk_rate = h_total * v_total * frame_rate;
+ var->pixclock = KHZ2PICOS(clk_rate / 1000);
+ } else if (pinfo->clk_rate) {
+ var->pixclock = KHZ2PICOS(
+ (unsigned long int) pinfo->clk_rate / 1000);
+ }
+
+ if (pinfo->physical_width)
+ var->width = pinfo->physical_width;
+ if (pinfo->physical_height)
+ var->height = pinfo->physical_height;
+
+ pr_debug("ScreenInfo: res=%dx%d [%d, %d] [%d, %d]\n",
+ var->xres, var->yres, var->left_margin,
+ var->right_margin, var->upper_margin,
+ var->lower_margin);
+}
+
+/**
+ * __mdss_fb_perform_commit() - process a frame to display
+ * @mfd: Framebuffer data structure for display
+ *
+ * Processes all layers and buffers programmed and ensures all pending release
+ * fences are signaled once the buffer is transferred to display.
+ */
+static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd)
+{
+	struct msm_sync_pt_data *sync_pt_data = &mfd->mdp_sync_pt_data;
+	struct msm_fb_backup_type *fb_backup = &mfd->msm_fb_backup;
+	int ret = -ENOTSUPP;
+	u32 new_dsi_mode, dynamic_dsi_switch = 0;
+
+	/* in sync mode, acquire fences are waited on before kickoff */
+	if (!sync_pt_data->async_wait_fences)
+		mdss_fb_wait_for_fence(sync_pt_data);
+	sync_pt_data->flushed = false;
+
+	mutex_lock(&mfd->switch_lock);
+	if (mfd->switch_state == MDSS_MDP_WAIT_FOR_KICKOFF) {
+		dynamic_dsi_switch = 1;
+		new_dsi_mode = mfd->switch_new_mode;
+	} else if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		pr_err("invalid commit on fb%d with state = %d\n",
+				mfd->index, mfd->switch_state);
+		mutex_unlock(&mfd->switch_lock);
+		goto skip_commit;
+	}
+	mutex_unlock(&mfd->switch_lock);
+	if (dynamic_dsi_switch) {
+		MDSS_XLOG(mfd->index, mfd->split_mode, new_dsi_mode,
+				XLOG_FUNC_ENTRY);
+		pr_debug("Triggering dyn mode switch to %d\n", new_dsi_mode);
+		ret = mfd->mdp.mode_switch(mfd, new_dsi_mode);
+		/* fix: log line was missing its trailing newline */
+		if (ret)
+			pr_err("DSI mode switch has failed\n");
+		else
+			mfd->pending_switch = false;
+	}
+	/* dispatch the frame: overlay kickoff, atomic kickoff, or legacy pan */
+	if (fb_backup->disp_commit.flags & MDP_DISPLAY_COMMIT_OVERLAY) {
+		if (mfd->mdp.kickoff_fnc)
+			ret = mfd->mdp.kickoff_fnc(mfd,
+					&fb_backup->disp_commit);
+		else
+			pr_warn("no kickoff function setup for fb%d\n",
+					mfd->index);
+	} else if (fb_backup->atomic_commit) {
+		if (mfd->mdp.kickoff_fnc)
+			ret = mfd->mdp.kickoff_fnc(mfd,
+					&fb_backup->disp_commit);
+		else
+			pr_warn("no kickoff function setup for fb%d\n",
+					mfd->index);
+		fb_backup->atomic_commit = false;
+	} else {
+		ret = mdss_fb_pan_display_sub(&fb_backup->disp_commit.var,
+				&fb_backup->info);
+		if (ret)
+			pr_err("pan display failed %x on fb%d\n", ret,
+					mfd->index);
+	}
+
+skip_commit:
+	if (!ret)
+		mdss_fb_update_backlight(mfd);
+
+	/* on failure or an unflushed frame, release waiters and fences */
+	if (IS_ERR_VALUE((unsigned long)ret) || !sync_pt_data->flushed) {
+		mdss_fb_release_kickoff(mfd);
+		mdss_fb_signal_timeline(sync_pt_data);
+		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+				(mfd->mdp.signal_retire_fence))
+			mfd->mdp.signal_retire_fence(mfd, 1);
+	}
+
+	if (dynamic_dsi_switch) {
+		MDSS_XLOG(mfd->index, mfd->split_mode, new_dsi_mode,
+				XLOG_FUNC_EXIT);
+		mfd->mdp.mode_switch_post(mfd, new_dsi_mode);
+		mutex_lock(&mfd->switch_lock);
+		mfd->switch_state = MDSS_MDP_NO_UPDATE_REQUESTED;
+		mutex_unlock(&mfd->switch_lock);
+		if (new_dsi_mode != SWITCH_RESOLUTION)
+			mfd->panel.type = new_dsi_mode;
+		pr_debug("Dynamic mode switch completed\n");
+	}
+
+	return ret;
+}
+
+/*
+ * __mdss_fb_display_thread() - per-display kthread consuming queued commits
+ * @data: the msm_fb_data_type for this display
+ *
+ * Bumps itself to SCHED_FIFO, then loops: wait for a queued commit,
+ * perform it, decrement commits_pending, and wake idle waiters. On stop,
+ * releases any remaining waiters so nothing blocks on a dead thread.
+ */
+static int __mdss_fb_display_thread(void *data)
+{
+	struct msm_fb_data_type *mfd = data;
+	int ret;
+	struct sched_param param;
+
+	/*
+	 * this priority was found during empiric testing to have appropriate
+	 * realtime scheduling to process display updates and interact with
+	 * other real time and normal priority tasks
+	 */
+	param.sched_priority = 16;
+	/* fix: "&param" had been mangled into the HTML entity "¶m" */
+	ret = sched_setscheduler(current, SCHED_FIFO, &param);
+	if (ret)
+		pr_warn("set priority failed for fb%d display thread\n",
+				mfd->index);
+
+	while (1) {
+		wait_event(mfd->commit_wait_q,
+				(atomic_read(&mfd->commits_pending) ||
+				 kthread_should_stop()));
+
+		if (kthread_should_stop())
+			break;
+
+		MDSS_XLOG(mfd->index, XLOG_FUNC_ENTRY);
+		ret = __mdss_fb_perform_commit(mfd);
+		MDSS_XLOG(mfd->index, XLOG_FUNC_EXIT);
+
+		atomic_dec(&mfd->commits_pending);
+		wake_up_all(&mfd->idle_wait_q);
+	}
+
+	mdss_fb_release_kickoff(mfd);
+	atomic_set(&mfd->commits_pending, 0);
+	wake_up_all(&mfd->idle_wait_q);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_check_var() - fb_ops check_var: validate a proposed var
+ * @var: proposed fb var screeninfo
+ * @info: framebuffer info
+ *
+ * Accepts only 16/24/32bpp component layouts the MDP supports, sane
+ * virtual/offset geometry, and (when no fixed modelist exists) asks the
+ * panel driver to vet the resulting timing via MDSS_EVENT_CHECK_PARAMS.
+ * Returns 0 or a negative error code.
+ */
+static int mdss_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+ if (var->rotate != FB_ROTATE_UR && var->rotate != FB_ROTATE_UD)
+ return -EINVAL;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ /* RGB565 or BGR565, no alpha */
+ if ((var->green.offset != 5) ||
+ !((var->blue.offset == 11)
+ || (var->blue.offset == 0)) ||
+ !((var->red.offset == 11)
+ || (var->red.offset == 0)) ||
+ (var->blue.length != 5) ||
+ (var->green.length != 6) ||
+ (var->red.length != 5) ||
+ (var->blue.msb_right != 0) ||
+ (var->green.msb_right != 0) ||
+ (var->red.msb_right != 0) ||
+ (var->transp.offset != 0) ||
+ (var->transp.length != 0))
+ return -EINVAL;
+ break;
+
+ case 24:
+ /* RGB888, or ARGB8888 expressed with 24bpp + 8-bit alpha */
+ if ((var->blue.offset != 0) ||
+ (var->green.offset != 8) ||
+ (var->red.offset != 16) ||
+ (var->blue.length != 8) ||
+ (var->green.length != 8) ||
+ (var->red.length != 8) ||
+ (var->blue.msb_right != 0) ||
+ (var->green.msb_right != 0) ||
+ (var->red.msb_right != 0) ||
+ !(((var->transp.offset == 0) &&
+ (var->transp.length == 0)) ||
+ ((var->transp.offset == 24) &&
+ (var->transp.length == 8))))
+ return -EINVAL;
+ break;
+
+ case 32:
+ /*
+ * Check user specified color format BGRA/ARGB/RGBA
+ * and verify the position of the RGB components
+ */
+
+ if (!((var->transp.offset == 24) &&
+ (var->blue.offset == 0) &&
+ (var->green.offset == 8) &&
+ (var->red.offset == 16)) &&
+ !((var->transp.offset == 0) &&
+ (var->blue.offset == 24) &&
+ (var->green.offset == 16) &&
+ (var->red.offset == 8)) &&
+ !((var->transp.offset == 24) &&
+ (var->blue.offset == 16) &&
+ (var->green.offset == 8) &&
+ (var->red.offset == 0)))
+ return -EINVAL;
+
+ /* Check the common values for both RGBA and ARGB */
+
+ if ((var->blue.length != 8) ||
+ (var->green.length != 8) ||
+ (var->red.length != 8) ||
+ (var->transp.length != 8) ||
+ (var->blue.msb_right != 0) ||
+ (var->green.msb_right != 0) ||
+ (var->red.msb_right != 0))
+ return -EINVAL;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* basic geometry sanity: nonzero sizes, offsets within virtual area */
+ if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
+ return -EINVAL;
+
+ if ((var->xres == 0) || (var->yres == 0))
+ return -EINVAL;
+
+ if (var->xoffset > (var->xres_virtual - var->xres))
+ return -EINVAL;
+
+ if (var->yoffset > (var->yres_virtual - var->yres))
+ return -EINVAL;
+
+ if (info->mode) {
+ const struct fb_videomode *mode;
+
+ /* with a fixed modelist, the var must match a listed mode */
+ mode = fb_match_mode(var, &info->modelist);
+ if (mode == NULL)
+ return -EINVAL;
+ } else if (mfd->panel_info && !(var->activate & FB_ACTIVATE_TEST)) {
+ struct mdss_panel_info *panel_info;
+ int rc;
+
+ /* vet the resulting timing on a scratch copy of panel_info */
+ panel_info = kzalloc(sizeof(struct mdss_panel_info),
+ GFP_KERNEL);
+ if (!panel_info)
+ return -ENOMEM;
+
+ memcpy(panel_info, mfd->panel_info,
+ sizeof(struct mdss_panel_info));
+ mdss_fb_var_to_panelinfo(var, panel_info);
+ rc = mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
+ panel_info);
+ if (IS_ERR_VALUE((unsigned long)rc)) {
+ kfree(panel_info);
+ return rc;
+ }
+ /* nonzero rc means the panel requires a reconfigure */
+ mfd->panel_reconfig = rc;
+ kfree(panel_info);
+ }
+
+ return 0;
+}
+
+/*
+ * mdss_fb_videomode_switch() - switch the panel chain to a new video mode
+ * @mfd: framebuffer data for the display
+ * @mode: fb videomode to switch to (matched by name against panel timings)
+ *
+ * Walks the chained panel data, delivering MDSS_EVENT_PANEL_TIMING_SWITCH
+ * to each panel; a panel stays active only if a timing with the requested
+ * name exists. On success, updates split mode and (while powered on)
+ * arms the resolution-switch state machine before reconfiguring the panel.
+ * Returns 0 or a negative error code.
+ */
+static int mdss_fb_videomode_switch(struct msm_fb_data_type *mfd,
+	const struct fb_videomode *mode)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata, *tmp;
+	struct mdss_panel_timing *timing;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	/* make sure that we are idle while switching */
+	mdss_fb_wait_for_kickoff(mfd);
+
+	pr_debug("fb%d: changing display mode to %s\n", mfd->index, mode->name);
+	MDSS_XLOG(mfd->index, mode->name,
+			mdss_fb_get_panel_xres(mfd->panel_info),
+			mfd->panel_info->yres, mfd->split_mode,
+			XLOG_FUNC_ENTRY);
+	/*
+	 * Fix: the original do/while used "continue" without advancing
+	 * tmp, so a panel lacking an event handler looped forever (in a
+	 * do/while, "continue" jumps to the controlling expression). A
+	 * for loop advances tmp on every iteration, including skips.
+	 */
+	for (tmp = pdata; tmp && !ret; tmp = tmp->next) {
+		if (!tmp->event_handler) {
+			pr_warn("no event handler for panel\n");
+			continue;
+		}
+		timing = mdss_panel_get_timing_by_name(tmp, mode->name);
+		ret = tmp->event_handler(tmp,
+				MDSS_EVENT_PANEL_TIMING_SWITCH, timing);
+
+		/* a panel without the requested timing becomes inactive */
+		tmp->active = timing != NULL;
+	}
+
+	if (!ret)
+		mdss_fb_set_split_mode(mfd, pdata);
+
+	if (!ret && mfd->mdp.configure_panel) {
+		int dest_ctrl = 1;
+
+		/* todo: currently assumes no changes in video/cmd mode */
+		if (!mdss_fb_is_power_off(mfd)) {
+			mutex_lock(&mfd->switch_lock);
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_VALIDATE;
+			mfd->switch_new_mode = SWITCH_RESOLUTION;
+			mutex_unlock(&mfd->switch_lock);
+			dest_ctrl = 0;
+		}
+		ret = mfd->mdp.configure_panel(mfd,
+				pdata->panel_info.mipi.mode, dest_ctrl);
+	}
+
+	MDSS_XLOG(mfd->index, mode->name,
+			mdss_fb_get_panel_xres(mfd->panel_info),
+			mfd->panel_info->yres, mfd->split_mode,
+			XLOG_FUNC_EXIT);
+	pr_debug("fb%d: %s mode change complete\n", mfd->index, mode->name);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_set_par() - fb_ops set_par: apply the current var settings
+ * @info: framebuffer info
+ *
+ * Derives the MDP image type from the var component layout, performs a
+ * videomode switch when a fixed modelist is present and the mode changed,
+ * recomputes stride and fb memory size, and power-cycles the panel with
+ * updated panel info when a reconfiguration is required.
+ * Returns 0 or a negative error code.
+ */
+static int mdss_fb_set_par(struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ int old_imgType, old_format;
+ int ret = 0;
+
+ /* do not reconfigure while a commit is still in flight */
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret) {
+ pr_err("mdss_fb_pan_idle failed. rc=%d\n", ret);
+ return ret;
+ }
+
+ /* map the var component layout to an MDP image type */
+ old_imgType = mfd->fb_imgType;
+ switch (var->bits_per_pixel) {
+ case 16:
+ if (var->red.offset == 0)
+ mfd->fb_imgType = MDP_BGR_565;
+ else
+ mfd->fb_imgType = MDP_RGB_565;
+ break;
+
+ case 24:
+ if ((var->transp.offset == 0) && (var->transp.length == 0))
+ mfd->fb_imgType = MDP_RGB_888;
+ else if ((var->transp.offset == 24) &&
+ (var->transp.length == 8)) {
+ mfd->fb_imgType = MDP_ARGB_8888;
+ info->var.bits_per_pixel = 32;
+ }
+ break;
+
+ case 32:
+ if ((var->red.offset == 0) &&
+ (var->green.offset == 8) &&
+ (var->blue.offset == 16) &&
+ (var->transp.offset == 24))
+ mfd->fb_imgType = MDP_RGBA_8888;
+ else if ((var->red.offset == 16) &&
+ (var->green.offset == 8) &&
+ (var->blue.offset == 0) &&
+ (var->transp.offset == 24))
+ mfd->fb_imgType = MDP_BGRA_8888;
+ else if ((var->red.offset == 8) &&
+ (var->green.offset == 16) &&
+ (var->blue.offset == 24) &&
+ (var->transp.offset == 0))
+ mfd->fb_imgType = MDP_ARGB_8888;
+ else
+ mfd->fb_imgType = MDP_RGBA_8888;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (info->mode) {
+ const struct fb_videomode *mode;
+
+ mode = fb_match_mode(var, &info->modelist);
+ if (!mode)
+ return -EINVAL;
+
+ pr_debug("found mode: %s\n", mode->name);
+
+ if (fb_mode_is_equal(mode, info->mode)) {
+ pr_debug("mode is equal to current mode\n");
+ return 0;
+ }
+
+ ret = mdss_fb_videomode_switch(mfd, mode);
+ if (ret)
+ return ret;
+ }
+
+ if (mfd->mdp.fb_stride)
+ mfd->fbi->fix.line_length = mfd->mdp.fb_stride(mfd->index,
+ var->xres,
+ var->bits_per_pixel / 8);
+ else
+ mfd->fbi->fix.line_length = var->xres * var->bits_per_pixel / 8;
+
+ /* if memory is not allocated yet, change memory size for fb */
+ if (!info->fix.smem_start)
+ mfd->fbi->fix.smem_len = PAGE_ALIGN(mfd->fbi->fix.line_length *
+ mfd->fbi->var.yres) * mfd->fb_page;
+
+ /* a valid grayscale code that differs forces a panel reconfigure */
+ old_format = mdss_grayscale_to_mdp_format(var->grayscale);
+ if (!IS_ERR_VALUE((unsigned long)old_format)) {
+ if (old_format != mfd->panel_info->out_format)
+ mfd->panel_reconfig = true;
+ }
+
+ if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
+ /* blank, push the new timing/format, then unblank */
+ mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
+ mdss_fb_var_to_panelinfo(var, mfd->panel_info);
+ if (mfd->panel_info->is_dba_panel &&
+ mdss_fb_send_panel_event(mfd, MDSS_EVENT_UPDATE_PARAMS,
+ mfd->panel_info))
+ pr_debug("Failed to send panel event UPDATE_PARAMS\n");
+ mdss_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
+ mfd->panel_reconfig = false;
+ }
+
+ return ret;
+}
+
+/*
+ * mdss_fb_dcm() - drive the DCM/DTM (display/ test content mode) state
+ * machine
+ * @mfd: framebuffer data for the display
+ * @req_state: requested state (DCM_UNBLANK/ENTER/EXIT/BLANK, DTM_ENTER/EXIT)
+ *
+ * Each transition is honored only from the matching predecessor state;
+ * invalid transitions are silently ignored. Returns 0 or a negative
+ * error code from the on/off callbacks.
+ */
+int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state)
+{
+ int ret = 0;
+
+ if (req_state == mfd->dcm_state) {
+ pr_warn("Already in correct DCM/DTM state\n");
+ return ret;
+ }
+
+ switch (req_state) {
+ case DCM_UNBLANK:
+ /* power up the panel without the usual fb unblank path */
+ if (mfd->dcm_state == DCM_UNINIT &&
+ mdss_fb_is_power_off(mfd) && mfd->mdp.on_fnc) {
+ if (mfd->disp_thread == NULL) {
+ ret = mdss_fb_start_disp_thread(mfd);
+ if (ret < 0)
+ return ret;
+ }
+ ret = mfd->mdp.on_fnc(mfd);
+ if (ret == 0) {
+ mfd->panel_power_state = MDSS_PANEL_POWER_ON;
+ mfd->dcm_state = DCM_UNBLANK;
+ }
+ }
+ break;
+ case DCM_ENTER:
+ if (mfd->dcm_state == DCM_UNBLANK) {
+ /*
+ * Keep unblank path available for only
+ * DCM operation
+ */
+ mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
+ mfd->dcm_state = DCM_ENTER;
+ }
+ break;
+ case DCM_EXIT:
+ if (mfd->dcm_state == DCM_ENTER) {
+ /* Release the unblank path for exit */
+ mfd->panel_power_state = MDSS_PANEL_POWER_ON;
+ mfd->dcm_state = DCM_EXIT;
+ }
+ break;
+ case DCM_BLANK:
+ /* power the panel back down and stop the display thread */
+ if ((mfd->dcm_state == DCM_EXIT ||
+ mfd->dcm_state == DCM_UNBLANK) &&
+ mdss_fb_is_power_on(mfd) && mfd->mdp.off_fnc) {
+ mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
+ ret = mfd->mdp.off_fnc(mfd);
+ if (ret == 0)
+ mfd->dcm_state = DCM_UNINIT;
+ else
+ pr_err("DCM_BLANK failed\n");
+
+ if (mfd->disp_thread)
+ mdss_fb_stop_disp_thread(mfd);
+ }
+ break;
+ case DTM_ENTER:
+ if (mfd->dcm_state == DCM_UNINIT)
+ mfd->dcm_state = DTM_ENTER;
+ break;
+ case DTM_EXIT:
+ if (mfd->dcm_state == DTM_ENTER)
+ mfd->dcm_state = DCM_UNINIT;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * mdss_fb_cursor() - handle the cursor ioctl
+ * @info: framebuffer info
+ * @p: userspace pointer to a struct fb_cursor
+ *
+ * Returns -ENODEV when the target has no cursor support, -EFAULT on a
+ * bad userspace pointer, otherwise the cursor_update callback's result.
+ */
+static int mdss_fb_cursor(struct fb_info *info, void __user *p)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_cursor cursor;
+
+	if (!mfd->mdp.cursor_update)
+		return -ENODEV;
+
+	/*
+	 * Fix: copy_from_user() returns the number of bytes NOT copied,
+	 * not an errno; translate a partial copy into -EFAULT instead of
+	 * leaking a raw byte count to the caller.
+	 */
+	if (copy_from_user(&cursor, p, sizeof(cursor)))
+		return -EFAULT;
+
+	return mfd->mdp.cursor_update(mfd, &cursor);
+}
+
+/*
+ * mdss_fb_async_position_update() - apply an async layer position update
+ * @info: framebuffer info
+ * @update_pos: position update with at least one input layer
+ *
+ * Returns -EINVAL for an empty layer list, otherwise the result of the
+ * target's async_position_update callback.
+ *
+ * NOTE(review): assumes mfd->mdp.async_position_update is non-NULL; the
+ * ioctl path checks this before calling -- verify any other callers.
+ */
+int mdss_fb_async_position_update(struct fb_info *info,
+ struct mdp_position_update *update_pos)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+ if (!update_pos->input_layer_cnt) {
+ pr_err("no input layers for position update\n");
+ return -EINVAL;
+ }
+ return mfd->mdp.async_position_update(mfd, update_pos);
+}
+
+/*
+ * mdss_fb_async_position_update_ioctl() - userspace entry for async
+ * position updates
+ * @info: framebuffer info
+ * @argp: userspace pointer to a struct mdp_position_update
+ *
+ * Copies in the request and its layer list, runs the update, then copies
+ * per-layer error codes and the (pointer-restored) struct back to
+ * userspace on a best-effort basis. Returns 0 or a negative error code.
+ */
+static int mdss_fb_async_position_update_ioctl(struct fb_info *info,
+	unsigned long *argp)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdp_position_update update_pos;
+	int ret, rc;
+	u32 buffer_size, layer_cnt;
+	struct mdp_async_layer *layer_list = NULL;
+	struct mdp_async_layer __user *input_layer_list;
+
+	if (!mfd->mdp.async_position_update)
+		return -ENODEV;
+
+	/*
+	 * Fix: copy_from_user() returns the number of bytes NOT copied,
+	 * not an errno; translate failures into -EFAULT instead of
+	 * returning a raw byte count to userspace.
+	 */
+	if (copy_from_user(&update_pos, argp, sizeof(update_pos))) {
+		pr_err("copy from user failed\n");
+		return -EFAULT;
+	}
+	input_layer_list = update_pos.input_layers;
+
+	layer_cnt = update_pos.input_layer_cnt;
+	if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) {
+		pr_err("invalid async layers :%d to update\n", layer_cnt);
+		return -EINVAL;
+	}
+
+	buffer_size = sizeof(struct mdp_async_layer) * layer_cnt;
+	layer_list = kmalloc(buffer_size, GFP_KERNEL);
+	if (!layer_list) {
+		pr_err("unable to allocate memory for layers\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(layer_list, input_layer_list, buffer_size)) {
+		pr_err("layer list copy from user failed\n");
+		ret = -EFAULT;
+		goto end;
+	}
+	update_pos.input_layers = layer_list;
+
+	ret = mdss_fb_async_position_update(info, &update_pos);
+	if (ret)
+		pr_err("async position update failed ret:%d\n", ret);
+
+	/* best effort: report per-layer error codes back to userspace */
+	rc = copy_to_user(input_layer_list, layer_list, buffer_size);
+	if (rc)
+		pr_err("layer error code copy to user failed\n");
+
+	/* restore the userspace pointer before copying the struct back */
+	update_pos.input_layers = input_layer_list;
+	rc = copy_to_user(argp, &update_pos,
+			sizeof(struct mdp_position_update));
+	if (rc)
+		pr_err("copy to user for layers failed\n");
+
+end:
+	kfree(layer_list);
+	return ret;
+}
+
+/*
+ * mdss_fb_set_lut() - MSMFB_SET_LUT handler
+ * @info: framebuffer info for the target display
+ * @p: userspace pointer to a struct fb_cmap
+ *
+ * Return: 0 on success, -ENODEV if no lut_update callback is
+ * registered, -EFAULT if the userspace copy fails.  The callback's
+ * own result is intentionally discarded, matching the original ABI.
+ */
+static int mdss_fb_set_lut(struct fb_info *info, void __user *p)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_cmap cmap;
+
+	if (!mfd->mdp.lut_update)
+		return -ENODEV;
+
+	/* Map the positive copy_from_user() residue count to -EFAULT. */
+	if (copy_from_user(&cmap, p, sizeof(cmap)))
+		return -EFAULT;
+
+	mfd->mdp.lut_update(mfd, &cmap);
+	return 0;
+}
+
+/**
+ * mdss_fb_sync_get_fence() - get fence from timeline
+ * @timeline: Timeline to create the fence on
+ * @fence_name: Name of the fence that will be created for debugging
+ * @val: Timeline value at which the fence will be signaled
+ *
+ * Function returns a fence on the timeline given with the name provided.
+ * The fence created will be signaled when the timeline is advanced.
+ *
+ * Return: the new fence, or NULL on failure.
+ */
+struct mdss_fence *mdss_fb_sync_get_fence(struct mdss_timeline *timeline,
+		const char *fence_name, int val)
+{
+	struct mdss_fence *fence;
+
+	fence = mdss_get_sync_fence(timeline, fence_name, NULL, val);
+	if (fence == NULL) {
+		pr_err("%s: cannot create fence\n", fence_name);
+		return NULL;
+	}
+
+	/*
+	 * Log only after the NULL check: the original logged first, handing
+	 * mdss_get_sync_fence_name() a NULL fence on the failure path.
+	 */
+	pr_debug("%s: buf sync fence timeline=%d\n",
+		mdss_get_sync_fence_name(fence), val);
+
+	return fence;
+}
+
+/*
+ * mdss_fb_handle_buf_sync_ioctl() - handle MSMFB_BUFFER_SYNC
+ * @sync_pt_data: per-display sync-point bookkeeping
+ * @buf_sync: ioctl payload (acquire fence fds in, release/retire fds out)
+ *
+ * Collects the caller's acquire fences, creates a release fence (and,
+ * when MDP_BUF_SYNC_FLAG_RETIRE_FENCE is set, a retire fence) on the
+ * display timeline, and copies the new fence fds back to userspace.
+ * On error every fence taken or created here is released again.
+ */
+static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
+				 struct mdp_buf_sync *buf_sync)
+{
+	int i, ret = 0;
+	int acq_fen_fd[MDP_MAX_FENCE_FD];
+	struct mdss_fence *fence, *rel_fence, *retire_fence;
+	int rel_fen_fd;
+	int retire_fen_fd;
+	int val;
+
+	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+				(sync_pt_data->timeline == NULL))
+		return -EINVAL;
+
+	if (buf_sync->acq_fen_fd_cnt) {
+		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+				buf_sync->acq_fen_fd_cnt * sizeof(int));
+		if (ret) {
+			pr_err("%s: copy_from_user failed\n",
+					sync_pt_data->fence_name);
+			return ret;
+		}
+	}
+
+	/* Drain fences from a previous frame before taking new ones. */
+	i = mdss_fb_wait_for_fence(sync_pt_data);
+	if (i > 0)
+		pr_warn("%s: waited on %d active fences\n",
+				sync_pt_data->fence_name, i);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
+		fence = mdss_get_fd_sync_fence(acq_fen_fd[i]);
+		if (fence == NULL) {
+			pr_err("%s: null fence! i=%d fd=%d\n",
+					sync_pt_data->fence_name, i,
+					acq_fen_fd[i]);
+			ret = -EINVAL;
+			break;
+		}
+		sync_pt_data->acq_fen[i] = fence;
+	}
+	/* Record how many fences were actually taken so cleanup is exact. */
+	sync_pt_data->acq_fen_cnt = i;
+	if (ret)
+		goto buf_sync_err_1;
+
+	val = sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+
+	MDSS_XLOG(sync_pt_data->timeline_value, val,
+		atomic_read(&sync_pt_data->commit_cnt));
+	pr_debug("%s: fence CTL%d Commit_cnt%d\n", sync_pt_data->fence_name,
+		sync_pt_data->timeline_value,
+		atomic_read(&sync_pt_data->commit_cnt));
+	/* Set release fence */
+	rel_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
+			sync_pt_data->fence_name, val);
+	if (IS_ERR_OR_NULL(rel_fence)) {
+		pr_err("%s: unable to retrieve release fence\n",
+				sync_pt_data->fence_name);
+		ret = rel_fence ? PTR_ERR(rel_fence) : -ENOMEM;
+		goto buf_sync_err_1;
+	}
+
+	/* create fd */
+	rel_fen_fd = mdss_get_sync_fence_fd(rel_fence);
+	if (rel_fen_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
+				sync_pt_data->fence_name, rel_fen_fd);
+		ret = rel_fen_fd;
+		goto buf_sync_err_2;
+	}
+
+	ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
+		goto buf_sync_err_3;
+	}
+
+	if (!(buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE))
+		goto skip_retire_fence;
+
+	if (sync_pt_data->get_retire_fence)
+		retire_fence = sync_pt_data->get_retire_fence(sync_pt_data);
+	else
+		retire_fence = NULL;
+
+	/* Fall back to a timeline-based retire fence when none is provided. */
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		val += sync_pt_data->retire_threshold;
+		retire_fence = mdss_fb_sync_get_fence(
+			sync_pt_data->timeline, "mdp-retire", val);
+	}
+
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		pr_err("%s: unable to retrieve retire fence\n",
+				sync_pt_data->fence_name);
+		/*
+		 * Report the retire fence's own error code.  The original
+		 * mistakenly used PTR_ERR(rel_fence), which is a valid
+		 * (non-error) pointer at this point.
+		 */
+		ret = retire_fence ? PTR_ERR(retire_fence) : -ENOMEM;
+		goto buf_sync_err_3;
+	}
+	retire_fen_fd = mdss_get_sync_fence_fd(retire_fence);
+
+	if (retire_fen_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed for retire fence error:0x%x\n",
+				sync_pt_data->fence_name, retire_fen_fd);
+		ret = retire_fen_fd;
+		mdss_put_sync_fence(retire_fence);
+		goto buf_sync_err_3;
+	}
+
+	ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
+			sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_to_user failed for retire fence\n",
+				sync_pt_data->fence_name);
+		put_unused_fd(retire_fen_fd);
+		mdss_put_sync_fence(retire_fence);
+		goto buf_sync_err_3;
+	}
+
+skip_retire_fence:
+	/*
+	 * NOTE(review): second mdss_get_sync_fence_fd() call on rel_fence;
+	 * presumably installs the fd / takes the extra reference handed to
+	 * userspace - confirm against the mdss sync API before changing.
+	 */
+	mdss_get_sync_fence_fd(rel_fence);
+	mutex_unlock(&sync_pt_data->sync_mutex);
+
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+		mdss_fb_wait_for_fence(sync_pt_data);
+
+	return ret;
+buf_sync_err_3:
+	put_unused_fd(rel_fen_fd);
+buf_sync_err_2:
+	mdss_put_sync_fence(rel_fence);
+buf_sync_err_1:
+	for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
+		mdss_put_sync_fence(sync_pt_data->acq_fen[i]);
+	sync_pt_data->acq_fen_cnt = 0;
+	mutex_unlock(&sync_pt_data->sync_mutex);
+	return ret;
+}
+/*
+ * mdss_fb_display_commit() - MSMFB_DISPLAY_COMMIT handler
+ * @info: framebuffer info for the target display
+ * @argp: userspace pointer to a struct mdp_display_commit
+ *
+ * Return: -EFAULT if the userspace copy fails, otherwise the result of
+ * mdss_fb_pan_display_ex().
+ */
+static int mdss_fb_display_commit(struct fb_info *info,
+		unsigned long *argp)
+{
+	struct mdp_display_commit disp_commit;
+
+	if (copy_from_user(&disp_commit, argp, sizeof(disp_commit))) {
+		pr_err("%s:copy_from_user failed\n", __func__);
+		/* -EFAULT, not the positive "bytes uncopied" count */
+		return -EFAULT;
+	}
+	return mdss_fb_pan_display_ex(info, &disp_commit);
+}
+
+/**
+ * __mdss_fb_copy_pixel_ext() - copy pixel extension payload
+ * @src: pixel extn structure
+ * @dest: Qseed3/pixel extn common payload
+ *
+ * Function copies the pixel extension parameters into the scale data structure,
+ * this is required to allow using the scale_v2 data structure for both
+ * QSEED2 and QSEED3
+ */
+static void __mdss_fb_copy_pixel_ext(struct mdp_scale_data *src,
+		struct mdp_scale_data_v2 *dest)
+{
+	if (!src || !dest)
+		return;
+	/*
+	 * Use each field's own size.  The original copy-pasted
+	 * sizeof(src->init_phase_x) for the next three memcpys; the arrays
+	 * happen to match in size today, but matching sizes keeps this
+	 * correct if the struct layout ever changes.
+	 */
+	memcpy(dest->init_phase_x, src->init_phase_x,
+		sizeof(src->init_phase_x));
+	memcpy(dest->phase_step_x, src->phase_step_x,
+		sizeof(src->phase_step_x));
+	memcpy(dest->init_phase_y, src->init_phase_y,
+		sizeof(src->init_phase_y));
+	memcpy(dest->phase_step_y, src->phase_step_y,
+		sizeof(src->phase_step_y));
+
+	memcpy(dest->num_ext_pxls_left, src->num_ext_pxls_left,
+		sizeof(src->num_ext_pxls_left));
+	memcpy(dest->num_ext_pxls_right, src->num_ext_pxls_right,
+		sizeof(src->num_ext_pxls_right));
+	memcpy(dest->num_ext_pxls_top, src->num_ext_pxls_top,
+		sizeof(src->num_ext_pxls_top));
+	memcpy(dest->num_ext_pxls_btm, src->num_ext_pxls_btm,
+		sizeof(src->num_ext_pxls_btm));
+
+	memcpy(dest->left_ftch, src->left_ftch, sizeof(src->left_ftch));
+	memcpy(dest->left_rpt, src->left_rpt, sizeof(src->left_rpt));
+	memcpy(dest->right_ftch, src->right_ftch, sizeof(src->right_ftch));
+	memcpy(dest->right_rpt, src->right_rpt, sizeof(src->right_rpt));
+
+	memcpy(dest->top_rpt, src->top_rpt, sizeof(src->top_rpt));
+	memcpy(dest->btm_rpt, src->btm_rpt, sizeof(src->btm_rpt));
+	memcpy(dest->top_ftch, src->top_ftch, sizeof(src->top_ftch));
+	memcpy(dest->btm_ftch, src->btm_ftch, sizeof(src->btm_ftch));
+
+	memcpy(dest->roi_w, src->roi_w, sizeof(src->roi_w));
+}
+
+/*
+ * __mdss_fb_scaler_handler() - pull a layer's scaler config in from userspace
+ * @layer: input layer whose ->scale field holds a userspace pointer on entry
+ *
+ * On success layer->scale points at a kernel allocation (an mdp_scale_data_v2
+ * either copied directly for QSEED3 or converted from the legacy
+ * pixel-extension layout), which the caller owns and must kfree.  On failure
+ * layer->scale is NULL and the layer's pp_info has already been released via
+ * mdss_mdp_free_layer_pp_info().
+ */
+static int __mdss_fb_scaler_handler(struct mdp_input_layer *layer)
+{
+	int ret = 0;
+	struct mdp_scale_data *pixel_ext = NULL;
+	struct mdp_scale_data_v2 *scale = NULL;
+
+	/* The two scaler programming models are mutually exclusive. */
+	if ((layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT) &&
+			(layer->flags & MDP_LAYER_ENABLE_QSEED3_SCALE)) {
+		pr_err("Invalid flag configuration for scaler, %x\n",
+				layer->flags);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT) {
+		/* Legacy path: copy mdp_scale_data, convert to v2 layout. */
+		scale = kzalloc(sizeof(struct mdp_scale_data_v2),
+				GFP_KERNEL);
+		pixel_ext = kzalloc(sizeof(struct mdp_scale_data),
+				GFP_KERNEL);
+		if (!scale || !pixel_ext) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -ENOMEM;
+			goto err;
+		}
+		ret = copy_from_user(pixel_ext, layer->scale,
+				sizeof(struct mdp_scale_data));
+		if (ret) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -EFAULT;
+			goto err;
+		}
+		__mdss_fb_copy_pixel_ext(pixel_ext, scale);
+		layer->scale = scale;
+	} else if (layer->flags & MDP_LAYER_ENABLE_QSEED3_SCALE) {
+		/* QSEED3 path: v2 payload is copied verbatim. */
+		scale = kzalloc(sizeof(struct mdp_scale_data_v2),
+				GFP_KERNEL);
+		if (!scale) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(scale, layer->scale,
+				sizeof(struct mdp_scale_data_v2));
+		if (ret) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -EFAULT;
+			goto err;
+		}
+		layer->scale = scale;
+	} else {
+		layer->scale = NULL;
+	}
+	/* pixel_ext was only a conversion scratch buffer; scale lives on. */
+	kfree(pixel_ext);
+	return ret;
+err:
+	kfree(pixel_ext);
+	kfree(scale);
+	layer->scale = NULL;
+	return ret;
+}
+
+/*
+ * mdss_fb_atomic_commit_ioctl() - MSMFB_ATOMIC_COMMIT handler
+ * @info: framebuffer info for the target display
+ * @argp: userspace pointer to a struct mdp_layer_commit
+ * @file: file handle issuing the ioctl
+ *
+ * Deep-copies the commit descriptor (output layer, input layer array with
+ * per-layer pp/scaler payloads, and FRC info) from userspace, runs the
+ * atomic commit, then copies per-layer error codes and the fences embedded
+ * in the commit struct back out.  All kernel copies are freed before
+ * returning, on both success and error paths.
+ */
+static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
+	unsigned long *argp, struct file *file)
+{
+	int ret, i = 0, j = 0, rc;
+	struct mdp_layer_commit commit;
+	u32 buffer_size, layer_count;
+	struct mdp_input_layer *layer, *layer_list = NULL;
+	struct mdp_input_layer __user *input_layer_list;
+	struct mdp_output_layer *output_layer = NULL;
+	struct mdp_output_layer __user *output_layer_user;
+	struct mdp_frc_info *frc_info = NULL;
+	struct mdp_frc_info __user *frc_info_user;
+	struct msm_fb_data_type *mfd;
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
+	if (ret) {
+		pr_err("%s:copy_from_user failed\n", __func__);
+		return ret;
+	}
+
+	mfd = (struct msm_fb_data_type *)info->par;
+	if (!mfd)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mfd->panel_info->panel_dead) {
+		pr_debug("early commit return\n");
+		MDSS_XLOG(mfd->panel_info->panel_dead);
+		/*
+		 * In case of an ESD attack, since we early return from the
+		 * commits, we need to signal the outstanding fences.
+		 */
+		mdss_fb_release_fences(mfd);
+		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+			mfd->mdp.signal_retire_fence && mdp5_data)
+			mfd->mdp.signal_retire_fence(mfd,
+						mdp5_data->retire_cnt);
+		return 0;
+	}
+
+	/* Optional writeback/output layer: deep-copy if supplied. */
+	output_layer_user = commit.commit_v1.output_layer;
+	if (output_layer_user) {
+		buffer_size = sizeof(struct mdp_output_layer);
+		output_layer = kzalloc(buffer_size, GFP_KERNEL);
+		if (!output_layer) {
+			pr_err("unable to allocate memory for output layer\n");
+			return -ENOMEM;
+		}
+
+		ret = copy_from_user(output_layer,
+			output_layer_user, buffer_size);
+		if (ret) {
+			pr_err("layer list copy from user failed\n");
+			goto err;
+		}
+		commit.commit_v1.output_layer = output_layer;
+	}
+
+	layer_count = commit.commit_v1.input_layer_cnt;
+	input_layer_list = commit.commit_v1.input_layers;
+
+	if (layer_count > MAX_LAYER_COUNT) {
+		ret = -EINVAL;
+		goto err;
+	} else if (layer_count) {
+		buffer_size = sizeof(struct mdp_input_layer) * layer_count;
+		layer_list = kzalloc(buffer_size, GFP_KERNEL);
+		if (!layer_list) {
+			pr_err("unable to allocate memory for layers\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(layer_list, input_layer_list, buffer_size);
+		if (ret) {
+			pr_err("layer list copy from user failed\n");
+			goto err;
+		}
+
+		commit.commit_v1.input_layers = layer_list;
+
+		/*
+		 * Resolve each layer's embedded userspace pointers (pp_info,
+		 * scale) into kernel copies.  On failure, i marks how many
+		 * layers were processed so the err path frees exactly those.
+		 */
+		for (i = 0; i < layer_count; i++) {
+			layer = &layer_list[i];
+
+			if (!(layer->flags & MDP_LAYER_PP)) {
+				layer->pp_info = NULL;
+			} else {
+				ret = mdss_mdp_copy_layer_pp_info(layer);
+				if (ret) {
+					pr_err("failure to copy pp_info data for layer %d, ret = %d\n",
+						i, ret);
+					goto err;
+				}
+			}
+
+			if ((layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT) ||
+				(layer->flags &
+				 MDP_LAYER_ENABLE_QSEED3_SCALE)) {
+				ret = __mdss_fb_scaler_handler(layer);
+				if (ret) {
+					pr_err("failure to copy scale params for layer %d, ret = %d\n",
+						i, ret);
+					goto err;
+				}
+			} else {
+				layer->scale = NULL;
+			}
+		}
+	}
+
+	/* Copy Deterministic Frame Rate Control info from userspace */
+	frc_info_user = commit.commit_v1.frc_info;
+	if (frc_info_user) {
+		frc_info = kzalloc(sizeof(struct mdp_frc_info), GFP_KERNEL);
+		if (!frc_info) {
+			pr_err("unable to allocate memory for frc\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(frc_info, frc_info_user,
+			sizeof(struct mdp_frc_info));
+		if (ret) {
+			pr_err("frc info copy from user failed\n");
+			goto frc_err;
+		}
+
+		commit.commit_v1.frc_info = frc_info;
+	}
+
+	ATRACE_BEGIN("ATOMIC_COMMIT");
+	ret = mdss_fb_atomic_commit(info, &commit, file);
+	if (ret)
+		pr_err("atomic commit failed ret:%d\n", ret);
+	ATRACE_END("ATOMIC_COMMIT");
+
+	if (layer_count) {
+		for (j = 0; j < layer_count; j++) {
+			rc = copy_to_user(&input_layer_list[j].error_code,
+					&layer_list[j].error_code, sizeof(int));
+			if (rc)
+				pr_err("layer error code copy to user failed\n");
+		}
+
+		/* Restore userspace pointers before copying the struct back
+		 * (it carries the release/retire fences for the caller).
+		 */
+		commit.commit_v1.input_layers = input_layer_list;
+		commit.commit_v1.output_layer = output_layer_user;
+		commit.commit_v1.frc_info = frc_info_user;
+		rc = copy_to_user(argp, &commit,
+			sizeof(struct mdp_layer_commit));
+		if (rc)
+			pr_err("copy to user for release & retire fence failed\n");
+	}
+
+frc_err:
+	kfree(frc_info);
+err:
+	/* Free the per-layer kernel copies; i never exceeds layer_count. */
+	for (i--; i >= 0; i--) {
+		kfree(layer_list[i].scale);
+		layer_list[i].scale  = NULL;
+		mdss_mdp_free_layer_pp_info(&layer_list[i]);
+	}
+	kfree(layer_list);
+	kfree(output_layer);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_switch_check() - validate that an immediate mode switch is legal
+ * @mfd: framebuffer data for the display
+ * @mode: panel type being requested (MIPI_VIDEO_PANEL or MIPI_CMD_PANEL)
+ *
+ * Return: 0 when the switch may proceed, -EINVAL on bad arguments,
+ * -EPERM when the panel state or capabilities forbid it.
+ */
+int mdss_fb_switch_check(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_panel_info *pinfo;
+	int cur_panel_type;
+	bool already_in_mode;
+
+	if (!mfd || !mfd->panel_info)
+		return -EINVAL;
+
+	pinfo = mfd->panel_info;
+
+	if (!mfd->op_enable || mdss_fb_is_power_off(mfd))
+		return -EPERM;
+
+	if (pinfo->mipi.dms_mode != DYNAMIC_MODE_SWITCH_IMMEDIATE) {
+		pr_warn("Panel does not support immediate dynamic switch!\n");
+		return -EPERM;
+	}
+
+	if (mfd->dcm_state != DCM_UNINIT) {
+		pr_warn("Switch not supported during DCM!\n");
+		return -EPERM;
+	}
+
+	/* pinfo->type is updated under switch_lock; sample it safely. */
+	mutex_lock(&mfd->switch_lock);
+	already_in_mode = (mode == pinfo->type);
+	mutex_unlock(&mfd->switch_lock);
+
+	if (already_in_mode) {
+		pr_debug("Already in requested mode!\n");
+		return -EPERM;
+	}
+
+	cur_panel_type = mfd->panel.type;
+	if (cur_panel_type != MIPI_VIDEO_PANEL &&
+	    cur_panel_type != MIPI_CMD_PANEL) {
+		pr_debug("Panel not in mipi video or cmd mode, cannot change\n");
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_fb_immediate_mode_switch() - queue a seamless DSI mode switch
+ * @mfd: framebuffer data for the display
+ * @mode: non-zero requests command mode, zero requests video mode
+ *
+ * Validates the request and records it so the switch happens on the
+ * next frame's validate stage.
+ */
+static int mdss_fb_immediate_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
+{
+	u32 translated_mode = mode ? MIPI_CMD_PANEL : MIPI_VIDEO_PANEL;
+	int rc;
+
+	pr_debug("%s: Request to switch to %d,", __func__, translated_mode);
+
+	rc = mdss_fb_switch_check(mfd, translated_mode);
+	if (rc)
+		return rc;
+
+	mutex_lock(&mfd->switch_lock);
+	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		pr_err("%s: Mode switch already in progress\n", __func__);
+		rc = -EAGAIN;
+	} else {
+		mfd->switch_state = MDSS_MDP_WAIT_FOR_VALIDATE;
+		mfd->switch_new_mode = translated_mode;
+	}
+	mutex_unlock(&mfd->switch_lock);
+
+	return rc;
+}
+
+/*
+ * mdss_fb_mode_switch() - Function to change DSI mode
+ * @mfd: Framebuffer data structure for display
+ * @mode: Enabled/Disable LowPowerMode
+ *		1: Switch to Command Mode
+ *		0: Switch to video Mode
+ *
+ * Dispatches to whichever switch mechanism the panel advertises: a
+ * suspend/resume (blanking) cycle, an immediate switch on the next
+ * frame, or -EPERM when the panel supports neither.
+ */
+static int mdss_fb_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!mfd || !mfd->panel_info)
+		return -EINVAL;
+
+	pinfo = mfd->panel_info;
+
+	switch (pinfo->mipi.dms_mode) {
+	case DYNAMIC_MODE_SWITCH_SUSPEND_RESUME:
+		return mdss_fb_blanking_mode_switch(mfd, mode);
+	case DYNAMIC_MODE_SWITCH_IMMEDIATE:
+		return mdss_fb_immediate_mode_switch(mfd, mode);
+	default:
+		pr_warn("Panel does not support dynamic mode switch!\n");
+		return -EPERM;
+	}
+}
+
+/*
+ * __ioctl_wait_idle() - block until the previous kickoff completes for
+ * ioctls that must not race an in-flight frame.
+ * @mfd: framebuffer data for the display
+ * @cmd: the ioctl command about to run
+ *
+ * Return: 0, or the failure from mdss_fb_wait_for_kickoff().
+ */
+static int __ioctl_wait_idle(struct msm_fb_data_type *mfd, u32 cmd)
+{
+	int rc = 0;
+	bool needs_wait;
+
+	switch (cmd) {
+	case MSMFB_OVERLAY_PREPARE:
+	case MSMFB_BUFFER_SYNC:
+	case MSMFB_OVERLAY_PLAY:
+	case MSMFB_CURSOR:
+	case MSMFB_METADATA_GET:
+	case MSMFB_METADATA_SET:
+	case MSMFB_OVERLAY_GET:
+	case MSMFB_OVERLAY_UNSET:
+	case MSMFB_OVERLAY_SET:
+		needs_wait = true;
+		break;
+	default:
+		needs_wait = false;
+		break;
+	}
+
+	if (needs_wait && mfd->wait_for_kickoff)
+		rc = mdss_fb_wait_for_kickoff(mfd);
+
+	/* -ESHUTDOWN is expected during teardown; don't spam the log. */
+	if (rc && (rc != -ESHUTDOWN))
+		pr_err("wait_idle failed. cmd=0x%x rc=%d\n", cmd, rc);
+
+	return rc;
+}
+
+#ifdef TARGET_HW_MDSS_MDP3
+/* On MDP3 targets nothing is filtered; every command is passed through. */
+static bool check_not_supported_ioctl(u32 cmd)
+{
+	return false;
+}
+#else
+/* Reject the legacy overlay-style commands on this target. */
+static bool check_not_supported_ioctl(u32 cmd)
+{
+	switch (cmd) {
+	case MSMFB_OVERLAY_SET:
+	case MSMFB_OVERLAY_UNSET:
+	case MSMFB_OVERLAY_GET:
+	case MSMFB_OVERLAY_PREPARE:
+	case MSMFB_DISPLAY_COMMIT:
+	case MSMFB_OVERLAY_PLAY:
+	case MSMFB_BUFFER_SYNC:
+	case MSMFB_OVERLAY_QUEUE:
+	case MSMFB_NOTIFY_UPDATE:
+		return true;
+	default:
+		return false;
+	}
+}
+#endif
+
+/*
+ * mdss_fb_do_ioctl() - MDSS Framebuffer ioctl function
+ * @info:	pointer to framebuffer info
+ * @cmd:	ioctl command
+ * @arg:	argument to ioctl
+ * @file:	file handle issuing the ioctl
+ *
+ * This function provides an architecture agnostic implementation
+ * of the mdss framebuffer ioctl. This function can be called
+ * by compat ioctl or regular ioctl to handle the supported commands.
+ * It tracks in-flight ioctls via mfd->ioctl_ref_cnt and wakes
+ * mfd->ioctl_q when the count drops to zero (used during shutdown).
+ */
+int mdss_fb_do_ioctl(struct fb_info *info, unsigned int cmd,
+		     unsigned long arg, struct file *file)
+{
+	struct msm_fb_data_type *mfd;
+	void __user *argp = (void __user *)arg;
+	int ret = -ENOTSUPP;
+	struct mdp_buf_sync buf_sync;
+	unsigned int dsi_mode = 0;
+	struct mdss_panel_data *pdata = NULL;
+
+	if (!info || !info->par)
+		return -EINVAL;
+
+	mfd = (struct msm_fb_data_type *)info->par;
+	if (!mfd)
+		return -EINVAL;
+
+	if (mfd->shutdown_pending)
+		return -ESHUTDOWN;
+
+	/* Block all ioctls while a dynamic panel switch is pending. */
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata || pdata->panel_info.dynamic_switch_pending)
+		return -EPERM;
+
+	if (check_not_supported_ioctl(cmd)) {
+		pr_err("Unsupported ioctl\n");
+		return -EINVAL;
+	}
+
+	/* Every exit from here on must go through the ref-count drop. */
+	atomic_inc(&mfd->ioctl_ref_cnt);
+
+	mdss_fb_power_setting_idle(mfd);
+
+	ret = __ioctl_wait_idle(mfd, cmd);
+	if (ret)
+		goto exit;
+
+	switch (cmd) {
+	case MSMFB_CURSOR:
+		ret = mdss_fb_cursor(info, argp);
+		break;
+
+	case MSMFB_SET_LUT:
+		ret = mdss_fb_set_lut(info, argp);
+		break;
+
+	case MSMFB_BUFFER_SYNC:
+		ret = copy_from_user(&buf_sync, argp, sizeof(buf_sync));
+		if (ret)
+			goto exit;
+
+		if ((!mfd->op_enable) || (mdss_fb_is_power_off(mfd))) {
+			ret = -EPERM;
+			goto exit;
+		}
+
+		ret = mdss_fb_handle_buf_sync_ioctl(&mfd->mdp_sync_pt_data,
+				&buf_sync);
+		/* Copy back so the caller sees the new fence fds. */
+		if (!ret)
+			ret = copy_to_user(argp, &buf_sync, sizeof(buf_sync));
+		break;
+
+	case MSMFB_NOTIFY_UPDATE:
+		ret = mdss_fb_notify_update(mfd, argp);
+		break;
+
+	case MSMFB_DISPLAY_COMMIT:
+		ret = mdss_fb_display_commit(info, argp);
+		break;
+
+	case MSMFB_LPM_ENABLE:
+		ret = copy_from_user(&dsi_mode, argp, sizeof(dsi_mode));
+		if (ret) {
+			pr_err("%s: MSMFB_LPM_ENABLE ioctl failed\n", __func__);
+			goto exit;
+		}
+
+		ret = mdss_fb_mode_switch(mfd, dsi_mode);
+		break;
+	case MSMFB_ATOMIC_COMMIT:
+		ret = mdss_fb_atomic_commit_ioctl(info, argp, file);
+		break;
+
+	case MSMFB_ASYNC_POSITION_UPDATE:
+		ret = mdss_fb_async_position_update_ioctl(info, argp);
+		break;
+
+	default:
+		/* Everything else goes to the target-specific handler. */
+		if (mfd->mdp.ioctl_handler)
+			ret = mfd->mdp.ioctl_handler(mfd, cmd, argp);
+		break;
+	}
+
+	if (ret == -ENOTSUPP)
+		pr_err("unsupported ioctl (%x)\n", cmd);
+
+exit:
+	if (!atomic_dec_return(&mfd->ioctl_ref_cnt))
+		wake_up_all(&mfd->ioctl_q);
+
+	return ret;
+}
+
+/* Thin fbdev entry point: validate the fb_info and delegate. */
+static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file)
+{
+	if (!info || !info->par)
+		return -EINVAL;
+
+	return mdss_fb_do_ioctl(info, cmd, arg, file);
+}
+
+/*
+ * mdss_fb_register_extra_panel() - chain a second panel onto an fb device
+ * @pdev: the framebuffer platform device already holding a primary panel
+ * @pdata: panel data for the additional (split) panel
+ *
+ * Return: 0 on success, -EINVAL when the fb device has no panel data,
+ * -EEXIST when a split panel is already attached.
+ */
+static int mdss_fb_register_extra_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata)
+{
+	struct mdss_panel_data *primary_pdata = dev_get_platdata(&pdev->dev);
+
+	if (!primary_pdata) {
+		pr_err("framebuffer device %s contains invalid panel data\n",
+				dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+
+	if (primary_pdata->next) {
+		pr_err("split panel already setup for framebuffer device %s\n",
+				dev_name(&pdev->dev));
+		return -EEXIST;
+	}
+
+	primary_pdata->next = pdata;
+	return 0;
+}
+
+/*
+ * mdss_register_panel() - attach a panel to its framebuffer device
+ * @pdev: the panel's platform device
+ * @pdata: the panel data to register
+ *
+ * Resolves the framebuffer DT node the panel maps to (via the panel's
+ * get_fb_node callback or the "qcom,mdss-fb-map" phandle), then either
+ * chains the panel onto an existing fb device (split panel) or creates
+ * a new fb platform device for it.  Notifies the MDP core once the
+ * master panel is registered.
+ *
+ * Return: 0 on success, -EPROBE_DEFER if the MDP core is not up yet,
+ * -ENODEV when the DT nodes cannot be resolved.
+ */
+int mdss_register_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata)
+{
+	struct platform_device *fb_pdev, *mdss_pdev;
+	struct device_node *node = NULL;
+	int rc = 0;
+	bool master_panel = true;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	if (!mdp_instance) {
+		pr_err("mdss mdp resource not initialized yet\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (pdata->get_fb_node)
+		node = pdata->get_fb_node(pdev);
+
+	if (!node) {
+		node = of_parse_phandle(pdev->dev.of_node,
+			"qcom,mdss-fb-map", 0);
+		if (!node) {
+			pr_err("Unable to find fb node for device: %s\n",
+					pdev->name);
+			return -ENODEV;
+		}
+	}
+	/*
+	 * NOTE(review): of_find_device_by_node() takes a reference on the
+	 * returned device; there is no matching put_device() here for
+	 * mdss_pdev or fb_pdev - confirm whether that is intentional.
+	 */
+	mdss_pdev = of_find_device_by_node(node->parent);
+	if (!mdss_pdev) {
+		pr_err("Unable to find mdss for node: %s\n", node->full_name);
+		rc = -ENODEV;
+		goto mdss_notfound;
+	}
+
+	pdata->active = true;
+	fb_pdev = of_find_device_by_node(node);
+	if (fb_pdev) {
+		/* fb device already exists: this is a split/extra panel. */
+		rc = mdss_fb_register_extra_panel(fb_pdev, pdata);
+		if (rc == 0)
+			master_panel = false;
+	} else {
+		pr_info("adding framebuffer device %s\n", dev_name(&pdev->dev));
+		fb_pdev = of_platform_device_create(node, NULL,
+				&mdss_pdev->dev);
+		if (fb_pdev)
+			fb_pdev->dev.platform_data = pdata;
+	}
+
+	if (master_panel && mdp_instance->panel_register_done)
+		mdp_instance->panel_register_done(pdata);
+
+mdss_notfound:
+	of_node_put(node);
+	return rc;
+}
+EXPORT_SYMBOL(mdss_register_panel);
+
+/*
+ * mdss_fb_register_mdp_instance() - register the MDP interface singleton
+ * @mdp: the MDP interface to install
+ *
+ * Return: 0 on success, -EINVAL if an instance is already registered.
+ */
+int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp)
+{
+	/* Exactly one MDP interface may ever be registered. */
+	if (mdp_instance != NULL) {
+		pr_err("multiple MDP instance registration\n");
+		return -EINVAL;
+	}
+	mdp_instance = mdp;
+	return 0;
+}
+EXPORT_SYMBOL(mdss_fb_register_mdp_instance);
+
+/*
+ * mdss_fb_get_phys_info() - report a framebuffer's memory address and size
+ * @start: out: iova if mapped, else the physical smem start
+ * @len: out: length of the framebuffer memory
+ * @fb_num: index into fbi_list
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range index, -ENOENT /
+ * -ENODEV when the slot has no registered framebuffer.
+ */
+int mdss_fb_get_phys_info(dma_addr_t *start, unsigned long *len, int fb_num)
+{
+	struct fb_info *info;
+	struct msm_fb_data_type *mfd;
+
+	/* fb_num is signed: reject negatives too, or fbi_list[fb_num]
+	 * would index out of bounds.
+	 */
+	if (fb_num < 0 || fb_num >= MAX_FBI_LIST)
+		return -EINVAL;
+
+	info = fbi_list[fb_num];
+	if (!info)
+		return -ENOENT;
+
+	mfd = (struct msm_fb_data_type *)info->par;
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->iova)
+		*start = mfd->iova;
+	else
+		*start = info->fix.smem_start;
+	*len = info->fix.smem_len;
+
+	return 0;
+}
+EXPORT_SYMBOL(mdss_fb_get_phys_info);
+
+/*
+ * mdss_fb_init() - module init: register the mdss framebuffer driver
+ *
+ * Return: 0 on success, -ENODEV when disabled via the "msmfb" fb
+ * option, or the error from platform_driver_register().
+ */
+int __init mdss_fb_init(void)
+{
+	int rc;
+
+	if (fb_get_options("msmfb", NULL))
+		return -ENODEV;
+
+	/* Propagate the real registration error instead of flattening
+	 * every failure to -ENODEV.
+	 */
+	rc = platform_driver_register(&mdss_fb_driver);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+module_init(mdss_fb_init);
+
+/*
+ * mdss_fb_suspres_panel() - forward a suspend/resume event to one panel
+ * @dev: the fb device being iterated
+ * @data: pointer to a bool; true means resume, false means suspend
+ *
+ * Intended as a device-iteration callback.  Return: 0 on success or
+ * when the device carries no mfd, else the panel event result.
+ */
+int mdss_fb_suspres_panel(struct device *dev, void *data)
+{
+	struct msm_fb_data_type *mfd;
+	u32 event;
+	int rc = 0;
+
+	if (!data) {
+		pr_err("Device state not defined\n");
+		return -EINVAL;
+	}
+
+	mfd = dev_get_drvdata(dev);
+	if (!mfd)
+		return 0;
+
+	event = *((bool *)data) ? MDSS_EVENT_RESUME : MDSS_EVENT_SUSPEND;
+
+	/* Do not send runtime suspend/resume for HDMI primary */
+	if (mdss_fb_is_hdmi_primary(mfd))
+		return 0;
+
+	rc = mdss_fb_send_panel_event(mfd, event, NULL);
+	if (rc)
+		pr_warn("unable to %s fb%d (%d)\n",
+			event == MDSS_EVENT_RESUME ? "resume" : "suspend",
+			mfd->index, rc);
+
+	return rc;
+}
+
+/*
+ * mdss_fb_report_panel_dead() - Sends the PANEL_ALIVE=0 status to HAL layer.
+ * @mfd: frame buffer structure associated with fb device.
+ *
+ * Called when the panel stops responding as expected (failed register
+ * read/BTA, or missing TE signal).  Marks the panel dead and raises a
+ * change uevent carrying PANEL_ALIVE=0 so the HAL can recover it.
+ */
+void mdss_fb_report_panel_dead(struct msm_fb_data_type *mfd)
+{
+	char *envp[2] = {"PANEL_ALIVE=0", NULL};
+	struct mdss_panel_data *pdata = dev_get_platdata(&mfd->pdev->dev);
+
+	if (!pdata) {
+		pr_err("Panel data not available\n");
+		return;
+	}
+
+	/* Flag the panel so subsequent commits bail out early. */
+	pdata->panel_info.panel_dead = true;
+	kobject_uevent_env(&mfd->fbi->dev->kobj, KOBJ_CHANGE, envp);
+	pr_err("Panel has gone bad, sending uevent - %s\n", envp[0]);
+}
+
+
+/*
+ * mdss_fb_calc_fps() - Calculates fps value.
+ * @mfd: frame buffer structure associated with fb device.
+ *
+ * This function is called at frame done. It counts the number
+ * of frames done for every 1 sec. Stores the value in measured_fps.
+ * measured_fps value is 10 times the calculated fps value.
+ * For example, measured_fps= 594 for calculated fps of 59.4
+ */
+void mdss_fb_calc_fps(struct msm_fb_data_type *mfd)
+{
+	ktime_t current_time_us;
+	u64 fps, diff_us;
+
+	current_time_us = ktime_get();
+	diff_us = (u64)ktime_us_delta(current_time_us,
+			mfd->fps_info.last_sampled_time_us);
+	mfd->fps_info.frame_count++;
+
+	if (diff_us >= MDP_TIME_PERIOD_CALC_FPS_US) {
+		/* x10 scaling: frames * 10 * 1e6 us/s, then do_div divides
+		 * fps in place by the elapsed microseconds.
+		 */
+		fps = ((u64)mfd->fps_info.frame_count) * 10000000;
+		do_div(fps, diff_us);
+		mfd->fps_info.measured_fps = (unsigned int)fps;
+		pr_debug(" MDP_FPS for fb%d is %d.%d\n",
+			mfd->index, (unsigned int)fps/10, (unsigned int)fps%10);
+		/* Restart the sampling window. */
+		mfd->fps_info.last_sampled_time_us = current_time_us;
+		mfd->fps_info.frame_count = 0;
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
new file mode 100644
index 0000000..19e6299
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -0,0 +1,475 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_FB_H
+#define MDSS_FB_H
+
+#include <linux/msm_ion.h>
+#include <linux/list.h>
+#include <linux/msm_mdp_ext.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/leds.h>
+
+#include "mdss_panel.h"
+#include "mdss_mdp_splash_logo.h"
+
+#define MDSS_LPAE_CHECK(phys) \
+ ((sizeof(phys) > sizeof(unsigned long)) ? ((phys >> 32) & 0xFF) : (0))
+
+#define MSM_FB_DEFAULT_PAGE_SIZE 2
+#define MFD_KEY 0x11161126
+#define MSM_FB_MAX_DEV_LIST 32
+
+#define MSM_FB_ENABLE_DBGFS
+#define WAIT_FENCE_FIRST_TIMEOUT (3 * MSEC_PER_SEC)
+#define WAIT_FENCE_FINAL_TIMEOUT (7 * MSEC_PER_SEC)
+#define WAIT_MAX_FENCE_TIMEOUT (WAIT_FENCE_FIRST_TIMEOUT + \
+ WAIT_FENCE_FINAL_TIMEOUT)
+#define WAIT_MIN_FENCE_TIMEOUT (1)
+/*
+ * Display op timeout should be greater than total time it can take for
+ * a display thread to commit one frame. One of the largest time consuming
+ * activity performed by display thread is waiting for fences. So keeping
+ * that as a reference and add additional 20s to sustain system holdups.
+ */
+#define WAIT_DISP_OP_TIMEOUT (WAIT_FENCE_FIRST_TIMEOUT + \
+ WAIT_FENCE_FINAL_TIMEOUT + (20 * MSEC_PER_SEC))
+
+#ifndef MAX
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+#define MDP_PP_AD_BL_LINEAR 0x0
+#define MDP_PP_AD_BL_LINEAR_INV 0x1
+
+/**
+ * enum mdp_notify_event - Different frame events to indicate frame update state
+ *
+ * @MDP_NOTIFY_FRAME_BEGIN: Frame update has started, the frame is about to
+ * be programmed into hardware.
+ * @MDP_NOTIFY_FRAME_CFG_DONE: Frame configuration is done.
+ * @MDP_NOTIFY_FRAME_CTX_DONE: Frame has finished accessing sw context.
+ * Next frame can start preparing.
+ * @MDP_NOTIFY_FRAME_READY: Frame ready to be kicked off, this can be used
+ * as the last point in time to synchronize with
+ * source buffers before kickoff.
+ * @MDP_NOTIFY_FRAME_FLUSHED: Configuration of frame has been flushed and
+ * DMA transfer has started.
+ * @MDP_NOTIFY_FRAME_DONE: Frame DMA transfer has completed.
+ * - For video mode panels this will indicate that
+ * previous frame has been replaced by new one.
+ * - For command mode/writeback frame done happens
+ * as soon as the DMA of the frame is done.
+ * @MDP_NOTIFY_FRAME_TIMEOUT: Frame DMA transfer has failed to complete within
+ * a fair amount of time.
+ */
+enum mdp_notify_event {
+ MDP_NOTIFY_FRAME_BEGIN = 1,
+ MDP_NOTIFY_FRAME_CFG_DONE,
+ MDP_NOTIFY_FRAME_CTX_DONE,
+ MDP_NOTIFY_FRAME_READY,
+ MDP_NOTIFY_FRAME_FLUSHED,
+ MDP_NOTIFY_FRAME_DONE,
+ MDP_NOTIFY_FRAME_TIMEOUT,
+};
+
+/**
+ * enum mdp_split_mode - Lists the possible split modes in the device
+ *
+ * @MDP_SPLIT_MODE_NONE: Single physical display with single ctl path
+ * and single layer mixer.
+ * i.e. 1080p single DSI with single LM.
+ * @MDP_DUAL_LM_SINGLE_DISPLAY: Single physical display with single ctl
+ * path but two layer mixers.
+ * i.e. WQXGA eDP or 4K HDMI primary or 1080p
+ * single DSI with split LM to reduce power.
+ * @MDP_DUAL_LM_DUAL_DISPLAY: Two physically separate displays with two
+ * separate but synchronized ctl paths. Each ctl
+ * path with its own layer mixer.
+ * i.e. 1440x2560 with two DSI interfaces.
+ * @MDP_PINGPONG_SPLIT: Two physically separate display but single ctl path with
+ * single layer mixer. Data is split at pingpong module.
+ * i.e. 1440x2560 on chipsets with single DSI interface.
+ */
+enum mdp_split_mode {
+ MDP_SPLIT_MODE_NONE,
+ MDP_DUAL_LM_SINGLE_DISPLAY,
+ MDP_DUAL_LM_DUAL_DISPLAY,
+ MDP_PINGPONG_SPLIT,
+};
+
+/* enum mdp_mmap_type - Lists the possible mmap type in the device
+ *
+ * @MDP_FB_MMAP_NONE: Unknown type.
+ * @MDP_FB_MMAP_ION_ALLOC: Use ION to allocate a buffer for mmap
+ * @MDP_FB_MMAP_PHYSICAL_ALLOC: Use physical buffer for mmap
+ */
+enum mdp_mmap_type {
+ MDP_FB_MMAP_NONE,
+ MDP_FB_MMAP_ION_ALLOC,
+ MDP_FB_MMAP_PHYSICAL_ALLOC,
+};
+
+/**
+ * enum dyn_mode_switch_state - Lists next stage for dynamic mode switch work
+ *
+ * @MDSS_MDP_NO_UPDATE_REQUESTED: incoming frame is processed normally
+ * @MDSS_MDP_WAIT_FOR_VALIDATE: Waiting for ATOMIC_COMMIT-validate to be called
+ * @MDSS_MDP_WAIT_FOR_COMMIT: Waiting for ATOMIC_COMMIT-commit to be called
+ * @MDSS_MDP_WAIT_FOR_KICKOFF: Waiting for KICKOFF to be called
+ */
+enum dyn_mode_switch_state {
+ MDSS_MDP_NO_UPDATE_REQUESTED,
+ MDSS_MDP_WAIT_FOR_VALIDATE,
+ MDSS_MDP_WAIT_FOR_COMMIT,
+ MDSS_MDP_WAIT_FOR_KICKOFF,
+};
+
+/**
+ * enum mdss_fb_idle_state - idle states based on frame updates
+ * @MDSS_FB_NOT_IDLE: Frame updates have started
+ * @MDSS_FB_IDLE_TIMER_RUNNING: Idle timer has been kicked
+ * @MDSS_FB_IDLE: Currently idle
+ */
+enum mdss_fb_idle_state {
+ MDSS_FB_NOT_IDLE,
+ MDSS_FB_IDLE_TIMER_RUNNING,
+ MDSS_FB_IDLE
+};
+
+struct disp_info_type_suspend {
+ int op_enable;
+ int panel_power_state;
+};
+
+struct disp_info_notify {
+ int type;
+ struct timer_list timer;
+ struct completion comp;
+ struct mutex lock;
+ int value;
+ int is_suspend;
+ int ref_count;
+ bool init_done;
+};
+
+struct msm_sync_pt_data {
+ char *fence_name;
+ u32 acq_fen_cnt;
+ struct mdss_fence *acq_fen[MDP_MAX_FENCE_FD];
+ u32 temp_fen_cnt;
+ struct mdss_fence *temp_fen[MDP_MAX_FENCE_FD];
+
+ struct mdss_timeline *timeline;
+ struct mdss_timeline *timeline_retire;
+ int timeline_value;
+ u32 threshold;
+ u32 retire_threshold;
+ atomic_t commit_cnt;
+ bool flushed;
+ bool async_wait_fences;
+
+ struct mutex sync_mutex;
+ struct notifier_block notifier;
+
+ struct mdss_fence *(*get_retire_fence)
+ (struct msm_sync_pt_data *sync_pt_data);
+};
+
+struct msm_fb_data_type;
+
+struct msm_mdp_interface {
+ int (*fb_mem_alloc_fnc)(struct msm_fb_data_type *mfd);
+ int (*fb_mem_get_iommu_domain)(void);
+ int (*init_fnc)(struct msm_fb_data_type *mfd);
+ int (*on_fnc)(struct msm_fb_data_type *mfd);
+ int (*off_fnc)(struct msm_fb_data_type *mfd);
+ /* called to release resources associated to the process */
+ int (*release_fnc)(struct msm_fb_data_type *mfd, struct file *file);
+ int (*mode_switch)(struct msm_fb_data_type *mfd,
+ u32 mode);
+ int (*mode_switch_post)(struct msm_fb_data_type *mfd,
+ u32 mode);
+ int (*kickoff_fnc)(struct msm_fb_data_type *mfd,
+ struct mdp_display_commit *data);
+ int (*atomic_validate)(struct msm_fb_data_type *mfd, struct file *file,
+ struct mdp_layer_commit_v1 *commit);
+ bool (*is_config_same)(struct msm_fb_data_type *mfd,
+ struct mdp_output_layer *layer);
+ int (*pre_commit)(struct msm_fb_data_type *mfd, struct file *file,
+ struct mdp_layer_commit_v1 *commit);
+ int (*pre_commit_fnc)(struct msm_fb_data_type *mfd);
+ int (*ioctl_handler)(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
+ void (*dma_fnc)(struct msm_fb_data_type *mfd);
+ int (*cursor_update)(struct msm_fb_data_type *mfd,
+ struct fb_cursor *cursor);
+ int (*async_position_update)(struct msm_fb_data_type *mfd,
+ struct mdp_position_update *update_pos);
+ int (*lut_update)(struct msm_fb_data_type *mfd, struct fb_cmap *cmap);
+ int (*do_histogram)(struct msm_fb_data_type *mfd,
+ struct mdp_histogram *hist);
+ int (*ad_calc_bl)(struct msm_fb_data_type *mfd, int bl_in,
+ int *bl_out, bool *bl_out_notify);
+ int (*panel_register_done)(struct mdss_panel_data *pdata);
+ u32 (*fb_stride)(u32 fb_index, u32 xres, int bpp);
+ struct mdss_mdp_format_params *(*get_format_params)(u32 format);
+ int (*splash_init_fnc)(struct msm_fb_data_type *mfd);
+ void (*check_dsi_status)(struct work_struct *work, uint32_t interval);
+ int (*configure_panel)(struct msm_fb_data_type *mfd, int mode,
+ int dest_ctrl);
+ int (*input_event_handler)(struct msm_fb_data_type *mfd);
+ int (*pp_release_fnc)(struct msm_fb_data_type *mfd);
+ void (*signal_retire_fence)(struct msm_fb_data_type *mfd,
+ int retire_cnt);
+ void *private1;
+};
+
+#define IS_CALIB_MODE_BL(mfd) (((mfd)->calib_mode) & MDSS_CALIB_MODE_BL)
+#define MDSS_BRIGHT_TO_BL(out, v, bl_max, max_bright) do {\
+ out = (2 * (v) * (bl_max) + max_bright);\
+ do_div(out, 2 * max_bright);\
+ } while (0)
+
+struct mdss_fb_file_info {
+ struct file *file;
+ struct list_head list;
+};
+
+struct msm_fb_backup_type {
+ struct fb_info info;
+ struct mdp_display_commit disp_commit;
+ bool atomic_commit;
+};
+
+struct msm_fb_fps_info {
+ u32 frame_count;
+ ktime_t last_sampled_time_us;
+ u32 measured_fps;
+};
+
+struct msm_fb_data_type {
+ u32 key;
+ u32 index;
+ u32 ref_cnt;
+ u32 fb_page;
+
+ struct panel_id panel;
+ struct mdss_panel_info *panel_info;
+ struct mdss_panel_info reconfig_panel_info;
+ int split_mode;
+ int split_fb_left;
+ int split_fb_right;
+
+ u32 dest;
+ struct fb_info *fbi;
+
+ int idle_time;
+ u32 idle_state;
+ struct msm_fb_fps_info fps_info;
+ struct delayed_work idle_notify_work;
+
+ bool atomic_commit_pending;
+
+ int op_enable;
+ u32 fb_imgType;
+ int panel_reconfig;
+ u32 panel_orientation;
+
+ u32 dst_format;
+ int panel_power_state;
+ struct disp_info_type_suspend suspend;
+
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *table;
+ dma_addr_t iova;
+ void *cursor_buf;
+ phys_addr_t cursor_buf_phys;
+ dma_addr_t cursor_buf_iova;
+
+ int ext_ad_ctrl;
+ u32 ext_bl_ctrl;
+ u32 calib_mode;
+ u32 calib_mode_bl;
+ u32 ad_bl_level;
+ u32 bl_level;
+ u32 bl_scale;
+ u32 bl_min_lvl;
+ u32 unset_bl_level;
+ bool allow_bl_update;
+ u32 bl_level_scaled;
+ struct mutex bl_lock;
+ struct mutex mdss_sysfs_lock;
+ bool ipc_resume;
+
+ struct platform_device *pdev;
+
+ u32 mdp_fb_page_protection;
+
+ struct disp_info_notify update;
+ struct disp_info_notify no_update;
+ struct completion power_off_comp;
+
+ struct msm_mdp_interface mdp;
+
+ struct msm_sync_pt_data mdp_sync_pt_data;
+
+ /* for non-blocking */
+ struct task_struct *disp_thread;
+ atomic_t commits_pending;
+ atomic_t kickoff_pending;
+ wait_queue_head_t commit_wait_q;
+ wait_queue_head_t idle_wait_q;
+ wait_queue_head_t kickoff_wait_q;
+ bool shutdown_pending;
+
+ struct msm_fb_splash_info splash_info;
+
+ wait_queue_head_t ioctl_q;
+ atomic_t ioctl_ref_cnt;
+
+ struct msm_fb_backup_type msm_fb_backup;
+ struct completion power_set_comp;
+ u32 is_power_setting;
+
+ u32 dcm_state;
+ struct list_head file_list;
+ struct ion_client *fb_ion_client;
+ struct ion_handle *fb_ion_handle;
+ struct dma_buf *fbmem_buf;
+ struct dma_buf_attachment *fb_attachment;
+ struct sg_table *fb_table;
+
+ bool mdss_fb_split_stored;
+
+ u32 wait_for_kickoff;
+ u32 thermal_level;
+
+ int fb_mmap_type;
+ struct led_trigger *boot_notification_led;
+
+ /* Following is used for dynamic mode switch */
+ enum dyn_mode_switch_state switch_state;
+ u32 switch_new_mode;
+ bool pending_switch;
+ struct mutex switch_lock;
+ struct input_handler *input_handler;
+};
+
+static inline void mdss_fb_update_notify_update(struct msm_fb_data_type *mfd)
+{
+ int needs_complete = 0;
+
+ mutex_lock(&mfd->update.lock);
+ mfd->update.value = mfd->update.type;
+ needs_complete = mfd->update.value == NOTIFY_TYPE_UPDATE;
+ mutex_unlock(&mfd->update.lock);
+ if (needs_complete) {
+ complete(&mfd->update.comp);
+ mutex_lock(&mfd->no_update.lock);
+ if (mfd->no_update.timer.function)
+ del_timer(&(mfd->no_update.timer));
+
+ mfd->no_update.timer.expires = jiffies + (2 * HZ);
+ add_timer(&mfd->no_update.timer);
+ mutex_unlock(&mfd->no_update.lock);
+ }
+}
+
+/* Function returns true for either any kind of dual display */
+static inline bool is_panel_split(struct msm_fb_data_type *mfd)
+{
+ return mfd && mfd->panel_info && mfd->panel_info->is_split_display;
+}
+/* Function returns true, if Layer Mixer split is Set */
+static inline bool is_split_lm(struct msm_fb_data_type *mfd)
+{
+ return mfd &&
+ (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY ||
+ mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY);
+}
+/* Function returns true, if Ping pong split is Set*/
+static inline bool is_pingpong_split(struct msm_fb_data_type *mfd)
+{
+ return mfd && (mfd->split_mode == MDP_PINGPONG_SPLIT);
+}
+static inline bool is_dual_lm_single_display(struct msm_fb_data_type *mfd)
+{
+ return mfd && (mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY);
+}
+static inline bool mdss_fb_is_power_off(struct msm_fb_data_type *mfd)
+{
+ return mdss_panel_is_power_off(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_power_on_interactive(
+ struct msm_fb_data_type *mfd)
+{
+ return mdss_panel_is_power_on_interactive(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_power_on(struct msm_fb_data_type *mfd)
+{
+ return mdss_panel_is_power_on(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_power_on_lp(struct msm_fb_data_type *mfd)
+{
+ return mdss_panel_is_power_on_lp(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_power_on_ulp(struct msm_fb_data_type *mfd)
+{
+ return mdss_panel_is_power_on_ulp(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_hdmi_primary(struct msm_fb_data_type *mfd)
+{
+ return (mfd && (mfd->index == 0) &&
+ (mfd->panel_info->type == DTV_PANEL));
+}
+
+static inline void mdss_fb_init_fps_info(struct msm_fb_data_type *mfd)
+{
+ memset(&mfd->fps_info, 0, sizeof(mfd->fps_info));
+}
+int mdss_fb_get_phys_info(dma_addr_t *start, unsigned long *len, int fb_num);
+void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl);
+void mdss_fb_update_backlight(struct msm_fb_data_type *mfd);
+int mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data);
+void mdss_fb_signal_timeline(struct msm_sync_pt_data *sync_pt_data);
+struct mdss_fence *mdss_fb_sync_get_fence(struct mdss_timeline *timeline,
+ const char *fence_name, int val);
+int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp);
+int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state);
+int mdss_fb_suspres_panel(struct device *dev, void *data);
+int mdss_fb_do_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg, struct file *file);
+int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg, struct file *file);
+int mdss_fb_atomic_commit(struct fb_info *info,
+ struct mdp_layer_commit *commit, struct file *file);
+int mdss_fb_async_position_update(struct fb_info *info,
+ struct mdp_position_update *update_pos);
+
+u32 mdss_fb_get_mode_switch(struct msm_fb_data_type *mfd);
+void mdss_fb_report_panel_dead(struct msm_fb_data_type *mfd);
+void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
+ struct fb_var_screeninfo *var);
+void mdss_fb_calc_fps(struct msm_fb_data_type *mfd);
+#endif /* MDSS_FB_H */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.c b/drivers/video/fbdev/msm/mdss_hdmi_audio.c
new file mode 100644
index 0000000..446e8b4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.c
@@ -0,0 +1,526 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/extcon.h>
+#include <linux/gcd.h>
+
+#include "mdss_hdmi_audio.h"
+#include "mdss_hdmi_util.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+ AUDIO_CHANNEL_2 = 2,
+ AUDIO_CHANNEL_3,
+ AUDIO_CHANNEL_4,
+ AUDIO_CHANNEL_5,
+ AUDIO_CHANNEL_6,
+ AUDIO_CHANNEL_7,
+ AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+ u32 n;
+ u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+ AUDIO_SAMPLE_RATE_32KHZ,
+ AUDIO_SAMPLE_RATE_44_1KHZ,
+ AUDIO_SAMPLE_RATE_48KHZ,
+ AUDIO_SAMPLE_RATE_88_2KHZ,
+ AUDIO_SAMPLE_RATE_96KHZ,
+ AUDIO_SAMPLE_RATE_176_4KHZ,
+ AUDIO_SAMPLE_RATE_192KHZ,
+ AUDIO_SAMPLE_RATE_MAX
+};
+
+struct hdmi_audio {
+ struct mdss_io_data *io;
+ struct msm_hdmi_audio_setup_params params;
+ struct extcon_dev sdev;
+ u32 pclk;
+ bool ack_enabled;
+ bool audio_ack_enabled;
+ atomic_t ack_pending;
+};
+
+static void hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+ u32 rate = *sample_rate_hz;
+
+ switch (rate) {
+ case 32000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ pr_debug("%d unchanged\n", rate);
+ break;
+ }
+}
+
+static void hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+ struct hdmi_audio_acr *acr)
+{
+ u32 div, mul;
+
+ if (!acr) {
+ pr_err("invalid data\n");
+ return;
+ }
+
+ /*
+ * as per HDMI specification, N/CTS = (128*fs)/pclk.
+ * get the ratio using this formula.
+ */
+ acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+ acr->cts = pclk;
+
+ /* get the greatest common divisor for the ratio */
+ div = gcd(acr->n, acr->cts);
+
+ /* get the n and cts values wrt N/CTS formula */
+ acr->n /= div;
+ acr->cts /= div;
+
+ /*
+ * as per HDMI specification, 300 <= 128*fs/N <= 1500
+ * with a target of 128*fs/N = 1000. To get closest
+ * value without truncating fractional values, find
+ * the corresponding multiplier
+ */
+ mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+ + (acr->n - 1)) / acr->n;
+
+ acr->n *= mul;
+ acr->cts *= mul;
+}
+
+static void hdmi_audio_acr_enable(struct hdmi_audio *audio)
+{
+ struct mdss_io_data *io;
+ struct hdmi_audio_acr acr;
+ struct msm_hdmi_audio_setup_params *params;
+ u32 pclk, layout, multiplier = 1, sample_rate;
+ u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = audio->io;
+ params = &audio->params;
+ pclk = audio->pclk;
+ sample_rate = params->sample_rate_hz;
+
+ hdmi_audio_get_acr_param(pclk * HDMI_KHZ_TO_HZ, sample_rate, &acr);
+ hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+ layout = params->num_of_channels == AUDIO_CHANNEL_2 ? 0 : 1;
+
+ pr_debug("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+ /* AUDIO_PRIORITY | SOURCE */
+ acr_pkt_ctl = BIT(31) | BIT(8);
+
+ switch (sample_rate) {
+ case AUDIO_SAMPLE_RATE_44_1KHZ:
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_48KHZ:
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_192KHZ:
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_176_4KHZ:
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_96KHZ:
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_88_2KHZ:
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ default:
+ multiplier = 1;
+
+ acr_pkt_ctl |= 0x1 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_32_0;
+ acr_reg_n = HDMI_ACR_32_1;
+ break;
+ }
+
+ aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+ /* N_MULTIPLE(multiplier) */
+ acr_pkt_ctl &= ~(7 << 16);
+ acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+ /* SEND | CONT */
+ acr_pkt_ctl |= BIT(0) | BIT(1);
+
+ DSS_REG_W(io, acr_reg_cts, acr.cts);
+ DSS_REG_W(io, acr_reg_n, acr.n);
+ DSS_REG_W(io, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+ DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+static void hdmi_audio_acr_setup(struct hdmi_audio *audio, bool on)
+{
+ if (on)
+ hdmi_audio_acr_enable(audio);
+ else
+ DSS_REG_W(audio->io, HDMI_ACR_PKT_CTRL, 0);
+}
+
+static void hdmi_audio_infoframe_setup(struct hdmi_audio *audio, bool enabled)
+{
+ struct mdss_io_data *io = NULL;
+ u32 channels, channel_allocation, level_shift, down_mix, layout;
+ u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+ u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+ u32 check_sum, sample_present;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = audio->io;
+ if (!io->base) {
+ pr_err("core io not inititalized\n");
+ return;
+ }
+
+ audio_info_ctrl_reg = DSS_REG_R(io, HDMI_INFOFRAME_CTRL0);
+ audio_info_ctrl_reg &= ~0xF0;
+
+ if (!enabled)
+ goto end;
+
+ channels = audio->params.num_of_channels - 1;
+ channel_allocation = audio->params.channel_allocation;
+ level_shift = audio->params.level_shift;
+ down_mix = audio->params.down_mix;
+ sample_present = audio->params.sample_present;
+
+ layout = audio->params.num_of_channels == AUDIO_CHANNEL_2 ? 0 : 1;
+ aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+ DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+ audio_info_1_reg |= channel_allocation & 0xFF;
+ audio_info_1_reg |= ((level_shift & 0xF) << 11);
+ audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
+ check_sum = 0;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+ check_sum += channels;
+ check_sum += channel_allocation;
+ check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+ check_sum &= 0xFF;
+ check_sum = (u8) (256 - check_sum);
+
+ audio_info_0_reg |= check_sum & 0xFF;
+ audio_info_0_reg |= ((channels & 0x7) << 8);
+
+ /* Enable Audio InfoFrame Transmission */
+ audio_info_ctrl_reg |= 0xF0;
+
+ if (layout) {
+ /* Set the Layout bit */
+ hdmi_debug_reg |= BIT(4);
+
+ /* Set the Sample Present bits */
+ hdmi_debug_reg |= sample_present & 0xF;
+ }
+end:
+ DSS_REG_W(io, HDMI_DEBUG, hdmi_debug_reg);
+ DSS_REG_W(io, HDMI_AUDIO_INFO0, audio_info_0_reg);
+ DSS_REG_W(io, HDMI_AUDIO_INFO1, audio_info_1_reg);
+ DSS_REG_W(io, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+static int hdmi_audio_on(void *ctx, u32 pclk,
+ struct msm_hdmi_audio_setup_params *params)
+{
+ struct hdmi_audio *audio = ctx;
+ int rc = 0;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ audio->pclk = pclk;
+ audio->params = *params;
+
+ if (!audio->params.num_of_channels) {
+ audio->params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+ audio->params.num_of_channels = AUDIO_CHANNEL_2;
+ }
+
+ hdmi_audio_acr_setup(audio, true);
+ hdmi_audio_infoframe_setup(audio, true);
+
+ pr_debug("HDMI Audio: Enabled\n");
+end:
+ return rc;
+}
+
+static void hdmi_audio_off(void *ctx)
+{
+ struct hdmi_audio *audio = ctx;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ hdmi_audio_infoframe_setup(audio, false);
+ hdmi_audio_acr_setup(audio, false);
+
+ pr_debug("HDMI Audio: Disabled\n");
+}
+
+static void hdmi_audio_notify(void *ctx, int val)
+{
+ struct hdmi_audio *audio = ctx;
+ int state = 0;
+ bool switched;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ state = audio->sdev.state;
+ if (state == val)
+ return;
+
+ if (audio->ack_enabled &&
+ atomic_read(&audio->ack_pending)) {
+ pr_err("%s ack pending, not notifying %s\n",
+ state ? "connect" : "disconnect",
+ val ? "connect" : "disconnect");
+ return;
+ }
+
+ extcon_set_state_sync(&audio->sdev, 0, val);
+ switched = audio->sdev.state != state;
+
+ if (audio->ack_enabled && switched)
+ atomic_set(&audio->ack_pending, 1);
+
+ pr_debug("audio %s %s\n", switched ? "switched to" : "same as",
+ audio->sdev.state ? "HDMI" : "SPKR");
+}
+
+static void hdmi_audio_ack(void *ctx, u32 ack, u32 hpd)
+{
+ struct hdmi_audio *audio = ctx;
+ u32 ack_hpd;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (ack & AUDIO_ACK_SET_ENABLE) {
+ audio->ack_enabled = ack & AUDIO_ACK_ENABLE ?
+ true : false;
+
+ pr_debug("audio ack feature %s\n",
+ audio->ack_enabled ? "enabled" : "disabled");
+ return;
+ }
+
+ if (!audio->ack_enabled)
+ return;
+
+ atomic_set(&audio->ack_pending, 0);
+
+ ack_hpd = ack & AUDIO_ACK_CONNECT;
+
+ pr_debug("acknowledging %s\n",
+ ack_hpd ? "connect" : "disconnect");
+
+ if (ack_hpd != hpd) {
+ pr_debug("unbalanced audio state, ack %d, hpd %d\n",
+ ack_hpd, hpd);
+
+ hdmi_audio_notify(ctx, hpd);
+ }
+}
+
+static void hdmi_audio_reset(void *ctx)
+{
+ struct hdmi_audio *audio = ctx;
+
+ if (!audio) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ atomic_set(&audio->ack_pending, 0);
+}
+
+static void hdmi_audio_status(void *ctx, struct hdmi_audio_status *status)
+{
+ struct hdmi_audio *audio = ctx;
+
+ if (!audio || !status) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ status->ack_enabled = audio->ack_enabled;
+ status->ack_pending = atomic_read(&audio->ack_pending);
+ status->switched = audio->sdev.state;
+}
+
+/**
+ * hdmi_audio_register() - audio registration function
+ * @data: registration initialization data
+ *
+ * This API configures audio module for client to use HDMI audio.
+ * Provides audio functionalities which client can call.
+ * Initializes internal data structures.
+ *
+ * Return: pointer to audio data that client needs to pass on
+ * calling audio functions.
+ */
+void *hdmi_audio_register(struct hdmi_audio_init_data *data)
+{
+ struct hdmi_audio *audio = NULL;
+ int rc = 0;
+
+ if (!data)
+ goto end;
+
+ audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+ if (!audio)
+ goto end;
+
+ audio->sdev.name = "hdmi_audio";
+ rc = extcon_dev_register(&audio->sdev);
+ if (rc) {
+ pr_err("audio switch registration failed\n");
+ kzfree(audio);
+ goto end;
+ }
+
+ audio->io = data->io;
+
+ data->ops->on = hdmi_audio_on;
+ data->ops->off = hdmi_audio_off;
+ data->ops->notify = hdmi_audio_notify;
+ data->ops->ack = hdmi_audio_ack;
+ data->ops->reset = hdmi_audio_reset;
+ data->ops->status = hdmi_audio_status;
+end:
+ return audio;
+}
+
+/**
+ * hdmi_audio_unregister() - unregister audio module
+ * @ctx: audio module's data
+ *
+ * Delete audio module's instance and allocated resources
+ */
+void hdmi_audio_unregister(void *ctx)
+{
+ struct hdmi_audio *audio = ctx;
+
+ if (audio) {
+ extcon_dev_unregister(&audio->sdev);
+ kfree(ctx);
+ }
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.h b/drivers/video/fbdev/msm/mdss_hdmi_audio.h
new file mode 100644
index 0000000..2449123
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_AUDIO_H__
+#define __MDSS_HDMI_AUDIO_H__
+
+#include <linux/mdss_io_util.h>
+#include <linux/msm_hdmi.h>
+
+#define AUDIO_ACK_SET_ENABLE BIT(5)
+#define AUDIO_ACK_ENABLE BIT(4)
+#define AUDIO_ACK_CONNECT BIT(0)
+
+/**
+ * struct hdmi_audio_status - hdmi audio current status info
+ * @ack_pending: notification acknowledgment status
+ * @ack_enabled: acknowledgment feature is enabled or disabled
+ * @switched: audio notification status for routing
+ *
+ * Data for client to query about the current status of audio
+ */
+struct hdmi_audio_status {
+ bool ack_pending;
+ bool ack_enabled;
+ bool switched;
+};
+
+/**
+ * struct hdmi_audio_ops - audio operations for clients to call
+ * @on: function pointer to enable audio
+ * @reset: function pointer to reset the audio current status to default
+ * @status: function pointer to get the current status of audio
+ * @notify: function pointer to notify other modules for audio routing
+ * @ack: function pointer to acknowledge audio routing change
+ *
+ * Provides client operations for audio functionalities
+ */
+struct hdmi_audio_ops {
+ int (*on)(void *ctx, u32 pclk,
+ struct msm_hdmi_audio_setup_params *params);
+ void (*off)(void *ctx);
+ void (*reset)(void *ctx);
+ void (*status)(void *ctx, struct hdmi_audio_status *status);
+ void (*notify)(void *ctx, int val);
+ void (*ack)(void *ctx, u32 ack, u32 hpd);
+};
+
+/**
+ * struct hdmi_audio_init_data - data needed for initializing audio module
+ * @io: pointer to register access related data
+ * @ops: pointer to populate operation functions.
+ *
+ * Defines the data needed to be provided while initializing audio module
+ */
+struct hdmi_audio_init_data {
+ struct mdss_io_data *io;
+ struct hdmi_audio_ops *ops;
+};
+
+void *hdmi_audio_register(struct hdmi_audio_init_data *data);
+void hdmi_audio_unregister(void *data);
+
+#endif /* __MDSS_HDMI_AUDIO_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.c b/drivers/video/fbdev/msm/mdss_hdmi_cec.c
new file mode 100644
index 0000000..f15272e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.c
@@ -0,0 +1,504 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/input.h>
+
+#include "mdss_hdmi_cec.h"
+#include "mdss_panel.h"
+
+#define CEC_STATUS_WR_ERROR BIT(0)
+#define CEC_STATUS_WR_DONE BIT(1)
+#define CEC_INTR (BIT(1) | BIT(3) | BIT(7))
+
+#define CEC_SUPPORTED_HW_VERSION 0x30000001
+
+/* Reference: HDMI 1.4a Specification section 7.1 */
+
+#define CEC_OP_SET_STREAM_PATH 0x86
+#define CEC_OP_KEY_PRESS 0x44
+#define CEC_OP_STANDBY 0x36
+
+struct hdmi_cec_ctrl {
+ bool cec_enabled;
+ bool cec_wakeup_en;
+ bool cec_device_suspend;
+
+ u32 cec_msg_wr_status;
+ spinlock_t lock;
+ struct work_struct cec_read_work;
+ struct completion cec_msg_wr_done;
+ struct hdmi_cec_init_data init_data;
+ struct input_dev *input;
+};
+
+static int hdmi_cec_msg_send(void *data, struct cec_msg *msg)
+{
+ int i, line_check_retry = 10, rc = 0;
+ u32 frame_retransmit = RETRANSMIT_MAX_NUM;
+ bool frame_type;
+ unsigned long flags;
+ struct mdss_io_data *io = NULL;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)data;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io || !msg) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ reinit_completion(&cec_ctrl->cec_msg_wr_done);
+ cec_ctrl->cec_msg_wr_status = 0;
+ frame_type = (msg->recvr_id == 15 ? BIT(0) : 0);
+ if (msg->retransmit > 0 && msg->retransmit < RETRANSMIT_MAX_NUM)
+ frame_retransmit = msg->retransmit;
+
+ /* toggle cec in order to flush out bad hw state, if any */
+ DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+ DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+
+ frame_retransmit = (frame_retransmit & 0xF) << 4;
+ DSS_REG_W(io, HDMI_CEC_RETRANSMIT, BIT(0) | frame_retransmit);
+
+ /* header block */
+ DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+ (((msg->sender_id << 4) | msg->recvr_id) << 8) | frame_type);
+
+ /* data block 0 : opcode */
+ DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+ ((msg->frame_size < 2 ? 0 : msg->opcode) << 8) | frame_type);
+
+ /* data block 1-14 : operand 0-13 */
+ for (i = 0; i < msg->frame_size - 2; i++)
+ DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+ (msg->operand[i] << 8) | frame_type);
+
+ while ((DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0)) &&
+ line_check_retry) {
+ line_check_retry--;
+ DEV_DBG("%s: CEC line is busy(%d)\n", __func__,
+ line_check_retry);
+ schedule();
+ }
+
+ if (!line_check_retry && (DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0))) {
+ DEV_ERR("%s: CEC line is busy. Retry\n", __func__);
+ return -EAGAIN;
+ }
+
+ /* start transmission */
+ DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0) | BIT(1) |
+ ((msg->frame_size & 0x1F) << 4) | BIT(9));
+
+ if (!wait_for_completion_timeout(
+ &cec_ctrl->cec_msg_wr_done, HZ)) {
+ DEV_ERR("%s: timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ if (cec_ctrl->cec_msg_wr_status == CEC_STATUS_WR_ERROR) {
+ rc = -ENXIO;
+ DEV_ERR("%s: msg write failed.\n", __func__);
+ } else {
+ DEV_DBG("%s: CEC write frame done (frame len=%d)", __func__,
+ msg->frame_size);
+ }
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ return rc;
+} /* hdmi_cec_msg_send */
+
+static void hdmi_cec_init_input_event(struct hdmi_cec_ctrl *cec_ctrl)
+{
+ int rc = 0;
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return;
+ }
+
+ /* Initialize CEC input events */
+ if (!cec_ctrl->input)
+ cec_ctrl->input = input_allocate_device();
+ if (!cec_ctrl->input) {
+ DEV_ERR("%s: hdmi input device allocation failed\n", __func__);
+ return;
+ }
+
+ cec_ctrl->input->name = "HDMI CEC User or Deck Control";
+ cec_ctrl->input->phys = "hdmi/input0";
+ cec_ctrl->input->id.bustype = BUS_VIRTUAL;
+
+ input_set_capability(cec_ctrl->input, EV_KEY, KEY_POWER);
+
+ rc = input_register_device(cec_ctrl->input);
+ if (rc) {
+ DEV_ERR("%s: cec input device registeration failed\n",
+ __func__);
+ input_free_device(cec_ctrl->input);
+ cec_ctrl->input = NULL;
+ return;
+ }
+}
+
+static void hdmi_cec_deinit_input_event(struct hdmi_cec_ctrl *cec_ctrl)
+{
+ if (cec_ctrl->input)
+ input_unregister_device(cec_ctrl->input);
+ cec_ctrl->input = NULL;
+}
+
+static void hdmi_cec_msg_recv(struct work_struct *work)
+{
+ int i;
+ u32 data;
+ struct hdmi_cec_ctrl *cec_ctrl = NULL;
+ struct mdss_io_data *io = NULL;
+ struct cec_msg msg;
+ struct cec_cbs *cbs;
+
+ cec_ctrl = container_of(work, struct hdmi_cec_ctrl, cec_read_work);
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (!cec_ctrl->cec_enabled) {
+ DEV_ERR("%s: cec not enabled\n", __func__);
+ return;
+ }
+
+ io = cec_ctrl->init_data.io;
+ cbs = cec_ctrl->init_data.cbs;
+
+ data = DSS_REG_R(io, HDMI_CEC_RD_DATA);
+
+ msg.recvr_id = (data & 0x000F);
+ msg.sender_id = (data & 0x00F0) >> 4;
+ msg.frame_size = (data & 0x1F00) >> 8;
+ DEV_DBG("%s: Recvd init=[%u] dest=[%u] size=[%u]\n", __func__,
+ msg.sender_id, msg.recvr_id,
+ msg.frame_size);
+
+ if (msg.frame_size < 1 || msg.frame_size > MAX_CEC_FRAME_SIZE) {
+ DEV_ERR("%s: invalid message (frame length = %d)\n",
+ __func__, msg.frame_size);
+ return;
+ } else if (msg.frame_size == 1) {
+ DEV_DBG("%s: polling message (dest[%x] <- init[%x])\n",
+ __func__, msg.recvr_id, msg.sender_id);
+ return;
+ }
+
+ /* data block 0 : opcode */
+ data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+ msg.opcode = data & 0xFF;
+
+ /* data block 1-14 : operand 0-13 */
+ for (i = 0; i < msg.frame_size - 2; i++) {
+ data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+ msg.operand[i] = data & 0xFF;
+ }
+
+ for (; i < MAX_OPERAND_SIZE; i++)
+ msg.operand[i] = 0;
+
+ DEV_DBG("%s: opcode 0x%x, wakup_en %d, device_suspend %d\n", __func__,
+ msg.opcode, cec_ctrl->cec_wakeup_en,
+ cec_ctrl->cec_device_suspend);
+
+ if ((msg.opcode == CEC_OP_SET_STREAM_PATH ||
+ msg.opcode == CEC_OP_KEY_PRESS) &&
+ cec_ctrl->input && cec_ctrl->cec_wakeup_en &&
+ cec_ctrl->cec_device_suspend) {
+ DEV_DBG("%s: Sending power on at wakeup\n", __func__);
+ input_report_key(cec_ctrl->input, KEY_POWER, 1);
+ input_sync(cec_ctrl->input);
+ input_report_key(cec_ctrl->input, KEY_POWER, 0);
+ input_sync(cec_ctrl->input);
+ }
+
+ if ((msg.opcode == CEC_OP_STANDBY) &&
+ cec_ctrl->input && cec_ctrl->cec_wakeup_en &&
+ !cec_ctrl->cec_device_suspend) {
+ DEV_DBG("%s: Sending power off on standby\n", __func__);
+ input_report_key(cec_ctrl->input, KEY_POWER, 1);
+ input_sync(cec_ctrl->input);
+ input_report_key(cec_ctrl->input, KEY_POWER, 0);
+ input_sync(cec_ctrl->input);
+ }
+
+ if (cbs && cbs->msg_recv_notify)
+ cbs->msg_recv_notify(cbs->data, &msg);
+}
+
+/**
+ * hdmi_cec_isr() - interrupt handler for cec hw module
+ * @cec_ctrl: pointer to cec hw module's data
+ *
+ * Return: irq error code
+ *
+ * The API can be called by the HDMI Tx driver on receiving hw interrupts
+ * to let the CEC-related interrupts be handled by this module.
+ */
+int hdmi_cec_isr(void *input)
+{
+ int rc = 0;
+ u32 cec_intr, cec_status;
+ unsigned long flags;
+ struct mdss_io_data *io = NULL;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return -EPERM;
+ }
+
+ if (!cec_ctrl->cec_enabled) {
+ DEV_DBG("%s: CEC feature not enabled\n", __func__);
+ return 0;
+ }
+
+ io = cec_ctrl->init_data.io;
+
+ cec_intr = DSS_REG_R_ND(io, HDMI_CEC_INT);
+
+ cec_status = DSS_REG_R_ND(io, HDMI_CEC_STATUS);
+
+ if ((cec_intr & BIT(0)) && (cec_intr & BIT(1))) {
+ DEV_DBG("%s: CEC_IRQ_FRAME_WR_DONE\n", __func__);
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(0));
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_DONE;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+ complete_all(&cec_ctrl->cec_msg_wr_done);
+ }
+
+ if ((cec_intr & BIT(2)) && (cec_intr & BIT(3))) {
+ DEV_DBG("%s: CEC_IRQ_FRAME_ERROR\n", __func__);
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(2));
+
+ spin_lock_irqsave(&cec_ctrl->lock, flags);
+ cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_ERROR;
+ spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+ if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+ complete_all(&cec_ctrl->cec_msg_wr_done);
+ }
+
+ if ((cec_intr & BIT(6)) && (cec_intr & BIT(7))) {
+ DEV_DBG("%s: CEC_IRQ_FRAME_RD_DONE\n", __func__);
+
+ DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(6));
+ queue_work(cec_ctrl->init_data.workq, &cec_ctrl->cec_read_work);
+ }
+
+ return rc;
+}
+
+void hdmi_cec_device_suspend(void *input, bool suspend)
+{
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl) {
+ DEV_WARN("%s: HDMI CEC HW module not initialized.\n", __func__);
+ return;
+ }
+
+ cec_ctrl->cec_device_suspend = suspend;
+}
+
+bool hdmi_cec_is_wakeup_en(void *input)
+{
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl) {
+ DEV_WARN("%s: HDMI CEC HW module not initialized.\n", __func__);
+ return 0;
+ }
+
+ return cec_ctrl->cec_wakeup_en;
+}
+
+static void hdmi_cec_wakeup_en(void *input, bool enable)
+{
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return;
+ }
+
+ cec_ctrl->cec_wakeup_en = enable;
+}
+
+static void hdmi_cec_write_logical_addr(void *input, u8 addr)
+{
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ return;
+ }
+
+ if (cec_ctrl->cec_enabled)
+ DSS_REG_W(cec_ctrl->init_data.io, HDMI_CEC_ADDR, addr & 0xF);
+}
+
+static int hdmi_cec_enable(void *input, bool enable)
+{
+ int ret = 0;
+ u32 hdmi_hw_version, reg_val;
+ struct mdss_io_data *io = NULL;
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+ struct mdss_panel_info *pinfo;
+
+ if (!cec_ctrl || !cec_ctrl->init_data.io) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ ret = -EPERM;
+ goto end;
+ }
+
+ io = cec_ctrl->init_data.io;
+ pinfo = cec_ctrl->init_data.pinfo;
+
+ if (!pinfo) {
+ DEV_ERR("%s: invalid pinfo\n", __func__);
+ goto end;
+ }
+
+ if (enable) {
+ /* 19.2Mhz * 0.00005 us = 950 = 0x3B6 */
+ DSS_REG_W(io, HDMI_CEC_REFTIMER, (0x3B6 & 0xFFF) | BIT(16));
+
+ hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+ if (hdmi_hw_version >= CEC_SUPPORTED_HW_VERSION) {
+ DSS_REG_W(io, HDMI_CEC_RD_RANGE, 0x30AB9888);
+ DSS_REG_W(io, HDMI_CEC_WR_RANGE, 0x888AA888);
+
+ DSS_REG_W(io, HDMI_CEC_RD_START_RANGE, 0x88888888);
+ DSS_REG_W(io, HDMI_CEC_RD_TOTAL_RANGE, 0x99);
+ DSS_REG_W(io, HDMI_CEC_COMPL_CTL, 0xF);
+ DSS_REG_W(io, HDMI_CEC_WR_CHECK_CONFIG, 0x4);
+ } else {
+ DEV_DBG("%s: CEC version %d is not supported.\n",
+ __func__, hdmi_hw_version);
+ ret = -EPERM;
+ goto end;
+ }
+
+ DSS_REG_W(io, HDMI_CEC_RD_FILTER, BIT(0) | (0x7FF << 4));
+ DSS_REG_W(io, HDMI_CEC_TIME, BIT(0) | ((7 * 0x30) << 7));
+
+ /* Enable CEC interrupts */
+ DSS_REG_W(io, HDMI_CEC_INT, CEC_INTR);
+
+ /* Enable Engine */
+ DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+ } else {
+ /* Disable Engine */
+ DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+
+ /* Disable CEC interrupts */
+ reg_val = DSS_REG_R(io, HDMI_CEC_INT);
+ DSS_REG_W(io, HDMI_CEC_INT, reg_val & ~CEC_INTR);
+ }
+
+ cec_ctrl->cec_enabled = enable;
+end:
+ return ret;
+}
+
+/**
+ * hdmi_cec_init() - Initialize the CEC hw module
+ * @init_data: data needed to initialize the cec hw module
+ *
+ * Return: pointer to the cec hw module's data, which needs to be passed
+ * when calling the cec hw module's APIs, or an error code on failure.
+ *
+ * The API registers CEC HW modules with the client and provides HW
+ * specific operations.
+ */
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data)
+{
+ struct hdmi_cec_ctrl *cec_ctrl;
+ struct cec_ops *ops;
+ int ret = 0;
+
+ if (!init_data) {
+ DEV_ERR("%s: Invalid input\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ops = init_data->ops;
+ if (!ops) {
+ DEV_ERR("%s: no ops provided\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ cec_ctrl = kzalloc(sizeof(*cec_ctrl), GFP_KERNEL);
+ if (!cec_ctrl) {
+ DEV_ERR("%s: FAILED: out of memory\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* keep a copy of init data */
+ cec_ctrl->init_data = *init_data;
+
+ spin_lock_init(&cec_ctrl->lock);
+ INIT_WORK(&cec_ctrl->cec_read_work, hdmi_cec_msg_recv);
+ init_completion(&cec_ctrl->cec_msg_wr_done);
+
+ /* populate hardware specific operations to client */
+ ops->send_msg = hdmi_cec_msg_send;
+ ops->wt_logical_addr = hdmi_cec_write_logical_addr;
+ ops->enable = hdmi_cec_enable;
+ ops->data = cec_ctrl;
+ ops->wakeup_en = hdmi_cec_wakeup_en;
+ ops->is_wakeup_en = hdmi_cec_is_wakeup_en;
+ ops->device_suspend = hdmi_cec_device_suspend;
+
+ hdmi_cec_init_input_event(cec_ctrl);
+
+ return cec_ctrl;
+error:
+ return ERR_PTR(ret);
+}
+
+/**
+ * hdmi_cec_deinit() - de-initialize CEC HW module
+ * @data: CEC HW module data
+ *
+ * This API releases all allocated resources.
+ */
+void hdmi_cec_deinit(void *data)
+{
+ struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)data;
+
+ if (cec_ctrl)
+ hdmi_cec_deinit_input_event(cec_ctrl);
+
+ kfree(cec_ctrl);
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.h b/drivers/video/fbdev/msm/mdss_hdmi_cec.h
new file mode 100644
index 0000000..de4bb35
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2010-2013, 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_CEC_H__
+#define __MDSS_HDMI_CEC_H__
+
+#include "mdss_hdmi_util.h"
+#include "mdss_cec_core.h"
+
+#define RETRANSMIT_MAX_NUM 5
+
+/**
+ * struct hdmi_cec_init_data - data needed for initializing cec hw module
+ * @workq: pointer to workqueue
+ * @io: pointer to register access related data
+ * @pinfo: pointer to panel information data
+ * @cbs: pointer to cec abstract callback functions.
+ * @ops: pointer to cec hw operation functions.
+ *
+ * Defines the data needed to be provided while initializing cec hw module
+ */
+struct hdmi_cec_init_data {
+ struct workqueue_struct *workq;
+ struct mdss_io_data *io;
+ struct mdss_panel_info *pinfo;
+ struct cec_cbs *cbs;
+ struct cec_ops *ops;
+};
+
+/**
+ * hdmi_cec_isr() - interrupt handler for cec hw module
+ * @cec_ctrl: pointer to cec hw module's data
+ *
+ * Return: irq error code
+ *
+ * The API can be called by the HDMI Tx driver on receiving hw interrupts
+ * to let the CEC-related interrupts be handled by this module.
+ */
+int hdmi_cec_isr(void *cec_ctrl);
+
+/**
+ * hdmi_cec_init() - Initialize the CEC hw module
+ * @init_data: data needed to initialize the cec hw module
+ *
+ * Return: pointer to the cec hw module's data, which needs to be passed
+ * when calling the cec hw module's APIs, or an error code on failure.
+ *
+ * The API registers CEC HW modules with the client and provides HW
+ * specific operations.
+ */
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data);
+
+/**
+ * hdmi_cec_deinit() - de-initialize CEC HW module
+ * @data: CEC HW module data
+ *
+ * This API releases all allocated resources.
+ */
+void hdmi_cec_deinit(void *data);
+
+/**
+ * hdmi_cec_is_wakeup_en() - checks cec wakeup state
+ * @cec_ctrl: pointer to cec hw module's data
+ *
+ * Return: cec wakeup state
+ *
+ * This API is used to query whether the cec wakeup functionality is
+ * enabled or not.
+ */
+bool hdmi_cec_is_wakeup_en(void *cec_ctrl);
+
+/**
+ * hdmi_cec_device_suspend() - updates cec with device suspend state
+ * @cec_ctrl: pointer to cec hw module's data
+ * @suspend: device suspend state
+ *
+ * This API is used to update the CEC HW module of the device's suspend
+ * state.
+ */
+void hdmi_cec_device_suspend(void *cec_ctrl, bool suspend);
+#endif /* __MDSS_HDMI_CEC_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
new file mode 100644
index 0000000..ab8491d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -0,0 +1,2494 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "mdss_fb.h"
+#include "mdss_hdmi_edid.h"
+
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+/*
+ * As per CEA-861-E specification 7.5.2, there can be
+ * up to 31 bytes following any tag (data block type).
+ */
+#define MAX_DATA_BLOCK_SIZE 31
+
+#define HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd) \
+ (!((vsd)[8] & BIT(7)) ? 9 : (!((vsd)[8] & BIT(6)) ? 11 : 13))
+
+/*
+ * As per the CEA-861E spec, there can be a total of 10 short audio
+ * descriptors with each SAD being 3 bytes long.
+ * Thus, the maximum length of the audio data block would be 30 bytes
+ */
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+
+/*
+ * As per the HDMI 2.0 spec, the size of the HF-VSDB cannot exceed 31 bytes and
+ * the minimum size is 7 bytes.
+ */
+#define MAX_HF_VSDB_SIZE 31
+#define MIN_HF_VSDB_SIZE 7
+
+/* IEEE OUI for HDMI Forum. */
+#define HDMI_FORUM_IEEE_OUI 0xD85DC4
+
+/* Support for first 5 EDID blocks */
+#define MAX_EDID_SIZE (EDID_BLOCK_SIZE * MAX_EDID_BLOCKS)
+
+#define BUFF_SIZE_3D 128
+
+#define DTD_MAX 0x04
+#define DTD_OFFSET 0x36
+#define DTD_SIZE 0x12
+#define REVISION_OFFSET 0x13
+#define EDID_REVISION_FOUR 0x04
+
+#define EDID_VENDOR_ID_SIZE 4
+#define EDID_IEEE_REG_ID 0x0c03
+
+enum edid_sink_mode {
+ SINK_MODE_DVI,
+ SINK_MODE_HDMI
+};
+
+enum data_block_types {
+ RESERVED,
+ AUDIO_DATA_BLOCK,
+ VIDEO_DATA_BLOCK,
+ VENDOR_SPECIFIC_DATA_BLOCK,
+ SPEAKER_ALLOCATION_DATA_BLOCK,
+ VESA_DTC_DATA_BLOCK,
+ RESERVED2,
+ USE_EXTENDED_TAG
+};
+
+enum extended_data_block_types {
+ VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+ VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+ HDMI_VIDEO_DATA_BLOCK = 0x04,
+ Y420_VIDEO_DATA_BLOCK = 0x0E,
+ VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+ Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+ VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+ INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+struct disp_mode_info {
+ u32 video_format;
+ u32 video_3d_format; /* Flags like SIDE_BY_SIDE_HALF*/
+ bool rgb_support;
+ bool y420_support;
+};
+
+struct hdmi_edid_sink_data {
+ struct disp_mode_info disp_mode_list[HDMI_VFRMT_MAX];
+ u32 disp_multi_3d_mode_list[16];
+ u32 disp_multi_3d_mode_list_cnt;
+ u32 num_of_elements;
+ u32 preferred_video_format;
+};
+
+struct hdmi_edid_sink_caps {
+ u32 max_pclk_in_hz;
+ bool scdc_present;
+ bool scramble_support; /* scramble support for less than 340Mcsc */
+ bool read_req_support;
+ bool osd_disparity;
+ bool dual_view_support;
+ bool ind_view_support;
+};
+
+struct hdmi_edid_override_data {
+ int scramble;
+ int sink_mode;
+ int format;
+ int vic;
+};
+
+struct hdmi_edid_ctrl {
+ u8 pt_scan_info;
+ u8 it_scan_info;
+ u8 ce_scan_info;
+ u8 cea_blks;
+ u16 physical_address;
+ u32 video_resolution; /* selected by user */
+ u32 sink_mode; /* HDMI or DVI */
+ u32 default_vic;
+ u16 audio_latency;
+ u16 video_latency;
+ u32 present_3d;
+ u32 page_id;
+ u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+ int adb_size;
+ u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+ int sadb_size;
+ u8 edid_buf[MAX_EDID_SIZE];
+ char vendor_id[EDID_VENDOR_ID_SIZE];
+ bool keep_resv_timings;
+ bool edid_override;
+
+ struct hdmi_edid_sink_data sink_data;
+ struct hdmi_edid_init_data init_data;
+ struct hdmi_edid_sink_caps sink_caps;
+ struct hdmi_edid_override_data override_data;
+};
+
+static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl,
+ struct msm_hdmi_mode_timing_info *timing)
+{
+ if (!timing->supported ||
+ timing->pixel_freq > edid_ctrl->init_data.max_pclk_khz)
+ return false;
+
+ return true;
+}
+
+static int hdmi_edid_reset_parser(struct hdmi_edid_ctrl *edid_ctrl)
+{
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ /* reset res info read page */
+ edid_ctrl->page_id = MSM_HDMI_INIT_RES_PAGE;
+
+ /* reset sink mode to DVI as default */
+ edid_ctrl->sink_mode = SINK_MODE_DVI;
+
+ edid_ctrl->sink_data.num_of_elements = 0;
+
+ /* reset scan info data */
+ edid_ctrl->pt_scan_info = 0;
+ edid_ctrl->it_scan_info = 0;
+ edid_ctrl->ce_scan_info = 0;
+
+ /* reset 3d data */
+ edid_ctrl->present_3d = 0;
+
+ /* reset number of cea extension blocks to 0 */
+ edid_ctrl->cea_blks = 0;
+
+ /* reset resolution related sink data */
+ memset(&edid_ctrl->sink_data, 0, sizeof(edid_ctrl->sink_data));
+
+ /* reset audio related data */
+ memset(edid_ctrl->audio_data_block, 0,
+ sizeof(edid_ctrl->audio_data_block));
+ memset(edid_ctrl->spkr_alloc_data_block, 0,
+ sizeof(edid_ctrl->spkr_alloc_data_block));
+ edid_ctrl->adb_size = 0;
+ edid_ctrl->sadb_size = 0;
+
+ hdmi_edid_set_video_resolution(edid_ctrl, edid_ctrl->default_vic, true);
+
+ /* reset new resolution details */
+ if (!edid_ctrl->keep_resv_timings)
+ hdmi_reset_resv_timing_info();
+
+ return 0;
+}
+
+static struct hdmi_edid_ctrl *hdmi_edid_get_ctrl(struct device *dev)
+{
+ struct fb_info *fbi;
+ struct msm_fb_data_type *mfd;
+ struct mdss_panel_info *pinfo;
+
+ if (!dev) {
+ pr_err("invlid input\n");
+ goto error;
+ }
+
+ fbi = dev_get_drvdata(dev);
+
+ if (!fbi) {
+ pr_err("invlid fbi\n");
+ goto error;
+ }
+
+ mfd = (struct msm_fb_data_type *)fbi->par;
+ if (!mfd) {
+ pr_err("invlid mfd\n");
+ goto error;
+ }
+
+ pinfo = mfd->panel_info;
+ if (!pinfo) {
+ pr_err("invlid pinfo\n");
+ goto error;
+ }
+
+ return pinfo->edid_data;
+
+error:
+ return NULL;
+}
+
+static ssize_t hdmi_edid_sysfs_rda_audio_data_block(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int adb_size, adb_count;
+ ssize_t ret;
+ char *data = buf;
+
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ adb_count = 1;
+ adb_size = edid_ctrl->adb_size;
+ ret = sizeof(adb_count) + sizeof(adb_size) + adb_size;
+
+ if (ret > PAGE_SIZE) {
+ DEV_DBG("%s: Insufficient buffer size\n", __func__);
+ return 0;
+ }
+
+ /* Currently only extracting one audio data block */
+ memcpy(data, &adb_count, sizeof(adb_count));
+ data += sizeof(adb_count);
+ memcpy(data, &adb_size, sizeof(adb_size));
+ data += sizeof(adb_size);
+ memcpy(data, edid_ctrl->audio_data_block,
+ edid_ctrl->adb_size);
+
+ print_hex_dump(KERN_DEBUG, "AUDIO DATA BLOCK: ", DUMP_PREFIX_NONE,
+ 32, 8, buf, ret, false);
+
+ return ret;
+}
+static DEVICE_ATTR(audio_data_block, 0444,
+ hdmi_edid_sysfs_rda_audio_data_block,
+ NULL);
+
+static ssize_t hdmi_edid_sysfs_rda_spkr_alloc_data_block(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int sadb_size, sadb_count;
+ ssize_t ret;
+ char *data = buf;
+
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ sadb_count = 1;
+ sadb_size = edid_ctrl->sadb_size;
+ ret = sizeof(sadb_count) + sizeof(sadb_size) + sadb_size;
+
+ if (ret > PAGE_SIZE) {
+ DEV_DBG("%s: Insufficient buffer size\n", __func__);
+ return 0;
+ }
+
+ /* Currently only extracting one speaker allocation data block */
+ memcpy(data, &sadb_count, sizeof(sadb_count));
+ data += sizeof(sadb_count);
+ memcpy(data, &sadb_size, sizeof(sadb_size));
+ data += sizeof(sadb_size);
+ memcpy(data, edid_ctrl->spkr_alloc_data_block,
+ edid_ctrl->sadb_size);
+
+ print_hex_dump(KERN_DEBUG, "SPKR ALLOC DATA BLOCK: ", DUMP_PREFIX_NONE,
+ 32, 8, buf, ret, false);
+
+ return ret;
+}
+static DEVICE_ATTR(spkr_alloc_data_block, 0444,
+ hdmi_edid_sysfs_rda_spkr_alloc_data_block, NULL);
+
+static ssize_t hdmi_edid_sysfs_wta_modes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid ctrl\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (sscanf(buf, "%d %d %d %d",
+ &edid_ctrl->override_data.scramble,
+ &edid_ctrl->override_data.sink_mode,
+ &edid_ctrl->override_data.format,
+ &edid_ctrl->override_data.vic) != 4) {
+ DEV_ERR("could not read input\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ edid_ctrl->edid_override = true;
+ return ret;
+bail:
+ DEV_DBG("%s: reset edid override\n", __func__);
+ edid_ctrl->edid_override = false;
+error:
+ return ret;
+}
+
+static ssize_t hdmi_edid_sysfs_rda_modes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+ int i;
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+ u32 num_of_elements = 0;
+ struct disp_mode_info *video_mode;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ num_of_elements = edid_ctrl->sink_data.num_of_elements;
+ video_mode = edid_ctrl->sink_data.disp_mode_list;
+
+ if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+ num_of_elements = 1;
+ edid_ctrl->sink_data.disp_mode_list[0].video_format =
+ edid_ctrl->override_data.vic;
+ }
+
+ buf[0] = 0;
+ if (num_of_elements) {
+ for (i = 0; i < num_of_elements; i++) {
+ if (ret > 0)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ ",%d", video_mode[i].video_format);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "%d", video_mode[i].video_format);
+ }
+ } else {
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d",
+ edid_ctrl->video_resolution);
+ }
+
+ DEV_DBG("%s: '%s'\n", __func__, buf);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+ return ret;
+} /* hdmi_edid_sysfs_rda_modes */
+static DEVICE_ATTR(edid_modes, 0644, hdmi_edid_sysfs_rda_modes,
+ hdmi_edid_sysfs_wta_modes);
+
+static ssize_t hdmi_edid_sysfs_rda_res_info_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ u32 i, no_of_elem, offset = 0;
+ struct msm_hdmi_mode_timing_info info = {0};
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+ struct disp_mode_info *minfo = NULL;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ no_of_elem = edid_ctrl->sink_data.num_of_elements;
+ minfo = edid_ctrl->sink_data.disp_mode_list;
+
+ if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+ no_of_elem = 1;
+ minfo[0].video_format = edid_ctrl->override_data.vic;
+ }
+
+ for (i = 0; i < no_of_elem; i++) {
+ ret = hdmi_get_supported_mode(&info,
+ &edid_ctrl->init_data.ds_data,
+ minfo->video_format);
+
+ if (edid_ctrl->edid_override &&
+ (edid_ctrl->override_data.format > 0))
+ info.pixel_formats = edid_ctrl->override_data.format;
+ else
+ info.pixel_formats =
+ (minfo->rgb_support ?
+ MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
+ (minfo->y420_support ?
+ MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
+
+ minfo++;
+ if (ret || !info.supported)
+ continue;
+
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset,
+ "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ info.video_format, info.active_h,
+ info.front_porch_h, info.pulse_width_h,
+ info.back_porch_h, info.active_low_h,
+ info.active_v, info.front_porch_v,
+ info.pulse_width_v, info.back_porch_v,
+ info.active_low_v, info.pixel_freq,
+ info.refresh_rate, info.interlaced,
+ info.supported, info.ar,
+ info.pixel_formats);
+ }
+
+ return offset;
+}
+static DEVICE_ATTR(res_info_data, 0444, hdmi_edid_sysfs_rda_res_info_data,
+ NULL);
+
+static ssize_t hdmi_edid_sysfs_wta_res_info(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc, page_id;
+ u32 i = 0, j, page;
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+ struct msm_hdmi_mode_timing_info info = {0};
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = kstrtoint(buf, 10, &page_id);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ return rc;
+ }
+
+ if (page_id > MSM_HDMI_INIT_RES_PAGE) {
+ page = MSM_HDMI_INIT_RES_PAGE;
+ while (page < page_id) {
+ j = 1;
+ while (sizeof(info) * j < PAGE_SIZE) {
+ i++;
+ j++;
+ }
+ page++;
+ }
+ }
+
+ if (i < HDMI_VFRMT_MAX)
+ edid_ctrl->page_id = page_id;
+ else
+ DEV_ERR("%s: invalid page id\n", __func__);
+
+ DEV_DBG("%s: %d\n", __func__, edid_ctrl->page_id);
+ return ret;
+}
+
+static ssize_t hdmi_edid_sysfs_rda_res_info(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ u32 no_of_elem;
+ u32 i = 0, j, page;
+ char *buf_dbg = buf;
+ struct msm_hdmi_mode_timing_info info = {0};
+ struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+ u32 size_to_write = sizeof(info);
+ struct disp_mode_info *minfo = NULL;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ minfo = edid_ctrl->sink_data.disp_mode_list;
+ no_of_elem = edid_ctrl->sink_data.num_of_elements;
+
+ if (edid_ctrl->page_id > MSM_HDMI_INIT_RES_PAGE) {
+ page = MSM_HDMI_INIT_RES_PAGE;
+ while (page < edid_ctrl->page_id) {
+ j = 1;
+ while (sizeof(info) * j < PAGE_SIZE) {
+ i++;
+ j++;
+ minfo++;
+ }
+ page++;
+ }
+ }
+
+ if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+ no_of_elem = 1;
+ minfo[0].video_format = edid_ctrl->override_data.vic;
+ }
+
+ for (; i < no_of_elem && size_to_write < PAGE_SIZE; i++) {
+ ret = hdmi_get_supported_mode(&info,
+ &edid_ctrl->init_data.ds_data,
+ minfo->video_format);
+
+ if (edid_ctrl->edid_override &&
+ (edid_ctrl->override_data.format > 0))
+ info.pixel_formats = edid_ctrl->override_data.format;
+ else
+ info.pixel_formats =
+ (minfo->rgb_support ?
+ MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
+ (minfo->y420_support ?
+ MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
+
+ minfo++;
+ if (ret || !info.supported)
+ continue;
+
+ memcpy(buf, &info, sizeof(info));
+
+ buf += sizeof(info);
+ size_to_write += sizeof(info);
+ }
+
+ for (i = sizeof(info); i < size_to_write; i += sizeof(info)) {
+ struct msm_hdmi_mode_timing_info info_dbg = {0};
+
+ memcpy(&info_dbg, buf_dbg, sizeof(info_dbg));
+
+ DEV_DBG("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ info_dbg.video_format, info_dbg.active_h,
+ info_dbg.front_porch_h, info_dbg.pulse_width_h,
+ info_dbg.back_porch_h, info_dbg.active_low_h,
+ info_dbg.active_v, info_dbg.front_porch_v,
+ info_dbg.pulse_width_v, info_dbg.back_porch_v,
+ info_dbg.active_low_v, info_dbg.pixel_freq,
+ info_dbg.refresh_rate, info_dbg.interlaced,
+ info_dbg.supported, info_dbg.ar,
+ info_dbg.pixel_formats);
+
+ buf_dbg += sizeof(info_dbg);
+ }
+
+ return size_to_write - sizeof(info);
+}
+static DEVICE_ATTR(res_info, 0644, hdmi_edid_sysfs_rda_res_info,
+ hdmi_edid_sysfs_wta_res_info);
+
+/*
+ * Sysfs read handler for "edid_audio_latency": print the audio latency
+ * value parsed from the sink's EDID vendor specific data block.
+ */
+static ssize_t hdmi_edid_sysfs_rda_audio_latency(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->audio_latency);
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_audio_latency */
+static DEVICE_ATTR(edid_audio_latency, 0444,
+	hdmi_edid_sysfs_rda_audio_latency, NULL);
+
+/*
+ * Sysfs read handler for "edid_video_latency": print the video latency
+ * value parsed from the sink's EDID vendor specific data block.
+ */
+static ssize_t hdmi_edid_sysfs_rda_video_latency(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->video_latency);
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_video_latency */
+static DEVICE_ATTR(edid_video_latency, 0444,
+	hdmi_edid_sysfs_rda_video_latency, NULL);
+
+/*
+ * Sysfs read handler for "pa": print the sink's CEC physical address
+ * cached from the HDMI Vendor Specific Data Block. Root-readable only.
+ */
+static ssize_t hdmi_edid_sysfs_rda_physical_address(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->physical_address);
+	DEV_DBG("%s: '%d'\n", __func__, edid_ctrl->physical_address);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_physical_address */
+static DEVICE_ATTR(pa, 0400, hdmi_edid_sysfs_rda_physical_address, NULL);
+
+/*
+ * Sysfs read handler for "scan_info": print the preferred/IT/CE
+ * underscan capabilities parsed from the Video Capability Data Block,
+ * as "pt, it, ce".
+ */
+static ssize_t hdmi_edid_sysfs_rda_scan_info(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d, %d, %d\n",
+		edid_ctrl->pt_scan_info,
+		edid_ctrl->it_scan_info, edid_ctrl->ce_scan_info);
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_scan_info */
+static DEVICE_ATTR(scan_info, 0444, hdmi_edid_sysfs_rda_scan_info, NULL);
+
+/*
+ * Sysfs read handler for "edid_3d_modes": emit a comma-separated list of
+ * "<video format id>=<3D capability string>" for every parsed mode that
+ * advertises at least one 3D format, terminated with a newline.
+ */
+static ssize_t hdmi_edid_sysfs_rda_3d_modes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	int i;
+	char buff_3d[BUFF_SIZE_3D];
+
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	buf[0] = 0;
+	if (edid_ctrl->sink_data.num_of_elements) {
+		struct disp_mode_info *video_mode =
+			edid_ctrl->sink_data.disp_mode_list;
+
+		for (i = 0; i < edid_ctrl->sink_data.num_of_elements; i++) {
+			/* skip modes with no 3D capability bits set */
+			if (!video_mode[i].video_3d_format)
+				continue;
+			hdmi_get_video_3d_fmt_2string(
+				video_mode[i].video_3d_format,
+				buff_3d,
+				sizeof(buff_3d));
+			/* only entries after the first get a leading comma */
+			if (ret > 0)
+				ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+					",%d=%s", video_mode[i].video_format,
+					buff_3d);
+			else
+				ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+					"%d=%s", video_mode[i].video_format,
+					buff_3d);
+		}
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+	return ret;
+} /* hdmi_edid_sysfs_rda_3d_modes */
+static DEVICE_ATTR(edid_3d_modes, 0444, hdmi_edid_sysfs_rda_3d_modes, NULL);
+
+/*
+ * Sysfs read handler for "edid_raw_data": copy the raw EDID bytes out
+ * to userspace, capped at one page (the sysfs buffer size).
+ */
+static ssize_t hdmi_common_rda_edid_raw_data(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	u32 copy_size;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* never hand sysfs more than one page */
+	copy_size = sizeof(edid_ctrl->edid_buf);
+	if (copy_size > PAGE_SIZE)
+		copy_size = PAGE_SIZE;
+
+	memcpy(buf, edid_ctrl->edid_buf, copy_size);
+
+	return copy_size;
+} /* hdmi_common_rda_edid_raw_data */
+static DEVICE_ATTR(edid_raw_data, 0444, hdmi_common_rda_edid_raw_data, NULL);
+
+/*
+ * Sysfs write handler for "add_res": parse 15 space-separated timing
+ * values from userspace and register them as a reserved custom timing.
+ * On success keep_resv_timings is latched so the timing survives; any
+ * parse or registration failure clears it and returns -EFAULT.
+ */
+static ssize_t hdmi_edid_sysfs_wta_add_resolution(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	struct msm_hdmi_mode_timing_info timing = {0};
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Scan with "%u" directly into the u32 timing fields. The previous
+	 * code used "%lu" through (unsigned long *) casts, which stores
+	 * 8 bytes per conversion on 64-bit kernels and corrupts the
+	 * neighbouring struct members.
+	 */
+	rc = sscanf(buf,
+		"%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
+		&timing.active_h,
+		&timing.front_porch_h,
+		&timing.pulse_width_h,
+		&timing.back_porch_h,
+		&timing.active_low_h,
+		&timing.active_v,
+		&timing.front_porch_v,
+		&timing.pulse_width_v,
+		&timing.back_porch_v,
+		&timing.active_low_v,
+		&timing.pixel_freq,
+		&timing.refresh_rate,
+		&timing.interlaced,
+		&timing.supported,
+		&timing.ar);
+
+	if (rc != 15) {
+		DEV_ERR("%s: error reading buf\n", __func__);
+		goto err;
+	}
+
+	rc = hdmi_set_resv_timing_info(&timing);
+
+	if (!IS_ERR_VALUE((unsigned long)rc)) {
+		DEV_DBG("%s: added new res %d\n", __func__, rc);
+	} else {
+		DEV_ERR("%s: error adding new res %d\n", __func__, rc);
+		goto err;
+	}
+
+	edid_ctrl->keep_resv_timings = true;
+	return ret;
+
+err:
+	edid_ctrl->keep_resv_timings = false;
+	return -EFAULT;
+}
+static DEVICE_ATTR(add_res, 0200, NULL, hdmi_edid_sysfs_wta_add_resolution);
+
+/*
+ * All EDID-related sysfs nodes exported by this driver, registered as a
+ * single attribute group on the HDMI device.
+ */
+static struct attribute *hdmi_edid_fs_attrs[] = {
+	&dev_attr_edid_modes.attr,
+	&dev_attr_pa.attr,
+	&dev_attr_scan_info.attr,
+	&dev_attr_edid_3d_modes.attr,
+	&dev_attr_edid_raw_data.attr,
+	&dev_attr_audio_data_block.attr,
+	&dev_attr_spkr_alloc_data_block.attr,
+	&dev_attr_edid_audio_latency.attr,
+	&dev_attr_edid_video_latency.attr,
+	&dev_attr_res_info.attr,
+	&dev_attr_res_info_data.attr,
+	&dev_attr_add_res.attr,
+	NULL,
+};
+
+static struct attribute_group hdmi_edid_fs_attrs_group = {
+	.attrs = hdmi_edid_fs_attrs,
+};
+
+/*
+ * Scan the CEA extension's data block collection for the first block
+ * whose tag (top 3 bits of the header byte) matches @type. Returns a
+ * pointer to the block header and stores the payload length in @len,
+ * or NULL when no such block exists.
+ */
+static const u8 *hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	u32 pos = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	if (dbc_offset >= EDID_BLOCK_SIZE - EDID_DTD_LEN)
+		return NULL;
+	*len = 0;
+
+	/*
+	 * Byte 2 of the extension block: 4 means no non-DTD/data block
+	 * collection is present; 0 means neither a data block collection
+	 * nor DTD data is present.
+	 */
+	if (dbc_offset == 0 || dbc_offset == 4) {
+		DEV_WARN("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (pos < dbc_offset) {
+		/* header byte: bits 7:5 = tag, bits 4:0 = payload length */
+		u8 block_len = in_buf[pos] & 0x1F;
+		u8 block_type = in_buf[pos] >> 5;
+
+		if (block_type == type && pos + block_len <= dbc_offset) {
+			*len = block_len;
+			DEV_DBG("%s: EDID: block=%d found @ 0x%x w/ len=%d\n",
+				__func__, type, pos, block_len);
+
+			return in_buf + pos;
+		}
+		pos += 1 + block_len;
+	}
+	DEV_WARN("%s: EDID: type=%d block not found in EDID block\n",
+		__func__, type);
+
+	return NULL;
+} /* hdmi_edid_find_block */
+
+/*
+ * Mark every already-registered mode matching @video_format as also
+ * supporting YCbCr 4:2:0 sampling.
+ */
+static void hdmi_edid_set_y420_support(struct hdmi_edid_ctrl *edid_ctrl,
+	u32 video_format)
+{
+	struct disp_mode_info *mode;
+	u32 idx;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	for (idx = 0; idx < edid_ctrl->sink_data.num_of_elements; idx++) {
+		mode = &edid_ctrl->sink_data.disp_mode_list[idx];
+		if (mode->video_format != video_format)
+			continue;
+		mode->y420_support = true;
+		DEV_DBG("%s: Yuv420 supported for format %d\n",
+			__func__,
+			mode->video_format);
+	}
+}
+
+/*
+ * Append @video_format to the sink's mode list as a YCbCr 4:2:0-only
+ * mode, provided the format id is in range, resolvable to timing data
+ * and supported by the source.
+ */
+static void hdmi_edid_add_sink_y420_format(struct hdmi_edid_ctrl *edid_ctrl,
+	u32 video_format)
+{
+	struct msm_hdmi_mode_timing_info timing = {0};
+	struct hdmi_edid_sink_data *sink;
+	u32 ret;
+	u32 supported;
+
+	/*
+	 * Validate inputs before any lookups. The previous code called
+	 * hdmi_get_supported_mode() in the declaration initializers,
+	 * i.e. before the range check, and NULL-tested
+	 * "&edid_ctrl->sink_data", which can never be NULL.
+	 */
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (video_format >= HDMI_VFRMT_MAX) {
+		DEV_ERR("%s: video format: %s is not supported\n", __func__,
+			msm_hdmi_mode_2string(video_format));
+		return;
+	}
+
+	sink = &edid_ctrl->sink_data;
+	ret = hdmi_get_supported_mode(&timing,
+		&edid_ctrl->init_data.ds_data,
+		video_format);
+	supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+
+	DEV_DBG("%s: EDID: format: %d [%s], %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		supported ? "Supported" : "Not-Supported");
+
+	if (!ret && supported) {
+		sink->disp_mode_list[sink->num_of_elements].video_format
+			= video_format;
+		sink->disp_mode_list[sink->num_of_elements].y420_support
+			= true;
+		sink->num_of_elements++;
+	}
+}
+
+/*
+ * Parse a YCbCr 4:2:0 Video Data Block (Y420VDB). Every payload byte
+ * after the extended tag is an SVD; the low 7 bits carry the VIC, and
+ * each listed format is added to the sink mode list as 4:2:0-capable.
+ */
+static void hdmi_edid_parse_Y420VDB(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len = 0;
+	u8 i = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* low 5 bits of the header byte = payload length (incl. ext tag) */
+	len = in_buf[0] & 0x1F;
+	/* Offset to byte 3 */
+	in_buf += 2;
+	for (i = 0; i < len - 1; i++) {
+		/* mask off the top bit to get the 7-bit VIC */
+		video_format = *(in_buf + i) & 0x7F;
+		hdmi_edid_add_sink_y420_format(edid_ctrl, video_format);
+	}
+}
+
+/*
+ * Parse a YCbCr 4:2:0 Capability Map Data Block. Each bit of the map
+ * corresponds, in order, to an SVD in the regular Video Data Block; a
+ * set bit marks that mode as additionally supporting 4:2:0 sampling.
+ */
+static void hdmi_edid_parse_Y420CMDB(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u32 offset = 0;
+	u8 svd_len = 0;
+	u32 i = 0, j = 0;
+	u32 video_format = 0;
+	u32 len = 0;
+	const u8 *svd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	/* Byte 3 to L+1 contain SVDs */
+	offset += 2;
+	len = in_buf[0] & 0x1F;
+
+	/*
+	 * The Y420 Capability map data block should be parsed along with the
+	 * video data block. Each bit in Y420CMDB maps to each SVD in data
+	 * block
+	 */
+	svd = hdmi_edid_find_block(edid_ctrl->edid_buf+0x80, DBC_START_OFFSET,
+		VIDEO_DATA_BLOCK, &svd_len);
+
+	/*
+	 * Bail out when no Video Data Block exists: the previous code
+	 * incremented and dereferenced svd unconditionally, which is a
+	 * NULL pointer dereference on EDIDs without a VDB.
+	 */
+	if (!svd || !svd_len) {
+		DEV_DBG("%s: no video data block found\n", __func__);
+		return;
+	}
+
+	++svd;
+	for (i = 0; i < svd_len; i++, j++) {
+		video_format = *svd & 0x7F;
+		if (in_buf[offset] & (1 << j))
+			hdmi_edid_set_y420_support(edid_ctrl, video_format);
+
+		/*
+		 * NOTE(review): this byte-advance test looks wrong — j can
+		 * never reach 0x80 for svd_len <= 31, so only the first map
+		 * byte is consulted (likely intended j % 8 == 7), and *svd
+		 * never advances past the first SVD (likely intended svd[i]).
+		 * Kept as-is to preserve existing behavior; confirm against
+		 * the CTA-861 Y420CMDB definition before changing.
+		 */
+		if (j & 0x80) {
+			j = j/8;
+			offset++;
+			if (offset >= len)
+				break;
+		}
+	}
+}
+
+/*
+ * Parse an HDMI-Forum Video Data Block (extended tag HVDB) and fill in
+ * the HDMI 2.0 sink capability flags from its flag byte.
+ */
+static void hdmi_edid_parse_hvdb(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u32 len = 0;
+	struct hdmi_edid_sink_caps *sink_caps = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sink_caps = &edid_ctrl->sink_caps;
+	len = in_buf[0] & 0x1F;
+	if ((in_buf[1] != HDMI_VIDEO_DATA_BLOCK) ||
+		(len < 5)) {
+		DEV_ERR("%s: Not a HVDB tag code\n", __func__);
+		return;
+	}
+	/* informational, not an error: demoted from DEV_ERR */
+	DEV_DBG("FOUND HVDB flags = 0x%x\n", in_buf[4]);
+	/* max TMDS character rate is coded in multiples of 5 MHz */
+	sink_caps->max_pclk_in_hz = in_buf[3]*5000;
+	sink_caps->scdc_present = (in_buf[4] & 0x80) ? true : false;
+	sink_caps->read_req_support = (in_buf[4] & 0x40) ? true : false;
+	sink_caps->scramble_support = (in_buf[4] & 0x08) ? true : false;
+	sink_caps->ind_view_support = (in_buf[4] & 0x04) ? true : false;
+	sink_caps->dual_view_support = (in_buf[4] & 0x02) ? true : false;
+	/* fix: was "in_buf[4] * 0x01" — a multiply, not a bit test */
+	sink_caps->osd_disparity = (in_buf[4] & 0x01) ? true : false;
+}
+
+/*
+ * Walk every Use-Extended-Tag data block in the CEA extension and
+ * dispatch on the extended tag code (second byte of the block):
+ * video capability, HDMI-Forum video (HVDB), Y420CMDB and Y420VDB
+ * blocks are parsed; every other tag is logged and skipped.
+ */
+static void hdmi_edid_extract_extended_data_blocks(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len = 0;
+	u32 start_offset = 0;
+	u8 const *etag = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	do {
+		/* first pass starts at the data block collection start;
+		 * later passes resume just past the previous block */
+		if (!start_offset && !etag)
+			start_offset = DBC_START_OFFSET;
+		else
+			start_offset = etag - in_buf + len + 1;
+
+		etag = hdmi_edid_find_block(in_buf, start_offset,
+			USE_EXTENDED_TAG, &len);
+
+		if (!etag || !len) {
+			DEV_DBG("%s: No more extended block found\n", __func__);
+			break;
+		}
+
+		/* The extended data block should at least be 2 bytes long */
+		if (len < 2) {
+			DEV_DBG("%s: invalid block size\n", __func__);
+			continue;
+		}
+
+		/*
+		 * The second byte of the extended data block has the
+		 * extended tag code
+		 */
+		switch (etag[1]) {
+		case VIDEO_CAPABILITY_DATA_BLOCK:
+			/* Video Capability Data Block */
+			DEV_DBG("%s: EDID: VCDB=%02X %02X\n", __func__,
+				etag[1], etag[2]);
+
+			/*
+			 * Check if the sink specifies underscan
+			 * support for:
+			 * BIT 5: preferred video format
+			 * BIT 3: IT video format
+			 * BIT 1: CE video format
+			 */
+			edid_ctrl->pt_scan_info =
+				(etag[2] & (BIT(4) | BIT(5))) >> 4;
+			edid_ctrl->it_scan_info =
+				(etag[2] & (BIT(3) | BIT(2))) >> 2;
+			edid_ctrl->ce_scan_info =
+				etag[2] & (BIT(1) | BIT(0));
+			DEV_DBG("%s: Scan Info (pt|it|ce): (%d|%d|%d)",
+				__func__,
+				edid_ctrl->pt_scan_info,
+				edid_ctrl->it_scan_info,
+				edid_ctrl->ce_scan_info);
+			break;
+		case HDMI_VIDEO_DATA_BLOCK:
+			/* HDMI Video data block defined in HDMI 2.0 */
+			DEV_DBG("%s: EDID: HVDB found\n", __func__);
+			hdmi_edid_parse_hvdb(edid_ctrl, etag);
+			break;
+		case Y420_CAPABILITY_MAP_DATA_BLOCK:
+			DEV_DBG("%s found Y420CMDB byte 3 = 0x%x",
+				__func__, etag[2]);
+			hdmi_edid_parse_Y420CMDB(edid_ctrl, etag);
+			break;
+		case Y420_VIDEO_DATA_BLOCK:
+			DEV_DBG("%s found Y420VDB byte 3 = 0x%x",
+				__func__, etag[2]);
+			hdmi_edid_parse_Y420VDB(edid_ctrl, etag);
+			break;
+		default:
+			DEV_DBG("%s: Tag Code %d not supported\n",
+				__func__, etag[1]);
+			break;
+		}
+	} while (1);
+} /* hdmi_edid_extract_extended_data_blocks */
+
+/*
+ * Set edid_ctrl->present_3d from the 3D_present flag (bit 7) in the
+ * HDMI Vendor Specific Data Block; cleared when no valid VSDB exists.
+ */
+static void hdmi_edid_extract_3d_present(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	const u8 *vsd;
+	u8 len, offset;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->present_3d = 0;
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &len);
+	if (!vsd || !len || len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid vendor Specific Data Block\n",
+			__func__);
+		return;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	DEV_DBG("%s: EDID: 3D present @ 0x%x = %02x\n", __func__,
+		offset, vsd[offset]);
+
+	if (vsd[offset] & 0x80) {
+		/* 3D format indication present */
+		DEV_INFO("%s: EDID: 3D present, 3D-len=%d\n", __func__,
+			vsd[offset+1] & 0x1F);
+		edid_ctrl->present_3d = 1;
+	}
+} /* hdmi_edid_extract_3d_present */
+
+/*
+ * Collect every Audio Data Block in the CEA extension into
+ * edid_ctrl->audio_data_block (payloads concatenated, up to
+ * MAX_NUMBER_ADB blocks of at most MAX_AUDIO_DATA_BLOCK_SIZE each).
+ */
+static void hdmi_edid_extract_audio_data_blocks(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len = 0;
+	u8 adb_max = 0;
+	const u8 *adb = NULL;
+	u32 offset = DBC_START_OFFSET;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->adb_size = 0;
+
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+
+	do {
+		len = 0;
+		adb = hdmi_edid_find_block(in_buf, offset, AUDIO_DATA_BLOCK,
+			&len);
+
+		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+			adb_max >= MAX_NUMBER_ADB)) {
+			if (!edid_ctrl->adb_size)
+				DEV_DBG("%s: No/Invalid Audio Data Block\n",
+					__func__);
+			else
+				DEV_DBG("%s: No more valid ADB found\n",
+					__func__);
+			/*
+			 * Stop here. The previous code used "continue",
+			 * which re-ran find_block at the same offset when
+			 * adb was non-NULL but oversized (or the ADB limit
+			 * was hit) and therefore looped forever.
+			 */
+			break;
+		}
+
+		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+			adb + 1, len);
+		offset = (adb - in_buf) + 1 + len;
+
+		edid_ctrl->adb_size += len;
+		adb_max++;
+	} while (adb);
+
+} /* hdmi_edid_extract_audio_data_blocks */
+
+/*
+ * Copy the Speaker Allocation Data Block payload into the EDID control
+ * structure and log the speaker placement bits of its first byte.
+ */
+static void hdmi_edid_extract_speaker_allocation_data(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	const u8 *sadb;
+	u8 len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sadb = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+	if (!sadb || len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Speaker Allocation Data Block\n",
+			__func__);
+		return;
+	}
+
+	edid_ctrl->sadb_size = len;
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+
+	DEV_DBG("%s: EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		__func__, sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+} /* hdmi_edid_extract_speaker_allocation_data */
+
+/*
+ * Locate the HDMI-Forum VSDB (identified by the HF IEEE OUI) among the
+ * vendor specific data blocks and extract the HDMI 2.0 sink
+ * capabilities from it.
+ */
+static void hdmi_edid_extract_sink_caps(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len = 0, i = 0;
+	const u8 *vsd = NULL;
+	u32 vsd_offset = DBC_START_OFFSET;
+	u32 hf_ieee_oui = 0;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* Find HF-VSDB with HF-OUI */
+	do {
+		vsd = hdmi_edid_find_block(in_buf, vsd_offset,
+			VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+		if (!vsd || !len || len > MAX_DATA_BLOCK_SIZE) {
+			if (i == 0)
+				DEV_ERR("%s: VSDB not found\n", __func__);
+			else
+				DEV_DBG("%s: no more VSDB found\n", __func__);
+			break;
+		}
+
+		/*
+		 * Need at least 3 payload bytes to read the OUI; the
+		 * previous code indexed vsd[1..3] unconditionally.
+		 */
+		if (len >= 3) {
+			hf_ieee_oui = (vsd[1] << 16) | (vsd[2] << 8) | vsd[3];
+
+			if (hf_ieee_oui == HDMI_FORUM_IEEE_OUI) {
+				DEV_DBG("%s: found HF-VSDB\n", __func__);
+				break;
+			}
+
+			DEV_DBG("%s: Not a HF OUI 0x%x\n", __func__,
+				hf_ieee_oui);
+		}
+
+		i++;
+		vsd_offset = vsd - in_buf + len + 1;
+	} while (1);
+
+	if (!vsd) {
+		DEV_DBG("%s: HF-VSDB not found\n", __func__);
+		return;
+	}
+
+	/*
+	 * Capability bytes live at payload offsets 5 and 6; reject blocks
+	 * too short to contain them (previously read without a length
+	 * check).
+	 */
+	if (len < 6) {
+		DEV_DBG("%s: HF-VSDB too short\n", __func__);
+		return;
+	}
+
+	/* Max pixel clock is in multiples of 5Mhz. */
+	edid_ctrl->sink_caps.max_pclk_in_hz =
+		vsd[5]*5000000;
+	edid_ctrl->sink_caps.scdc_present =
+		(vsd[6] & 0x80) ? true : false;
+	edid_ctrl->sink_caps.scramble_support =
+		(vsd[6] & 0x08) ? true : false;
+	edid_ctrl->sink_caps.read_req_support =
+		(vsd[6] & 0x40) ? true : false;
+	edid_ctrl->sink_caps.osd_disparity =
+		(vsd[6] & 0x01) ? true : false;
+	edid_ctrl->sink_caps.dual_view_support =
+		(vsd[6] & 0x02) ? true : false;
+	edid_ctrl->sink_caps.ind_view_support =
+		(vsd[6] & 0x04) ? true : false;
+}
+
+/*
+ * Read the optional video/audio latency bytes from the HDMI VSDB. When
+ * the fields are absent — or the block is too short to hold them —
+ * both values are set to the "unknown" sentinel (u16)-1.
+ */
+static void hdmi_edid_extract_latency_fields(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+	/*
+	 * The Latency_Fields_Present flag is payload byte 8 and the
+	 * latency values are bytes 9 and 10, so require len >= 10 before
+	 * touching them (previously only len != 0 was checked before
+	 * indexing vsd[8..10]).
+	 */
+	if (vsd == NULL || len < 10 || len > MAX_DATA_BLOCK_SIZE ||
+		!(vsd[8] & BIT(7))) {
+		edid_ctrl->video_latency = (u16)-1;
+		edid_ctrl->audio_latency = (u16)-1;
+		DEV_DBG("%s: EDID: No audio/video latency present\n", __func__);
+	} else {
+		edid_ctrl->video_latency = vsd[9];
+		edid_ctrl->audio_latency = vsd[10];
+		DEV_DBG("%s: EDID: video-latency=%04x, audio-latency=%04x\n",
+			__func__, edid_ctrl->video_latency,
+			edid_ctrl->audio_latency);
+	}
+} /* hdmi_edid_extract_latency_fields */
+
+/*
+ * Extract the 24-bit IEEE OUI from the first Vendor Specific Data
+ * Block and cache the sink's CEC physical address. Returns the OUI, or
+ * 0 when no valid VSDB exists.
+ */
+static u32 hdmi_edid_extract_ieee_reg_id(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+	/*
+	 * The physical address occupies payload bytes 4 and 5, so a
+	 * usable HDMI VSDB is at least 5 bytes; the previous code only
+	 * checked len != 0 before indexing vsd[4], vsd[5] and vsd[7].
+	 */
+	if (vsd == NULL || len < 5 || len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Vendor Specific Data Block\n",
+			__func__);
+		return 0;
+	}
+
+	edid_ctrl->physical_address = ((u16)vsd[4] << 8) + (u16)vsd[5];
+
+	/* Max_TMDS_Clock (byte 7) is optional; only log when present */
+	if (len >= 7)
+		DEV_DBG("%s: EDID: VSD PhyAddr=%04x, MaxTMDS=%dMHz\n",
+			__func__, ((u32)vsd[4] << 8) + (u32)vsd[5],
+			(u32)vsd[7] * 5);
+
+	return ((u32)vsd[3] << 16) + ((u32)vsd[2] << 8) + (u32)vsd[1];
+} /* hdmi_edid_extract_ieee_reg_id */
+
+/*
+ * Decode the 3-letter manufacturer id from EDID bytes 8-9 into the
+ * NUL-terminated edid_ctrl->vendor_id string.
+ */
+static void hdmi_edid_extract_vendor_id(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	u32 id_codes;
+	char *vendor_id;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid_buf[8] << 8) |
+		edid_ctrl->edid_buf[9];
+
+	/* three 5-bit fields, each encoding a letter: 1='A' .. 26='Z' */
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = '\0';
+} /* hdmi_edid_extract_vendor_id */
+
+/*
+ * Return nonzero when the buffer starts with the fixed 8-byte EDID
+ * header 00 FF FF FF FF FF FF 00, zero otherwise.
+ */
+static u32 hdmi_edid_check_header(const u8 *edid_buf)
+{
+	static const u8 header[8] = {
+		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+	};
+	u32 i;
+
+	for (i = 0; i < 8; i++) {
+		if (edid_buf[i] != header[i])
+			return 0;
+	}
+
+	return 1;
+} /* hdmi_edid_check_header */
+
+/*
+ * Decode one 18-byte EDID Detailed Timing Descriptor (DTD) pointed to
+ * by @data_buf into an msm_hdmi_mode_timing_info and register it as a
+ * reserved timing. On success *disp_mode receives the allocated mode
+ * id; on any failure it is set to HDMI_VFRMT_UNKNOWN.
+ */
+static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *data_buf, u32 *disp_mode)
+{
+	u32 aspect_ratio_4_3 = false;
+	u32 aspect_ratio_5_4 = false;
+	u32 interlaced = false;
+	u32 active_h = 0;
+	u32 active_v = 0;
+	u32 blank_h = 0;
+	u32 blank_v = 0;
+	u32 img_size_h = 0;
+	u32 img_size_v = 0;
+	u32 pixel_clk = 0;
+	u32 front_porch_h = 0;
+	u32 front_porch_v = 0;
+	u32 pulse_width_h = 0;
+	u32 pulse_width_v = 0;
+	u32 active_low_h = 0;
+	u32 active_low_v = 0;
+	const u32 khz_to_hz = 1000;
+	u32 frame_data;
+	struct msm_hdmi_mode_timing_info timing = {0};
+	int rc;
+
+	/*
+	 * Pixel clock/ 10,000
+	 * LSB stored in byte 0 and MSB stored in byte 1
+	 */
+	pixel_clk = (u32) (data_buf[0x0] | (data_buf[0x1] << 8));
+
+	/* store pixel clock in /1000 terms */
+	pixel_clk *= 10;
+
+	/*
+	 * byte 0x8 -- Horizontal Front Porch - contains lower 8 bits
+	 * byte 0xb (bits 6, 7) -- contains upper 2 bits
+	 */
+	front_porch_h = (u32) (data_buf[0x8] |
+		(data_buf[0xb] & (0x3 << 6)) << 2);
+
+	/*
+	 * byte 0x9 -- Horizontal pulse width - contains lower 8 bits
+	 * byte 0xb (bits 4, 5) -- contains upper 2 bits
+	 */
+	pulse_width_h = (u32) (data_buf[0x9] |
+		(data_buf[0xb] & (0x3 << 4)) << 4);
+
+	/*
+	 * byte 0xa -- Vertical front porch -- stored in Upper Nibble,
+	 * contains lower 4 bits.
+	 * byte 0xb (bits 2, 3) -- contains upper 2 bits
+	 */
+	front_porch_v = (u32) (((data_buf[0xa] & (0xF << 4)) >> 4) |
+		(data_buf[0xb] & (0x3 << 2)) << 2);
+
+	/*
+	 * byte 0xa -- Vertical pulse width -- stored in Lower Nibble,
+	 * contains lower 4 bits.
+	 * byte 0xb (bits 0, 1) -- contains upper 2 bits
+	 */
+	pulse_width_v = (u32) ((data_buf[0xa] & 0xF) |
+		((data_buf[0xb] & 0x3) << 4));
+
+	/*
+	 * * See VESA Spec
+	 * * EDID_TIMING_DESC_UPPER_H_NIBBLE[0x4]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Upper 4 bit for each H
+	 *   active/blank field
+	 * * EDID_TIMING_DESC_H_ACTIVE[0x2]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H active
+	 */
+	active_h = ((((u32)data_buf[0x4] >> 0x4) & 0xF) << 8)
+		| data_buf[0x2];
+
+	/*
+	 * EDID_TIMING_DESC_H_BLANK[0x3]: Relative Offset to the EDID detailed
+	 * timing descriptors - H blank
+	 */
+	blank_h = (((u32)data_buf[0x4] & 0xF) << 8)
+		| data_buf[0x3];
+
+	/*
+	 * * EDID_TIMING_DESC_UPPER_V_NIBBLE[0x7]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Upper 4 bit for each V
+	 *   active/blank field
+	 * * EDID_TIMING_DESC_V_ACTIVE[0x5]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V active
+	 */
+	active_v = ((((u32)data_buf[0x7] >> 0x4) & 0xF) << 8)
+		| data_buf[0x5];
+
+	/*
+	 * EDID_TIMING_DESC_V_BLANK[0x6]: Relative Offset to the EDID
+	 * detailed timing descriptors - V blank
+	 */
+	blank_v = (((u32)data_buf[0x7] & 0xF) << 8)
+		| data_buf[0x6];
+
+	/*
+	 * * EDID_TIMING_DESC_IMAGE_SIZE_UPPER_NIBBLE[0xE]: Relative Offset
+	 *   to the EDID detailed timing descriptors - Image Size upper
+	 *   nibble V and H
+	 * * EDID_TIMING_DESC_H_IMAGE_SIZE[0xC]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H image size
+	 * * EDID_TIMING_DESC_V_IMAGE_SIZE[0xD]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V image size
+	 */
+	img_size_h = ((((u32)data_buf[0xE] >> 0x4) & 0xF) << 8)
+		| data_buf[0xC];
+	img_size_v = (((u32)data_buf[0xE] & 0xF) << 8)
+		| data_buf[0xD];
+
+	/*
+	 * aspect ratio as 4:3 if within specificed range, rather than being
+	 * absolute value
+	 */
+	aspect_ratio_4_3 = (abs(img_size_h * 3 - img_size_v * 4) < 5) ? 1 : 0;
+
+	aspect_ratio_5_4 = (abs(img_size_h * 4 - img_size_v * 5) < 5) ? 1 : 0;
+
+	/*
+	 * EDID_TIMING_DESC_INTERLACE[0x11:7]: Relative Offset to the EDID
+	 * detailed timing descriptors - Interlace flag
+	 */
+	DEV_DBG("%s: Interlaced mode byte data_buf[0x11]=[%x]\n", __func__,
+		data_buf[0x11]);
+
+	/*
+	 * CEA 861-D: interlaced bit is bit[7] of byte[0x11]
+	 */
+	interlaced = (data_buf[0x11] & 0x80) >> 7;
+
+	active_low_v = ((data_buf[0x11] & (0x7 << 2)) >> 2) == 0x7 ? 0 : 1;
+
+	active_low_h = ((data_buf[0x11] & BIT(1)) &&
+		(data_buf[0x11] & BIT(4))) ? 0 : 1;
+
+	/* total pixels per frame; zero means a malformed descriptor */
+	frame_data = (active_h + blank_h) * (active_v + blank_v);
+
+	if (frame_data) {
+		int refresh_rate_khz = (pixel_clk * khz_to_hz) / frame_data;
+
+		timing.active_h = active_h;
+		timing.front_porch_h = front_porch_h;
+		timing.pulse_width_h = pulse_width_h;
+		timing.back_porch_h = blank_h -
+			(front_porch_h + pulse_width_h);
+		timing.active_low_h = active_low_h;
+		timing.active_v = active_v;
+		timing.front_porch_v = front_porch_v;
+		timing.pulse_width_v = pulse_width_v;
+		timing.back_porch_v = blank_v -
+			(front_porch_v + pulse_width_v);
+		timing.active_low_v = active_low_v;
+		timing.pixel_freq = pixel_clk;
+		/* refresh_rate stored as Hz * 1000 (debug print divides) */
+		timing.refresh_rate = refresh_rate_khz * khz_to_hz;
+		timing.interlaced = interlaced;
+		timing.supported = true;
+		timing.ar = aspect_ratio_4_3 ? HDMI_RES_AR_4_3 :
+			(aspect_ratio_5_4 ? HDMI_RES_AR_5_4 :
+				HDMI_RES_AR_16_9);
+
+		DEV_DBG("%s: new res: %dx%d%s@%dHz\n", __func__,
+			timing.active_h, timing.active_v,
+			interlaced ? "i" : "p",
+			timing.refresh_rate / khz_to_hz);
+
+		rc = hdmi_set_resv_timing_info(&timing);
+	} else {
+		DEV_ERR("%s: Invalid frame data\n", __func__);
+		rc = -EINVAL;
+	}
+
+	if (!IS_ERR_VALUE((unsigned long)rc)) {
+		*disp_mode = rc;
+		DEV_DBG("%s: DTD mode found: %d\n", __func__, *disp_mode);
+	} else {
+		*disp_mode = HDMI_VFRMT_UNKNOWN;
+		DEV_ERR("%s: error adding mode from DTD: %d\n", __func__, rc);
+	}
+} /* hdmi_edid_detail_desc */
+
+/*
+ * OR the given 3D capability bits into the already-registered mode
+ * matching @video_format, then log whether a matching mode was found.
+ */
+static void hdmi_edid_add_sink_3d_format(struct hdmi_edid_sink_data *sink_data,
+	u32 video_format, u32 video_3d_format)
+{
+	char fmt_str[BUFF_SIZE_3D];
+	u32 found = false;
+	int idx;
+
+	for (idx = 0; idx < sink_data->num_of_elements; idx++) {
+		if (sink_data->disp_mode_list[idx].video_format !=
+			video_format)
+			continue;
+		sink_data->disp_mode_list[idx].video_3d_format |=
+			video_3d_format;
+		found = true;
+		break;
+	}
+
+	hdmi_get_video_3d_fmt_2string(video_3d_format, fmt_str,
+		sizeof(fmt_str));
+
+	DEV_DBG("%s: EDID[3D]: format: %d [%s], %s %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		fmt_str, found ? "added" : "NOT added");
+} /* hdmi_edid_add_sink_3d_format */
+
+/*
+ * Append @video_format to the sink's mode list as an RGB-capable mode,
+ * provided the format id is in range, resolvable to timing data and
+ * supported by the source.
+ */
+static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
+	u32 video_format)
+{
+	struct msm_hdmi_mode_timing_info timing = {0};
+	struct hdmi_edid_sink_data *sink_data;
+	struct disp_mode_info *disp_mode_list;
+	u32 ret;
+	u32 supported;
+
+	/*
+	 * Range-check the format before using it; the previous code
+	 * called hdmi_get_supported_mode() in the declaration
+	 * initializers, i.e. before this validation.
+	 */
+	if (video_format >= HDMI_VFRMT_MAX) {
+		DEV_ERR("%s: video format: %s is not supported\n", __func__,
+			msm_hdmi_mode_2string(video_format));
+		return;
+	}
+
+	sink_data = &edid_ctrl->sink_data;
+	disp_mode_list = sink_data->disp_mode_list;
+	ret = hdmi_get_supported_mode(&timing,
+		&edid_ctrl->init_data.ds_data,
+		video_format);
+	supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+
+	DEV_DBG("%s: EDID: format: %d [%s], %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		supported ? "Supported" : "Not-Supported");
+
+	if (!ret && supported) {
+		/* todo: MHL */
+		disp_mode_list[sink_data->num_of_elements].video_format =
+			video_format;
+		disp_mode_list[sink_data->num_of_elements].rgb_support =
+			true;
+		sink_data->num_of_elements++;
+	}
+} /* hdmi_edid_add_sink_video_format */
+
+/*
+ * Parse the 3D portion of the HDMI Vendor Specific Data Block:
+ * 3D_Multi_present, the optional 3D_Structure_ALL/3D_MASK words, and
+ * the per-VIC 3D_Structure entries. Each capability found is added to
+ * the matching mode in @sink_data. Returns 0 on success, -ENXIO when
+ * no valid VSDB exists, -ETOOSMALL when the block ends prematurely.
+ */
+static int hdmi_edid_get_display_vsd_3d_mode(const u8 *data_buf,
+	struct hdmi_edid_sink_data *sink_data, u32 num_of_cea_blocks)
+{
+	u8 len, offset, present_multi_3d, hdmi_vic_len;
+	int hdmi_3d_len;
+	u16 structure_all, structure_mask;
+	const u8 *vsd = num_of_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
+			VENDOR_SPECIFIC_DATA_BLOCK, &len) : NULL;
+	int i;
+
+	if (vsd == NULL || len == 0 || len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Vendor Specific Data Block\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	if (offset >= len - 1)
+		return -ETOOSMALL;
+
+	/* 3D_Multi_present field: bits 6:5 of the 3D flags byte */
+	present_multi_3d = (vsd[offset] & 0x60) >> 5;
+
+	offset += 1;
+
+	hdmi_vic_len = (vsd[offset] >> 5) & 0x7;
+	hdmi_3d_len = vsd[offset] & 0x1F;
+	DEV_DBG("%s: EDID[3D]: HDMI_VIC_LEN = %d, HDMI_3D_LEN = %d\n", __func__,
+		hdmi_vic_len, hdmi_3d_len);
+
+	/* skip past the HDMI_VIC list to the 3D data */
+	offset += (hdmi_vic_len + 1);
+	if (offset >= len - 1)
+		return -ETOOSMALL;
+
+	if (present_multi_3d == 1 || present_multi_3d == 2) {
+		DEV_DBG("%s: EDID[3D]: multi 3D present (%d)\n", __func__,
+			present_multi_3d);
+		/* 3d_structure_all */
+		structure_all = (vsd[offset] << 8) | vsd[offset + 1];
+		offset += 2;
+		if (offset >= len - 1)
+			return -ETOOSMALL;
+		hdmi_3d_len -= 2;
+		if (present_multi_3d == 2) {
+			/* 3d_structure_mask */
+			structure_mask = (vsd[offset] << 8) | vsd[offset + 1];
+			offset += 2;
+			hdmi_3d_len -= 2;
+		} else
+			/* no mask present: capabilities apply to all modes */
+			structure_mask = 0xffff;
+
+		i = 0;
+		while (i < 16) {
+			if (i >= sink_data->disp_multi_3d_mode_list_cnt)
+				break;
+
+			if (!(structure_mask & BIT(i))) {
+				++i;
+				continue;
+			}
+
+			/* BIT0: FRAME PACKING */
+			if (structure_all & BIT(0))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					FRAME_PACKING);
+
+			/* BIT6: TOP AND BOTTOM */
+			if (structure_all & BIT(6))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					TOP_AND_BOTTOM);
+
+			/* BIT8: SIDE BY SIDE HALF */
+			if (structure_all & BIT(8))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					SIDE_BY_SIDE_HALF);
+
+			++i;
+		}
+	}
+
+	/* per-VIC entries: upper nibble = 2D_VIC_order, lower = structure */
+	i = 0;
+	while (hdmi_3d_len > 0) {
+		if (offset >= len - 1)
+			return -ETOOSMALL;
+		DEV_DBG("%s: EDID: 3D_Structure_%d @ 0x%x: %02x\n",
+			__func__, i + 1, offset, vsd[offset]);
+		if ((vsd[offset] >> 4) >=
+			sink_data->disp_multi_3d_mode_list_cnt) {
+			/* structures >= 8 carry an extra 3D_Detail byte */
+			if ((vsd[offset] & 0x0F) >= 8) {
+				offset += 1;
+				hdmi_3d_len -= 1;
+				DEV_DBG("%s:EDID:3D_Detail_%d @ 0x%x: %02x\n",
+					__func__, i + 1, offset,
+					vsd[min_t(u32, offset, (len - 1))]);
+			}
+			i += 1;
+			offset += 1;
+			hdmi_3d_len -= 1;
+			continue;
+		}
+
+		switch (vsd[offset] & 0x0F) {
+		case 0:
+			/* 0000b: FRAME PACKING */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				FRAME_PACKING);
+			break;
+		case 6:
+			/* 0110b: TOP AND BOTTOM */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				TOP_AND_BOTTOM);
+			break;
+		case 8:
+			/* 1000b: SIDE BY SIDE HALF */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				SIDE_BY_SIDE_HALF);
+			break;
+		}
+		if ((vsd[offset] & 0x0F) >= 8) {
+			offset += 1;
+			hdmi_3d_len -= 1;
+			DEV_DBG("%s: EDID[3D]: 3D_Detail_%d @ 0x%x: %02x\n",
+				__func__, i + 1, offset,
+				vsd[min_t(u32, offset, (len - 1))]);
+		}
+		i += 1;
+		offset += 1;
+		hdmi_3d_len -= 1;
+	}
+	return 0;
+} /* hdmi_edid_get_display_vsd_3d_mode */
+
+/*
+ * Extract the HDMI extended video formats (HDMI VICs, e.g. 4K modes)
+ * advertised in the HDMI VSDB and add them to the sink mode list.
+ */
+static void hdmi_edid_get_extended_video_formats(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 db_len, offset, i;
+	u8 hdmi_vic_len;
+	u32 video_format;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &db_len);
+
+	if (vsd == NULL || db_len == 0 || db_len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Vendor Specific Data Block\n",
+			__func__);
+		return;
+	}
+
+	/*
+	 * The HDMI_Video_present flag lives in payload byte 8; reject
+	 * blocks too short to contain it (the previous code indexed
+	 * vsd[8] without checking db_len).
+	 */
+	if (db_len < 8) {
+		DEV_DBG("%s: VSDB too short for extended video formats\n",
+			__func__);
+		return;
+	}
+
+	/* check if HDMI_Video_present flag is set or not */
+	if (!(vsd[8] & BIT(5))) {
+		DEV_DBG("%s: extended vfmts are not supported by the sink.\n",
+			__func__);
+		return;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	if ((u32)offset + 1 > db_len)
+		return;
+
+	hdmi_vic_len = vsd[offset + 1] >> 5;
+	if (hdmi_vic_len) {
+		DEV_DBG("%s: EDID: EVFRMT @ 0x%x of block 3, len = %02x\n",
+			__func__, offset, hdmi_vic_len);
+
+		for (i = 0; i < hdmi_vic_len; i++) {
+			/* stay within the advertised block length */
+			if ((u32)offset + 2 + i > db_len)
+				break;
+			video_format = HDMI_VFRMT_END + vsd[offset + 2 + i];
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+		}
+	}
+} /* hdmi_edid_get_extended_video_formats */
+
+/*
+ * hdmi_edid_parse_et3() - parse the Established Timings III descriptor.
+ * @edid_ctrl: EDID parser context.
+ * @edid_blk0: pointer to EDID block 0.
+ *
+ * Only valid for EDID 1.4 (revision 4).  Walks the four 18-byte display
+ * descriptors in block 0 looking for descriptor tag 0xF7 (ET-III) and
+ * registers the DMT modes whose support bits are set.
+ */
+static void hdmi_edid_parse_et3(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *edid_blk0)
+{
+	u8 start = DTD_OFFSET, i = 0;
+	struct hdmi_edid_sink_data *sink_data = NULL;
+
+	if (!edid_ctrl || !edid_blk0) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* NOTE(review): sink_data is assigned but never used here */
+	sink_data = &edid_ctrl->sink_data;
+
+	/* check if the EDID revision is 4 (version 1.4) */
+	if (edid_blk0[REVISION_OFFSET] != EDID_REVISION_FOUR)
+		return;
+
+	/* Check each of 4 - 18 bytes descriptors */
+	while (i < DTD_MAX) {
+		u8 iter = start;
+		u32 header_1 = 0;
+		u8 header_2 = 0;
+
+		/* first 4 bytes: 0x00 0x00 0x00 0xF7 marks an ET-III block */
+		header_1 = edid_blk0[iter++];
+		header_1 = header_1 << 8 | edid_blk0[iter++];
+		header_1 = header_1 << 8 | edid_blk0[iter++];
+		header_1 = header_1 << 8 | edid_blk0[iter++];
+		header_2 = edid_blk0[iter];
+
+		if (header_1 != 0x000000F7 || header_2 != 0x00)
+			goto loop_end;
+
+		/* VESA DMT Standard Version (0x0A)*/
+		iter++;
+
+		/* First set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(3)) {
+			pr_debug("%s: DMT 848x480@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_848x480p60_16_9);
+		}
+
+		/* Second set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(1)) {
+			pr_debug("%s: DMT 1280x1024@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1280x1024p60_5_4);
+		}
+
+		if (edid_blk0[iter] & BIT(3)) {
+			pr_debug("%s: DMT 1280x960@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1280x960p60_4_3);
+		}
+
+		/* Third set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(1)) {
+			pr_debug("%s: DMT 1400x1050@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1400x1050p60_4_3);
+		}
+
+		if (edid_blk0[iter] & BIT(5)) {
+			pr_debug("%s: DMT 1440x900@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1440x900p60_16_10);
+		}
+
+		if (edid_blk0[iter] & BIT(7)) {
+			pr_debug("%s: DMT 1360x768@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1360x768p60_16_9);
+		}
+
+		/* Fourth set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(2)) {
+			pr_debug("%s: DMT 1600x1200@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1600x1200p60_4_3);
+		}
+
+		if (edid_blk0[iter] & BIT(5)) {
+			pr_debug("%s: DMT 1680x1050@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1680x1050p60_16_10);
+		}
+
+		/* Fifth set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(0)) {
+			pr_debug("%s: DMT 1920x1200@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1920x1200p60_16_10);
+		}
+
+loop_end:
+		i++;
+		start += DTD_SIZE;
+	}
+}
+
+/*
+ * hdmi_edid_get_display_mode() - build the sink's supported-mode list.
+ * @edid_ctrl: EDID parser context (edid_buf and cea_blks already set).
+ *
+ * Collects video formats from, in order: the CEA Video Data Block (SVDs),
+ * or the block-0/block-1 detailed timing descriptors when no usable VDB
+ * exists; the standard timing descriptors; Established Timings I/II/III;
+ * HDMI-VIC extended formats; and mandatory/VSD 3D formats when the sink
+ * advertises 3D.  Guarantees 640x480p60 as a fallback mode.
+ */
+static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	u8 i = 0, offset = 0, std_blk = 0;
+	u32 video_format = HDMI_VFRMT_640x480p60_4_3;
+	u32 has480p = false;
+	u8 len = 0;
+	u8 num_of_cea_blocks;
+	u8 *data_buf;
+	int rc;
+	const u8 *edid_blk0 = NULL;
+	const u8 *edid_blk1 = NULL;
+	const u8 *svd = NULL;
+	u32 has60hz_mode = false;
+	u32 has50hz_mode = false;
+	bool read_block0_res = false;
+	struct hdmi_edid_sink_data *sink_data = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	data_buf = edid_ctrl->edid_buf;
+	num_of_cea_blocks = edid_ctrl->cea_blks;
+
+	edid_blk0 = &data_buf[0x0];
+	edid_blk1 = &data_buf[0x80];
+	svd = num_of_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
+			VIDEO_DATA_BLOCK, &len) : NULL;
+
+	/* a CEA block with a bad/empty VDB: fall back to block-0 DTDs */
+	if (num_of_cea_blocks && (len == 0 || len > MAX_DATA_BLOCK_SIZE)) {
+		DEV_DBG("%s: fall back to block 0 res\n", __func__);
+		svd = NULL;
+		read_block0_res = true;
+	}
+
+	sink_data = &edid_ctrl->sink_data;
+
+	sink_data->disp_multi_3d_mode_list_cnt = 0;
+	if (svd != NULL) {
+		++svd;
+		for (i = 0; i < len; ++i, ++svd) {
+			/*
+			 * VIC is the low 7 bits of the SVD byte (bit 7 is
+			 * the "native" flag).  NOTE(review): an older
+			 * comment here said "subtract 1" (CEA VICs are
+			 * 1-based) but the code does not subtract; confirm
+			 * the driver's format table is 1-based as well.
+			 */
+			video_format = (*svd & 0x7F);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+			/* Make a note of the preferred video format */
+			if (i == 0)
+				sink_data->preferred_video_format =
+					video_format;
+
+			/* first 16 SVDs are candidates for VSD 3D mapping */
+			if (i < 16) {
+				sink_data->disp_multi_3d_mode_list[i]
+					= video_format;
+				sink_data->disp_multi_3d_mode_list_cnt++;
+			}
+
+			if (video_format <= HDMI_VFRMT_1920x1080p60_16_9 ||
+				video_format == HDMI_VFRMT_2880x480p60_4_3 ||
+				video_format == HDMI_VFRMT_2880x480p60_16_9)
+				has60hz_mode = true;
+
+			if ((video_format >= HDMI_VFRMT_720x576p50_4_3 &&
+			     video_format <= HDMI_VFRMT_1920x1080p50_16_9) ||
+				video_format == HDMI_VFRMT_2880x576p50_4_3 ||
+				video_format == HDMI_VFRMT_2880x576p50_16_9 ||
+				video_format == HDMI_VFRMT_1920x1250i50_16_9)
+				has50hz_mode = true;
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+		}
+	} else if (!num_of_cea_blocks || read_block0_res) {
+		/* Detailed timing descriptors */
+		u32 desc_offset = 0;
+		/*
+		 * * Maximum 4 timing descriptor in block 0 - No CEA
+		 *   extension in this case
+		 * * EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
+		 *   descriptor
+		 * * EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed
+		 *   timing descriptor has block size of 18
+		 */
+		while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(edid_ctrl,
+				edid_blk0+0x36+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+	} else if (num_of_cea_blocks == 1) {
+		u32 desc_offset = 0;
+
+		/*
+		 * Read from both block 0 and block 1
+		 * Read EDID block[0] as above
+		 */
+		while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(edid_ctrl,
+				edid_blk0+0x36+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+
+		/*
+		 * * Parse block 1 - CEA extension byte offset of first
+		 *   detailed timing generation - offset is relevant to
+		 *   the offset of block 1
+		 * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
+		 *   extension first timing desc - indicate the offset of
+		 *   the first detailed timing descriptor
+		 * * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
+		 */
+		desc_offset = edid_blk1[0x02];
+		while (edid_blk1[desc_offset] != 0) {
+			hdmi_edid_detail_desc(edid_ctrl,
+				edid_blk1+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+	}
+
+	/* scan the 8 standard timing descriptors at block-0 offset 0x26 */
+	std_blk = 0;
+	offset = 0;
+	while (std_blk < 8) {
+		/* 0x81 0x80: 1280x1024@60 (108 MHz) */
+		if ((edid_blk0[0x26 + offset] == 0x81) &&
+		    (edid_blk0[0x26 + offset + 1] == 0x80)) {
+			pr_debug("%s: 108MHz: off=[%x] stdblk=[%x]\n",
+				__func__, offset, std_blk);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1280x1024p60_5_4);
+		}
+		/* 0x61 0x40: 1024x768@60 (65 MHz) */
+		if ((edid_blk0[0x26 + offset] == 0x61) &&
+		    (edid_blk0[0x26 + offset + 1] == 0x40)) {
+			pr_debug("%s: 65MHz: off=[%x] stdblk=[%x]\n",
+				__func__, offset, std_blk);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1024x768p60_4_3);
+			break;
+		}
+		offset += 2;
+
+		std_blk++;
+	}
+
+	/* Established Timing I */
+	if (edid_blk0[0x23] & BIT(0)) {
+		pr_debug("%s: DMT: ETI: HDMI_VFRMT_800x600_4_3\n", __func__);
+		hdmi_edid_add_sink_video_format(edid_ctrl,
+			HDMI_VFRMT_800x600p60_4_3);
+	}
+
+	/* Established Timing II */
+	if (edid_blk0[0x24] & BIT(3)) {
+		pr_debug("%s: DMT: ETII: HDMI_VFRMT_1024x768p60_4_3\n",
+			__func__);
+		hdmi_edid_add_sink_video_format(edid_ctrl,
+			HDMI_VFRMT_1024x768p60_4_3);
+	}
+
+	/* Established Timing III */
+	hdmi_edid_parse_et3(edid_ctrl, data_buf);
+
+	hdmi_edid_get_extended_video_formats(edid_ctrl, data_buf+0x80);
+
+	/* mandatory 3D formats per HDMI 1.4 */
+	if (edid_ctrl->present_3d) {
+		if (has60hz_mode) {
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080p24_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1280x720p60_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080i60_16_9,
+				SIDE_BY_SIDE_HALF);
+		}
+
+		if (has50hz_mode) {
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080p24_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1280x720p50_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080i50_16_9,
+				SIDE_BY_SIDE_HALF);
+		}
+
+		/* 3d format described in Vendor Specific Data */
+		rc = hdmi_edid_get_display_vsd_3d_mode(data_buf, sink_data,
+			num_of_cea_blocks);
+		if (!rc)
+			pr_debug("%s: 3D formats in VSD\n", __func__);
+	}
+
+	/*
+	 * Need to add default 640 by 480 timings, in case not described
+	 * in the EDID structure.
+	 * All DTV sink devices should support this mode
+	 */
+	if (!has480p)
+		hdmi_edid_add_sink_video_format(edid_ctrl,
+			HDMI_VFRMT_640x480p60_4_3);
+} /* hdmi_edid_get_display_mode */
+
+/*
+ * hdmi_edid_get_raw_data() - copy the raw EDID bytes to a caller buffer.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *).
+ * @buf: destination buffer.
+ * @size: destination capacity; copy is clamped to the internal buffer size.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ * NOTE(review): the return type is u32, so -EINVAL is returned as a large
+ * unsigned value; callers must not test "< 0".  Verify call sites.
+ */
+u32 hdmi_edid_get_raw_data(void *input, u8 *buf, u32 size)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *) input;
+	u32 ret = 0;
+	u32 buf_size;
+
+	if (!edid_ctrl || !buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	buf_size = sizeof(edid_ctrl->edid_buf);
+
+	/* never copy more than we hold */
+	size = min(size, buf_size);
+
+	memcpy(buf, edid_ctrl->edid_buf, size);
+
+end:
+	return ret;
+}
+
+/*
+ * hdmi_edid_add_resv_timings() - add reserved (custom) timings to the
+ * sink mode list.  Stops at the first invalid reserved slot.
+ */
+static void hdmi_edid_add_resv_timings(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	int i = HDMI_VFRMT_RESERVE1;
+
+	while (i <= RESERVE_VFRMT_END) {
+		if (hdmi_is_valid_resv_timing(i))
+			hdmi_edid_add_sink_video_format(edid_ctrl, i);
+		else
+			break;
+		i++;
+	}
+}
+
+/*
+ * hdmi_edid_parser() - top-level EDID parse entry point.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *).
+ *
+ * Validates block 0, determines DVI vs HDMI sink mode from the CEA
+ * extension's IEEE registration id, extracts capability blocks, then
+ * builds the display mode list.  On a bad EDID header the mode list is
+ * seeded with the current resolution so the display still lights up.
+ *
+ * Return: 0 on success, -EINVAL on bad input, -EPROTO on bad header.
+ */
+int hdmi_edid_parser(void *input)
+{
+	u8 *edid_buf = NULL;
+	u32 num_of_cea_blocks = 0;
+	u16 ieee_reg_id;
+	int status = 0;
+	u32 i = 0;
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		status = -EINVAL;
+		goto err_invalid_data;
+	}
+
+	/* reset edid data for new hdmi connection */
+	hdmi_edid_reset_parser(edid_ctrl);
+
+	edid_buf = edid_ctrl->edid_buf;
+
+	DEV_DBG("%s: === HDMI EDID BLOCK 0 ===\n", __func__);
+	print_hex_dump(KERN_DEBUG, "HDMI EDID: ", DUMP_PREFIX_NONE, 16, 1,
+		edid_buf, EDID_BLOCK_SIZE, false);
+
+	if (!hdmi_edid_check_header(edid_buf)) {
+		status = -EPROTO;
+		goto err_invalid_header;
+	}
+
+	hdmi_edid_extract_vendor_id(edid_ctrl);
+
+	/* EDID_CEA_EXTENSION_FLAG[0x7E] - CEA extension count byte */
+	num_of_cea_blocks = edid_buf[EDID_BLOCK_SIZE - 2];
+	DEV_DBG("%s: No. of CEA blocks is  [%u]\n", __func__,
+		num_of_cea_blocks);
+
+	/* Find out any CEA extension blocks following block 0 */
+	if (num_of_cea_blocks == 0) {
+		/* No CEA extension */
+		edid_ctrl->sink_mode = SINK_MODE_DVI;
+		DEV_DBG("HDMI DVI mode: %s\n",
+			edid_ctrl->sink_mode ? "no" : "yes");
+		goto bail;
+	}
+
+	/* Find out if CEA extension blocks exceeding max limit */
+	if (num_of_cea_blocks >= MAX_EDID_BLOCKS) {
+		DEV_WARN("%s: HDMI EDID exceeded max CEA blocks limit\n",
+			__func__);
+		num_of_cea_blocks = MAX_EDID_BLOCKS - 1;
+	}
+
+	/* check for valid CEA block: extension tag must be 2 */
+	if (edid_buf[EDID_BLOCK_SIZE] != 2) {
+		DEV_ERR("%s: Invalid CEA block\n", __func__);
+		num_of_cea_blocks = 0;
+		goto bail;
+	}
+
+	/* go to CEA extension edid block */
+	edid_buf += EDID_BLOCK_SIZE;
+
+	/* HDMI sinks carry the HDMI IEEE OUI in their VSDB */
+	ieee_reg_id = hdmi_edid_extract_ieee_reg_id(edid_ctrl, edid_buf);
+	if (ieee_reg_id == EDID_IEEE_REG_ID)
+		edid_ctrl->sink_mode = SINK_MODE_HDMI;
+	else
+		edid_ctrl->sink_mode = SINK_MODE_DVI;
+
+	hdmi_edid_extract_sink_caps(edid_ctrl, edid_buf);
+	hdmi_edid_extract_latency_fields(edid_ctrl, edid_buf);
+	hdmi_edid_extract_speaker_allocation_data(edid_ctrl, edid_buf);
+	hdmi_edid_extract_audio_data_blocks(edid_ctrl, edid_buf);
+	hdmi_edid_extract_3d_present(edid_ctrl, edid_buf);
+	hdmi_edid_extract_extended_data_blocks(edid_ctrl, edid_buf);
+
+bail:
+	for (i = 1; i <= num_of_cea_blocks; i++) {
+		DEV_DBG("%s: === HDMI EDID BLOCK %d ===\n", __func__, i);
+		print_hex_dump(KERN_DEBUG, "HDMI EDID: ", DUMP_PREFIX_NONE,
+			16, 1, edid_ctrl->edid_buf + (i * EDID_BLOCK_SIZE),
+			EDID_BLOCK_SIZE, false);
+	}
+
+	edid_ctrl->cea_blks = num_of_cea_blocks;
+
+	hdmi_edid_get_display_mode(edid_ctrl);
+
+	if (edid_ctrl->keep_resv_timings)
+		hdmi_edid_add_resv_timings(edid_ctrl);
+
+	return 0;
+
+err_invalid_header:
+	/* fall back to the currently configured resolution */
+	edid_ctrl->sink_data.num_of_elements = 1;
+	edid_ctrl->sink_data.disp_mode_list[0].video_format =
+		edid_ctrl->video_resolution;
+	edid_ctrl->sink_data.disp_mode_list[0].rgb_support = true;
+err_invalid_data:
+	return status;
+} /* hdmi_edid_read */
+
+/*
+ * If the sink specified support for both underscan/overscan then, by default,
+ * set the underscan bit. Only checking underscan support for preferred
+ * format and cea formats.
+ */
+u8 hdmi_edid_get_sink_scaninfo(void *input, u32 resolution)
+{
+	u8 scaninfo = 0;
+	int use_ce_scan_info = true;
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	/* preferred format uses its own pt_scan_info when available */
+	if (resolution == edid_ctrl->sink_data.preferred_video_format) {
+		use_ce_scan_info = false;
+		switch (edid_ctrl->pt_scan_info) {
+		case 0:
+			/*
+			 * Need to use the info specified for the corresponding
+			 * IT or CE format
+			 */
+			DEV_DBG("%s: No underscan info for preferred V fmt\n",
+				__func__);
+			use_ce_scan_info = true;
+			break;
+		case 3:
+			/* 3 == supports both; default to underscan */
+			DEV_DBG("%s: Set underscan bit for preferred V fmt\n",
+				__func__);
+			scaninfo = BIT(1);
+			break;
+		default:
+			DEV_DBG("%s: Underscan not set for preferred V fmt\n",
+				__func__);
+			break;
+		}
+	}
+
+	if (use_ce_scan_info) {
+		if (edid_ctrl->ce_scan_info == 3) {
+			DEV_DBG("%s: Setting underscan bit for CE video fmt\n",
+				__func__);
+			scaninfo |= BIT(1);
+		} else {
+			DEV_DBG("%s: Not setting underscan bit for CE V fmt\n",
+				__func__);
+		}
+	}
+
+end:
+	return scaninfo;
+} /* hdmi_edid_get_sink_scaninfo */
+
+/*
+ * hdmi_edid_get_sink_mode() - report DVI vs HDMI sink mode.
+ * Honors a sysfs EDID override (sink_mode != -1) over the parsed value.
+ */
+u32 hdmi_edid_get_sink_mode(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	bool sink_mode;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.sink_mode != -1))
+		sink_mode = edid_ctrl->override_data.sink_mode;
+	else
+		sink_mode = edid_ctrl->sink_mode;
+
+	return sink_mode;
+} /* hdmi_edid_get_sink_mode */
+
+/*
+ * hdmi_edid_is_s3d_mode_supported() - check if a 3D structure is supported
+ * for a given video mode, per the parsed sink mode list.
+ *
+ * NOTE(review): unlike its siblings this function does not NULL-check
+ * 'input' before dereferencing -- confirm callers always pass a valid ctrl.
+ */
+bool hdmi_edid_is_s3d_mode_supported(void *input, u32 video_mode, u32 s3d_mode)
+{
+	int i;
+	bool ret = false;
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	struct hdmi_edid_sink_data *sink_data;
+
+	sink_data = &edid_ctrl->sink_data;
+	for (i = 0; i < sink_data->num_of_elements; ++i) {
+		if (sink_data->disp_mode_list[i].video_format != video_mode)
+			continue;
+		/* 3D caps are a bitmask indexed by the s3d structure enum */
+		if (sink_data->disp_mode_list[i].video_3d_format &
+			(1 << s3d_mode))
+			ret = true;
+		else
+			DEV_DBG("%s: return false: vic=%d caps=%x s3d=%d\n",
+				__func__, video_mode,
+				sink_data->disp_mode_list[i].video_3d_format,
+				s3d_mode);
+		break;
+	}
+	return ret;
+}
+
+/*
+ * hdmi_edid_get_scdc_support() - report SCDC presence (HDMI 2.0).
+ * Honors a sysfs EDID scramble override over the parsed sink capability.
+ */
+bool hdmi_edid_get_scdc_support(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = input;
+	bool scdc_present;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.scramble != -1))
+		scdc_present = edid_ctrl->override_data.scramble;
+	else
+		scdc_present = edid_ctrl->sink_caps.scdc_present;
+
+	return scdc_present;
+}
+
+/**
+ * hdmi_edid_sink_scramble_override() - check if override has been enabled
+ * @input: edid data
+ *
+ * Return true if scrambling override is enabled false otherwise.
+ *
+ * NOTE(review): 'input' is dereferenced without a NULL check, unlike the
+ * neighbouring accessors -- confirm callers guarantee a valid pointer.
+ */
+bool hdmi_edid_sink_scramble_override(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.scramble != -1))
+		return true;
+
+	return false;
+
+}
+
+/*
+ * hdmi_edid_get_sink_scrambler_support() - report sink scrambling support.
+ * Honors a sysfs EDID scramble override over the parsed sink capability.
+ */
+bool hdmi_edid_get_sink_scrambler_support(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	bool scramble_support;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.scramble != -1))
+		scramble_support = edid_ctrl->override_data.scramble;
+	else
+		scramble_support = edid_ctrl->sink_caps.scramble_support;
+
+	return scramble_support;
+}
+
+/*
+ * hdmi_edid_get_audio_blk() - expose the parsed audio and speaker
+ * allocation data blocks to the audio driver.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ */
+int hdmi_edid_get_audio_blk(void *input, struct msm_hdmi_audio_edid_blk *blk)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl || !blk) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	blk->audio_data_blk = edid_ctrl->audio_data_block;
+	blk->audio_data_blk_size = edid_ctrl->adb_size;
+
+	blk->spk_alloc_data_blk = edid_ctrl->spkr_alloc_data_block;
+	blk->spk_alloc_data_blk_size = edid_ctrl->sadb_size;
+
+	return 0;
+} /* hdmi_edid_get_audio_blk */
+
+/*
+ * hdmi_edid_set_video_resolution() - record the active resolution.
+ * @reset: when true, also make it the default VIC and collapse the sink
+ * mode list to just this resolution (used before a fresh parse).
+ */
+void hdmi_edid_set_video_resolution(void *input, u32 resolution, bool reset)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->video_resolution = resolution;
+
+	if (reset) {
+		edid_ctrl->default_vic = resolution;
+		edid_ctrl->sink_data.num_of_elements = 1;
+		edid_ctrl->sink_data.disp_mode_list[0].video_format =
+			resolution;
+		edid_ctrl->sink_data.disp_mode_list[0].rgb_support = true;
+	}
+} /* hdmi_edid_set_video_resolution */
+
+/*
+ * hdmi_edid_deinit() - tear down the EDID module: remove the sysfs group
+ * (if one was created) and free the context allocated by hdmi_edid_init().
+ */
+void hdmi_edid_deinit(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (edid_ctrl) {
+		if (edid_ctrl->init_data.kobj)
+			sysfs_remove_group(edid_ctrl->init_data.kobj,
+				&hdmi_edid_fs_attrs_group);
+
+		kfree(edid_ctrl);
+	}
+}
+
+/*
+ * hdmi_edid_init() - allocate and initialize the EDID module context.
+ * @idata: init data; on success idata->buf/buf_size are filled in with the
+ * internal EDID buffer so the caller can write raw EDID bytes into it.
+ *
+ * Ownership: the returned handle must be released with hdmi_edid_deinit().
+ * A sysfs group creation failure is logged but not fatal.
+ *
+ * Return: opaque context pointer, or NULL on failure.
+ */
+void *hdmi_edid_init(struct hdmi_edid_init_data *idata)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = NULL;
+
+	if (!idata) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto error;
+	}
+
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl)
+		goto error;
+
+	edid_ctrl->init_data = *idata;
+
+	if (idata->kobj) {
+		if (sysfs_create_group(idata->kobj,
+			&hdmi_edid_fs_attrs_group))
+			DEV_ERR("%s: EDID sysfs create failed\n",
+				__func__);
+	} else {
+		DEV_DBG("%s: kobj not provided\n", __func__);
+	}
+
+	/* provide edid buffer to the client */
+	idata->buf = edid_ctrl->edid_buf;
+	idata->buf_size = sizeof(edid_ctrl->edid_buf);
+
+	return (void *)edid_ctrl;
+
+error:
+	kfree(edid_ctrl);
+	return NULL;
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
new file mode 100644
index 0000000..69c3eb6
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_EDID_H__
+#define __HDMI_EDID_H__
+
+#include <linux/msm_hdmi.h>
+#include "mdss_hdmi_util.h"
+
+#define EDID_BLOCK_SIZE 0x80
+#define EDID_BLOCK_ADDR 0xA0
+#define MAX_EDID_BLOCKS 5
+
+/* Init data for hdmi_edid_init(); buf/buf_size are outputs (see .c) */
+struct hdmi_edid_init_data {
+	struct kobject *kobj;		/* sysfs parent; may be NULL */
+	struct hdmi_util_ds_data ds_data;
+	u32 max_pclk_khz;		/* max supported pixel clock */
+	u8 *buf;			/* out: internal EDID buffer */
+	u32 buf_size;			/* out: size of @buf */
+};
+
+int hdmi_edid_parser(void *edid_ctrl);
+u32 hdmi_edid_get_raw_data(void *edid_ctrl, u8 *buf, u32 size);
+u8 hdmi_edid_get_sink_scaninfo(void *edid_ctrl, u32 resolution);
+u32 hdmi_edid_get_sink_mode(void *edid_ctrl);
+bool hdmi_edid_sink_scramble_override(void *input);
+bool hdmi_edid_get_sink_scrambler_support(void *input);
+bool hdmi_edid_get_scdc_support(void *input);
+int hdmi_edid_get_audio_blk(void *edid_ctrl,
+ struct msm_hdmi_audio_edid_blk *blk);
+void hdmi_edid_set_video_resolution(void *edid_ctrl, u32 resolution,
+ bool reset);
+void hdmi_edid_deinit(void *edid_ctrl);
+void *hdmi_edid_init(struct hdmi_edid_init_data *init_data);
+bool hdmi_edid_is_s3d_mode_supported(void *input,
+ u32 video_mode, u32 s3d_mode);
+
+#endif /* __HDMI_EDID_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c
new file mode 100644
index 0000000..41c6844
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c
@@ -0,0 +1,1705 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <soc/qcom/scm.h>
+#include <linux/hdcp_qseecom.h>
+#include "mdss_hdmi_hdcp.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+
+#define HDCP_STATE_NAME (hdcp_state_name(hdcp_ctrl->hdcp_state))
+
+/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
+#define HDCP_KEYS_STATE_NO_KEYS 0
+#define HDCP_KEYS_STATE_NOT_CHECKED 1
+#define HDCP_KEYS_STATE_CHECKING 2
+#define HDCP_KEYS_STATE_VALID 3
+#define HDCP_KEYS_STATE_AKSV_NOT_VALID 4
+#define HDCP_KEYS_STATE_CHKSUM_MISMATCH 5
+#define HDCP_KEYS_STATE_PROD_AKSV 6
+#define HDCP_KEYS_STATE_RESERVED 7
+
+#define TZ_HDCP_CMD_ID 0x00004401
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+
+#define HDCP_INT_CLR (BIT(1) | BIT(5) | BIT(7) | BIT(9) | BIT(13))
+
+/* One HDCP register read request: id, offset, printable name, value out */
+struct hdmi_hdcp_reg_data {
+	u32 reg_id;
+	u32 off;
+	char *name;
+	u32 reg_val;
+};
+
+/* HDCP 1.x session state for one HDMI TX instance */
+struct hdmi_hdcp_ctrl {
+	u32 auth_retries;		/* authentication attempt counter */
+	u32 tp_msgid;			/* topology message id */
+	u32 tz_hdcp;			/* nonzero: program regs via TZ scm */
+	enum hdmi_hdcp_state hdcp_state;
+	struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;
+	struct HDCP_V2V1_MSG_TOPOLOGY current_tp;
+	struct delayed_work hdcp_auth_work;	/* deferred auth attempt */
+	struct work_struct hdcp_int_work;	/* irq bottom half */
+	struct completion r0_checked;	/* signals R0' comparison done */
+	struct hdmi_hdcp_init_data init_data;
+	struct hdmi_hdcp_ops *ops;
+	bool hdmi_tx_ver_4;		/* TX major version >= 4 quirks */
+};
+
+/* Map an hdmi_hdcp_state enum to a printable name for logging */
+const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state)
+{
+	switch (hdcp_state) {
+	case HDCP_STATE_INACTIVE:	return "HDCP_STATE_INACTIVE";
+	case HDCP_STATE_AUTHENTICATING:	return "HDCP_STATE_AUTHENTICATING";
+	case HDCP_STATE_AUTHENTICATED:	return "HDCP_STATE_AUTHENTICATED";
+	case HDCP_STATE_AUTH_FAIL:	return "HDCP_STATE_AUTH_FAIL";
+	default:			return "???";
+	}
+} /* hdcp_state_name */
+
+/*
+ * hdmi_hdcp_count_one() - count set bits across a byte array.
+ * Used to validate the AKSV, which must contain exactly 20 ones.
+ */
+static int hdmi_hdcp_count_one(u8 *array, u8 len)
+{
+	int i, j, count = 0;
+
+	for (i = 0; i < len; i++)
+		for (j = 0; j < 8; j++)
+			count += (((array[i] >> j) & 0x1) ? 1 : 0);
+	return count;
+} /* hdmi_hdcp_count_one */
+
+/*
+ * reset_hdcp_ddc_failures() - clear latched HDCP DDC failure/NACK state.
+ *
+ * Reads HDMI_HDCP_DDC_STATUS; if a hardware DDC transfer failure is
+ * latched (bit 16) it is acknowledged and DDC is toggled off/on, and if
+ * a NACK on segment 0 is latched (bit 14) the SW DDC status and the DDC
+ * controller are soft-reset.  Called before each authentication attempt.
+ */
+static void reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int hdcp_ddc_ctrl1_reg;
+	int hdcp_ddc_status;
+	int failure;
+	int nack0;
+	struct mdss_io_data *io;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	/* Check for any DDC transfer failures */
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+	failure = (hdcp_ddc_status >> 16) & 0x1;
+	nack0 = (hdcp_ddc_status >> 14) & 0x1;
+	DEV_DBG("%s: %s: On Entry: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+
+	if (failure == 0x1) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * Failure occurred,  let's clear it.
+		 */
+		DEV_DBG("%s: %s: DDC failure detected.HDCP_DDC_STATUS=0x%08x\n",
+			 __func__, HDCP_STATE_NAME, hdcp_ddc_status);
+
+		/* First, Disable DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0));
+
+		/* ACK the Failure to Clear it */
+		hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1);
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1,
+			hdcp_ddc_ctrl1_reg | BIT(0));
+
+		/* Check if the FAILURE got Cleared */
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0);
+		if (hdcp_ddc_status == 0x0)
+			DEV_DBG("%s: %s: HDCP DDC Failure cleared\n", __func__,
+				HDCP_STATE_NAME);
+		else
+			DEV_WARN("%s: %s: Unable to clear HDCP DDC Failure",
+				__func__, HDCP_STATE_NAME);
+
+		/* Re-Enable HDCP DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0 == 0x1) {
+		DEV_DBG("%s: %s: Before: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
+			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3)));
+
+		/* Reset HDMI DDC Controller */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1));
+		DEV_DBG("%s: %s: After: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
+			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+	}
+
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+
+	failure = (hdcp_ddc_status >> 16) & BIT(0);
+	nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+	DEV_DBG("%s: %s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+} /* reset_hdcp_ddc_failures */
+
+/*
+ * hdmi_hdcp_hw_ddc_clean() - wait for the DDC HW engine to go idle.
+ *
+ * Polls HDMI_HDCP_DDC_STATUS and HDMI_DDC_HW_STATUS until no transfer is
+ * pending/incomplete, sleeping 20ms between polls, for at most 100
+ * iterations.  Times out silently (debug logs only).
+ */
+static void hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct mdss_io_data *io = NULL;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 ddc_xfer_done, ddc_xfer_req;
+	u32 ddc_hw_req, ddc_hw_not_idle;
+	bool ddc_hw_not_ready, xfer_not_done, hw_not_done;
+	u32 timeout_count;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		return;
+	}
+
+	/* Wait to be clean on DDC HW engine */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		ddc_xfer_req = hdcp_ddc_status & BIT(4);
+		ddc_xfer_done = hdcp_ddc_status & BIT(10);
+
+		ddc_hw_status = DSS_REG_R(io, HDMI_DDC_HW_STATUS);
+		ddc_hw_req = ddc_hw_status & BIT(16);
+		ddc_hw_not_idle = ddc_hw_status & (BIT(0) | BIT(1));
+
+		/* ddc transfer was requested but not completed */
+		xfer_not_done = ddc_xfer_req && !ddc_xfer_done;
+
+		/* ddc status is not idle or a hw request pending */
+		hw_not_done = ddc_hw_not_idle || ddc_hw_req;
+
+		ddc_hw_not_ready = xfer_not_done || hw_not_done;
+
+		DEV_DBG("%s: %s: timeout count(%d): ddc hw%sready\n",
+			__func__, HDCP_STATE_NAME, timeout_count,
+				ddc_hw_not_ready ? " not " : " ");
+		DEV_DBG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n",
+				hdcp_ddc_status, ddc_hw_status);
+		if (ddc_hw_not_ready)
+			msleep(20);
+		} while (ddc_hw_not_ready && --timeout_count);
+} /* hdmi_hdcp_hw_ddc_clean */
+
+/*
+ * hdcp_scm_call() - program HDCP registers through TrustZone.
+ * @req: array of SCM_HDCP_MAX_REG address/value pairs.
+ * @resp: out parameter for the TZ response code.
+ *
+ * Uses the legacy scm_call on pre-armv8 and scm_call2 otherwise.
+ * Return: 0 on success, scm error code otherwise.
+ */
+static int hdcp_scm_call(struct scm_hdcp_req *req, u32 *resp)
+{
+	int ret = 0;
+
+	if (!is_scm_armv8()) {
+		/*
+		 * NOTE(review): 'resp' is already a u32 *; passing '&resp'
+		 * hands scm_call a u32 ** while the size argument is
+		 * sizeof(*resp).  The response may be written over the local
+		 * pointer instead of the caller's u32 -- verify against the
+		 * scm_call API and the armv8 branch below, which writes
+		 * '*resp' directly.
+		 */
+		ret = scm_call(SCM_SVC_HDCP, SCM_CMD_HDCP, (void *) req,
+			SCM_HDCP_MAX_REG * sizeof(struct scm_hdcp_req),
+			&resp, sizeof(*resp));
+	} else {
+		struct scm_desc desc;
+
+		desc.args[0] = req[0].addr;
+		desc.args[1] = req[0].val;
+		desc.args[2] = req[1].addr;
+		desc.args[3] = req[1].val;
+		desc.args[4] = req[2].addr;
+		desc.args[5] = req[2].val;
+		desc.args[6] = req[3].addr;
+		desc.args[7] = req[3].val;
+		desc.args[8] = req[4].addr;
+		desc.args[9] = req[4].val;
+		desc.arginfo = SCM_ARGS(10);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_HDCP, SCM_CMD_HDCP),
+				&desc);
+		*resp = desc.ret[0];
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * hdmi_hdcp_load_keys() - load and program the HDCP 1.x AKSV.
+ * @input: struct hdmi_hdcp_ctrl *.
+ *
+ * Picks SW keys (via hdcp1_set_keys) on hardware whose SEC_CTRL version
+ * and feature-config fuse indicate SW-key support; otherwise reads the
+ * AKSV from QFPROM.  Validates the 20-ones AKSV rule, programs AKSV and
+ * the An entropy seeds, enables the HDCP engine and moves the state
+ * machine to AUTHENTICATING.
+ *
+ * Return: 0 on success, -EINVAL on bad input/state or invalid AKSV.
+ */
+static int hdmi_hdcp_load_keys(void *input)
+{
+	int rc = 0;
+	bool use_sw_keys = false;
+	u32 reg_val;
+	u32 ksv_lsb_addr, ksv_msb_addr;
+	u32 aksv_lsb, aksv_msb;
+	u8 aksv[5];
+	struct mdss_io_data *io;
+	struct mdss_io_data *qfprom_io;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = input;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io ||
+		!hdcp_ctrl->init_data.qfprom_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* only legal from idle or after a failed authentication */
+	if ((hdcp_ctrl->hdcp_state != HDCP_STATE_INACTIVE) &&
+		(hdcp_ctrl->hdcp_state != HDCP_STATE_AUTH_FAIL)) {
+		DEV_ERR("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+	qfprom_io = hdcp_ctrl->init_data.qfprom_io;
+
+	/* On compatible hardware, use SW keys */
+	reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
+	if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
+		reg_val = DSS_REG_R(qfprom_io,
+				QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+				QFPROM_RAW_VERSION_4);
+
+		if (!(reg_val & BIT(23)))
+			use_sw_keys = true;
+	}
+
+	if (use_sw_keys) {
+		/* hdcp1_set_keys fills aksv_msb/aksv_lsb on success */
+		if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+			pr_err("%s: setting hdcp SW keys failed\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		/* Fetch aksv from QFPROM, this info should be public. */
+		ksv_lsb_addr = HDCP_KSV_LSB;
+		ksv_msb_addr = HDCP_KSV_MSB;
+
+		if (hdcp_ctrl->hdmi_tx_ver_4) {
+			ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
+			ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
+		}
+
+		aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
+		aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+	}
+
+	DEV_DBG("%s: %s: AKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
+		aksv_msb, aksv_lsb);
+
+	aksv[0] =  aksv_lsb        & 0xFF;
+	aksv[1] = (aksv_lsb >> 8)  & 0xFF;
+	aksv[2] = (aksv_lsb >> 16) & 0xFF;
+	aksv[3] = (aksv_lsb >> 24) & 0xFF;
+	aksv[4] =  aksv_msb        & 0xFF;
+
+	/* check there are 20 ones in AKSV */
+	if (hdmi_hdcp_count_one(aksv, 5) != 20) {
+		DEV_ERR("%s: AKSV bit count failed\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	DSS_REG_W(io, HDMI_HDCP_SW_LOWER_AKSV, aksv_lsb);
+	DSS_REG_W(io, HDMI_HDCP_SW_UPPER_AKSV, aksv_msb);
+
+	/* Setup seed values for random number An */
+	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	DSS_REG_W(io, HDMI_HDCP_DEBUG_CTRL,
+		DSS_REG_R(io, HDMI_HDCP_DEBUG_CTRL) & ~(BIT(2)));
+
+	/* make sure hw is programmed */
+	wmb();
+
+	/* enable HDCP engine */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0));
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+end:
+	return rc;
+}
+
+static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 link0_aksv_0, link0_aksv_1;
+ u32 link0_bksv_0, link0_bksv_1;
+ u32 link0_an_0, link0_an_1;
+ u32 timeout_count;
+ bool is_match;
+ struct mdss_io_data *io;
+ struct mdss_io_data *hdcp_io;
+ u8 aksv[5], *bksv = NULL;
+ u8 an[8];
+ u8 bcaps = 0;
+ struct hdmi_tx_ddc_data ddc_data;
+ u32 link0_status = 0, an_ready, keys_state;
+ u8 buf[0xFF];
+
+ struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
+ u32 phy_addr;
+ u32 ret = 0;
+ u32 resp = 0;
+
+ if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io ||
+ !hdcp_ctrl->init_data.qfprom_io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ phy_addr = hdcp_ctrl->init_data.phy_addr;
+ bksv = hdcp_ctrl->current_tp.bksv;
+ io = hdcp_ctrl->init_data.core_io;
+ hdcp_io = hdcp_ctrl->init_data.hdcp_io;
+
+ if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTHENTICATING) {
+ DEV_ERR("%s: %s: invalid state. returning\n", __func__,
+ HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /* Clear any DDC failures from previous tries */
+ reset_hdcp_ddc_failures(hdcp_ctrl);
+
+ /*
+ * Read BCAPS
+ * We need to first try to read an HDCP register on the sink to see if
+ * the sink is ready for HDCP authentication
+ */
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x40;
+ ddc_data.data_buf = &bcaps;
+ ddc_data.data_len = 1;
+ ddc_data.request_len = 1;
+ ddc_data.retry = 5;
+ ddc_data.what = "Bcaps";
+
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+ DEV_DBG("%s: %s: BCAPS=%02x\n", __func__, HDCP_STATE_NAME, bcaps);
+
+ /* receiver (0), repeater (1) */
+ hdcp_ctrl->current_tp.ds_type =
+ (bcaps & BIT(6)) >> 6 ? DS_REPEATER : DS_RECEIVER;
+
+ /* Write BCAPS to the hardware */
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA12;
+ scm_buf[0].val = bcaps;
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+ if (ret || resp) {
+ DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ DSS_REG_W(hdcp_io, HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12,
+ bcaps);
+ } else {
+ DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps);
+ }
+
+ /* Wait for HDCP keys to be checked and validated */
+ timeout_count = 100;
+ keys_state = (link0_status >> 28) & 0x7;
+ while ((keys_state != HDCP_KEYS_STATE_VALID) &&
+ --timeout_count) {
+ link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+ keys_state = (link0_status >> 28) & 0x7;
+ DEV_DBG("%s: %s: Keys not ready(%d). s=%d\n, l0=%0x08x",
+ __func__, HDCP_STATE_NAME, timeout_count,
+ keys_state, link0_status);
+ msleep(20);
+ }
+
+ if (!timeout_count) {
+ DEV_ERR("%s: %s: Invalid Keys State: %d\n", __func__,
+ HDCP_STATE_NAME, keys_state);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * 1.1_Features turned off by default.
+ * No need to write AInfo since 1.1_Features is disabled.
+ */
+ DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA4, 0);
+
+ /* Wait for An0 and An1 bit to be ready */
+ timeout_count = 100;
+ do {
+ link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+ an_ready = (link0_status & BIT(8)) && (link0_status & BIT(9));
+ if (!an_ready) {
+ DEV_DBG("%s: %s: An not ready(%d). l0_status=0x%08x\n",
+ __func__, HDCP_STATE_NAME, timeout_count,
+ link0_status);
+ msleep(20);
+ }
+ } while (!an_ready && --timeout_count);
+
+ if (!timeout_count) {
+ rc = -ETIMEDOUT;
+ DEV_ERR("%s: %s: timedout, An0=%ld, An1=%ld\n", __func__,
+ HDCP_STATE_NAME, (link0_status & BIT(8)) >> 8,
+ (link0_status & BIT(9)) >> 9);
+ goto error;
+ }
+
+ /* As per hardware recommendations, wait before reading An */
+ msleep(20);
+
+ /* Read An0 and An1 */
+ link0_an_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA5);
+ link0_an_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA6);
+
+ /* Read AKSV */
+ link0_aksv_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA3);
+ link0_aksv_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA4);
+
+ /* Copy An and AKSV to byte arrays for transmission */
+ aksv[0] = link0_aksv_0 & 0xFF;
+ aksv[1] = (link0_aksv_0 >> 8) & 0xFF;
+ aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+ aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+ aksv[4] = link0_aksv_1 & 0xFF;
+
+ an[0] = link0_an_0 & 0xFF;
+ an[1] = (link0_an_0 >> 8) & 0xFF;
+ an[2] = (link0_an_0 >> 16) & 0xFF;
+ an[3] = (link0_an_0 >> 24) & 0xFF;
+ an[4] = link0_an_1 & 0xFF;
+ an[5] = (link0_an_1 >> 8) & 0xFF;
+ an[6] = (link0_an_1 >> 16) & 0xFF;
+ an[7] = (link0_an_1 >> 24) & 0xFF;
+
+ /* Write An to offset 0x18 */
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x18;
+ ddc_data.data_buf = an;
+ ddc_data.data_len = 8;
+ ddc_data.what = "An";
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: An write failed\n", __func__, HDCP_STATE_NAME);
+ goto error;
+ }
+
+ /* Write AKSV to offset 0x10 */
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x10;
+ ddc_data.data_buf = aksv;
+ ddc_data.data_len = 5;
+ ddc_data.what = "Aksv";
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: AKSV write failed\n", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+ DEV_DBG("%s: %s: Link0-AKSV=%02x%08x\n", __func__,
+ HDCP_STATE_NAME, link0_aksv_1 & 0xFF, link0_aksv_0);
+
+ /* Read BKSV at offset 0x00 */
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x00;
+ ddc_data.data_buf = bksv;
+ ddc_data.data_len = 5;
+ ddc_data.request_len = 5;
+ ddc_data.retry = 5;
+ ddc_data.what = "Bksv";
+
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: BKSV read failed\n", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+
+ /* check there are 20 ones in BKSV */
+ if (hdmi_hdcp_count_one(bksv, 5) != 20) {
+ DEV_ERR("%s: %s: BKSV doesn't have 20 1's and 20 0's\n",
+ __func__, HDCP_STATE_NAME);
+ DEV_ERR("%s: %s: BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+ __func__, HDCP_STATE_NAME, bksv[4], bksv[3], bksv[2],
+ bksv[1], bksv[0]);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ link0_bksv_0 = bksv[3];
+ link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+ link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+ link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+ link0_bksv_1 = bksv[4];
+ DEV_DBG("%s: %s: BKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
+ link0_bksv_1, link0_bksv_0);
+
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA0;
+ scm_buf[0].val = link0_bksv_0;
+ scm_buf[1].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA1;
+ scm_buf[1].val = link0_bksv_1;
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+
+ if (ret || resp) {
+ DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ DSS_REG_W(hdcp_io, HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0,
+ link0_bksv_0);
+ DSS_REG_W(hdcp_io, HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1,
+ link0_bksv_1);
+ } else {
+ DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA0, link0_bksv_0);
+ DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA1, link0_bksv_1);
+ }
+
+ /* Enable HDCP interrupts and ack/clear any stale interrupts */
+ DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0xE6);
+
+ /*
+ * HDCP Compliace Test case 1A-01:
+ * Wait here at least 100ms before reading R0'
+ */
+ msleep(125);
+
+ /* Read R0' at offset 0x08 */
+ memset(buf, 0, sizeof(buf));
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x08;
+ ddc_data.data_buf = buf;
+ ddc_data.data_len = 2;
+ ddc_data.request_len = 2;
+ ddc_data.retry = 5;
+ ddc_data.what = "R0'";
+
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: R0' read failed\n", __func__, HDCP_STATE_NAME);
+ goto error;
+ }
+ DEV_DBG("%s: %s: R0'=%02x%02x\n", __func__, HDCP_STATE_NAME,
+ buf[1], buf[0]);
+
+ /* Write R0' to HDCP registers and check to see if it is a match */
+ reinit_completion(&hdcp_ctrl->r0_checked);
+ DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA2_0, (((u32)buf[1]) << 8) | buf[0]);
+ timeout_count = wait_for_completion_timeout(
+ &hdcp_ctrl->r0_checked, HZ*2);
+ link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+ is_match = link0_status & BIT(12);
+ if (!is_match) {
+ DEV_DBG("%s: %s: Link0_Status=0x%08x\n", __func__,
+ HDCP_STATE_NAME, link0_status);
+ if (!timeout_count) {
+ DEV_ERR("%s: %s: Timeout. No R0 mtch. R0'=%02x%02x\n",
+ __func__, HDCP_STATE_NAME, buf[1], buf[0]);
+ rc = -ETIMEDOUT;
+ goto error;
+ } else {
+ DEV_ERR("%s: %s: R0 mismatch. R0'=%02x%02x\n", __func__,
+ HDCP_STATE_NAME, buf[1], buf[0]);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else {
+ DEV_DBG("%s: %s: R0 matches\n", __func__, HDCP_STATE_NAME);
+ }
+
+error:
+ if (rc) {
+ DEV_ERR("%s: %s: Authentication Part I failed\n", __func__,
+ hdcp_ctrl ? HDCP_STATE_NAME : "???");
+ } else {
+ /* Enable HDCP Encryption */
+ DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0) | BIT(8));
+ DEV_INFO("%s: %s: Authentication Part I successful\n",
+ __func__, HDCP_STATE_NAME);
+ }
+ return rc;
+} /* hdmi_hdcp_authentication_part1 */
+
+static int read_write_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+ struct hdmi_tx_ddc_data ddc_data,
+ struct mdss_io_data *io, int off, char *name,
+ u32 reg, bool wr)
+{
+ int rc = 0;
+
+ do {
+ ddc_data.offset = off;
+ memset(ddc_data.what, 0, 20);
+ snprintf(ddc_data.what, 20, name);
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: Read %s failed\n", __func__,
+ HDCP_STATE_NAME, ddc_data.what);
+ return rc;
+ }
+ DEV_DBG("%s: %s: %s: buf[0]=%x, [1]=%x,[2]=%x, [3]=%x\n",
+ __func__, HDCP_STATE_NAME, ddc_data.what,
+ ddc_data.data_buf[0], ddc_data.data_buf[1],
+ ddc_data.data_buf[2], ddc_data.data_buf[3]);
+ if (wr) {
+ DSS_REG_W((io), (reg),
+ (ddc_data.data_buf[3] << 24 |
+ ddc_data.data_buf[2] << 16 |
+ ddc_data.data_buf[1] << 8 |
+ ddc_data.data_buf[0]));
+ }
+ } while (0);
+ return rc;
+}
+
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ char what[20];
+ int rc = 0;
+ u8 buf[4];
+ struct hdmi_tx_ddc_data ddc_data;
+ struct mdss_io_data *io;
+
+ struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
+ u32 phy_addr;
+
+ struct hdmi_hdcp_reg_data reg_data[] = {
+ {HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
+ {HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
+ {HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
+ {HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+ {HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+ };
+ u32 size = ARRAY_SIZE(reg_data)/sizeof(reg_data[0]);
+ u32 iter = 0;
+ u32 ret = 0;
+ u32 resp = 0;
+
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.data_buf = buf;
+ ddc_data.data_len = 4;
+ ddc_data.request_len = 4;
+ ddc_data.retry = 5;
+ ddc_data.what = what;
+
+ if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ phy_addr = hdcp_ctrl->init_data.phy_addr;
+
+ io = hdcp_ctrl->init_data.core_io;
+
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ for (iter = 0; iter < size && iter < SCM_HDCP_MAX_REG; iter++) {
+ struct hdmi_hdcp_reg_data *rd = reg_data + iter;
+
+ if (read_write_v_h(hdcp_ctrl, ddc_data, io, rd->off,
+ rd->name, 0, false))
+ goto error;
+
+ rd->reg_val = buf[3] << 24 | buf[2] << 16 |
+ buf[1] << 8 | buf[0];
+
+ scm_buf[iter].addr = phy_addr + reg_data[iter].reg_id;
+ scm_buf[iter].val = reg_data[iter].reg_val;
+ }
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+ if (ret || resp) {
+ DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ struct mdss_io_data *hdcp_io = hdcp_ctrl->init_data.hdcp_io;
+
+ /* Read V'.HO 4 Byte at offset 0x20 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, hdcp_io, 0x20, "V' H0",
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, true))
+ goto error;
+
+ /* Read V'.H1 4 Byte at offset 0x24 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, hdcp_io, 0x24, "V' H1",
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, true))
+ goto error;
+
+ /* Read V'.H2 4 Byte at offset 0x28 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, hdcp_io, 0x28, "V' H2",
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, true))
+ goto error;
+
+ /* Read V'.H3 4 Byte at offset 0x2C */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, hdcp_io, 0x2C, "V' H3",
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, true))
+ goto error;
+
+ /* Read V'.H4 4 Byte at offset 0x30 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, hdcp_io, 0x30, "V' H4",
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, true))
+ goto error;
+ } else {
+ /* Read V'.HO 4 Byte at offset 0x20 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, io, 0x20, "V' H0",
+ HDMI_HDCP_RCVPORT_DATA7, true))
+ goto error;
+
+ /* Read V'.H1 4 Byte at offset 0x24 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, io, 0x24, "V' H1",
+ HDMI_HDCP_RCVPORT_DATA8, true))
+ goto error;
+
+ /* Read V'.H2 4 Byte at offset 0x28 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, io, 0x28, "V' H2",
+ HDMI_HDCP_RCVPORT_DATA9, true))
+ goto error;
+
+ /* Read V'.H3 4 Byte at offset 0x2C */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, io, 0x2C, "V' H3",
+ HDMI_HDCP_RCVPORT_DATA10, true))
+ goto error;
+
+ /* Read V'.H4 4 Byte at offset 0x30 */
+ if (read_write_v_h(hdcp_ctrl, ddc_data, io, 0x30, "V' H4",
+ HDMI_HDCP_RCVPORT_DATA11, true))
+ goto error;
+ }
+
+error:
+ return rc;
+}
+
+static int hdmi_hdcp_authentication_part2(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc, cnt, i;
+ struct hdmi_tx_ddc_data ddc_data;
+ u32 timeout_count, down_stream_devices = 0;
+ u32 repeater_cascade_depth = 0;
+ u8 buf[0xFF];
+ u8 *ksv_fifo = NULL;
+ u8 bcaps;
+ u16 bstatus, max_devs_exceeded = 0, max_cascade_exceeded = 0;
+ u32 link0_status;
+ u32 ksv_bytes;
+ struct mdss_io_data *io;
+
+ struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
+ u32 phy_addr;
+ u32 ret = 0;
+ u32 resp = 0;
+
+ if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ phy_addr = hdcp_ctrl->init_data.phy_addr;
+
+ if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTHENTICATING) {
+ DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+ HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ ksv_fifo = hdcp_ctrl->current_tp.ksv_list;
+
+ io = hdcp_ctrl->init_data.core_io;
+
+ memset(buf, 0, sizeof(buf));
+ memset(ksv_fifo, 0,
+ sizeof(hdcp_ctrl->current_tp.ksv_list));
+
+ /*
+ * Wait until READY bit is set in BCAPS, as per HDCP specifications
+ * maximum permitted time to check for READY bit is five seconds.
+ */
+ timeout_count = 50;
+ do {
+ timeout_count--;
+ /* Read BCAPS at offset 0x40 */
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x40;
+ ddc_data.data_buf = &bcaps;
+ ddc_data.data_len = 1;
+ ddc_data.request_len = 1;
+ ddc_data.retry = 5;
+ ddc_data.what = "Bcaps";
+ ddc_data.retry_align = true;
+
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+ msleep(100);
+ } while (!(bcaps & BIT(5)) && timeout_count);
+
+ /* Read BSTATUS at offset 0x41 */
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x41;
+ ddc_data.data_buf = buf;
+ ddc_data.data_len = 2;
+ ddc_data.request_len = 2;
+ ddc_data.retry = 5;
+ ddc_data.what = "Bstatuss";
+ ddc_data.retry_align = true;
+
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: BSTATUS read failed\n", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+ bstatus = buf[1];
+ bstatus = (bstatus << 8) | buf[0];
+
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ /* Write BSTATUS and BCAPS to HDCP registers */
+ scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA12;
+ scm_buf[0].val = bcaps | (bstatus << 8);
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+ if (ret || resp) {
+ DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ DSS_REG_W(hdcp_ctrl->init_data.hdcp_io,
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12,
+ bcaps | (bstatus << 8));
+ } else {
+ DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps | (bstatus << 8));
+ }
+
+ down_stream_devices = bstatus & 0x7F;
+ if (down_stream_devices == 0) {
+ /*
+ * If no downstream devices are attached to the repeater
+ * then part II fails.
+ * todo: The other approach would be to continue PART II.
+ */
+ DEV_ERR("%s: %s: No downstream devices\n", __func__,
+ HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /* Cascaded repeater depth */
+ repeater_cascade_depth = (bstatus >> 8) & 0x7;
+
+ /*
+ * HDCP Compliance 1B-05:
+ * Check if no. of devices connected to repeater
+ * exceed max_devices_connected from bit 7 of Bstatus.
+ */
+ max_devs_exceeded = (bstatus & BIT(7)) >> 7;
+ if (max_devs_exceeded == 0x01) {
+ DEV_ERR("%s: %s: no. of devs connected exceeds max allowed",
+ __func__, HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-06:
+ * Check if no. of cascade connected to repeater
+ * exceed max_cascade_connected from bit 11 of Bstatus.
+ */
+ max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
+ if (max_cascade_exceeded == 0x01) {
+ DEV_ERR("%s: %s: no. of cascade conn exceeds max allowed",
+ __func__, HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * Read KSV FIFO over DDC
+ * Key Slection vector FIFO Used to pull downstream KSVs
+ * from HDCP Repeaters.
+ * All bytes (DEVICE_COUNT * 5) must be read in a single,
+ * auto incrementing access.
+ * All bytes read as 0x00 for HDCP Receivers that are not
+ * HDCP Repeaters (REPEATER == 0).
+ */
+ ksv_bytes = 5 * down_stream_devices;
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = 0x74;
+ ddc_data.offset = 0x43;
+ ddc_data.data_buf = ksv_fifo;
+ ddc_data.data_len = ksv_bytes;
+ ddc_data.request_len = ksv_bytes;
+ ddc_data.retry = 5;
+ ddc_data.what = "KSV FIFO";
+
+ hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ cnt = 0;
+ do {
+ rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ DEV_ERR("%s: %s: KSV FIFO read failed\n", __func__,
+ HDCP_STATE_NAME);
+ /*
+ * HDCP Compliace Test case 1B-01:
+ * Wait here until all the ksv bytes have been
+ * read from the KSV FIFO register.
+ */
+ msleep(25);
+ } else {
+ break;
+ }
+ cnt++;
+ } while (cnt != 20);
+
+ if (cnt == 20)
+ goto error;
+
+ rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+ if (rc)
+ goto error;
+
+ /*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at time starting with the LSB.
+ * On the very last byte write, the HDCP_SHA_DATA_DONE bit[0]
+ */
+
+ /* First, reset SHA engine */
+ /* Next, enable SHA engine, SEL=DIGA_HDCP */
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_CTRL;
+ scm_buf[0].val = HDCP_REG_ENABLE;
+ scm_buf[1].addr = phy_addr + HDMI_HDCP_SHA_CTRL;
+ scm_buf[1].val = HDCP_REG_DISABLE;
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+ if (ret || resp) {
+ DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ DSS_REG_W(hdcp_ctrl->init_data.hdcp_io,
+ HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL,
+ HDCP_REG_ENABLE);
+ DSS_REG_W(hdcp_ctrl->init_data.hdcp_io,
+ HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL,
+ HDCP_REG_DISABLE);
+ } else {
+ DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, HDCP_REG_ENABLE);
+ DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, HDCP_REG_DISABLE);
+ }
+
+ for (i = 0; i < ksv_bytes - 1; i++) {
+ /* Write KSV byte and do not set DONE bit[0] */
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_DATA;
+ scm_buf[0].val = ksv_fifo[i] << 16;
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+ if (ret || resp) {
+ DEV_ERR("%s: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ DSS_REG_W_ND(hdcp_ctrl->init_data.hdcp_io,
+ HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA,
+ ksv_fifo[i] << 16);
+ } else {
+ DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA, ksv_fifo[i] << 16);
+ }
+
+ /*
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further
+ */
+ if (i && !((i + 1) % 64)) {
+ timeout_count = 100;
+ while (!(DSS_REG_R(io, HDMI_HDCP_SHA_STATUS) & BIT(0))
+ && (--timeout_count)) {
+ DEV_DBG("%s: %s: Wrote 64 bytes KSV FIFO\n",
+ __func__, HDCP_STATE_NAME);
+ DEV_DBG("%s: %s: HDCP_SHA_STATUS=%08x\n",
+ __func__, HDCP_STATE_NAME,
+ DSS_REG_R(io, HDMI_HDCP_SHA_STATUS));
+ msleep(20);
+ }
+ if (!timeout_count) {
+ rc = -ETIMEDOUT;
+ DEV_ERR("%s: %s: Write KSV FIFO timedout",
+ __func__, HDCP_STATE_NAME);
+ goto error;
+ }
+ }
+
+ }
+
+ /* Write l to DONE bit[0] */
+ if (hdcp_ctrl->tz_hdcp) {
+ memset(scm_buf, 0x00, sizeof(scm_buf));
+
+ scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_DATA;
+ scm_buf[0].val = (ksv_fifo[ksv_bytes - 1] << 16) | 0x1;
+
+ ret = hdcp_scm_call(scm_buf, &resp);
+ if (ret || resp) {
+ DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+ __func__, ret, resp);
+ rc = -EINVAL;
+ goto error;
+ }
+ } else if (hdcp_ctrl->hdmi_tx_ver_4) {
+ DSS_REG_W_ND(hdcp_ctrl->init_data.hdcp_io,
+ HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA,
+ (ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+ } else {
+ DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA,
+ (ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+ }
+
+ /* Now wait for HDCP_SHA_COMP_DONE */
+ timeout_count = 100;
+ while ((0x10 != (DSS_REG_R(io, HDMI_HDCP_SHA_STATUS)
+ & 0xFFFFFF10)) && --timeout_count)
+ msleep(20);
+ if (!timeout_count) {
+ rc = -ETIMEDOUT;
+ DEV_ERR("%s: %s: SHA computation timedout", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+
+ /* Wait for V_MATCHES */
+ timeout_count = 100;
+ link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+ while (((link0_status & BIT(20)) != BIT(20)) && --timeout_count) {
+ DEV_DBG("%s: %s: Waiting for V_MATCHES(%d). l0_status=0x%08x\n",
+ __func__, HDCP_STATE_NAME, timeout_count, link0_status);
+ msleep(20);
+ link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+ }
+ if (!timeout_count) {
+ rc = -ETIMEDOUT;
+ DEV_ERR("%s: %s: HDCP V Match timedout", __func__,
+ HDCP_STATE_NAME);
+ goto error;
+ }
+
+error:
+ if (rc)
+ DEV_ERR("%s: %s: Authentication Part II failed\n", __func__,
+ hdcp_ctrl ? HDCP_STATE_NAME : "???");
+ else
+ DEV_INFO("%s: %s: Authentication Part II successful\n",
+ __func__, HDCP_STATE_NAME);
+
+ if (!hdcp_ctrl) {
+ DEV_ERR("%s: hdcp_ctrl null. Topology not updated\n",
+ __func__);
+ return rc;
+ }
+ /* Update topology information */
+ hdcp_ctrl->current_tp.dev_count = down_stream_devices;
+ hdcp_ctrl->current_tp.max_cascade_exceeded = max_cascade_exceeded;
+ hdcp_ctrl->current_tp.max_dev_exceeded = max_devs_exceeded;
+ hdcp_ctrl->current_tp.depth = repeater_cascade_depth;
+
+ return rc;
+} /* hdmi_hdcp_authentication_part2 */
+
+static void hdmi_hdcp_cache_topology(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ memcpy((void *)&hdcp_ctrl->cached_tp,
+ (void *) &hdcp_ctrl->current_tp,
+ sizeof(hdcp_ctrl->cached_tp));
+}
+
+static void hdmi_hdcp_notify_topology(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ char a[16], b[16];
+ char *envp[] = {
+ [0] = "HDCP_MGR_EVENT=MSG_READY",
+ [1] = a,
+ [2] = b,
+ NULL,
+ };
+
+ snprintf(envp[1], 16, "%d", (int)DOWN_CHECK_TOPOLOGY);
+ snprintf(envp[2], 16, "%d", (int)HDCP_V1_TX);
+ kobject_uevent_env(hdcp_ctrl->init_data.sysfs_kobj, KOBJ_CHANGE, envp);
+
+ DEV_DBG("%s Event Sent: %s msgID = %s srcID = %s\n", __func__,
+ envp[0], envp[1], envp[2]);
+}
+
+static void hdmi_hdcp_int_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_int_work);
+
+ if (!hdcp_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ mutex_lock(hdcp_ctrl->init_data.mutex);
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
+ mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+ if (hdcp_ctrl->init_data.notify_status) {
+ hdcp_ctrl->init_data.notify_status(
+ hdcp_ctrl->init_data.cb_data,
+ hdcp_ctrl->hdcp_state);
+ }
+} /* hdmi_hdcp_int_work */
+
/*
 * Delayed-work handler that runs the full HDCP 1.x authentication
 * sequence: Part I always, Part II only when the downstream device is
 * a repeater.  Holds software control of the DDC bus for the duration
 * and hands it back to hardware before Part III (link integrity).
 * The result is published to the HDMI Tx layer via notify_status()
 * unless the state changed (deauth/reauth) while we were running.
 */
static void hdmi_hdcp_auth_work(struct work_struct *work)
{
	int rc;
	struct delayed_work *dw = to_delayed_work(work);
	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(dw,
		struct hdmi_hdcp_ctrl, hdcp_auth_work);
	struct mdss_io_data *io;

	if (!hdcp_ctrl) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	/* only proceed if authentication was actually requested */
	if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTHENTICATING) {
		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
			HDCP_STATE_NAME);
		return;
	}

	io = hdcp_ctrl->init_data.core_io;
	/* Enabling Software DDC: clear BIT(4) to give software the bus */
	DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
				HDMI_DDC_ARBITRATION) & ~(BIT(4)));

	rc = hdmi_hdcp_authentication_part1(hdcp_ctrl);
	if (rc) {
		DEV_DBG("%s: %s: HDCP Auth Part I failed\n", __func__,
			HDCP_STATE_NAME);
		goto error;
	}

	/* Part II applies only to repeaters (KSV list + V' verification) */
	if (hdcp_ctrl->current_tp.ds_type == DS_REPEATER) {
		rc = hdmi_hdcp_authentication_part2(hdcp_ctrl);
		if (rc) {
			DEV_DBG("%s: %s: HDCP Auth Part II failed\n", __func__,
				HDCP_STATE_NAME);
			goto error;
		}
	} else {
		DEV_INFO("%s: Downstream device is not a repeater\n", __func__);
	}
	/* Disabling software DDC before going into part3 to make sure
	 * there is no arbitration between software and hardware for the
	 * DDC bus (set BIT(4) to hand the bus back to hardware)
	 */
	DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
				HDMI_DDC_ARBITRATION) | (BIT(4)));

error:
	/*
	 * Ensure that the state did not change during authentication.
	 * If it did, it means that deauthenticate/reauthenticate was
	 * called. In that case, this function need not notify HDMI Tx
	 * of the result
	 */
	mutex_lock(hdcp_ctrl->init_data.mutex);
	if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATING) {
		if (rc) {
			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
		} else {
			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
			hdcp_ctrl->auth_retries = 0;
			hdmi_hdcp_cache_topology(hdcp_ctrl);
			hdmi_hdcp_notify_topology(hdcp_ctrl);
		}
		mutex_unlock(hdcp_ctrl->init_data.mutex);

		/* Notify HDMI Tx controller of the result */
		DEV_DBG("%s: %s: Notifying HDMI Tx of auth result\n",
			__func__, HDCP_STATE_NAME);
		if (hdcp_ctrl->init_data.notify_status) {
			hdcp_ctrl->init_data.notify_status(
				hdcp_ctrl->init_data.cb_data,
				hdcp_ctrl->hdcp_state);
		}
	} else {
		DEV_DBG("%s: %s: HDCP state changed during authentication\n",
			__func__, HDCP_STATE_NAME);
		mutex_unlock(hdcp_ctrl->init_data.mutex);
	}
} /* hdmi_hdcp_auth_work */
+
+int hdmi_hdcp_authenticate(void *input)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+
+ if (!hdcp_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ if (hdcp_ctrl->hdcp_state != HDCP_STATE_INACTIVE) {
+ DEV_DBG("%s: %s: already active or activating. returning\n",
+ __func__, HDCP_STATE_NAME);
+ return 0;
+ }
+
+ DEV_DBG("%s: %s: Queuing work to start HDCP authentication", __func__,
+ HDCP_STATE_NAME);
+
+ if (!hdmi_hdcp_load_keys(input))
+ queue_delayed_work(hdcp_ctrl->init_data.workq,
+ &hdcp_ctrl->hdcp_auth_work, HZ/2);
+ else
+ queue_work(hdcp_ctrl->init_data.workq,
+ &hdcp_ctrl->hdcp_int_work);
+
+ return 0;
+} /* hdmi_hdcp_authenticate */
+
+int hdmi_hdcp_reauthenticate(void *input)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+ struct mdss_io_data *io;
+ u32 hdmi_hw_version;
+ u32 ret = 0;
+
+ if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ io = hdcp_ctrl->init_data.core_io;
+
+ if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTH_FAIL) {
+ DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+ HDCP_STATE_NAME);
+ return 0;
+ }
+
+ hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+ if (hdmi_hw_version >= 0x30030000) {
+ DSS_REG_W(io, HDMI_CTRL_SW_RESET, BIT(1));
+ DSS_REG_W(io, HDMI_CTRL_SW_RESET, 0);
+ }
+
+ /* Disable HDCP interrupts */
+ DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
+
+ DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+
+ /* Wait to be clean on DDC HW engine */
+ hdmi_hdcp_hw_ddc_clean(hdcp_ctrl);
+
+ /* Disable encryption and disable the HDCP block */
+ DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
+
+ if (!hdmi_hdcp_load_keys(input))
+ queue_delayed_work(hdcp_ctrl->init_data.workq,
+ &hdcp_ctrl->hdcp_auth_work, HZ/2);
+ else
+ queue_work(hdcp_ctrl->init_data.workq,
+ &hdcp_ctrl->hdcp_int_work);
+
+ return ret;
+} /* hdmi_hdcp_reauthenticate */
+
/*
 * Public entry point: tear down the HDCP session.
 *
 * Marks the session inactive under the mutex (so in-flight auth work
 * sees the change), cancels any queued auth/int work, then resets the
 * HDCP core and disables encryption.  No-op when already inactive.
 */
void hdmi_hdcp_off(void *input)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
	struct mdss_io_data *io;
	int rc = 0;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	io = hdcp_ctrl->init_data.core_io;

	if (hdcp_ctrl->hdcp_state == HDCP_STATE_INACTIVE) {
		DEV_DBG("%s: %s: inactive. returning\n", __func__,
			HDCP_STATE_NAME);
		return;
	}

	/*
	 * Disable HDCP interrupts.
	 * Also, need to set the state to inactive here so that any ongoing
	 * reauth works will know that the HDCP session has been turned off.
	 */
	mutex_lock(hdcp_ctrl->init_data.mutex);
	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
	mutex_unlock(hdcp_ctrl->init_data.mutex);

	/*
	 * Cancel any pending auth/reauth attempts.
	 * If one is ongoing, this will wait for it to finish.
	 * No more reauthentication attempts will be scheduled since we
	 * set the current state to inactive.
	 */
	rc = cancel_delayed_work_sync(&hdcp_ctrl->hdcp_auth_work);
	if (rc)
		DEV_DBG("%s: %s: Deleted hdcp auth work\n", __func__,
			HDCP_STATE_NAME);
	rc = cancel_work_sync(&hdcp_ctrl->hdcp_int_work);
	if (rc)
		DEV_DBG("%s: %s: Deleted hdcp int work\n", __func__,
			HDCP_STATE_NAME);

	/* reset the HDCP core after all workers have drained */
	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));

	/* Disable encryption and disable the HDCP block */
	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);

	DEV_DBG("%s: %s: HDCP: Off\n", __func__, HDCP_STATE_NAME);
} /* hdmi_hdcp_off */
+
/*
 * HDCP interrupt service routine, called from the HDMI Tx ISR.
 *
 * Reads HDMI_HDCP_INT_CTRL and services each asserted interrupt:
 * auth success, auth failure and the two DDC transfer events.  Each
 * event is acknowledged by writing the register back with the
 * corresponding ack bit set (e.g. BIT(1) for the BIT(0) success
 * interrupt — per this hardware's status/ack bit pairing).
 *
 * Returns 0, or -EINVAL on invalid input.
 */
int hdmi_hdcp_isr(void *input)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
	int rc = 0;
	struct mdss_io_data *io;
	u32 hdcp_int_val;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		rc = -EINVAL;
		goto error;
	}

	io = hdcp_ctrl->init_data.core_io;

	hdcp_int_val = DSS_REG_R(io, HDMI_HDCP_INT_CTRL);

	/* Ignore HDCP interrupts if HDCP is disabled */
	if (hdcp_ctrl->hdcp_state == HDCP_STATE_INACTIVE) {
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, HDCP_INT_CLR);
		return 0;
	}

	if (hdcp_int_val & BIT(0)) {
		/* AUTH_SUCCESS_INT: ack via BIT(1) */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(1)));
		DEV_INFO("%s: %s: AUTH_SUCCESS_INT received\n", __func__,
			HDCP_STATE_NAME);
		/* unblock the R0/R0' wait in authentication part I */
		if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATING)
			complete_all(&hdcp_ctrl->r0_checked);
	}

	if (hdcp_int_val & BIT(4)) {
		/* AUTH_FAIL_INT: ack via BIT(5) */
		u32 link_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);

		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(5)));
		DEV_INFO("%s: %s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
			__func__, HDCP_STATE_NAME, link_status);
		if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATED) {
			/* Inform HDMI Tx of the failure */
			queue_work(hdcp_ctrl->init_data.workq,
				&hdcp_ctrl->hdcp_int_work);
			/* todo: print debug log with auth fail reason */
		} else if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATING) {
			complete_all(&hdcp_ctrl->r0_checked);
		}

		/* Clear AUTH_FAIL_INFO as well (ack via BIT(7)) */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(7)));
	}

	if (hdcp_int_val & BIT(8)) {
		/* DDC_XFER_REQ_INT: ack via BIT(9) */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(9)));
		DEV_INFO("%s: %s: DDC_XFER_REQ_INT received\n", __func__,
			HDCP_STATE_NAME);
	}

	if (hdcp_int_val & BIT(12)) {
		/* DDC_XFER_DONE_INT: ack via BIT(13) */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(13)));
		DEV_INFO("%s: %s: DDC_XFER_DONE received\n", __func__,
			HDCP_STATE_NAME);
	}

error:
	return rc;
} /* hdmi_hdcp_isr */
+
+/*
+ * hdmi_hdcp_sysfs_rda_status() - sysfs read handler for the "status" node
+ *
+ * Prints the current hdcp_ctrl->hdcp_state (enum hdmi_hdcp_state) as a
+ * decimal integer followed by a newline. The init_data mutex serializes
+ * the read against state transitions made by the auth/interrupt paths.
+ *
+ * Return: number of bytes written to @buf, or -EINVAL if the HDCP
+ * feature data cannot be resolved from @dev.
+ */
+static ssize_t hdmi_hdcp_sysfs_rda_status(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdcp_ctrl->hdcp_state);
+	DEV_DBG("%s: '%d'\n", __func__, hdcp_ctrl->hdcp_state);
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+	return ret;
+} /* hdmi_hdcp_sysfs_rda_status */
+
+static ssize_t hdmi_hdcp_sysfs_rda_tp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+ struct hdmi_hdcp_ctrl *hdcp_ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+ if (!hdcp_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (hdcp_ctrl->tp_msgid) {
+ case DOWN_CHECK_TOPOLOGY:
+ case DOWN_REQUEST_TOPOLOGY:
+ buf[MSG_ID_IDX] = hdcp_ctrl->tp_msgid;
+ buf[RET_CODE_IDX] = HDCP_AUTHED;
+ ret = HEADER_LEN;
+
+ memcpy(buf + HEADER_LEN, &hdcp_ctrl->cached_tp,
+ sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
+
+ ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY);
+
+ /* clear the flag once data is read back to user space*/
+ hdcp_ctrl->tp_msgid = -1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+} /* hdmi_hdcp_sysfs_rda_tp*/
+
+/*
+ * hdmi_hdcp_sysfs_wta_tp() - sysfs write handler for the "tp" node
+ *
+ * Userspace writes a topology message id (the first byte of @buf)
+ * selecting what a subsequent read of "tp" should return. Only
+ * DOWN_CHECK_TOPOLOGY and DOWN_REQUEST_TOPOLOGY are accepted.
+ *
+ * Return: @count on success, -EINVAL on a bad pointer or unknown id.
+ */
+static ssize_t hdmi_hdcp_sysfs_wta_tp(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int msgid = 0;
+	ssize_t ret = count;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+	if (!hdcp_ctrl || !buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	msgid = buf[0];
+
+	switch (msgid) {
+	case DOWN_CHECK_TOPOLOGY:
+	case DOWN_REQUEST_TOPOLOGY:
+		hdcp_ctrl->tp_msgid = msgid;
+		break;
+	/* more cases added here */
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+} /* hdmi_hdcp_sysfs_wta_tp */
+
+static DEVICE_ATTR(status, 0444, hdmi_hdcp_sysfs_rda_status, NULL);
+static DEVICE_ATTR(tp, 0644, hdmi_hdcp_sysfs_rda_tp,
+ hdmi_hdcp_sysfs_wta_tp);
+
+
+static struct attribute *hdmi_hdcp_fs_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_tp.attr,
+ NULL,
+};
+
+static struct attribute_group hdmi_hdcp_fs_attr_group = {
+ .name = "hdcp",
+ .attrs = hdmi_hdcp_fs_attrs,
+};
+
+void hdmi_hdcp_deinit(void *input)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+
+ if (!hdcp_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ sysfs_remove_group(hdcp_ctrl->init_data.sysfs_kobj,
+ &hdmi_hdcp_fs_attr_group);
+
+ kfree(hdcp_ctrl);
+} /* hdmi_hdcp_deinit */
+
+/*
+ * hdmi_hdcp_init() - allocate and set up the HDCP 1.x feature context
+ *
+ * Validates init_data, allocates the control structure, creates the
+ * "hdcp" sysfs group and probes for the secure (TZ) HDCP service on
+ * pre-v4 HDMI Tx cores.
+ *
+ * Return: feature context pointer on success, NULL on failure.
+ */
+void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+	int ret;
+	static struct hdmi_hdcp_ops ops = {
+		.hdmi_hdcp_isr = hdmi_hdcp_isr,
+		.hdmi_hdcp_reauthenticate = hdmi_hdcp_reauthenticate,
+		.hdmi_hdcp_authenticate = hdmi_hdcp_authenticate,
+		.hdmi_hdcp_off = hdmi_hdcp_off
+	};
+
+	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
+		!init_data->mutex || !init_data->ddc_ctrl ||
+		!init_data->notify_status || !init_data->workq ||
+		!init_data->cb_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto error;
+	}
+
+	/* v4+ cores route HDCP registers through a dedicated io region */
+	if (init_data->hdmi_tx_ver >= HDMI_TX_VERSION_4
+		&& !init_data->hdcp_io) {
+		DEV_ERR("%s: hdcp_io required for HDMI Tx Ver 4\n", __func__);
+		goto error;
+	}
+
+	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: Out of memory\n", __func__);
+		goto error;
+	}
+
+	hdcp_ctrl->init_data = *init_data;
+	hdcp_ctrl->ops = &ops;
+	hdcp_ctrl->hdmi_tx_ver_4 =
+		(init_data->hdmi_tx_ver >= HDMI_TX_VERSION_4);
+
+	if (sysfs_create_group(init_data->sysfs_kobj,
+				&hdmi_hdcp_fs_attr_group)) {
+		DEV_ERR("%s: hdcp sysfs group creation failed\n", __func__);
+		/*
+		 * Do not hand back a half-initialized context: free it and
+		 * return NULL so the caller sees the failure.
+		 */
+		kfree(hdcp_ctrl);
+		hdcp_ctrl = NULL;
+		goto error;
+	}
+
+	INIT_DELAYED_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_int_work, hdmi_hdcp_int_work);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	init_completion(&hdcp_ctrl->r0_checked);
+
+	/* pre-v4 cores authenticate through the secure TZ HDCP service */
+	if (!hdcp_ctrl->hdmi_tx_ver_4) {
+		ret = scm_is_call_available(SCM_SVC_HDCP, SCM_CMD_HDCP);
+		if (ret <= 0) {
+			DEV_ERR("%s: secure hdcp service unavailable, ret = %d\n",
+				__func__, ret);
+		} else {
+			DEV_DBG("%s: tz_hdcp = 1\n", __func__);
+			hdcp_ctrl->tz_hdcp = 1;
+		}
+	}
+
+	DEV_DBG("%s: HDCP module initialized. HDCP_STATE=%s\n", __func__,
+		HDCP_STATE_NAME);
+
+error:
+	return (void *)hdcp_ctrl;
+} /* hdmi_hdcp_init */
+
+/*
+ * hdmi_hdcp_start() - hand back the HDCP 1.x feature ops
+ *
+ * Return: the ops table for this feature context, or NULL if @input is
+ * NULL (matches the guard style of every other entry point here).
+ */
+struct hdmi_hdcp_ops *hdmi_hdcp_start(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = input;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	return hdcp_ctrl->ops;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h
new file mode 100644
index 0000000..2276009
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2012, 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_HDCP_H__
+#define __MDSS_HDMI_HDCP_H__
+
+#include "mdss_hdmi_util.h"
+#include <video/msm_hdmi_modes.h>
+#include <soc/qcom/scm.h>
+
+enum hdmi_hdcp_state {
+ HDCP_STATE_INACTIVE,
+ HDCP_STATE_AUTHENTICATING,
+ HDCP_STATE_AUTHENTICATED,
+ HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_ENC_NONE,
+ HDCP_STATE_AUTH_ENC_1X,
+ HDCP_STATE_AUTH_ENC_2P2
+};
+
+struct hdmi_hdcp_init_data {
+ struct mdss_io_data *core_io;
+ struct mdss_io_data *qfprom_io;
+ struct mdss_io_data *hdcp_io;
+ struct mutex *mutex;
+ struct kobject *sysfs_kobj;
+ struct workqueue_struct *workq;
+ void *cb_data;
+ void (*notify_status)(void *cb_data, enum hdmi_hdcp_state status);
+ struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+ u32 phy_addr;
+ u32 hdmi_tx_ver;
+ struct msm_hdmi_mode_timing_info *timing;
+ bool tethered;
+};
+
+struct hdmi_hdcp_ops {
+ int (*hdmi_hdcp_isr)(void *ptr);
+ int (*hdmi_hdcp_reauthenticate)(void *input);
+ int (*hdmi_hdcp_authenticate)(void *hdcp_ctrl);
+ bool (*feature_supported)(void *input);
+ void (*hdmi_hdcp_off)(void *hdcp_ctrl);
+};
+
+void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data);
+void *hdmi_hdcp2p2_init(struct hdmi_hdcp_init_data *init_data);
+void hdmi_hdcp_deinit(void *input);
+void hdmi_hdcp2p2_deinit(void *input);
+
+struct hdmi_hdcp_ops *hdmi_hdcp_start(void *input);
+struct hdmi_hdcp_ops *hdmi_hdcp2p2_start(void *input);
+
+const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state);
+
+#endif /* __MDSS_HDMI_HDCP_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
new file mode 100644
index 0000000..8dce151
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -0,0 +1,1108 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/hdcp_qseecom.h>
+#include "mdss_hdmi_hdcp.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+#include "mdss_hdmi_util.h"
+
+/*
+ * Defined addresses and offsets of standard HDCP 2.2 sink registers
+ * for DDC, as defined in HDCP 2.2 spec section 2.14 table 2.7
+ */
+#define HDCP_SINK_DDC_SLAVE_ADDR 0x74 /* Sink DDC slave address */
+#define HDCP_SINK_DDC_HDCP2_VERSION 0x50 /* Does sink support HDCP2.2 */
+#define HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE 0x60 /* HDCP Tx writes here */
+#define HDCP_SINK_DDC_HDCP2_RXSTATUS 0x70 /* RxStatus, 2 bytes */
+#define HDCP_SINK_DDC_HDCP2_READ_MESSAGE 0x80 /* HDCP Tx reads here */
+
+#define HDCP2P2_DEFAULT_TIMEOUT 500
+
+/*
+ * HDCP 2.2 encryption requires the data encryption block that is present in
+ * HDMI controller version 4.0.0 and above
+ */
+#define MIN_HDMI_TX_MAJOR_VERSION 4
+
+enum hdmi_hdcp2p2_sink_status {
+ SINK_DISCONNECTED,
+ SINK_CONNECTED
+};
+
+enum hdmi_auth_status {
+ HDMI_HDCP_AUTH_STATUS_FAILURE,
+ HDMI_HDCP_AUTH_STATUS_SUCCESS
+};
+
+struct hdmi_hdcp2p2_ctrl {
+ atomic_t auth_state;
+ bool tethered;
+ enum hdmi_hdcp2p2_sink_status sink_status; /* Is sink connected */
+ struct hdmi_hdcp_init_data init_data; /* Feature data from HDMI drv */
+ struct mutex mutex; /* mutex to protect access to ctrl */
+ struct mutex msg_lock; /* mutex to protect access to msg buffer */
+ struct mutex wakeup_mutex; /* mutex to protect access to wakeup call*/
+ struct hdmi_hdcp_ops *ops;
+ void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
+ struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
+
+ enum hdcp_wakeup_cmd wakeup_cmd;
+ enum hdmi_auth_status auth_status;
+ char *send_msg_buf;
+ uint32_t send_msg_len;
+ uint32_t timeout;
+ uint32_t timeout_left;
+
+ struct task_struct *thread;
+ struct kthread_worker worker;
+ struct kthread_work status;
+ struct kthread_work auth;
+ struct kthread_work send_msg;
+ struct kthread_work recv_msg;
+ struct kthread_work link;
+ struct kthread_work poll;
+};
+
+static int hdmi_hdcp2p2_auth(struct hdmi_hdcp2p2_ctrl *ctrl);
+static void hdmi_hdcp2p2_send_msg(struct hdmi_hdcp2p2_ctrl *ctrl);
+static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl);
+static void hdmi_hdcp2p2_auth_status(struct hdmi_hdcp2p2_ctrl *ctrl);
+static int hdmi_hdcp2p2_link_check(struct hdmi_hdcp2p2_ctrl *ctrl);
+
+static inline bool hdmi_hdcp2p2_is_valid_state(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (ctrl->wakeup_cmd == HDCP_WKUP_CMD_AUTHENTICATE)
+ return true;
+
+ if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+ return true;
+
+ return false;
+}
+
+/*
+ * hdmi_hdcp2p2_copy_buf() - cache the message the HDCP library wants sent
+ *
+ * Takes a private copy of data->send_msg_buf under msg_lock so the
+ * send-message work item can forward it to the sink later without
+ * racing the library's buffer. A zero send_msg_len is a no-op.
+ *
+ * Return: 0 on success or nothing to copy, -ENOMEM on allocation failure.
+ */
+static int hdmi_hdcp2p2_copy_buf(struct hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdcp_wakeup_data *data)
+{
+	mutex_lock(&ctrl->msg_lock);
+
+	if (!data->send_msg_len) {
+		mutex_unlock(&ctrl->msg_lock);
+		return 0;
+	}
+
+	ctrl->send_msg_len = data->send_msg_len;
+
+	/* release any previously cached message before caching the new one */
+	kzfree(ctrl->send_msg_buf);
+
+	ctrl->send_msg_buf = kzalloc(data->send_msg_len, GFP_KERNEL);
+
+	if (!ctrl->send_msg_buf) {
+		mutex_unlock(&ctrl->msg_lock);
+		return -ENOMEM;
+	}
+
+	memcpy(ctrl->send_msg_buf, data->send_msg_buf, ctrl->send_msg_len);
+
+	mutex_unlock(&ctrl->msg_lock);
+
+	return 0;
+}
+
+static int hdmi_hdcp2p2_wakeup(struct hdcp_wakeup_data *data)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl;
+
+ if (!data) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ ctrl = data->context;
+ if (!ctrl) {
+ pr_err("invalid ctrl\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->wakeup_mutex);
+
+ pr_debug("cmd: %s, timeout %dms, tethered %d\n",
+ hdcp_cmd_to_str(data->cmd),
+ data->timeout, ctrl->tethered);
+
+ ctrl->wakeup_cmd = data->cmd;
+
+ if (data->timeout)
+ ctrl->timeout = data->timeout * 2;
+ else
+ ctrl->timeout = HDCP2P2_DEFAULT_TIMEOUT;
+
+ if (!hdmi_hdcp2p2_is_valid_state(ctrl)) {
+ pr_err("invalid state\n");
+ goto exit;
+ }
+
+ if (hdmi_hdcp2p2_copy_buf(ctrl, data))
+ goto exit;
+
+ if (ctrl->wakeup_cmd == HDCP_WKUP_CMD_STATUS_SUCCESS)
+ ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_SUCCESS;
+ else if (ctrl->wakeup_cmd == HDCP_WKUP_CMD_STATUS_FAILED)
+ ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_FAILURE;
+
+ if (ctrl->tethered)
+ goto exit;
+
+ switch (ctrl->wakeup_cmd) {
+ case HDCP_WKUP_CMD_SEND_MESSAGE:
+ kthread_queue_work(&ctrl->worker, &ctrl->send_msg);
+ break;
+ case HDCP_WKUP_CMD_RECV_MESSAGE:
+ kthread_queue_work(&ctrl->worker, &ctrl->recv_msg);
+ break;
+ case HDCP_WKUP_CMD_STATUS_SUCCESS:
+ case HDCP_WKUP_CMD_STATUS_FAILED:
+ kthread_queue_work(&ctrl->worker, &ctrl->status);
+ break;
+ case HDCP_WKUP_CMD_LINK_POLL:
+ kthread_queue_work(&ctrl->worker, &ctrl->poll);
+ break;
+ case HDCP_WKUP_CMD_AUTHENTICATE:
+ kthread_queue_work(&ctrl->worker, &ctrl->auth);
+ break;
+ default:
+ pr_err("invalid wakeup command %d\n", ctrl->wakeup_cmd);
+ }
+exit:
+ mutex_unlock(&ctrl->wakeup_mutex);
+ return 0;
+}
+
+static inline int hdmi_hdcp2p2_wakeup_lib(struct hdmi_hdcp2p2_ctrl *ctrl,
+ struct hdcp_lib_wakeup_data *data)
+{
+ int rc = 0;
+
+ if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
+ data && (data->cmd != HDCP_LIB_WKUP_CMD_INVALID)) {
+ rc = ctrl->lib->wakeup(data);
+ if (rc)
+ pr_err("error sending %s to lib\n",
+ hdcp_lib_cmd_to_str(data->cmd));
+ }
+
+ return rc;
+}
+
+/*
+ * hdmi_hdcp2p2_run() - service wakeup commands inline (tethered mode)
+ *
+ * In tethered mode no kthread work is queued; instead this loop keeps
+ * dispatching ctrl->wakeup_cmd until a terminal command (auth status,
+ * link poll, or anything unrecognized) is reached. The send/recv
+ * handlers call back into the HDCP library, which may set a new
+ * wakeup_cmd via hdmi_hdcp2p2_wakeup() — that is why the loop
+ * re-examines wakeup_cmd on every iteration.
+ */
+static void hdmi_hdcp2p2_run(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	while (1) {
+		switch (ctrl->wakeup_cmd) {
+		case HDCP_WKUP_CMD_SEND_MESSAGE:
+			/* consume the command before the handler can set a new one */
+			ctrl->wakeup_cmd = HDCP_WKUP_CMD_INVALID;
+			hdmi_hdcp2p2_send_msg(ctrl);
+			break;
+		case HDCP_WKUP_CMD_RECV_MESSAGE:
+			ctrl->wakeup_cmd = HDCP_WKUP_CMD_INVALID;
+			hdmi_hdcp2p2_recv_msg(ctrl);
+			break;
+		case HDCP_WKUP_CMD_STATUS_SUCCESS:
+		case HDCP_WKUP_CMD_STATUS_FAILED:
+			hdmi_hdcp2p2_auth_status(ctrl);
+			goto exit;
+		case HDCP_WKUP_CMD_LINK_POLL:
+			hdmi_hdcp2p2_link_check(ctrl);
+			goto exit;
+		default:
+			goto exit;
+		}
+	}
+exit:
+	ctrl->wakeup_cmd = HDCP_WKUP_CMD_INVALID;
+}
+
+int hdmi_hdcp2p2_authenticate_tethered(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ int rc = 0;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = hdmi_hdcp2p2_auth(ctrl);
+ if (rc) {
+ pr_err("auth failed %d\n", rc);
+ goto exit;
+ }
+
+ hdmi_hdcp2p2_run(ctrl);
+exit:
+ return rc;
+}
+
+static void hdmi_hdcp2p2_reset(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ ctrl->sink_status = SINK_DISCONNECTED;
+ atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+}
+
+static void hdmi_hdcp2p2_off(void *input)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = (struct hdmi_hdcp2p2_ctrl *)input;
+ struct hdcp_wakeup_data cdata = {HDCP_WKUP_CMD_AUTHENTICATE};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ hdmi_hdcp2p2_reset(ctrl);
+
+ kthread_flush_worker(&ctrl->worker);
+
+ hdmi_hdcp2p2_ddc_disable(ctrl->init_data.ddc_ctrl);
+
+ if (ctrl->tethered) {
+ hdmi_hdcp2p2_auth(ctrl);
+ } else {
+ cdata.context = input;
+ hdmi_hdcp2p2_wakeup(&cdata);
+ }
+}
+
+/*
+ * hdmi_hdcp2p2_authenticate() - kick off HDCP 2.2 authentication
+ *
+ * Enables the auth interrupts, flushes any in-flight work, quiesces the
+ * DDC block and then either runs authentication inline (tethered) or
+ * queues it to the worker thread.
+ *
+ * Return: 0 on success, -EINVAL if @input is NULL.
+ */
+static int hdmi_hdcp2p2_authenticate(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdcp_wakeup_data cdata = {HDCP_WKUP_CMD_AUTHENTICATE};
+	u32 regval;
+	int rc = 0;
+
+	/* guard like the other entry points; ctrl is dereferenced below */
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	/* Enable authentication success interrupt */
+	regval = DSS_REG_R(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2);
+	regval |= BIT(1) | BIT(2);
+
+	DSS_REG_W(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2, regval);
+
+	kthread_flush_worker(&ctrl->worker);
+
+	ctrl->sink_status = SINK_CONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
+
+	/* make sure ddc is idle before starting hdcp 2.2 authentication */
+	hdmi_scrambler_ddc_disable(ctrl->init_data.ddc_ctrl);
+	hdmi_hdcp2p2_ddc_disable(ctrl->init_data.ddc_ctrl);
+
+	if (ctrl->tethered) {
+		hdmi_hdcp2p2_authenticate_tethered(ctrl);
+	} else {
+		cdata.context = input;
+		hdmi_hdcp2p2_wakeup(&cdata);
+	}
+
+	return rc;
+}
+
+static int hdmi_hdcp2p2_reauthenticate(void *input)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = (struct hdmi_hdcp2p2_ctrl *)input;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ hdmi_hdcp2p2_reset((struct hdmi_hdcp2p2_ctrl *)input);
+
+ return hdmi_hdcp2p2_authenticate(input);
+}
+
+static ssize_t hdmi_hdcp2p2_sysfs_rda_tethered(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_hdcp2p2_ctrl *ctrl =
+ hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP2P2);
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->mutex);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", ctrl->tethered);
+ mutex_unlock(&ctrl->mutex);
+
+ return ret;
+}
+
+/*
+ * hdmi_hdcp2p2_sysfs_wta_tethered() - sysfs write handler for "tethered"
+ *
+ * Parses a boolean from userspace and records whether the HDCP 2.2
+ * state machine should run inline (tethered) or on the worker thread.
+ *
+ * Return: @count (parse errors are logged but still consume the write),
+ * or -EINVAL if the feature data cannot be resolved from @dev.
+ */
+static ssize_t hdmi_hdcp2p2_sysfs_wta_tethered(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP2P2);
+	int rc, tethered;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->mutex);
+	rc = kstrtoint(buf, 10, &tethered);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		goto exit;
+	}
+
+	ctrl->tethered = !!tethered;
+
+	/*
+	 * TODO(review): propagate the mode to the HDCP library once its
+	 * update_exec_type hook is available:
+	 * if (ctrl->lib && ctrl->lib->update_exec_type && ctrl->lib_ctx)
+	 *	ctrl->lib->update_exec_type(ctrl->lib_ctx, ctrl->tethered);
+	 */
+exit:
+	mutex_unlock(&ctrl->mutex);
+
+	return count;
+}
+
+/*
+ * hdmi_hdcp2p2_sysfs_wta_min_level_change() - handle min encryption level
+ *
+ * Parses the requested minimum encryption level (0 = none, 1 = HDCP 1.x,
+ * 2 = HDCP 2.2), asks the HDCP library to re-query the stream type and,
+ * for recognized levels, notifies the HDMI Tx driver of the resulting
+ * encryption state.
+ *
+ * Return: @count on success, negative errno on failure.
+ */
+static ssize_t hdmi_hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP2P2);
+	struct hdcp_lib_wakeup_data cdata = {
+		HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE};
+	bool enc_notify = true;
+	enum hdmi_hdcp_state enc_lvl;
+	int min_enc_lvl;
+	int rc;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = kstrtoint(buf, 10, &min_enc_lvl);
+	if (rc) {
+		/* pr_err for consistency: pr_fmt already prepends __func__ */
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		goto exit;
+	}
+
+	switch (min_enc_lvl) {
+	case 0:
+		enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+		break;
+	case 1:
+		enc_lvl = HDCP_STATE_AUTH_ENC_1X;
+		break;
+	case 2:
+		enc_lvl = HDCP_STATE_AUTH_ENC_2P2;
+		break;
+	default:
+		/* unknown level: still query the library, but skip notify */
+		enc_notify = false;
+	}
+
+	pr_debug("enc level changed %d\n", min_enc_lvl);
+
+	cdata.context = ctrl->lib_ctx;
+	hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	if (ctrl->tethered)
+		hdmi_hdcp2p2_run(ctrl);
+
+	if (enc_notify && ctrl->init_data.notify_status)
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
+	rc = count;
+exit:
+	return rc;
+}
+
+static void hdmi_hdcp2p2_auth_failed(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ hdmi_hdcp2p2_ddc_disable(ctrl->init_data.ddc_ctrl);
+
+ /* notify hdmi tx about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL);
+}
+
+static int hdmi_hdcp2p2_ddc_read_message(struct hdmi_hdcp2p2_ctrl *ctrl,
+ u8 *buf, int size, u32 timeout)
+{
+ struct hdmi_tx_ddc_data ddc_data;
+ int rc;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return -EINVAL;
+ }
+
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+ ddc_data.offset = HDCP_SINK_DDC_HDCP2_READ_MESSAGE;
+ ddc_data.data_buf = buf;
+ ddc_data.data_len = size;
+ ddc_data.request_len = size;
+ ddc_data.retry = 0;
+ ddc_data.hard_timeout = timeout;
+ ddc_data.what = "HDCP2ReadMessage";
+
+ ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ pr_debug("read msg timeout %dms\n", timeout);
+
+ rc = hdmi_ddc_read(ctrl->init_data.ddc_ctrl);
+ if (rc)
+ pr_err("Cannot read HDCP message register\n");
+
+ ctrl->timeout_left = ctrl->init_data.ddc_ctrl->ddc_data.timeout_left;
+
+ return rc;
+}
+
+int hdmi_hdcp2p2_ddc_write_message(struct hdmi_hdcp2p2_ctrl *ctrl,
+ u8 *buf, size_t size)
+{
+ struct hdmi_tx_ddc_data ddc_data;
+ int rc;
+
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+ ddc_data.offset = HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE;
+ ddc_data.data_buf = buf;
+ ddc_data.data_len = size;
+ ddc_data.hard_timeout = ctrl->timeout;
+ ddc_data.what = "HDCP2WriteMessage";
+
+ ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_write(ctrl->init_data.ddc_ctrl);
+ if (rc)
+ pr_err("Cannot write HDCP message register\n");
+
+ ctrl->timeout_left = ctrl->init_data.ddc_ctrl->ddc_data.timeout_left;
+
+ return rc;
+}
+
+static int hdmi_hdcp2p2_read_version(struct hdmi_hdcp2p2_ctrl *ctrl,
+ u8 *hdcp2version)
+{
+ struct hdmi_tx_ddc_data ddc_data;
+ int rc;
+
+ memset(&ddc_data, 0, sizeof(ddc_data));
+ ddc_data.dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+ ddc_data.offset = HDCP_SINK_DDC_HDCP2_VERSION;
+ ddc_data.data_buf = hdcp2version;
+ ddc_data.data_len = 1;
+ ddc_data.request_len = 1;
+ ddc_data.retry = 1;
+ ddc_data.what = "HDCP2Version";
+
+ ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+ rc = hdmi_ddc_read(ctrl->init_data.ddc_ctrl);
+ if (rc) {
+ pr_err("Cannot read HDCP2Version register");
+ return rc;
+ }
+
+ pr_debug("Read HDCP2Version as %u\n", *hdcp2version);
+ return rc;
+}
+
+static DEVICE_ATTR(min_level_change, 0200, NULL,
+ hdmi_hdcp2p2_sysfs_wta_min_level_change);
+static DEVICE_ATTR(tethered, 0644, hdmi_hdcp2p2_sysfs_rda_tethered,
+ hdmi_hdcp2p2_sysfs_wta_tethered);
+
+static struct attribute *hdmi_hdcp2p2_fs_attrs[] = {
+ &dev_attr_min_level_change.attr,
+ &dev_attr_tethered.attr,
+ NULL,
+};
+
+static struct attribute_group hdmi_hdcp2p2_fs_attr_group = {
+ .name = "hdcp2p2",
+ .attrs = hdmi_hdcp2p2_fs_attrs,
+};
+
+static bool hdmi_hdcp2p2_feature_supported(void *input)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = input;
+ struct hdcp_txmtr_ops *lib = NULL;
+ bool supported = false;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ goto end;
+ }
+
+ lib = ctrl->lib;
+ if (!lib) {
+ pr_err("invalid lib ops data\n");
+ goto end;
+ }
+
+ if (lib->feature_supported)
+ supported = lib->feature_supported(
+ ctrl->lib_ctx);
+end:
+ return supported;
+}
+
+static void hdmi_hdcp2p2_send_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ int rc = 0;
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+ uint32_t msglen;
+ char *msg = NULL;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ goto exit;
+ }
+
+ mutex_lock(&ctrl->msg_lock);
+ msglen = ctrl->send_msg_len;
+
+ if (!msglen) {
+ mutex_unlock(&ctrl->msg_lock);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ msg = kzalloc(msglen, GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&ctrl->msg_lock);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(msg, ctrl->send_msg_buf, msglen);
+ mutex_unlock(&ctrl->msg_lock);
+
+ /* Forward the message to the sink */
+ rc = hdmi_hdcp2p2_ddc_write_message(ctrl, msg, (size_t)msglen);
+ if (rc) {
+ pr_err("Error sending msg to sink %d\n", rc);
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED;
+ } else {
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS;
+ cdata.timeout = ctrl->timeout_left;
+ }
+exit:
+ kfree(msg);
+
+ hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static void hdmi_hdcp2p2_send_msg_work(struct kthread_work *work)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct hdmi_hdcp2p2_ctrl, send_msg);
+
+ hdmi_hdcp2p2_send_msg(ctrl);
+}
+
+static void hdmi_hdcp2p2_link_cb(void *data)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = data;
+
+ if (!ctrl) {
+ pr_debug("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+ kthread_queue_work(&ctrl->worker, &ctrl->link);
+}
+
+static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ int rc = 0, timeout_hsync;
+ char *recvd_msg_buf = NULL;
+ struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+ struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ ddc_ctrl = ctrl->init_data.ddc_ctrl;
+ if (!ddc_ctrl) {
+ pr_err("invalid ddc ctrl\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ goto exit;
+ }
+ hdmi_ddc_config(ddc_ctrl);
+
+ ddc_data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+ memset(ddc_data, 0, sizeof(*ddc_data));
+
+ timeout_hsync = hdmi_utils_get_timeout_in_hysnc(
+ ctrl->init_data.timing, ctrl->timeout);
+
+ if (timeout_hsync <= 0) {
+ pr_err("err in timeout hsync calc\n");
+ timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+ }
+
+ pr_debug("timeout for rxstatus %dms, %d hsync\n",
+ ctrl->timeout, timeout_hsync);
+
+ ddc_data->intr_mask = RXSTATUS_MESSAGE_SIZE | RXSTATUS_REAUTH_REQ;
+ ddc_data->timeout_ms = ctrl->timeout;
+ ddc_data->timeout_hsync = timeout_hsync;
+ ddc_data->periodic_timer_hsync = timeout_hsync / 20;
+ ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+ ddc_data->wait = true;
+
+ rc = hdmi_hdcp2p2_ddc_read_rxstatus(ddc_ctrl);
+ if (rc) {
+ pr_err("error reading rxstatus %d\n", rc);
+ goto exit;
+ }
+
+ if (ddc_data->reauth_req) {
+ ddc_data->reauth_req = false;
+
+ pr_debug("reauth triggered by sink\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ ctrl->timeout_left = ddc_data->timeout_left;
+
+ pr_debug("timeout left after rxstatus %dms, msg size %d\n",
+ ctrl->timeout_left, ddc_data->message_size);
+
+ if (!ddc_data->message_size) {
+ pr_err("recvd invalid message size\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+ if (!recvd_msg_buf) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ rc = hdmi_hdcp2p2_ddc_read_message(ctrl, recvd_msg_buf,
+ ddc_data->message_size, ctrl->timeout_left);
+ if (rc) {
+ pr_err("error reading message %d\n", rc);
+ goto exit;
+ }
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+ cdata.recvd_msg_buf = recvd_msg_buf;
+ cdata.recvd_msg_len = ddc_data->message_size;
+ cdata.timeout = ctrl->timeout_left;
+exit:
+ if (rc == -ETIMEDOUT)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+ else if (rc)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+
+ hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ kfree(recvd_msg_buf);
+}
+
+static void hdmi_hdcp2p2_recv_msg_work(struct kthread_work *work)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct hdmi_hdcp2p2_ctrl, recv_msg);
+
+ hdmi_hdcp2p2_recv_msg(ctrl);
+}
+
+static int hdmi_hdcp2p2_link_check(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+ struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+ int timeout_hsync;
+
+ ddc_ctrl = ctrl->init_data.ddc_ctrl;
+ if (!ddc_ctrl)
+ return -EINVAL;
+
+ hdmi_ddc_config(ddc_ctrl);
+
+ ddc_data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+ memset(ddc_data, 0, sizeof(*ddc_data));
+
+ timeout_hsync = hdmi_utils_get_timeout_in_hysnc(
+ ctrl->init_data.timing, jiffies_to_msecs(HZ / 2));
+
+ if (timeout_hsync <= 0) {
+ pr_err("err in timeout hsync calc\n");
+ timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+ }
+ pr_debug("timeout for rxstatus %d hsyncs\n", timeout_hsync);
+
+ ddc_data->intr_mask = RXSTATUS_READY | RXSTATUS_MESSAGE_SIZE |
+ RXSTATUS_REAUTH_REQ;
+ ddc_data->timeout_hsync = timeout_hsync;
+ ddc_data->periodic_timer_hsync = timeout_hsync;
+ ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+ ddc_data->link_cb = hdmi_hdcp2p2_link_cb;
+ ddc_data->link_data = ctrl;
+
+ return hdmi_hdcp2p2_ddc_read_rxstatus(ddc_ctrl);
+}
+
+static void hdmi_hdcp2p2_poll_work(struct kthread_work *work)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct hdmi_hdcp2p2_ctrl, poll);
+
+ hdmi_hdcp2p2_link_check(ctrl);
+}
+
+static void hdmi_hdcp2p2_auth_status(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return;
+ }
+
+ if (ctrl->auth_status == HDMI_HDCP_AUTH_STATUS_SUCCESS) {
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTHENTICATED);
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
+
+ if (ctrl->tethered)
+ hdmi_hdcp2p2_link_check(ctrl);
+ } else {
+ hdmi_hdcp2p2_auth_failed(ctrl);
+ }
+}
+
+static void hdmi_hdcp2p2_auth_status_work(struct kthread_work *work)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct hdmi_hdcp2p2_ctrl, status);
+
+ hdmi_hdcp2p2_auth_status(ctrl);
+}
+
+static void hdmi_hdcp2p2_link_work(struct kthread_work *work)
+{
+ int rc = 0;
+ struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct hdmi_hdcp2p2_ctrl, link);
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+ char *recvd_msg_buf = NULL;
+ struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+ struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ ddc_ctrl = ctrl->init_data.ddc_ctrl;
+ if (!ddc_ctrl) {
+ rc = -EINVAL;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ goto exit;
+ }
+
+ ddc_data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+ if (ddc_data->reauth_req) {
+ pr_debug("reauth triggered by sink\n");
+
+ ddc_data->reauth_req = false;
+ rc = -ENOLINK;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ goto exit;
+ }
+
+ if (ddc_data->ready && ddc_data->message_size) {
+ pr_debug("topology changed. rxstatus msg size %d\n",
+ ddc_data->message_size);
+
+ ddc_data->ready = false;
+
+ recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+ if (!recvd_msg_buf) {
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ goto exit;
+ }
+
+ rc = hdmi_hdcp2p2_ddc_read_message(ctrl, recvd_msg_buf,
+ ddc_data->message_size, HDCP2P2_DEFAULT_TIMEOUT);
+ if (rc) {
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ pr_err("error reading message %d\n", rc);
+ } else {
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+ cdata.recvd_msg_buf = recvd_msg_buf;
+ cdata.recvd_msg_len = ddc_data->message_size;
+ }
+
+ ddc_data->message_size = 0;
+ }
+exit:
+ hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ kfree(recvd_msg_buf);
+
+ if (ctrl->tethered)
+ hdmi_hdcp2p2_run(ctrl);
+
+ if (rc) {
+ hdmi_hdcp2p2_auth_failed(ctrl);
+ return;
+ }
+}
+
+static int hdmi_hdcp2p2_auth(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+ int rc = 0;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_START;
+ else
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+
+ rc = hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ if (rc)
+ hdmi_hdcp2p2_auth_failed(ctrl);
+
+ return rc;
+}
+
+static void hdmi_hdcp2p2_auth_work(struct kthread_work *work)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct hdmi_hdcp2p2_ctrl, auth);
+
+ hdmi_hdcp2p2_auth(ctrl);
+}
+
+void hdmi_hdcp2p2_deinit(void *input)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl = (struct hdmi_hdcp2p2_ctrl *)input;
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ cdata.context = ctrl->lib_ctx;
+ hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ kthread_stop(ctrl->thread);
+
+ sysfs_remove_group(ctrl->init_data.sysfs_kobj,
+ &hdmi_hdcp2p2_fs_attr_group);
+
+ mutex_destroy(&ctrl->mutex);
+ mutex_destroy(&ctrl->msg_lock);
+ mutex_destroy(&ctrl->wakeup_mutex);
+ kfree(ctrl);
+}
+
+/*
+ * hdmi_hdcp2p2_init() - allocate and set up the HDCP 2.2 feature context
+ *
+ * Validates init_data, creates the "hdcp2p2" sysfs group, registers
+ * with the HDCP 2.2 trustzone library and spawns the worker thread
+ * that services the authentication/message work items.
+ *
+ * Return: feature context pointer on success, ERR_PTR() on failure.
+ */
+void *hdmi_hdcp2p2_init(struct hdmi_hdcp_init_data *init_data)
+{
+	int rc;
+	struct hdmi_hdcp2p2_ctrl *ctrl;
+	static struct hdmi_hdcp_ops ops = {
+		.hdmi_hdcp_reauthenticate = hdmi_hdcp2p2_reauthenticate,
+		.hdmi_hdcp_authenticate = hdmi_hdcp2p2_authenticate,
+		.feature_supported = hdmi_hdcp2p2_feature_supported,
+		.hdmi_hdcp_off = hdmi_hdcp2p2_off
+	};
+
+	static struct hdcp_client_ops client_ops = {
+		.wakeup = hdmi_hdcp2p2_wakeup,
+	};
+
+	static struct hdcp_txmtr_ops txmtr_ops;
+	struct hdcp_register_data register_data;
+
+	pr_debug("HDCP2P2 feature initialization\n");
+
+	if (!init_data || !init_data->core_io || !init_data->mutex ||
+		!init_data->ddc_ctrl || !init_data->notify_status ||
+		!init_data->workq || !init_data->cb_data) {
+		pr_err("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (init_data->hdmi_tx_ver < MIN_HDMI_TX_MAJOR_VERSION) {
+		pr_err("HDMI Tx does not support HDCP 2.2\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	ctrl->init_data = *init_data;
+	ctrl->lib = &txmtr_ops;
+	ctrl->tethered = init_data->tethered;
+
+	rc = sysfs_create_group(init_data->sysfs_kobj,
+		&hdmi_hdcp2p2_fs_attr_group);
+	if (rc) {
+		pr_err("hdcp2p2 sysfs group creation failed\n");
+		goto error;
+	}
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+
+	ctrl->ops = &ops;
+	mutex_init(&ctrl->mutex);
+	mutex_init(&ctrl->msg_lock);
+	mutex_init(&ctrl->wakeup_mutex);
+
+	register_data.hdcp_ctx = &ctrl->lib_ctx;
+	register_data.client_ops = &client_ops;
+	register_data.txmtr_ops = &txmtr_ops;
+	register_data.client_ctx = ctrl;
+	/*
+	 * TODO(review): pass tethered mode to the library once the
+	 * register_data.tethered field is available:
+	 * register_data.tethered = ctrl->tethered;
+	 */
+
+	rc = hdcp_library_register(&register_data);
+	if (rc) {
+		pr_err("Unable to register with HDCP 2.2 library\n");
+		goto error_sysfs;
+	}
+
+	kthread_init_worker(&ctrl->worker);
+
+	kthread_init_work(&ctrl->auth, hdmi_hdcp2p2_auth_work);
+	kthread_init_work(&ctrl->send_msg, hdmi_hdcp2p2_send_msg_work);
+	kthread_init_work(&ctrl->recv_msg, hdmi_hdcp2p2_recv_msg_work);
+	kthread_init_work(&ctrl->status, hdmi_hdcp2p2_auth_status_work);
+	kthread_init_work(&ctrl->link, hdmi_hdcp2p2_link_work);
+	kthread_init_work(&ctrl->poll, hdmi_hdcp2p2_poll_work);
+
+	ctrl->thread = kthread_run(kthread_worker_fn,
+		&ctrl->worker, "hdmi_hdcp2p2");
+
+	if (IS_ERR(ctrl->thread)) {
+		pr_err("unable to start hdcp2p2 thread\n");
+		rc = PTR_ERR(ctrl->thread);
+		ctrl->thread = NULL;
+		goto error_sysfs;
+	}
+
+	return ctrl;
+error_sysfs:
+	/* undo what succeeded so failures do not leak sysfs/mutex state */
+	sysfs_remove_group(init_data->sysfs_kobj,
+		&hdmi_hdcp2p2_fs_attr_group);
+	mutex_destroy(&ctrl->mutex);
+	mutex_destroy(&ctrl->msg_lock);
+	mutex_destroy(&ctrl->wakeup_mutex);
+error:
+	kfree(ctrl);
+	return ERR_PTR(rc);
+}
+
+static bool hdmi_hdcp2p2_supported(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ u8 hdcp2version;
+
+ int rc = hdmi_hdcp2p2_read_version(ctrl, &hdcp2version);
+
+ if (rc)
+ goto error;
+
+ if (hdcp2version & BIT(2)) {
+ pr_debug("Sink is HDCP 2.2 capable\n");
+ return true;
+ }
+
+error:
+ pr_debug("Sink is not HDCP 2.2 capable\n");
+ return false;
+}
+
+/*
+ * hdmi_hdcp2p2_start() - probe the sink and hand back HDCP 2.2 ops
+ *
+ * Return: the feature ops when the sink advertises HDCP 2.2 support via
+ * its HDCP2Version DDC register, NULL otherwise or if @input is NULL.
+ */
+struct hdmi_hdcp_ops *hdmi_hdcp2p2_start(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = input;
+
+	/* hdmi_hdcp2p2_supported() dereferences ctrl; guard it here */
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return NULL;
+	}
+
+	pr_debug("Checking sink capability\n");
+	if (hdmi_hdcp2p2_supported(ctrl))
+		return ctrl->ops;
+	else
+		return NULL;
+
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_mhl.h b/drivers/video/fbdev/msm/mdss_hdmi_mhl.h
new file mode 100644
index 0000000..924a1a0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_mhl.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_HDMI_MHL_H__
+#define __MDSS_HDMI_MHL_H__
+
+#include <linux/platform_device.h>
+
+/**
+ * struct msm_hdmi_mhl_ops - callbacks exposed by the HDMI TX driver to MHL
+ * @tmds_enabled: query whether the TMDS output is currently enabled
+ * @set_mhl_max_pclk: cap the maximum pixel clock usable in MHL mode
+ *                    (units presumably kHz — confirm against the provider)
+ * @set_upstream_hpd: assert (1) or deassert (0) the upstream HPD
+ */
+struct msm_hdmi_mhl_ops {
+	u8 (*tmds_enabled)(struct platform_device *pdev);
+	int (*set_mhl_max_pclk)(struct platform_device *pdev, u32 max_val);
+	int (*set_upstream_hpd)(struct platform_device *pdev, uint8_t on);
+};
+
+/*
+ * Register an MHL client against the HDMI TX platform device; expected to
+ * populate @ops (implementation lives outside this header).
+ */
+int msm_hdmi_register_mhl(struct platform_device *pdev,
+	struct msm_hdmi_mhl_ops *ops, void *data);
+#endif /* __MDSS_HDMI_MHL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_panel.c b/drivers/video/fbdev/msm/mdss_hdmi_panel.c
new file mode 100644
index 0000000..3823d3b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_panel.c
@@ -0,0 +1,932 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/types.h>
+
+#include "video/msm_hdmi_modes.h"
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss.h"
+#include "mdss_hdmi_util.h"
+#include "mdss_panel.h"
+#include "mdss_hdmi_panel.h"
+
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+
+#define HDMI_TX_KHZ_TO_HZ 1000U
+
+/* AVI INFOFRAME DATA */
+#define NUM_MODES_AVI 20
+#define AVI_MAX_DATA_BYTES 13
+
+/* Line numbers at which AVI Infoframe and Vendor Infoframe will be sent */
+#define AVI_IFRAME_LINE_NUMBER 1
+#define VENDOR_IFRAME_LINE_NUMBER 3
+
+#define IFRAME_CHECKSUM_32(d) \
+ ((d & 0xff) + ((d >> 8) & 0xff) + \
+ ((d >> 16) & 0xff) + ((d >> 24) & 0xff))
+
+#define IFRAME_PACKET_OFFSET 0x80
+/*
+ * InfoFrame Type Code:
+ * 0x0 - Reserved
+ * 0x1 - Vendor Specific
+ * 0x2 - Auxiliary Video Information
+ * 0x3 - Source Product Description
+ * 0x4 - AUDIO
+ * 0x5 - MPEG Source
+ * 0x6 - NTSC VBI
+ * 0x7 - 0xFF - Reserved
+ */
+#define AVI_IFRAME_TYPE 0x2
+#define AVI_IFRAME_VERSION 0x2
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+
+/* AVI Infoframe data byte 3, bit 7 (msb) represents ITC bit */
+#define SET_ITC_BIT(byte) (byte = (byte | BIT(7)))
+#define CLR_ITC_BIT(byte) (byte = (byte & ~BIT(7)))
+
+/*
+ * CN represents IT content type, if ITC bit in infoframe data byte 3
+ * is set, CN bits will represent content type as below:
+ * 0b00 Graphics
+ * 0b01 Photo
+ * 0b10 Cinema
+ * 0b11 Game
+ */
+#define CONFIG_CN_BITS(bits, byte) \
+ (byte = (byte & ~(BIT(4) | BIT(5))) |\
+ ((bits & (BIT(0) | BIT(1))) << 4))
+
+/* Bar information encoded into AVI infoframe data bytes 6..13. */
+struct hdmi_avi_iframe_bar_info {
+	bool vert_binfo_present;	/* vertical bar info is valid */
+	bool horz_binfo_present;	/* horizontal bar info is valid */
+	u32 end_of_top_bar;
+	u32 start_of_bottom_bar;
+	u32 end_of_left_bar;
+	u32 start_of_right_bar;
+};
+
+/* Per-mode settings that get packed into the AVI infoframe payload. */
+struct hdmi_avi_infoframe_config {
+	u32 pixel_format;		/* MDP_* output pixel format */
+	u32 scan_info;			/* over/underscan info from panel data */
+	bool act_fmt_info_present;	/* active format info bit (data byte 1) */
+	u32 colorimetry_info;
+	u32 ext_colorimetry_info;
+	u32 rgb_quantization_range;	/* enum hdmi_quantization_range */
+	u32 yuv_quantization_range;	/* enum hdmi_quantization_range */
+	u32 scaling_info;		/* enum hdmi_scaling_info */
+	bool is_it_content;		/* drives the ITC bit in data byte 3 */
+	u8 content_type;		/* CN bits: graphics/photo/cinema/game */
+	u8 pixel_rpt_factor;
+	struct hdmi_avi_iframe_bar_info bar_info;
+};
+
+/* Resolved video timing plus the AVI infoframe configuration for it. */
+struct hdmi_video_config {
+	struct msm_hdmi_mode_timing_info *timing;
+	struct hdmi_avi_infoframe_config avi_iframe;
+};
+
+/* Runtime context of the HDMI panel abstraction. */
+struct hdmi_panel {
+	struct mdss_io_data *io;		/* mapped HDMI TX core registers */
+	struct hdmi_util_ds_data *ds_data;	/* downstream (sink) data */
+	struct hdmi_panel_data *data;		/* client-provided panel data */
+	struct hdmi_video_config vid_cfg;	/* current timing + AVI config */
+	struct hdmi_tx_ddc_ctrl *ddc;		/* DDC/SCDC channel */
+	u32 version;				/* HDMI TX hardware version */
+	u32 vic;				/* active video id code */
+	u8 *spd_vendor_name;
+	u8 *spd_product_description;
+	bool on;				/* panel is powered on */
+	bool scrambler_enabled;			/* TMDS scrambling active */
+};
+
+/* Zero-based indices for AVI infoframe data bytes 1..13. */
+enum {
+	DATA_BYTE_1,
+	DATA_BYTE_2,
+	DATA_BYTE_3,
+	DATA_BYTE_4,
+	DATA_BYTE_5,
+	DATA_BYTE_6,
+	DATA_BYTE_7,
+	DATA_BYTE_8,
+	DATA_BYTE_9,
+	DATA_BYTE_10,
+	DATA_BYTE_11,
+	DATA_BYTE_12,
+	DATA_BYTE_13,
+};
+
+/* Quantization range values used for data bytes 3 and 5. */
+enum hdmi_quantization_range {
+	HDMI_QUANTIZATION_DEFAULT,
+	HDMI_QUANTIZATION_LIMITED_RANGE,
+	HDMI_QUANTIZATION_FULL_RANGE
+};
+
+/* Non-uniform scaling info for AVI infoframe data byte 3, bits 0:1. */
+enum hdmi_scaling_info {
+	HDMI_SCALING_NONE,
+	HDMI_SCALING_HORZ,
+	HDMI_SCALING_VERT,
+	HDMI_SCALING_HORZ_VERT,
+};
+
+/**
+ * hdmi_panel_get_vic() - resolve the video id code for a panel configuration
+ * @pinfo: mdss panel information (resolution, porches, clock rate)
+ * @ds_data: downstream data used to validate supported modes
+ *
+ * If @pinfo carries an explicit vic it is validated against the supported
+ * mode table; otherwise a timing description is assembled from the raw
+ * panel parameters and matched via hdmi_get_video_id_code().
+ *
+ * Return: positive vic on success, negative error code on failure.
+ */
+static int hdmi_panel_get_vic(struct mdss_panel_info *pinfo,
+		struct hdmi_util_ds_data *ds_data)
+{
+	int new_vic = -1;
+	u32 h_total, v_total;
+	/*
+	 * Zero-initialize: only a subset of the fields is filled in below,
+	 * but the whole structure is handed to hdmi_get_video_id_code(),
+	 * which must not see indeterminate values.
+	 */
+	struct msm_hdmi_mode_timing_info timing = {0};
+
+	if (!pinfo) {
+		pr_err("invalid panel data\n");
+		return -EINVAL;
+	}
+
+	if (pinfo->vic) {
+		struct msm_hdmi_mode_timing_info info = {0};
+		u32 ret = hdmi_get_supported_mode(&info, ds_data, pinfo->vic);
+		u32 supported = info.supported;
+
+		if (!ret && supported) {
+			new_vic = pinfo->vic;
+		} else {
+			pr_err("invalid or not supported vic %d\n",
+				pinfo->vic);
+			return -EPERM;
+		}
+	} else {
+		timing.active_h = pinfo->xres;
+		timing.back_porch_h = pinfo->lcdc.h_back_porch;
+		timing.front_porch_h = pinfo->lcdc.h_front_porch;
+		timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+
+		h_total = timing.active_h + timing.back_porch_h +
+			timing.front_porch_h + timing.pulse_width_h;
+
+		pr_debug("ah=%d bph=%d fph=%d pwh=%d ht=%d\n",
+			timing.active_h, timing.back_porch_h,
+			timing.front_porch_h, timing.pulse_width_h,
+			h_total);
+
+		timing.active_v = pinfo->yres;
+		timing.back_porch_v = pinfo->lcdc.v_back_porch;
+		timing.front_porch_v = pinfo->lcdc.v_front_porch;
+		timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+
+		v_total = timing.active_v + timing.back_porch_v +
+			timing.front_porch_v + timing.pulse_width_v;
+
+		pr_debug("av=%d bpv=%d fpv=%d pwv=%d vt=%d\n",
+			timing.active_v, timing.back_porch_v,
+			timing.front_porch_v, timing.pulse_width_v, v_total);
+
+		/* pixel_freq is kept in kHz */
+		timing.pixel_freq = ((unsigned long int)pinfo->clk_rate / 1000);
+		if (h_total && v_total) {
+			/* refresh rate in millihertz */
+			timing.refresh_rate = ((timing.pixel_freq * 1000) /
+				(h_total * v_total)) * 1000;
+		} else {
+			pr_err("cannot cal refresh rate\n");
+			return -EPERM;
+		}
+
+		pr_debug("pixel_freq=%d refresh_rate=%d\n",
+			timing.pixel_freq, timing.refresh_rate);
+
+		new_vic = hdmi_get_video_id_code(&timing, ds_data);
+	}
+
+	return new_vic;
+}
+
+/*
+ * Cache the current timing into the panel info so the dynamic-fps logic
+ * has a baseline: total horizontal size, front porch and refresh rate.
+ */
+static void hdmi_panel_update_dfps_data(struct hdmi_panel *panel)
+{
+	struct mdss_panel_info *pinfo = panel->data->pinfo;
+
+	/* second argument selects the htotal variant — see mdss_panel.h */
+	pinfo->saved_total = mdss_panel_get_htotal(pinfo, true);
+	pinfo->saved_fporch = panel->vid_cfg.timing->front_porch_h;
+
+	pinfo->current_fps = panel->vid_cfg.timing->refresh_rate;
+	pinfo->default_fps = panel->vid_cfg.timing->refresh_rate;
+	pinfo->lcdc.frame_rate = panel->vid_cfg.timing->refresh_rate;
+}
+
+/**
+ * hdmi_panel_config_avi() - populate the AVI infoframe configuration
+ * @panel: panel handle whose vid_cfg.avi_iframe gets filled
+ *
+ * Derives the AVI infoframe fields (pixel format, content/scan info, bar
+ * positions, quantization defaults) from the panel data and current timing.
+ *
+ * Return: 0 on success, -EPERM when no timing is resolved for the vic.
+ */
+static int hdmi_panel_config_avi(struct hdmi_panel *panel)
+{
+	struct mdss_panel_info *pinfo = panel->data->pinfo;
+	struct hdmi_video_config *vid_cfg = &panel->vid_cfg;
+	struct hdmi_avi_infoframe_config *avi = &vid_cfg->avi_iframe;
+	struct msm_hdmi_mode_timing_info *timing;
+	/* must be signed: negative error codes are stored here */
+	int ret = 0;
+
+	timing = panel->vid_cfg.timing;
+	if (!timing) {
+		pr_err("fmt not supported: %d\n", panel->vic);
+		ret = -EPERM;
+		goto end;
+	}
+
+	/* Setup AVI Infoframe content */
+	avi->pixel_format = pinfo->out_format;
+	avi->is_it_content = panel->data->is_it_content;
+	avi->content_type = panel->data->content_type;
+	avi->scan_info = panel->data->scan_info;
+
+	/* no bars: end-of-bar markers sit just past the active region */
+	avi->bar_info.end_of_top_bar = 0x0;
+	avi->bar_info.start_of_bottom_bar = timing->active_v + 1;
+	avi->bar_info.end_of_left_bar = 0;
+	avi->bar_info.start_of_right_bar = timing->active_h + 1;
+
+	avi->act_fmt_info_present = true;
+	avi->rgb_quantization_range = HDMI_QUANTIZATION_DEFAULT;
+	avi->yuv_quantization_range = HDMI_QUANTIZATION_DEFAULT;
+
+	avi->scaling_info = HDMI_SCALING_NONE;
+
+	avi->colorimetry_info = 0;
+	avi->ext_colorimetry_info = 0;
+
+	avi->pixel_rpt_factor = 0;
+end:
+	return ret;
+}
+
+/**
+ * hdmi_panel_setup_video() - program the HDMI TX video timing registers
+ * @panel: panel handle with a resolved vid_cfg.timing
+ *
+ * Writes HDMI_TOTAL, HDMI_ACTIVE_H/V, the F2 (interlace) registers and
+ * HDMI_FRAME_CTRL from the current timing, then refreshes the dynamic-fps
+ * baseline. Horizontal values are halved for YUV420 output.
+ *
+ * Return: 0 on success, -EPERM on missing timing or out-of-range values.
+ */
+static int hdmi_panel_setup_video(struct hdmi_panel *panel)
+{
+	u32 total_h, start_h, end_h;
+	u32 total_v, start_v, end_v;
+	u32 div = 0;
+	struct mdss_io_data *io = panel->io;
+	struct msm_hdmi_mode_timing_info *timing;
+
+	timing = panel->vid_cfg.timing;
+	if (!timing) {
+		pr_err("fmt not supported: %d\n", panel->vic);
+		return -EPERM;
+	}
+
+	/* reduce horizontal params by half for YUV420 output */
+	if (panel->vid_cfg.avi_iframe.pixel_format == MDP_Y_CBCR_H2V2)
+		div = 1;
+
+	total_h = (hdmi_tx_get_h_total(timing) >> div) - 1;
+	total_v = hdmi_tx_get_v_total(timing) - 1;
+
+	/* range checks mirror the register field widths (13/12 bits) */
+	if (((total_v << 16) & 0xE0000000) || (total_h & 0xFFFFE000)) {
+		pr_err("total v=%d or h=%d is larger than supported\n",
+			total_v, total_h);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_TOTAL, (total_v << 16) | (total_h << 0));
+
+	/* active region starts after back porch + sync pulse */
+	start_h = (timing->back_porch_h >> div) +
+		(timing->pulse_width_h >> div);
+	end_h = (total_h + 1) - (timing->front_porch_h >> div);
+	if (((end_h << 16) & 0xE0000000) || (start_h & 0xFFFFE000)) {
+		pr_err("end_h=%d or start_h=%d is larger than supported\n",
+			end_h, start_h);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_ACTIVE_H, (end_h << 16) | (start_h << 0));
+
+	start_v = timing->back_porch_v + timing->pulse_width_v - 1;
+	end_v = total_v - timing->front_porch_v;
+	if (((end_v << 16) & 0xE0000000) || (start_v & 0xFFFFE000)) {
+		pr_err("end_v=%d or start_v=%d is larger than supported\n",
+			end_v, start_v);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_ACTIVE_V, (end_v << 16) | (start_v << 0));
+
+	/* second-field registers are only meaningful for interlaced modes */
+	if (timing->interlaced) {
+		DSS_REG_W(io, HDMI_V_TOTAL_F2, (total_v + 1) << 0);
+		DSS_REG_W(io, HDMI_ACTIVE_V_F2,
+			((end_v + 1) << 16) | ((start_v + 1) << 0));
+	} else {
+		DSS_REG_W(io, HDMI_V_TOTAL_F2, 0);
+		DSS_REG_W(io, HDMI_ACTIVE_V_F2, 0);
+	}
+
+	DSS_REG_W(io, HDMI_FRAME_CTRL,
+		((timing->interlaced << 31) & 0x80000000) |
+		((timing->active_low_h << 29) & 0x20000000) |
+		((timing->active_low_v << 28) & 0x10000000));
+
+	hdmi_panel_update_dfps_data(panel);
+
+	return 0;
+}
+
+/**
+ * hdmi_panel_set_avi_infoframe() - pack and enable the AVI infoframe
+ * @panel: panel handle with a configured vid_cfg.avi_iframe
+ *
+ * Assembles the 13 AVI data bytes, computes the 256-complement checksum
+ * over header + payload, writes HDMI_AVI_INFO0..3 and enables per-frame
+ * transmission at line AVI_IFRAME_LINE_NUMBER.
+ */
+static void hdmi_panel_set_avi_infoframe(struct hdmi_panel *panel)
+{
+	int i;
+	u8 avi_iframe[AVI_MAX_DATA_BYTES] = {0};
+	u8 checksum;
+	u32 sum, reg_val;
+	struct mdss_io_data *io = panel->io;
+	struct hdmi_avi_infoframe_config *avi;
+	struct msm_hdmi_mode_timing_info *timing;
+
+	avi = &panel->vid_cfg.avi_iframe;
+	timing = panel->vid_cfg.timing;
+
+	/*
+	 * BYTE - 1:
+	 *	0:1 - Scan Information
+	 *	2:3 - Bar Info
+	 *	4 - Active Format Info present
+	 *	5:6 - Pixel format type;
+	 *	7 - Reserved;
+	 */
+	avi_iframe[0] = (avi->scan_info & 0x3) |
+		(avi->bar_info.vert_binfo_present ? BIT(2) : 0) |
+		(avi->bar_info.horz_binfo_present ? BIT(3) : 0) |
+		(avi->act_fmt_info_present ? BIT(4) : 0);
+	if (avi->pixel_format == MDP_Y_CBCR_H2V2)
+		avi_iframe[0] |= (0x3 << 5);
+	else if (avi->pixel_format == MDP_Y_CBCR_H2V1)
+		avi_iframe[0] |= (0x1 << 5);
+	else if (avi->pixel_format == MDP_Y_CBCR_H1V1)
+		avi_iframe[0] |= (0x2 << 5);
+
+	/*
+	 * BYTE - 2:
+	 *	0:3 - Active format info
+	 *	4:5 - Picture aspect ratio
+	 *	6:7 - Colorimetry info
+	 */
+	avi_iframe[1] |= 0x08; /* active format = same as picture */
+	if (timing->ar == HDMI_RES_AR_4_3)
+		avi_iframe[1] |= (0x1 << 4);
+	else if (timing->ar == HDMI_RES_AR_16_9)
+		avi_iframe[1] |= (0x2 << 4);
+
+	avi_iframe[1] |= (avi->colorimetry_info & 0x3) << 6;
+
+	/*
+	 * BYTE - 3:
+	 *	0:1 - Scaling info
+	 *	2:3 - Quantization range
+	 *	4:6 - Extended Colorimetry
+	 *	7 - IT content
+	 */
+	avi_iframe[2] |= (avi->scaling_info & 0x3) |
+		((avi->rgb_quantization_range & 0x3) << 2) |
+		((avi->ext_colorimetry_info & 0x7) << 4) |
+		((avi->is_it_content ? 0x1 : 0x0) << 7);
+	/*
+	 * BYTE - 4:
+	 *	0:7 - VIC
+	 */
+	if (timing->video_format < HDMI_VFRMT_END)
+		avi_iframe[3] = timing->video_format;
+
+	/*
+	 * BYTE - 5:
+	 *	0:3 - Pixel Repeat factor
+	 *	4:5 - Content type
+	 *	6:7 - YCC Quantization range
+	 */
+	avi_iframe[4] = (avi->pixel_rpt_factor & 0xF) |
+		((avi->content_type & 0x3) << 4) |
+		((avi->yuv_quantization_range & 0x3) << 6);
+
+	/* BYTE - 6,7: End of top bar */
+	avi_iframe[5] = avi->bar_info.end_of_top_bar & 0xFF;
+	avi_iframe[6] = ((avi->bar_info.end_of_top_bar & 0xFF00) >> 8);
+
+	/* BYTE - 8,9: Start of bottom bar */
+	avi_iframe[7] = avi->bar_info.start_of_bottom_bar & 0xFF;
+	avi_iframe[8] = ((avi->bar_info.start_of_bottom_bar & 0xFF00) >>
+		8);
+
+	/* BYTE - 10,11: End of left bar */
+	avi_iframe[9] = avi->bar_info.end_of_left_bar & 0xFF;
+	avi_iframe[10] = ((avi->bar_info.end_of_left_bar & 0xFF00) >> 8);
+
+	/* BYTE - 12,13: Start of right bar */
+	avi_iframe[11] = avi->bar_info.start_of_right_bar & 0xFF;
+	avi_iframe[12] = ((avi->bar_info.start_of_right_bar & 0xFF00) >>
+		8);
+
+	/* checksum covers header (offset|type, version, length) + payload */
+	sum = IFRAME_PACKET_OFFSET + AVI_IFRAME_TYPE +
+		AVI_IFRAME_VERSION + AVI_MAX_DATA_BYTES;
+
+	for (i = 0; i < AVI_MAX_DATA_BYTES; i++)
+		sum += avi_iframe[i];
+	sum &= 0xFF;
+	sum = 256 - sum;
+	checksum = (u8) sum;
+
+	reg_val = checksum |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_1]) |
+		LEFT_SHIFT_WORD(avi_iframe[DATA_BYTE_2]) |
+		LEFT_SHIFT_24BITS(avi_iframe[DATA_BYTE_3]);
+	DSS_REG_W(io, HDMI_AVI_INFO0, reg_val);
+
+	reg_val = avi_iframe[DATA_BYTE_4] |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_5]) |
+		LEFT_SHIFT_WORD(avi_iframe[DATA_BYTE_6]) |
+		LEFT_SHIFT_24BITS(avi_iframe[DATA_BYTE_7]);
+	DSS_REG_W(io, HDMI_AVI_INFO1, reg_val);
+
+	reg_val = avi_iframe[DATA_BYTE_8] |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_9]) |
+		LEFT_SHIFT_WORD(avi_iframe[DATA_BYTE_10]) |
+		LEFT_SHIFT_24BITS(avi_iframe[DATA_BYTE_11]);
+	DSS_REG_W(io, HDMI_AVI_INFO2, reg_val);
+
+	reg_val = avi_iframe[DATA_BYTE_12] |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_13]) |
+		LEFT_SHIFT_24BITS(AVI_IFRAME_VERSION);
+	DSS_REG_W(io, HDMI_AVI_INFO3, reg_val);
+
+	/* AVI InfoFrame enable (every frame) */
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0,
+		DSS_REG_R(io, HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+	reg_val = DSS_REG_R(io, HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F;
+	reg_val |= AVI_IFRAME_LINE_NUMBER;
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/**
+ * hdmi_panel_set_vendor_specific_infoframe() - program the HDMI VSIF
+ * @input: struct hdmi_panel pointer (void* because it is exported via ops)
+ *
+ * Builds the HDMI 1.4a vendor specific infoframe: either 3D structure
+ * information (when a supported s3d mode is active) or an extended-VIC
+ * for the 4K modes, then enables per-frame transmission at line
+ * VENDOR_IFRAME_LINE_NUMBER.
+ */
+static void hdmi_panel_set_vendor_specific_infoframe(void *input)
+{
+	int i;
+	u8 vs_iframe[9]; /* two header + length + 6 data */
+	u32 sum, reg_val;
+	u32 hdmi_vic, hdmi_video_format, s3d_struct = 0;
+	struct hdmi_panel *panel = input;
+	struct mdss_io_data *io = panel->io;
+
+	/* HDMI Spec 1.4a Table 8-10 */
+	vs_iframe[0] = 0x81; /* type */
+	vs_iframe[1] = 0x1; /* version */
+	vs_iframe[2] = 0x8; /* length */
+
+	vs_iframe[3] = 0x0; /* PB0: checksum, filled in below */
+
+	/* PB1..PB3: 24 Bit IEEE Registration Code 00_0C_03 */
+	vs_iframe[4] = 0x03;
+	vs_iframe[5] = 0x0C;
+	vs_iframe[6] = 0x00;
+
+	if ((panel->data->s3d_mode != HDMI_S3D_NONE) &&
+		panel->data->s3d_support) {
+		switch (panel->data->s3d_mode) {
+		case HDMI_S3D_SIDE_BY_SIDE:
+			s3d_struct = 0x8;
+			break;
+		case HDMI_S3D_TOP_AND_BOTTOM:
+			s3d_struct = 0x6;
+			break;
+		default:
+			s3d_struct = 0;
+		}
+		hdmi_video_format = 0x2; /* 3D format indication present */
+		hdmi_vic = 0;
+		/* PB5: 3D_Structure[7:4], Reserved[3:0] */
+		vs_iframe[8] = s3d_struct << 4;
+	} else {
+		hdmi_video_format = 0x1; /* extended resolution format */
+		switch (panel->vic) {
+		case HDMI_EVFRMT_3840x2160p30_16_9:
+			hdmi_vic = 0x1;
+			break;
+		case HDMI_EVFRMT_3840x2160p25_16_9:
+			hdmi_vic = 0x2;
+			break;
+		case HDMI_EVFRMT_3840x2160p24_16_9:
+			hdmi_vic = 0x3;
+			break;
+		case HDMI_EVFRMT_4096x2160p24_16_9:
+			hdmi_vic = 0x4;
+			break;
+		default:
+			hdmi_video_format = 0x0;
+			hdmi_vic = 0x0;
+		}
+		/* PB5: HDMI_VIC */
+		vs_iframe[8] = hdmi_vic;
+	}
+	/* PB4: HDMI Video Format[7:5], Reserved[4:0] */
+	vs_iframe[7] = (hdmi_video_format << 5) & 0xE0;
+
+	/* compute checksum (256-complement over all 9 bytes) */
+	sum = 0;
+	for (i = 0; i < 9; i++)
+		sum += vs_iframe[i];
+
+	sum &= 0xFF;
+	sum = 256 - sum;
+	vs_iframe[3] = (u8)sum;
+
+	reg_val = (s3d_struct << 24) | (hdmi_vic << 16) |
+		(vs_iframe[3] << 8) | (hdmi_video_format << 5) |
+		vs_iframe[2];
+	DSS_REG_W(io, HDMI_VENSPEC_INFO0, reg_val);
+
+	/* vendor specific info-frame enable (every frame) */
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0,
+		DSS_REG_R(io, HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+	reg_val = DSS_REG_R(io, HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F000000;
+	reg_val |= (VENDOR_IFRAME_LINE_NUMBER << 24);
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/**
+ * hdmi_panel_set_spd_infoframe() - program the Source Product Description
+ * @panel: panel handle providing spd_vendor_name / spd_product_description
+ *
+ * Packs the 8-char vendor name and 16-char product description (7-bit
+ * ASCII) into the GENERIC1 packet registers, accumulating the checksum
+ * as it goes, then enables per-frame transmission of generic packet 1.
+ *
+ * NOTE(review): assumes the vendor name buffer holds at least 8 bytes and
+ * the product description at least 16 — confirm against the provider.
+ */
+static void hdmi_panel_set_spd_infoframe(struct hdmi_panel *panel)
+{
+	u32 packet_header  = 0;
+	u32 check_sum      = 0;
+	u32 packet_payload = 0;
+	u32 packet_control = 0;
+	u8 *vendor_name = NULL;
+	u8 *product_description = NULL;
+	struct mdss_io_data *io = panel->io;
+
+	vendor_name = panel->spd_vendor_name;
+	product_description = panel->spd_product_description;
+
+	/* Setup Packet header and payload */
+	/*
+	 * 0x83 InfoFrame Type Code
+	 * 0x01 InfoFrame Version Number
+	 * 0x19 Length of Source Product Description InfoFrame
+	 */
+	packet_header  = 0x83 | (0x01 << 8) | (0x19 << 16);
+	DSS_REG_W(io, HDMI_GENERIC1_HDR, packet_header);
+	check_sum += IFRAME_CHECKSUM_32(packet_header);
+
+	packet_payload = (vendor_name[3] & 0x7f)
+		| ((vendor_name[4] & 0x7f) << 8)
+		| ((vendor_name[5] & 0x7f) << 16)
+		| ((vendor_name[6] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_1, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/* Product Description (7-bit ASCII code) */
+	packet_payload = (vendor_name[7] & 0x7f)
+		| ((product_description[0] & 0x7f) << 8)
+		| ((product_description[1] & 0x7f) << 16)
+		| ((product_description[2] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_2, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[3] & 0x7f)
+		| ((product_description[4] & 0x7f) << 8)
+		| ((product_description[5] & 0x7f) << 16)
+		| ((product_description[6] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_3, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[7] & 0x7f)
+		| ((product_description[8] & 0x7f) << 8)
+		| ((product_description[9] & 0x7f) << 16)
+		| ((product_description[10] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_4, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[11] & 0x7f)
+		| ((product_description[12] & 0x7f) << 8)
+		| ((product_description[13] & 0x7f) << 16)
+		| ((product_description[14] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_5, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/*
+	 * Source Device Information
+	 * 00h unknown
+	 * 01h Digital STB
+	 * 02h DVD
+	 * 03h D-VHS
+	 * 04h HDD Video
+	 * 05h DVC
+	 * 06h DSC
+	 * 07h Video CD
+	 * 08h Game
+	 * 09h PC general
+	 */
+	packet_payload = (product_description[15] & 0x7f) | 0x00 << 8;
+	DSS_REG_W(io, HDMI_GENERIC1_6, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/* Vendor Name (7bit ASCII code); low byte carries the checksum */
+	packet_payload = ((vendor_name[0] & 0x7f) << 8)
+		| ((vendor_name[1] & 0x7f) << 16)
+		| ((vendor_name[2] & 0x7f) << 24);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+	packet_payload |= ((0x100 - (0xff & check_sum)) & 0xff);
+	DSS_REG_W(io, HDMI_GENERIC1_0, packet_payload);
+
+	/*
+	 * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+	 * Setup HDMI TX generic packet control
+	 * Enable this packet to transmit every frame
+	 * Enable HDMI TX engine to transmit Generic packet 1
+	 */
+	packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+	packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+	DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+/* Send AVI, vendor-specific and SPD infoframes when enabled in panel data. */
+static int hdmi_panel_setup_infoframe(struct hdmi_panel *panel)
+{
+	if (!panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (panel->data->infoframe) {
+		hdmi_panel_set_avi_infoframe(panel);
+		hdmi_panel_set_vendor_specific_infoframe(panel);
+		hdmi_panel_set_spd_infoframe(panel);
+	}
+
+	return 0;
+}
+
+/**
+ * hdmi_panel_setup_scrambler() - configure TMDS scrambling via SCDC
+ * @panel: panel handle with resolved timing and a DDC channel
+ *
+ * Scrambling is forced on above 340 MHz pixel clock (with the TMDS bit
+ * clock ratio update), otherwise follows the sink's request. When enabled,
+ * the SCDC write order is: clock ratio, HDMI_CTRL scrambler bits, then the
+ * sink-side scrambling enable; finally a hardware timer is armed to poll
+ * the sink's scrambler status bit within 200 ms.
+ *
+ * Return: 0 on success or if scrambling is not supported by the TX,
+ * error code from the SCDC writes otherwise.
+ */
+static int hdmi_panel_setup_scrambler(struct hdmi_panel *panel)
+{
+	int rc = 0;
+	int timeout_hsync;
+	u32 reg_val = 0;
+	u32 tmds_clock_ratio = 0;
+	bool scrambler_on = false;
+	struct msm_hdmi_mode_timing_info *timing = NULL;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	timing = panel->vid_cfg.timing;
+	if (!timing) {
+		pr_err("Invalid timing info\n");
+		return -EINVAL;
+	}
+
+	/* Scrambling is supported from HDMI TX 4.0 */
+	if (panel->version < HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+		pr_debug("scrambling not supported by tx\n");
+		return 0;
+	}
+
+	/* above 340 MHz scrambling is mandatory, as is the clock ratio */
+	if (timing->pixel_freq > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) {
+		scrambler_on = true;
+		tmds_clock_ratio = 1;
+	} else {
+		scrambler_on = panel->data->scrambler;
+	}
+
+	pr_debug("scrambler %s\n", scrambler_on ? "on" : "off");
+
+	if (scrambler_on) {
+		rc = hdmi_scdc_write(panel->ddc,
+			HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+			tmds_clock_ratio);
+		if (rc) {
+			pr_err("TMDS CLK RATIO ERR\n");
+			return rc;
+		}
+
+		reg_val = DSS_REG_R(panel->io, HDMI_CTRL);
+		reg_val |= BIT(31); /* Enable Update DATAPATH_MODE */
+		reg_val |= BIT(28); /* Set SCRAMBLER_EN bit */
+
+		DSS_REG_W(panel->io, HDMI_CTRL, reg_val);
+
+		rc = hdmi_scdc_write(panel->ddc,
+			HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1);
+		if (!rc) {
+			panel->scrambler_enabled = true;
+		} else {
+			pr_err("failed to enable scrambling\n");
+			return rc;
+		}
+
+		/*
+		 * Setup hardware to periodically check for scrambler
+		 * status bit on the sink. Sink should set this bit
+		 * with in 200ms after scrambler is enabled.
+		 */
+		timeout_hsync = hdmi_utils_get_timeout_in_hysnc(
+			panel->vid_cfg.timing,
+			HDMI_TX_SCRAMBLER_TIMEOUT_MSEC);
+
+		if (timeout_hsync <= 0) {
+			pr_err("err in timeout hsync calc\n");
+			timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+		}
+
+		pr_debug("timeout for scrambling en: %d hsyncs\n",
+			timeout_hsync);
+
+		rc = hdmi_setup_ddc_timers(panel->ddc,
+			HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync);
+	} else {
+		/* best-effort disable; return code intentionally ignored */
+		hdmi_scdc_write(panel->ddc,
+			HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0);
+
+		panel->scrambler_enabled = false;
+	}
+
+	return rc;
+}
+
+/**
+ * hdmi_panel_update_fps() - apply a new refresh rate to the panel timing
+ * @input: struct hdmi_panel pointer (void* because it is exported via ops)
+ * @fps: new refresh rate
+ *
+ * Refreshes the timing porches and pixel clock from the panel info,
+ * reprograms the video registers and, if the new timing matches a
+ * different mode, switches the active vic.
+ *
+ * Return: the (possibly updated) active vic.
+ */
+static int hdmi_panel_update_fps(void *input, u32 fps)
+{
+	struct hdmi_panel *panel = input;
+	struct mdss_panel_info *pinfo = panel->data->pinfo;
+	struct msm_hdmi_mode_timing_info *timing = panel->vid_cfg.timing;
+	u64 pclk;
+	int vic;
+
+	timing->back_porch_h = pinfo->lcdc.h_back_porch;
+	timing->front_porch_h = pinfo->lcdc.h_front_porch;
+	timing->pulse_width_h = pinfo->lcdc.h_pulse_width;
+
+	timing->back_porch_v = pinfo->lcdc.v_back_porch;
+	timing->front_porch_v = pinfo->lcdc.v_front_porch;
+	timing->pulse_width_v = pinfo->lcdc.v_pulse_width;
+
+	timing->refresh_rate = fps;
+
+	pclk = pinfo->clk_rate;
+	do_div(pclk, HDMI_TX_KHZ_TO_HZ);
+	timing->pixel_freq = (unsigned long) pclk;
+
+	/*
+	 * Use pr_debug (with the file-wide pr_fmt prefix) for consistency
+	 * with the rest of this file instead of DEV_DBG.
+	 * NOTE(review): setup_video fails on missing/out-of-range timings;
+	 * that failure is treated as "no timing change" here.
+	 */
+	if (hdmi_panel_setup_video(panel)) {
+		pr_debug("no change in video timing\n");
+		goto end;
+	}
+
+	vic = hdmi_get_video_id_code(timing, panel->ds_data);
+
+	if (vic > 0 && panel->vic != vic) {
+		panel->vic = vic;
+		pr_debug("switched to new resolution id %d\n", vic);
+	}
+
+	pinfo->dynamic_fps = false;
+end:
+	return panel->vic;
+}
+
+/**
+ * hdmi_panel_power_on() - power up the panel and program video path
+ * @input: struct hdmi_panel pointer (void* because it is exported via ops)
+ *
+ * Handles the continuous-splash handoff case (only infoframes are
+ * refreshed when the resolution is unchanged); otherwise configures the
+ * AVI data, video timing registers, infoframes and scrambler.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int hdmi_panel_power_on(void *input)
+{
+	int rc = 0;
+	bool res_changed = false;
+	struct hdmi_panel *panel = input;
+	struct mdss_panel_info *pinfo;
+	struct msm_hdmi_mode_timing_info *info;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	pinfo = panel->data->pinfo;
+	if (!pinfo) {
+		pr_err("invalid panel data\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (panel->vic != panel->data->vic) {
+		res_changed = true;
+
+		pr_debug("switching from %d => %d\n",
+			panel->vic, panel->data->vic);
+
+		panel->vic = panel->data->vic;
+	}
+
+	if (pinfo->cont_splash_enabled) {
+		pinfo->cont_splash_enabled = false;
+
+		/* bootloader already configured this mode: refresh only
+		 * the infoframes and take over.
+		 */
+		if (!res_changed) {
+			panel->on = true;
+
+			hdmi_panel_set_vendor_specific_infoframe(panel);
+			hdmi_panel_set_spd_infoframe(panel);
+
+			pr_debug("handoff done\n");
+
+			goto end;
+		}
+	}
+
+	rc = hdmi_panel_config_avi(panel);
+	if (rc) {
+		pr_err("avi config failed. rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = hdmi_panel_setup_video(panel);
+	if (rc) {
+		pr_err("video setup failed. rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = hdmi_panel_setup_infoframe(panel);
+	if (rc) {
+		pr_err("infoframe setup failed. rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = hdmi_panel_setup_scrambler(panel);
+	if (rc) {
+		pr_err("scrambler setup failed. rc=%d\n", rc);
+		goto err;
+	}
+end:
+	panel->on = true;
+
+	info = panel->vid_cfg.timing;
+	/*
+	 * On the splash-handoff path the timing pointer has not been
+	 * validated, so guard the debug print against a NULL timing.
+	 */
+	if (info)
+		pr_debug("%dx%d%s@%dHz %dMHz %s (%d)\n",
+			info->active_h, info->active_v,
+			info->interlaced ? "i" : "p",
+			info->refresh_rate / 1000,
+			info->pixel_freq / 1000,
+			pinfo->out_format == MDP_Y_CBCR_H2V2 ? "Y420" : "RGB",
+			info->video_format);
+err:
+	return rc;
+}
+
+/**
+ * hdmi_panel_power_off() - mark the panel as powered off
+ * @input: struct hdmi_panel pointer (void* because it is exported via ops)
+ *
+ * Return: 0 on success, -EINVAL on NULL input.
+ */
+static int hdmi_panel_power_off(void *input)
+{
+	struct hdmi_panel *panel = input;
+
+	/* guard NULL input like the sibling ops (power_on) do */
+	if (!panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	panel->on = false;
+
+	pr_debug("panel off\n");
+	return 0;
+}
+
+/* Allocate and initialize the hdmi panel context; NULL on failure. */
+void *hdmi_panel_init(struct hdmi_panel_init_data *data)
+{
+	struct hdmi_panel *panel;
+
+	if (!data) {
+		pr_err("invalid input\n");
+		return NULL;
+	}
+
+	panel = kzalloc(sizeof(*panel), GFP_KERNEL);
+	if (!panel)
+		return NULL;
+
+	panel->io = data->io;
+	panel->ds_data = data->ds_data;
+	panel->data = data->panel_data;
+	panel->spd_vendor_name = data->spd_vendor_name;
+	panel->spd_product_description = data->spd_product_description;
+	panel->version = data->version;
+	panel->ddc = data->ddc;
+	panel->vid_cfg.timing = data->timing;
+
+	/* hand the panel operations back to the caller, if requested */
+	if (data->ops) {
+		data->ops->on = hdmi_panel_power_on;
+		data->ops->off = hdmi_panel_power_off;
+		data->ops->get_vic = hdmi_panel_get_vic;
+		data->ops->vendor = hdmi_panel_set_vendor_specific_infoframe;
+		data->ops->update_fps = hdmi_panel_update_fps;
+	}
+
+	return panel;
+}
+
+/* Release the panel context allocated by hdmi_panel_init(). */
+void hdmi_panel_deinit(void *input)
+{
+	/* kfree() tolerates NULL, so no guard is needed */
+	kfree((struct hdmi_panel *)input);
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_panel.h b/drivers/video/fbdev/msm/mdss_hdmi_panel.h
new file mode 100644
index 0000000..50e168a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_panel.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_PANEL_H__
+#define __MDSS_HDMI_PANEL_H__
+
+#include "mdss_panel.h"
+#include "mdss_hdmi_util.h"
+
+/**
+ * struct hdmi_panel_data - panel related data information
+ *
+ * @pinfo: pointer to mdss panel information
+ * @s3d_mode: 3d mode supported
+ * @vic:		video identification code
+ * @scan_info: scan information of the TV
+ * @s3d_support:	set to true if 3d supported, false otherwise
+ * @content_type: type of content like game, cinema etc
+ * @infoframe: set to true if infoframes should be sent to sink
+ * @is_it_content: set to true if content is IT
+ * @scrambler: set to true if scrambler needs to be enabled
+ */
+struct hdmi_panel_data {
+ struct mdss_panel_info *pinfo;
+ u32 s3d_mode;
+ u32 vic;
+ u32 scan_info;
+ u8 content_type;
+ bool s3d_support;
+ bool infoframe;
+ bool is_it_content;
+ bool scrambler;
+};
+
+/**
+ * struct hdmi_panel_ops - panel operation for clients
+ *
+ * @on: pointer to a function which powers on the panel
+ * @off: pointer to a function which powers off the panel
+ * @vendor: pointer to a function which programs vendor specific infoframe
+ * @update_fps: pointer to a function which updates fps
+ * @get_vic: pointer to a function which get the vic from panel information.
+ */
+struct hdmi_panel_ops {
+ int (*on)(void *input);
+ int (*off)(void *input);
+ void (*vendor)(void *input);
+ int (*update_fps)(void *input, u32 fps);
+ int (*get_vic)(struct mdss_panel_info *pinfo,
+ struct hdmi_util_ds_data *ds_data);
+};
+
+/**
+ * struct hdmi_panel_init_data - initialization data for hdmi panel
+ *
+ * @io: pointer to logical memory of the hdmi tx core
+ * @ds_data: pointer to down stream data
+ * @panel_data: pointer to panel data
+ * @ddc: pointer to display data channel's data
+ * @ops:		pointer to panel ops to be filled by hdmi panel
+ * @timing: pointer to the timing details of current resolution
+ * @spd_vendor_name: pointer to spd vendor infoframe data
+ * @spd_product_description: pointer to spd product description infoframe data
+ * @version: hardware version of the hdmi tx
+ */
+struct hdmi_panel_init_data {
+ struct mdss_io_data *io;
+ struct hdmi_util_ds_data *ds_data;
+ struct hdmi_panel_data *panel_data;
+ struct hdmi_tx_ddc_ctrl *ddc;
+ struct hdmi_panel_ops *ops;
+ struct msm_hdmi_mode_timing_info *timing;
+ u8 *spd_vendor_name;
+ u8 *spd_product_description;
+ u32 version;
+};
+
+/**
+ * hdmi_panel_init() - initializes hdmi panel
+ *
+ * initializes the hdmi panel, allocates the memory, assign the input
+ * data to local variables and provide the operation function pointers.
+ *
+ * @data: initialization data.
+ * return: hdmi panel data that need to be send with hdmi ops.
+ */
+void *hdmi_panel_init(struct hdmi_panel_init_data *data);
+
+/**
+ * hdmi_panel_deinit() - deinitializes hdmi panel
+ *
+ * releases memory and all resources.
+ *
+ * @input: hdmi panel data.
+ */
+void hdmi_panel_deinit(void *input);
+
+#endif /* __MDSS_HDMI_PANEL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
new file mode 100644
index 0000000..4f2bb09
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -0,0 +1,4639 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/types.h>
+#include <linux/hdcp_qseecom.h>
+#include <linux/clk.h>
+
+#define REG_DUMP 0
+
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_hdmi_cec.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss_hdmi_hdcp.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_audio.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_hdmi_mhl.h"
+
+#define DRV_NAME "hdmi-tx"
+#define COMPATIBLE_NAME "qcom,hdmi-tx"
+
+#define HDMI_TX_EVT_STR(x) #x
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+#define DEFAULT_HDMI_PRIMARY_RESOLUTION HDMI_VFRMT_1920x1080p60_16_9
+
+/* HDMI PHY/PLL bit field macros */
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+
+#define HPD_DISCONNECT_POLARITY 0
+#define HPD_CONNECT_POLARITY 1
+
+/*
+ * Audio engine may take 1 to 3 sec to shutdown
+ * in normal cases. To handle worst cases, making
+ * timeout for audio engine shutdown as 5 sec.
+ */
+#define AUDIO_POLL_SLEEP_US (5 * 1000)
+#define AUDIO_POLL_TIMEOUT_US (AUDIO_POLL_SLEEP_US * 1000)
+
+#define HDMI_TX_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2
+#define HDMI_TX_YUV422_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+#define HDMI_TX_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+
+/* Maximum pixel clock rates for hdmi tx */
+#define HDMI_DEFAULT_MAX_PCLK_RATE 148500
+#define HDMI_TX_3_MAX_PCLK_RATE 297000
+#define HDMI_TX_4_MAX_PCLK_RATE 600000
+
+#define hdmi_tx_get_fd(x) (x ? hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
+#define hdmi_tx_set_fd(x, y) {if (x) hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
+
+#define MAX_EDID_READ_RETRY 5
+
+#define HDMI_TX_MIN_FPS 20000
+#define HDMI_TX_MAX_FPS 120000
+
+/* Enable HDCP by default */
+static bool hdcp_feature_on = true;
+
+/*
+ * CN represents IT content type, if ITC bit in infoframe data byte 3
+ * is set, CN bits will represent content type as below:
+ * 0b00 Graphics
+ * 0b01 Photo
+ * 0b10 Cinema
+ * 0b11 Game
+ */
+#define CONFIG_CN_BITS(bits, byte) \
+ (byte = (byte & ~(BIT(4) | BIT(5))) |\
+ ((bits & (BIT(0) | BIT(1))) << 4))
+
+enum hdmi_tx_hpd_states {
+ HPD_OFF,
+ HPD_ON,
+ HPD_ON_CONDITIONAL_MTP,
+ HPD_DISABLE,
+ HPD_ENABLE
+};
+
+static int hdmi_tx_set_mhl_hpd(struct platform_device *pdev, uint8_t on);
+static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on);
+static irqreturn_t hdmi_tx_isr(int irq, void *data);
+static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl);
+static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl);
+static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+ enum hdmi_tx_power_module_type module, int enable);
+static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl);
+static void hdmi_tx_fps_work(struct work_struct *work);
+
+static struct mdss_hw hdmi_tx_hw = {
+ .hw_ndx = MDSS_HW_HDMI,
+ .ptr = NULL,
+ .irq_handler = hdmi_tx_isr,
+};
+
+static struct mdss_gpio hpd_gpio_config[] = {
+ {0, 1, COMPATIBLE_NAME "-hpd"},
+ {0, 1, COMPATIBLE_NAME "-mux-en"},
+ {0, 0, COMPATIBLE_NAME "-mux-sel"},
+ {0, 1, COMPATIBLE_NAME "-mux-lpm"}
+};
+
+static struct mdss_gpio ddc_gpio_config[] = {
+ {0, 1, COMPATIBLE_NAME "-ddc-mux-sel"},
+ {0, 1, COMPATIBLE_NAME "-ddc-clk"},
+ {0, 1, COMPATIBLE_NAME "-ddc-data"}
+};
+
+static struct mdss_gpio core_gpio_config[] = {
+};
+
+static struct mdss_gpio cec_gpio_config[] = {
+ {0, 1, COMPATIBLE_NAME "-cec"}
+};
+
+const char *hdmi_pm_name(enum hdmi_tx_power_module_type module)
+{
+ switch (module) {
+ case HDMI_TX_HPD_PM: return "HDMI_TX_HPD_PM";
+ case HDMI_TX_DDC_PM: return "HDMI_TX_DDC_PM";
+ case HDMI_TX_CORE_PM: return "HDMI_TX_CORE_PM";
+ case HDMI_TX_CEC_PM: return "HDMI_TX_CEC_PM";
+ default: return "???";
+ }
+} /* hdmi_pm_name */
+
+static int hdmi_tx_get_version(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc;
+ int reg_val;
+ struct mdss_io_data *io;
+
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, true);
+ if (rc) {
+ DEV_ERR("%s: Failed to read HDMI version\n", __func__);
+ goto fail;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io not inititalized\n", __func__);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ reg_val = DSS_REG_R(io, HDMI_VERSION);
+ reg_val = (reg_val & 0xF0000000) >> 28;
+ hdmi_ctrl->hdmi_tx_ver = reg_val;
+
+ switch (hdmi_ctrl->hdmi_tx_ver) {
+ case (HDMI_TX_VERSION_3):
+ hdmi_ctrl->max_pclk_khz = HDMI_TX_3_MAX_PCLK_RATE;
+ break;
+ case (HDMI_TX_VERSION_4):
+ hdmi_ctrl->max_pclk_khz = HDMI_TX_4_MAX_PCLK_RATE;
+ break;
+ default:
+ hdmi_ctrl->max_pclk_khz = HDMI_DEFAULT_MAX_PCLK_RATE;
+ break;
+ }
+
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, false);
+ if (rc) {
+ DEV_ERR("%s: FAILED to disable power\n", __func__);
+ goto fail;
+ }
+
+fail:
+ return rc;
+}
+
+int register_hdmi_cable_notification(struct hdmi_cable_notify *handler)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct list_head *pos;
+
+ if (!hdmi_tx_hw.ptr) {
+ DEV_WARN("%s: HDMI Tx core not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!handler) {
+ DEV_ERR("%s: Empty handler\n", __func__);
+ return -ENODEV;
+ }
+
+ hdmi_ctrl = (struct hdmi_tx_ctrl *) hdmi_tx_hw.ptr;
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ handler->status = hdmi_ctrl->hpd_state;
+ list_for_each(pos, &hdmi_ctrl->cable_notify_handlers);
+ list_add_tail(&handler->link, pos);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return handler->status;
+} /* register_hdmi_cable_notification */
+
+int unregister_hdmi_cable_notification(struct hdmi_cable_notify *handler)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ if (!hdmi_tx_hw.ptr) {
+ DEV_WARN("%s: HDMI Tx core not ready\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!handler) {
+ DEV_ERR("%s: Empty handler\n", __func__);
+ return -ENODEV;
+ }
+
+ hdmi_ctrl = (struct hdmi_tx_ctrl *) hdmi_tx_hw.ptr;
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ list_del(&handler->link);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return 0;
+} /* unregister_hdmi_cable_notification */
+
+static void hdmi_tx_cable_notify_work(struct work_struct *work)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct hdmi_cable_notify *pos;
+
+ hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, cable_notify_work);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid hdmi data\n", __func__);
+ return;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ list_for_each_entry(pos, &hdmi_ctrl->cable_notify_handlers, link) {
+ if (pos->status != hdmi_ctrl->hpd_state) {
+ pos->status = hdmi_ctrl->hpd_state;
+ pos->hpd_notify(pos);
+ }
+ }
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+} /* hdmi_tx_cable_notify_work */
+
+static bool hdmi_tx_is_cea_format(int mode)
+{
+ bool cea_fmt;
+
+ if ((mode > 0) && (mode <= HDMI_EVFRMT_END))
+ cea_fmt = true;
+ else
+ cea_fmt = false;
+
+ DEV_DBG("%s: %s\n", __func__, cea_fmt ? "Yes" : "No");
+
+ return cea_fmt;
+}
+
+static inline bool hdmi_tx_is_hdcp_enabled(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ return hdmi_ctrl->hdcp_feature_on &&
+ (hdmi_ctrl->hdcp14_present || hdmi_ctrl->hdcp22_present) &&
+ hdmi_ctrl->hdcp_ops;
+}
+
+static const char *hdmi_tx_pm_name(enum hdmi_tx_power_module_type module)
+{
+ switch (module) {
+ case HDMI_TX_HPD_PM: return "HDMI_TX_HPD_PM";
+ case HDMI_TX_DDC_PM: return "HDMI_TX_DDC_PM";
+ case HDMI_TX_CORE_PM: return "HDMI_TX_CORE_PM";
+ case HDMI_TX_CEC_PM: return "HDMI_TX_CEC_PM";
+ default: return "???";
+ }
+} /* hdmi_tx_pm_name */
+
+static const char *hdmi_tx_io_name(u32 type)
+{
+ switch (type) {
+ case HDMI_TX_CORE_IO: return "core_physical";
+ case HDMI_TX_QFPROM_IO: return "qfprom_physical";
+ case HDMI_TX_HDCP_IO: return "hdcp_physical";
+ default: return NULL;
+ }
+} /* hdmi_tx_io_name */
+
+static void hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (hdmi_ctrl && hdmi_ctrl->audio_ops.on) {
+ u32 pclk = hdmi_tx_setup_tmds_clk_rate(hdmi_ctrl);
+
+ hdmi_ctrl->audio_ops.on(hdmi_ctrl->audio_data,
+ pclk, &hdmi_ctrl->audio_params);
+ }
+}
+
+static inline u32 hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ return hdmi_edid_get_sink_mode(
+ hdmi_tx_get_fd(HDMI_TX_FEAT_EDID)) ? 0 : 1;
+} /* hdmi_tx_is_dvi_mode */
+
+static inline bool hdmi_tx_is_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ return hdmi_ctrl->hpd_state && hdmi_ctrl->panel_power_on;
+}
+
+static inline bool hdmi_tx_is_cec_wakeup_en(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ void *fd = NULL;
+
+ if (!hdmi_ctrl)
+ return false;
+
+ fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW);
+
+ if (!fd)
+ return false;
+
+ return hdmi_cec_is_wakeup_en(fd);
+}
+
+static inline void hdmi_tx_cec_device_suspend(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ void *fd = NULL;
+
+ if (!hdmi_ctrl)
+ return;
+
+ fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW);
+
+ if (!fd)
+ return;
+
+ hdmi_cec_device_suspend(fd, hdmi_ctrl->panel_suspend);
+}
+
+
+static inline void hdmi_tx_send_cable_notification(
+ struct hdmi_tx_ctrl *hdmi_ctrl, int val)
+{
+ int state = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+ state = hdmi_ctrl->sdev.state;
+
+ extcon_set_state_sync(&hdmi_ctrl->sdev, EXTCON_DISP_HDMI, state);
+
+ DEV_INFO("%s: cable state %s %d\n", __func__,
+ hdmi_ctrl->sdev.state == state ?
+ "is same" : "switched to",
+ hdmi_ctrl->sdev.state);
+
+ /* Notify all registered modules of cable connection status */
+ schedule_work(&hdmi_ctrl->cable_notify_work);
+} /* hdmi_tx_send_cable_notification */
+
+static inline void hdmi_tx_set_audio_switch_node(
+ struct hdmi_tx_ctrl *hdmi_ctrl, int val)
+{
+ if (hdmi_ctrl && hdmi_ctrl->audio_ops.notify &&
+ !hdmi_tx_is_dvi_mode(hdmi_ctrl))
+ hdmi_ctrl->audio_ops.notify(hdmi_ctrl->audio_data, val);
+}
+
+static void hdmi_tx_wait_for_audio_engine(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ u64 status = 0;
+ u32 wait_for_vote = 50;
+ struct mdss_io_data *io = NULL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io not inititalized\n", __func__);
+ return;
+ }
+
+ /*
+ * wait for 5 sec max for audio engine to acknowledge if hdmi tx core
+ * can be safely turned off. Sleep for a reasonable time to make sure
+ * vote_hdmi_core_on variable is updated properly by audio.
+ */
+ while (hdmi_ctrl->vote_hdmi_core_on && --wait_for_vote)
+ msleep(100);
+
+
+ if (!wait_for_vote)
+ DEV_ERR("%s: HDMI core still voted for power on\n", __func__);
+
+ if (readl_poll_timeout(io->base + HDMI_AUDIO_PKT_CTRL, status,
+ (status & BIT(0)) == 0, AUDIO_POLL_SLEEP_US,
+ AUDIO_POLL_TIMEOUT_US))
+ DEV_ERR("%s: Error turning off audio packet transmission.\n",
+ __func__);
+
+ if (readl_poll_timeout(io->base + HDMI_AUDIO_CFG, status,
+ (status & BIT(0)) == 0, AUDIO_POLL_SLEEP_US,
+ AUDIO_POLL_TIMEOUT_US))
+ DEV_ERR("%s: Error turning off audio engine.\n", __func__);
+}
+
+static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data(
+ struct mdss_panel_data *mpd)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ if (mpd) {
+ hdmi_ctrl = container_of(mpd, struct hdmi_tx_ctrl, panel_data);
+ if (!hdmi_ctrl)
+ DEV_ERR("%s: hdmi_ctrl = NULL\n", __func__);
+ } else {
+ DEV_ERR("%s: mdss_panel_data = NULL\n", __func__);
+ }
+ return hdmi_ctrl;
+} /* hdmi_tx_get_drvdata_from_panel_data */
+
+static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_sysfs_dev(
+ struct device *device)
+{
+ struct msm_fb_data_type *mfd = NULL;
+ struct mdss_panel_data *panel_data = NULL;
+ struct fb_info *fbi = dev_get_drvdata(device);
+
+ if (fbi) {
+ mfd = (struct msm_fb_data_type *)fbi->par;
+ panel_data = dev_get_platdata(&mfd->pdev->dev);
+
+ return hdmi_tx_get_drvdata_from_panel_data(panel_data);
+ }
+ DEV_ERR("%s: fbi = NULL\n", __func__);
+ return NULL;
+} /* hdmi_tx_get_drvdata_from_sysfs_dev */
+
+/* todo: Fix this. Right now this is declared in hdmi_util.h */
+void *hdmi_get_featuredata_from_sysfs_dev(struct device *device,
+ u32 feature_type)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ if (!device || feature_type >= HDMI_TX_FEAT_MAX) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return NULL;
+ }
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(device);
+ if (hdmi_ctrl)
+ return hdmi_tx_get_fd(feature_type);
+ else
+ return NULL;
+
+} /* hdmi_tx_get_featuredata_from_sysfs_dev */
+EXPORT_SYMBOL(hdmi_get_featuredata_from_sysfs_dev);
+
+static int hdmi_tx_config_5v(struct hdmi_tx_ctrl *hdmi_ctrl, bool enable)
+{
+ struct mdss_module_power *pd = NULL;
+ int ret = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ pd = &hdmi_ctrl->pdata.power_data[HDMI_TX_HPD_PM];
+ if (!pd || !pd->gpio_config) {
+ DEV_ERR("%s: Error: invalid power data\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ gpio_set_value(pd->gpio_config->gpio, enable);
+end:
+ return ret;
+}
+
+static ssize_t hdmi_tx_sysfs_rda_connected(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->hpd_state);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_state);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_connected */
+
+static ssize_t hdmi_tx_sysfs_wta_edid(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ int i = 0;
+ const char *buf_t = buf;
+ const int char_to_nib = 2;
+ int edid_size = count / char_to_nib;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl || !hdmi_ctrl->edid_buf) {
+ DEV_ERR("%s: invalid data\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ if ((edid_size < EDID_BLOCK_SIZE) ||
+ (edid_size > hdmi_ctrl->edid_buf_size)) {
+ DEV_DBG("%s: disabling custom edid\n", __func__);
+
+ ret = -EINVAL;
+ hdmi_ctrl->custom_edid = false;
+ goto end;
+ }
+
+ memset(hdmi_ctrl->edid_buf, 0, hdmi_ctrl->edid_buf_size);
+
+ while (edid_size--) {
+ char t[char_to_nib + 1];
+ int d;
+
+ memcpy(t, buf_t, sizeof(char) * char_to_nib);
+ t[char_to_nib] = '\0';
+
+ ret = kstrtoint(t, 16, &d);
+ if (ret) {
+ pr_err("kstrtoint error %d\n", ret);
+ goto end;
+ }
+
+ memcpy(hdmi_ctrl->edid_buf + i++, &d,
+ sizeof(*hdmi_ctrl->edid_buf));
+
+ buf_t += char_to_nib;
+ }
+
+ ret = strnlen(buf, PAGE_SIZE);
+ hdmi_ctrl->custom_edid = true;
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return ret;
+}
+
+static ssize_t hdmi_tx_sysfs_rda_edid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ u32 size;
+ u32 cea_blks;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl || !hdmi_ctrl->edid_buf) {
+ DEV_ERR("%s: invalid data\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ cea_blks = hdmi_ctrl->edid_buf[EDID_BLOCK_SIZE - 2];
+ if (cea_blks >= MAX_EDID_BLOCKS) {
+ DEV_ERR("%s: invalid cea blocks\n", __func__);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return -EINVAL;
+ }
+ size = (cea_blks + 1) * EDID_BLOCK_SIZE;
+ size = min_t(u32, size, PAGE_SIZE);
+
+ DEV_DBG("%s: edid size %d\n", __func__, size);
+
+ memcpy(buf, hdmi_ctrl->edid_buf, size);
+
+ print_hex_dump(KERN_DEBUG, "HDMI EDID: ", DUMP_PREFIX_NONE,
+ 16, 1, buf, size, false);
+
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return size;
+}
+
+static ssize_t hdmi_tx_sysfs_wta_audio_cb(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ack, rc = 0;
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ rc = kstrtoint(buf, 10, &ack);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ if (hdmi_ctrl->audio_ops.ack)
+ hdmi_ctrl->audio_ops.ack(hdmi_ctrl->audio_data,
+ ack, hdmi_ctrl->hpd_state);
+end:
+ return ret;
+}
+
+static int hdmi_tx_update_pixel_clk(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ struct mdss_module_power *power_data = NULL;
+ struct mdss_panel_info *pinfo;
+ int rc = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+ power_data = &hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM];
+ if (!power_data) {
+ DEV_ERR("%s: Error: invalid power data\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (power_data->clk_config->rate == pinfo->clk_rate) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ power_data->clk_config->rate = pinfo->clk_rate;
+
+ if (pinfo->out_format == MDP_Y_CBCR_H2V2)
+ power_data->clk_config->rate /= 2;
+
+ DEV_DBG("%s: rate %ld\n", __func__, power_data->clk_config->rate);
+
+ msm_mdss_clk_set_rate(power_data->clk_config, power_data->num_clk);
+end:
+ return rc;
+}
+
+static ssize_t hdmi_tx_sysfs_wta_hot_plug(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int hot_plug, rc;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ rc = kstrtoint(buf, 10, &hot_plug);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ hdmi_ctrl->hpd_state = !!hot_plug;
+
+ queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
+
+ rc = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
+}
+
+static ssize_t hdmi_tx_sysfs_rda_sim_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->sim_mode);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->sim_mode);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+}
+
+static ssize_t hdmi_tx_sysfs_wta_sim_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int sim_mode, rc;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct mdss_io_data *io = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io is not initialized\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!hdmi_ctrl->hpd_initialized) {
+ DEV_ERR("%s: hpd not enabled\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = kstrtoint(buf, 10, &sim_mode);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ hdmi_ctrl->sim_mode = !!sim_mode;
+
+ if (hdmi_ctrl->sim_mode) {
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0));
+ } else {
+ int cable_sense = DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1);
+
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0) | BIT(2) |
+ (cable_sense ? 0 : BIT(1)));
+ }
+
+ rc = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
+}
+
+static ssize_t hdmi_tx_sysfs_rda_video_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->vic);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->vic);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_video_mode */
+
+static ssize_t hdmi_tx_sysfs_rda_hpd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->hpd_feature_on);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_hpd */
+
+static ssize_t hdmi_tx_sysfs_wta_hpd(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int hpd, rc = 0;
+ ssize_t ret = strnlen(buf, PAGE_SIZE);
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ rc = kstrtoint(buf, 10, &hpd);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ DEV_DBG("%s: %d\n", __func__, hpd);
+
+ if (hdmi_ctrl->ds_registered && hpd &&
+ (!hdmi_ctrl->mhl_hpd_on || hdmi_ctrl->hpd_feature_on)) {
+ DEV_DBG("%s: DS registered, HPD on not allowed\n", __func__);
+ goto end;
+ }
+
+ switch (hpd) {
+ case HPD_OFF:
+ case HPD_DISABLE:
+ if (hpd == HPD_DISABLE)
+ hdmi_ctrl->hpd_disabled = true;
+
+ if (!hdmi_ctrl->hpd_feature_on) {
+ DEV_DBG("%s: HPD is already off\n", __func__);
+ goto end;
+ }
+
+ /* disable audio ack feature */
+ if (hdmi_ctrl->audio_ops.ack)
+ hdmi_ctrl->audio_ops.ack(hdmi_ctrl->audio_data,
+ AUDIO_ACK_SET_ENABLE, hdmi_ctrl->hpd_state);
+
+ if (hdmi_ctrl->panel_power_on) {
+ hdmi_ctrl->hpd_off_pending = true;
+ hdmi_tx_config_5v(hdmi_ctrl, false);
+ } else {
+ hdmi_tx_hpd_off(hdmi_ctrl);
+
+ hdmi_ctrl->sdev.state = 0;
+ hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+ }
+
+ break;
+ case HPD_ON:
+ if (hdmi_ctrl->hpd_disabled == true) {
+ DEV_ERR("%s: hpd is disabled, state %d not allowed\n",
+ __func__, hpd);
+ goto end;
+ }
+
+ if (hdmi_ctrl->pdata.cond_power_on) {
+ DEV_ERR("%s: hpd state %d not allowed w/ cond. hpd\n",
+ __func__, hpd);
+ goto end;
+ }
+
+ if (hdmi_ctrl->hpd_feature_on) {
+ DEV_DBG("%s: HPD is already on\n", __func__);
+ goto end;
+ }
+
+ rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+ break;
+ case HPD_ON_CONDITIONAL_MTP:
+ if (hdmi_ctrl->hpd_disabled == true) {
+ DEV_ERR("%s: hpd is disabled, state %d not allowed\n",
+ __func__, hpd);
+ goto end;
+ }
+
+ if (!hdmi_ctrl->pdata.cond_power_on) {
+ DEV_ERR("%s: hpd state %d not allowed w/o cond. hpd\n",
+ __func__, hpd);
+ goto end;
+ }
+
+ if (hdmi_ctrl->hpd_feature_on) {
+ DEV_DBG("%s: HPD is already on\n", __func__);
+ goto end;
+ }
+
+ rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+ break;
+ case HPD_ENABLE:
+ hdmi_ctrl->hpd_disabled = false;
+
+ rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+ break;
+ default:
+ DEV_ERR("%s: Invalid HPD state requested\n", __func__);
+ goto end;
+ }
+
+ if (!rc) {
+ hdmi_ctrl->hpd_feature_on =
+ (~hdmi_ctrl->hpd_feature_on) & BIT(0);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+ } else {
+ DEV_ERR("%s: failed to '%s' hpd. rc = %d\n", __func__,
+ hpd ? "enable" : "disable", rc);
+ ret = rc;
+ }
+
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return ret;
+} /* hdmi_tx_sysfs_wta_hpd */
+
+static ssize_t hdmi_tx_sysfs_wta_vendor_name(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret, sz;
+ u8 *s = (u8 *) buf;
+ u8 *d = NULL;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ d = hdmi_ctrl->spd_vendor_name;
+ ret = strnlen(buf, PAGE_SIZE);
+ ret = (ret > 8) ? 8 : ret;
+
+ sz = sizeof(hdmi_ctrl->spd_vendor_name);
+ memset(hdmi_ctrl->spd_vendor_name, 0, sz);
+ while (*s) {
+ if (*s & 0x60 && *s ^ 0x7f) {
+ *d = *s;
+ } else {
+ /* stop copying if control character found */
+ break;
+ }
+
+ if (++s > (u8 *) (buf + ret))
+ break;
+
+ d++;
+ }
+ hdmi_ctrl->spd_vendor_name[sz - 1] = 0;
+
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_wta_vendor_name */
+
+static ssize_t hdmi_tx_sysfs_rda_vendor_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", hdmi_ctrl->spd_vendor_name);
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_vendor_name */
+
+static ssize_t hdmi_tx_sysfs_wta_product_description(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret, sz;
+ u8 *s = (u8 *) buf;
+ u8 *d = NULL;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ d = hdmi_ctrl->spd_product_description;
+ ret = strnlen(buf, PAGE_SIZE);
+ ret = (ret > 16) ? 16 : ret;
+
+ sz = sizeof(hdmi_ctrl->spd_product_description);
+ memset(hdmi_ctrl->spd_product_description, 0, sz);
+ while (*s) {
+ if (*s & 0x60 && *s ^ 0x7f) {
+ *d = *s;
+ } else {
+ /* stop copying if control character found */
+ break;
+ }
+
+ if (++s > (u8 *) (buf + ret))
+ break;
+
+ d++;
+ }
+ hdmi_ctrl->spd_product_description[sz - 1] = 0;
+
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_wta_product_description */
+
+static ssize_t hdmi_tx_sysfs_rda_product_description(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ hdmi_ctrl->spd_product_description);
+ DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_product_description */
+
+static ssize_t hdmi_tx_sysfs_wta_avi_itc(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ int itc = 0;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ ret = kstrtoint(buf, 10, &itc);
+ if (ret) {
+ DEV_ERR("%s: kstrtoint failed. rc =%d\n", __func__, ret);
+ goto end;
+ }
+
+ if (itc < 0 || itc > 1) {
+ DEV_ERR("%s: Invalid ITC %d\n", __func__, itc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ hdmi_ctrl->panel.is_it_content = itc ? true : false;
+
+ ret = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return ret;
+} /* hdmi_tx_sysfs_wta_avi_itc */
+
+static ssize_t hdmi_tx_sysfs_wta_avi_cn_bits(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ int cn_bits = 0;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ ret = kstrtoint(buf, 10, &cn_bits);
+ if (ret) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+ goto end;
+ }
+
+ /* As per CEA-861-E, CN is a positive number and can be max 3 */
+ if (cn_bits < 0 || cn_bits > 3) {
+ DEV_ERR("%s: Invalid CN %d\n", __func__, cn_bits);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ hdmi_ctrl->panel.content_type = cn_bits;
+
+ ret = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_wta_cn_bits */
+
+static ssize_t hdmi_tx_sysfs_wta_s3d_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret, s3d_mode;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ void *pdata;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ ret = kstrtoint(buf, 10, &s3d_mode);
+ if (ret) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+ goto end;
+ }
+
+ if (s3d_mode < HDMI_S3D_NONE || s3d_mode >= HDMI_S3D_MAX) {
+ DEV_ERR("%s: invalid s3d mode = %d\n", __func__, s3d_mode);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (s3d_mode > HDMI_S3D_NONE &&
+ !hdmi_edid_is_s3d_mode_supported(
+ hdmi_tx_get_fd(HDMI_TX_FEAT_EDID),
+ hdmi_ctrl->vic, s3d_mode)) {
+ DEV_ERR("%s: s3d mode not supported in current video mode\n",
+ __func__);
+ ret = -EPERM;
+ hdmi_ctrl->panel.s3d_support = false;
+ goto end;
+ }
+
+ hdmi_ctrl->panel.s3d_mode = s3d_mode;
+ hdmi_ctrl->panel.s3d_support = true;
+
+ if (hdmi_ctrl->panel_ops.vendor)
+ hdmi_ctrl->panel_ops.vendor(pdata);
+
+ ret = strnlen(buf, PAGE_SIZE);
+ DEV_DBG("%s: %d\n", __func__, hdmi_ctrl->s3d_mode);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return ret;
+}
+
+static ssize_t hdmi_tx_sysfs_rda_s3d_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->s3d_mode);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->s3d_mode);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+}
+
+static ssize_t hdmi_tx_sysfs_wta_5v(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int read, ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct mdss_module_power *pd = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ pd = &hdmi_ctrl->pdata.power_data[HDMI_TX_HPD_PM];
+ if (!pd || !pd->gpio_config) {
+ DEV_ERR("%s: Error: invalid power data\n", __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = kstrtoint(buf, 10, &read);
+ if (ret) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+ goto end;
+ }
+
+ read = ~(!!read ^ pd->gpio_config->value) & BIT(0);
+
+ ret = hdmi_tx_config_5v(hdmi_ctrl, read);
+ if (ret)
+ goto end;
+
+ ret = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return ret;
+}
+
+/*
+ * sysfs attributes exposed on the fb device for HDMI tx control.
+ * Permissions: 0444 read-only, 0200 write-only, 0644 read/write.
+ */
+static DEVICE_ATTR(connected, 0444, hdmi_tx_sysfs_rda_connected, NULL);
+static DEVICE_ATTR(hdmi_audio_cb, 0200, NULL, hdmi_tx_sysfs_wta_audio_cb);
+static DEVICE_ATTR(hot_plug, 0200, NULL, hdmi_tx_sysfs_wta_hot_plug);
+static DEVICE_ATTR(sim_mode, 0644, hdmi_tx_sysfs_rda_sim_mode,
+	hdmi_tx_sysfs_wta_sim_mode);
+static DEVICE_ATTR(edid, 0644, hdmi_tx_sysfs_rda_edid,
+	hdmi_tx_sysfs_wta_edid);
+static DEVICE_ATTR(video_mode, 0444, hdmi_tx_sysfs_rda_video_mode, NULL);
+static DEVICE_ATTR(hpd, 0644, hdmi_tx_sysfs_rda_hpd,
+	hdmi_tx_sysfs_wta_hpd);
+static DEVICE_ATTR(vendor_name, 0644,
+	hdmi_tx_sysfs_rda_vendor_name, hdmi_tx_sysfs_wta_vendor_name);
+static DEVICE_ATTR(product_description, 0644,
+	hdmi_tx_sysfs_rda_product_description,
+	hdmi_tx_sysfs_wta_product_description);
+static DEVICE_ATTR(avi_itc, 0200, NULL, hdmi_tx_sysfs_wta_avi_itc);
+static DEVICE_ATTR(avi_cn0_1, 0200, NULL, hdmi_tx_sysfs_wta_avi_cn_bits);
+static DEVICE_ATTR(s3d_mode, 0644, hdmi_tx_sysfs_rda_s3d_mode,
+	hdmi_tx_sysfs_wta_s3d_mode);
+static DEVICE_ATTR(5v, 0200, NULL, hdmi_tx_sysfs_wta_5v);
+
+/* every attribute above must appear here to be registered as a group */
+static struct attribute *hdmi_tx_fs_attrs[] = {
+	&dev_attr_connected.attr,
+	&dev_attr_hdmi_audio_cb.attr,
+	&dev_attr_hot_plug.attr,
+	&dev_attr_sim_mode.attr,
+	&dev_attr_edid.attr,
+	&dev_attr_video_mode.attr,
+	&dev_attr_hpd.attr,
+	&dev_attr_vendor_name.attr,
+	&dev_attr_product_description.attr,
+	&dev_attr_avi_itc.attr,
+	&dev_attr_avi_cn0_1.attr,
+	&dev_attr_s3d_mode.attr,
+	&dev_attr_5v.attr,
+	NULL,
+};
+static struct attribute_group hdmi_tx_fs_attrs_group = {
+	.attrs = hdmi_tx_fs_attrs,
+};
+
+/*
+ * Create the HDMI tx sysfs attribute group under the fb device and cache
+ * its kobject so later feature-module init (edid/hdcp/cec) can attach
+ * their own nodes to it.
+ */
+static int hdmi_tx_sysfs_create(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct fb_info *fbi)
+{
+	int rc;
+
+	if (!hdmi_ctrl || !fbi) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	rc = sysfs_create_group(&fbi->dev->kobj,
+		&hdmi_tx_fs_attrs_group);
+	if (rc) {
+		DEV_ERR("%s: failed, rc=%d\n", __func__, rc);
+		return rc;
+	}
+	/* cached for hdmi_tx_sysfs_remove() and feature init data */
+	hdmi_ctrl->kobj = &fbi->dev->kobj;
+	DEV_DBG("%s: sysfs group %pK\n", __func__, hdmi_ctrl->kobj);
+
+	return 0;
+} /* hdmi_tx_sysfs_create */
+
+/* Remove the attribute group created by hdmi_tx_sysfs_create(). */
+static void hdmi_tx_sysfs_remove(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	if (hdmi_ctrl->kobj)
+		sysfs_remove_group(hdmi_ctrl->kobj, &hdmi_tx_fs_attrs_group);
+	hdmi_ctrl->kobj = NULL;
+} /* hdmi_tx_sysfs_remove */
+
+/*
+ * Set or clear the AV mute bit in the General Control packet and, when the
+ * bit actually changed, enable GC packet transmission (send + every frame)
+ * so the sink is informed of the new mute state.
+ */
+static int hdmi_tx_config_avmute(struct hdmi_tx_ctrl *hdmi_ctrl, bool set)
+{
+	struct mdss_io_data *io;
+	u32 av_mute_status;
+	bool av_pkt_en = false;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	av_mute_status = DSS_REG_R(io, HDMI_GC);
+
+	if (set) {
+		if (!(av_mute_status & BIT(0))) {
+			DSS_REG_W(io, HDMI_GC, av_mute_status | BIT(0));
+			av_pkt_en = true;
+		}
+	} else {
+		if (av_mute_status & BIT(0)) {
+			DSS_REG_W(io, HDMI_GC, av_mute_status & ~BIT(0));
+			av_pkt_en = true;
+		}
+	}
+
+	/*
+	 * Enable AV Mute transmission here. BIT(4) | BIT(5): GC packet send
+	 * and send-every-frame. The previous code used BIT(4) & BIT(5),
+	 * which is 0 and made this write a no-op.
+	 */
+	if (av_pkt_en)
+		DSS_REG_W(io, HDMI_VBI_PKT_CTRL,
+			DSS_REG_R(io, HDMI_VBI_PKT_CTRL) | (BIT(4) | BIT(5)));
+
+	DEV_DBG("%s: AVMUTE %s\n", __func__, set ? "set" : "cleared");
+
+	return 0;
+} /* hdmi_tx_config_avmute */
+
+/*
+ * Report whether HW encryption is currently engaged. Fails safe: returns
+ * true (encryption assumed set) on any error path; returns false only
+ * when both the HDCP_CTRL2 encryption bits and the HDMI_CTRL
+ * encryption-required bit are clear.
+ */
+static bool hdmi_tx_is_encryption_set(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct mdss_io_data *io;
+	bool enc_en = true;
+	u32 reg_val;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		goto end;
+	}
+
+	/* both bits set => encryption enabled and requested */
+	reg_val = DSS_REG_R_ND(io, HDMI_HDCP_CTRL2);
+	if ((reg_val & BIT(0)) && (reg_val & BIT(1)))
+		goto end;
+
+	if (DSS_REG_R_ND(io, HDMI_CTRL) & BIT(2))
+		goto end;
+
+	return false;
+
+end:
+	return enc_en;
+} /* hdmi_tx_is_encryption_set */
+
+/*
+ * HDCP state-change callback from the hdcp feature modules. Records the
+ * new status and defers the actual handling to hdmi_tx_hdcp_cb_work,
+ * scheduled ~250ms (HZ/4) later.
+ */
+static void hdmi_tx_hdcp_cb(void *ptr, enum hdmi_hdcp_state status)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)ptr;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi_ctrl->hdcp_status = status;
+
+	queue_delayed_work(hdmi_ctrl->workq, &hdmi_ctrl->hdcp_cb_work, HZ/4);
+}
+
+/*
+ * Decide whether the stream may currently be shared (AV unmuted, audio
+ * enabled) given the minimum encryption level the content requires.
+ */
+static inline bool hdmi_tx_is_stream_shareable(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (hdmi_ctrl->enc_lvl == HDCP_STATE_AUTH_ENC_NONE)
+		return true;
+
+	if (hdmi_ctrl->enc_lvl == HDCP_STATE_AUTH_ENC_1X)
+		return hdmi_tx_is_hdcp_enabled(hdmi_ctrl) &&
+			hdmi_ctrl->auth_state;
+
+	if (hdmi_ctrl->enc_lvl == HDCP_STATE_AUTH_ENC_2P2)
+		return hdmi_ctrl->hdcp_feature_on &&
+			hdmi_ctrl->hdcp22_present &&
+			hdmi_ctrl->auth_state;
+
+	return false;
+}
+
+/*
+ * Deferred handler for HDCP state changes. Drives AV mute and the audio
+ * switch according to the new authentication/encryption state and, on
+ * authentication failure with the panel still on, kicks off
+ * re-authentication.
+ */
+static void hdmi_tx_hdcp_cb_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct delayed_work *dw = to_delayed_work(work);
+	int rc = 0;
+
+	hdmi_ctrl = container_of(dw, struct hdmi_tx_ctrl, hdcp_cb_work);
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	switch (hdmi_ctrl->hdcp_status) {
+	case HDCP_STATE_AUTHENTICATED:
+		hdmi_ctrl->auth_state = true;
+
+		/* unmute only if the stream may be shared at this enc level */
+		if (hdmi_tx_is_panel_on(hdmi_ctrl) &&
+			hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		}
+
+		if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present)
+			hdcp1_set_enc(true);
+		break;
+	case HDCP_STATE_AUTH_FAIL:
+		if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+			/* drop sw encryption only if it was engaged */
+			if (hdmi_ctrl->auth_state)
+				hdcp1_set_enc(false);
+		}
+
+		hdmi_ctrl->auth_state = false;
+
+		if (hdmi_tx_is_panel_on(hdmi_ctrl)) {
+			DEV_DBG("%s: Reauthenticating\n", __func__);
+
+			/* mute output while the link is not trustworthy */
+			if (hdmi_tx_is_encryption_set(hdmi_ctrl) ||
+				!hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+				hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+				rc = hdmi_tx_config_avmute(hdmi_ctrl, true);
+			}
+
+			rc = hdmi_ctrl->hdcp_ops->hdmi_hdcp_reauthenticate(
+				hdmi_ctrl->hdcp_data);
+			if (rc)
+				DEV_ERR("%s: HDCP reauth failed. rc=%d\n",
+					__func__, rc);
+		} else {
+			DEV_DBG("%s: Not reauthenticating. Cable not conn\n",
+				__func__);
+		}
+
+		break;
+	case HDCP_STATE_AUTH_ENC_NONE:
+		hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+
+		if (hdmi_tx_is_panel_on(hdmi_ctrl)) {
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		}
+		break;
+	case HDCP_STATE_AUTH_ENC_1X:
+	case HDCP_STATE_AUTH_ENC_2P2:
+		hdmi_ctrl->enc_lvl = hdmi_ctrl->hdcp_status;
+
+		if (hdmi_tx_is_panel_on(hdmi_ctrl) &&
+			hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		} else {
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, true);
+		}
+		break;
+	default:
+		break;
+	/* do nothing */
+	}
+
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+}
+
+/*
+ * Read one 128-byte EDID block over DDC in chunks of block_size bytes.
+ * After MAX_EDID_READ_RETRY failed passes the chunk size is halved (down
+ * to a minimum of 16) and the whole block is retried.
+ *
+ * NOTE(review): return type is u32 but -EINVAL is returned on bad input —
+ * callers test for non-zero, which still works, but the sign is lost.
+ */
+static u32 hdmi_tx_ddc_read(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	u32 block, u8 *edid_buf)
+{
+	u32 block_size = EDID_BLOCK_SIZE;
+	struct hdmi_tx_ddc_data ddc_data;
+	u32 status = 0, retry_cnt = 0, i;
+
+	if (!ddc_ctrl || !edid_buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	do {
+		DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
+			block, block_size);
+
+		for (i = 0; i < EDID_BLOCK_SIZE; i += block_size) {
+			memset(&ddc_data, 0, sizeof(ddc_data));
+
+			ddc_data.dev_addr = EDID_BLOCK_ADDR;
+			ddc_data.offset = block * EDID_BLOCK_SIZE + i;
+			ddc_data.data_buf = edid_buf + i;
+			ddc_data.data_len = block_size;
+			ddc_data.request_len = block_size;
+			ddc_data.retry = 1;
+			ddc_data.what = "EDID";
+			ddc_data.retry_align = true;
+
+			ddc_ctrl->ddc_data = ddc_data;
+
+			/* Read EDID twice with 32bit alighnment too */
+			if (block < 2)
+				status = hdmi_ddc_read(ddc_ctrl);
+			else
+				status = hdmi_ddc_read_seg(ddc_ctrl);
+
+			if (status)
+				break;
+		}
+		/* shrink the transfer size once retries are exhausted */
+		if (retry_cnt++ >= MAX_EDID_READ_RETRY)
+			block_size /= 2;
+
+	} while (status && (block_size >= 16));
+
+	return status;
+}
+
+/*
+ * Read one EDID block into the controller's edid buffer, retrying the
+ * DDC transaction up to MAX_EDID_READ_RETRY times before giving up.
+ */
+static int hdmi_tx_read_edid_retry(struct hdmi_tx_ctrl *hdmi_ctrl, u8 block)
+{
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+	u32 attempt;
+	u8 *ebuf;
+	int ret = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ebuf = hdmi_ctrl->edid_buf;
+	if (!ebuf) {
+		DEV_ERR("%s: invalid edid buf\n", __func__);
+		return -EINVAL;
+	}
+
+	ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+
+	for (attempt = 0; attempt < MAX_EDID_READ_RETRY; attempt++) {
+		ret = hdmi_tx_ddc_read(ddc_ctrl, block,
+			ebuf + (block * EDID_BLOCK_SIZE));
+		if (!ret)
+			break;
+	}
+
+	return ret;
+}
+
+/*
+ * Read and validate the sink's EDID: block 0 first, then any CEA
+ * extension blocks it advertises. Per the VESA E-EDID standard each
+ * 128-byte block must sum to zero modulo 256.
+ */
+static int hdmi_tx_read_edid(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int ndx, check_sum;
+	int cea_blks = 0, block = 0;
+	int ret = 0;
+	u8 *ebuf;
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ebuf = hdmi_ctrl->edid_buf;
+	if (!ebuf) {
+		DEV_ERR("%s: invalid edid buf\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	memset(ebuf, 0, hdmi_ctrl->edid_buf_size);
+
+	ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+
+	do {
+		/* the whole upcoming 128-byte block must fit in the buffer */
+		if ((block + 1) * EDID_BLOCK_SIZE > hdmi_ctrl->edid_buf_size) {
+			DEV_ERR("%s: no mem for block %d, max mem %d\n",
+				__func__, block, hdmi_ctrl->edid_buf_size);
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		ret = hdmi_tx_read_edid_retry(hdmi_ctrl, block);
+		if (ret) {
+			DEV_ERR("%s: edid read failed\n", __func__);
+			goto end;
+		}
+
+		/*
+		 * verify checksum of the block just read; the previous code
+		 * summed ebuf[0..127] and so re-validated block 0 every time
+		 */
+		check_sum = 0;
+		for (ndx = 0; ndx < EDID_BLOCK_SIZE; ++ndx)
+			check_sum += ebuf[block * EDID_BLOCK_SIZE + ndx];
+
+		if (check_sum & 0xFF) {
+			DEV_ERR("%s: checksum mismatch\n", __func__);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		/* get number of cea extension blocks as given in block 0 */
+		if (block == 0) {
+			cea_blks = ebuf[EDID_BLOCK_SIZE - 2];
+			if (cea_blks >= MAX_EDID_BLOCKS) {
+				/* log the offending count before resetting */
+				DEV_ERR("%s: invalid cea blocks %d\n",
+					__func__, cea_blks);
+				cea_blks = 0;
+				ret = -EINVAL;
+				goto end;
+			}
+		}
+	} while ((cea_blks-- > 0) && (block++ < MAX_EDID_BLOCKS));
+end:
+
+	return ret;
+}
+
+/*
+ * Initialize the HDMI panel feature module and register its handle in the
+ * feature-data table. First of the hdmi_tx_init_features() stages.
+ */
+static int hdmi_tx_init_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_panel_init_data panel_init_data = {0};
+	void *panel_data;
+	int rc = 0;
+
+	hdmi_ctrl->panel.pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	panel_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	panel_init_data.ds_data = &hdmi_ctrl->ds_data;
+	panel_init_data.ops = &hdmi_ctrl->panel_ops;
+	panel_init_data.panel_data = &hdmi_ctrl->panel;
+	panel_init_data.spd_vendor_name = hdmi_ctrl->spd_vendor_name;
+	panel_init_data.spd_product_description =
+		hdmi_ctrl->spd_product_description;
+	panel_init_data.version = hdmi_ctrl->hdmi_tx_ver;
+	panel_init_data.ddc = &hdmi_ctrl->ddc_ctrl;
+	panel_init_data.timing = &hdmi_ctrl->timing;
+
+	panel_data = hdmi_panel_init(&panel_init_data);
+	if (IS_ERR_OR_NULL(panel_data)) {
+		DEV_ERR("%s: panel init failed\n", __func__);
+		rc = -EINVAL;
+	} else {
+		hdmi_tx_set_fd(HDMI_TX_FEAT_PANEL, panel_data);
+		DEV_DBG("%s: panel initialized\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * Initialize the EDID parser feature module, adopt the EDID buffer it
+ * allocates, and seed the current video resolution.
+ */
+static int hdmi_tx_init_edid(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_edid_init_data edid_init_data = {0};
+	void *edid_data;
+	int rc = 0;
+
+	edid_init_data.kobj = hdmi_ctrl->kobj;
+	edid_init_data.ds_data = hdmi_ctrl->ds_data;
+	edid_init_data.max_pclk_khz = hdmi_ctrl->max_pclk_khz;
+
+	edid_data = hdmi_edid_init(&edid_init_data);
+	if (!edid_data) {
+		DEV_ERR("%s: edid init failed\n", __func__);
+		rc = -ENODEV;
+		goto end;
+	}
+
+	hdmi_ctrl->panel_data.panel_info.edid_data = edid_data;
+	hdmi_tx_set_fd(HDMI_TX_FEAT_EDID, edid_data);
+
+	/* get edid buffer from edid parser */
+	hdmi_ctrl->edid_buf = edid_init_data.buf;
+	hdmi_ctrl->edid_buf_size = edid_init_data.buf_size;
+
+	hdmi_edid_set_video_resolution(edid_data, hdmi_ctrl->vic, true);
+end:
+	return rc;
+}
+
+/*
+ * Initialize the HDCP feature modules: HDCP 1.4 (only when fuses report
+ * it present) and HDCP 2.2 (always attempted). Shares a common init-data
+ * struct built from the controller's io regions and callbacks.
+ */
+static int hdmi_tx_init_hdcp(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_hdcp_init_data hdcp_init_data = {0};
+	struct resource *res;
+	void *hdcp_data;
+	int rc = 0;
+
+	res = platform_get_resource_byname(hdmi_ctrl->pdev,
+		IORESOURCE_MEM, hdmi_tx_io_name(HDMI_TX_CORE_IO));
+	if (!res) {
+		DEV_ERR("%s: Error getting HDMI tx core resource\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hdcp_init_data.phy_addr = res->start;
+	hdcp_init_data.core_io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	hdcp_init_data.qfprom_io = &hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
+	hdcp_init_data.hdcp_io = &hdmi_ctrl->pdata.io[HDMI_TX_HDCP_IO];
+	hdcp_init_data.mutex = &hdmi_ctrl->mutex;
+	hdcp_init_data.sysfs_kobj = hdmi_ctrl->kobj;
+	hdcp_init_data.ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+	hdcp_init_data.workq = hdmi_ctrl->workq;
+	hdcp_init_data.notify_status = hdmi_tx_hdcp_cb;
+	hdcp_init_data.cb_data = (void *)hdmi_ctrl;
+	hdcp_init_data.hdmi_tx_ver = hdmi_ctrl->hdmi_tx_ver;
+	hdcp_init_data.timing = &hdmi_ctrl->timing;
+
+	if (hdmi_ctrl->hdcp14_present) {
+		hdcp_data = hdmi_hdcp_init(&hdcp_init_data);
+
+		if (IS_ERR_OR_NULL(hdcp_data)) {
+			DEV_ERR("%s: hdcp 1.4 init failed\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP, hdcp_data);
+			DEV_DBG("%s: HDCP 1.4 initialized\n", __func__);
+		}
+	}
+
+	hdcp_data = hdmi_hdcp2p2_init(&hdcp_init_data);
+
+	if (IS_ERR_OR_NULL(hdcp_data)) {
+		DEV_ERR("%s: hdcp 2.2 init failed\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	} else {
+		hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP2P2, hdcp_data);
+		DEV_DBG("%s: HDCP 2.2 initialized\n", __func__);
+	}
+end:
+	return rc;
+}
+
+/*
+ * Initialize the CEC hardware feature module and mark CEC as supported
+ * in the panel info on success.
+ */
+static int hdmi_tx_init_cec_hw(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_cec_init_data cec_init_data = {0};
+	void *cec_hw_data;
+	int rc = 0;
+
+	cec_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	cec_init_data.workq = hdmi_ctrl->workq;
+	cec_init_data.pinfo = &hdmi_ctrl->panel_data.panel_info;
+	cec_init_data.ops = &hdmi_ctrl->hdmi_cec_ops;
+	cec_init_data.cbs = &hdmi_ctrl->hdmi_cec_cbs;
+
+	cec_hw_data = hdmi_cec_init(&cec_init_data);
+	if (IS_ERR_OR_NULL(cec_hw_data)) {
+		DEV_ERR("%s: cec init failed\n", __func__);
+		rc = -EINVAL;
+	} else {
+		hdmi_ctrl->panel_data.panel_info.is_cec_supported = true;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_HW, cec_hw_data);
+		DEV_DBG("%s: cec hw initialized\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * Initialize the CEC abstraction layer on top of the CEC hardware module
+ * (shares the same ops/cbs structures wired up by hdmi_tx_init_cec_hw).
+ */
+static int hdmi_tx_init_cec_abst(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct cec_abstract_init_data cec_abst_init_data = {0};
+	void *cec_abst_data;
+	int rc = 0;
+
+	cec_abst_init_data.kobj = hdmi_ctrl->kobj;
+	cec_abst_init_data.ops = &hdmi_ctrl->hdmi_cec_ops;
+	cec_abst_init_data.cbs = &hdmi_ctrl->hdmi_cec_cbs;
+
+	cec_abst_data = cec_abstract_init(&cec_abst_init_data);
+	if (IS_ERR_OR_NULL(cec_abst_data)) {
+		DEV_ERR("%s: cec abst init failed\n", __func__);
+		rc = -EINVAL;
+	} else {
+		hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_ABST, cec_abst_data);
+		hdmi_ctrl->panel_data.panel_info.cec_data = cec_abst_data;
+		DEV_DBG("%s: cec abst initialized\n", __func__);
+	}
+
+	return rc;
+}
+
+/* Register the HDMI audio sub-module and cache its handle. */
+static int hdmi_tx_init_audio(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_audio_init_data audio_init_data = {0};
+	void *audio_data;
+
+	audio_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	audio_init_data.ops = &hdmi_ctrl->audio_ops;
+
+	audio_data = hdmi_audio_register(&audio_init_data);
+	if (!audio_data) {
+		DEV_ERR("%s: audio init failed\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->audio_data = audio_data;
+	DEV_DBG("%s: audio initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Tear down the feature modules selected by the 'features' bitmask, in
+ * reverse order of initialization, clearing their feature-data entries.
+ */
+static void hdmi_tx_deinit_features(struct hdmi_tx_ctrl *hdmi_ctrl,
+	u32 features)
+{
+	void *fd;
+
+	if (features & HDMI_TX_FEAT_CEC_ABST) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_ABST);
+
+		cec_abstract_deinit(fd);
+
+		hdmi_ctrl->panel_data.panel_info.cec_data = NULL;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_ABST, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_CEC_HW) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW);
+
+		hdmi_cec_deinit(fd);
+		hdmi_ctrl->panel_data.panel_info.is_cec_supported = false;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_HW, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_HDCP2P2) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP2P2);
+
+		hdmi_hdcp2p2_deinit(fd);
+		hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP2P2, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_HDCP) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP);
+
+		hdmi_hdcp_deinit(fd);
+		hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_EDID) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+		hdmi_edid_deinit(fd);
+		hdmi_ctrl->edid_buf = NULL;
+		hdmi_ctrl->edid_buf_size = 0;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_EDID, 0);
+	}
+} /* hdmi_tx_deinit_features */
+
+/*
+ * Initialize all HDMI feature modules in dependency order. On failure at
+ * any stage, unwind the already-initialized modules via
+ * hdmi_tx_deinit_features() with a bitmask accumulated as stages succeed.
+ */
+static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct fb_info *fbi)
+{
+	int ret = 0;
+	u32 deinit_features = 0;
+
+	if (!hdmi_ctrl || !fbi) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = hdmi_tx_init_panel(hdmi_ctrl);
+	if (ret)
+		goto end;
+
+	ret = hdmi_tx_init_edid(hdmi_ctrl);
+	if (ret) {
+		deinit_features |= HDMI_TX_FEAT_PANEL;
+		goto err;
+	}
+
+	ret = hdmi_tx_init_hdcp(hdmi_ctrl);
+	if (ret) {
+		deinit_features |= HDMI_TX_FEAT_EDID;
+		goto err;
+	}
+
+	ret = hdmi_tx_init_cec_hw(hdmi_ctrl);
+	if (ret) {
+		deinit_features |= HDMI_TX_FEAT_HDCP;
+		goto err;
+	}
+
+	ret = hdmi_tx_init_cec_abst(hdmi_ctrl);
+	if (ret) {
+		deinit_features |= HDMI_TX_FEAT_CEC_HW;
+		goto err;
+	}
+
+	ret = hdmi_tx_init_audio(hdmi_ctrl);
+	if (ret) {
+		deinit_features |= HDMI_TX_FEAT_CEC_ABST;
+		goto err;
+	}
+
+	return 0;
+err:
+	hdmi_tx_deinit_features(hdmi_ctrl, deinit_features);
+end:
+	return ret;
+}
+
+/* Non-zero when the HDMI core ENABLE bit (HDMI_CTRL[0]) is set. */
+static inline u32 hdmi_tx_is_controller_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct mdss_io_data *io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+
+	return DSS_REG_R_ND(io, HDMI_CTRL) & BIT(0);
+} /* hdmi_tx_is_controller_on */
+
+/*
+ * Populate mdss_panel_info from the timing data of the controller's
+ * current video format (vic) and cache the timing for later use.
+ */
+static int hdmi_tx_init_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct mdss_panel_info *pinfo;
+	struct msm_hdmi_mode_timing_info timing = {0};
+	u32 ret;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = hdmi_get_supported_mode(&timing, &hdmi_ctrl->ds_data,
+		hdmi_ctrl->vic);
+	pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	if (ret || !timing.supported || !pinfo) {
+		DEV_ERR("%s: invalid timing data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo->xres = timing.active_h;
+	pinfo->yres = timing.active_v;
+	pinfo->clk_rate = timing.pixel_freq * 1000;
+
+	pinfo->lcdc.h_back_porch = timing.back_porch_h;
+	pinfo->lcdc.h_front_porch = timing.front_porch_h;
+	pinfo->lcdc.h_pulse_width = timing.pulse_width_h;
+	pinfo->lcdc.v_back_porch = timing.back_porch_v;
+	pinfo->lcdc.v_front_porch = timing.front_porch_v;
+	pinfo->lcdc.v_pulse_width = timing.pulse_width_v;
+	pinfo->lcdc.frame_rate = timing.refresh_rate;
+
+	pinfo->type = DTV_PANEL;
+	pinfo->pdest = DISPLAY_3;
+	pinfo->wait_cycle = 0;
+	pinfo->out_format = MDP_RGB_888;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 1;
+
+	pinfo->min_fps = HDMI_TX_MIN_FPS;
+	pinfo->max_fps = HDMI_TX_MAX_FPS;
+
+	pinfo->lcdc.border_clr = 0; /* blk */
+	pinfo->lcdc.underflow_clr = 0xff; /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+
+	pinfo->is_pluggable = hdmi_ctrl->pdata.pluggable;
+
+	/* cache for hdcp/panel init data, which hold a pointer to it */
+	hdmi_ctrl->timing = timing;
+
+	return 0;
+} /* hdmi_tx_init_panel_info */
+
+/*
+ * Read the connected sink's EDID over DDC (unless a custom EDID is loaded
+ * or simulation mode is active) and run the parser on whichever EDID
+ * buffer is valid. Requires the HDMI controller to be enabled.
+ */
+static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int status = 0;
+	void *data;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	data = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+	if (!hdmi_tx_is_controller_on(hdmi_ctrl)) {
+		DEV_ERR("%s: failed: HDMI controller is off", __func__);
+		status = -ENXIO;
+		goto error;
+	}
+
+	if (!hdmi_ctrl->custom_edid && !hdmi_ctrl->sim_mode) {
+		hdmi_ddc_config(&hdmi_ctrl->ddc_ctrl);
+
+		status = hdmi_tx_read_edid(hdmi_ctrl);
+		if (status) {
+			DEV_ERR("%s: error reading edid\n", __func__);
+			goto error;
+		}
+	}
+
+	/* parse edid if a valid edid buffer is present */
+	if (hdmi_ctrl->custom_edid || !hdmi_ctrl->sim_mode) {
+		status = hdmi_edid_parser(data);
+		if (status)
+			DEV_ERR("%s: edid parse failed\n", __func__);
+	}
+
+error:
+	return status;
+} /* hdmi_tx_read_sink_info */
+
+/*
+ * Probe which HDCP version to use for this sink: prefer HDCP 2.2 when its
+ * module reports support, otherwise fall back to HDCP 1.4 (loading the sw
+ * keys app when configured). Caches the chosen module's data and ops.
+ */
+static void hdmi_tx_update_hdcp_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	void *fd = NULL;
+	struct hdmi_hdcp_ops *ops = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* check first if hdcp2p2 is supported */
+	fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP2P2);
+	if (fd)
+		ops = hdmi_hdcp2p2_start(fd);
+
+	if (ops && ops->feature_supported)
+		hdmi_ctrl->hdcp22_present = ops->feature_supported(fd);
+	else
+		hdmi_ctrl->hdcp22_present = false;
+
+	if (!hdmi_ctrl->hdcp22_present) {
+		if (hdmi_ctrl->hdcp1_use_sw_keys)
+			hdmi_ctrl->hdcp14_present =
+				hdcp1_check_if_supported_load_app();
+
+		if (hdmi_ctrl->hdcp14_present) {
+			fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP);
+			ops = hdmi_hdcp_start(fd);
+		}
+	}
+
+	/* update internal data about hdcp */
+	hdmi_ctrl->hdcp_data = fd;
+	hdmi_ctrl->hdcp_ops = ops;
+}
+
+/*
+ * Deferred HPD handler: on connect, power the DDC lines, read the sink's
+ * EDID (with retries) and notify userspace; on disconnect, tear down
+ * audio and send the disconnect notification.
+ */
+static void hdmi_tx_hpd_int_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct mdss_io_data *io;
+	int rc = -EINVAL;
+	int retry = MAX_EDID_READ_RETRY;
+
+	hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, hpd_int_work);
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	if (!hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("hpd not initialized\n");
+		goto end;
+	}
+
+	DEV_DBG("%s: %s\n", __func__,
+		hdmi_ctrl->hpd_state ? "CONNECT" : "DISCONNECT");
+
+	if (hdmi_ctrl->hpd_state) {
+		if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, true)) {
+			DEV_ERR("%s: Failed to enable ddc power\n", __func__);
+			goto end;
+		}
+
+		/* Enable SW DDC before EDID read */
+		DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION,
+			DSS_REG_R(io, HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+
+		while (rc && retry--)
+			rc = hdmi_tx_read_sink_info(hdmi_ctrl);
+		/*
+		 * rc still set means every retry failed. The previous
+		 * '!retry && rc' test could never fire: retry ends at -1
+		 * when the loop is exhausted.
+		 */
+		if (rc)
+			pr_warn_ratelimited("%s: EDID read failed\n", __func__);
+
+		if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, false))
+			DEV_ERR("%s: Failed to disable ddc power\n", __func__);
+
+		hdmi_tx_send_cable_notification(hdmi_ctrl, true);
+	} else {
+		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+		hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+
+		hdmi_tx_send_cable_notification(hdmi_ctrl, false);
+	}
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+} /* hdmi_tx_hpd_int_work */
+
+/*
+ * Read the QFPROM fuse registers to determine whether HDMI and HDCP are
+ * enabled on this part, and (v4+) whether HDCP 1.x should use SW keys.
+ * The fuse layout differs before and after HDMI tx major version 4.
+ */
+static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 hdmi_disabled, hdcp_disabled, reg_val;
+	struct mdss_io_data *io = NULL;
+	int ret = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
+	if (!io->base) {
+		DEV_ERR("%s: QFPROM io is not initialized\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/* check if hdmi and hdcp are disabled */
+	if (hdmi_ctrl->hdmi_tx_ver < HDMI_TX_VERSION_4) {
+		hdcp_disabled = DSS_REG_R_ND(io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_LSB) & BIT(31);
+
+		hdmi_disabled = DSS_REG_R_ND(io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(0);
+	} else {
+		reg_val = DSS_REG_R_ND(io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_LSB + QFPROM_RAW_VERSION_4);
+		hdcp_disabled = reg_val & BIT(12);
+		hdmi_disabled = reg_val & BIT(13);
+
+		reg_val = DSS_REG_R_ND(io, SEC_CTRL_HW_VERSION);
+		/*
+		 * With HDCP enabled on capable hardware, check if HW
+		 * or SW keys should be used.
+		 */
+		if (!hdcp_disabled && (reg_val >= HDCP_SEL_MIN_SEC_VERSION)) {
+			reg_val = DSS_REG_R_ND(io,
+				QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+				QFPROM_RAW_VERSION_4);
+			if (!(reg_val & BIT(23)))
+				hdmi_ctrl->hdcp1_use_sw_keys = true;
+		}
+	}
+
+	DEV_DBG("%s: Features <HDMI:%s, HDCP:%s>\n", __func__,
+		hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? "OFF" : "ON");
+
+	if (hdmi_disabled) {
+		DEV_ERR("%s: HDMI disabled\n", __func__);
+		ret = -ENODEV;
+		goto end;
+	}
+
+	hdmi_ctrl->hdcp14_present = !hdcp_disabled;
+end:
+	return ret;
+} /* hdmi_tx_check_capability */
+
+/*
+ * Program HDMI_CTRL to enable or disable the core. When enabling, also
+ * select HDMI vs DVI mode (per EDID sink capability), request encryption
+ * on pre-v4 parts with HDCP active, and force the new datapath mode.
+ */
+static void hdmi_tx_set_mode(struct hdmi_tx_ctrl *hdmi_ctrl, u32 power_on)
+{
+	struct mdss_io_data *io = NULL;
+	/* Defaults: Disable block, HDMI mode */
+	u32 reg_val = BIT(1);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	if (power_on) {
+		/* Enable the block */
+		reg_val |= BIT(0);
+
+		/*
+		 * HDMI Encryption, if HDCP is enabled
+		 * The ENC_REQUIRED bit is only available on HDMI Tx major
+		 * version less than 4. From 4 onwards, this bit is controlled
+		 * by TZ
+		 */
+		if (hdmi_ctrl->hdmi_tx_ver < 4 &&
+			hdmi_tx_is_hdcp_enabled(hdmi_ctrl) &&
+			!hdmi_ctrl->pdata.primary)
+			reg_val |= BIT(2);
+
+		/* Set transmission mode to DVI based in EDID info */
+		if (!hdmi_edid_get_sink_mode(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID)))
+			reg_val &= ~BIT(1); /* DVI mode */
+
+		/*
+		 * Use DATAPATH_MODE as 1 always, the new mode that also
+		 * supports scrambler and HDCP 2.2. The legacy mode should no
+		 * longer be used
+		 */
+		reg_val |= BIT(31);
+	}
+
+	DSS_REG_W(io, HDMI_CTRL, reg_val);
+
+	DEV_DBG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+		power_on ? "Enable" : "Disable", reg_val);
+} /* hdmi_tx_set_mode */
+
+/*
+ * Select the pinctrl state matching the set of power modules that would
+ * be active after this call. pin_states is a bitmask of active modules;
+ * HPD is the prerequisite for the DDC/CEC combinations, and with HPD off
+ * the suspend state is chosen. No-op (returns 0) when pinctrl is absent.
+ */
+static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, bool active)
+{
+	struct pinctrl_state *pin_state = NULL;
+	int rc = -EFAULT;
+	struct mdss_module_power *power_data = NULL;
+	u64 cur_pin_states;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.pinctrl))
+		return 0;
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+
+	/* compute the prospective module mask before committing it */
+	cur_pin_states = active ? (hdmi_ctrl->pdata.pin_states | BIT(module))
+		: (hdmi_ctrl->pdata.pin_states & ~BIT(module));
+
+	if (cur_pin_states & BIT(HDMI_TX_HPD_PM)) {
+		if (cur_pin_states & BIT(HDMI_TX_DDC_PM)) {
+			if (cur_pin_states & BIT(HDMI_TX_CEC_PM))
+				pin_state = hdmi_ctrl->pin_res.state_active;
+			else
+				pin_state =
+					hdmi_ctrl->pin_res.state_ddc_active;
+		} else if (cur_pin_states & BIT(HDMI_TX_CEC_PM)) {
+			pin_state = hdmi_ctrl->pin_res.state_cec_active;
+		} else {
+			pin_state = hdmi_ctrl->pin_res.state_hpd_active;
+		}
+	} else {
+		pin_state = hdmi_ctrl->pin_res.state_suspend;
+	}
+
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(hdmi_ctrl->pin_res.pinctrl,
+			pin_state);
+		if (rc)
+			pr_err("%s: cannot set pins\n", __func__);
+		else
+			/* commit the mask only after the hw change succeeds */
+			hdmi_ctrl->pdata.pin_states = cur_pin_states;
+	} else {
+		pr_err("%s: pinstate not found\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * Acquire the device's pinctrl handle and look up the named pin states
+ * used by hdmi_tx_pinctrl_set_state(). Missing individual states are
+ * tolerated (debug-logged only).
+ */
+static int hdmi_tx_pinctrl_init(struct platform_device *pdev)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl;
+
+	hdmi_ctrl = platform_get_drvdata(pdev);
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_ctrl->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		/*
+		 * NOTE(review): if devm_pinctrl_get() returned NULL (not an
+		 * ERR_PTR), PTR_ERR(NULL) is 0 and this reports success with
+		 * a NULL pinctrl; set_state treats that as "no pinctrl" so
+		 * behavior is benign — confirm intent.
+		 */
+		return PTR_ERR(hdmi_ctrl->pin_res.pinctrl);
+	}
+
+	hdmi_ctrl->pin_res.state_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl, "hdmi_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_active))
+		pr_debug("%s: cannot get active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_hpd_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl,
+			"hdmi_hpd_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_hpd_active))
+		pr_debug("%s: cannot get hpd active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_cec_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl,
+			"hdmi_cec_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_cec_active))
+		pr_debug("%s: cannot get cec active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_ddc_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl,
+			"hdmi_ddc_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_ddc_active))
+		pr_debug("%s: cannot get ddc active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_suspend =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl, "hdmi_sleep");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_suspend))
+		pr_debug("%s: cannot get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Configure (config=1) or deconfigure (config=0) a power module's
+ * resources: vregs, a register-bus vote client, and clocks. On a config
+ * failure, previously acquired resources are released in reverse order.
+ */
+static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int config)
+{
+	int rc = 0;
+	struct mdss_module_power *power_data = NULL;
+	char name[MAX_CLIENT_NAME_LEN];
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (config) {
+		rc = msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to config %s vreg. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto exit;
+		}
+
+		snprintf(name, MAX_CLIENT_NAME_LEN, "hdmi:%u", module);
+		hdmi_ctrl->pdata.reg_bus_clt[module] =
+			mdss_reg_bus_vote_client_create(name);
+		if (IS_ERR(hdmi_ctrl->pdata.reg_bus_clt[module])) {
+			pr_err("reg bus client create failed\n");
+			/* unwind the vreg config done above */
+			msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+			rc = PTR_ERR(hdmi_ctrl->pdata.reg_bus_clt[module]);
+			goto exit;
+		}
+
+		rc = msm_mdss_get_clk(&hdmi_ctrl->pdev->dev,
+			power_data->clk_config, power_data->num_clk);
+		if (rc) {
+			DEV_ERR("%s: Failed to get %s clk. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+
+			/* unwind bus client and vreg config */
+			mdss_reg_bus_vote_client_destroy(
+				hdmi_ctrl->pdata.reg_bus_clt[module]);
+			hdmi_ctrl->pdata.reg_bus_clt[module] = NULL;
+			msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+		}
+	} else {
+		msm_mdss_put_clk(power_data->clk_config, power_data->num_clk);
+		mdss_reg_bus_vote_client_destroy(
+			hdmi_ctrl->pdata.reg_bus_clt[module]);
+		hdmi_ctrl->pdata.reg_bus_clt[module] = NULL;
+
+		rc = msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+		if (rc)
+			DEV_ERR("%s: Fail to deconfig %s vreg. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+	}
+
+exit:
+	return rc;
+} /* hdmi_tx_config_power */
+
+/*
+ * Verify that every clock of a power module is configured and running at
+ * a non-zero rate. Returns 0 when all clocks are healthy, -EINVAL
+ * otherwise.
+ */
+static int hdmi_tx_check_clk_state(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module)
+{
+	struct mdss_module_power *pd;
+	int i;
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pd = &hdmi_ctrl->pdata.power_data[module];
+	if (!pd) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < pd->num_clk; i++) {
+		struct clk *clk = pd->clk_config[i].clk;
+		u32 rate;
+
+		if (!clk) {
+			DEV_ERR("%s: clk %s: not configured\n", __func__,
+				pd->clk_config[i].clk_name);
+			return -EINVAL;
+		}
+
+		rate = clk_get_rate(clk);
+		DEV_DBG("%s: clk %s: rate %d\n", __func__,
+			pd->clk_config[i].clk_name, rate);
+
+		if (!rate)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable all resources of a power module in order: vregs,
+ * pinctrl state, gpios, register-bus vote, then clocks. Skipped entirely
+ * while continuous splash owns the hardware. On disable, HPD/CEC modules
+ * are kept powered when CEC wakeup is enabled.
+ */
+static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int enable)
+{
+	int rc = 0;
+	struct mdss_module_power *power_data = NULL;
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) {
+		/* NOTE(review): uses hdmi_pm_name() here while the rest of
+		 * this function uses hdmi_tx_pm_name() — confirm both exist.
+		 */
+		DEV_DBG("%s: %s enabled by splash.\n",
+				__func__, hdmi_pm_name(module));
+		return 0;
+	}
+
+	if (enable && !hdmi_ctrl->power_data_enable[module]) {
+		rc = msm_mdss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable %s vreg. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto error;
+		}
+
+		rc = hdmi_tx_pinctrl_set_state(hdmi_ctrl, module, enable);
+		if (rc) {
+			DEV_ERR("%s: Failed to set %s pinctrl state\n",
+				__func__, hdmi_tx_pm_name(module));
+			goto error;
+		}
+
+		rc = msm_mdss_enable_gpio(power_data->gpio_config,
+			power_data->num_gpio, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable %s gpio. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_vreg;
+		}
+		mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
+			VOTE_INDEX_LOW);
+
+		rc = msm_mdss_clk_set_rate(power_data->clk_config,
+			power_data->num_clk);
+		if (rc) {
+			DEV_ERR("%s: failed to set clks rate for %s. err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_gpio;
+		}
+
+		rc = msm_mdss_enable_clk(power_data->clk_config,
+			power_data->num_clk, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable clks for %s. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_gpio;
+		}
+		hdmi_ctrl->power_data_enable[module] = true;
+	} else if (!enable && hdmi_ctrl->power_data_enable[module] &&
+		(!hdmi_tx_is_cec_wakeup_en(hdmi_ctrl) ||
+		((module != HDMI_TX_HPD_PM) && (module != HDMI_TX_CEC_PM)))) {
+		/* disable in the reverse order of the enable path */
+		msm_mdss_enable_clk(power_data->clk_config,
+			power_data->num_clk, 0);
+		mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
+			VOTE_INDEX_DISABLE);
+		msm_mdss_enable_gpio(power_data->gpio_config,
+			power_data->num_gpio, 0);
+		hdmi_tx_pinctrl_set_state(hdmi_ctrl, module, 0);
+		msm_mdss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, 0);
+		hdmi_ctrl->power_data_enable[module] = false;
+	}
+
+	return rc;
+
+disable_gpio:
+	mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
+		VOTE_INDEX_DISABLE);
+	msm_mdss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 0);
+disable_vreg:
+	msm_mdss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0);
+error:
+	return rc;
+} /* hdmi_tx_enable_power */
+
+/* hdmi_tx_core_off() - power down the CEC then CORE power modules. */
+static void hdmi_tx_core_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CEC_PM, 0);
+ hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 0);
+} /* hdmi_tx_core_off */
+
+/*
+ * hdmi_tx_core_on() - power up CORE then CEC modules; on CEC failure the
+ * CORE module is powered back down. Returns 0 on success or the failing
+ * hdmi_tx_enable_power() error code.
+ */
+static int hdmi_tx_core_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 1);
+ if (rc) {
+ DEV_ERR("%s: core hdmi_msm_enable_power failed rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CEC_PM, 1);
+ if (rc) {
+ DEV_ERR("%s: cec hdmi_msm_enable_power failed rc = %d\n",
+ __func__, rc);
+ goto disable_core_power;
+ }
+
+ return rc;
+disable_core_power:
+ hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 0);
+ return rc;
+} /* hdmi_tx_core_on */
+
+/*
+ * hdmi_tx_phy_reset() - pulse the PHY and PLL software resets.
+ * Reads HDMI_PHY_CTRL once to learn each reset line's active polarity
+ * (bit 3 for PHY, bit 1 for PLL), asserts both resets, then de-asserts
+ * them, always writing relative to the cached 'val'.
+ */
+static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ unsigned int phy_reset_polarity = 0x0;
+ unsigned int pll_reset_polarity = 0x0;
+ unsigned int val;
+ struct mdss_io_data *io = NULL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io not inititalized\n", __func__);
+ return;
+ }
+
+ val = DSS_REG_R_ND(io, HDMI_PHY_CTRL);
+
+ phy_reset_polarity = val >> 3 & 0x1;
+ pll_reset_polarity = val >> 1 & 0x1;
+
+ /* assert PHY reset (polarity-dependent) */
+ if (phy_reset_polarity == 0)
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET);
+ else
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET));
+
+ /* assert PLL reset */
+ if (pll_reset_polarity == 0)
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
+ else
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+
+ /* de-assert PHY reset */
+ if (phy_reset_polarity == 0)
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET));
+ else
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET);
+
+ /* de-assert PLL reset */
+ if (pll_reset_polarity == 0)
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+ else
+ DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
+} /* hdmi_tx_phy_reset */
+
+/*
+ * hdmi_tx_audio_info_setup() - audio-codec callback to (re)program audio.
+ * Caches the codec's setup params and applies them, but only when the sink
+ * is in HDMI (not DVI) mode and the panel is on; otherwise returns -EPERM
+ * and rate-limit-logs the current hpd/audio state for diagnosis.
+ * Serialized against the rest of the driver by tx_lock.
+ */
+static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
+ struct msm_hdmi_audio_setup_params *params)
+{
+ int rc = 0;
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+ u32 is_mode_dvi;
+
+ if (!hdmi_ctrl || !params) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ is_mode_dvi = hdmi_tx_is_dvi_mode(hdmi_ctrl);
+
+ if (!is_mode_dvi && hdmi_tx_is_panel_on(hdmi_ctrl)) {
+ memcpy(&hdmi_ctrl->audio_params, params,
+ sizeof(struct msm_hdmi_audio_setup_params));
+
+ hdmi_tx_audio_setup(hdmi_ctrl);
+ } else {
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ struct hdmi_audio_status status = {0};
+
+ if (hdmi_ctrl->audio_ops.status)
+ hdmi_ctrl->audio_ops.status(hdmi_ctrl->audio_data,
+ &status);
+
+ dev_err_ratelimited(&hdmi_ctrl->pdev->dev,
+ "%s: hpd %d, ack %d, switch %d, mode %s, power %d\n",
+ __func__, hdmi_ctrl->hpd_state,
+ status.ack_pending, status.switched,
+ is_mode_dvi ? "dvi" : "hdmi",
+ hdmi_ctrl->panel_power_on);
+ }
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
+}
+
+/*
+ * hdmi_tx_get_audio_edid_blk() - codec callback returning the sink's audio
+ * EDID block, fetched from the EDID feature module.
+ */
+static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
+ struct msm_hdmi_audio_edid_blk *blk)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ return hdmi_edid_get_audio_blk(
+ hdmi_tx_get_fd(HDMI_TX_FEAT_EDID), blk);
+} /* hdmi_tx_get_audio_edid_blk */
+
+/*
+ * hdmi_tx_tmds_enabled() - MHL callback: non-zero when the timing generator
+ * (and hence TMDS output) is running.
+ * NOTE(review): the return type is u8 but the error path returns -ENODEV;
+ * the negative errno is truncated to a non-zero byte, which a caller would
+ * misread as "enabled". Consider returning 0 (or widening the type) on error.
+ */
+static u8 hdmi_tx_tmds_enabled(struct platform_device *pdev)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ /* status of tmds */
+ return (hdmi_ctrl->timing_gen_on == true);
+}
+
+/*
+ * hdmi_tx_set_mhl_max_pclk() - MHL callback to record the downstream
+ * device's maximum pixel clock and mark the downstream data registered.
+ * Rejects a zero max_val with -EINVAL.
+ */
+static int hdmi_tx_set_mhl_max_pclk(struct platform_device *pdev, u32 max_val)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+ if (max_val) {
+ hdmi_ctrl->ds_data.ds_max_clk = max_val;
+ hdmi_ctrl->ds_data.ds_registered = true;
+ } else {
+ DEV_ERR("%s: invalid max pclk val\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_hdmi_register_mhl() - public entry for an MHL bridge driver to
+ * register; fills the ops table with this driver's callbacks and flags
+ * a downstream device as present. The 'data' argument is currently unused.
+ */
+int msm_hdmi_register_mhl(struct platform_device *pdev,
+ struct msm_hdmi_mhl_ops *ops, void *data)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid pdev\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!ops) {
+ DEV_ERR("%s: invalid ops\n", __func__);
+ return -EINVAL;
+ }
+
+ ops->tmds_enabled = hdmi_tx_tmds_enabled;
+ ops->set_mhl_max_pclk = hdmi_tx_set_mhl_max_pclk;
+ ops->set_upstream_hpd = hdmi_tx_set_mhl_hpd;
+
+ hdmi_ctrl->ds_registered = true;
+
+ return 0;
+}
+
+/*
+ * hdmi_tx_get_cable_status() - codec callback; returns non-zero when the
+ * panel is on (cable usable for audio). 'vote' asks this driver to keep
+ * the HDMI core powered for audio; the vote only sticks while hpd is up.
+ * Logs rate-limited diagnostics when audio queries a disconnected cable.
+ */
+static int hdmi_tx_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u32 hpd;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ /* snapshot panel-on state under the hpd spinlock */
+ spin_lock_irqsave(&hdmi_ctrl->hpd_state_lock, flags);
+ hpd = hdmi_tx_is_panel_on(hdmi_ctrl);
+ spin_unlock_irqrestore(&hdmi_ctrl->hpd_state_lock, flags);
+
+ hdmi_ctrl->vote_hdmi_core_on = false;
+
+ if (vote && hpd)
+ hdmi_ctrl->vote_hdmi_core_on = true;
+
+ /*
+ * if cable is not connected and audio calls this function,
+ * consider this as an error as it will result in whole
+ * audio path to fail.
+ */
+ if (!hpd) {
+ struct hdmi_audio_status status = {0};
+
+ if (hdmi_ctrl->audio_ops.status)
+ hdmi_ctrl->audio_ops.status(hdmi_ctrl->audio_data,
+ &status);
+
+ dev_err_ratelimited(&hdmi_ctrl->pdev->dev,
+ "%s: hpd %d, ack %d, switch %d, power %d\n",
+ __func__, hdmi_ctrl->hpd_state,
+ status.ack_pending, status.switched,
+ hdmi_ctrl->panel_power_on);
+ }
+
+ return hpd;
+}
+
+/*
+ * msm_mdss_hdmi_register_audio_codec() - exported entry for the audio codec
+ * driver; fills its ops table with this driver's audio callbacks.
+ */
+int msm_mdss_hdmi_register_audio_codec(struct platform_device *pdev,
+ struct msm_hdmi_audio_codec_ops *ops)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl || !ops) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -ENODEV;
+ }
+
+ ops->audio_info_setup = hdmi_tx_audio_info_setup;
+ ops->get_audio_edid_blk = hdmi_tx_get_audio_edid_blk;
+ ops->hdmi_cable_status = hdmi_tx_get_cable_status;
+
+ return 0;
+} /* hdmi_tx_audio_register */
+EXPORT_SYMBOL(msm_mdss_hdmi_register_audio_codec);
+
+/*
+ * hdmi_tx_setup_tmds_clk_rate() - compute the per-channel TMDS clock rate
+ * from the current timing's pixel frequency and the output format's
+ * pclk:TMDS ratio (YUV420 / YUV422 / RGB). Returns 0 on bad input, which
+ * callers cannot distinguish from a genuine zero rate.
+ * NOTE(review): declared to return int but computes a u32 rate; and the
+ * '!timing' check after taking the address of an embedded field is dead
+ * code (the address is never NULL).
+ */
+static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ u32 rate = 0;
+ struct msm_hdmi_mode_timing_info *timing = NULL;
+ u32 rate_ratio;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: Bad input parameters\n", __func__);
+ goto end;
+ }
+
+ timing = &hdmi_ctrl->timing;
+ if (!timing) {
+ DEV_ERR("%s: Invalid timing info\n", __func__);
+ goto end;
+ }
+
+ switch (hdmi_ctrl->panel_data.panel_info.out_format) {
+ case MDP_Y_CBCR_H2V2:
+ rate_ratio = HDMI_TX_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+ break;
+ case MDP_Y_CBCR_H2V1:
+ rate_ratio = HDMI_TX_YUV422_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+ break;
+ default:
+ rate_ratio = HDMI_TX_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+ break;
+ }
+
+ rate = timing->pixel_freq / rate_ratio;
+
+end:
+ return rate;
+}
+
+/* Read cable-sense directly from hardware: HPD_INT_STATUS bit 1. */
+static inline bool hdmi_tx_hw_is_cable_connected(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ return DSS_REG_R(&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO],
+ HDMI_HPD_INT_STATUS) & BIT(1) ? true : false;
+}
+
+/*
+ * hdmi_tx_hpd_polarity_setup() - arm the HPD interrupt for the next edge.
+ * polarity=true listens for connect, false for disconnect. If the cable
+ * is already in the state we are listening for, the HPD circuit is toggled
+ * (bit 28) so hardware re-samples and raises the interrupt immediately.
+ * No-op in sim mode.
+ */
+static void hdmi_tx_hpd_polarity_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
+ bool polarity)
+{
+ struct mdss_io_data *io = NULL;
+ bool cable_sense;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io is not initialized\n", __func__);
+ return;
+ }
+
+ if (hdmi_ctrl->sim_mode) {
+ DEV_DBG("%s: sim mode enabled\n", __func__);
+ return;
+ }
+
+ /* bit 2 enables the interrupt; bit 1 selects connect polarity */
+ if (polarity)
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2) | BIT(1));
+ else
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2));
+
+ cable_sense = hdmi_tx_hw_is_cable_connected(hdmi_ctrl);
+ DEV_DBG("%s: listen = %s, sense = %s\n", __func__,
+ polarity ? "connect" : "disconnect",
+ cable_sense ? "connect" : "disconnect");
+
+ if (cable_sense == polarity) {
+ u32 reg_val = DSS_REG_R(io, HDMI_HPD_CTRL);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ DSS_REG_W(io, HDMI_HPD_CTRL, reg_val & ~BIT(28));
+ DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
+ }
+} /* hdmi_tx_hpd_polarity_setup */
+
+/*
+ * hdmi_tx_audio_off() - stop audio via the codec ops and clear the cached
+ * audio parameters.
+ * NOTE(review): the NULL check only guards the ops call — the memset below
+ * dereferences hdmi_ctrl unconditionally and would crash on a NULL pointer.
+ * The memset should be inside the if, or the function should return early.
+ */
+static inline void hdmi_tx_audio_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (hdmi_ctrl && hdmi_ctrl->audio_ops.off)
+ hdmi_ctrl->audio_ops.off(hdmi_ctrl->audio_data);
+
+ memset(&hdmi_ctrl->audio_params, 0,
+ sizeof(struct msm_hdmi_audio_setup_params));
+}
+
+/*
+ * hdmi_tx_power_off() - tear down the display path: stop audio (HDMI mode
+ * only), call the panel off op, power down the core, then optionally drop
+ * HPD when an off is pending, the panel is suspending, or the display is
+ * non-pluggable. Notifies the downstream hpd_done callback. Always
+ * returns 0 once past the argument checks.
+ */
+static int hdmi_tx_power_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ struct mdss_io_data *io = NULL;
+ void *pdata = NULL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+ if (!pdata) {
+ DEV_ERR("%s: invalid panel data\n", __func__);
+ return -EINVAL;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: Core io is not initialized\n", __func__);
+ goto end;
+ }
+
+ if (!hdmi_ctrl->panel_power_on) {
+ DEV_DBG("%s: hdmi_ctrl is already off\n", __func__);
+ goto end;
+ }
+
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+ hdmi_tx_audio_off(hdmi_ctrl);
+
+ if (hdmi_ctrl->panel_ops.off)
+ hdmi_ctrl->panel_ops.off(pdata);
+
+ hdmi_tx_core_off(hdmi_ctrl);
+
+ hdmi_ctrl->panel_power_on = false;
+
+ if (hdmi_ctrl->hpd_off_pending || hdmi_ctrl->panel_suspend ||
+ !hdmi_ctrl->pdata.pluggable)
+ hdmi_tx_hpd_off(hdmi_ctrl);
+
+ if (hdmi_ctrl->hdmi_tx_hpd_done)
+ hdmi_ctrl->hdmi_tx_hpd_done(
+ hdmi_ctrl->downstream_data);
+end:
+ DEV_INFO("%s: HDMI Core: OFF\n", __func__);
+ return 0;
+} /* hdmi_tx_power_off */
+
+/*
+ * hdmi_tx_power_on() - bring up the display path for the current VIC:
+ * validate clocks and timing, program panel/EDID state, set the core pixel
+ * clock (halved for YUV420), power the core, and optionally unmute/route
+ * audio when the stream is shareable. Arms HPD for disconnect on success.
+ * NOTE(review): unlike its siblings, hdmi_ctrl is dereferenced before any
+ * NULL check — callers are assumed to have validated it.
+ */
+static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int ret;
+ u32 div = 0;
+ struct mdss_panel_data *panel_data = &hdmi_ctrl->panel_data;
+ void *pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+ void *edata = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+ /* non-pluggable displays never got an explicit hpd enable */
+ if (!hdmi_ctrl->pdata.pluggable)
+ hdmi_tx_hpd_on(hdmi_ctrl);
+
+ ret = hdmi_tx_check_clk_state(hdmi_ctrl, HDMI_TX_HPD_PM);
+ if (ret) {
+ DEV_ERR("%s: clocks not on\n", __func__);
+ return -EINVAL;
+ }
+
+ if (hdmi_ctrl->panel_ops.get_vic)
+ hdmi_ctrl->vic = hdmi_ctrl->panel_ops.get_vic(
+ &panel_data->panel_info, &hdmi_ctrl->ds_data);
+
+ if (hdmi_ctrl->vic <= 0) {
+ DEV_ERR("%s: invalid vic\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = hdmi_get_supported_mode(&hdmi_ctrl->timing,
+ &hdmi_ctrl->ds_data, hdmi_ctrl->vic);
+ if (ret || !hdmi_ctrl->timing.supported) {
+ DEV_ERR("%s: invalid timing data\n", __func__);
+ return -EINVAL;
+ }
+
+ hdmi_ctrl->panel.vic = hdmi_ctrl->vic;
+
+ /* infoframes only apply to HDMI (not DVI) sinks with CEA formats */
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
+ hdmi_tx_is_cea_format(hdmi_ctrl->vic))
+ hdmi_ctrl->panel.infoframe = true;
+ else
+ hdmi_ctrl->panel.infoframe = false;
+
+ hdmi_ctrl->panel.scan_info = hdmi_edid_get_sink_scaninfo(edata,
+ hdmi_ctrl->vic);
+ hdmi_ctrl->panel.scrambler = hdmi_edid_get_sink_scrambler_support(
+ edata);
+
+ if (hdmi_ctrl->panel_ops.on)
+ hdmi_ctrl->panel_ops.on(pdata);
+
+ /* YUV420 runs the pixel clock at half rate */
+ if (panel_data->panel_info.out_format == MDP_Y_CBCR_H2V2)
+ div = 1;
+
+ hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM].clk_config[0].rate =
+ (hdmi_ctrl->timing.pixel_freq * 1000) >> div;
+
+ hdmi_edid_set_video_resolution(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID),
+ hdmi_ctrl->vic, false);
+
+ hdmi_tx_core_on(hdmi_ctrl);
+
+ if (hdmi_ctrl->panel.infoframe &&
+ !hdmi_tx_is_encryption_set(hdmi_ctrl) &&
+ hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+ hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+ hdmi_tx_config_avmute(hdmi_ctrl, false);
+ }
+
+ hdmi_ctrl->panel_power_on = true;
+
+ hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_DISCONNECT_POLARITY);
+
+ if (hdmi_ctrl->hdmi_tx_hpd_done)
+ hdmi_ctrl->hdmi_tx_hpd_done(hdmi_ctrl->downstream_data);
+
+ DEV_DBG("%s: hdmi_ctrl core on\n", __func__);
+ return 0;
+}
+
+/*
+ * hdmi_tx_hpd_off() - disable hot-plug detection: mask HPD interrupts,
+ * switch to the CEC wake irq when CEC wakeup is armed on a pluggable
+ * display (otherwise fully disable the irq and drop HDMI mode), cut 5V,
+ * power down the HPD module, and clear the cached hpd state.
+ * Idempotent: returns early if HPD was never initialized.
+ */
+static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+ struct mdss_io_data *io = NULL;
+ unsigned long flags;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (!hdmi_ctrl->hpd_initialized) {
+ DEV_DBG("%s: HPD is already OFF, returning\n", __func__);
+ return;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io not inititalized\n", __func__);
+ return;
+ }
+
+ /* Turn off HPD interrupts */
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, 0);
+
+ /* non pluggable display should not enable wakeup interrupt */
+ if ((hdmi_tx_is_cec_wakeup_en(hdmi_ctrl) &&
+ hdmi_ctrl->pdata.pluggable)) {
+ hdmi_ctrl->mdss_util->enable_wake_irq(&hdmi_tx_hw);
+ } else {
+ hdmi_ctrl->mdss_util->disable_irq(&hdmi_tx_hw);
+ hdmi_tx_set_mode(hdmi_ctrl, false);
+ }
+ hdmi_tx_config_5v(hdmi_ctrl, false);
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, 0);
+ if (rc)
+ DEV_INFO("%s: Failed to disable hpd power. Error=%d\n",
+ __func__, rc);
+
+ spin_lock_irqsave(&hdmi_ctrl->hpd_state_lock, flags);
+ hdmi_ctrl->hpd_state = false;
+ spin_unlock_irqrestore(&hdmi_ctrl->hpd_state_lock, flags);
+
+ hdmi_ctrl->hpd_initialized = false;
+ hdmi_ctrl->hpd_off_pending = false;
+
+ DEV_DBG("%s: HPD is now OFF\n", __func__);
+} /* hdmi_tx_hpd_off */
+
+/*
+ * hdmi_tx_hpd_on() - enable hot-plug detection: power the HPD module,
+ * reset the PHY (unless splash owns the hardware), program the reference
+ * timer and debounce, enable the irq, turn on the HPD HW circuit (bit 28),
+ * and arm the interrupt for a connect edge. Idempotent when already on.
+ */
+static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ u32 reg_val;
+ int rc = 0;
+ struct mdss_io_data *io = NULL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io not inititalized\n", __func__);
+ return -EINVAL;
+ }
+
+ if (hdmi_ctrl->hpd_initialized) {
+ DEV_DBG("%s: HPD is already ON\n", __func__);
+ } else {
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, true);
+ if (rc) {
+ DEV_ERR("%s: Failed to enable hpd power. rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ mdss_reg_dump(io->base, io->len, "HDMI-INIT: ", REG_DUMP);
+
+ if (!hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) {
+ hdmi_tx_set_mode(hdmi_ctrl, false);
+ hdmi_tx_phy_reset(hdmi_ctrl);
+ hdmi_tx_set_mode(hdmi_ctrl, true);
+ }
+
+ DSS_REG_W(io, HDMI_USEC_REFTIMER, 0x0001001B);
+
+ /* normal irq supersedes the CEC wake irq */
+ if (hdmi_tx_is_cec_wakeup_en(hdmi_ctrl))
+ hdmi_ctrl->mdss_util->disable_wake_irq(&hdmi_tx_hw);
+
+ hdmi_ctrl->mdss_util->enable_irq(&hdmi_tx_hw);
+
+ hdmi_ctrl->hpd_initialized = true;
+
+ DEV_INFO("%s: HDMI HW version = 0x%x\n", __func__,
+ DSS_REG_R_ND(&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO],
+ HDMI_VERSION));
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ reg_val = DSS_REG_R(io, HDMI_HPD_CTRL) | 0x1FFF;
+
+ /* Turn on HPD HW circuit */
+ DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
+
+ hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
+ DEV_DBG("%s: HPD is now ON\n", __func__);
+ }
+
+ return rc;
+} /* hdmi_tx_hpd_on */
+
+/*
+ * hdmi_tx_sysfs_enable_hpd() - sysfs-driven hpd enable/disable. Disabling
+ * while the panel is still on is deferred (hpd_off_pending) until the
+ * panel powers off rather than cutting HPD under an active display.
+ */
+static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on)
+{
+ int rc = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ DEV_DBG("%s: %d\n", __func__, on);
+ if (on) {
+ hdmi_ctrl->hpd_off_pending = false;
+ rc = hdmi_tx_hpd_on(hdmi_ctrl);
+ } else {
+ if (!hdmi_ctrl->panel_power_on)
+ hdmi_tx_hpd_off(hdmi_ctrl);
+ else
+ hdmi_ctrl->hpd_off_pending = true;
+ }
+
+ return rc;
+} /* hdmi_tx_sysfs_enable_hpd */
+
+/*
+ * hdmi_tx_set_mhl_hpd() - MHL callback that forces upstream HPD on/off.
+ * MHL's request overrides the current hpd_feature_on state; on success the
+ * feature flag is toggled (bit-0 flip) to track the new state.
+ * Serialized by tx_lock.
+ */
+static int hdmi_tx_set_mhl_hpd(struct platform_device *pdev, uint8_t on)
+{
+ int rc = 0;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ /* mhl status should override */
+ hdmi_ctrl->mhl_hpd_on = on;
+
+ if (!on && hdmi_ctrl->hpd_feature_on) {
+ rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, false);
+ } else if (on && !hdmi_ctrl->hpd_feature_on) {
+ rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+ } else {
+ DEV_DBG("%s: hpd is already '%s'. return\n", __func__,
+ hdmi_ctrl->hpd_feature_on ? "enabled" : "disabled");
+ goto end;
+ }
+
+ if (!rc) {
+ /* flip bit 0 so the flag mirrors the newly-applied state */
+ hdmi_ctrl->hpd_feature_on =
+ (~hdmi_ctrl->hpd_feature_on) & BIT(0);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+ } else {
+ DEV_ERR("%s: failed to '%s' hpd. rc = %d\n", __func__,
+ on ? "enable" : "disable", rc);
+ }
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
+}
+
+/*
+ * hdmi_tx_isr() - top-half interrupt handler. Latches the new HPD state
+ * under the hpd spinlock, completes hpd_int_done for waiters, filters
+ * spurious (no-change) HPD edges by re-arming and acking, otherwise acks
+ * and defers to hpd_int_work. Also dispatches DDC, CEC and HDCP
+ * sub-interrupts. Always returns IRQ_HANDLED (shared-line friendly).
+ */
+static irqreturn_t hdmi_tx_isr(int irq, void *data)
+{
+ struct mdss_io_data *io = NULL;
+ struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)data;
+ unsigned long flags;
+ u32 hpd_current_state;
+ u32 reg_val = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_WARN("%s: invalid input data, ISR ignored\n", __func__);
+ goto end;
+ }
+
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_WARN("%s: core io not initialized, ISR ignored\n",
+ __func__);
+ goto end;
+ }
+
+ if (DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(0)) {
+ spin_lock_irqsave(&hdmi_ctrl->hpd_state_lock, flags);
+ hpd_current_state = hdmi_ctrl->hpd_state;
+ hdmi_ctrl->hpd_state =
+ (DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1)) >> 1;
+ spin_unlock_irqrestore(&hdmi_ctrl->hpd_state_lock, flags);
+
+ if (!completion_done(&hdmi_ctrl->hpd_int_done))
+ complete_all(&hdmi_ctrl->hpd_int_done);
+
+ /*
+ * check if this is a spurious interrupt, if yes, reset
+ * interrupts and return
+ */
+ if (hpd_current_state == hdmi_ctrl->hpd_state) {
+ DEV_DBG("%s: spurious interrupt %d\n", __func__,
+ hpd_current_state);
+
+ /* enable interrupts */
+ reg_val |= BIT(2);
+
+ /* set polarity, reverse of current state */
+ reg_val |= (~hpd_current_state << 1) & BIT(1);
+
+ /* ack interrupt */
+ reg_val |= BIT(0);
+
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, reg_val);
+ goto end;
+ }
+
+ /*
+ * Ack the current hpd interrupt and stop listening to
+ * new hpd interrupt.
+ */
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0));
+
+ queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
+ }
+
+ if (hdmi_ddc_isr(&hdmi_ctrl->ddc_ctrl,
+ hdmi_ctrl->hdmi_tx_ver))
+ DEV_ERR("%s: hdmi_ddc_isr failed\n", __func__);
+
+ if (hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW)) {
+ if (hdmi_cec_isr(hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW)))
+ DEV_ERR("%s: hdmi_cec_isr failed\n", __func__);
+ }
+
+ if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) {
+ if (hdmi_ctrl->hdcp_ops->hdmi_hdcp_isr) {
+ if (hdmi_ctrl->hdcp_ops->hdmi_hdcp_isr(
+ hdmi_ctrl->hdcp_data))
+ DEV_ERR("%s: hdmi_hdcp_isr failed\n",
+ __func__);
+ }
+ }
+end:
+ return IRQ_HANDLED;
+} /* hdmi_tx_isr */
+
+/*
+ * hdmi_tx_dev_deinit() - undo hdmi_tx_dev_init(): release feature modules,
+ * drop HDCP hooks, unregister the extcon switch, destroy the workqueue and
+ * mutexes, and detach this ctrl from the shared irq descriptor.
+ */
+static void hdmi_tx_dev_deinit(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ hdmi_tx_deinit_features(hdmi_ctrl, HDMI_TX_FEAT_MAX);
+
+ hdmi_ctrl->hdcp_ops = NULL;
+ hdmi_ctrl->hdcp_data = NULL;
+
+ extcon_dev_unregister(&hdmi_ctrl->sdev);
+ if (hdmi_ctrl->workq)
+ destroy_workqueue(hdmi_ctrl->workq);
+ mutex_destroy(&hdmi_ctrl->tx_lock);
+ mutex_destroy(&hdmi_ctrl->mutex);
+
+ hdmi_tx_hw.ptr = NULL;
+} /* hdmi_tx_dev_deinit */
+
+/*
+ * hdmi_tx_dev_init() - one-time device setup: verify HDMI capability,
+ * attach the irq descriptor, create locks/workqueue, and initialize DDC,
+ * state flags, completions and work items. Unwinds via goto labels on
+ * failure.
+ * NOTE(review): fail_create_workq re-checks workq, which is always NULL
+ * on that path, and tx_lock is not destroyed there — minor imbalance.
+ */
+static int hdmi_tx_dev_init(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+ struct hdmi_tx_platform_data *pdata = NULL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = &hdmi_ctrl->pdata;
+
+ rc = hdmi_tx_check_capability(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: no HDMI device\n", __func__);
+ goto fail_no_hdmi;
+ }
+
+ /* irq enable/disable will be handled in hpd on/off */
+ hdmi_tx_hw.ptr = (void *)hdmi_ctrl;
+
+ mutex_init(&hdmi_ctrl->mutex);
+ mutex_init(&hdmi_ctrl->tx_lock);
+
+ INIT_LIST_HEAD(&hdmi_ctrl->cable_notify_handlers);
+
+ hdmi_ctrl->workq = create_workqueue("hdmi_tx_workq");
+ if (!hdmi_ctrl->workq) {
+ DEV_ERR("%s: hdmi_tx_workq creation failed.\n", __func__);
+ rc = -EPERM;
+ goto fail_create_workq;
+ }
+
+ hdmi_ctrl->ddc_ctrl.io = &pdata->io[HDMI_TX_CORE_IO];
+ init_completion(&hdmi_ctrl->ddc_ctrl.ddc_sw_done);
+
+ hdmi_ctrl->panel_power_on = false;
+ hdmi_ctrl->panel_suspend = false;
+
+ hdmi_ctrl->hpd_state = false;
+ hdmi_ctrl->hpd_initialized = false;
+ hdmi_ctrl->hpd_off_pending = false;
+ init_completion(&hdmi_ctrl->hpd_int_done);
+
+ INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
+ INIT_WORK(&hdmi_ctrl->fps_work, hdmi_tx_fps_work);
+ INIT_WORK(&hdmi_ctrl->cable_notify_work, hdmi_tx_cable_notify_work);
+ INIT_DELAYED_WORK(&hdmi_ctrl->hdcp_cb_work, hdmi_tx_hdcp_cb_work);
+
+ spin_lock_init(&hdmi_ctrl->hpd_state_lock);
+
+ return 0;
+
+fail_create_workq:
+ if (hdmi_ctrl->workq)
+ destroy_workqueue(hdmi_ctrl->workq);
+ mutex_destroy(&hdmi_ctrl->mutex);
+fail_no_hdmi:
+ return rc;
+} /* hdmi_tx_dev_init */
+
+/*
+ * hdmi_tx_start_hdcp() - kick off HDCP authentication. No-op (success)
+ * during continuous splash or when HDCP is disabled; mutes AV first if
+ * encryption is already set.
+ * NOTE(review): hdcp_ops is dereferenced without a NULL check here —
+ * presumably hdmi_tx_is_hdcp_enabled() implies ops are valid; confirm.
+ */
+static int hdmi_tx_start_hdcp(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled ||
+ !hdmi_tx_is_hdcp_enabled(hdmi_ctrl))
+ return 0;
+
+ if (hdmi_tx_is_encryption_set(hdmi_ctrl))
+ hdmi_tx_config_avmute(hdmi_ctrl, true);
+
+ rc = hdmi_ctrl->hdcp_ops->hdmi_hdcp_authenticate(hdmi_ctrl->hdcp_data);
+ if (rc)
+ DEV_ERR("%s: hdcp auth failed. rc=%d\n", __func__, rc);
+
+ return rc;
+}
+
+/*
+ * hdmi_tx_init_switch_dev() - name the extcon switch device "hdmi" and
+ * publish the initial (disconnected) HDMI display state.
+ */
+static int hdmi_tx_init_switch_dev(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = -EINVAL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ goto end;
+ }
+
+ hdmi_ctrl->sdev.name = "hdmi";
+ rc = extcon_set_state_sync(&hdmi_ctrl->sdev, EXTCON_DISP_HDMI, false);
+ if (rc) {
+ DEV_ERR("%s: display switch registration failed\n", __func__);
+ goto end;
+ }
+end:
+ return rc;
+}
+
+/*
+ * hdmi_tx_hdcp_off() - stop HDCP, detach its ops, and power down the DDC
+ * module used by authentication.
+ * NOTE(review): hdcp_ops is dereferenced without a NULL check — callers
+ * appear to gate on hdmi_tx_is_hdcp_enabled(); confirm that invariant.
+ */
+static int hdmi_tx_hdcp_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ DEV_DBG("%s: Turning off HDCP\n", __func__);
+ hdmi_ctrl->hdcp_ops->hdmi_hdcp_off(
+ hdmi_ctrl->hdcp_data);
+
+ hdmi_ctrl->hdcp_ops = NULL;
+
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM,
+ false);
+ if (rc)
+ DEV_ERR("%s: Failed to disable ddc power\n",
+ __func__);
+
+ return rc;
+}
+
+/*
+ * hdmi_tx_get_event_name() - map an MDSS panel event code to its name via
+ * the HDMI_TX_EVT_STR stringizing macro, for debug logging only.
+ * Unknown events yield "unknown".
+ */
+static char *hdmi_tx_get_event_name(int event)
+{
+ switch (event) {
+ case MDSS_EVENT_RESET:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_RESET);
+ case MDSS_EVENT_LINK_READY:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_LINK_READY);
+ case MDSS_EVENT_UNBLANK:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_UNBLANK);
+ case MDSS_EVENT_PANEL_ON:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_ON);
+ case MDSS_EVENT_BLANK:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_BLANK);
+ case MDSS_EVENT_PANEL_OFF:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_OFF);
+ case MDSS_EVENT_CLOSE:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_CLOSE);
+ case MDSS_EVENT_SUSPEND:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_SUSPEND);
+ case MDSS_EVENT_RESUME:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_RESUME);
+ case MDSS_EVENT_CHECK_PARAMS:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_CHECK_PARAMS);
+ case MDSS_EVENT_CONT_SPLASH_BEGIN:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_CONT_SPLASH_BEGIN);
+ case MDSS_EVENT_CONT_SPLASH_FINISH:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_CONT_SPLASH_FINISH);
+ case MDSS_EVENT_PANEL_UPDATE_FPS:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_UPDATE_FPS);
+ case MDSS_EVENT_FB_REGISTERED:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_FB_REGISTERED);
+ case MDSS_EVENT_PANEL_CLK_CTRL:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_CLK_CTRL);
+ case MDSS_EVENT_DSI_CMDLIST_KOFF:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_CMDLIST_KOFF);
+ case MDSS_EVENT_ENABLE_PARTIAL_ROI:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_ENABLE_PARTIAL_ROI);
+ case MDSS_EVENT_DSI_STREAM_SIZE:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_STREAM_SIZE);
+ case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_DYNAMIC_SWITCH);
+ case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
+ return HDMI_TX_EVT_STR(MDSS_EVENT_REGISTER_RECOVERY_HANDLER);
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * hdmi_tx_update_fps() - apply a dynamic-fps change: no-op unless dynamic
+ * fps is enabled and the target differs from the current rate; tears down
+ * HDCP, lets the panel recompute the VIC for the new fps, reprograms the
+ * pixel clock, then restarts HDCP.
+ */
+static void hdmi_tx_update_fps(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ void *pdata = NULL;
+ struct mdss_panel_info *pinfo;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+ if (!pdata) {
+ DEV_ERR("%s: invalid panel data\n", __func__);
+ return;
+ }
+
+ pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+ if (!pinfo->dynamic_fps) {
+ DEV_DBG("%s: Dynamic fps not enabled\n", __func__);
+ return;
+ }
+
+ DEV_DBG("%s: current fps %d, new fps %d\n", __func__,
+ pinfo->current_fps, hdmi_ctrl->dynamic_fps);
+
+ if (hdmi_ctrl->dynamic_fps == pinfo->current_fps) {
+ DEV_DBG("%s: Panel is already at this FPS: %d\n",
+ __func__, hdmi_ctrl->dynamic_fps);
+ return;
+ }
+
+ /* HDCP must be re-authenticated across a timing change */
+ if (hdmi_tx_is_hdcp_enabled(hdmi_ctrl))
+ hdmi_tx_hdcp_off(hdmi_ctrl);
+
+ if (hdmi_ctrl->panel_ops.update_fps)
+ hdmi_ctrl->vic = hdmi_ctrl->panel_ops.update_fps(pdata,
+ hdmi_ctrl->dynamic_fps);
+
+ hdmi_tx_update_pixel_clk(hdmi_ctrl);
+
+ hdmi_tx_start_hdcp(hdmi_ctrl);
+}
+
+/*
+ * hdmi_tx_fps_work() - workqueue bottom half for dynamic fps updates
+ * queued from the atomic event-handler path.
+ * NOTE(review): container_of() never yields NULL here — dead check.
+ */
+static void hdmi_tx_fps_work(struct work_struct *work)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, fps_work);
+ if (!hdmi_ctrl) {
+ DEV_DBG("%s: invalid input\n", __func__);
+ return;
+ }
+
+ hdmi_tx_update_fps(hdmi_ctrl);
+}
+
+/*
+ * hdmi_tx_evt_handle_register() - FB_REGISTERED handler: create sysfs
+ * nodes, init feature modules and the extcon switch, and auto-enable HPD
+ * for primary or non-pluggable displays. Unwinds each stage on failure.
+ */
+static int hdmi_tx_evt_handle_register(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+
+ rc = hdmi_tx_sysfs_create(hdmi_ctrl, hdmi_ctrl->evt_arg);
+ if (rc) {
+ DEV_ERR("%s: hdmi_tx_sysfs_create failed.rc=%d\n",
+ __func__, rc);
+ goto sysfs_err;
+ }
+ rc = hdmi_tx_init_features(hdmi_ctrl, hdmi_ctrl->evt_arg);
+ if (rc) {
+ DEV_ERR("%s: init_features failed.rc=%d\n", __func__, rc);
+ goto init_err;
+ }
+
+ rc = hdmi_tx_init_switch_dev(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: init switch dev failed.rc=%d\n", __func__, rc);
+ goto switch_err;
+ }
+
+ if (hdmi_ctrl->pdata.primary || !hdmi_ctrl->pdata.pluggable) {
+ reinit_completion(&hdmi_ctrl->hpd_int_done);
+ rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+ if (rc) {
+ DEV_ERR("%s: hpd_enable failed. rc=%d\n", __func__, rc);
+ goto primary_err;
+ } else {
+ hdmi_ctrl->hpd_feature_on = true;
+ }
+ }
+
+ return 0;
+
+primary_err:
+ extcon_dev_unregister(&hdmi_ctrl->sdev);
+switch_err:
+ hdmi_tx_deinit_features(hdmi_ctrl, HDMI_TX_FEAT_MAX);
+init_err:
+ hdmi_tx_sysfs_remove(hdmi_ctrl);
+sysfs_err:
+ return rc;
+}
+
+/*
+ * hdmi_tx_evt_handle_check_param() - CHECK_PARAMS handler. Computes the
+ * VIC for the requested panel info; returns 1 when it differs from the
+ * current VIC (mdss then reconfigures via close/open), 0 otherwise,
+ * including on an invalid VIC.
+ */
+static int hdmi_tx_evt_handle_check_param(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int new_vic = -1;
+ int rc = 0;
+
+ if (hdmi_ctrl->panel_ops.get_vic)
+ new_vic = hdmi_ctrl->panel_ops.get_vic(
+ hdmi_ctrl->evt_arg, &hdmi_ctrl->ds_data);
+
+ if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+ DEV_ERR("%s: invalid or not supported vic\n", __func__);
+ goto end;
+ }
+
+ /*
+ * return value of 1 lets mdss know that panel
+ * needs a reconfig due to new resolution and
+ * it will issue close and open subsequently.
+ */
+ if (new_vic != hdmi_ctrl->vic) {
+ rc = 1;
+ DEV_DBG("%s: res change %d ==> %d\n", __func__,
+ hdmi_ctrl->vic, new_vic);
+ }
+end:
+ return rc;
+}
+
+/*
+ * hdmi_tx_evt_handle_resume() - RESUME handler: clear suspend state,
+ * re-enable HPD if the feature is on, and — when the switch still reports
+ * connected but hardware sense disagrees — wait briefly (HZ/10) for an
+ * HPD interrupt before declaring the cable removed during suspend and
+ * tearing down the audio/cable notification state.
+ */
+static int hdmi_tx_evt_handle_resume(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+
+ hdmi_ctrl->panel_suspend = false;
+ hdmi_tx_cec_device_suspend(hdmi_ctrl);
+
+ if (!hdmi_ctrl->hpd_feature_on)
+ goto end;
+
+ rc = hdmi_tx_hpd_on(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: hpd_on failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ if (hdmi_ctrl->sdev.state &&
+ !hdmi_tx_hw_is_cable_connected(hdmi_ctrl)) {
+ u32 timeout;
+
+ reinit_completion(&hdmi_ctrl->hpd_int_done);
+ timeout = wait_for_completion_timeout(
+ &hdmi_ctrl->hpd_int_done, HZ/10);
+ if (!timeout && !hdmi_ctrl->hpd_state) {
+ DEV_DBG("%s: cable removed during suspend\n", __func__);
+ hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+ hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+ hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+ }
+ }
+end:
+ return rc;
+}
+
+/*
+ * hdmi_tx_evt_handle_reset() - RESET handler: pulse the PHY reset (with
+ * HDMI mode dropped around it) when HPD is up and splash does not own the
+ * hardware. Always succeeds.
+ */
+static int hdmi_tx_evt_handle_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (!hdmi_ctrl->panel_data.panel_info.cont_splash_enabled &&
+ hdmi_ctrl->hpd_initialized) {
+ hdmi_tx_set_mode(hdmi_ctrl, false);
+ hdmi_tx_phy_reset(hdmi_ctrl);
+ hdmi_tx_set_mode(hdmi_ctrl, true);
+ }
+
+ return 0;
+}
+
+/*
+ * hdmi_tx_evt_handle_unblank() - UNBLANK handler: power the DDC module
+ * then bring the display path up via hdmi_tx_power_on().
+ */
+static int hdmi_tx_evt_handle_unblank(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc;
+
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, true);
+ if (rc) {
+ DEV_ERR("%s: ddc power on failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_tx_power_on(hdmi_ctrl);
+ if (rc)
+ DEV_ERR("%s: hdmi_tx_power_on failed. rc=%d\n", __func__, rc);
+end:
+ return rc;
+}
+
+/*
+ * hdmi_tx_evt_handle_panel_on() - PANEL_ON handler: refresh HDCP info and
+ * start authentication (skipped in sim mode), mark the timing generator
+ * running, and — if a suspend raced in — tear down audio and notify
+ * disconnect.
+ */
+static int hdmi_tx_evt_handle_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+
+ if (!hdmi_ctrl->sim_mode) {
+ hdmi_tx_update_hdcp_info(hdmi_ctrl);
+
+ rc = hdmi_tx_start_hdcp(hdmi_ctrl);
+ if (rc)
+ DEV_ERR("%s: hdcp start failed rc=%d\n", __func__, rc);
+ }
+
+ hdmi_ctrl->timing_gen_on = true;
+
+ if (hdmi_ctrl->panel_suspend) {
+ DEV_DBG("%s: panel suspend has triggered\n", __func__);
+
+ hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+ hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+ hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+ }
+
+ return rc;
+}
+
+/*
+ * hdmi_tx_evt_handle_suspend() - SUSPEND handler: drop HPD when nothing
+ * is displaying (or the display is non-pluggable), then flag suspend and
+ * notify CEC. No-op if hpd is off or already suspended.
+ */
+static int hdmi_tx_evt_handle_suspend(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if ((!hdmi_ctrl->hpd_feature_on) || (hdmi_ctrl->panel_suspend == true))
+ goto end;
+
+ if ((!hdmi_ctrl->hpd_state && !hdmi_ctrl->panel_power_on) ||
+ (hdmi_ctrl->hpd_state && !hdmi_ctrl->pdata.pluggable))
+ hdmi_tx_hpd_off(hdmi_ctrl);
+
+ hdmi_ctrl->panel_suspend = true;
+ hdmi_tx_cec_device_suspend(hdmi_ctrl);
+end:
+ return 0;
+}
+
+/* BLANK handler: stop HDCP (if running) before the panel blanks. */
+static int hdmi_tx_evt_handle_blank(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (hdmi_tx_is_hdcp_enabled(hdmi_ctrl))
+ hdmi_tx_hdcp_off(hdmi_ctrl);
+
+ return 0;
+}
+
+/*
+ * hdmi_tx_evt_handle_panel_off() - PANEL_OFF handler: drop DDC power,
+ * AV-mute and power off the display path if it is on, and mark the timing
+ * generator stopped.
+ */
+static int hdmi_tx_evt_handle_panel_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc;
+
+ rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, false);
+ if (rc) {
+ DEV_ERR("%s: Failed to disable ddc power\n", __func__);
+ goto end;
+ }
+
+ if (hdmi_ctrl->panel_power_on) {
+ /* mute AV before cutting power to avoid visible artifacts */
+ hdmi_tx_config_avmute(hdmi_ctrl, 1);
+ rc = hdmi_tx_power_off(hdmi_ctrl);
+ if (rc)
+ DEV_ERR("%s: hdmi_tx_power_off failed.rc=%d\n",
+ __func__, rc);
+ } else {
+ DEV_DBG("%s: hdmi_ctrl is already powered off\n", __func__);
+ }
+
+ hdmi_ctrl->timing_gen_on = false;
+end:
+ return rc;
+}
+
+/*
+ * CLOSE handler: with hpd up and the cable gone, re-arm the HPD interrupt
+ * for the next connect edge.
+ */
+static int hdmi_tx_evt_handle_close(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (hdmi_ctrl->hpd_feature_on && hdmi_ctrl->hpd_initialized &&
+ !hdmi_ctrl->hpd_state)
+ hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
+
+ return 0;
+}
+
+/*
+ * hdmi_tx_event_handler() - central mdss panel event dispatcher. FPS
+ * updates arrive in atomic context and are deferred to fps_work; all
+ * other events are looked up in the evt_handler table under tx_lock.
+ * NOTE(review): 'event' indexes evt_handler[] without an explicit bounds
+ * check — presumably the table is sized for every MDSS event code;
+ * confirm against the table's declaration.
+ */
+static int hdmi_tx_event_handler(struct mdss_panel_data *panel_data,
+ int event, void *arg)
+{
+ int rc = 0;
+ hdmi_tx_evt_handler handler;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* UPDATE FPS is called from atomic context */
+ if (event == MDSS_EVENT_PANEL_UPDATE_FPS) {
+ hdmi_ctrl->dynamic_fps = (u32) (unsigned long)arg;
+ DEV_DBG("%s: fps %d\n", __func__, hdmi_ctrl->dynamic_fps);
+ queue_work(hdmi_ctrl->workq, &hdmi_ctrl->fps_work);
+ return rc;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ hdmi_ctrl->evt_arg = arg;
+
+ DEV_DBG("%s: event = %s suspend=%d, hpd_feature=%d\n", __func__,
+ hdmi_tx_get_event_name(event), hdmi_ctrl->panel_suspend,
+ hdmi_ctrl->hpd_feature_on);
+
+ handler = hdmi_ctrl->evt_handler[event];
+ if (handler)
+ rc = handler(hdmi_ctrl);
+
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+end:
+ return rc;
+}
+
+/*
+ * Register this device as an mdss panel: wire the event handler,
+ * initialize panel info, register the panel and the HDMI TX IRQ.
+ * Returns 0 on success or a negative error code.
+ */
+static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int rc = 0;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ hdmi_ctrl->panel_data.event_handler = hdmi_tx_event_handler;
+
+ /* non-primary panels always start at the default resolution */
+ if (!hdmi_ctrl->pdata.primary)
+ hdmi_ctrl->vic = DEFAULT_VIDEO_RESOLUTION;
+
+ rc = hdmi_tx_init_panel_info(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: hdmi_init_panel_info failed\n", __func__);
+ return rc;
+ }
+
+ rc = mdss_register_panel(hdmi_ctrl->pdev, &hdmi_ctrl->panel_data);
+ if (rc) {
+ DEV_ERR("%s: FAILED: to register HDMI panel\n", __func__);
+ return rc;
+ }
+
+ rc = hdmi_ctrl->mdss_util->register_irq(&hdmi_tx_hw);
+ if (rc)
+ DEV_ERR("%s: mdss_register_irq failed.\n", __func__);
+
+ return rc;
+} /* hdmi_tx_register_panel */
+
+/*
+ * Undo hdmi_tx_init_resource(): deconfigure power for every module and
+ * unmap every IO region, both in reverse order of initialization.
+ */
+static void hdmi_tx_deinit_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int i;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ /* VREG & CLK */
+ for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--) {
+ if (hdmi_tx_config_power(hdmi_ctrl, i, 0))
+ DEV_ERR("%s: '%s' power deconfig fail\n",
+ __func__, hdmi_tx_pm_name(i));
+ }
+
+ /* IO */
+ for (i = HDMI_TX_MAX_IO - 1; i >= 0; i--) {
+ if (hdmi_ctrl->pdata.io[i].base)
+ msm_mdss_iounmap(&hdmi_ctrl->pdata.io[i]);
+ }
+} /* hdmi_tx_deinit_resource */
+
+/*
+ * Map all HDMI TX IO regions and configure power for every module.
+ * An IO region that fails to map is logged at debug level and tolerated;
+ * a power-config failure rolls everything back via
+ * hdmi_tx_deinit_resource(). Returns 0 on success or a negative error.
+ */
+static int hdmi_tx_init_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ int i, rc = 0;
+ struct hdmi_tx_platform_data *pdata = NULL;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = &hdmi_ctrl->pdata;
+
+ hdmi_tx_pinctrl_init(hdmi_ctrl->pdev);
+
+ /* IO: a failed remap is non-fatal, some regions are optional */
+ for (i = 0; i < HDMI_TX_MAX_IO; i++) {
+ rc = msm_mdss_ioremap_byname(hdmi_ctrl->pdev, &pdata->io[i],
+ hdmi_tx_io_name(i));
+ if (rc) {
+ DEV_DBG("%s: '%s' remap failed or not available\n",
+ __func__, hdmi_tx_io_name(i));
+ }
+ DEV_INFO("%s: '%s': start = 0x%pK, len=0x%x\n", __func__,
+ hdmi_tx_io_name(i), pdata->io[i].base,
+ pdata->io[i].len);
+ }
+
+ /* VREG & CLK */
+ for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+ rc = hdmi_tx_config_power(hdmi_ctrl, i, 1);
+ if (rc) {
+ DEV_ERR("%s: '%s' power config failed.rc=%d\n",
+ __func__, hdmi_tx_pm_name(i), rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ hdmi_tx_deinit_resource(hdmi_ctrl);
+ return rc;
+} /* hdmi_tx_init_resource */
+
+/*
+ * Free the devm-allocated clock table of one power module and mark the
+ * module as having no clocks.
+ */
+static void hdmi_tx_put_dt_clk_data(struct device *dev,
+ struct mdss_module_power *module_power)
+{
+ if (!module_power) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ module_power->num_clk = 0;
+
+ if (!module_power->clk_config)
+ return;
+
+ devm_kfree(dev, module_power->clk_config);
+ module_power->clk_config = NULL;
+} /* hdmi_tx_put_dt_clk_data */
+
+/* todo: once clk are moved to device tree then change this implementation */
+/*
+ * Build the clock table for one HDMI power module. Clock names and
+ * rates are hard-coded here until they are moved to device tree.
+ * Returns 0 on success, -EINVAL for a bad module type, -ENOMEM on
+ * allocation failure (previously the alloc-failure path leaked rc == 0
+ * and reported success with an empty clock table).
+ */
+static int hdmi_tx_get_dt_clk_data(struct device *dev,
+ struct mdss_module_power *mp, u32 module_type)
+{
+ int rc = 0;
+
+ if (!dev || !mp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+ switch (module_type) {
+ case HDMI_TX_HPD_PM:
+ mp->num_clk = 4;
+ mp->clk_config = devm_kzalloc(dev, sizeof(struct mdss_clk) *
+ mp->num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__,
+ hdmi_tx_pm_name(module_type));
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ snprintf(mp->clk_config[0].clk_name, 32, "%s", "iface_clk");
+ mp->clk_config[0].type = DSS_CLK_AHB;
+ mp->clk_config[0].rate = 0;
+
+ snprintf(mp->clk_config[1].clk_name, 32, "%s", "core_clk");
+ mp->clk_config[1].type = DSS_CLK_OTHER;
+ mp->clk_config[1].rate = 19200000;
+
+ /*
+ * This clock is required to clock MDSS interrupt registers
+ * when HDMI is the only block turned on within MDSS. Since
+ * rate for this clock is controlled by MDP driver, treat this
+ * similar to AHB clock and do not set rate for it.
+ */
+ snprintf(mp->clk_config[2].clk_name, 32, "%s", "mdp_core_clk");
+ mp->clk_config[2].type = DSS_CLK_AHB;
+ mp->clk_config[2].rate = 0;
+
+ snprintf(mp->clk_config[3].clk_name, 32, "%s", "alt_iface_clk");
+ mp->clk_config[3].type = DSS_CLK_AHB;
+ mp->clk_config[3].rate = 0;
+ break;
+
+ case HDMI_TX_CORE_PM:
+ mp->num_clk = 1;
+ mp->clk_config = devm_kzalloc(dev, sizeof(struct mdss_clk) *
+ mp->num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__,
+ hdmi_tx_pm_name(module_type));
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ snprintf(mp->clk_config[0].clk_name, 32, "%s", "extp_clk");
+ mp->clk_config[0].type = DSS_CLK_PCLK;
+ /* This rate will be overwritten when core is powered on */
+ mp->clk_config[0].rate = 148500000;
+ break;
+
+ case HDMI_TX_DDC_PM:
+ case HDMI_TX_CEC_PM:
+ mp->num_clk = 0;
+ DEV_DBG("%s: no clk\n", __func__);
+ break;
+
+ default:
+ DEV_ERR("%s: invalid module type=%d\n", __func__,
+ module_type);
+ return -EINVAL;
+ }
+
+ return rc;
+
+error:
+ if (mp->clk_config) {
+ devm_kfree(dev, mp->clk_config);
+ mp->clk_config = NULL;
+ }
+ mp->num_clk = 0;
+
+ return rc;
+} /* hdmi_tx_get_dt_clk_data */
+
+/*
+ * Free the devm-allocated regulator table of one power module and mark
+ * the module as having no regulators.
+ */
+static void hdmi_tx_put_dt_vreg_data(struct device *dev,
+ struct mdss_module_power *module_power)
+{
+ if (!module_power) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ module_power->num_vreg = 0;
+
+ if (!module_power->vreg_config)
+ return;
+
+ devm_kfree(dev, module_power->vreg_config);
+ module_power->vreg_config = NULL;
+} /* hdmi_tx_put_dt_vreg_data */
+
+/*
+ * Parse regulator data for one HDMI power module from the
+ * "qcom,supply-names" family of DT properties. Supplies whose name
+ * contains the module's sub-string ("hpd", "ddc", "core", "cec") are
+ * copied into @mp. Returns 0 on success (including a module with no
+ * supplies) or a negative error code (previously a failed vreg_config
+ * allocation jumped to the error path with rc == 0 and was reported as
+ * success; it now returns -ENOMEM).
+ */
+static int hdmi_tx_get_dt_vreg_data(struct device *dev,
+ struct mdss_module_power *mp, u32 module_type)
+{
+ int i, j, rc = 0;
+ int dt_vreg_total = 0, mod_vreg_total = 0;
+ u32 ndx_mask = 0;
+ u32 *val_array = NULL;
+ const char *mod_name = NULL;
+ struct device_node *of_node = NULL;
+
+ if (!dev || !mp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (module_type) {
+ case HDMI_TX_HPD_PM:
+ mod_name = "hpd";
+ break;
+ case HDMI_TX_DDC_PM:
+ mod_name = "ddc";
+ break;
+ case HDMI_TX_CORE_PM:
+ mod_name = "core";
+ break;
+ case HDMI_TX_CEC_PM:
+ mod_name = "cec";
+ break;
+ default:
+ DEV_ERR("%s: invalid module type=%d\n", __func__,
+ module_type);
+ return -EINVAL;
+ }
+
+ DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+ of_node = dev->of_node;
+
+ dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+ if (dt_vreg_total < 0) {
+ DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
+ dt_vreg_total);
+ rc = dt_vreg_total;
+ goto error;
+ }
+
+ /* count how many vreg for particular hdmi module */
+ for (i = 0; i < dt_vreg_total; i++) {
+ const char *st = NULL;
+
+ rc = of_property_read_string_index(of_node,
+ "qcom,supply-names", i, &st);
+ if (rc) {
+ DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+ __func__, i, rc);
+ goto error;
+ }
+
+ if (strnstr(st, mod_name, strlen(st))) {
+ ndx_mask |= BIT(i);
+ mod_vreg_total++;
+ }
+ }
+
+ if (mod_vreg_total > 0) {
+ mp->num_vreg = mod_vreg_total;
+ mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) *
+ mod_vreg_total, GFP_KERNEL);
+ if (!mp->vreg_config) {
+ DEV_ERR("%s: can't alloc '%s' vreg mem\n", __func__,
+ hdmi_tx_pm_name(module_type));
+ rc = -ENOMEM;
+ goto error;
+ }
+ } else {
+ DEV_DBG("%s: no vreg\n", __func__);
+ return 0;
+ }
+
+ val_array = devm_kzalloc(dev, sizeof(u32) * dt_vreg_total, GFP_KERNEL);
+ if (!val_array) {
+ DEV_ERR("%s: can't allocate vreg scratch mem\n", __func__);
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ /* walk all supplies; ndx_mask bit 0 tracks whether entry i is ours */
+ for (i = 0, j = 0; (i < dt_vreg_total) && (j < mod_vreg_total); i++) {
+ const char *st = NULL;
+
+ if (!(ndx_mask & BIT(0))) {
+ ndx_mask >>= 1;
+ continue;
+ }
+
+ /* vreg-name */
+ rc = of_property_read_string_index(of_node,
+ "qcom,supply-names", i, &st);
+ if (rc) {
+ DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+ __func__, i, rc);
+ goto error;
+ }
+ snprintf(mp->vreg_config[j].vreg_name, 32, "%s", st);
+
+ /* vreg-min-voltage */
+ memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+ rc = of_property_read_u32_array(of_node,
+ "qcom,min-voltage-level", val_array,
+ dt_vreg_total);
+ if (rc) {
+ DEV_ERR("%s: error read '%s' min volt. rc=%d\n",
+ __func__, hdmi_tx_pm_name(module_type), rc);
+ goto error;
+ }
+ mp->vreg_config[j].min_voltage = val_array[i];
+
+ /* vreg-max-voltage */
+ memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+ rc = of_property_read_u32_array(of_node,
+ "qcom,max-voltage-level", val_array,
+ dt_vreg_total);
+ if (rc) {
+ DEV_ERR("%s: error read '%s' max volt. rc=%d\n",
+ __func__, hdmi_tx_pm_name(module_type), rc);
+ goto error;
+ }
+ mp->vreg_config[j].max_voltage = val_array[i];
+
+ /* vreg-op-mode */
+ memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+ rc = of_property_read_u32_array(of_node,
+ "qcom,enable-load", val_array,
+ dt_vreg_total);
+ if (rc) {
+ DEV_ERR("%s: error read '%s' enable load. rc=%d\n",
+ __func__, hdmi_tx_pm_name(module_type), rc);
+ goto error;
+ }
+ mp->vreg_config[j].load[DSS_REG_MODE_ENABLE] = val_array[i];
+
+ memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+ rc = of_property_read_u32_array(of_node,
+ "qcom,disable-load", val_array,
+ dt_vreg_total);
+ if (rc) {
+ DEV_ERR("%s: error read '%s' disable load. rc=%d\n",
+ __func__, hdmi_tx_pm_name(module_type), rc);
+ goto error;
+ }
+ mp->vreg_config[j].load[DSS_REG_MODE_DISABLE] = val_array[i];
+
+ DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
+ __func__,
+ mp->vreg_config[j].vreg_name,
+ mp->vreg_config[j].min_voltage,
+ mp->vreg_config[j].max_voltage,
+ mp->vreg_config[j].load[DSS_REG_MODE_ENABLE],
+ mp->vreg_config[j].load[DSS_REG_MODE_DISABLE]);
+
+ ndx_mask >>= 1;
+ j++;
+ }
+
+ devm_kfree(dev, val_array);
+
+ return rc;
+
+error:
+ if (mp->vreg_config) {
+ devm_kfree(dev, mp->vreg_config);
+ mp->vreg_config = NULL;
+ }
+ mp->num_vreg = 0;
+
+ if (val_array)
+ devm_kfree(dev, val_array);
+ return rc;
+} /* hdmi_tx_get_dt_vreg_data */
+
+/*
+ * Free the devm-allocated GPIO table of one power module and mark the
+ * module as having no GPIOs.
+ */
+static void hdmi_tx_put_dt_gpio_data(struct device *dev,
+ struct mdss_module_power *module_power)
+{
+ if (!module_power) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ module_power->num_gpio = 0;
+
+ if (!module_power->gpio_config)
+ return;
+
+ devm_kfree(dev, module_power->gpio_config);
+ module_power->gpio_config = NULL;
+} /* hdmi_tx_put_dt_gpio_data */
+
+/*
+ * Parse the GPIOs of one HDMI power module from device tree. The
+ * candidate GPIO names come from per-module static tables; only those
+ * present in the DT node are copied into @mp. Returns 0 on success
+ * (including zero GPIOs), -EINVAL or -ENOMEM on failure.
+ */
+static int hdmi_tx_get_dt_gpio_data(struct device *dev,
+ struct mdss_module_power *mp, u32 module_type)
+{
+ int i, j;
+ int mp_gpio_cnt = 0, gpio_list_size = 0;
+ struct mdss_gpio *gpio_list = NULL;
+ struct device_node *of_node = NULL;
+
+ DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+ if (!dev || !mp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ of_node = dev->of_node;
+
+ /* pick the candidate GPIO template list for this module */
+ switch (module_type) {
+ case HDMI_TX_HPD_PM:
+ gpio_list_size = ARRAY_SIZE(hpd_gpio_config);
+ gpio_list = hpd_gpio_config;
+ break;
+ case HDMI_TX_DDC_PM:
+ gpio_list_size = ARRAY_SIZE(ddc_gpio_config);
+ gpio_list = ddc_gpio_config;
+ break;
+ case HDMI_TX_CORE_PM:
+ gpio_list_size = ARRAY_SIZE(core_gpio_config);
+ gpio_list = core_gpio_config;
+ break;
+ case HDMI_TX_CEC_PM:
+ gpio_list_size = ARRAY_SIZE(cec_gpio_config);
+ gpio_list = cec_gpio_config;
+ break;
+ default:
+ DEV_ERR("%s: invalid module type=%d\n", __func__,
+ module_type);
+ return -EINVAL;
+ }
+
+ /* count candidates actually present in this DT node */
+ for (i = 0; i < gpio_list_size; i++)
+ if (of_find_property(of_node, gpio_list[i].gpio_name, NULL))
+ mp_gpio_cnt++;
+
+ if (!mp_gpio_cnt) {
+ DEV_DBG("%s: no gpio\n", __func__);
+ return 0;
+ }
+
+ DEV_DBG("%s: mp_gpio_cnt = %d\n", __func__, mp_gpio_cnt);
+ mp->num_gpio = mp_gpio_cnt;
+
+ mp->gpio_config = devm_kzalloc(dev, sizeof(struct mdss_gpio) *
+ mp_gpio_cnt, GFP_KERNEL);
+ if (!mp->gpio_config) {
+ DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
+ hdmi_tx_pm_name(module_type));
+
+ mp->num_gpio = 0;
+ return -ENOMEM;
+ }
+
+ /* copy each resolvable template entry, filling in the gpio number */
+ for (i = 0, j = 0; i < gpio_list_size; i++) {
+ int gpio = of_get_named_gpio(of_node,
+ gpio_list[i].gpio_name, 0);
+ if (gpio < 0) {
+ DEV_DBG("%s: no gpio named %s\n", __func__,
+ gpio_list[i].gpio_name);
+ continue;
+ }
+ memcpy(&mp->gpio_config[j], &gpio_list[i],
+ sizeof(struct mdss_gpio));
+
+ mp->gpio_config[j].gpio = (unsigned int)gpio;
+
+ DEV_DBG("%s: gpio num=%d, name=%s, value=%d\n",
+ __func__, mp->gpio_config[j].gpio,
+ mp->gpio_config[j].gpio_name,
+ mp->gpio_config[j].value);
+ j++;
+ }
+
+ return 0;
+} /* hdmi_tx_get_dt_gpio_data */
+
+/*
+ * Release all device-tree derived data (clocks, regulators, GPIOs) for
+ * every power module, in reverse module order.
+ */
+static void hdmi_tx_put_dt_data(struct device *dev,
+ struct hdmi_tx_platform_data *pdata)
+{
+ int i;
+
+ if (!dev || !pdata) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--)
+ hdmi_tx_put_dt_clk_data(dev, &pdata->power_data[i]);
+
+ for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--)
+ hdmi_tx_put_dt_vreg_data(dev, &pdata->power_data[i]);
+
+ for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--)
+ hdmi_tx_put_dt_gpio_data(dev, &pdata->power_data[i]);
+} /* hdmi_tx_put_dt_data */
+
+/*
+ * Parse all device-tree data for the HDMI TX device: device id, then
+ * GPIO/regulator/clock data for every power module, plus boolean and
+ * string properties. On any failure everything parsed so far is
+ * released. Returns 0 on success or a negative error code.
+ * Note: drvdata is valid here because probe calls platform_set_drvdata()
+ * before invoking this function.
+ */
+static int hdmi_tx_get_dt_data(struct platform_device *pdev,
+ struct hdmi_tx_platform_data *pdata)
+{
+ int i, rc = 0, len = 0;
+ struct device_node *of_node = NULL;
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+ const char *data;
+
+ if (!pdev || !pdata) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ of_node = pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+ if (rc) {
+ DEV_ERR("%s: dev id from dt not found.rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ DEV_DBG("%s: id=%d\n", __func__, pdev->id);
+
+ /* GPIO */
+ for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+ rc = hdmi_tx_get_dt_gpio_data(&pdev->dev,
+ &pdata->power_data[i], i);
+ if (rc) {
+ DEV_ERR("%s: '%s' get_dt_gpio_data failed.rc=%d\n",
+ __func__, hdmi_tx_pm_name(i), rc);
+ goto error;
+ }
+ }
+
+ /* VREG */
+ for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+ rc = hdmi_tx_get_dt_vreg_data(&pdev->dev,
+ &pdata->power_data[i], i);
+ if (rc) {
+ DEV_ERR("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+ __func__, hdmi_tx_pm_name(i), rc);
+ goto error;
+ }
+ }
+
+ /* CLK */
+ for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+ rc = hdmi_tx_get_dt_clk_data(&pdev->dev,
+ &pdata->power_data[i], i);
+ if (rc) {
+ DEV_ERR("%s: '%s' get_dt_clk_data failed.rc=%d\n",
+ __func__, hdmi_tx_pm_name(i), rc);
+ goto error;
+ }
+ }
+
+ /* primary may already be set from the boot-time panel config */
+ if (!hdmi_ctrl->pdata.primary)
+ hdmi_ctrl->pdata.primary = of_property_read_bool(
+ pdev->dev.of_node, "qcom,primary_panel");
+
+ pdata->cond_power_on = of_property_read_bool(pdev->dev.of_node,
+ "qcom,conditional-power-on");
+
+ pdata->pluggable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,pluggable");
+
+ /* display-id is optional: absence is only logged, not fatal */
+ data = of_get_property(pdev->dev.of_node, "qcom,display-id", &len);
+ if (!data || len <= 0)
+ pr_err("%s:%d Unable to read qcom,display-id, data=%pK,len=%d\n",
+ __func__, __LINE__, data, len);
+ else
+ snprintf(hdmi_ctrl->panel_data.panel_info.display_id,
+ MDSS_DISPLAY_ID_MAX_LEN, "%s", data);
+
+ return rc;
+
+error:
+ hdmi_tx_put_dt_data(&pdev->dev, pdata);
+ return rc;
+} /* hdmi_tx_get_dt_data */
+
+/*
+ * Populate the per-event dispatch table consumed by
+ * hdmi_tx_event_handler(). Returns 0, or -EINVAL for a NULL ctrl.
+ * NOTE(review): evt_handler[] is declared with MDSS_EVENT_MAX - 1
+ * entries; confirm every event id assigned below is within that bound.
+ */
+static int hdmi_tx_init_event_handler(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ hdmi_tx_evt_handler *handler;
+
+ if (!hdmi_ctrl)
+ return -EINVAL;
+
+ handler = hdmi_ctrl->evt_handler;
+
+ handler[MDSS_EVENT_FB_REGISTERED] = hdmi_tx_evt_handle_register;
+ handler[MDSS_EVENT_CHECK_PARAMS] = hdmi_tx_evt_handle_check_param;
+ handler[MDSS_EVENT_RESUME] = hdmi_tx_evt_handle_resume;
+ handler[MDSS_EVENT_RESET] = hdmi_tx_evt_handle_reset;
+ handler[MDSS_EVENT_UNBLANK] = hdmi_tx_evt_handle_unblank;
+ handler[MDSS_EVENT_PANEL_ON] = hdmi_tx_evt_handle_panel_on;
+ handler[MDSS_EVENT_SUSPEND] = hdmi_tx_evt_handle_suspend;
+ handler[MDSS_EVENT_BLANK] = hdmi_tx_evt_handle_blank;
+ handler[MDSS_EVENT_PANEL_OFF] = hdmi_tx_evt_handle_panel_off;
+ handler[MDSS_EVENT_CLOSE] = hdmi_tx_evt_handle_close;
+
+ return 0;
+}
+
+/*
+ * Platform-driver probe: allocate the controller, parse device tree,
+ * map resources, initialize the device and register it as an mdss
+ * panel. If continuous splash is active, power rails/clocks/GPIOs are
+ * turned on here so the splash image survives the handoff.
+ * Returns 0 on success or a negative error code.
+ */
+static int hdmi_tx_probe(struct platform_device *pdev)
+{
+ int rc = 0, i;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct mdss_panel_cfg *pan_cfg = NULL;
+
+ if (!of_node) {
+ DEV_ERR("%s: FAILED: of_node not found\n", __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+
+ /* devm allocation: released automatically if probe fails */
+ hdmi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hdmi_ctrl), GFP_KERNEL);
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+ rc = -ENOMEM;
+ goto failed_no_mem;
+ }
+
+ hdmi_ctrl->mdss_util = mdss_get_util_intf();
+ if (hdmi_ctrl->mdss_util == NULL) {
+ pr_err("Failed to get mdss utility functions\n");
+ rc = -ENODEV;
+ goto failed_dt_data;
+ }
+
+ platform_set_drvdata(pdev, hdmi_ctrl);
+ hdmi_ctrl->pdev = pdev;
+ hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+
+ /* boot-time "HDMI as primary" configuration, if any */
+ pan_cfg = mdss_panel_intf_type(MDSS_PANEL_INTF_HDMI);
+ if (IS_ERR(pan_cfg)) {
+ /* NOTE(review): early return relies on devm for cleanup */
+ return PTR_ERR(pan_cfg);
+ } else if (pan_cfg) {
+ int vic;
+
+ if (kstrtoint(pan_cfg->arg_cfg, 10, &vic) ||
+ vic <= HDMI_VFRMT_UNKNOWN || vic >= HDMI_VFRMT_MAX)
+ vic = DEFAULT_HDMI_PRIMARY_RESOLUTION;
+
+ hdmi_ctrl->pdata.primary = true;
+ hdmi_ctrl->vic = vic;
+ hdmi_ctrl->panel_data.panel_info.is_prim_panel = true;
+ hdmi_ctrl->panel_data.panel_info.cont_splash_enabled =
+ hdmi_ctrl->mdss_util->panel_intf_status(DISPLAY_1,
+ MDSS_PANEL_INTF_HDMI) ? true : false;
+ }
+
+ hdmi_tx_hw.irq_info = mdss_intr_line();
+ if (hdmi_tx_hw.irq_info == NULL) {
+ pr_err("Failed to get mdss irq information\n");
+ return -ENODEV;
+ }
+
+ rc = hdmi_tx_get_dt_data(pdev, &hdmi_ctrl->pdata);
+ if (rc) {
+ DEV_ERR("%s: FAILED: parsing device tree data. rc=%d\n",
+ __func__, rc);
+ goto failed_dt_data;
+ }
+
+ rc = hdmi_tx_init_resource(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: FAILED: resource init. rc=%d\n",
+ __func__, rc);
+ goto failed_res_init;
+ }
+
+ rc = hdmi_tx_get_version(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: FAILED: hdmi_tx_get_version. rc=%d\n",
+ __func__, rc);
+ goto failed_reg_panel;
+ }
+
+ rc = hdmi_tx_dev_init(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: FAILED: hdmi_tx_dev_init. rc=%d\n", __func__, rc);
+ goto failed_dev_init;
+ }
+
+ rc = hdmi_tx_init_event_handler(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: FAILED: hdmi_tx_init_event_handler. rc=%d\n",
+ __func__, rc);
+ goto failed_dev_init;
+ }
+
+ rc = hdmi_tx_register_panel(hdmi_ctrl);
+ if (rc) {
+ DEV_ERR("%s: FAILED: register_panel. rc=%d\n", __func__, rc);
+ goto failed_reg_panel;
+ }
+
+ rc = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
+ if (rc) {
+ DEV_ERR("%s: Failed to add child devices. rc=%d\n",
+ __func__, rc);
+ goto failed_reg_panel;
+ } else {
+ DEV_DBG("%s: Add child devices.\n", __func__);
+ }
+
+ if (mdss_debug_register_io("hdmi",
+ &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO], NULL))
+ DEV_WARN("%s: hdmi_tx debugfs register failed\n", __func__);
+
+ /* keep rails/clocks on across a continuous-splash handoff */
+ if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) {
+ for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+ msm_mdss_enable_vreg(
+ hdmi_ctrl->pdata.power_data[i].vreg_config,
+ hdmi_ctrl->pdata.power_data[i].num_vreg, 1);
+
+ hdmi_tx_pinctrl_set_state(hdmi_ctrl, i, 1);
+
+ msm_mdss_enable_gpio(
+ hdmi_ctrl->pdata.power_data[i].gpio_config,
+ hdmi_ctrl->pdata.power_data[i].num_gpio, 1);
+
+ msm_mdss_enable_clk(
+ hdmi_ctrl->pdata.power_data[i].clk_config,
+ hdmi_ctrl->pdata.power_data[i].num_clk, 1);
+
+ hdmi_ctrl->power_data_enable[i] = true;
+ }
+ }
+
+ return rc;
+
+failed_reg_panel:
+ hdmi_tx_dev_deinit(hdmi_ctrl);
+failed_dev_init:
+ hdmi_tx_deinit_resource(hdmi_ctrl);
+failed_res_init:
+ hdmi_tx_put_dt_data(&pdev->dev, &hdmi_ctrl->pdata);
+failed_dt_data:
+ devm_kfree(&pdev->dev, hdmi_ctrl);
+failed_no_mem:
+ return rc;
+} /* hdmi_tx_probe */
+
+/*
+ * Platform-driver remove: tear down sysfs, device state, mapped
+ * resources and DT data, then free the controller.
+ */
+static int hdmi_tx_remove(struct platform_device *pdev)
+{
+ struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ hdmi_tx_sysfs_remove(hdmi_ctrl);
+ hdmi_tx_dev_deinit(hdmi_ctrl);
+ hdmi_tx_deinit_resource(hdmi_ctrl);
+ hdmi_tx_put_dt_data(&pdev->dev, &hdmi_ctrl->pdata);
+ devm_kfree(&hdmi_ctrl->pdev->dev, hdmi_ctrl);
+
+ return 0;
+} /* hdmi_tx_remove */
+
+/* Device-tree match table for the HDMI TX platform device. */
+static const struct of_device_id hdmi_tx_dt_match[] = {
+ {.compatible = COMPATIBLE_NAME,},
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, hdmi_tx_dt_match);
+
+/* Platform driver binding probe/remove to the DT match table. */
+static struct platform_driver this_driver = {
+ .probe = hdmi_tx_probe,
+ .remove = hdmi_tx_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hdmi_tx_dt_match,
+ },
+};
+
+/* Module entry point: register the HDMI TX platform driver. */
+static int __init hdmi_tx_drv_init(void)
+{
+ int ret = platform_driver_register(&this_driver);
+
+ if (ret)
+ DEV_ERR("%s: FAILED: rc=%d\n", __func__, ret);
+
+ return ret;
+} /* hdmi_tx_drv_init */
+
+/* Module exit point: unregister the HDMI TX platform driver. */
+static void __exit hdmi_tx_drv_exit(void)
+{
+ platform_driver_unregister(&this_driver);
+} /* hdmi_tx_drv_exit */
+
+/*
+ * module_param setter for "hdcp": parse the boolean and log the new
+ * value on success. Returns the param_set_bool() result.
+ */
+static int set_hdcp_feature_on(const char *val, const struct kernel_param *kp)
+{
+ int ret = param_set_bool(val, kp);
+
+ if (ret)
+ return ret;
+
+ pr_debug("%s: HDCP feature = %d\n", __func__, hdcp_feature_on);
+ return 0;
+}
+
+/* Ops for the "hdcp" module parameter: bool with logging on change. */
+static struct kernel_param_ops hdcp_feature_on_param_ops = {
+ .set = set_hdcp_feature_on,
+ .get = param_get_bool,
+};
+
+/* /sys/module/.../parameters/hdcp — runtime toggle for HDCP support */
+module_param_cb(hdcp, &hdcp_feature_on_param_ops, &hdcp_feature_on,
+ 0644);
+MODULE_PARM_DESC(hdcp, "Enable or Disable HDCP");
+
+module_init(hdmi_tx_drv_init);
+module_exit(hdmi_tx_drv_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HDMI MSM TX driver");
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
new file mode 100644
index 0000000..6a13c75
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_TX_H__
+#define __MDSS_HDMI_TX_H__
+
+#include <linux/extcon.h>
+#include "mdss_hdmi_util.h"
+#include "mdss_hdmi_panel.h"
+#include "mdss_cec_core.h"
+#include "mdss_hdmi_audio.h"
+
+#define MAX_SWITCH_NAME_SIZE 5
+
+/* Indices of the memory-mapped IO regions used by the driver. */
+enum hdmi_tx_io_type {
+ HDMI_TX_CORE_IO,
+ HDMI_TX_QFPROM_IO,
+ HDMI_TX_HDCP_IO,
+ HDMI_TX_MAX_IO
+};
+
+/* Power modules, each with its own clocks/regulators/GPIOs. */
+enum hdmi_tx_power_module_type {
+ HDMI_TX_HPD_PM,
+ HDMI_TX_DDC_PM,
+ HDMI_TX_CORE_PM,
+ HDMI_TX_CEC_PM,
+ HDMI_TX_MAX_PM
+};
+
+/* Data filled from device tree */
+struct hdmi_tx_platform_data {
+ bool primary;             /* HDMI is the primary display */
+ bool cont_splash_enabled; /* continuous splash handoff active */
+ bool cond_power_on;       /* from qcom,conditional-power-on */
+ struct mdss_io_data io[HDMI_TX_MAX_IO];
+ struct mdss_module_power power_data[HDMI_TX_MAX_PM];
+ struct reg_bus_client *reg_bus_clt[HDMI_TX_MAX_PM];
+ /* bitfield representing each module's pin state */
+ u64 pin_states;
+ bool pluggable;           /* from qcom,pluggable */
+};
+
+/* Pinctrl states used by the driver's power modules. */
+struct hdmi_tx_pinctrl {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *state_active;
+ struct pinctrl_state *state_hpd_active;
+ struct pinctrl_state *state_cec_active;
+ struct pinctrl_state *state_ddc_active;
+ struct pinctrl_state *state_suspend;
+};
+
+struct hdmi_tx_ctrl;
+/* Signature of a per-event handler dispatched by hdmi_tx_event_handler */
+typedef int (*hdmi_tx_evt_handler) (struct hdmi_tx_ctrl *);
+
+/* Top-level driver state for one HDMI TX controller instance. */
+struct hdmi_tx_ctrl {
+ struct platform_device *pdev;
+ struct hdmi_tx_platform_data pdata;
+ struct mdss_panel_data panel_data;
+ struct mdss_util_intf *mdss_util;
+ struct msm_hdmi_mode_timing_info timing;
+ struct hdmi_tx_pinctrl pin_res;
+ struct mutex mutex;
+ struct mutex tx_lock;
+ struct list_head cable_notify_handlers;
+ struct kobject *kobj;
+ struct extcon_dev sdev;
+ struct workqueue_struct *workq;
+ struct hdmi_util_ds_data ds_data;
+ struct completion hpd_int_done;
+ struct work_struct hpd_int_work;
+ struct delayed_work hdcp_cb_work;
+ struct work_struct cable_notify_work;
+ struct hdmi_tx_ddc_ctrl ddc_ctrl;
+ struct hdmi_hdcp_ops *hdcp_ops;
+ struct cec_ops hdmi_cec_ops;
+ struct cec_cbs hdmi_cec_cbs;
+ struct hdmi_audio_ops audio_ops;
+ struct msm_hdmi_audio_setup_params audio_params;
+ struct hdmi_panel_data panel;
+ struct hdmi_panel_ops panel_ops;
+ struct work_struct fps_work;
+
+ spinlock_t hpd_state_lock;
+
+ /* panel / hot-plug / HDCP state flags and counters */
+ u32 panel_power_on;
+ u32 panel_suspend;
+ u32 vic;
+ u32 hdmi_tx_ver;
+ u32 max_pclk_khz;
+ u32 hpd_state;
+ u32 hpd_off_pending;
+ u32 hpd_feature_on;
+ u32 hpd_initialized;
+ u32 vote_hdmi_core_on;
+ u32 dynamic_fps;
+ u32 hdcp14_present;
+ u32 enc_lvl;
+ u32 edid_buf_size;
+ u32 s3d_mode;
+
+ u8 timing_gen_on;
+ u8 mhl_hpd_on;
+ u8 hdcp_status;
+ u8 spd_vendor_name[9];
+ u8 spd_product_description[17];
+
+ bool hdcp_feature_on;
+ bool hpd_disabled;
+ bool ds_registered;
+ bool scrambler_enabled;
+ bool hdcp1_use_sw_keys;
+ bool hdcp14_sw_keys;
+ bool auth_state;
+ bool custom_edid;
+ bool sim_mode;
+ bool hdcp22_present;
+ bool power_data_enable[HDMI_TX_MAX_PM];
+
+ void (*hdmi_tx_hpd_done)(void *data);
+ void *downstream_data;
+ void *audio_data;
+ void *feature_data[hweight8(HDMI_TX_FEAT_MAX)];
+ void *hdcp_data;
+ void *evt_arg;
+ u8 *edid_buf;
+
+ char disp_switch_name[MAX_SWITCH_NAME_SIZE];
+
+ /*
+ * NOTE(review): sized MDSS_EVENT_MAX - 1 but indexed directly by
+ * event id in hdmi_tx_event_handler() — confirm the bound.
+ */
+ hdmi_tx_evt_handler evt_handler[MDSS_EVENT_MAX - 1];
+};
+
+#endif /* __MDSS_HDMI_TX_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
new file mode 100644
index 0000000..b5bedfa
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -0,0 +1,1694 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "mdss_hdmi_util.h"
+
+#define RESOLUTION_NAME_STR_LEN 30
+#define HDMI_SEC_TO_MS 1000
+#define HDMI_MS_TO_US 1000
+#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US)
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_BUSY_WAIT_DELAY_US 100
+
+#define HDMI_SCDC_UNKNOWN_REGISTER "Unknown register"
+
+static char res_buf[RESOLUTION_NAME_STR_LEN];
+
+/* Direction of a DDC transaction trigger. */
+enum trigger_mode {
+ TRIGGER_WRITE,
+ TRIGGER_READ
+};
+
+/*
+ * Convert a millisecond timeout into the equivalent number of hsync
+ * lines for the given timing. Returns the line count, or -EINVAL for
+ * invalid input (including timing data that would cause a division by
+ * zero: fps of 0, v_total of 0, or a per-line time that truncates to 0).
+ */
+int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
+ u32 timeout_ms)
+{
+ u32 fps, v_total;
+ u32 time_taken_by_one_line_us, lines_needed_for_given_time;
+
+ if (!timing || !timeout_ms) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ fps = timing->refresh_rate / HDMI_KHZ_TO_HZ;
+ v_total = hdmi_tx_get_v_total(timing);
+
+ /* guard the divisions below against bogus timing data */
+ if (!fps || !v_total) {
+ pr_err("invalid timing: fps=%d v_total=%d\n", fps, v_total);
+ return -EINVAL;
+ }
+
+ /*
+ * pixel clock = h_total * v_total * fps
+ * 1 sec = pixel clock number of pixels are transmitted.
+ * time taken by one line (h_total) = 1 / (v_total * fps).
+ */
+ time_taken_by_one_line_us = HDMI_SEC_TO_US / (v_total * fps);
+ if (!time_taken_by_one_line_us) {
+ pr_err("invalid timing: line time is zero\n");
+ return -EINVAL;
+ }
+
+ lines_needed_for_given_time = (timeout_ms * HDMI_MS_TO_US) /
+ time_taken_by_one_line_us;
+
+ return lines_needed_for_given_time;
+}
+
+/*
+ * Clear and enable the DDC SW_DONE interrupt, then busy-wait until the
+ * DDC hardware arbiter reports it is free. @what names the caller for
+ * the timeout log. Returns 0 when DDC is free, -ETIMEDOUT otherwise.
+ */
+static int hdmi_ddc_clear_irq(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+ char *what)
+{
+ u32 ddc_int_ctrl, ddc_status, in_use, timeout;
+ u32 sw_done_mask = BIT(2);
+ u32 sw_done_ack = BIT(1);
+ u32 in_use_by_sw = BIT(0);
+ u32 in_use_by_hw = BIT(1);
+
+ if (!ddc_ctrl || !ddc_ctrl->io) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ /* clear and enable interrupts */
+ ddc_int_ctrl = sw_done_mask | sw_done_ack;
+
+ DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+ /* wait until DDC HW is free */
+ timeout = 100;
+ do {
+ ddc_status = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_HW_STATUS);
+ in_use = ddc_status & (in_use_by_sw | in_use_by_hw);
+ if (in_use) {
+ pr_debug("ddc is in use by %s, timeout(%d)\n",
+ ddc_status & in_use_by_sw ? "sw" : "hw",
+ timeout);
+ udelay(HDMI_BUSY_WAIT_DELAY_US);
+ }
+ } while (in_use && --timeout);
+
+ if (!timeout) {
+ pr_err("%s: timedout\n", what);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Reset the scrambler-status DDC machinery: ack/disable its interrupts
+ * and pulse the DDC timer reset bit.
+ */
+static void hdmi_scrambler_ddc_reset(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+ u32 reg_val;
+
+ if (!ctrl) {
+ pr_err("Invalid parameters\n");
+ return;
+ }
+
+ /* clear ack and disable interrupts */
+ reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1);
+ DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL2, reg_val);
+
+ /* Reset DDC timers */
+ reg_val = BIT(0) | DSS_REG_R(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+ DSS_REG_W(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+
+ /* release the reset bit */
+ reg_val = DSS_REG_R(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+ reg_val &= ~BIT(0);
+ DSS_REG_W(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+}
+
+/*
+ * Stop hardware scrambler-status polling: reset the DDC machinery and
+ * revoke the HW DDC engine's access to the sink's RxStatus register.
+ */
+void hdmi_scrambler_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+ u32 reg_val;
+
+ if (!ctrl) {
+ pr_err("Invalid parameters\n");
+ return;
+ }
+
+ hdmi_scrambler_ddc_reset(ctrl);
+
+ /* Disable HW DDC access to RxStatus register */
+ reg_val = DSS_REG_R(ctrl->io, HDMI_HW_DDC_CTRL);
+ reg_val &= ~(BIT(8) | BIT(9));
+
+ DSS_REG_W(ctrl->io, HDMI_HW_DDC_CTRL, reg_val);
+}
+
+/*
+ * Read the scrambler-status DDC status register, report any error
+ * condition (abort, timeout, NACK0/NACK1) and write back the
+ * corresponding clear bits. Returns 0 if clean, else the last error
+ * seen (-ECONNABORTED, -ETIMEDOUT or -EIO).
+ */
+static int hdmi_scrambler_ddc_check_status(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+ int rc = 0;
+ u32 reg_val;
+
+ if (!ctrl) {
+ pr_err("invalid ddc ctrl\n");
+ return -EINVAL;
+ }
+
+ /* check for errors and clear status */
+ reg_val = DSS_REG_R(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_STATUS);
+
+ if (reg_val & BIT(4)) {
+ pr_err("ddc aborted\n");
+ reg_val |= BIT(5);
+ rc = -ECONNABORTED;
+ }
+
+ if (reg_val & BIT(8)) {
+ pr_err("timed out\n");
+ reg_val |= BIT(9);
+ rc = -ETIMEDOUT;
+ }
+
+ if (reg_val & BIT(12)) {
+ pr_err("NACK0\n");
+ reg_val |= BIT(13);
+ rc = -EIO;
+ }
+
+ if (reg_val & BIT(14)) {
+ pr_err("NACK1\n");
+ reg_val |= BIT(15);
+ rc = -EIO;
+ }
+
+ /* write back the ack bits set above to clear the latched errors */
+ DSS_REG_W(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_STATUS, reg_val);
+
+ return rc;
+}
+
+/*
+ * Arm the hardware scrambler-status DDC timer with @timeout_hsync
+ * lines, enable HW DDC access, give the sink the HDMI-2.0-mandated
+ * 200 ms to respond, then collect the status and disable HW polling.
+ * Returns the status-check result (0 on success).
+ */
+static int hdmi_scrambler_status_timer_setup(struct hdmi_tx_ddc_ctrl *ctrl,
+ u32 timeout_hsync)
+{
+ u32 reg_val;
+ int rc;
+ struct mdss_io_data *io = NULL;
+
+ if (!ctrl || !ctrl->io) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ io = ctrl->io;
+
+ hdmi_ddc_clear_irq(ctrl, "scrambler");
+
+ DSS_REG_W(io, HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL, timeout_hsync);
+ DSS_REG_W(io, HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2, timeout_hsync);
+
+ reg_val = DSS_REG_R(io, HDMI_DDC_INT_CTRL5);
+ reg_val |= BIT(10);
+ DSS_REG_W(io, HDMI_DDC_INT_CTRL5, reg_val);
+
+ reg_val = DSS_REG_R(io, HDMI_DDC_INT_CTRL2);
+ /* Trigger interrupt if scrambler status is 0 or DDC failure */
+ reg_val |= BIT(10);
+ /* NOTE(review): BIT(15) cleared, BIT(16) set — confirm intent */
+ reg_val &= ~(BIT(15) | BIT(16));
+ reg_val |= BIT(16);
+ DSS_REG_W(io, HDMI_DDC_INT_CTRL2, reg_val);
+
+ /* Enable DDC access */
+ reg_val = DSS_REG_R(io, HDMI_HW_DDC_CTRL);
+
+ reg_val &= ~(BIT(8) | BIT(9));
+ reg_val |= BIT(8);
+ DSS_REG_W(io, HDMI_HW_DDC_CTRL, reg_val);
+
+ /* WAIT for 200ms as per HDMI 2.0 standard for sink to respond */
+ msleep(200);
+
+ /* clear the scrambler status */
+ rc = hdmi_scrambler_ddc_check_status(ctrl);
+ if (rc)
+ pr_err("scrambling ddc error %d\n", rc);
+
+ hdmi_scrambler_ddc_disable(ctrl);
+
+ return rc;
+}
+
+/* Map an SCDC register id to its printable name for logging. */
+static inline char *hdmi_scdc_reg2string(u32 type)
+{
+ char *name;
+
+ switch (type) {
+ case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+ name = "HDMI_TX_SCDC_SCRAMBLING_STATUS";
+ break;
+ case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+ name = "HDMI_TX_SCDC_SCRAMBLING_ENABLE";
+ break;
+ case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+ name = "HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE";
+ break;
+ case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+ name = "HDMI_TX_SCDC_CLOCK_DET_STATUS";
+ break;
+ case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+ name = "HDMI_TX_SCDC_CH0_LOCK_STATUS";
+ break;
+ case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+ name = "HDMI_TX_SCDC_CH1_LOCK_STATUS";
+ break;
+ case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+ name = "HDMI_TX_SCDC_CH2_LOCK_STATUS";
+ break;
+ case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+ name = "HDMI_TX_SCDC_CH0_ERROR_COUNT";
+ break;
+ case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+ name = "HDMI_TX_SCDC_CH1_ERROR_COUNT";
+ break;
+ case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+ name = "HDMI_TX_SCDC_CH2_ERROR_COUNT";
+ break;
+ case HDMI_TX_SCDC_READ_ENABLE:
+ name = "HDMI_TX_SCDC_READ_ENABLE";
+ break;
+ default:
+ name = HDMI_SCDC_UNKNOWN_REGISTER;
+ break;
+ }
+
+ return name;
+}
+
+/* Slots for dynamically registered (reserved-VIC) timings; a slot whose
+ * video_format is 0 is free (see hdmi_set_resv_timing_info()).
+ */
+static struct msm_hdmi_mode_timing_info hdmi_resv_timings[
+		RESERVE_VFRMT_END - HDMI_VFRMT_RESERVE1 + 1];
+
+/* Look up a previously registered reserved timing by its video format id
+ * and copy it into *mode.  Returns 0 on success, -EINVAL if not found.
+ */
+static int hdmi_get_resv_timing_info(
+	struct msm_hdmi_mode_timing_info *mode, int id)
+{
+	int slot;
+
+	for (slot = 0; slot < ARRAY_SIZE(hdmi_resv_timings); slot++) {
+		struct msm_hdmi_mode_timing_info *entry =
+				&hdmi_resv_timings[slot];
+
+		if (entry->video_format != id)
+			continue;
+
+		*mode = *entry;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Register a custom timing in the first free reserved slot.  Returns the
+ * allocated reserved video format id, or -ENOMEM when all slots are taken.
+ */
+int hdmi_set_resv_timing_info(struct msm_hdmi_mode_timing_info *mode)
+{
+	int slot;
+
+	for (slot = 0; slot < ARRAY_SIZE(hdmi_resv_timings); slot++) {
+		struct msm_hdmi_mode_timing_info *entry =
+				&hdmi_resv_timings[slot];
+
+		/* a zero video_format marks a free slot */
+		if (entry->video_format)
+			continue;
+
+		*entry = *mode;
+		entry->video_format = HDMI_VFRMT_RESERVE1 + slot;
+		return entry->video_format;
+	}
+
+	return -ENOMEM;
+}
+
+/* Return true when @mode is a reserved video format id whose slot holds a
+ * registered timing.
+ */
+bool hdmi_is_valid_resv_timing(int mode)
+{
+	struct msm_hdmi_mode_timing_info *entry;
+
+	if (mode < HDMI_VFRMT_RESERVE1 || mode > RESERVE_VFRMT_END) {
+		pr_err("invalid mode %d\n", mode);
+		return false;
+	}
+
+	entry = &hdmi_resv_timings[mode - HDMI_VFRMT_RESERVE1];
+
+	return entry->video_format >= HDMI_VFRMT_RESERVE1 &&
+		entry->video_format <= RESERVE_VFRMT_END;
+}
+
+/* Free every reserved-timing slot by clearing its video_format. */
+void hdmi_reset_resv_timing_info(void)
+{
+	int slot;
+
+	for (slot = 0; slot < ARRAY_SIZE(hdmi_resv_timings); slot++)
+		hdmi_resv_timings[slot].video_format = 0;
+}
+
+/**
+ * msm_hdmi_get_timing_info() - fetch timing parameters for a video format
+ * @mode: output; filled with the timing details on success
+ * @id:   video format identifier (HDMI_VFRMT_* / HDMI_EVFRMT_*)
+ *
+ * Known formats are expanded via the MSM_HDMI_MODES_GET_DETAILS() macro;
+ * any other id falls through to the dynamically registered reserved
+ * timings (hdmi_get_resv_timing_info()).
+ *
+ * Return: 0 on success, -EINVAL when the id matches nothing.
+ */
+int msm_hdmi_get_timing_info(
+	struct msm_hdmi_mode_timing_info *mode, int id)
+{
+	int ret = 0;
+
+	switch (id) {
+	case HDMI_VFRMT_640x480p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_640x480p60_4_3);
+		break;
+	case HDMI_VFRMT_720x480p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x480p60_4_3);
+		break;
+	case HDMI_VFRMT_720x480p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x480p60_16_9);
+		break;
+	case HDMI_VFRMT_1280x720p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x720p60_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080i60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080i60_16_9);
+		break;
+	case HDMI_VFRMT_1440x480i60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x480i60_4_3);
+		break;
+	case HDMI_VFRMT_1440x480i60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x480i60_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p60_16_9);
+		break;
+	case HDMI_VFRMT_720x576p50_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x576p50_4_3);
+		break;
+	case HDMI_VFRMT_720x576p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x576p50_16_9);
+		break;
+	case HDMI_VFRMT_1280x720p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x720p50_16_9);
+		break;
+	case HDMI_VFRMT_1440x576i50_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x576i50_4_3);
+		break;
+	case HDMI_VFRMT_1440x576i50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x576i50_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p50_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p24_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p25_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p25_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p30_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p30_16_9);
+		break;
+	case HDMI_EVFRMT_3840x2160p30_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_3840x2160p30_16_9);
+		break;
+	case HDMI_EVFRMT_3840x2160p25_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_3840x2160p25_16_9);
+		break;
+	case HDMI_EVFRMT_3840x2160p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_3840x2160p24_16_9);
+		break;
+	case HDMI_EVFRMT_4096x2160p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_4096x2160p24_16_9);
+		break;
+	case HDMI_VFRMT_1024x768p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1024x768p60_4_3);
+		break;
+	case HDMI_VFRMT_1280x1024p60_5_4:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x1024p60_5_4);
+		break;
+	case HDMI_VFRMT_2560x1600p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_2560x1600p60_16_9);
+		break;
+	case HDMI_VFRMT_800x600p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_800x600p60_4_3);
+		break;
+	case HDMI_VFRMT_848x480p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_848x480p60_16_9);
+		break;
+	case HDMI_VFRMT_1280x960p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x960p60_4_3);
+		break;
+	case HDMI_VFRMT_1360x768p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1360x768p60_16_9);
+		break;
+	case HDMI_VFRMT_1440x900p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x900p60_16_10);
+		break;
+	case HDMI_VFRMT_1400x1050p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1400x1050p60_4_3);
+		break;
+	case HDMI_VFRMT_1680x1050p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1680x1050p60_16_10);
+		break;
+	case HDMI_VFRMT_1600x1200p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1600x1200p60_4_3);
+		break;
+	case HDMI_VFRMT_1920x1200p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1200p60_16_10);
+		break;
+	case HDMI_VFRMT_1366x768p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1366x768p60_16_10);
+		break;
+	case HDMI_VFRMT_1280x800p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x800p60_16_10);
+		break;
+	case HDMI_VFRMT_3840x2160p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p24_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p25_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p25_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p30_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p30_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p50_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p60_16_9);
+		break;
+	case HDMI_VFRMT_4096x2160p24_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p24_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p25_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p25_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p30_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p30_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p50_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p50_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p60_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p60_256_135);
+		break;
+	case HDMI_VFRMT_3840x2160p24_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p24_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p25_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p25_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p30_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p30_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p50_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p50_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p60_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p60_64_27);
+		break;
+	default:
+		ret = hdmi_get_resv_timing_info(mode, id);
+	}
+
+	return ret;
+}
+
+/* Fetch the timing for @mode and, when a downstream device is registered
+ * with a pixel-clock limit, mark timings above that limit unsupported.
+ * Returns 0 on success or a negative error from the timing lookup.
+ */
+int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
+	struct hdmi_util_ds_data *ds_data, u32 mode)
+{
+	int rc;
+
+	if (!info || mode >= HDMI_VFRMT_MAX)
+		return -EINVAL;
+
+	rc = msm_hdmi_get_timing_info(info, mode);
+	if (rc)
+		return rc;
+
+	/* downstream (e.g. bridge) clock cap, when one is registered */
+	if (ds_data && ds_data->ds_registered && ds_data->ds_max_clk &&
+	    info->pixel_freq > ds_data->ds_max_clk)
+		info->supported = false;
+
+	return rc;
+} /* hdmi_get_supported_mode */
+
+/* Render a video mode as a human readable string ("1920x1080 p60Hz 16/9")
+ * into the shared res_buf.  Unknown modes yield "???", timings marked
+ * unsupported yield the numeric mode id.
+ */
+const char *msm_hdmi_mode_2string(u32 mode)
+{
+	static struct msm_hdmi_mode_timing_info ri = {0};
+	char *aspect_ratio;
+
+	if (mode >= HDMI_VFRMT_MAX)
+		return "???";
+
+	if (hdmi_get_supported_mode(&ri, NULL, mode))
+		return "???";
+
+	memset(res_buf, 0, sizeof(res_buf));
+
+	if (!ri.supported) {
+		snprintf(res_buf, RESOLUTION_NAME_STR_LEN, "%d", mode);
+		return res_buf;
+	}
+
+	switch (ri.ar) {
+	case HDMI_RES_AR_4_3:
+		aspect_ratio = "4/3";
+		break;
+	case HDMI_RES_AR_5_4:
+		aspect_ratio = "5/4";
+		break;
+	case HDMI_RES_AR_16_9:
+		aspect_ratio = "16/9";
+		break;
+	case HDMI_RES_AR_16_10:
+		aspect_ratio = "16/10";
+		break;
+	default:
+		aspect_ratio = "???";
+		break;
+	}
+
+	snprintf(res_buf, RESOLUTION_NAME_STR_LEN, "%dx%d %s%dHz %s",
+		ri.active_h, ri.active_v, ri.interlaced ? "i" : "p",
+		ri.refresh_rate / 1000, aspect_ratio);
+
+	return res_buf;
+}
+
+/* Find the video format id (VIC) whose supported timing matches @timing_in
+ * on every h/v porch, pulse, active, pixel clock and refresh rate field.
+ * Returns the VIC, or -1 when no supported timing matches.
+ */
+int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
+	struct hdmi_util_ds_data *ds_data)
+{
+	int i, vic = -1;
+	struct msm_hdmi_mode_timing_info ref = {0};
+	u32 ret;
+
+	if (!timing_in) {
+		pr_err("invalid input\n");
+		goto exit;
+	}
+
+	/* active_low_h, active_low_v and interlaced are not checked against */
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+		bool match;
+
+		ret = hdmi_get_supported_mode(&ref, ds_data, i);
+		if (ret || !ref.supported)
+			continue;
+
+		match = timing_in->active_h == ref.active_h &&
+			timing_in->front_porch_h == ref.front_porch_h &&
+			timing_in->pulse_width_h == ref.pulse_width_h &&
+			timing_in->back_porch_h == ref.back_porch_h &&
+			timing_in->active_v == ref.active_v &&
+			timing_in->front_porch_v == ref.front_porch_v &&
+			timing_in->pulse_width_v == ref.pulse_width_v &&
+			timing_in->back_porch_v == ref.back_porch_v &&
+			timing_in->pixel_freq == ref.pixel_freq &&
+			timing_in->refresh_rate == ref.refresh_rate;
+
+		if (match) {
+			vic = (int)ref.video_format;
+			break;
+		}
+	}
+
+	if (vic < 0)
+		pr_err("timing is not supported h=%d v=%d\n",
+			timing_in->active_h, timing_in->active_v);
+	else
+		pr_debug("vic = %d timing = %s\n", vic,
+			msm_hdmi_mode_2string((u32)vic));
+exit:
+
+	return vic;
+} /* hdmi_get_video_id_code */
+
+/* Short name for a single 3D format flag; "" when the flag is not one of
+ * the known formats (e.g. when the masked bit was zero).
+ */
+static const char *hdmi_get_single_video_3d_fmt_2string(u32 format)
+{
+	if (format == TOP_AND_BOTTOM)
+		return "TAB";
+	if (format == FRAME_PACKING)
+		return "FP";
+	if (format == SIDE_BY_SIDE_HALF)
+		return "SSH";
+
+	return "";
+} /* hdmi_get_single_video_3d_fmt_2string */
+
+/* Render the set of 3D format flags in @format as a ':'-separated string
+ * ("FP:TAB:SSH") into @buf.  Returns the number of characters written.
+ */
+ssize_t hdmi_get_video_3d_fmt_2string(u32 format, char *buf, u32 size)
+{
+	static const u32 fmt_flags[] = {
+		FRAME_PACKING, TOP_AND_BOTTOM, SIDE_BY_SIDE_HALF };
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fmt_flags); i++) {
+		u32 cur = format & fmt_flags[i];
+		const char *name =
+			hdmi_get_single_video_3d_fmt_2string(cur);
+
+		/* separate with ':' once the buffer is non-empty */
+		if (len && cur)
+			len += scnprintf(buf + len, size - len, ":%s", name);
+		else
+			len += scnprintf(buf + len, size - len, "%s", name);
+	}
+
+	return len;
+} /* hdmi_get_video_3d_fmt_2string */
+
+/*
+ * hdmi_ddc_trigger() - program the DDC data/transaction registers and start
+ * an I2C transfer
+ * @ddc_ctrl: DDC controller context; ddc_ctrl->ddc_data supplies the device
+ *            address, register offset, buffer and lengths
+ * @mode:     TRIGGER_READ or TRIGGER_WRITE
+ * @seg:      for reads, issue an EDID segment-pointer write first
+ *            (segment pointer address 0x60, segment number 1)
+ *
+ * For reads, the (write-address, offset, read-address) sequence is loaded
+ * into the DDC data FIFO and the transaction registers are set up for a
+ * repeated-start read of request_len bytes.  For writes, the caller's
+ * buffer is preloaded into the FIFO.  The final HDMI_DDC_CTRL write kicks
+ * off the transfer; completion is signalled via the DDC interrupt.
+ */
+static void hdmi_ddc_trigger(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	enum trigger_mode mode, bool seg)
+{
+	struct hdmi_tx_ddc_data *ddc_data = &ddc_ctrl->ddc_data;
+	struct mdss_io_data *io = ddc_ctrl->io;
+	u32 const seg_addr = 0x60, seg_num = 0x01;
+	u32 ddc_ctrl_reg_val;
+
+	/* clear the R/W bit; the device address must be the write form */
+	ddc_data->dev_addr &= 0xFE;
+
+	if (mode == TRIGGER_READ && seg) {
+		DSS_REG_W_ND(io, HDMI_DDC_DATA, BIT(31) | (seg_addr << 8));
+		DSS_REG_W_ND(io, HDMI_DDC_DATA, seg_num << 8);
+	}
+
+	/* handle portion #1 */
+	DSS_REG_W_ND(io, HDMI_DDC_DATA, BIT(31) | (ddc_data->dev_addr << 8));
+
+	/* handle portion #2 */
+	DSS_REG_W_ND(io, HDMI_DDC_DATA, ddc_data->offset << 8);
+
+	if (mode == TRIGGER_READ) {
+		/* handle portion #3: device address with the read bit set */
+		DSS_REG_W_ND(io, HDMI_DDC_DATA,
+			(ddc_data->dev_addr | BIT(0)) << 8);
+
+		/* HDMI_I2C_TRANSACTION0 */
+		DSS_REG_W_ND(io, HDMI_DDC_TRANS0, BIT(12) | BIT(16));
+
+		/* Write to HDMI_I2C_TRANSACTION1 */
+		if (seg) {
+			DSS_REG_W_ND(io, HDMI_DDC_TRANS1, BIT(12) | BIT(16));
+			DSS_REG_W_ND(io, HDMI_DDC_TRANS2,
+				BIT(0) | BIT(12) | BIT(13) |
+				(ddc_data->request_len << 16));
+
+			ddc_ctrl_reg_val = BIT(0) | BIT(21);
+		} else {
+			DSS_REG_W_ND(io, HDMI_DDC_TRANS1,
+				BIT(0) | BIT(12) | BIT(13) |
+				(ddc_data->request_len << 16));
+
+			ddc_ctrl_reg_val = BIT(0) | BIT(20);
+		}
+	} else {
+		int ndx;
+
+		/* write buffer */
+		for (ndx = 0; ndx < ddc_data->data_len; ++ndx)
+			DSS_REG_W_ND(io, HDMI_DDC_DATA,
+				((u32)ddc_data->data_buf[ndx]) << 8);
+
+		DSS_REG_W_ND(io, HDMI_DDC_TRANS0,
+			(ddc_data->data_len + 1) << 16 | BIT(12) | BIT(13));
+
+		ddc_ctrl_reg_val = BIT(0);
+	}
+
+	/* Trigger the I2C transfer */
+	DSS_REG_W_ND(io, HDMI_DDC_CTRL, ddc_ctrl_reg_val);
+}
+
+/*
+ * hdmi_ddc_clear_status() - recover the DDC controller after a NACK
+ *
+ * Checks the SW DDC status register for NACK bits (12-15) and, when any
+ * transaction was NACKed, issues a SW-status reset plus soft reset of the
+ * DDC controller so the next attempt starts clean.
+ */
+static void hdmi_ddc_clear_status(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	u32 reg_val;
+
+	/* Read DDC status */
+	reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS);
+	reg_val &= BIT(12) | BIT(13) | BIT(14) | BIT(15);
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		pr_debug("%s: NACK: HDMI_DDC_SW_STATUS 0x%x\n",
+			ddc_ctrl->ddc_data.what, reg_val);
+
+		/* SW_STATUS_RESET, SOFT_RESET */
+		reg_val = BIT(3) | BIT(1);
+
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, reg_val);
+	}
+}
+
+/*
+ * hdmi_ddc_read_retry() - issue a DDC read and retry on failure
+ * @ddc_ctrl: DDC controller context; ddc_ctrl->ddc_data supplies address,
+ *            offset, buffer, lengths and the retry/timeout policy
+ *
+ * Triggers a SW DDC read and waits for completion either by busy-waiting
+ * (ddc_data->hard_timeout set; the ISR flips read_busy_wait_done) or by
+ * blocking on the ddc_sw_done completion for HZ/2.  On success the
+ * received bytes are copied from the DDC data register into
+ * ddc_data->data_buf.  Retries up to ddc_data->retry times.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ETIMEDOUT on timeout.
+ */
+static int hdmi_ddc_read_retry(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	u32 reg_val, ndx, time_out_count, wait_time;
+	struct hdmi_tx_ddc_data *ddc_data;
+	int status;
+	int busy_wait_us = 0;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		pr_err("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		pr_err("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+		if (status)
+			continue;
+
+		if (ddc_data->hard_timeout) {
+			pr_debug("using hard_timeout %dms\n",
+				ddc_data->hard_timeout);
+
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			atomic_set(&ddc_ctrl->read_busy_wait_done, 0);
+		} else {
+			reinit_completion(&ddc_ctrl->ddc_sw_done);
+			wait_time = HZ / 2;
+		}
+
+		hdmi_ddc_trigger(ddc_ctrl, TRIGGER_READ, false);
+
+		if (ddc_data->hard_timeout) {
+			/* spin until the ISR reports done or the time
+			 * budget runs out
+			 */
+			while (busy_wait_us > 0 &&
+				!atomic_read(&ddc_ctrl->read_busy_wait_done)) {
+				udelay(HDMI_BUSY_WAIT_DELAY_US);
+				busy_wait_us -= HDMI_BUSY_WAIT_DELAY_US;
+			};
+
+			if (busy_wait_us < 0)
+				busy_wait_us = 0;
+
+			time_out_count = busy_wait_us / HDMI_MS_TO_US;
+
+			ddc_data->timeout_left = time_out_count;
+		} else {
+			time_out_count = wait_for_completion_timeout(
+				&ddc_ctrl->ddc_sw_done, wait_time);
+
+			ddc_data->timeout_left =
+				jiffies_to_msecs(time_out_count);
+		}
+
+		pr_debug("ddc read done at %dms\n", jiffies_to_msecs(jiffies));
+
+		if (!time_out_count) {
+			pr_debug("%s: timedout\n", ddc_data->what);
+
+			status = -ETIMEDOUT;
+		}
+
+		hdmi_ddc_clear_status(ddc_ctrl);
+	} while (status && ddc_data->retry--);
+
+	if (status)
+		goto error;
+
+	/*
+	 * Position the DDC_DATA access at index 3 where the response bytes
+	 * start — NOTE(review): presumed from the register layout (BIT(31)
+	 * write with index in bits 16+); confirm against the DDC block HPG.
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(0) | (3 << 16) | BIT(31));
+
+	/* Discard first byte */
+	DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx) {
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+		ddc_data->data_buf[ndx] = (u8)((reg_val & 0x0000FF00) >> 8);
+	}
+
+	pr_debug("%s: success\n", ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_read_retry */
+
+/*
+ * hdmi_ddc_config() - one-time DDC controller setup
+ *
+ * Programs the I2C pre-scale/threshold, the clock-stall timeout and the
+ * reference timer used by the DDC block.
+ */
+void hdmi_ddc_config(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	/* Configure Pre-Scale multiplier & Threshold */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_SPEED, (10 << 16) | (2 << 0));
+
+	/*
+	 * Setting 31:24 bits : Time units to wait before timeout
+	 * when clock is being stalled by external sink device
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_SETUP, 0xFF000000);
+
+	/* Enable reference timer to 19 micro-seconds */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_REF, (1 << 16) | (19 << 0));
+} /* hdmi_ddc_config */
+
+/*
+ * hdmi_hdcp2p2_ddc_clear_status() - log and ack HDCP2P2 DDC error status
+ *
+ * Reads HDMI_HDCP2P2_DDC_STATUS, logs any abort/timeout/NACK condition and
+ * writes the corresponding ack bit (status bit + 1) back to clear it.
+ */
+static void hdmi_hdcp2p2_ddc_clear_status(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		return;
+	}
+
+	/* check for errors and clear status */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_STATUS);
+
+	if (reg_val & BIT(4)) {
+		pr_debug("ddc aborted\n");
+		reg_val |= BIT(5);
+	}
+
+	if (reg_val & BIT(8)) {
+		pr_debug("timed out\n");
+		reg_val |= BIT(9);
+	}
+
+	if (reg_val & BIT(12)) {
+		pr_debug("NACK0\n");
+		reg_val |= BIT(13);
+	}
+
+	if (reg_val & BIT(14)) {
+		pr_debug("NACK1\n");
+		reg_val |= BIT(15);
+	}
+
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_STATUS, reg_val);
+}
+
+/*
+ * hdmi_ddc_hdcp2p2_isr() - service HDCP 2.2 related DDC interrupts
+ * @ddc_ctrl: DDC controller context
+ *
+ * Reads and acks the HDCP2P2 interrupt sources (encryption ready/not
+ * ready, RxStatus message size, ready, reauth request, DDC
+ * done/failed/read-request, RxStatus DDC timeout), records them in
+ * ddc_ctrl->hdcp2p2_ddc_data, and on new RxStatus data or a reauth
+ * request either wakes a busy-waiting reader or invokes the registered
+ * link callback.
+ *
+ * Return: 0 on success; -EINVAL on bad input or when a new message/reauth
+ * request has no waiter and no callback to deliver it to.
+ */
+static int hdmi_ddc_hdcp2p2_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct mdss_io_data *io = NULL;
+	struct hdmi_tx_hdcp2p2_ddc_data *data;
+	u32 intr0, intr2, intr5;
+	u32 msg_size;
+	int rc = 0;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = ddc_ctrl->io;
+
+	data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+	intr0 = DSS_REG_R(io, HDMI_DDC_INT_CTRL0);
+	intr2 = DSS_REG_R(io, HDMI_HDCP_INT_CTRL2);
+	intr5 = DSS_REG_R_ND(io, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr0: 0x%x, intr2: 0x%x, intr5: 0x%x\n",
+		intr0, intr2, intr5);
+
+	/* check if encryption is enabled */
+	if (intr2 & BIT(0)) {
+		/*
+		 * ack encryption ready interrupt.
+		 * disable encryption ready interrupt.
+		 * enable encryption not ready interrupt.
+		 */
+		intr2 &= ~BIT(2);
+		intr2 |= BIT(1) | BIT(6);
+
+		pr_debug("HDCP 2.2 Encryption enabled\n");
+		data->encryption_ready = true;
+	}
+
+	/* check if encryption is disabled */
+	if (intr2 & BIT(4)) {
+		/*
+		 * ack encryption not ready interrupt.
+		 * disable encryption not ready interrupt.
+		 * enable encryption ready interrupt.
+		 */
+		intr2 &= ~BIT(6);
+		intr2 |= BIT(5) | BIT(2);
+
+		pr_debug("HDCP 2.2 Encryption disabled\n");
+		data->encryption_ready = false;
+	}
+
+	DSS_REG_W_ND(io, HDMI_HDCP_INT_CTRL2, intr2);
+
+	/* get the message size bits 29:20 */
+	msg_size = (intr0 & (0x3FF << 20)) >> 20;
+
+	if (msg_size) {
+		/* ack and disable message size interrupt */
+		intr0 |= BIT(30);
+		intr0 &= ~BIT(31);
+
+		data->message_size = msg_size;
+	}
+
+	/* check and disable ready interrupt */
+	if (intr0 & BIT(16)) {
+		/* ack ready/not ready interrupt */
+		intr0 |= BIT(17);
+
+		intr0 &= ~BIT(18);
+		data->ready = true;
+	}
+
+	/* check for reauth req interrupt */
+	if (intr0 & BIT(12)) {
+		/* ack and disable reauth req interrupt */
+		intr0 |= BIT(13);
+		intr0 &= ~BIT(14);
+
+		data->reauth_req = true;
+	}
+
+	/* check for ddc fail interrupt */
+	if (intr0 & BIT(8)) {
+		/* ack ddc fail interrupt */
+		intr0 |= BIT(9);
+
+		data->ddc_max_retries_fail = true;
+	}
+
+	/* check for ddc done interrupt */
+	if (intr0 & BIT(4)) {
+		/* ack ddc done interrupt */
+		intr0 |= BIT(5);
+
+		data->ddc_done = true;
+	}
+
+	/* check for ddc read req interrupt */
+	if (intr0 & BIT(0)) {
+		/* ack read req interrupt */
+		intr0 |= BIT(1);
+
+		data->ddc_read_req = true;
+	}
+
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL0, intr0);
+
+	if (intr5 & BIT(0)) {
+		pr_err("RXSTATUS_DDC_REQ_TIMEOUT\n");
+
+		/* ack and disable timeout interrupt */
+		intr5 |= BIT(1);
+		intr5 &= ~BIT(2);
+
+		data->ddc_timeout = true;
+	}
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL5, intr5);
+
+	/* deliver new RxStatus data / reauth to whoever is waiting for it */
+	if (data->message_size || data->ready || data->reauth_req) {
+		if (data->wait) {
+			atomic_set(&ddc_ctrl->rxstatus_busy_wait_done, 1);
+		} else if (data->link_cb && data->link_data) {
+			data->link_cb(data->link_data);
+		} else {
+			pr_err("new msg/reauth not handled\n");
+			rc = -EINVAL;
+		}
+	}
+
+	hdmi_hdcp2p2_ddc_clear_status(ddc_ctrl);
+
+	return rc;
+}
+
+/*
+ * hdmi_ddc_scrambling_isr() - service scrambler-status DDC interrupts
+ *
+ * Acks the scrambler failure/timeout interrupt sources and, when any of
+ * them fired, disables the HW scrambler status polling via
+ * hdmi_scrambler_ddc_disable().
+ *
+ * Return: 0 on success, -EINVAL on bad input.
+ */
+static int hdmi_ddc_scrambling_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct mdss_io_data *io;
+	bool scrambler_timer_off = false;
+	u32 intr2, intr5;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = ddc_ctrl->io;
+
+	intr2 = DSS_REG_R_ND(io, HDMI_DDC_INT_CTRL2);
+	intr5 = DSS_REG_R_ND(io, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr2: 0x%x, intr5: 0x%x\n", intr2, intr5);
+
+	/* sink reported scrambler status 0 */
+	if (intr2 & BIT(12)) {
+		pr_err("SCRAMBLER_STATUS_NOT\n");
+
+		intr2 |= BIT(14);
+
+		scrambler_timer_off = true;
+	}
+
+	/* DDC poll of the scrambler status failed */
+	if (intr2 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_FAILED\n");
+
+		intr2 |= BIT(9);
+
+		scrambler_timer_off = true;
+	}
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL2, intr2);
+
+	/* scrambler status DDC request timed out: ack and disable */
+	if (intr5 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_REQ_TIMEOUT\n");
+
+		intr5 |= BIT(9);
+		intr5 &= ~BIT(10);
+
+		scrambler_timer_off = true;
+	}
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL5, intr5);
+
+	if (scrambler_timer_off)
+		hdmi_scrambler_ddc_disable(ddc_ctrl);
+
+	return 0;
+}
+
+/*
+ * hdmi_ddc_isr() - top-level DDC interrupt service routine
+ * @ddc_ctrl: DDC controller context
+ * @version:  HDMI TX hardware version; scrambling interrupts are only
+ *            serviced from HDMI_TX_SCRAMBLER_MIN_TX_VERSION onward
+ *
+ * Acks SW/HW done interrupts (waking the pending read/write waiter), then
+ * dispatches to the scrambling and HDCP 2.2 sub-ISRs.
+ *
+ * Return: 0 on success, -EINVAL on bad input, or the hdcp2p2 sub-ISR
+ * error.  A scrambling sub-ISR error is only logged; it is overwritten by
+ * the hdcp2p2 result.
+ */
+int hdmi_ddc_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl, u32 version)
+{
+	u32 ddc_int_ctrl;
+	/* negative error codes flow through here; must be signed (was u32) */
+	int ret = 0;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_int_ctrl = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL);
+	pr_debug("intr: 0x%x\n", ddc_int_ctrl);
+
+	if (ddc_int_ctrl & BIT(0)) {
+		pr_debug("sw done\n");
+
+		/* ack sw done and wake whichever wait style is in use */
+		ddc_int_ctrl |= BIT(1);
+		if (ddc_ctrl->ddc_data.hard_timeout) {
+			atomic_set(&ddc_ctrl->read_busy_wait_done, 1);
+			atomic_set(&ddc_ctrl->write_busy_wait_done, 1);
+		} else {
+			complete(&ddc_ctrl->ddc_sw_done);
+		}
+	}
+
+	if (ddc_int_ctrl & BIT(4)) {
+		pr_debug("hw done\n");
+		ddc_int_ctrl |= BIT(5);
+	}
+
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+	if (version >= HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+		ret = hdmi_ddc_scrambling_isr(ddc_ctrl);
+		if (ret)
+			pr_err("err in scrambling isr\n");
+	}
+
+	ret = hdmi_ddc_hdcp2p2_isr(ddc_ctrl);
+	if (ret)
+		pr_err("err in hdcp2p2 isr\n");
+
+	return ret;
+} /* hdmi_ddc_isr */
+
+/* Perform a DDC read; if it fails and retry_align is set, retry once more
+ * with the request length rounded up to a multiple of 32 bytes and the
+ * retry budget restored.
+ */
+int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct hdmi_tx_ddc_data *ddc_data;
+	int saved_retry;
+	int rc;
+
+	if (!ddc_ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+	saved_retry = ddc_data->retry;
+
+	rc = hdmi_ddc_read_retry(ddc_ctrl);
+	if (!rc || !ddc_data->retry_align)
+		return rc;
+
+	/* retry with a 32-byte aligned request length */
+	ddc_data->retry = saved_retry;
+	ddc_data->request_len = 32 * ((ddc_data->data_len + 31) / 32);
+
+	return hdmi_ddc_read_retry(ddc_ctrl);
+} /* hdmi_ddc_read */
+
+/*
+ * hdmi_ddc_read_seg() - DDC read using the EDID segment pointer flow
+ * @ddc_ctrl: DDC controller context; ddc_ctrl->ddc_data supplies address,
+ *            offset, buffer, lengths and retry count
+ *
+ * Same wait/retry structure as hdmi_ddc_read_retry() (completion based,
+ * HZ/2 timeout) but triggers the segment-pointer variant of the read.  On
+ * success the received bytes are copied into ddc_data->data_buf.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ETIMEDOUT on timeout.
+ */
+int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	int status;
+	u32 reg_val, ndx, time_out_count;
+	struct hdmi_tx_ddc_data *ddc_data;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		pr_err("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		pr_err("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+		if (status)
+			continue;
+
+		reinit_completion(&ddc_ctrl->ddc_sw_done);
+
+		hdmi_ddc_trigger(ddc_ctrl, TRIGGER_READ, true);
+
+		time_out_count = wait_for_completion_timeout(
+			&ddc_ctrl->ddc_sw_done, HZ / 2);
+
+		if (!time_out_count) {
+			pr_debug("%s: timedout\n", ddc_data->what);
+
+			status = -ETIMEDOUT;
+		}
+
+		hdmi_ddc_clear_status(ddc_ctrl);
+	} while (status && ddc_data->retry--);
+
+	if (status)
+		goto error;
+
+	/*
+	 * Position the DDC_DATA access at index 5 where the response bytes
+	 * start for the segment flow — NOTE(review): presumed from the
+	 * register layout; confirm against the DDC block HPG.
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(0) | (5 << 16) | BIT(31));
+
+	/* Discard first byte */
+	DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx) {
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+		ddc_data->data_buf[ndx] = (u8) ((reg_val & 0x0000FF00) >> 8);
+	}
+
+	pr_debug("%s: success\n", ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_read_seg */
+
+/*
+ * hdmi_ddc_write() - write a buffer to a sink device over DDC (I2C)
+ * @ddc_ctrl: DDC controller context; ddc_ctrl->ddc_data holds the device
+ *            address, offset, buffer and retry/timeout policy
+ *
+ * Triggers a SW DDC write and waits for completion either by busy-waiting
+ * (ddc_data->hard_timeout set; the ISR flips write_busy_wait_done) or by
+ * blocking on the ddc_sw_done completion for HZ/2.  Retries up to
+ * ddc_data->retry times on failure.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ETIMEDOUT on timeout.
+ */
+int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	int status;
+	u32 time_out_count;
+	struct hdmi_tx_ddc_data *ddc_data;
+	u32 wait_time;
+	int busy_wait_us = 0;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		pr_err("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		pr_err("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+		if (status)
+			continue;
+
+		if (ddc_data->hard_timeout) {
+			pr_debug("using hard_timeout %dms\n",
+				ddc_data->hard_timeout);
+
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			atomic_set(&ddc_ctrl->write_busy_wait_done, 0);
+		} else {
+			reinit_completion(&ddc_ctrl->ddc_sw_done);
+			wait_time = HZ / 2;
+		}
+
+		hdmi_ddc_trigger(ddc_ctrl, TRIGGER_WRITE, false);
+
+		if (ddc_data->hard_timeout) {
+			/* spin until the ISR reports done or the time
+			 * budget runs out
+			 */
+			while (busy_wait_us > 0 &&
+				!atomic_read(&ddc_ctrl->write_busy_wait_done)) {
+				udelay(HDMI_BUSY_WAIT_DELAY_US);
+				busy_wait_us -= HDMI_BUSY_WAIT_DELAY_US;
+			}
+
+			if (busy_wait_us < 0)
+				busy_wait_us = 0;
+
+			time_out_count = busy_wait_us / HDMI_MS_TO_US;
+
+			ddc_data->timeout_left = time_out_count;
+		} else {
+			time_out_count = wait_for_completion_timeout(
+				&ddc_ctrl->ddc_sw_done, wait_time);
+
+			ddc_data->timeout_left =
+				jiffies_to_msecs(time_out_count);
+		}
+
+		pr_debug("DDC write done at %dms\n", jiffies_to_msecs(jiffies));
+
+		if (!time_out_count) {
+			/* message fixed: was "%s timout\n"; now matches the
+			 * read path's wording
+			 */
+			pr_debug("%s: timedout\n", ddc_data->what);
+
+			status = -ETIMEDOUT;
+		}
+
+		hdmi_ddc_clear_status(ddc_ctrl);
+	} while (status && ddc_data->retry--);
+
+	if (status)
+		goto error;
+
+	pr_debug("%s: success\n", ddc_data->what);
+error:
+	return status;
+} /* hdmi_ddc_write */
+
+
+/* Abort the ongoing HW DDC transaction by setting the abort/arbitration
+ * bits.  Returns 0 on success or the error from clearing pending irqs.
+ */
+int hdmi_ddc_abort_transaction(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	int rc;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_clear_irq(ddc_ctrl, ddc_ctrl->ddc_data.what);
+	if (rc)
+		return rc;
+
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_ARBITRATION, BIT(12)|BIT(8));
+
+	return 0;
+}
+
+/*
+ * hdmi_scdc_read() - read a status/config field from the sink's SCDC space
+ * @ctrl:      DDC controller context
+ * @data_type: HDMI_TX_SCDC_* identifier of the field to read
+ * @val:       output; decoded value (0/1 flag bits, or the channel error
+ *             count)
+ *
+ * Issues a DDC read at SCDC slave address 0xA8 from the offset backing
+ * @data_type, then extracts the relevant bit(s) into @val.  For the error
+ * count registers the count is reported only when the valid bit (bit 7 of
+ * the high byte) is set; otherwise @val is set to 0.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, or the DDC read error.
+ */
+int hdmi_scdc_read(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 *val)
+{
+	struct hdmi_tx_ddc_data data = {0};
+	int rc = 0;
+	u8 data_buf[2] = {0};
+
+	if (!ctrl || !ctrl->io || !val) {
+		pr_err("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		pr_err("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	data.what = hdmi_scdc_reg2string(data_type);
+	data.dev_addr = 0xA8;
+	data.retry = 1;
+	data.data_buf = data_buf;
+
+	/* map the requested field to its SCDC offset and length */
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_TMDS_CONFIG;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_STATUS_FLAGS_0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+		data.data_len = 2;
+		data.request_len = 2;
+		data.offset = HDMI_SCDC_ERR_DET_0_L;
+		break;
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+		data.data_len = 2;
+		data.request_len = 2;
+		data.offset = HDMI_SCDC_ERR_DET_1_L;
+		break;
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		data.data_len = 2;
+		data.request_len = 2;
+		data.offset = HDMI_SCDC_ERR_DET_2_L;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_CONFIG_0;
+		break;
+	default:
+		break;
+	}
+
+	ctrl->ddc_data = data;
+
+	rc = hdmi_ddc_read(ctrl);
+	if (rc) {
+		pr_err("DDC Read failed for %s\n", data.what);
+		return rc;
+	}
+
+	/* decode the raw byte(s) into the caller's value */
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		*val = (data_buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(2)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(3)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		/* count is only valid when bit 7 of the high byte is set */
+		if (data_buf[1] & BIT(7))
+			*val = (data_buf[0] | ((data_buf[1] & 0x7F) << 8));
+		else
+			*val = 0;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * hdmi_scdc_write() - write a config field to the sink's SCDC space
+ * @ctrl:      DDC controller context
+ * @data_type: HDMI_TX_SCDC_* identifier of the writable field
+ * @val:       new value (only the relevant low bit is used)
+ *
+ * For TMDS_CONFIG fields (scrambling enable, clock ratio update) the
+ * current register value is read first so only the targeted bit changes.
+ * Read-only SCDC fields are rejected with -EINVAL.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or a read-only field, or
+ * the DDC read/write error.
+ */
+int hdmi_scdc_write(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 val)
+{
+	struct hdmi_tx_ddc_data data = {0};
+	struct hdmi_tx_ddc_data rdata = {0};
+	int rc = 0;
+	u8 data_buf[2] = {0};
+	u8 read_val = 0;
+
+	if (!ctrl || !ctrl->io) {
+		pr_err("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		pr_err("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	data.what = hdmi_scdc_reg2string(data_type);
+	data.dev_addr = 0xA8;
+	data.retry = 1;
+	data.data_buf = data_buf;
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		/* read-modify-write: only the targeted bit may change */
+		rdata.what = "TMDS CONFIG";
+		rdata.dev_addr = 0xA8;
+		rdata.retry = 2;
+		rdata.data_buf = &read_val;
+		rdata.data_len = 1;
+		rdata.offset = HDMI_SCDC_TMDS_CONFIG;
+		rdata.request_len = 1;
+		ctrl->ddc_data = rdata;
+		rc = hdmi_ddc_read(ctrl);
+		if (rc) {
+			pr_err("scdc read failed\n");
+			return rc;
+		}
+		if (data_type == HDMI_TX_SCDC_SCRAMBLING_ENABLE) {
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(0))) |
+				       ((u8)(val & BIT(0))));
+		} else {
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(1))) |
+				       (((u8)(val & BIT(0))) << 1));
+		}
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_TMDS_CONFIG;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_CONFIG_0;
+		data_buf[0] = (u8)(val & 0x1);
+		break;
+	default:
+		pr_err("Cannot write to read only reg (%d)\n",
+			data_type);
+		return -EINVAL;
+	}
+
+	ctrl->ddc_data = data;
+
+	rc = hdmi_ddc_write(ctrl);
+	if (rc) {
+		/* message fixed: this is the write path ("Read" before) */
+		pr_err("DDC write failed for %s\n", data.what);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Arm one of the HW DDC timers.  Only the scrambler status timer is
+ * currently supported; any other type is rejected with -EINVAL.
+ */
+int hdmi_setup_ddc_timers(struct hdmi_tx_ddc_ctrl *ctrl,
+	u32 type, u32 to_in_num_lines)
+{
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	if (type >= HDMI_TX_DDC_TIMER_MAX) {
+		pr_err("Invalid timer type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (type == HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS) {
+		hdmi_scrambler_status_timer_setup(ctrl, to_in_num_lines);
+		return 0;
+	}
+
+	pr_err("%d type not supported\n", type);
+	return -EINVAL;
+}
+
+/*
+ * hdmi_hdcp2p2_ddc_reset() - quiesce the HDCP2P2 DDC block
+ *
+ * Acks all pending RxStatus-related interrupts, then pulses the DDC timer
+ * reset bit (set then clear BIT(0) of HDMI_HDCP2P2_DDC_CTRL).
+ */
+static void hdmi_hdcp2p2_ddc_reset(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	/*
+	 * Clear acks for DDC_REQ, DDC_DONE, DDC_FAILED, RXSTATUS_READY,
+	 * RXSTATUS_MSG_SIZE
+	 */
+	reg_val = BIT(30) | BIT(17) | BIT(13) | BIT(9) | BIT(5) | BIT(1);
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL0, reg_val);
+
+	/* Reset DDC timers */
+	reg_val = BIT(0) | DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_CTRL);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+}
+
+/*
+ * hdmi_hdcp2p2_ddc_disable() - stop hardware-driven HDCP 2.2 RxStatus reads
+ * @ctrl: DDC controller context
+ *
+ * Resets the HDCP 2.2 DDC state (ack interrupts, reset timers) and then
+ * clears RXSTATUS_DDC_ENABLE (bits 1:0) in HDMI_HW_DDC_CTRL, returning
+ * the RxStatus DDC access mode to "disabled" (value 0, see the mode
+ * table documented in hdmi_hdcp2p2_ddc_read_rxstatus()).
+ */
+void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	hdmi_hdcp2p2_ddc_reset(ctrl);
+
+	/* Disable HW DDC access to RxStatus register */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(1) | BIT(0));
+
+	DSS_REG_W(ctrl->io, HDMI_HW_DDC_CTRL, reg_val);
+}
+
+/*
+ * hdmi_hdcp2p2_ddc_read_rxstatus() - trigger a HW-driven read of the HDCP
+ * 2.2 RxStatus register and optionally busy-wait for completion
+ * @ctrl: DDC controller context; parameters are taken from
+ *        ctrl->hdcp2p2_ddc_data (interrupt mask, timeouts, wait flag)
+ *
+ * Programs the HDCP 2.2 DDC timers and interrupt masks, then arms the
+ * hardware in SW_TRIGGER mode (HDMI_HW_DDC_CTRL bits 1:0 = 3) and fires
+ * HDMI_HDCP2P2_DDC_SW_TRIGGER.  If data->wait is set, busy-waits in
+ * HDMI_BUSY_WAIT_DELAY_US steps until ctrl->rxstatus_busy_wait_done is
+ * set (presumably by the DDC ISR - confirm against the interrupt
+ * handler) or data->timeout_ms expires, then disables HW DDC access.
+ *
+ * Return: 0 on success, -EINVAL on bad input, -ETIMEDOUT if the wait
+ * expired, or the error from hdmi_ddc_clear_irq().
+ */
+int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+	u32 intr_en_mask;
+	u32 timeout;
+	u32 timer;
+	int rc = 0;
+	struct hdmi_tx_hdcp2p2_ddc_data *data;
+	int busy_wait_us;
+
+	if (!ctrl) {
+		pr_err("Invalid ctrl data\n");
+		return -EINVAL;
+	}
+
+	data = &ctrl->hdcp2p2_ddc_data;
+	/*
+	 * NOTE(review): data is the address of an embedded member and can
+	 * never be NULL here; this check is dead code.
+	 */
+	if (!data) {
+		pr_err("Invalid ddc data\n");
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_clear_irq(ctrl, "rxstatus");
+	if (rc)
+		return rc;
+
+	intr_en_mask = data->intr_mask;
+	intr_en_mask |= BIT(HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK);
+
+	/* Disable short read for now, sinks don't support it */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val |= BIT(4);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+
+	/*
+	 * Setup the DDC timers for HDMI_HDCP2P2_DDC_TIMER_CTRL1 and
+	 * HDMI_HDCP2P2_DDC_TIMER_CTRL2.
+	 * Following are the timers:
+	 * 1. DDC_REQUEST_TIMER: Timeout in hsyncs in which to wait for the
+	 *	HDCP 2.2 sink to respond to an RxStatus request
+	 * 2. DDC_URGENT_TIMER: Time period in hsyncs to issue an urgent flag
+	 *	when an RxStatus DDC request is made but not accepted by I2C
+	 *	engine
+	 * 3. DDC_TIMEOUT_TIMER: Timeout in hsyncs which starts counting when
+	 *	a request is made and stops when it is accepted by DDC arbiter
+	 */
+	timeout = data->timeout_hsync;
+	timer = data->periodic_timer_hsync;
+	pr_debug("timeout: %d hsyncs, timer %d hsync\n", timeout, timer);
+
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_TIMER_CTRL, timer);
+
+	/* Set both urgent and hw-timeout fields to the same value */
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_TIMER_CTRL2,
+		(timeout << 16 | timeout));
+
+	/* enable interrupts */
+	reg_val = intr_en_mask;
+	/*
+	 * Clear interrupt status bits: per the HDMI_DDC_INT_CTRL0 layout,
+	 * each ACK bit sits one position below its MASK bit, so shifting
+	 * the mask right by one yields the corresponding ACK bits.
+	 */
+	reg_val |= intr_en_mask >> 1;
+
+	pr_debug("writing HDMI_DDC_INT_CTRL0 0x%x\n", reg_val);
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL0, reg_val);
+
+	reg_val = DSS_REG_R(ctrl->io, HDMI_DDC_INT_CTRL5);
+	/* clear and enable RxStatus read timeout */
+	reg_val |= BIT(2) | BIT(1);
+
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL5, reg_val);
+
+	/*
+	 * Enable hardware DDC access to RxStatus register
+	 *
+	 * HDMI_HW_DDC_CTRL:Bits 1:0 (RXSTATUS_DDC_ENABLE) read like this:
+	 *
+	 * 0 = disable HW controlled DDC access to RxStatus
+	 * 1 = automatic on when HDCP 2.2 is authenticated and loop based on
+	 * request timer (i.e. the hardware will loop automatically)
+	 * 2 = force on and loop based on request timer (hardware will loop)
+	 * 3 = enable by sw trigger and loop until interrupt is generated for
+	 * RxStatus.reauth_req, RxStatus.ready or RxStatus.message_Size.
+	 *
+	 * Depending on the value of ddc_data::poll_sink, we make the decision
+	 * to use either SW_TRIGGER(3) (poll_sink = false) which means that the
+	 * hardware will poll sink and generate interrupt when sink responds,
+	 * or use AUTOMATIC_LOOP(1) (poll_sink = true) which will poll the sink
+	 * based on request timer
+	 */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(1) | BIT(0));
+
+	busy_wait_us = data->timeout_ms * HDMI_MS_TO_US;
+	atomic_set(&ctrl->rxstatus_busy_wait_done, 0);
+
+	/* read method: HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER */
+	reg_val |= BIT(1) | BIT(0);
+	DSS_REG_W(ctrl->io, HDMI_HW_DDC_CTRL, reg_val);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_SW_TRIGGER, 1);
+
+	if (data->wait) {
+		/* poll until the ISR signals done or the budget runs out */
+		while (busy_wait_us > 0 &&
+			!atomic_read(&ctrl->rxstatus_busy_wait_done)) {
+			udelay(HDMI_BUSY_WAIT_DELAY_US);
+			busy_wait_us -= HDMI_BUSY_WAIT_DELAY_US;
+		}; /* NOTE(review): stray ';' after the loop body */
+
+		/* the last decrement may overshoot below zero; clamp it */
+		if (busy_wait_us < 0)
+			busy_wait_us = 0;
+
+		data->timeout_left = busy_wait_us / HDMI_MS_TO_US;
+
+		if (!data->timeout_left) {
+			pr_err("sw ddc rxstatus timeout\n");
+			rc = -ETIMEDOUT;
+		}
+
+		hdmi_hdcp2p2_ddc_disable(ctrl);
+	}
+
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
new file mode 100644
index 0000000..ecab9d5
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -0,0 +1,514 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_UTIL_H__
+#define __HDMI_UTIL_H__
+#include <linux/mdss_io_util.h>
+#include "video/msm_hdmi_modes.h"
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL (0x00000000)
+#define HDMI_TEST_PATTERN (0x00000010)
+#define HDMI_RANDOM_PATTERN (0x00000014)
+#define HDMI_PKT_BLK_CTRL (0x00000018)
+#define HDMI_STATUS (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL (0x00000020)
+#define HDMI_ACR_PKT_CTRL (0x00000024)
+#define HDMI_VBI_PKT_CTRL (0x00000028)
+#define HDMI_INFOFRAME_CTRL0 (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1 (0x00000030)
+#define HDMI_GEN_PKT_CTRL (0x00000034)
+#define HDMI_ACP (0x0000003C)
+#define HDMI_GC (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2 (0x00000044)
+#define HDMI_ISRC1_0 (0x00000048)
+#define HDMI_ISRC1_1 (0x0000004C)
+#define HDMI_ISRC1_2 (0x00000050)
+#define HDMI_ISRC1_3 (0x00000054)
+#define HDMI_ISRC1_4 (0x00000058)
+#define HDMI_ISRC2_0 (0x0000005C)
+#define HDMI_ISRC2_1 (0x00000060)
+#define HDMI_ISRC2_2 (0x00000064)
+#define HDMI_ISRC2_3 (0x00000068)
+#define HDMI_AVI_INFO0 (0x0000006C)
+#define HDMI_AVI_INFO1 (0x00000070)
+#define HDMI_AVI_INFO2 (0x00000074)
+#define HDMI_AVI_INFO3 (0x00000078)
+#define HDMI_MPEG_INFO0 (0x0000007C)
+#define HDMI_MPEG_INFO1 (0x00000080)
+#define HDMI_GENERIC0_HDR (0x00000084)
+#define HDMI_GENERIC0_0 (0x00000088)
+#define HDMI_GENERIC0_1 (0x0000008C)
+#define HDMI_GENERIC0_2 (0x00000090)
+#define HDMI_GENERIC0_3 (0x00000094)
+#define HDMI_GENERIC0_4 (0x00000098)
+#define HDMI_GENERIC0_5 (0x0000009C)
+#define HDMI_GENERIC0_6 (0x000000A0)
+#define HDMI_GENERIC1_HDR (0x000000A4)
+#define HDMI_GENERIC1_0 (0x000000A8)
+#define HDMI_GENERIC1_1 (0x000000AC)
+#define HDMI_GENERIC1_2 (0x000000B0)
+#define HDMI_GENERIC1_3 (0x000000B4)
+#define HDMI_GENERIC1_4 (0x000000B8)
+#define HDMI_GENERIC1_5 (0x000000BC)
+#define HDMI_GENERIC1_6 (0x000000C0)
+#define HDMI_ACR_32_0 (0x000000C4)
+#define HDMI_ACR_32_1 (0x000000C8)
+#define HDMI_ACR_44_0 (0x000000CC)
+#define HDMI_ACR_44_1 (0x000000D0)
+#define HDMI_ACR_48_0 (0x000000D4)
+#define HDMI_ACR_48_1 (0x000000D8)
+#define HDMI_ACR_STATUS_0 (0x000000DC)
+#define HDMI_ACR_STATUS_1 (0x000000E0)
+#define HDMI_AUDIO_INFO0 (0x000000E4)
+#define HDMI_AUDIO_INFO1 (0x000000E8)
+#define HDMI_CS_60958_0 (0x000000EC)
+#define HDMI_CS_60958_1 (0x000000F0)
+#define HDMI_RAMP_CTRL0 (0x000000F8)
+#define HDMI_RAMP_CTRL1 (0x000000FC)
+#define HDMI_RAMP_CTRL2 (0x00000100)
+#define HDMI_RAMP_CTRL3 (0x00000104)
+#define HDMI_CS_60958_2 (0x00000108)
+#define HDMI_HDCP_CTRL2 (0x0000010C)
+#define HDMI_HDCP_CTRL (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL (0x00000114)
+#define HDMI_HDCP_INT_CTRL (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0 (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1 (0x00000124)
+#define HDMI_HDCP_DDC_STATUS (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0 (0x0000012C)
+#define HDMI_HDCP_RESET (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0 (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1 (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0 (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1 (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3 (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4 (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5 (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6 (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7 (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8 (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9 (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10 (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11 (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12 (0x00000168)
+#define HDMI_VENSPEC_INFO0 (0x0000016C)
+#define HDMI_VENSPEC_INFO1 (0x00000170)
+#define HDMI_VENSPEC_INFO2 (0x00000174)
+#define HDMI_VENSPEC_INFO3 (0x00000178)
+#define HDMI_VENSPEC_INFO4 (0x0000017C)
+#define HDMI_VENSPEC_INFO5 (0x00000180)
+#define HDMI_VENSPEC_INFO6 (0x00000184)
+#define HDMI_HDCP_DEBUG (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01 (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23 (0x000001AC)
+#define HDMI_TMDS_DEBUG (0x000001B4)
+#define HDMI_TMDS_CTL_BITS (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN (0x000001CC)
+#define HDMI_AUDIO_CFG (0x000001D0)
+#define HDMI_DEBUG (0x00000204)
+#define HDMI_USEC_REFTIMER (0x00000208)
+#define HDMI_DDC_CTRL (0x0000020C)
+#define HDMI_DDC_ARBITRATION (0x00000210)
+#define HDMI_DDC_INT_CTRL (0x00000214)
+#define HDMI_DDC_SW_STATUS (0x00000218)
+#define HDMI_DDC_HW_STATUS (0x0000021C)
+#define HDMI_DDC_SPEED (0x00000220)
+#define HDMI_DDC_SETUP (0x00000224)
+#define HDMI_DDC_TRANS0 (0x00000228)
+#define HDMI_DDC_TRANS1 (0x0000022C)
+#define HDMI_DDC_TRANS2 (0x00000230)
+#define HDMI_DDC_TRANS3 (0x00000234)
+#define HDMI_DDC_DATA (0x00000238)
+#define HDMI_HDCP_SHA_CTRL (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS (0x00000240)
+#define HDMI_HDCP_SHA_DATA (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0 (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1 (0x0000024C)
+#define HDMI_HPD_INT_STATUS (0x00000250)
+#define HDMI_HPD_INT_CTRL (0x00000254)
+#define HDMI_HPD_CTRL (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1 (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN (0x00000264)
+#define HDMI_CRC_CTRL (0x00000268)
+#define HDMI_VID_CRC (0x0000026C)
+#define HDMI_AUD_CRC (0x00000270)
+#define HDMI_VBI_CRC (0x00000274)
+#define HDMI_DDC_REF (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV (0x00000288)
+#define HDMI_CEC_CTRL (0x0000028C)
+#define HDMI_CEC_WR_DATA (0x00000290)
+#define HDMI_CEC_RETRANSMIT (0x00000294)
+#define HDMI_CEC_STATUS (0x00000298)
+#define HDMI_CEC_INT (0x0000029C)
+#define HDMI_CEC_ADDR (0x000002A0)
+#define HDMI_CEC_TIME (0x000002A4)
+#define HDMI_CEC_REFTIMER (0x000002A8)
+#define HDMI_CEC_RD_DATA (0x000002AC)
+#define HDMI_CEC_RD_FILTER (0x000002B0)
+#define HDMI_ACTIVE_H (0x000002B4)
+#define HDMI_ACTIVE_V (0x000002B8)
+#define HDMI_ACTIVE_V_F2 (0x000002BC)
+#define HDMI_TOTAL (0x000002C0)
+#define HDMI_V_TOTAL_F2 (0x000002C4)
+#define HDMI_FRAME_CTRL (0x000002C8)
+#define HDMI_AUD_INT (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL (0x000002D0)
+#define HDMI_PHY_CTRL (0x000002D4)
+#define HDMI_CEC_WR_RANGE (0x000002DC)
+#define HDMI_CEC_RD_RANGE (0x000002E0)
+#define HDMI_VERSION (0x000002E4)
+#define HDMI_BIST_ENABLE (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN (0x000002F8)
+#define HDMI_INTF_CONFIG (0x000002FC)
+#define HDMI_HSYNC_CTL (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0 (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1 (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0 (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1 (0x00000310)
+#define HDMI_DISPLAY_V_START_F0 (0x00000314)
+#define HDMI_DISPLAY_V_START_F1 (0x00000318)
+#define HDMI_DISPLAY_V_END_F0 (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1 (0x00000320)
+#define HDMI_ACTIVE_V_START_F0 (0x00000324)
+#define HDMI_ACTIVE_V_START_F1 (0x00000328)
+#define HDMI_ACTIVE_V_END_F0 (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1 (0x00000330)
+#define HDMI_DISPLAY_HCTL (0x00000334)
+#define HDMI_ACTIVE_HCTL (0x00000338)
+#define HDMI_HSYNC_SKEW (0x0000033C)
+#define HDMI_POLARITY_CTL (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS (0x0000034C)
+#define HDMI_TPG_RECTANGLE (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES (0x00000358)
+#define HDMI_TPG_RGB_MAPPING (0x0000035C)
+#define HDMI_CEC_COMPL_CTL (0x00000360)
+#define HDMI_CEC_RD_START_RANGE (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE (0x00000374)
+#define HDMI_CTRL_SW_RESET (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET (0x0000037C)
+#define HDMI_SCRATCH (0x00000380)
+#define HDMI_CLK_CTRL (0x00000384)
+#define HDMI_CLK_ACTIVE (0x00000388)
+#define HDMI_VBI_CFG (0x0000038C)
+#define HDMI_DDC_INT_CTRL0 (0x00000430)
+#define HDMI_DDC_INT_CTRL1 (0x00000434)
+#define HDMI_DDC_INT_CTRL2 (0x00000438)
+#define HDMI_DDC_INT_CTRL3 (0x0000043C)
+#define HDMI_DDC_INT_CTRL4 (0x00000440)
+#define HDMI_DDC_INT_CTRL5 (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2 (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER (0x000004D0)
+#define HDMI_HDCP_STATUS (0x00000500)
+#define HDMI_HDCP_INT_CTRL2 (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0 (0x00000000)
+#define HDMI_PHY_ANA_CFG1 (0x00000004)
+#define HDMI_PHY_PD_CTRL0 (0x00000010)
+#define HDMI_PHY_PD_CTRL1 (0x00000014)
+#define HDMI_PHY_BIST_CFG0 (0x00000034)
+#define HDMI_PHY_BIST_PATN0 (0x0000003C)
+#define HDMI_PHY_BIST_PATN1 (0x00000040)
+#define HDMI_PHY_BIST_PATN2 (0x00000044)
+#define HDMI_PHY_BIST_PATN3 (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (0x000000FC)
+#define QFPROM_RAW_VERSION_4 (0x000000A8)
+#define SEC_CTRL_HW_VERSION (0x00006000)
+#define HDCP_KSV_LSB (0x000060D8)
+#define HDCP_KSV_MSB (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION (0x50010000)
+
+#define TOP_AND_BOTTOM (1 << HDMI_S3D_TOP_AND_BOTTOM)
+#define FRAME_PACKING (1 << HDMI_S3D_FRAME_PACKING)
+#define SIDE_BY_SIDE_HALF (1 << HDMI_S3D_SIDE_BY_SIDE)
+
+#define LPASS_LPAIF_RDDMA_CTL0 (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0 (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4 4
+#define HDMI_TX_VERSION_3 3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
+
+/* HDCP secure registers directly accessible to HLOS since HDMI controller
+ * major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA (0x00000028)
+
+/*
+ * Offsets in HDMI_DDC_INT_CTRL0 register
+ *
+ * The HDMI_DDC_INT_CTRL0 register is intended for HDCP 2.2 RxStatus
+ * register manipulation. It reads like this:
+ *
+ * Bit 31: RXSTATUS_MESSAGE_SIZE_MASK (1 = generate interrupt when size > 0)
+ * Bit 30: RXSTATUS_MESSAGE_SIZE_ACK (1 = Acknowledge message size intr)
+ * Bits 29-20: RXSTATUS_MESSAGE_SIZE (Actual size of message available)
+ * Bits 19-18: RXSTATUS_READY_MASK (1 = generate interrupt when ready = 1
+ * 2 = generate interrupt when ready = 0)
+ * Bit 17: RXSTATUS_READY_ACK (1 = Acknowledge ready bit interrupt)
+ * Bit 16: RXSTATUS_READY (1 = Rxstatus ready bit read is 1)
+ * Bit 15: RXSTATUS_READY_NOT (1 = Rxstatus ready bit read is 0)
+ * Bit 14: RXSTATUS_REAUTH_REQ_MASK (1 = generate interrupt when reauth is
+ * requested by sink)
+ * Bit 13: RXSTATUS_REAUTH_REQ_ACK (1 = Acknowledge Reauth req interrupt)
+ * Bit 12: RXSTATUS_REAUTH_REQ (1 = Rxstatus reauth req bit read is 1)
+ * Bit 10: RXSTATUS_DDC_FAILED_MASK (1 = generate interrupt when DDC
+ *		transaction fails)
+ * Bit 9: RXSTATUS_DDC_FAILED_ACK (1 = Acknowledge ddc failure interrupt)
+ * Bit 8: RXSTATUS_DDC_FAILED (1 = DDC transaction failed)
+ * Bit 6: RXSTATUS_DDC_DONE_MASK (1 = generate interrupt when DDC
+ * transaction completes)
+ * Bit 5: RXSTATUS_DDC_DONE_ACK (1 = Acknowledge ddc done interrupt)
+ * Bit 4: RXSTATUS_DDC_DONE (1 = DDC transaction is done)
+ * Bit 2: RXSTATUS_DDC_REQ_MASK (1 = generate interrupt when DDC Read
+ * request for RXstatus is made)
+ * Bit 1: RXSTATUS_DDC_REQ_ACK (1 = Acknowledge Rxstatus read interrupt)
+ * Bit 0: RXSTATUS_DDC_REQ (1 = RXStatus DDC read request is made)
+ *
+ */
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_SHIFT 20
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_MASK 0x3ff00000
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_ACK_SHIFT 30
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_INTR_SHIFT 31
+
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_SHIFT 12
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_MASK 1
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_ACK_SHIFT 13
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_INTR_SHIFT 14
+
+#define HDCP2P2_RXSTATUS_READY_SHIFT 16
+#define HDCP2P2_RXSTATUS_READY_MASK 1
+#define HDCP2P2_RXSTATUS_READY_ACK_SHIFT 17
+#define HDCP2P2_RXSTATUS_READY_INTR_SHIFT 18
+#define HDCP2P2_RXSTATUS_READY_INTR_MASK 18
+
+#define HDCP2P2_RXSTATUS_DDC_FAILED_SHIFT 8
+#define HDCP2P2_RXSTATUS_DDC_FAILED_ACKSHIFT 9
+#define HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK 10
+#define HDCP2P2_RXSTATUS_DDC_DONE 6
+
+/*
+ * Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
+ * read by the hardware
+ */
+#define HDCP2P2_RXSTATUS_HW_DDC_DISABLE 0
+#define HDCP2P2_RXSTATUS_HW_DDC_AUTOMATIC_LOOP 1
+#define HDCP2P2_RXSTATUS_HW_DDC_FORCE_LOOP 2
+#define HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER 3
+
+/* default hsyncs for 4k@60 for 200ms */
+#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571
+
+enum hdmi_tx_feature_type {
+ HDMI_TX_FEAT_EDID = BIT(0),
+ HDMI_TX_FEAT_HDCP = BIT(1),
+ HDMI_TX_FEAT_HDCP2P2 = BIT(2),
+ HDMI_TX_FEAT_CEC_HW = BIT(3),
+ HDMI_TX_FEAT_CEC_ABST = BIT(4),
+ HDMI_TX_FEAT_PANEL = BIT(5),
+ HDMI_TX_FEAT_MAX = HDMI_TX_FEAT_EDID | HDMI_TX_FEAT_HDCP |
+ HDMI_TX_FEAT_HDCP2P2 | HDMI_TX_FEAT_CEC_HW |
+ HDMI_TX_FEAT_CEC_ABST | HDMI_TX_FEAT_PANEL
+};
+
+enum hdmi_tx_scdc_access_type {
+ HDMI_TX_SCDC_SCRAMBLING_STATUS,
+ HDMI_TX_SCDC_SCRAMBLING_ENABLE,
+ HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+ HDMI_TX_SCDC_CLOCK_DET_STATUS,
+ HDMI_TX_SCDC_CH0_LOCK_STATUS,
+ HDMI_TX_SCDC_CH1_LOCK_STATUS,
+ HDMI_TX_SCDC_CH2_LOCK_STATUS,
+ HDMI_TX_SCDC_CH0_ERROR_COUNT,
+ HDMI_TX_SCDC_CH1_ERROR_COUNT,
+ HDMI_TX_SCDC_CH2_ERROR_COUNT,
+ HDMI_TX_SCDC_READ_ENABLE,
+ HDMI_TX_SCDC_MAX,
+};
+
+enum hdmi_tx_ddc_timer_type {
+ HDMI_TX_DDC_TIMER_HDCP2P2_RD_MSG,
+ HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS,
+ HDMI_TX_DDC_TIMER_UPDATE_FLAGS,
+ HDMI_TX_DDC_TIMER_STATUS_FLAGS,
+ HDMI_TX_DDC_TIMER_CED,
+ HDMI_TX_DDC_TIMER_MAX,
+};
+
+/*
+ * struct hdmi_tx_ddc_data - parameters for a single DDC transaction,
+ * consumed by hdmi_ddc_read()/hdmi_ddc_write() via ctrl->ddc_data
+ */
+struct hdmi_tx_ddc_data {
+	char *what;		/* human-readable label used in log messages */
+	u8 *data_buf;		/* caller-owned buffer read from / written to */
+	u32 data_len;		/* valid bytes in data_buf */
+	u32 dev_addr;		/* I2C slave address (e.g. 0xA8 for SCDC) */
+	u32 offset;		/* register offset within the slave */
+	u32 request_len;	/* bytes requested on the wire */
+	u32 retry_align;
+	u32 hard_timeout;
+	u32 timeout_left;	/* remaining timeout reported back to caller */
+	int retry;		/* number of retry attempts */
+};
+
+enum hdmi_tx_hdcp2p2_rxstatus_intr_mask {
+ RXSTATUS_MESSAGE_SIZE = BIT(31),
+ RXSTATUS_READY = BIT(18),
+ RXSTATUS_REAUTH_REQ = BIT(14),
+};
+
+/*
+ * struct hdmi_tx_hdcp2p2_ddc_data - state for hardware-driven HDCP 2.2
+ * RxStatus reads (see hdmi_hdcp2p2_ddc_read_rxstatus())
+ */
+struct hdmi_tx_hdcp2p2_ddc_data {
+	enum hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask; /* intrs to enable */
+	u32 timeout_ms;			/* busy-wait budget in milliseconds */
+	u32 timeout_hsync;		/* HW timeout in hsync lines */
+	u32 periodic_timer_hsync;	/* HW request timer in hsync lines */
+	u32 timeout_left;		/* ms remaining after the wait */
+	u32 read_method;
+	u32 message_size;
+	bool encryption_ready;
+	bool ready;			/* RxStatus.ready observed */
+	bool reauth_req;		/* RxStatus.reauth_req observed */
+	bool ddc_max_retries_fail;
+	bool ddc_done;
+	bool ddc_read_req;
+	bool ddc_timeout;
+	bool wait;			/* busy-wait for completion if true */
+	int irq_wait_count;
+	void (*link_cb)(void *data);	/* called on link events with link_data */
+	void *link_data;
+};
+
+/*
+ * struct hdmi_tx_ddc_ctrl - per-controller DDC context shared between
+ * the DDC helpers and their interrupt/completion signalling
+ */
+struct hdmi_tx_ddc_ctrl {
+	atomic_t write_busy_wait_done;	/* set when a write busy-wait may end */
+	atomic_t read_busy_wait_done;	/* set when a read busy-wait may end */
+	atomic_t rxstatus_busy_wait_done; /* set when rxstatus wait may end */
+	struct mdss_io_data *io;	/* mapped HDMI TX register block */
+	struct completion ddc_sw_done;
+	struct hdmi_tx_ddc_data ddc_data;	/* current DDC transaction */
+	struct hdmi_tx_hdcp2p2_ddc_data hdcp2p2_ddc_data;
+};
+
+
+struct hdmi_util_ds_data {
+ bool ds_registered;
+ u32 ds_max_clk;
+};
+
+/*
+ * hdmi_tx_get_v_total() - total vertical lines of a timing mode
+ * (active + front porch + pulse width + back porch), or 0 when @t is NULL
+ */
+static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t)
+{
+	if (t) {
+		return t->active_v + t->front_porch_v + t->pulse_width_v +
+			t->back_porch_v;
+	}
+
+	return 0;
+}
+
+/*
+ * hdmi_tx_get_h_total() - total horizontal pixels of a timing mode
+ * (active + front porch + pulse width + back porch), or 0 when @t is NULL
+ */
+static inline int hdmi_tx_get_h_total(const struct msm_hdmi_mode_timing_info *t)
+{
+	if (t) {
+		return t->active_h + t->front_porch_h + t->pulse_width_h +
+			t->back_porch_h;
+	}
+
+	return 0;
+}
+
+/* video timing related utility routines */
+int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
+ struct hdmi_util_ds_data *ds_data);
+int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
+ struct hdmi_util_ds_data *ds_data, u32 mode);
+ssize_t hdmi_get_video_3d_fmt_2string(u32 format, char *buf, u32 size);
+const char *msm_hdmi_mode_2string(u32 mode);
+int hdmi_set_resv_timing_info(struct msm_hdmi_mode_timing_info *mode);
+bool hdmi_is_valid_resv_timing(int mode);
+void hdmi_reset_resv_timing_info(void);
+
+/* todo: Fix this. Right now this is defined in mdss_hdmi_tx.c */
+void *hdmi_get_featuredata_from_sysfs_dev(struct device *device, u32 type);
+
+/* DDC */
+void hdmi_ddc_config(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_isr(struct hdmi_tx_ddc_ctrl *ctrl, u32 version);
+int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_abort_transaction(struct hdmi_tx_ddc_ctrl *ctrl);
+
+int hdmi_scdc_read(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 *val);
+int hdmi_scdc_write(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 val);
+int hdmi_setup_ddc_timers(struct hdmi_tx_ddc_ctrl *ctrl,
+ u32 type, u32 to_in_num_lines);
+void hdmi_scrambler_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
+void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
+ u32 timeout_ms);
+
+#endif /* __HDMI_UTIL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_io_util.c b/drivers/video/fbdev/msm/mdss_io_util.c
new file mode 100644
index 0000000..3117793
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_io_util.c
@@ -0,0 +1,552 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mdss_io_util.h>
+
+#define MAX_I2C_CMDS 16
+/*
+ * mdss_reg_w() - write a 32-bit value to a mapped MDSS register
+ * @io:     mapped register block (base/len set by msm_mdss_ioremap_byname)
+ * @offset: byte offset from io->base
+ * @value:  value to write
+ * @debug:  when non-zero, read the register back and log both values
+ *
+ * Uses writel_relaxed(), so no memory barrier is implied by this call.
+ */
+void mdss_reg_w(struct mdss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	/*
+	 * NOTE(review): offset == io->len passes this check but is one past
+	 * the mapping; '>=' looks like the intended comparison - confirm.
+	 */
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		in_val = readl_relaxed(io->base + offset);
+		DEV_DBG("[%08x] => %08x [%08x]\n",
+			(u32)(unsigned long)(io->base + offset),
+
+			value, in_val);
+	}
+} /* mdss_reg_w */
+EXPORT_SYMBOL(mdss_reg_w);
+
+/*
+ * mdss_reg_r() - read a 32-bit value from a mapped MDSS register
+ * @io:     mapped register block
+ * @offset: byte offset from io->base
+ * @debug:  when non-zero, log the address and value read
+ *
+ * Return: the register value, or -EINVAL (as a u32 - callers cannot
+ * distinguish this from a register that reads 0xFFFFFFEA) on bad input.
+ */
+u32 mdss_reg_r(struct mdss_io_data *io, u32 offset, u32 debug)
+{
+	u32 value;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): same off-by-one as mdss_reg_w(); '>=' looks intended */
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		DEV_DBG("[%08x] <= %08x\n",
+			(u32)(unsigned long)(io->base + offset), value);
+
+	return value;
+} /* mdss_reg_r */
+EXPORT_SYMBOL(mdss_reg_r);
+
+/*
+ * mdss_reg_dump() - hex-dump @length bytes of a register block to the
+ * kernel log (32 bytes per row, 4-byte groups); no-op when @debug is 0
+ */
+void mdss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	u32 debug)
+{
+	if (debug)
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			(void *)base, length, false);
+} /* mdss_reg_dump */
+EXPORT_SYMBOL(mdss_reg_dump);
+
+/*
+ * msm_mdss_get_res_byname() - look up a named platform resource,
+ * logging an error when it is absent
+ *
+ * Return: the resource, or NULL if not found.
+ */
+static struct resource *msm_mdss_get_res_byname(struct platform_device *pdev,
+	unsigned int type, const char *name)
+{
+	struct resource *res = NULL;
+
+	res = platform_get_resource_byname(pdev, type, name);
+	if (!res)
+		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+	return res;
+} /* msm_mdss_get_res_byname */
+EXPORT_SYMBOL(msm_mdss_get_res_byname);
+
+/*
+ * msm_mdss_ioremap_byname() - ioremap a named IORESOURCE_MEM region into
+ * @io_data (fills io_data->base and io_data->len)
+ * @pdev:    platform device owning the resource
+ * @io_data: output; unmap later with msm_mdss_iounmap()
+ * @name:    resource name to look up
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ENODEV if the
+ * resource is missing, -EIO if ioremap fails.
+ */
+int msm_mdss_ioremap_byname(struct platform_device *pdev,
+	struct mdss_io_data *io_data, const char *name)
+{
+	struct resource *res = NULL;
+
+	if (!pdev || !io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	res = msm_mdss_get_res_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DEV_ERR("%pS->%s: '%s' msm_mdss_get_res_byname failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -ENODEV;
+	}
+
+	io_data->len = (u32)resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -EIO;
+	}
+
+	return 0;
+} /* msm_mdss_ioremap_byname */
+EXPORT_SYMBOL(msm_mdss_ioremap_byname);
+
+/*
+ * msm_mdss_iounmap() - undo msm_mdss_ioremap_byname(); safe to call on a
+ * never-mapped or already-unmapped io_data (base is NULLed after unmap)
+ */
+void msm_mdss_iounmap(struct mdss_io_data *io_data)
+{
+	if (!io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (io_data->base) {
+		iounmap(io_data->base);
+		io_data->base = NULL;
+	}
+	io_data->len = 0;
+} /* msm_mdss_iounmap */
+EXPORT_SYMBOL(msm_mdss_iounmap);
+
+/*
+ * msm_mdss_config_vreg() - acquire (config=1) or release (config=0) a set
+ * of regulators
+ * @dev:      device used for regulator_get()
+ * @in_vreg:  array of regulator descriptors
+ * @num_vreg: number of entries in @in_vreg
+ * @config:   1 = get regulators and set LDO voltages; 0 = drop voltage
+ *            constraints and put regulators
+ *
+ * Regulators with selectable voltages (regulator_count_voltages() > 0)
+ * are treated as LDOs and get a min/max voltage; others are treated as
+ * voltage switches.
+ *
+ * NOTE(review): the error path is unusual - on a get/set-voltage failure
+ * it unwinds the already-acquired regulators by repeatedly jumping from
+ * the vreg_get_fail loop back to vreg_unconfig, falling through
+ * vreg_set_voltage_fail each time.  It appears to release one regulator
+ * per jump, but the control flow depends on exact label fall-through
+ * order; confirm before restructuring.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int msm_mdss_config_vreg(struct device *dev, struct mdss_vreg *in_vreg,
+	int num_vreg, int config)
+{
+	int i = 0, rc = 0;
+	struct mdss_vreg *curr_vreg = NULL;
+	enum mdss_vreg_type type;
+
+	if (!in_vreg || !num_vreg)
+		return rc;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+					 __builtin_return_address(0), __func__,
+					 curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+			if (type == DSS_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					DEV_ERR("%pS->%s: %s set vltg fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+			}
+		}
+	} else {
+		/* release in reverse order of acquisition */
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				type = (regulator_count_voltages(
+					curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+				if (type == DSS_REG_LDO) {
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+if (type == DSS_REG_LDO)
+	regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+			? DSS_REG_LDO : DSS_REG_VS;
+		goto vreg_unconfig;
+	}
+	return rc;
+} /* msm_mdss_config_vreg */
+EXPORT_SYMBOL(msm_mdss_config_vreg);
+
+/*
+ * msm_mdss_config_vreg_opt_mode() - apply the load (in uA, from
+ * in_vreg[i].load[mode]) for the given mode to every regulator in the set
+ * @in_vreg:  array of regulator descriptors (must already be acquired)
+ * @num_vreg: number of entries in @in_vreg
+ * @mode:     index into each entry's load[] table; must be < DSS_REG_MODE_MAX
+ *
+ * Stops at the first failing regulator; already-processed entries are
+ * not rolled back.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int msm_mdss_config_vreg_opt_mode(struct mdss_vreg *in_vreg, int num_vreg,
+	enum mdss_vreg_mode mode)
+{
+	int i = 0, rc = 0;
+
+	if (mode >= DSS_REG_MODE_MAX) {
+		pr_err("%pS->%s: invalid mode %d\n",
+			__builtin_return_address(0), __func__, mode);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	for (i = 0; i < num_vreg; i++) {
+		rc = PTR_RET(in_vreg[i].vreg);
+		if (rc) {
+			DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				in_vreg[i].vreg_name, rc);
+			goto error;
+		}
+
+		DEV_DBG("%s: Setting optimum mode %d for %s (load=%d)\n",
+			__func__, mode, in_vreg[i].vreg_name,
+			in_vreg[i].load[mode]);
+		rc = regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].load[mode]);
+		if (rc < 0) {
+			DEV_ERR("%pS->%s: %s set opt mode failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				in_vreg[i].vreg_name, rc);
+			goto error;
+		} else {
+			/*
+			 * regulator_set_load can return non-zero
+			 * value for success. However, this API is expected
+			 * to return 0 for success.
+			 */
+			rc = 0;
+		}
+	}
+
+error:
+	return rc;
+}
+EXPORT_SYMBOL(msm_mdss_config_vreg_opt_mode);
+
+/*
+ * msm_mdss_enable_vreg() - enable or disable a set of regulators in order
+ * @in_vreg:  array of regulator descriptors (already acquired)
+ * @num_vreg: number of entries in @in_vreg
+ * @enable:   non-zero enables in array order; zero disables in reverse order
+ *
+ * On enable, each regulator gets its DSS_REG_MODE_ENABLE load applied and
+ * optional pre/post sleeps (*_on_sleep, in ms) when it was not already
+ * enabled.  On any failure, regulators enabled so far are unwound: their
+ * DSS_REG_MODE_DISABLE load is applied and they are disabled, with the
+ * pre/post off sleeps honoured.
+ *
+ * Return: 0 on success, negative errno on the first failure.
+ */
+int msm_mdss_enable_vreg(struct mdss_vreg *in_vreg, int num_vreg, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+			/* only sleep around a genuine off->on transition */
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+					in_vreg[i].pre_on_sleep * 1000);
+			rc = regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].load[DSS_REG_MODE_ENABLE]);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s set opt m fail\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			/* post-on sleep happens even if enable failed */
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+			regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].load[DSS_REG_MODE_DISABLE]);
+
+			if (regulator_is_enabled(in_vreg[i].vreg))
+				regulator_disable(in_vreg[i].vreg);
+
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+disable_vreg:
+	/* the failing entry had its load set; restore its disable load */
+	regulator_set_load(in_vreg[i].vreg,
+		in_vreg[i].load[DSS_REG_MODE_DISABLE]);
+
+vreg_set_opt_mode_fail:
+	/* unwind every previously enabled regulator, newest first */
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+				in_vreg[i].pre_off_sleep * 1000);
+		regulator_set_load(in_vreg[i].vreg,
+			in_vreg[i].load[DSS_REG_MODE_DISABLE]);
+		regulator_disable(in_vreg[i].vreg);
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+				in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+} /* msm_mdss_enable_vreg */
+EXPORT_SYMBOL(msm_mdss_enable_vreg);
+
+/*
+ * msm_mdss_enable_gpio() - request+drive or free an array of GPIOs
+ * @in_gpio: GPIO descriptors (gpio number, name, value to drive)
+ * @num_gpio: number of entries in @in_gpio
+ * @enable: non-zero requests each GPIO and drives its value; zero frees
+ *          them in reverse order
+ *
+ * On a request failure every GPIO acquired so far is released again.
+ * Returns 0 on success or the gpio_request() error code.
+ */
+int msm_mdss_enable_gpio(struct mdss_gpio *in_gpio, int num_gpio, int enable)
+{
+	int rc = 0;
+	int idx;
+
+	if (!enable) {
+		/* release in reverse order of acquisition */
+		for (idx = num_gpio - 1; idx >= 0; idx--) {
+			DEV_DBG("%pS->%s: %s disable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[idx].gpio_name);
+			if (in_gpio[idx].gpio)
+				gpio_free(in_gpio[idx].gpio);
+		}
+		return 0;
+	}
+
+	for (idx = 0; idx < num_gpio; idx++) {
+		DEV_DBG("%pS->%s: %s enable\n",
+			__builtin_return_address(0), __func__,
+			in_gpio[idx].gpio_name);
+
+		rc = gpio_request(in_gpio[idx].gpio, in_gpio[idx].gpio_name);
+		if (rc < 0) {
+			DEV_ERR("%pS->%s: %s enable failed\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[idx].gpio_name);
+			goto release;
+		}
+		gpio_set_value(in_gpio[idx].gpio, in_gpio[idx].value);
+	}
+	return rc;
+
+release:
+	/* free only the GPIOs acquired before the failing one */
+	while (--idx >= 0)
+		if (in_gpio[idx].gpio)
+			gpio_free(in_gpio[idx].gpio);
+
+	return rc;
+} /* msm_mdss_enable_gpio */
+EXPORT_SYMBOL(msm_mdss_enable_gpio);
+
+/* Release and forget every clock handle in @clk_arry, in reverse order. */
+void msm_mdss_put_clk(struct mdss_clk *clk_arry, int num_clk)
+{
+	int idx = num_clk;
+
+	while (--idx >= 0) {
+		if (clk_arry[idx].clk)
+			clk_put(clk_arry[idx].clk);
+		clk_arry[idx].clk = NULL;
+	}
+} /* msm_mdss_put_clk */
+EXPORT_SYMBOL(msm_mdss_put_clk);
+
+/*
+ * msm_mdss_get_clk() - look up every clock named in @clk_arry
+ * @dev: device used for the clk_get() lookups
+ * @clk_arry: clock descriptors; ->clk_name must be set, ->clk is filled in
+ * @num_clk: number of entries in @clk_arry
+ *
+ * On a lookup failure all handles obtained so far are released through
+ * msm_mdss_put_clk() and the error code is returned; 0 on success.
+ */
+int msm_mdss_get_clk(struct device *dev, struct mdss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			/*
+			 * Clear the ERR_PTR so the cleanup below (and any
+			 * later msm_mdss_put_clk()) never hands an error
+			 * pointer to clk_put() — put_clk only checks for
+			 * non-NULL.
+			 */
+			clk_arry[i].clk = NULL;
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	msm_mdss_put_clk(clk_arry, num_clk);
+
+	return rc;
+} /* msm_mdss_get_clk */
+EXPORT_SYMBOL(msm_mdss_get_clk);
+
+/*
+ * msm_mdss_clk_set_rate() - program the rate of every non-AHB clock
+ * @clk_arry: clock descriptors with handles and target ->rate set
+ * @num_clk: number of entries in @clk_arry
+ *
+ * Stops at the first failure. Returns 0 on success, the clk_set_rate()
+ * error, or -EPERM when a handle is missing.
+ */
+int msm_mdss_clk_set_rate(struct mdss_clk *clk_arry, int num_clk)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < num_clk; i++) {
+		struct mdss_clk *entry = &clk_arry[i];
+
+		if (!entry->clk) {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				entry->clk_name);
+			rc = -EPERM;
+			break;
+		}
+
+		/* AHB-type clocks are never rate-programmed here */
+		if (entry->type == DSS_CLK_AHB)
+			continue;
+
+		DEV_DBG("%pS->%s: '%s' rate %ld\n",
+			__builtin_return_address(0), __func__,
+			entry->clk_name, entry->rate);
+		rc = clk_set_rate(entry->clk, entry->rate);
+		if (rc) {
+			DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				entry->clk_name, rc);
+			break;
+		}
+	}
+
+	return rc;
+} /* msm_mdss_clk_set_rate */
+EXPORT_SYMBOL(msm_mdss_clk_set_rate);
+
+/*
+ * msm_mdss_enable_clk() - prepare+enable or disable+unprepare a clock set
+ * @clk_arry: clock descriptors with handles already acquired
+ * @num_clk: number of entries in @clk_arry
+ * @enable: non-zero enables in array order, zero disables in reverse
+ *
+ * On an enable failure, the clocks already enabled are rolled back by a
+ * recursive disable call. Returns 0 on success, the clk_prepare_enable()
+ * error, or -EPERM when a handle is missing.
+ */
+int msm_mdss_enable_clk(struct mdss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				/*
+				 * Roll back entries 0..i-1. Fix: pass the
+				 * array base — the previous code passed
+				 * &clk_arry[i], which disabled entries
+				 * i..2i-1 (never enabled, possibly out of
+				 * bounds) instead of the enabled ones.
+				 */
+				msm_mdss_enable_clk(clk_arry, i, false);
+				break;
+			}
+		}
+	} else {
+		/* disable in reverse order of enabling */
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+} /* msm_mdss_enable_clk */
+EXPORT_SYMBOL(msm_mdss_enable_clk);
+
+
+/*
+ * mdss_i2c_byte_read() - read one byte from an I2C device register
+ * @client: i2c client whose adapter performs the transfer
+ * @slave_addr: 8-bit slave address (shifted down to the 7-bit form)
+ * @reg_offset: register offset to read from
+ * @read_buf: out parameter receiving the byte read
+ *
+ * Issues a combined write(offset) / read(1 byte) transfer.
+ * Returns 0 on success or -EACCES if the transfer did not complete.
+ */
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *read_buf)
+{
+	struct i2c_msg msgs[2];
+	int ret = -1;
+
+	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	/* fix mojibake: "&reg_offset" had been corrupted to "(R)_offset" */
+	msgs[0].buf = &reg_offset;
+	msgs[0].len = 1;
+
+	msgs[1].addr = slave_addr >> 1;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buf;
+	msgs[1].len = 1;
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret < 1) {
+		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+		return -EACCES;
+	}
+	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+	return 0;
+}
+EXPORT_SYMBOL(mdss_i2c_byte_read);
+
+/*
+ * mdss_i2c_byte_write() - write one byte to an I2C device register
+ * @client: i2c client whose adapter performs the transfer
+ * @slave_addr: 8-bit slave address (shifted down to the 7-bit form)
+ * @reg_offset: register offset to write
+ * @value: pointer to the byte to write
+ *
+ * Returns the i2c_transfer() result (1) on success, -EACCES on failure.
+ */
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value)
+{
+	int status;
+	uint8_t payload[2];
+	struct i2c_msg msg;
+
+	pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	/* single message carrying register offset followed by the value */
+	payload[0] = reg_offset;
+	payload[1] = *value;
+
+	msg.addr = slave_addr >> 1;
+	msg.flags = 0;
+	msg.len = 2;
+	msg.buf = payload;
+
+	status = i2c_transfer(client->adapter, &msg, 1);
+	if (status < 1) {
+		pr_err("I2C WRITE FAILED=[%d]\n", status);
+		return -EACCES;
+	}
+	pr_debug("%s: I2C write status=%x\n", __func__, status);
+	return status;
+}
+EXPORT_SYMBOL(mdss_i2c_byte_write);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
new file mode 100644
index 0000000..a9a5d8f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -0,0 +1,5114 @@
+/*
+ * MDSS MDP Interface (used by framebuffer core)
+ *
+ * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/rpm-smd.h>
+
+#include "mdss.h"
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_debug.h"
+#include "mdss_smmu.h"
+
+#include "mdss_mdp_trace.h"
+
+#define AXI_HALT_TIMEOUT_US 0x4000
+#define AUTOSUSPEND_TIMEOUT_MS 200
+#define DEFAULT_MDP_PIPE_WIDTH 2048
+#define RES_1080p (1088*1920)
+#define RES_UHD (3840*2160)
+
+struct mdss_data_type *mdss_res;
+static u32 mem_protect_sd_ctrl_id;
+
+/* Return the SMMU domain id used for unsecure framebuffer memory. */
+static int mdss_fb_mem_get_iommu_domain(void)
+{
+	return mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE);
+}
+
+struct msm_mdp_interface mdp5 = {
+ .init_fnc = mdss_mdp_overlay_init,
+ .fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
+ .fb_stride = mdss_mdp_fb_stride,
+ .check_dsi_status = mdss_check_dsi_ctrl_status,
+ .get_format_params = mdss_mdp_get_format_params,
+};
+
+#define IB_QUOTA 2000000000
+#define AB_QUOTA 2000000000
+
+#define MAX_AXI_PORT_COUNT 3
+
+#define MEM_PROTECT_SD_CTRL 0xF
+#define MEM_PROTECT_SD_CTRL_FLAT 0x14
+
+static DEFINE_SPINLOCK(mdp_lock);
+static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
+static DEFINE_MUTEX(mdp_clk_lock);
+static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
+static DEFINE_MUTEX(mdp_fs_idle_pc_lock);
+
+static struct mdss_panel_intf pan_types[] = {
+ {"dsi", MDSS_PANEL_INTF_DSI},
+ {"edp", MDSS_PANEL_INTF_EDP},
+ {"hdmi", MDSS_PANEL_INTF_HDMI},
+};
+static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
+
+struct mdss_hw mdss_mdp_hw = {
+ .hw_ndx = MDSS_HW_MDP,
+ .ptr = NULL,
+ .irq_handler = mdss_mdp_isr,
+};
+
+/* define for h/w block with external driver */
+struct mdss_hw mdss_misc_hw = {
+ .hw_ndx = MDSS_HW_MISC,
+ .ptr = NULL,
+ .irq_handler = NULL,
+};
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_AMPSS_M0, \
+ .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+#define BUS_VOTE_19_MHZ 153600000
+#define BUS_VOTE_40_MHZ 320000000
+#define BUS_VOTE_80_MHZ 640000000
+
+static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
+ MDP_REG_BUS_VECTOR_ENTRY(0, 0),
+ MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
+ MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
+ MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
+};
+static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
+ mdp_reg_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
+ .usecase = mdp_reg_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
+ .name = "mdss_reg",
+ .active_only = true,
+};
+#endif
+
+u32 invalid_mdp107_wb_output_fmts[] = {
+ MDP_XRGB_8888,
+ MDP_RGBX_8888,
+ MDP_BGRX_8888,
+};
+
+/*
+ * struct intr_call - array of intr handlers
+ * @func: intr handler
+ * @arg: requested argument to the handler
+ */
+struct intr_callback {
+ void (*func)(void *);
+ void *arg;
+};
+
+/*
+ * struct mdss_mdp_intr_reg - array of MDP intr register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct mdss_mdp_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/*
+ * struct mdss_mdp_irq - maps each irq with i/f
+ * @intr_type: type of interface
+ * @intf_num: i/f the irq is associated with
+ * @irq_mask: corresponding bit in the reg set
+ * @reg_idx: which reg set to program
+ */
+struct mdss_mdp_irq {
+ u32 intr_type;
+ u32 intf_num;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+static struct mdss_mdp_intr_reg mdp_intr_reg[] = {
+ { MDSS_MDP_REG_INTR_CLEAR, MDSS_MDP_REG_INTR_EN,
+ MDSS_MDP_REG_INTR_STATUS },
+ { MDSS_MDP_REG_INTR2_CLEAR, MDSS_MDP_REG_INTR2_EN,
+ MDSS_MDP_REG_INTR2_STATUS }
+};
+
+/*
+ * Look-up table mapping an (intr_type, intf_num) pair to its interrupt
+ * mask bit and the register set (reg_idx into mdp_intr_reg) to program.
+ */
+static struct mdss_mdp_irq mdp_irq_map[] =  {
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 1,
+		MDSS_MDP_INTR_INTF_0_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 2,
+		MDSS_MDP_INTR_INTF_1_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 3,
+		MDSS_MDP_INTR_INTF_2_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 4,
+		MDSS_MDP_INTR_INTF_3_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 1,
+		MDSS_MDP_INTR_INTF_0_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 2,
+		MDSS_MDP_INTR_INTF_1_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 3,
+		MDSS_MDP_INTR_INTF_2_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 4,
+		MDSS_MDP_INTR_INTF_3_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 0,
+		MDSS_MDP_INTR_PING_PONG_0_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 1,
+		MDSS_MDP_INTR_PING_PONG_1_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 2,
+		MDSS_MDP_INTR_PING_PONG_2_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 3,
+		MDSS_MDP_INTR_PING_PONG_3_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 0,
+		MDSS_MDP_INTR_PING_PONG_0_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 1,
+		MDSS_MDP_INTR_PING_PONG_1_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 2,
+		MDSS_MDP_INTR_PING_PONG_2_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 3,
+		MDSS_MDP_INTR_PING_PONG_3_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 0,
+		MDSS_MDP_INTR_PING_PONG_0_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 1,
+		MDSS_MDP_INTR_PING_PONG_1_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 2,
+		MDSS_MDP_INTR_PING_PONG_2_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 3,
+		MDSS_MDP_INTR_PING_PONG_3_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 0,
+		MDSS_MDP_INTR_WB_0_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 1,
+		MDSS_MDP_INTR_WB_1_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_WB_WFD_COMP, 0,
+		MDSS_MDP_INTR_WB_2_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 0,
+		MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 1,
+		MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2,
+		MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3,
+		MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2,
+		MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1},
+	/*
+	 * NOTE(review): this intf-3 entry reuses the PING_PONG_2 overflow
+	 * bit — looks like a copy-paste; confirm whether a
+	 * MDSS_MDP_INTR2_PING_PONG_3_CWB_OVERFLOW bit exists and is meant.
+	 */
+	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3,
+		MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}
+};
+
+static struct intr_callback *mdp_intr_cb;
+
+static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
+static int mdss_mdp_parse_dt(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_wb(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
+ char *prop_name, u32 *offsets, int len);
+static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
+ char *prop_name);
+static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev);
+
+/* Return 1 when any MDP, histogram or intf interrupt is still unmasked. */
+static inline u32 is_mdp_irq_enabled(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int idx;
+
+	if (mdata->mdp_hist_irq_mask || mdata->mdp_intf_irq_mask)
+		return 1;
+
+	for (idx = 0; idx < ARRAY_SIZE(mdp_intr_reg); idx++)
+		if (mdata->mdp_irq_mask[idx])
+			return 1;
+
+	return 0;
+}
+
+/*
+ * Compute the framebuffer row stride in bytes. The adreno GPU writes
+ * directly to fb0 and requires its pitch aligned to 32 pixels, so fb0's
+ * width is rounded up before multiplying by bytes-per-pixel.
+ */
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	u32 width = (fb_index == 0) ? ALIGN(xres, 32) : xres;
+
+	return width * bpp;
+}
+
+/* irq_chip .irq_mask hook: gate the misc HW interrupt for domain irqs. */
+static void mdss_irq_mask(struct irq_data *data)
+{
+	unsigned long flags;
+	struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
+
+	if (!mdata)
+		return;
+
+	pr_debug("irq_domain_mask %lu\n", data->hwirq);
+
+	/* only hwirqs inside the 32-bit status word are handled here */
+	if (data->hwirq >= 32)
+		return;
+
+	spin_lock_irqsave(&mdp_lock, flags);
+	mdata->mdss_util->disable_irq(&mdss_misc_hw);
+	spin_unlock_irqrestore(&mdp_lock, flags);
+}
+
+/* irq_chip .irq_unmask hook: re-enable the misc HW interrupt. */
+static void mdss_irq_unmask(struct irq_data *data)
+{
+	unsigned long flags;
+	struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
+
+	if (!mdata)
+		return;
+
+	pr_debug("irq_domain_unmask %lu\n", data->hwirq);
+
+	/* only hwirqs inside the 32-bit status word are handled here */
+	if (data->hwirq >= 32)
+		return;
+
+	spin_lock_irqsave(&mdp_lock, flags);
+	mdata->mdss_util->enable_irq(&mdss_misc_hw);
+	spin_unlock_irqrestore(&mdp_lock, flags);
+}
+
+static struct irq_chip mdss_irq_chip = {
+ .name = "mdss",
+ .irq_mask = mdss_irq_mask,
+ .irq_unmask = mdss_irq_unmask,
+};
+
+/* irq_domain .map hook: attach the mdss irq_chip and driver data to @virq. */
+static int mdss_irq_domain_map(struct irq_domain *d,
+		unsigned int virq, irq_hw_number_t hw)
+{
+	struct mdss_data_type *mdata = d->host_data;
+	/* check here if virq is a valid interrupt line */
+	irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq);
+	irq_set_chip_data(virq, mdata);
+	return 0;
+}
+
+const struct irq_domain_ops mdss_irq_domain_ops = {
+ .map = mdss_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * Top-level MDSS interrupt handler. Reads the consolidated HW interrupt
+ * status and dispatches each asserted source (MDP, DSI0/1, EDP, HDMI) to
+ * its registered sub-handler; leftover bits are forwarded to the mdss
+ * irq_domain so external drivers can service them.
+ */
+static irqreturn_t mdss_irq_handler(int irq, void *ptr)
+{
+	struct mdss_data_type *mdata = ptr;
+	u32 intr;
+
+	if (!mdata)
+		return IRQ_NONE;
+	else if (!mdss_get_irq_enable_state(&mdss_mdp_hw))
+		return IRQ_HANDLED;
+
+	intr = MDSS_REG_READ(mdata, MDSS_REG_HW_INTR_STATUS);
+
+	mdss_mdp_hw.irq_info->irq_buzy = true;
+
+	/* only the MDP dispatch runs under mdp_lock; the others do not */
+	if (intr & MDSS_INTR_MDP) {
+		spin_lock(&mdp_lock);
+		mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
+		spin_unlock(&mdp_lock);
+		intr &= ~MDSS_INTR_MDP;
+	}
+
+	if (intr & MDSS_INTR_DSI0) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr);
+		intr &= ~MDSS_INTR_DSI0;
+	}
+
+	if (intr & MDSS_INTR_DSI1) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr);
+		intr &= ~MDSS_INTR_DSI1;
+	}
+
+	if (intr & MDSS_INTR_EDP) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr);
+		intr &= ~MDSS_INTR_EDP;
+	}
+
+	if (intr & MDSS_INTR_HDMI) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr);
+		intr &= ~MDSS_INTR_HDMI;
+	}
+
+	/* route misc. interrupts to external drivers */
+	while (intr) {
+		irq_hw_number_t hwirq = fls(intr) - 1;
+
+		generic_handle_irq(irq_find_mapping(
+				mdata->irq_domain, hwirq));
+		intr &= ~(1 << hwirq);
+	}
+
+	mdss_mdp_hw.irq_info->irq_buzy = false;
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+/*
+ * mdss_mdp_bus_scale_register() - register the MDP data, register and
+ * hw-rt bus-scale clients (each only once) and post an initial RT
+ * bandwidth request. Returns 0 on success or a negative error code.
+ */
+static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
+{
+	struct msm_bus_scale_pdata *reg_bus_pdata;
+	int i, rc;
+
+	if (!mdata->bus_hdl) {
+		rc = mdss_mdp_parse_dt_bus_scale(mdata->pdev);
+		if (rc) {
+			pr_err("Error in device tree : bus scale\n");
+			return rc;
+		}
+
+		mdata->bus_hdl =
+			msm_bus_scale_register_client(mdata->bus_scale_table);
+		if (!mdata->bus_hdl) {
+			pr_err("bus_client register failed\n");
+			return -EINVAL;
+		}
+
+		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
+	}
+
+	if (!mdata->reg_bus_scale_table) {
+		/* no table from DT: fall back to the static reg-bus vectors */
+		reg_bus_pdata = &mdp_reg_bus_scale_table;
+		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
+			mdp_reg_bus_usecases[i].num_paths = 1;
+			mdp_reg_bus_usecases[i].vectors =
+				&mdp_reg_bus_vectors[i];
+		}
+		mdata->reg_bus_scale_table = reg_bus_pdata;
+	}
+
+	if (!mdata->reg_bus_hdl) {
+		mdata->reg_bus_hdl =
+			msm_bus_scale_register_client(
+					mdata->reg_bus_scale_table);
+		if (!mdata->reg_bus_hdl)
+			/* Continue without reg_bus scaling */
+			pr_warn("reg_bus_client register failed\n");
+		else
+			pr_debug("register reg_bus_hdl=%x\n",
+					mdata->reg_bus_hdl);
+	}
+
+	if (mdata->hw_rt_bus_scale_table && !mdata->hw_rt_bus_hdl) {
+		mdata->hw_rt_bus_hdl =
+			msm_bus_scale_register_client(
+					mdata->hw_rt_bus_scale_table);
+		if (!mdata->hw_rt_bus_hdl)
+			/* Continue without reg_bus scaling */
+			pr_warn("hw_rt_bus client register failed\n");
+		else
+			pr_debug("register hw_rt_bus=%x\n",
+					mdata->hw_rt_bus_hdl);
+	}
+
+	/*
+	 * Following call will not result in actual vote rather update the
+	 * current index and ab/ib value. When continuous splash is enabled,
+	 * actual vote will happen when splash handoff is done.
+	 */
+	return mdss_bus_scale_set_quota(MDSS_MDP_RT, AB_QUOTA, IB_QUOTA);
+}
+
+/*
+ * mdss_mdp_bus_scale_unregister() - drop every bus-scale client handle
+ * @mdata: MDSS driver data owning the handles
+ *
+ * Releases the data, register and hw-rt bus clients taken in
+ * mdss_mdp_bus_scale_register() and zeroes all handles so a later
+ * register/unregister cycle starts from a clean state.
+ */
+static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
+{
+	pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
+
+	if (mdata->bus_hdl) {
+		msm_bus_scale_unregister_client(mdata->bus_hdl);
+		/*
+		 * Previously left stale, which made a later
+		 * mdss_mdp_bus_scale_register() skip re-registration and
+		 * risked votes on a freed handle; clear it like the
+		 * other handles below.
+		 */
+		mdata->bus_hdl = 0;
+	}
+
+	pr_debug("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
+
+	if (mdata->reg_bus_hdl) {
+		msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
+		mdata->reg_bus_hdl = 0;
+	}
+
+	if (mdata->hw_rt_bus_hdl) {
+		msm_bus_scale_unregister_client(mdata->hw_rt_bus_hdl);
+		mdata->hw_rt_bus_hdl = 0;
+	}
+}
+
+/*
+ * mdss_mdp_bus_scale_set_quota() - program data-bus ab/ib bandwidth votes
+ *
+ * Splits the RT and NRT ab/ib quotas across the AXI ports, skips the vote
+ * when nothing changed, and rotates through the usecase slots of the
+ * bus-scale table (slot 0 is reserved for the all-zero vote).
+ *
+ * Caller needs to hold mdata->bus_lock lock before calling this function.
+ */
+static int mdss_mdp_bus_scale_set_quota(u64 ab_quota_rt, u64 ab_quota_nrt,
+		u64 ib_quota_rt, u64 ib_quota_nrt)
+{
+	int new_uc_idx;
+	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
+	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
+	int rc;
+
+	if (mdss_res->bus_hdl < 1) {
+		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
+		return -EINVAL;
+	}
+
+	/* an all-zero request maps onto the reserved zero usecase slot */
+	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt) {
+		new_uc_idx = 0;
+	} else {
+		int i;
+		struct msm_bus_vectors *vect = NULL;
+		struct msm_bus_scale_pdata *bw_table =
+			mdss_res->bus_scale_table;
+		u32 nrt_axi_port_cnt = mdss_res->nrt_axi_port_cnt;
+		u32 total_axi_port_cnt = mdss_res->axi_port_cnt;
+		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
+		int match_cnt = 0;
+
+		if (!bw_table || !total_axi_port_cnt ||
+		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
+			pr_err("invalid input\n");
+			return -EINVAL;
+		}
+
+		/* ib votes are divided across the available bus channels */
+		if (mdss_res->bus_channels) {
+			ib_quota_rt = div_u64(ib_quota_rt,
+						mdss_res->bus_channels);
+			ib_quota_nrt = div_u64(ib_quota_nrt,
+						mdss_res->bus_channels);
+		}
+
+		if (mdss_res->has_fixed_qos_arbiter_enabled ||
+			nrt_axi_port_cnt) {
+
+			/* RT ports first, then dedicated NRT ports */
+			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
+			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);
+
+			for (i = 0; i < total_axi_port_cnt; i++) {
+				if (i < rt_axi_port_cnt) {
+					ab_quota[i] = ab_quota_rt;
+					ib_quota[i] = ib_quota_rt;
+				} else {
+					ab_quota[i] = ab_quota_nrt;
+					ib_quota[i] = ib_quota_nrt;
+				}
+			}
+		} else {
+			/* no dedicated NRT ports: spread the sum evenly */
+			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
+					total_axi_port_cnt);
+			ib_quota[0] = ib_quota_rt + ib_quota_nrt;
+
+			for (i = 1; i < total_axi_port_cnt; i++) {
+				ab_quota[i] = ab_quota[0];
+				ib_quota[i] = ib_quota[0];
+			}
+		}
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase
+				[mdss_res->curr_bw_uc_idx].vectors[i];
+			/* avoid performing updates for small changes */
+			if ((ab_quota[i] == vect->ab) &&
+				(ib_quota[i] == vect->ib))
+				match_cnt++;
+		}
+
+		if (match_cnt == total_axi_port_cnt) {
+			pr_debug("skip BW vote\n");
+			return 0;
+		}
+
+		/* rotate through slots 1..N-1 so the vote lands in a
+		 * slot different from the currently active one */
+		new_uc_idx = (mdss_res->curr_bw_uc_idx %
+			(bw_table->num_usecases - 1)) + 1;
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase[new_uc_idx].vectors[i];
+			vect->ab = ab_quota[i];
+			vect->ib = ib_quota[i];
+
+			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
+				, i, vect->ab, vect->ib);
+		}
+	}
+	mdss_res->curr_bw_uc_idx = new_uc_idx;
+	mdss_res->ao_bw_uc_idx = new_uc_idx;
+
+	/* defer the actual vote while no bus reference is held */
+	if ((mdss_res->bus_ref_cnt == 0) && mdss_res->curr_bw_uc_idx) {
+		rc = 0;
+	} else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
+		ATRACE_BEGIN("msm_bus_scale_req");
+		rc = msm_bus_scale_client_update_request(mdss_res->bus_hdl,
+			new_uc_idx);
+		ATRACE_END("msm_bus_scale_req");
+	}
+	return rc;
+}
+
+/*
+ * mdss_reg_bus_vote_client_create() - allocate and register a reg-bus
+ * vote client named @client_name, starting at VOTE_INDEX_DISABLE.
+ * Returns the client, or an ERR_PTR on bad name / allocation failure.
+ */
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
+{
+	static u32 id;
+	struct reg_bus_client *client;
+
+	if (!client_name) {
+		pr_err("client name is null\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&mdss_res->reg_bus_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	pr_debug("bus vote client %s created:%pK id :%d\n", client_name,
+		client, id);
+	id++;
+	list_add(&client->list, &mdss_res->reg_bus_clist);
+	mutex_unlock(&mdss_res->reg_bus_lock);
+
+	return client;
+}
+
+/* Unlink a reg-bus vote client from the client list and free it. */
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
+{
+	if (!client) {
+		pr_err("reg bus vote: invalid client handle\n");
+		return;
+	}
+
+	pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+		client->name, client, client->id);
+	mutex_lock(&mdss_res->reg_bus_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&mdss_res->reg_bus_lock);
+	kfree(client);
+}
+
+/*
+ * mdss_update_reg_bus_vote() - update @bus_client's reg-bus vote and apply
+ * the maximum usecase index across all registered clients if it changed.
+ * Returns 0 when there is nothing to do (no handle/client, or no change),
+ * otherwise the result of the bus request.
+ */
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
+{
+	int ret = 0;
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
+	struct reg_bus_client *client, *temp_client;
+
+	if (!mdss_res || !mdss_res->reg_bus_hdl || !bus_client)
+		return 0;
+
+	mutex_lock(&mdss_res->reg_bus_lock);
+	bus_client->usecase_ndx = usecase_ndx;
+	/* aggregate vote is the maximum of all clients' valid votes */
+	list_for_each_entry_safe(client, temp_client, &mdss_res->reg_bus_clist,
+		list) {
+
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (mdss_res->reg_bus_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		mdss_res->reg_bus_usecase_ndx = max_usecase_ndx;
+	}
+
+	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		bus_client->name, bus_client->id, usecase_ndx);
+	MDSS_XLOG(changed, max_usecase_ndx, bus_client->id, usecase_ndx);
+	if (changed)
+		ret = msm_bus_scale_client_update_request(mdss_res->reg_bus_hdl,
+			max_usecase_ndx);
+
+	mutex_unlock(&mdss_res->reg_bus_lock);
+	return ret;
+}
+
+/*
+ * mdss_bus_scale_set_quota() - record @client's ab/ib request and re-vote
+ * the aggregate: the NRT client is tracked on its own, RT clients have
+ * their ab summed and ib max'd. Returns the underlying vote result.
+ */
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	u64 ab_rt = 0, ib_rt = 0;
+	u64 ab_nrt = 0, ib_nrt = 0;
+	int i, rc;
+
+	mutex_lock(&mdss_res->bus_lock);
+
+	mdss_res->ab[client] = ab_quota;
+	mdss_res->ib[client] = ib_quota;
+	trace_mdp_perf_update_bus(client, ab_quota, ib_quota);
+
+	for (i = 0; i < MDSS_MAX_BUS_CLIENTS; i++) {
+		if (i == MDSS_MDP_NRT) {
+			ab_nrt = mdss_res->ab[i];
+			ib_nrt = mdss_res->ib[i];
+		} else {
+			ab_rt += mdss_res->ab[i];
+			ib_rt = max(ib_rt, mdss_res->ib[i]);
+		}
+	}
+
+	rc = mdss_mdp_bus_scale_set_quota(ab_rt, ab_nrt, ib_rt, ib_nrt);
+
+	mutex_unlock(&mdss_res->bus_lock);
+
+	return rc;
+}
+#else
+/* Stub: bus scaling compiled out (no CONFIG_QCOM_BUS_SCALING). */
+static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
+{
+	return 0;
+}
+
+/* Stub: nothing was registered, so nothing to unregister. */
+static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
+{
+}
+
+/* Stub: log the request and report success — no bus scaling available. */
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n",
+			client, ab_quota, ib_quota);
+
+	return 0;
+}
+
+/*
+ * Stub: returns NULL (not an ERR_PTR); mdss_update_reg_bus_vote() treats
+ * a NULL client as "no vote" and returns 0.
+ */
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
+{
+	return NULL;
+}
+
+/* Stub: no client was ever allocated, so nothing to free. */
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
+{
+}
+
+/* Stub: log the request and report success — no reg-bus scaling. */
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
+{
+	pr_debug("%pS: No reg scaling! usecase=%u\n",
+		__builtin_return_address(0), usecase_ndx);
+
+	return 0;
+}
+#endif
+
+
+/* Map an (intr_type, intf_num) pair to its mdp_irq_map index, or -EINVAL. */
+static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(mdp_irq_map); idx++) {
+		const struct mdss_mdp_irq *entry = &mdp_irq_map[idx];
+
+		if (entry->intr_type == intr_type &&
+		    entry->intf_num == intf_num)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+
+/* Return the irq mask bit for (intr_type, intf_num), or 0 if unknown. */
+u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num)
+{
+	int idx = mdss_mdp_intr2index(intr_type, intf_num);
+
+	if (idx < 0)
+		return 0;
+
+	return mdp_irq_map[idx].irq_mask;
+}
+
+/* Unmask the top-level MDP hardware interrupt line. */
+void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata)
+{
+	mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+}
+
+/* Mask the top-level MDP line, but only once no sub-interrupt is enabled. */
+void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata)
+{
+	if (!is_mdp_irq_enabled())
+		mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+}
+
+/* function assumes that mdp is clocked to access hw registers */
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+		u32 intr_type, u32 intf_num)
+{
+	int idx;
+	unsigned long flags;
+	struct mdss_mdp_irq irq;
+	struct mdss_mdp_intr_reg reg;
+
+	idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq = mdp_irq_map[idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	pr_debug("clearing mdp irq mask=%x\n", irq.irq_mask);
+
+	/* write the mask bit to the CLEAR register under mdp_lock */
+	spin_lock_irqsave(&mdp_lock, flags);
+	writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
+	spin_unlock_irqrestore(&mdp_lock, flags);
+}
+
+/*
+ * mdss_mdp_irq_enable() - unmask one MDP interrupt source
+ * @intr_type: mdp interface type
+ * @intf_num: mdp interface number
+ *
+ * Clears any stale status bit, sets the mask bit in the ENABLE register
+ * and raises the top-level MDP irq line. Returns -EINVAL for an unknown
+ * (intr_type, intf_num) pair, -EBUSY if the bit is already enabled,
+ * 0 otherwise.
+ */
+int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
+{
+	int irq_idx;
+	unsigned long irq_flags;
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_intr_reg reg;
+	struct mdss_mdp_irq irq;
+
+	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (irq_idx < 0) {
+		pr_err("invalid irq request\n");
+		return -EINVAL;
+	}
+
+	irq = mdp_irq_map[irq_idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
+		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
+				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+		ret = -EBUSY;
+	} else {
+		pr_debug("MDP IRQ mask old=%x new=%x\n",
+				mdata->mdp_irq_mask[irq.reg_idx], irq.irq_mask);
+		mdata->mdp_irq_mask[irq.reg_idx] |= irq.irq_mask;
+		/* clear stale status before enabling the source */
+		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
+		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
+				mdata->mdp_base + reg.en_off);
+		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+
+	return ret;
+}
+/*
+ * mdss_mdp_hist_irq_enable() - unmask histogram interrupt bit(s)
+ * @irq: mask bit(s) in the HIST_INTR registers
+ *
+ * Clears stale status, programs HIST_INTR_EN and raises the top-level MDP
+ * irq line. Returns -EBUSY if the bit is already enabled, 0 otherwise.
+ *
+ * NOTE(review): unlike mdss_mdp_irq_enable(), mdp_hist_irq_mask is
+ * read-modified-written here without taking mdp_lock — presumably the
+ * callers serialize; confirm.
+ */
+int mdss_mdp_hist_irq_enable(u32 irq)
+{
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->mdp_hist_irq_mask & irq) {
+		pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
+				irq, mdata->mdp_hist_irq_mask);
+		ret = -EBUSY;
+	} else {
+		pr_debug("mask old=%x new=%x\n",
+				mdata->mdp_hist_irq_mask, irq);
+		mdata->mdp_hist_irq_mask |= irq;
+		/* clear stale status before enabling the source */
+		writel_relaxed(irq, mdata->mdp_base +
+			MDSS_MDP_REG_HIST_INTR_CLEAR);
+		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
+			MDSS_MDP_REG_HIST_INTR_EN);
+		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_irq_disable() - mask one MDP interrupt source
+ * @intr_type: mdp interface type
+ * @intf_num: mdp interface number
+ *
+ * Drops the mask bit, reprograms the ENABLE register and releases the
+ * top-level MDP irq line once nothing remains enabled. Warns (and does
+ * nothing) if the bit was not set.
+ */
+void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
+{
+	int idx;
+	unsigned long flags;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_irq irq;
+	struct mdss_mdp_intr_reg reg;
+
+	idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq = mdp_irq_map[idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	spin_lock_irqsave(&mdp_lock, flags);
+	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+			irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+	} else {
+		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
+		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
+			mdata->mdp_base + reg.en_off);
+		if (!is_mdp_irq_enabled())
+			mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+	}
+	spin_unlock_irqrestore(&mdp_lock, flags);
+}
+
+/* This function is used to check and clear the status of MDP interrupts */
+void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num)
+{
+	int idx;
+	u32 pending;
+	unsigned long flags;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_irq irq;
+	struct mdss_mdp_intr_reg reg;
+
+	idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq = mdp_irq_map[idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	spin_lock_irqsave(&mdp_lock, flags);
+	pending = readl_relaxed(mdata->mdp_base + reg.status_off) &
+			irq.irq_mask;
+	if (pending) {
+		pr_debug("clearing irq: intr_type:%d, intf_num:%d\n",
+				intr_type, intf_num);
+		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
+	}
+	spin_unlock_irqrestore(&mdp_lock, flags);
+}
+
+/*
+ * Mask histogram interrupt bit(s) and release the top-level MDP irq line
+ * once no MDP interrupt remains enabled. Warns if the bit was not set.
+ */
+void mdss_mdp_hist_irq_disable(u32 irq)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!(mdata->mdp_hist_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+			irq, mdata->mdp_hist_irq_mask);
+		return;
+	}
+
+	mdata->mdp_hist_irq_mask &= ~irq;
+	writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
+		MDSS_MDP_REG_HIST_INTR_EN);
+	if (!is_mdp_irq_enabled())
+		mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+}
+
+/**
+ * mdss_mdp_irq_disable_nosync() - disable mdp irq
+ * @intr_type: mdp interface type
+ * @intf_num: mdp interface num
+ *
+ * This function is called from interrupt context
+ * mdp_lock is already held at up stream (mdss_irq_handler)
+ * therefore spin_lock(&mdp_lock) is not allowed here
+ *
+ */
+void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
+{
+ int irq_idx;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_intr_reg reg;
+ struct mdss_mdp_irq irq;
+
+ irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+ if (irq_idx < 0) {
+ pr_err("invalid irq request\n");
+ return;
+ }
+
+ irq = mdp_irq_map[irq_idx];
+ reg = mdp_intr_reg[irq.reg_idx];
+
+ if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
+ pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+ irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+ } else {
+ mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
+ writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
+ mdata->mdp_base + reg.en_off);
+ /* nosync variant: safe to call with the line's irq active */
+ if (!is_mdp_irq_enabled())
+ mdata->mdss_util->disable_irq_nosync(&mdss_mdp_hw);
+ }
+}
+
+/*
+ * mdss_mdp_set_intr_callback() - register a callback for an MDP interrupt
+ * @intr_type: mdp interrupt type
+ * @intf_num: mdp interface number
+ * @fnc_ptr: callback to invoke from the ISR (NULL to unregister)
+ * @arg: opaque argument passed back to @fnc_ptr
+ *
+ * Installs @fnc_ptr/@arg under mdss_mdp_intr_lock. Warns if a live
+ * callback is being replaced by another non-NULL one.
+ *
+ * Return: 0 on success, -EINVAL for an unknown type/interface pair.
+ */
+int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
+ void (*fnc_ptr)(void *), void *arg)
+{
+ unsigned long flags;
+ int index;
+
+ index = mdss_mdp_intr2index(intr_type, intf_num);
+ if (index < 0) {
+ /* fixed garbled log token "intf_numf_num" */
+ pr_warn("invalid intr type=%u intf_num=%u\n",
+ intr_type, intf_num);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&mdss_mdp_intr_lock, flags);
+ WARN(mdp_intr_cb[index].func && fnc_ptr,
+ "replacing current intr callback for ndx=%d\n", index);
+ mdp_intr_cb[index].func = fnc_ptr;
+ mdp_intr_cb[index].arg = arg;
+ spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags);
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_set_intr_callback_nosync() - register an MDP intr callback
+ * without taking mdss_mdp_intr_lock
+ * @intr_type: mdp interrupt type
+ * @intf_num: mdp interface number
+ * @fnc_ptr: callback to invoke from the ISR (NULL to unregister)
+ * @arg: opaque argument passed back to @fnc_ptr
+ *
+ * Lockless variant of mdss_mdp_set_intr_callback() for contexts where
+ * the lock is already held or cannot be taken.
+ *
+ * Return: 0 on success, -EINVAL for an unknown type/interface pair.
+ */
+int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
+ void (*fnc_ptr)(void *), void *arg)
+{
+ int index;
+
+ index = mdss_mdp_intr2index(intr_type, intf_num);
+ if (index < 0) {
+ /* fixed garbled log tokens "Typee"/"callbackack" */
+ pr_warn("invalid intr type=%u intf_num=%u\n",
+ intr_type, intf_num);
+ return -EINVAL;
+ }
+
+ WARN(mdp_intr_cb[index].func && fnc_ptr,
+ "replacing current intr callback for ndx=%d\n",
+ index);
+ mdp_intr_cb[index].func = fnc_ptr;
+ mdp_intr_cb[index].arg = arg;
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_intr_done() - dispatch the registered callback for one
+ * interrupt index. The func/arg pair is snapshotted under
+ * mdss_mdp_intr_lock, then invoked with the lock dropped so the
+ * callback may re-register or unregister itself.
+ */
+static inline void mdss_mdp_intr_done(int index)
+{
+ void (*fnc)(void *);
+ void *arg;
+
+ spin_lock(&mdss_mdp_intr_lock);
+ fnc = mdp_intr_cb[index].func;
+ arg = mdp_intr_cb[index].arg;
+ spin_unlock(&mdss_mdp_intr_lock);
+ if (fnc)
+ fnc(arg);
+}
+
+/*
+ * mdss_mdp_isr() - top-level MDP interrupt service routine
+ * @irq: interrupt line number (unused beyond the irq core)
+ * @ptr: struct mdss_data_type pointer passed at request_irq time
+ *
+ * For every interrupt register bank: read status, ack all pending bits,
+ * then dispatch callbacks for bits that are both pending and enabled.
+ * Bank 0 additionally feeds MISR CRC collection for the ping-pong,
+ * vsync and writeback done sources. Histogram interrupts are handled
+ * the same way via the dedicated HIST registers, and the video
+ * interface ISRs are chained at the end.
+ *
+ * Bails out early if clocks are off, since register reads would be
+ * invalid in that state.
+ */
+irqreturn_t mdss_mdp_isr(int irq, void *ptr)
+{
+ struct mdss_data_type *mdata = ptr;
+ u32 isr, mask, hist_isr, hist_mask;
+ int i, j;
+
+ if (!mdata->clk_ena)
+ return IRQ_HANDLED;
+
+ for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) {
+ struct mdss_mdp_intr_reg reg = mdp_intr_reg[i];
+
+ isr = readl_relaxed(mdata->mdp_base + reg.status_off);
+ if (isr == 0)
+ continue;
+
+ mask = readl_relaxed(mdata->mdp_base + reg.en_off);
+ /* ack everything pending, including unmasked bits */
+ writel_relaxed(isr, mdata->mdp_base + reg.clr_off);
+
+ pr_debug("%s: reg:%d isr=%x mask=%x\n",
+ __func__, i+1, isr, mask);
+
+ isr &= mask;
+ if (isr == 0)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(mdp_irq_map); j++)
+ if (mdp_irq_map[j].reg_idx == i &&
+ (isr & mdp_irq_map[j].irq_mask))
+ mdss_mdp_intr_done(j);
+ /* bank 0 sources also drive MISR CRC collection */
+ if (!i) {
+ if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
+ false);
+
+ if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
+ false);
+
+ if (isr & MDSS_MDP_INTR_INTF_0_VSYNC)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP,
+ true);
+
+ if (isr & MDSS_MDP_INTR_INTF_1_VSYNC)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
+ true);
+
+ if (isr & MDSS_MDP_INTR_INTF_2_VSYNC)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
+ true);
+
+ if (isr & MDSS_MDP_INTR_INTF_3_VSYNC)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI,
+ true);
+
+ if (isr & MDSS_MDP_INTR_WB_0_DONE)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
+ true);
+
+ if (isr & MDSS_MDP_INTR_WB_1_DONE)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
+ true);
+
+ if (isr & MDSS_MDP_INTR_WB_2_DONE)
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
+ true);
+ }
+ }
+
+ hist_isr = readl_relaxed(mdata->mdp_base +
+ MDSS_MDP_REG_HIST_INTR_STATUS);
+ if (hist_isr != 0) {
+ hist_mask = readl_relaxed(mdata->mdp_base +
+ MDSS_MDP_REG_HIST_INTR_EN);
+ writel_relaxed(hist_isr, mdata->mdp_base +
+ MDSS_MDP_REG_HIST_INTR_CLEAR);
+ hist_isr &= hist_mask;
+ if (hist_isr != 0)
+ mdss_mdp_hist_intr_done(hist_isr);
+ }
+
+ mdss_mdp_video_isr(mdata->video_intf, mdata->nintf);
+ return IRQ_HANDLED;
+}
+
+/*
+ * mdss_mdp_clk_update() - prepare+enable or disable+unprepare one MDSS clock
+ * @clk_idx: index into the mdp clock table (MDSS_CLK_*)
+ * @enable: nonzero to enable, zero to disable
+ *
+ * The vsync clock is pinned to 19.2 MHz before enabling.
+ *
+ * Return: 0 on success, -ENODEV if the clock was never registered,
+ * or the clk_prepare_enable() error code.
+ */
+static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
+{
+ int ret = -ENODEV;
+ struct clk *clk = mdss_mdp_get_clk(clk_idx);
+
+ if (clk) {
+ pr_debug("clk=%d en=%d\n", clk_idx, enable);
+ if (enable) {
+ if (clk_idx == MDSS_CLK_MDP_VSYNC)
+ clk_set_rate(clk, 19200000);
+ ret = clk_prepare_enable(clk);
+ } else {
+ clk_disable_unprepare(clk);
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+/*
+ * mdss_mdp_vsync_clk_enable() - toggle the MDP vsync clock
+ * @enable: desired state
+ * @locked: true if the caller already holds mdp_clk_lock
+ *
+ * Only touches the clock when the desired state differs from the
+ * cached vsync_ena, so repeated calls are idempotent.
+ *
+ * Return: 0 if nothing changed, otherwise the mdss_mdp_clk_update() result.
+ */
+int mdss_mdp_vsync_clk_enable(int enable, bool locked)
+{
+ int ret = 0;
+
+ pr_debug("clk enable=%d\n", enable);
+
+ if (!locked)
+ mutex_lock(&mdp_clk_lock);
+
+ if (mdss_res->vsync_ena != enable) {
+ mdss_res->vsync_ena = enable;
+ ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+ }
+
+ if (!locked)
+ mutex_unlock(&mdp_clk_lock);
+ return ret;
+}
+
+/*
+ * mdss_mdp_set_clk_rate() - program the MDP core clock rate
+ * @rate: requested rate in Hz
+ *
+ * The effective request is the max of @rate and the perf-tune floor,
+ * rounded via clk_round_rate() and capped at max_mdp_clk_rate. The
+ * clock is only reprogrammed when the rounded rate differs from the
+ * current one. Serialized with other clock operations by mdp_clk_lock.
+ */
+void mdss_mdp_set_clk_rate(unsigned long rate)
+{
+ struct mdss_data_type *mdata = mdss_res;
+ unsigned long clk_rate;
+ struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+ unsigned long min_clk_rate;
+
+ min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
+
+ if (clk) {
+ mutex_lock(&mdp_clk_lock);
+ if (min_clk_rate < mdata->max_mdp_clk_rate)
+ clk_rate = clk_round_rate(clk, min_clk_rate);
+ else
+ clk_rate = mdata->max_mdp_clk_rate;
+ if (IS_ERR_VALUE(clk_rate)) {
+ pr_err("unable to round rate err=%ld\n", clk_rate);
+ } else if (clk_rate != clk_get_rate(clk)) {
+ if (IS_ERR_VALUE((unsigned long)
+ clk_set_rate(clk, clk_rate)))
+ pr_err("clk_set_rate failed\n");
+ else
+ pr_debug("mdp clk rate=%lu\n", clk_rate);
+ }
+ mutex_unlock(&mdp_clk_lock);
+ } else {
+ pr_err("mdp src clk not setup properly\n");
+ }
+}
+
+/*
+ * mdss_mdp_get_clk_rate() - read the current rate of one MDSS clock
+ * @clk_idx: index into the mdp clock table (MDSS_CLK_*)
+ * @locked: true if the caller already holds mdp_clk_lock
+ *
+ * Return: the clock's rate in Hz, or 0 if the clock is not registered.
+ */
+unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked)
+{
+ unsigned long clk_rate = 0;
+ struct clk *clk = mdss_mdp_get_clk(clk_idx);
+
+ if (clk) {
+ if (!locked)
+ mutex_lock(&mdp_clk_lock);
+
+ clk_rate = clk_get_rate(clk);
+
+ if (!locked)
+ mutex_unlock(&mdp_clk_lock);
+ }
+
+ return clk_rate;
+}
+
+/**
+ * mdss_bus_rt_bw_vote() -- place bus bandwidth request
+ * @enable: value of enable or disable
+ *
+ * hw_rt table has two entries, 0 and Min Vote (1Mhz)
+ * while attaching SMMU and for few TZ operations which
+ * happen at very early stage, we will request Min Vote
+ * thru this handle.
+ *
+ * Votes are reference counted; the bus request is only updated on the
+ * 0->1 and 1->0 transitions.
+ */
+static int mdss_bus_rt_bw_vote(bool enable)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int rc = 0;
+ bool changed = false;
+
+ /* no-op without a hw_rt handle or while splash handoff is pending */
+ if (!mdata->hw_rt_bus_hdl || mdata->handoff_pending)
+ return 0;
+
+ if (enable) {
+ if (mdata->hw_rt_bus_ref_cnt == 0)
+ changed = true;
+ mdata->hw_rt_bus_ref_cnt++;
+ } else {
+ if (mdata->hw_rt_bus_ref_cnt != 0) {
+ mdata->hw_rt_bus_ref_cnt--;
+ if (mdata->hw_rt_bus_ref_cnt == 0)
+ changed = true;
+ } else {
+ pr_warn("%s: bus bw votes are not balanced\n",
+ __func__);
+ }
+ }
+
+ pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
+ __builtin_return_address(0), current->group_leader->comm,
+ mdata->hw_rt_bus_ref_cnt, changed, enable);
+
+ if (changed) {
+ rc = msm_bus_scale_client_update_request(mdata->hw_rt_bus_hdl,
+ enable ? 1 : 0);
+ if (rc)
+ pr_err("%s: Bus bandwidth vote failed\n", __func__);
+ }
+
+ return rc;
+}
+
+/**
+ * __mdss_mdp_reg_access_clk_enable - Enable minimum MDSS clocks required
+ * for register access
+ *
+ * Votes the register bus, the hw_rt bus handle and the AHB/AXI/core
+ * clocks on (or off, in reverse order) so MDSS registers can be read
+ * and written safely. Enable and disable calls must be balanced.
+ */
+static inline void __mdss_mdp_reg_access_clk_enable(
+ struct mdss_data_type *mdata, bool enable)
+{
+ if (enable) {
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_LOW);
+ mdss_bus_rt_bw_vote(true);
+ mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+ } else {
+ /* tear down in strict reverse order of enable */
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
+ mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
+ mdss_bus_rt_bw_vote(false);
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_DISABLE);
+ }
+}
+
+/*
+ * __mdss_mdp_vbif_halt() - halt AXI traffic on one VBIF instance
+ * @mdata: global mdss data
+ * @is_nrt: true for the non-real-time VBIF, false for the real-time one
+ *
+ * Forces the VBIF clock on, requests an AXI halt and polls HALT_CTRL1
+ * (bit 0) until the halt is acknowledged or AXI_HALT_TIMEOUT_US expires.
+ *
+ * Return: 0 on success (or when the selected VBIF has no base, since
+ * some targets lack an NRT port), -ETIMEDOUT if the ack never arrives.
+ */
+int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
+{
+ int rc = 0;
+ void __iomem *base;
+ u32 halt_ack_mask = BIT(0), status;
+
+ /* if not real time vbif */
+ if (is_nrt)
+ base = mdata->vbif_nrt_io.base;
+ else
+ base = mdata->vbif_io.base;
+
+ if (!base) {
+ /* some targets might not have a nrt port */
+ goto vbif_done;
+ }
+
+ /* force vbif clock on */
+ MDSS_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 1, is_nrt);
+
+ /* request halt */
+ MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 1, is_nrt);
+
+ rc = readl_poll_timeout(base +
+ MMSS_VBIF_AXI_HALT_CTRL1, status, (status &
+ halt_ack_mask),
+ 1000, AXI_HALT_TIMEOUT_US);
+ if (rc == -ETIMEDOUT) {
+ pr_err("VBIF axi is not halting. TIMEDOUT.\n");
+ goto vbif_done;
+ }
+
+ pr_debug("VBIF axi is halted\n");
+
+vbif_done:
+ return rc;
+}
+
+/**
+ * mdss_mdp_vbif_axi_halt() - Halt MDSS AXI ports
+ * @mdata: pointer to the global mdss data structure.
+ *
+ * This function can be called during deep suspend, display off or for
+ * debugging purposes. On success it should be assumed that AXI ports connected
+ * to RT VBIF are in idle state and would not fetch any more data.
+ */
+static void mdss_mdp_vbif_axi_halt(struct mdss_data_type *mdata)
+{
+ /* clocks must be up for the VBIF register writes below */
+ __mdss_mdp_reg_access_clk_enable(mdata, true);
+
+ /* real time ports */
+ __mdss_mdp_vbif_halt(mdata, false);
+ /* non-real time ports */
+ __mdss_mdp_vbif_halt(mdata, true);
+
+ __mdss_mdp_reg_access_clk_enable(mdata, false);
+}
+
+/*
+ * mdss_iommu_ctrl() - reference-counted SMMU attach/detach
+ * @enable: nonzero to take a reference (attaching on 0->1), zero to
+ * drop one (detaching on 1->0)
+ *
+ * Attach is deferred while continuous-splash handoff is pending, since
+ * the splash pipeline may still be using physical addresses.
+ *
+ * Return: negative error from attach/detach, otherwise the current
+ * reference count.
+ */
+int mdss_iommu_ctrl(int enable)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int rc = 0;
+
+ mutex_lock(&mdp_iommu_ref_cnt_lock);
+ pr_debug("%pS: enable:%d ref_cnt:%d attach:%d hoff:%d\n",
+ __builtin_return_address(0), enable, mdata->iommu_ref_cnt,
+ mdata->iommu_attached, mdata->handoff_pending);
+
+ if (enable) {
+ /*
+ * delay iommu attach until continuous splash screen has
+ * finished handoff, as it may still be working with phys addr
+ */
+ if (!mdata->iommu_attached && !mdata->handoff_pending) {
+ mdss_bus_rt_bw_vote(true);
+ rc = mdss_smmu_attach(mdata);
+ }
+ mdata->iommu_ref_cnt++;
+ } else {
+ if (mdata->iommu_ref_cnt) {
+ mdata->iommu_ref_cnt--;
+ if (mdata->iommu_ref_cnt == 0) {
+ rc = mdss_smmu_detach(mdata);
+ mdss_bus_rt_bw_vote(false);
+ }
+ } else {
+ pr_err("unbalanced iommu ref\n");
+ }
+ }
+ mutex_unlock(&mdp_iommu_ref_cnt_lock);
+
+ if (IS_ERR_VALUE((unsigned long)rc))
+ return rc;
+ else
+ return mdata->iommu_ref_cnt;
+}
+
+/*
+ * mdss_mdp_memory_retention_enter() - configure the MDP core clock's
+ * parent for memory retention while the block powers down: retain
+ * memory, allow periphery to be turned off.
+ */
+static void mdss_mdp_memory_retention_enter(void)
+{
+ struct clk *mdss_mdp_clk = NULL;
+ struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+
+ if (mdp_vote_clk) {
+ /* the retention flags apply to the parent (branch) clock */
+ mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
+ if (mdss_mdp_clk) {
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
+ }
+ }
+}
+
+/*
+ * mdss_mdp_memory_retention_exit() - undo the retention setup applied
+ * by mdss_mdp_memory_retention_enter(): retain both memory and
+ * periphery and clear the periphery-off request.
+ */
+static void mdss_mdp_memory_retention_exit(void)
+{
+ struct clk *mdss_mdp_clk = NULL;
+ struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+
+ if (mdp_vote_clk) {
+ mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
+ if (mdss_mdp_clk) {
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
+ }
+ }
+}
+
+/**
+ * mdss_mdp_idle_pc_restore() - Restore MDSS settings when exiting idle pc
+ *
+ * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
+ * mode displays, referred to as MDSS idle power collapse. Upon subsequent
+ * frame update, MDSS GDSC needs to turned back on and hw state needs to be
+ * restored.
+ */
+static int mdss_mdp_idle_pc_restore(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int rc = 0;
+
+ mutex_lock(&mdp_fs_idle_pc_lock);
+ if (!mdata->idle_pc) {
+ pr_debug("no idle pc, no need to restore\n");
+ goto end;
+ }
+
+ pr_debug("called from %pS\n", __builtin_return_address(0));
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE((unsigned long)rc)) {
+ pr_err("mdss iommu attach failed rc=%d\n", rc);
+ goto end;
+ }
+ mdss_hw_init(mdata);
+ mdss_iommu_ctrl(0);
+
+ /*
+ * sleep 10 microseconds to make sure AD auto-reinitialization
+ * is done
+ */
+ udelay(10);
+ mdss_mdp_memory_retention_exit();
+
+ mdss_mdp_ctl_restore(true);
+ mdata->idle_pc = false;
+
+end:
+ mutex_unlock(&mdp_fs_idle_pc_lock);
+ return rc;
+}
+
+/**
+ * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
+ * @enable: value of enable or disable
+ *
+ * Function place bus bandwidth request to allocate saved bandwidth
+ * if enabled or free bus bandwidth allocation if disabled.
+ * Bus bandwidth is required by mdp. For dsi, it is only required to
+ * send dcs commands. It returns error if bandwidth request fails.
+ *
+ * Requests are reference counted; the bus vote and the pm_runtime
+ * reference only change on the 0->1 and 1->0 transitions.
+ */
+void mdss_bus_bandwidth_ctrl(int enable)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int changed = 0;
+
+ mutex_lock(&mdata->bus_lock);
+ if (enable) {
+ if (mdata->bus_ref_cnt == 0)
+ changed++;
+ mdata->bus_ref_cnt++;
+ } else {
+ if (mdata->bus_ref_cnt) {
+ mdata->bus_ref_cnt--;
+ if (mdata->bus_ref_cnt == 0)
+ changed++;
+ } else {
+ pr_err("Can not be turned off\n");
+ }
+ }
+
+ pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
+ __builtin_return_address(0), current->group_leader->comm,
+ mdata->bus_ref_cnt, changed, enable);
+
+ if (changed) {
+ MDSS_XLOG(mdata->bus_ref_cnt, enable);
+
+ if (!enable) {
+ /* keep the vote alive while splash handoff is pending */
+ if (!mdata->handoff_pending) {
+ msm_bus_scale_client_update_request(
+ mdata->bus_hdl, 0);
+ mdata->ao_bw_uc_idx = 0;
+ }
+ pm_runtime_mark_last_busy(&mdata->pdev->dev);
+ pm_runtime_put_autosuspend(&mdata->pdev->dev);
+ } else {
+ pm_runtime_get_sync(&mdata->pdev->dev);
+ msm_bus_scale_client_update_request(
+ mdata->bus_hdl, mdata->curr_bw_uc_idx);
+ }
+ }
+
+ mutex_unlock(&mdata->bus_lock);
+}
+EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
+
+/*
+ * mdss_mdp_clk_ctrl() - reference-counted control of the core MDP clocks
+ * @enable: nonzero to take a clock reference, zero to drop one
+ *
+ * On the 0->1 transition: takes a pm_runtime reference, votes the
+ * register bus, attaches the SMMU, switches the bus vote to
+ * Active+Sleep and enables AHB/AXI/core/LUT (and vsync when in use).
+ * On the 1->0 transition the same steps are undone in reverse and the
+ * bus vote is moved to Active-Only. After enabling, any pending idle
+ * power-collapse state is restored.
+ */
+void mdss_mdp_clk_ctrl(int enable)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ static int mdp_clk_cnt;
+ unsigned long flags;
+ int changed = 0;
+ int rc = 0;
+
+ mutex_lock(&mdp_clk_lock);
+ if (enable) {
+ if (mdp_clk_cnt == 0)
+ changed++;
+ mdp_clk_cnt++;
+ } else {
+ if (mdp_clk_cnt) {
+ mdp_clk_cnt--;
+ if (mdp_clk_cnt == 0)
+ changed++;
+ } else {
+ pr_err("Can not be turned off\n");
+ }
+ }
+
+ if (changed)
+ MDSS_XLOG(mdp_clk_cnt, enable, current->pid);
+
+ /* log the clock refcount, not bus_ref_cnt (copy-paste fix) */
+ pr_debug("%pS: task:%s clk_cnt=%d changed=%d enable=%d\n",
+ __builtin_return_address(0), current->group_leader->comm,
+ mdp_clk_cnt, changed, enable);
+
+ if (changed) {
+ if (enable) {
+ pm_runtime_get_sync(&mdata->pdev->dev);
+
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_LOW);
+
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE((unsigned long)rc))
+ pr_err("IOMMU attach failed\n");
+
+ /* Active+Sleep */
+ msm_bus_scale_client_update_context(mdata->bus_hdl,
+ false, mdata->curr_bw_uc_idx);
+ }
+
+ spin_lock_irqsave(&mdp_lock, flags);
+ mdata->clk_ena = enable;
+ spin_unlock_irqrestore(&mdp_lock, flags);
+
+ mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
+ if (mdata->vsync_ena)
+ mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+
+ if (!enable) {
+ /* release iommu control */
+ mdss_iommu_ctrl(0);
+
+ /* Active-Only */
+ msm_bus_scale_client_update_context(mdata->bus_hdl,
+ true, mdata->ao_bw_uc_idx);
+
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_DISABLE);
+
+ pm_runtime_mark_last_busy(&mdata->pdev->dev);
+ pm_runtime_put_autosuspend(&mdata->pdev->dev);
+ }
+ }
+
+ if (enable && changed)
+ mdss_mdp_idle_pc_restore();
+
+ mutex_unlock(&mdp_clk_lock);
+}
+
+/*
+ * mdss_mdp_irq_clk_register() - look up one named clock and cache it
+ * @mdata: global mdss data
+ * @clk_name: DT clock-names entry to fetch
+ * @clk_idx: slot in mdata->mdp_clk to store it (MDSS_CLK_*)
+ *
+ * Return: 0 on success, -EINVAL for a bad index, or the devm_clk_get()
+ * error code.
+ */
+static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
+ char *clk_name, int clk_idx)
+{
+ struct clk *tmp;
+
+ if (clk_idx >= MDSS_MAX_CLK) {
+ pr_err("invalid clk index %d\n", clk_idx);
+ return -EINVAL;
+ }
+
+ tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
+ if (IS_ERR(tmp)) {
+ pr_err("unable to get clk: %s\n", clk_name);
+ return PTR_ERR(tmp);
+ }
+
+ mdata->mdp_clk[clk_idx] = tmp;
+ return 0;
+}
+
+/* SCM device id for MDSS used by scm_restore_sec_cfg() */
+#define SEC_DEVICE_MDSS 1
+
+/*
+ * __mdss_restore_sec_cfg() - ask TZ to restore MDSS secure configuration
+ * after the GDSC comes back up. Skipped entirely on targets that set
+ * MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED. Minimum clocks are enabled around
+ * the SCM call since it touches MDSS registers.
+ */
+static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
+{
+ int ret, scm_ret = 0;
+
+ if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
+ return;
+
+ pr_debug("restoring mdss secure config\n");
+
+ __mdss_mdp_reg_access_clk_enable(mdata, true);
+
+ ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
+ if (ret || scm_ret)
+ pr_warn("scm_restore_sec_cfg failed %d %d\n",
+ ret, scm_ret);
+
+ __mdss_mdp_reg_access_clk_enable(mdata, false);
+}
+
+/*
+ * mdss_mdp_gdsc_notifier_call() - regulator notifier for the MDSS GDSC
+ *
+ * On REGULATOR_EVENT_ENABLE, restores the TZ secure configuration
+ * (unless the target keeps SMMU powered, making restore unnecessary).
+ * On REGULATOR_EVENT_PRE_DISABLE, halts VBIF AXI traffic so no
+ * transactions are in flight when the GDSC collapses.
+ */
+static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct mdss_data_type *mdata;
+
+ mdata = container_of(self, struct mdss_data_type, gdsc_cb);
+
+ if (event & REGULATOR_EVENT_ENABLE) {
+ /*
+ * As SMMU in low tier targets is not power collapsible,
+ * hence we don't need to restore sec configuration.
+ */
+ if (!mdss_mdp_req_init_restore_cfg(mdata))
+ __mdss_restore_sec_cfg(mdata);
+ } else if (event & REGULATOR_EVENT_PRE_DISABLE) {
+ pr_debug("mdss gdsc is getting disabled\n");
+ /* halt the vbif transactions */
+ mdss_mdp_vbif_axi_halt(mdata);
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * mdss_mdp_irq_clk_setup() - probe-time setup of irq, regulators and clocks
+ * @mdata: global mdss data
+ *
+ * Reads the max clock rate from DT, requests (then masks) the MDSS irq,
+ * acquires the gdsc/venus/cx regulators, registers a GDSC notifier,
+ * creates the register-bus client and looks up all MDSS clocks.
+ * bus/iface/core clocks are mandatory; lut and vsync clocks are
+ * optional and failures to get them are ignored.
+ *
+ * Return: 0 on success, negative errno on any mandatory step failing.
+ */
+static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
+{
+ int ret;
+
+ ret = of_property_read_u32(mdata->pdev->dev.of_node,
+ "qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
+ if (ret) {
+ pr_err("failed to get max mdp clock rate\n");
+ return ret;
+ }
+
+ pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);
+
+ ret = devm_request_irq(&mdata->pdev->dev, mdss_mdp_hw.irq_info->irq,
+ mdss_irq_handler, 0, "MDSS", mdata);
+ if (ret) {
+ pr_err("mdp request_irq() failed!\n");
+ return ret;
+ }
+ /* keep the line masked until the driver is ready to service it */
+ disable_irq(mdss_mdp_hw.irq_info->irq);
+
+ mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
+ if (IS_ERR_OR_NULL(mdata->fs)) {
+ mdata->fs = NULL;
+ pr_err("unable to get gdsc regulator\n");
+ return -EINVAL;
+ }
+
+ mdata->venus = devm_regulator_get_optional(&mdata->pdev->dev,
+ "gdsc-venus");
+ if (IS_ERR_OR_NULL(mdata->venus)) {
+ mdata->venus = NULL;
+ pr_debug("unable to get venus gdsc regulator\n");
+ }
+
+ mdata->fs_ena = false;
+
+ mdata->gdsc_cb.notifier_call = mdss_mdp_gdsc_notifier_call;
+ mdata->gdsc_cb.priority = 5;
+ if (regulator_register_notifier(mdata->fs, &(mdata->gdsc_cb)))
+ pr_warn("GDSC notification registration failed!\n");
+ else
+ mdata->regulator_notif_register = true;
+
+ mdata->vdd_cx = devm_regulator_get_optional(&mdata->pdev->dev,
+ "vdd-cx");
+ if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
+ pr_debug("unable to get CX reg. rc=%d\n",
+ PTR_RET(mdata->vdd_cx));
+ mdata->vdd_cx = NULL;
+ }
+
+ mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
+ if (IS_ERR(mdata->reg_bus_clt)) {
+ pr_err("bus client register failed\n");
+ return PTR_ERR(mdata->reg_bus_clt);
+ }
+
+ if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
+ mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
+ mdss_mdp_irq_clk_register(mdata, "core_clk",
+ MDSS_CLK_MDP_CORE))
+ return -EINVAL;
+
+ /* lut_clk is not present on all MDSS revisions */
+ mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT);
+
+ /* vsync_clk is optional for non-smart panels */
+ mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC);
+
+ /* Setting the default clock rate to the max supported.*/
+ mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
+ pr_debug("mdp clk rate=%ld\n",
+ mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));
+
+ return 0;
+}
+
+/*
+ * mdss_debug_enable_clock() - debugfs hook mapping an on/off flag to
+ * the reference-counted mdss_mdp_clk_ctrl() power calls.
+ */
+static void mdss_debug_enable_clock(int on)
+{
+ if (on)
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ else
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * mdss_mdp_debug_init() - set up debugfs nodes and register dump ranges
+ * @pdev: mdss platform device
+ * @mdata: global mdss data
+ *
+ * Registers the clock hook, creates the generic and MDP-specific
+ * debugfs trees, and exposes the mdp/vbif/vbif_nrt register spaces
+ * for debug dumps.
+ *
+ * Return: 0 on success; on mdp-debugfs failure the generic debugfs
+ * tree is torn down before returning the error.
+ */
+static int mdss_mdp_debug_init(struct platform_device *pdev,
+ struct mdss_data_type *mdata)
+{
+ int rc;
+ struct mdss_debug_base *dbg_blk;
+
+ mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;
+
+ rc = mdss_debugfs_init(mdata);
+ if (rc)
+ return rc;
+
+ rc = mdss_mdp_debugfs_init(mdata);
+ if (rc) {
+ mdss_debugfs_remove(mdata);
+ return rc;
+ }
+
+ mdss_debug_register_io("mdp", &mdata->mdss_io, &dbg_blk);
+ mdss_debug_register_dump_range(pdev, dbg_blk, "qcom,regs-dump-mdp",
+ "qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");
+
+ if (mdata->vbif_io.base)
+ mdss_debug_register_io("vbif", &mdata->vbif_io, NULL);
+ if (mdata->vbif_nrt_io.base)
+ mdss_debug_register_io("vbif_nrt", &mdata->vbif_nrt_io, NULL);
+
+ return 0;
+}
+
+/*
+ * mdss_get_props() - read the target properties fuse word.
+ * Maps a fixed physical address (0xFC4B8114 — presumably a QFPROM
+ * corrected-value register on this target; confirm against the SoC
+ * memory map), reads one word and unmaps. Returns 0 if ioremap fails.
+ */
+static u32 mdss_get_props(void)
+{
+ u32 props = 0;
+ void __iomem *props_base = ioremap(0xFC4B8114, 4);
+
+ if (props_base) {
+ props = readl_relaxed(props_base);
+ iounmap(props_base);
+ }
+ return props;
+}
+
+/*
+ * mdss_mdp_init_default_prefill_factors() - seed the prefill tuning
+ * factors with their default values, plus the traffic-shaper prefill
+ * parameters on targets that support MDSS_QOS_TS_PREFILL.
+ */
+void mdss_mdp_init_default_prefill_factors(struct mdss_data_type *mdata)
+{
+ mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor = 8;
+ mdata->prefill_data.prefill_factors.fmt_mt_factor = 4;
+ mdata->prefill_data.prefill_factors.fmt_linear_factor = 1;
+ mdata->prefill_data.prefill_factors.scale_factor = 1;
+ mdata->prefill_data.prefill_factors.xtra_ff_factor = 2;
+
+ if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
+ mdata->prefill_data.ts_threshold = 25;
+ mdata->prefill_data.ts_end = 8;
+ mdata->prefill_data.ts_rate.numer = 1;
+ mdata->prefill_data.ts_rate.denom = 4;
+ mdata->prefill_data.ts_overhead = 2;
+ }
+}
+
+/*
+ * mdss_mdp_hw_rev_caps_init() - populate per-hardware-revision
+ * capabilities, QoS/caps bitmaps and quirks based on mdata->mdp_rev.
+ * Several cases intentionally fall through to share the settings of a
+ * closely related revision; those spots are annotated below.
+ */
+static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
+{
+
+ mdata->per_pipe_ib_factor.numer = 0;
+ mdata->per_pipe_ib_factor.denom = 0;
+ mdata->apply_post_scale_bytes = true;
+ mdata->hflip_buffer_reused = true;
+ /* prevent disable of prefill calculations */
+ mdata->min_prefill_lines = 0xffff;
+ /* clock gating feature is disabled by default */
+ mdata->enable_gate = false;
+ mdata->pixel_ram_size = 0;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;
+
+ mdss_mdp_hw_rev_debug_caps_init(mdata);
+
+ switch (mdata->mdp_rev) {
+ case MDSS_MDP_HW_REV_107:
+ mdss_set_quirk(mdata, MDSS_QUIRK_ROTCDP);
+ /* fall-through */
+ case MDSS_MDP_HW_REV_107_1:
+ mdss_mdp_format_flag_removal(invalid_mdp107_wb_output_fmts,
+ ARRAY_SIZE(invalid_mdp107_wb_output_fmts),
+ VALID_MDP_WB_INTF_FORMAT);
+ /* fall-through */
+ case MDSS_MDP_HW_REV_107_2:
+ mdata->max_target_zorder = 7; /* excluding base layer */
+ mdata->max_cursor_size = 128;
+ mdata->per_pipe_ib_factor.numer = 8;
+ mdata->per_pipe_ib_factor.denom = 5;
+ mdata->apply_post_scale_bytes = false;
+ mdata->hflip_buffer_reused = false;
+ mdata->min_prefill_lines = 21;
+ mdata->has_ubwc = true;
+ mdata->pixel_ram_size = 50 * 1024;
+ set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
+ mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+ mdata->mdss_caps_map);
+ mdss_mdp_init_default_prefill_factors(mdata);
+ mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
+ mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
+ mdss_set_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED);
+ break;
+ case MDSS_MDP_HW_REV_105:
+ case MDSS_MDP_HW_REV_109:
+ mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
+ mdata->max_target_zorder = 7; /* excluding base layer */
+ mdata->max_cursor_size = 128;
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+ mdata->mdss_caps_map);
+ break;
+ case MDSS_MDP_HW_REV_110:
+ mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
+ mdata->max_target_zorder = 4; /* excluding base layer */
+ mdata->max_cursor_size = 128;
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ mdata->min_prefill_lines = 12;
+ mdata->props = mdss_get_props();
+ break;
+ case MDSS_MDP_HW_REV_112:
+ mdata->max_target_zorder = 4; /* excluding base layer */
+ mdata->max_cursor_size = 64;
+ mdata->min_prefill_lines = 12;
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ break;
+ case MDSS_MDP_HW_REV_114:
+ /* disable ECG for 28nm PHY platform */
+ mdata->enable_gate = false;
+ /* fall-through */
+ case MDSS_MDP_HW_REV_116:
+ mdata->max_target_zorder = 4; /* excluding base layer */
+ mdata->max_cursor_size = 128;
+ mdata->min_prefill_lines = 14;
+ mdata->has_ubwc = true;
+ mdata->pixel_ram_size = 40 * 1024;
+ mdata->apply_post_scale_bytes = false;
+ mdata->hflip_buffer_reused = false;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
+ set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
+ mdss_mdp_init_default_prefill_factors(mdata);
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
+ mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
+ break;
+ case MDSS_MDP_HW_REV_115:
+ mdata->max_target_zorder = 4; /* excluding base layer */
+ mdata->max_cursor_size = 128;
+ mdata->min_prefill_lines = 14;
+ mdata->has_ubwc = false;
+ mdata->pixel_ram_size = 16 * 1024;
+ mdata->apply_post_scale_bytes = false;
+ mdata->hflip_buffer_reused = false;
+ /* disable ECG for 28nm PHY platform */
+ mdata->enable_gate = false;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
+ set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_MIXER_1_FOR_WB, mdata->mdss_caps_map);
+ mdss_mdp_init_default_prefill_factors(mdata);
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
+ mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
+ break;
+ case MDSS_MDP_HW_REV_300:
+ case MDSS_MDP_HW_REV_301:
+ mdata->max_target_zorder = 7; /* excluding base layer */
+ mdata->max_cursor_size = 384;
+ mdata->per_pipe_ib_factor.numer = 8;
+ mdata->per_pipe_ib_factor.denom = 5;
+ mdata->apply_post_scale_bytes = false;
+ mdata->hflip_buffer_reused = false;
+ mdata->min_prefill_lines = 25;
+ mdata->has_ubwc = true;
+ mdata->pixel_ram_size = 50 * 1024;
+ mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;
+
+ set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
+ set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
+ mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+ mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
+ mdss_mdp_init_default_prefill_factors(mdata);
+ mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
+ mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
+ mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
+ mdata->has_wb_ubwc = true;
+ set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
+ break;
+ default:
+ mdata->max_target_zorder = 4; /* excluding base layer */
+ mdata->max_cursor_size = 64;
+ }
+
+ if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
+ mdss_set_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG);
+
+ if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 ||
+ mdata->mdp_rev == MDSS_MDP_HW_REV_200)
+ mdss_set_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN);
+}
+
+/*
+ * mdss_hw_rev_init() - read the HW revision register once and derive
+ * the per-revision capabilities. A nonzero cached mdp_rev means this
+ * has already run, so it returns immediately.
+ */
+static void mdss_hw_rev_init(struct mdss_data_type *mdata)
+{
+ if (mdata->mdp_rev)
+ return;
+
+ mdata->mdp_rev = MDSS_REG_READ(mdata, MDSS_REG_HW_VERSION);
+ mdss_mdp_hw_rev_caps_init(mdata);
+}
+
+/**
+ * mdss_hw_init() - Initialize MDSS target specific register settings
+ * @mdata: MDP private data
+ *
+ * Initialize basic MDSS hardware settings based on the board specific
+ * parameters. This function does not explicitly turn on the MDP clocks
+ * and so it must be called with the MDP clocks already enabled.
+ */
+void mdss_hw_init(struct mdss_data_type *mdata)
+{
+ /* NOTE(review): 'vig' is assigned but not otherwise used here —
+ * possibly left over from removed code; confirm before removing. */
+ struct mdss_mdp_pipe *vig;
+
+ mdss_hw_rev_init(mdata);
+
+ /* Disable hw underrun recovery only for older mdp reversions. */
+ if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
+ writel_relaxed(0x0, mdata->mdp_base +
+ MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL);
+
+ if (mdata->hw_settings) {
+ struct mdss_hw_settings *hws = mdata->hw_settings;
+
+ /* apply the board-specific reg/value table, terminated by
+ * a NULL reg entry */
+ while (hws->reg) {
+ writel_relaxed(hws->val, hws->reg);
+ hws++;
+ }
+ }
+
+ vig = mdata->vig_pipes;
+
+ mdata->nmax_concurrent_ad_hw =
+ (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
+
+ pr_debug("MDP hw init done\n");
+}
+
+/*
+ * mdss_mdp_res_init() - one-time probe-time resource initialization
+ * @mdata: global mdss data
+ *
+ * Guards against double init, seeds the irq mask state, sets up
+ * irq/regulators/clocks, initializes the histogram-interrupt state
+ * and creates the ION client (failure to create it is non-fatal).
+ *
+ * Return: 0 on success, -EPERM when already initialized, or the
+ * mdss_mdp_irq_clk_setup() error.
+ */
+static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
+{
+ u32 rc = 0;
+
+ if (mdata->res_init) {
+ pr_err("mdss resources already initialized\n");
+ return -EPERM;
+ }
+
+ mdata->res_init = true;
+ mdata->clk_ena = false;
+ mdss_mdp_hw.irq_info->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
+ mdss_mdp_hw.irq_info->irq_ena = false;
+
+ rc = mdss_mdp_irq_clk_setup(mdata);
+ if (rc)
+ return rc;
+
+ mdata->hist_intr.req = 0;
+ mdata->hist_intr.curr = 0;
+ mdata->hist_intr.state = 0;
+ spin_lock_init(&mdata->hist_intr.lock);
+
+ mdata->iclient = msm_ion_client_create(mdata->pdev->name);
+ if (IS_ERR_OR_NULL(mdata->iclient)) {
+ pr_err("msm_ion_client_create() return error (%pK)\n",
+ mdata->iclient);
+ mdata->iclient = NULL;
+ }
+
+ return rc;
+}
+
+/*
+ * mdss_mdp_scaler_init() - parse scaler offsets from devicetree
+ * @mdata: global mdss data
+ * @dev: mdss device, source of the of_node
+ *
+ * Reads the qcom,mdss-scaler-offsets child node: VIG scaler and LUT
+ * offsets, plus (when qcom,mdss-has-dest-scaler is present) the
+ * destination-scaler block base and per-scaler offset tables.
+ * Returns 0 when the node is absent, since scalers are optional.
+ *
+ * All allocations are devres-managed, so no manual cleanup is done on
+ * failure (the original code kfree()'d the just-failed — i.e. NULL —
+ * pointer, which was a no-op and would corrupt devres if the pointer
+ * were ever non-NULL; those calls are removed).
+ *
+ * Return: 0 on success or when no scaler node exists, negative errno
+ * on parse/allocation failure.
+ */
+static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
+ struct device *dev)
+{
+ int ret;
+ struct device_node *node;
+ u32 prop_val;
+
+ if (!dev)
+ return -EPERM;
+
+ node = of_get_child_by_name(dev->of_node, "qcom,mdss-scaler-offsets");
+ if (!node)
+ return 0;
+
+ /* already parsed once */
+ if (mdata->scaler_off)
+ return -EFAULT;
+
+ mdata->scaler_off = devm_kzalloc(&mdata->pdev->dev,
+ sizeof(*mdata->scaler_off), GFP_KERNEL);
+ if (!mdata->scaler_off)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-vig-scaler-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-vig-scaler-off", ret);
+ return -EINVAL;
+ }
+ mdata->scaler_off->vig_scaler_off = prop_val;
+ ret = of_property_read_u32(node,
+ "qcom,mdss-vig-scaler-lut-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-vig-scaler-lut-off", ret);
+ return -EINVAL;
+ }
+ mdata->scaler_off->vig_scaler_lut_off = prop_val;
+ mdata->scaler_off->has_dest_scaler =
+ of_property_read_bool(mdata->pdev->dev.of_node,
+ "qcom,mdss-has-dest-scaler");
+ if (mdata->scaler_off->has_dest_scaler) {
+ ret = of_property_read_u32(node,
+ "qcom,mdss-dest-block-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-dest-block-off", ret);
+ return -EINVAL;
+ }
+ mdata->scaler_off->dest_base = mdata->mdss_io.base +
+ prop_val;
+ mdata->scaler_off->ndest_scalers =
+ mdss_mdp_parse_dt_prop_len(mdata->pdev,
+ "qcom,mdss-dest-scalers-off");
+ mdata->scaler_off->dest_scaler_off =
+ devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
+ mdata->scaler_off->ndest_scalers,
+ GFP_KERNEL);
+ if (!mdata->scaler_off->dest_scaler_off)
+ return -ENOMEM;
+ ret = mdss_mdp_parse_dt_handler(mdata->pdev,
+ "qcom,mdss-dest-scaler-off",
+ mdata->scaler_off->dest_scaler_off,
+ mdata->scaler_off->ndest_scalers);
+ if (ret)
+ return -EINVAL;
+ mdata->scaler_off->dest_scaler_lut_off =
+ devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
+ mdata->scaler_off->ndest_scalers,
+ GFP_KERNEL);
+ if (!mdata->scaler_off->dest_scaler_lut_off)
+ return -ENOMEM;
+ ret = mdss_mdp_parse_dt_handler(mdata->pdev,
+ "qcom,mdss-dest-scalers-lut-off",
+ mdata->scaler_off->dest_scaler_lut_off,
+ mdata->scaler_off->ndest_scalers);
+ if (ret)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
+ * @on: 1 to start handoff, 0 to complete the handoff after first frame update
+ *
+ * MDSS Clocks and GDSC are already on during continuous splash screen, but
+ * increasing ref count will keep clocks from being turned off until handoff
+ * has properly happened after frame update.
+ */
+void mdss_mdp_footswitch_ctrl_splash(int on)
+{
+	int ret;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata != NULL) {
+		if (on) {
+			/* Take extra votes so splash content stays up. */
+			mdata->handoff_pending = true;
+			pr_debug("Enable MDP FS for splash.\n");
+			if (mdata->venus) {
+				ret = regulator_enable(mdata->venus);
+				if (ret)
+					pr_err("venus failed to enable\n");
+			}
+
+			/* Enable failure is logged only; handoff proceeds. */
+			ret = regulator_enable(mdata->fs);
+			if (ret)
+				pr_err("Footswitch failed to enable\n");
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_bus_bandwidth_ctrl(true);
+		} else {
+			/* Drop the votes in reverse order of acquisition. */
+			pr_debug("Disable MDP FS for splash.\n");
+			mdss_bus_bandwidth_ctrl(false);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			regulator_disable(mdata->fs);
+			if (mdata->venus)
+				regulator_disable(mdata->venus);
+			mdata->handoff_pending = false;
+		}
+	} else {
+		pr_warn("mdss mdata not initialized\n");
+	}
+}
+
+/*
+ * mdss_mdp_get_pan_intf() - map a panel interface name to its type constant
+ * @pan_intf: interface name string (e.g. from DT or the boot command line)
+ *
+ * Looks the name up in the pan_types[] table.  Returns the matching type,
+ * or MDSS_PANEL_INTF_INVALID when @pan_intf is NULL or not in the table.
+ */
+static int mdss_mdp_get_pan_intf(const char *pan_intf)
+{
+	int i, rc = MDSS_PANEL_INTF_INVALID;
+
+	if (!pan_intf)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
+		if (!strcmp(pan_intf, pan_types[i].name)) {
+			rc = pan_types[i].type;
+			break;
+		}
+	}
+	return rc;
+}
+
+/*
+ * mdss_mdp_get_pan_cfg() - parse the boot-time panel override string
+ * @pan_cfg: panel configuration to fill in
+ *
+ * Parses mdss_mdp_panel, which is expected to look like
+ * "<lk_cfg>:<intf>:<panel name>" where <lk_cfg> is '0' or '1'.
+ * Returns 0 on success, -EINVAL when the string is absent or malformed
+ * (in which case pan_intf is set to MDSS_PANEL_INTF_INVALID).
+ */
+static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
+{
+	char *t = NULL;
+	char pan_intf_str[MDSS_MAX_PANEL_LEN];
+	int rc, i, panel_len;
+	char pan_name[MDSS_MAX_PANEL_LEN] = {'\0'};
+
+	if (!pan_cfg)
+		return -EINVAL;
+
+	if (mdss_mdp_panel[0] == '0') {
+		pr_debug("panel name is not set\n");
+		pan_cfg->lk_cfg = false;
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	} else if (mdss_mdp_panel[0] == '1') {
+		pan_cfg->lk_cfg = true;
+	} else {
+		/* read from dt */
+		pan_cfg->lk_cfg = true;
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	}
+
+	/* skip lk cfg and delimiter; ex: "1:" */
+	strlcpy(pan_name, &mdss_mdp_panel[2], MDSS_MAX_PANEL_LEN);
+	t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
+	if (!t) {
+		pr_err("pan_name=[%s] invalid\n", pan_name);
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	}
+
+	/* copy at most 4 chars of the interface token (e.g. "dsi") */
+	for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
+		pan_intf_str[i] = *(pan_name + i);
+	pan_intf_str[i] = 0;
+	pr_debug("%d panel intf %s\n", __LINE__, pan_intf_str);
+	/* point to the start of panel name */
+	t = t + 1;
+	strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
+	pr_debug("%d: t=[%s] panel name=[%s]\n", __LINE__,
+		t, pan_cfg->arg_cfg);
+
+	panel_len = strlen(pan_cfg->arg_cfg);
+	if (!panel_len) {
+		pr_err("Panel name is invalid\n");
+		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
+		return -EINVAL;
+	}
+
+	/* An unknown interface token still counts as success here. */
+	rc = mdss_mdp_get_pan_intf(pan_intf_str);
+	pan_cfg->pan_intf = (rc < 0) ? MDSS_PANEL_INTF_INVALID : rc;
+	return 0;
+}
+
+/*
+ * mdss_mdp_parse_dt_pan_intf() - read the preferred primary interface from DT
+ * @pdev: MDP platform device
+ *
+ * Reads "qcom,mdss-pref-prim-intf" and stores the resolved interface type in
+ * mdata->pan_cfg.pan_intf.  Returns 0 on success, -ENODEV when the property
+ * is absent, or the negative lookup result for an unknown interface name.
+ */
+static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
+{
+	int rc;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	const char *prim_intf = NULL;
+
+	rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,mdss-pref-prim-intf", &prim_intf);
+	if (rc)
+		return -ENODEV;
+
+	rc = mdss_mdp_get_pan_intf(prim_intf);
+	if (rc < 0) {
+		mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
+	} else {
+		mdata->pan_cfg.pan_intf = rc;
+		rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * mdss_mdp_get_cmdline_config() - resolve the panel configuration source
+ * @pdev: MDP platform device
+ *
+ * Prefers the boot command-line override (mdss_mdp_panel); when that is
+ * empty or invalid, falls back to the DT preferred-primary-interface
+ * property.  Always marks pan_cfg.init_done and returns 0.
+ */
+static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
+{
+	int rc, len = 0;
+	int *intf_type;
+	char *panel_name;
+	struct mdss_panel_cfg *pan_cfg;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	/*
+	 * NOTE(review): writes index MDSS_MAX_PANEL_LEN — assumes arg_cfg is
+	 * declared with MDSS_MAX_PANEL_LEN + 1 elements; confirm in header.
+	 */
+	mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
+	pan_cfg = &mdata->pan_cfg;
+	panel_name = &pan_cfg->arg_cfg[0];
+	intf_type = &pan_cfg->pan_intf;
+
+	/* reads from dt by default */
+	pan_cfg->lk_cfg = true;
+
+	len = strlen(mdss_mdp_panel);
+
+	if (len > 0) {
+		rc = mdss_mdp_get_pan_cfg(pan_cfg);
+		if (!rc) {
+			pan_cfg->init_done = true;
+			return rc;
+		}
+	}
+
+	rc = mdss_mdp_parse_dt_pan_intf(pdev);
+	/* if pref pan intf is not present */
+	if (rc)
+		pr_warn("unable to parse device tree for pan intf\n");
+
+	pan_cfg->init_done = true;
+
+	return 0;
+}
+
+/*
+ * __update_sspp_info() - append one pipe type's capabilities to a sysfs buf
+ * @pipe:     first pipe of the list (NULL-tolerant; loop then does nothing)
+ * @pipe_cnt: number of pipes of this type
+ * @type:     printable type name ("vig", "rgb", "dma", "cursor")
+ * @buf:      destination buffer (one PAGE_SIZE sysfs page)
+ * @cnt:      in/out count of bytes already written into @buf
+ */
+static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
+	int pipe_cnt, char *type, char *buf, int *cnt)
+{
+	int i;
+	int j;
+	size_t len = PAGE_SIZE;
+	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
+
+#define SPRINT(fmt, ...) \
+		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
+
+	for (i = 0; i < pipe_cnt && pipe; i++) {
+		SPRINT("pipe_num:%d pipe_type:%s pipe_ndx:%d rects:%d pipe_is_handoff:%d display_id:%d ",
+			pipe->num, type, pipe->ndx, pipe->multirect.max_rects,
+			pipe->is_handed_off, mdss_mdp_get_display_id(pipe));
+		SPRINT("fmts_supported:");
+		for (j = 0; j < num_bytes; j++)
+			SPRINT("%d,", pipe->supported_formats[j]);
+		SPRINT("\n");
+		/* multirect pipes occupy max_rects consecutive entries */
+		pipe += pipe->multirect.max_rects;
+	}
+#undef SPRINT
+}
+
+/*
+ * mdss_mdp_update_sspp_info() - dump capabilities of all SSPP pipe types
+ * @mdata: MDSS driver data
+ * @buf:   sysfs output buffer
+ * @cnt:   in/out byte count already written into @buf
+ */
+static void mdss_mdp_update_sspp_info(struct mdss_data_type *mdata,
+	char *buf, int *cnt)
+{
+	__update_sspp_info(mdata->vig_pipes, mdata->nvig_pipes,
+		"vig", buf, cnt);
+	__update_sspp_info(mdata->rgb_pipes, mdata->nrgb_pipes,
+		"rgb", buf, cnt);
+	__update_sspp_info(mdata->dma_pipes, mdata->ndma_pipes,
+		"dma", buf, cnt);
+	__update_sspp_info(mdata->cursor_pipes, mdata->ncursor_pipes,
+		"cursor", buf, cnt);
+}
+
+/*
+ * mdss_mdp_update_wb_info() - dump writeback/rotator format capabilities
+ * @mdata: MDSS driver data
+ * @buf:   sysfs output buffer
+ * @cnt:   in/out byte count already written into @buf
+ *
+ * NOTE(review): rot_output_fmts is printed from supported_input_formats —
+ * presumably the rotator's output formats mirror its input formats, but
+ * confirm this is not a copy-paste of the rot_input_fmts loop.
+ */
+static void mdss_mdp_update_wb_info(struct mdss_data_type *mdata,
+	char *buf, int *cnt)
+{
+#define SPRINT(fmt, ...) \
+		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
+	size_t len = PAGE_SIZE;
+	int i;
+	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
+
+	SPRINT("rot_input_fmts=");
+	for (i = 0; i < num_bytes && mdata->wb; i++)
+		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
+	SPRINT("\nrot_output_fmts=");
+	for (i = 0; i < num_bytes && mdata->wb; i++)
+		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
+	SPRINT("\nwb_output_fmts=");
+	for (i = 0; i < num_bytes && mdata->wb; i++)
+		SPRINT("%d ", mdata->wb->supported_output_formats[i]);
+	SPRINT("\n");
+#undef SPRINT
+}
+
+/*
+ * mdss_mdp_show_capabilities() - sysfs "caps" show handler
+ * @dev:  MDP device
+ * @attr: sysfs attribute (unused)
+ * @buf:  one PAGE_SIZE output buffer
+ *
+ * Emits a key=value description of the MDP hardware: revision, pipe
+ * counts/capabilities, SMP info, prefill factors, bandwidth limits and a
+ * space-separated "features=" list.  Returns the number of bytes written.
+ */
+ssize_t mdss_mdp_show_capabilities(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	size_t len = PAGE_SIZE;
+	int cnt = 0;
+
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("mdp_version=5\n");
+	SPRINT("hw_rev=%d\n", mdata->mdp_rev);
+	SPRINT("pipe_count:%d\n", mdata->nvig_pipes + mdata->nrgb_pipes +
+		mdata->ndma_pipes + mdata->ncursor_pipes);
+	mdss_mdp_update_sspp_info(mdata, buf, &cnt);
+	mdss_mdp_update_wb_info(mdata, buf, &cnt);
+	/* TODO : need to remove num pipes info */
+	SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
+	SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
+	SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
+	SPRINT("blending_stages=%d\n", mdata->max_target_zorder);
+	SPRINT("cursor_pipes=%d\n", mdata->ncursor_pipes);
+	SPRINT("max_cursor_size=%d\n", mdata->max_cursor_size);
+	SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
+	SPRINT("smp_size=%d\n", mdata->smp_mb_size);
+	SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
+	SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
+	SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);
+
+	if (mdata->nwb)
+		SPRINT("wb_intf_index=%d\n", mdata->nwb - 1);
+
+	/* Prefill factors only exist on targets with simplified prefill. */
+	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
+		SPRINT("fmt_mt_nv12_factor=%d\n",
+			mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor);
+		SPRINT("fmt_mt_factor=%d\n",
+			mdata->prefill_data.prefill_factors.fmt_mt_factor);
+		SPRINT("fmt_linear_factor=%d\n",
+			mdata->prefill_data.prefill_factors.fmt_linear_factor);
+		SPRINT("scale_factor=%d\n",
+			mdata->prefill_data.prefill_factors.scale_factor);
+		SPRINT("xtra_ff_factor=%d\n",
+			mdata->prefill_data.prefill_factors.xtra_ff_factor);
+	}
+
+	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
+		SPRINT("amortizable_threshold=%d\n",
+			mdata->prefill_data.ts_threshold);
+		SPRINT("system_overhead_lines=%d\n",
+			mdata->prefill_data.ts_overhead);
+	}
+
+	/* Zero-valued limits are treated as "not configured" and skipped. */
+	if (mdata->props)
+		SPRINT("props=%d\n", mdata->props);
+	if (mdata->max_bw_low)
+		SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
+	if (mdata->max_bw_high)
+		SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
+	if (mdata->max_pipe_width)
+		SPRINT("max_pipe_width=%d\n", mdata->max_pipe_width);
+	if (mdata->max_mixer_width)
+		SPRINT("max_mixer_width=%d\n", mdata->max_mixer_width);
+	if (mdata->max_bw_per_pipe)
+		SPRINT("max_pipe_bw=%u\n", mdata->max_bw_per_pipe);
+	if (mdata->max_mdp_clk_rate)
+		SPRINT("max_mdp_clk=%u\n", mdata->max_mdp_clk_rate);
+	if (mdata->clk_factor.numer)
+		SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
+			mdata->clk_factor.denom);
+	if (mdata->has_rot_dwnscale) {
+		if (mdata->rot_dwnscale_min)
+			SPRINT("rot_dwnscale_min=%u\n",
+				mdata->rot_dwnscale_min);
+		if (mdata->rot_dwnscale_max)
+			SPRINT("rot_dwnscale_max=%u\n",
+				mdata->rot_dwnscale_max);
+	}
+	SPRINT("features=");
+	if (mdata->has_bwc)
+		SPRINT(" bwc");
+	if (mdata->has_ubwc)
+		SPRINT(" ubwc");
+	if (mdata->has_wb_ubwc)
+		SPRINT(" wb_ubwc");
+	if (mdata->has_decimation)
+		SPRINT(" decimation");
+	if (mdata->highest_bank_bit && !mdss_mdp_is_ubwc_supported(mdata))
+		SPRINT(" tile_format");
+	if (mdata->has_non_scalar_rgb)
+		SPRINT(" non_scalar_rgb");
+	if (mdata->has_src_split)
+		SPRINT(" src_split");
+	if (mdata->has_rot_dwnscale)
+		SPRINT(" rotator_downscale");
+	if (mdata->max_bw_settings_cnt)
+		SPRINT(" dynamic_bw_limit");
+	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
+		SPRINT(" qseed3");
+	if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map))
+		SPRINT(" dest_scaler");
+	if (mdata->has_separate_rotator)
+		SPRINT(" separate_rotator");
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
+		SPRINT(" hdr");
+	SPRINT("\n");
+#undef SPRINT
+
+	return cnt;
+}
+
+/*
+ * mdss_mdp_read_max_limit_bw() - sysfs "bw_mode_bitmap" show handler
+ * @dev:  MDP device
+ * @attr: sysfs attribute (unused)
+ * @buf:  one PAGE_SIZE output buffer
+ *
+ * Prints the bandwidth-limit bitmap/pending flag and the per-use-case
+ * bandwidth settings.  The name tables have exactly 4 entries, while
+ * max_bw_settings_cnt/mdss_per_pipe_bw_cnt come from DT; the loops are
+ * bounded by both so a larger DT count cannot read past the name arrays.
+ */
+static ssize_t mdss_mdp_read_max_limit_bw(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	size_t len = PAGE_SIZE;
+	u32 cnt = 0;
+	int i;
+
+	char bw_names[4][8] = {"default", "camera", "hflip", "vflip"};
+	char pipe_bw_names[4][16] = {"default_pipe", "camera_pipe",
+				"hflip_pipe", "vflip_pipe"};
+	struct mdss_max_bw_settings *bw_settings;
+	struct mdss_max_bw_settings *pipe_bw_settings;
+
+	bw_settings = mdata->max_bw_settings;
+	pipe_bw_settings = mdata->max_per_pipe_bw_settings;
+
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("bw_mode_bitmap=%d\n", mdata->bw_mode_bitmap);
+	SPRINT("bw_limit_pending=%d\n", mdata->bw_limit_pending);
+
+	for (i = 0; i < mdata->max_bw_settings_cnt &&
+			i < ARRAY_SIZE(bw_names); i++) {
+		SPRINT("%s=%d\n", bw_names[i], bw_settings->mdss_max_bw_val);
+		bw_settings++;
+	}
+
+	for (i = 0; i < mdata->mdss_per_pipe_bw_cnt &&
+			i < ARRAY_SIZE(pipe_bw_names); i++) {
+		SPRINT("%s=%d\n", pipe_bw_names[i],
+				pipe_bw_settings->mdss_max_bw_val);
+		pipe_bw_settings++;
+	}
+
+	return cnt;
+}
+
+/*
+ * mdss_mdp_store_max_limit_bw() - sysfs "bw_mode_bitmap" store handler
+ * @dev:  MDP device
+ * @attr: sysfs attribute (unused)
+ * @buf:  user-provided value string
+ * @len:  length of @buf
+ *
+ * Parses an unsigned integer bitmap and marks a bandwidth-limit update as
+ * pending.  A parse failure is only logged; @len is returned either way so
+ * the write always "succeeds" from userspace's point of view.
+ */
+static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	u32 data = 0;
+
+	if (kstrtouint(buf, 0, &data)) {
+		pr_info("Not able scan to bw_mode_bitmap\n");
+	} else {
+		mdata->bw_mode_bitmap = data;
+		mdata->bw_limit_pending = true;
+		pr_debug("limit use case, bw_mode_bitmap = %d\n", data);
+	}
+
+	return len;
+}
+
+/* sysfs nodes: read-only "caps" and read/write "bw_mode_bitmap". */
+static DEVICE_ATTR(caps, 0444, mdss_mdp_show_capabilities, NULL);
+static DEVICE_ATTR(bw_mode_bitmap, 0664,
+		mdss_mdp_read_max_limit_bw, mdss_mdp_store_max_limit_bw);
+
+static struct attribute *mdp_fs_attrs[] = {
+	&dev_attr_caps.attr,
+	&dev_attr_bw_mode_bitmap.attr,
+	NULL
+};
+
+static struct attribute_group mdp_fs_attr_group = {
+	.attrs = mdp_fs_attrs
+};
+
+/*
+ * mdss_mdp_register_sysfs() - create the MDP capability sysfs group
+ * @mdata: MDSS driver data (provides the device kobject)
+ *
+ * Returns the sysfs_create_group() result (0 on success).
+ */
+static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
+{
+	struct device *dev = &mdata->pdev->dev;
+	int rc;
+
+	rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);
+
+	return rc;
+}
+
+/*
+ * mdss_panel_get_intf_status() - check if bootloader enabled an interface
+ * @disp_num:  display index (DISPLAY_1/DISPLAY_2, used for DSI only)
+ * @intf_type: interface to query (DSI/EDP/HDMI)
+ *
+ * Reads DISP_INTF_SEL (with clocks briefly enabled) while a splash handoff
+ * is pending.  Returns non-zero when the interface was selected by the
+ * bootloader, 0 when not (or when no handoff is pending), and
+ * -EPROBE_DEFER before panel configuration is initialized.
+ */
+int mdss_panel_get_intf_status(u32 disp_num, u32 intf_type)
+{
+	int rc, intf_status = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdss_res || !mdss_res->pan_cfg.init_done)
+		return -EPROBE_DEFER;
+
+	if (mdss_res->handoff_pending) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		intf_status = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_DISP_INTF_SEL);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		if (intf_type == MDSS_PANEL_INTF_DSI) {
+			if (disp_num == DISPLAY_1)
+				rc = (intf_status & MDSS_MDP_INTF_DSI0_SEL);
+			else if (disp_num == DISPLAY_2)
+				rc = (intf_status & MDSS_MDP_INTF_DSI1_SEL);
+			else
+				rc = 0;
+		} else if (intf_type == MDSS_PANEL_INTF_EDP) {
+			intf_status &= MDSS_MDP_INTF_EDP_SEL;
+			rc = (intf_status == MDSS_MDP_INTF_EDP_SEL);
+		} else if (intf_type == MDSS_PANEL_INTF_HDMI) {
+			intf_status &= MDSS_MDP_INTF_HDMI_SEL;
+			rc = (intf_status == MDSS_MDP_INTF_HDMI_SEL);
+		} else {
+			rc = 0;
+		}
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_probe() - MDP platform driver probe
+ * @pdev: MDP platform device (must have an of_node; DT probe only)
+ *
+ * Maps register regions, registers IRQs and the misc IRQ domain, parses DT,
+ * sets up runtime PM / bus scaling / sysfs / SMMU, and detects whether the
+ * bootloader left a display running (continuous splash) so the clock and
+ * regulator votes can be handed off symmetrically.
+ *
+ * Returns 0 on success or a negative errno; most failures funnel through
+ * the probe_done label which undoes the splash votes and clears mdss_res.
+ */
+static int mdss_mdp_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc;
+	struct mdss_data_type *mdata;
+	uint32_t intf_sel = 0;
+	uint32_t split_display = 0;
+	int num_of_display_on = 0;
+	int i = 0;
+
+	if (!pdev->dev.of_node) {
+		pr_err("MDP driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	/* mdss_res doubles as a "probed once" guard. */
+	if (mdss_res) {
+		pr_err("MDP already initialized\n");
+		return -EINVAL;
+	}
+
+	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	pdev->id = 0;
+	mdata->pdev = pdev;
+	platform_set_drvdata(pdev, mdata);
+	mdss_res = mdata;
+	mutex_init(&mdata->reg_lock);
+	mutex_init(&mdata->reg_bus_lock);
+	mutex_init(&mdata->bus_lock);
+	INIT_LIST_HEAD(&mdata->reg_bus_clist);
+	atomic_set(&mdata->sd_client_count, 0);
+	atomic_set(&mdata->active_intf_cnt, 0);
+
+	mdss_res->mdss_util = mdss_get_util_intf();
+	if (mdss_res->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	/* Publish helpers used by sibling MDSS drivers (DSI, HDMI, ...). */
+	mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
+	mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
+	mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
+	mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
+	mdss_res->mdss_util->bus_bandwidth_ctrl = mdss_bus_bandwidth_ctrl;
+	mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type;
+	mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status;
+
+	rc = msm_mdss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys");
+	if (rc) {
+		pr_err("unable to map MDP base\n");
+		goto probe_done;
+	}
+	pr_debug("MDSS HW Base addr=0x%x len=0x%x\n",
+		(int) (unsigned long) mdata->mdss_io.base,
+		mdata->mdss_io.len);
+
+	rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys");
+	if (rc) {
+		pr_err("unable to map MDSS VBIF base\n");
+		goto probe_done;
+	}
+	pr_debug("MDSS VBIF HW Base addr=0x%x len=0x%x\n",
+		(int) (unsigned long) mdata->vbif_io.base,
+		mdata->vbif_io.len);
+
+	/* Non-realtime VBIF is optional hardware. */
+	rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_nrt_io,
+		"vbif_nrt_phys");
+	if (rc)
+		pr_debug("unable to map MDSS VBIF non-realtime base\n");
+	else
+		pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
+			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("unable to get MDSS irq\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	/*
+	 * NOTE(review): this and the later kcalloc failures return directly
+	 * instead of going through probe_done, skipping its cleanup — verify
+	 * whether that is intentional.
+	 */
+	mdss_mdp_hw.irq_info = kcalloc(1, sizeof(struct irq_info), GFP_KERNEL);
+	if (!mdss_mdp_hw.irq_info)
+		return -ENOMEM;
+
+	mdss_mdp_hw.irq_info->irq = res->start;
+	mdss_mdp_hw.ptr = mdata;
+
+	/* export misc. interrupts to external driver */
+	mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
+			&mdss_irq_domain_ops, mdata);
+	if (!mdata->irq_domain) {
+		pr_err("unable to add linear domain\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	mdss_misc_hw.irq_info = mdss_intr_line();
+	rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
+	if (rc)
+		pr_err("mdss_register_irq failed.\n");
+
+	rc = mdss_mdp_res_init(mdata);
+	if (rc) {
+		pr_err("unable to initialize mdss mdp resources\n");
+		goto probe_done;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
+	if (mdata->idle_pc_enabled)
+		pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev))
+		mdss_mdp_footswitch_ctrl(mdata, true);
+
+	rc = mdss_mdp_bus_scale_register(mdata);
+	if (rc) {
+		pr_err("unable to register bus scaling\n");
+		goto probe_done;
+	}
+
+	/*
+	 * enable clocks and read mdp_rev as soon as possible once
+	 * kernel is up.
+	 */
+	mdss_mdp_footswitch_ctrl_splash(true);
+	mdss_hw_rev_init(mdata);
+
+	/*populate hw iomem base info from device tree*/
+	rc = mdss_mdp_parse_dt(pdev);
+	if (rc) {
+		pr_err("unable to parse device tree\n");
+		goto probe_done;
+	}
+
+	rc = mdss_mdp_get_cmdline_config(pdev);
+	if (rc) {
+		pr_err("Error in panel override:rc=[%d]\n", rc);
+		goto probe_done;
+	}
+
+	rc = mdss_mdp_debug_init(pdev, mdata);
+	if (rc) {
+		pr_err("unable to initialize mdp debugging\n");
+		goto probe_done;
+	}
+	rc = mdss_mdp_scaler_init(mdata, &pdev->dev);
+	if (rc)
+		goto probe_done;
+
+	/* These registration failures are logged but not fatal. */
+	rc = mdss_mdp_register_sysfs(mdata);
+	if (rc)
+		pr_err("unable to register mdp sysfs nodes\n");
+
+	rc = mdss_fb_register_mdp_instance(&mdp5);
+	if (rc)
+		pr_err("unable to register mdp instance\n");
+
+	rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
+	if (rc)
+		pr_err("mdss_register_irq failed.\n");
+
+	rc = mdss_smmu_init(mdata, &pdev->dev);
+	if (rc)
+		pr_err("mdss smmu init failed\n");
+
+	mdss_mdp_set_supported_formats(mdata);
+
+	mdss_res->mdss_util->mdp_probe_done = true;
+
+	mdss_hw_init(mdata);
+
+	rc = mdss_mdp_pp_init(&pdev->dev);
+	if (rc)
+		pr_err("unable to initialize mdss pp resources\n");
+
+	/* Restoring Secure configuration during boot-up */
+	if (mdss_mdp_req_init_restore_cfg(mdata))
+		__mdss_restore_sec_cfg(mdata);
+
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
+		mdata->default_panic_lut0 = readl_relaxed(mdata->mdp_base +
+			MMSS_MDP_PANIC_LUT0);
+		mdata->default_panic_lut1 = readl_relaxed(mdata->mdp_base +
+			MMSS_MDP_PANIC_LUT1);
+		mdata->default_robust_lut = readl_relaxed(mdata->mdp_base +
+			MMSS_MDP_ROBUST_LUT);
+	}
+
+	/*
+	 * Read the DISP_INTF_SEL register to check if display was enabled in
+	 * bootloader or not. If yes, let handoff handle removing the extra
+	 * clk/regulator votes else turn off clk/regulators because purpose
+	 * here is to get mdp_rev.
+	 */
+	intf_sel = readl_relaxed(mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+	split_display = readl_relaxed(mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
+	mdata->splash_intf_sel = intf_sel;
+	mdata->splash_split_disp = split_display;
+
+	if (intf_sel != 0) {
+		/* one byte of DISP_INTF_SEL per interface */
+		for (i = 0; i < 4; i++)
+			if ((intf_sel >> i*8) & 0x000000FF)
+				num_of_display_on++;
+
+		/*
+		 * For split display enabled - DSI0, DSI1 interfaces are
+		 * considered as single display. So decrement
+		 * 'num_of_display_on' by 1
+		 */
+		if (split_display)
+			num_of_display_on--;
+	}
+	if (!num_of_display_on) {
+		mdss_mdp_footswitch_ctrl_splash(false);
+		msm_bus_scale_client_update_request(
+				mdata->bus_hdl, 0);
+		mdata->ao_bw_uc_idx = 0;
+	} else {
+		mdata->handoff_pending = true;
+		/*
+		 * If multiple displays are enabled in LK, ctrl_splash off will
+		 * be called multiple times during splash_cleanup. Need to
+		 * enable it symmetrically
+		 */
+		for (i = 1; i < num_of_display_on; i++)
+			mdss_mdp_footswitch_ctrl_splash(true);
+	}
+
+	mdp_intr_cb  = kcalloc(ARRAY_SIZE(mdp_irq_map),
+			sizeof(struct intr_callback), GFP_KERNEL);
+	if (mdp_intr_cb == NULL)
+		return -ENOMEM;
+
+	mdss_res->mdp_irq_mask = kcalloc(ARRAY_SIZE(mdp_intr_reg),
+			sizeof(u32), GFP_KERNEL);
+	if (mdss_res->mdp_irq_mask == NULL)
+		return -ENOMEM;
+
+	pr_info("mdss version = 0x%x, bootloader display is %s, num %d, intf_sel=0x%08x\n",
+		mdata->mdp_rev, num_of_display_on ? "on" : "off",
+		num_of_display_on, intf_sel);
+
+probe_done:
+	if (IS_ERR_VALUE((unsigned long)rc)) {
+		if (!num_of_display_on)
+			mdss_mdp_footswitch_ctrl_splash(false);
+
+		if (mdata->regulator_notif_register)
+			regulator_unregister_notifier(mdata->fs,
+						&(mdata->gdsc_cb));
+		mdss_mdp_hw.ptr = NULL;
+		mdss_mdp_pp_term(&pdev->dev);
+		mutex_destroy(&mdata->reg_lock);
+		mdss_res = NULL;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_parse_dt_regs_array() - decode <offset value> pairs from DT
+ * @arr:   raw big-endian DT property data, or NULL (then a no-op)
+ * @io:    register region the offsets are relative to
+ * @hws:   output array of resolved register/value settings
+ * @count: number of <offset value> pairs in @arr
+ *
+ * Offsets falling outside @io->len are skipped without advancing @hws.
+ */
+static void mdss_mdp_parse_dt_regs_array(const u32 *arr,
+	struct mdss_io_data *io, struct mdss_hw_settings *hws, int count)
+{
+	u32 len, reg;
+	int i;
+
+	if (!arr)
+		return;
+
+	for (i = 0, len = count * 2; i < len; i += 2) {
+		reg = be32_to_cpu(arr[i]);
+		if (reg >= io->len)
+			continue;
+
+		hws->reg = io->base + reg;
+		hws->val = be32_to_cpu(arr[i + 1]);
+		pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
+		hws++;
+	}
+}
+
+/*
+ * mdss_mdp_parse_dt_hw_settings() - parse optional VBIF/MDP register writes
+ * @pdev: MDP platform device
+ *
+ * Reads the "qcom,vbif-settings", "qcom,vbif-nrt-settings" and
+ * "qcom,mdp-settings" <offset value> pair lists and stores them in a single
+ * zero-terminated mdata->hw_settings array (the +1 in the allocation is the
+ * terminator entry).  Returns 0 on success, -ENOMEM on allocation failure.
+ *
+ * Fix: the three decoded lists are now written to consecutive, disjoint
+ * ranges of the array; previously the vbif-nrt settings were written at the
+ * same offset as the vbif settings, overwriting them.
+ */
+int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	struct mdss_hw_settings *hws;
+	const u32 *vbif_arr, *mdp_arr, *vbif_nrt_arr;
+	int vbif_len, mdp_len, vbif_nrt_len;
+
+	vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
+			&vbif_len);
+	if (!vbif_arr || (vbif_len & 1)) {
+		pr_debug("MDSS VBIF settings not found\n");
+		vbif_len = 0;
+	}
+	vbif_len /= 2 * sizeof(u32);
+
+	vbif_nrt_arr = of_get_property(pdev->dev.of_node,
+				"qcom,vbif-nrt-settings", &vbif_nrt_len);
+	if (!vbif_nrt_arr || (vbif_nrt_len & 1)) {
+		pr_debug("MDSS VBIF non-realtime settings not found\n");
+		vbif_nrt_len = 0;
+	}
+	vbif_nrt_len /= 2 * sizeof(u32);
+
+	mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
+			&mdp_len);
+	if (!mdp_arr || (mdp_len & 1)) {
+		pr_debug("MDSS MDP settings not found\n");
+		mdp_len = 0;
+	}
+	mdp_len /= 2 * sizeof(u32);
+
+	if (!(mdp_len + vbif_len + vbif_nrt_len))
+		return 0;
+
+	hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len +
+			vbif_nrt_len + 1), GFP_KERNEL);
+	if (!hws)
+		return -ENOMEM;
+
+	mdss_mdp_parse_dt_regs_array(vbif_arr, &mdata->vbif_io,
+			hws, vbif_len);
+	mdss_mdp_parse_dt_regs_array(vbif_nrt_arr, &mdata->vbif_nrt_io,
+			hws + vbif_len, vbif_nrt_len);
+	mdss_mdp_parse_dt_regs_array(mdp_arr, &mdata->mdss_io,
+			hws + vbif_len + vbif_nrt_len, mdp_len);
+
+	mdata->hw_settings = hws;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_parse_dt() - top-level device tree parsing for the MDP driver
+ * @pdev: MDP platform device
+ *
+ * Runs every per-subblock DT parser in order; mandatory parsers abort the
+ * probe on failure while CDM/DSC are optional.  Finally resolves the MDP
+ * register base from "qcom,mdss-mdp-reg-offset".
+ *
+ * Fix: the video-intf failure message previously said "ctl" (copy-paste
+ * from the ctl parser above it).
+ */
+static int mdss_mdp_parse_dt(struct platform_device *pdev)
+{
+	int rc, data;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	rc = mdss_mdp_parse_dt_hw_settings(pdev);
+	if (rc) {
+		pr_err("Error in device tree : hw settings\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_pipe(pdev);
+	if (rc) {
+		pr_err("Error in device tree : pipes\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_mixer(pdev);
+	if (rc) {
+		pr_err("Error in device tree : mixers\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_misc(pdev);
+	if (rc) {
+		pr_err("Error in device tree : misc\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_wb(pdev);
+	if (rc) {
+		pr_err("Error in device tree : wb\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_ctl(pdev);
+	if (rc) {
+		pr_err("Error in device tree : ctl\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_video_intf(pdev);
+	if (rc) {
+		pr_err("Error in device tree : video intf\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_smp(pdev);
+	if (rc) {
+		pr_err("Error in device tree : smp\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_prefill(pdev);
+	if (rc) {
+		pr_err("Error in device tree : prefill\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_ad_cfg(pdev);
+	if (rc) {
+		pr_err("Error in device tree : ad\n");
+		return rc;
+	}
+
+	/* CDM and DSC blocks are optional hardware. */
+	rc = mdss_mdp_parse_dt_cdm(pdev);
+	if (rc)
+		pr_debug("CDM offset not found in device tree\n");
+
+	rc = mdss_mdp_parse_dt_dsc(pdev);
+	if (rc)
+		pr_debug("DSC offset not found in device tree\n");
+
+	/* Parse the mdp specific register base offset*/
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-mdp-reg-offset", &data);
+	if (rc) {
+		pr_err("Error in device tree : mdp reg base\n");
+		return rc;
+	}
+	mdata->mdp_base = mdata->mdss_io.base + data;
+	return 0;
+}
+
+/*
+ * mdss_mdp_parse_dt_pipe_sw_reset() - parse per-pipe sw-reset bit mapping
+ * @pdev:      MDP platform device
+ * @reg_off:   shared sw-reset register offset for all pipes of this type
+ * @prop_name: DT property listing one reset bit per pipe
+ * @pipe_list: pipes to annotate
+ * @npipes:    number of pipes in @pipe_list (entry count must match)
+ *
+ * Optional property; on a count mismatch the mapping is skipped entirely.
+ * Fix: the mismatch message printed the required and found counts swapped.
+ */
+static void mdss_mdp_parse_dt_pipe_sw_reset(struct platform_device *pdev,
+	u32 reg_off, char *prop_name, struct mdss_mdp_pipe *pipe_list,
+	u32 npipes)
+{
+	int len;
+	const u32 *arr;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		int i;
+
+		len /= sizeof(u32);
+		if (len != npipes) {
+			pr_err("%s: invalid sw_reset entries req:%d found:%d\n",
+				prop_name, npipes, len);
+			return;
+		}
+
+		for (i = 0; i < len; i++) {
+			pipe_list[i].sw_reset.reg_off = reg_off;
+			pipe_list[i].sw_reset.bit_off = be32_to_cpu(arr[i]);
+
+			pr_debug("%s[%d]: sw_reset: reg_off:0x%x bit_off:%d\n",
+				prop_name, i, reg_off, be32_to_cpu(arr[i]));
+		}
+	}
+}
+
+/*
+ * mdss_mdp_parse_dt_pipe_clk_ctrl() - parse per-pipe clock control offsets
+ * @pdev:      MDP platform device
+ * @prop_name: mandatory DT property with <ctrl_reg ctrl_bit status_bit>
+ *             triplets, one per pipe
+ * @pipe_list: pipes to annotate
+ * @npipes:    number of pipes in @pipe_list
+ *
+ * The status register is assumed to sit 4 bytes after the control register.
+ * On an entry-count mismatch all parsed clk fields are zeroed again.
+ * Returns 0 on success, -EINVAL on a missing property or bad entry count.
+ */
+static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
+	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
+{
+	int rc = 0, len;
+	const u32 *arr;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		int i, j;
+
+		len /= sizeof(u32);
+		/* i walks raw u32 entries (3 per pipe), j walks pipes */
+		for (i = 0, j = 0; i < len; j++) {
+			struct mdss_mdp_pipe *pipe = NULL;
+
+			if (j >= npipes) {
+				pr_err("invalid clk ctrl enries for prop: %s\n",
+						prop_name);
+				return -EINVAL;
+			}
+
+			pipe = &pipe_list[j];
+
+			pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
+			pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);
+
+			/* status register is next in line to ctrl register */
+			pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
+			pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);
+
+			pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
+					prop_name, j, pipe->clk_ctrl.reg_off,
+					pipe->clk_ctrl.bit_off);
+			pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
+					prop_name, j, pipe->clk_status.reg_off,
+					pipe->clk_status.bit_off);
+		}
+		if (j != npipes) {
+			pr_err("%s: %d entries found. required %d\n",
+				prop_name, j, npipes);
+			for (i = 0; i < npipes; i++) {
+				memset(&pipe_list[i].clk_ctrl, 0,
+					sizeof(pipe_list[i].clk_ctrl));
+				memset(&pipe_list[i].clk_status, 0,
+					sizeof(pipe_list[i].clk_status));
+			}
+			rc = -EINVAL;
+		}
+	} else {
+		pr_err("error mandatory property '%s' not found\n", prop_name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_parse_dt_pipe_panic_ctrl() - parse per-pipe panic control index
+ * @pdev:      MDP platform device
+ * @prop_name: optional DT property with one panic ctrl index per pipe
+ * @pipe_list: pipes to annotate
+ * @npipes:    number of pipes in @pipe_list
+ *
+ * A mismatching or missing property is only logged; parsing is best-effort.
+ */
+static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
+	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
+{
+	int i, j;
+	int len;
+	const u32 *arr;
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		len /= sizeof(u32);
+		for (i = 0, j = 0; i < len; j++) {
+			if (j >= npipes) {
+				pr_err("invalid panic ctrl enries for prop: %s\n",
+						prop_name);
+				return;
+			}
+
+			pipe = &pipe_list[j];
+			pipe->panic_ctrl_ndx = be32_to_cpu(arr[i++]);
+		}
+		if (j != npipes)
+			pr_err("%s: %d entries found. required %d\n",
+				prop_name, j, npipes);
+	} else {
+		pr_debug("panic ctrl enabled but property '%s' not found\n",
+								prop_name);
+	}
+}
+
+/*
+ * mdss_mdp_parse_dt_pipe_helper() - parse and allocate one SSPP pipe type
+ * @pdev:          MDP platform device
+ * @ptype:         pipe type constant (VIG/RGB/DMA/CURSOR)
+ * @ptypestr:      type string used to build DT property names
+ * @out_plist:     receives the allocated pipe array (NULL when cnt == 0)
+ * @len:           pipe count requested by DT
+ * @priority_base: base priority for pipes of this type
+ *
+ * Returns the number of pipes set up (>= 0) or a negative errno; the
+ * pipe array is devm-allocated and devm_kfree'd on a parse failure.
+ */
+static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
+		u32 ptype, char *ptypestr,
+		struct mdss_mdp_pipe **out_plist,
+		size_t len,
+		u8 priority_base)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 offsets[MDSS_MDP_MAX_SSPP];
+	u32 ftch_id[MDSS_MDP_MAX_SSPP];
+	u32 xin_id[MDSS_MDP_MAX_SSPP];
+	u32 pnums[MDSS_MDP_MAX_SSPP];
+	struct mdss_mdp_pipe *pipe_list;
+	char prop_name[64];
+	int i, cnt, rc;
+	u32 rects_per_sspp;
+
+	if (!out_plist)
+		return -EINVAL;
+
+	/* Collect the hardware pipe numbers belonging to this type. */
+	for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
+		if (ptype == get_pipe_type_from_num(i)) {
+			pnums[cnt] = i;
+			cnt++;
+		}
+	}
+
+	if (cnt < len)
+		pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
+				ptypestr, len, cnt);
+	if (cnt == 0) {
+		*out_plist = NULL;
+
+		return 0;
+	}
+
+	/* by default works in single rect mode unless otherwise noted */
+	rects_per_sspp = mdata->rects_per_sspp[ptype] ? : 1;
+
+	pipe_list = devm_kzalloc(&pdev->dev,
+			(sizeof(struct mdss_mdp_pipe) * cnt * rects_per_sspp),
+			GFP_KERNEL);
+	if (!pipe_list)
+		return -ENOMEM;
+
+	/* Targets with pixel RAM (and cursor pipes) have no SMP fetch ids. */
+	if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
+		for (i = 0; i < cnt; i++)
+			ftch_id[i] = -1;
+	} else {
+		snprintf(prop_name, sizeof(prop_name),
+				"qcom,mdss-pipe-%s-fetch-id", ptypestr);
+		rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
+				cnt);
+		if (rc)
+			goto parse_fail;
+	}
+
+	snprintf(prop_name, sizeof(prop_name),
+			"qcom,mdss-pipe-%s-xin-id", ptypestr);
+	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
+	if (rc)
+		goto parse_fail;
+
+	snprintf(prop_name, sizeof(prop_name),
+			"qcom,mdss-pipe-%s-off", ptypestr);
+	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
+	if (rc)
+		goto parse_fail;
+
+	rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
+			xin_id, ptype, pnums, cnt, rects_per_sspp,
+			priority_base);
+	if (rc)
+		goto parse_fail;
+
+	snprintf(prop_name, sizeof(prop_name),
+			"qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
+	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
+			pipe_list, cnt);
+	if (rc)
+		goto parse_fail;
+
+	*out_plist = pipe_list;
+
+	return cnt;
+parse_fail:
+	devm_kfree(&pdev->dev, pipe_list);
+
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 nfids = 0, len, nxids = 0, npipes = 0;
+ u32 sw_reset_offset = 0;
+ u32 data[4];
+
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ mdata->has_pixel_ram = !mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-smp-data");
+
+ mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-vig-off");
+ mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-rgb-off");
+ mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-dma-off");
+ mdata->ncursor_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-cursor-off");
+
+ npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;
+
+ if (!mdata->has_pixel_ram) {
+ nfids += mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-vig-fetch-id");
+ nfids += mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-rgb-fetch-id");
+ nfids += mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-dma-fetch-id");
+ if (npipes != nfids) {
+ pr_err("device tree err: unequal number of pipes and smp ids");
+ return -EINVAL;
+ }
+ }
+
+ if (mdata->nvig_pipes)
+ nxids += mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-vig-xin-id");
+ if (mdata->nrgb_pipes)
+ nxids += mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-rgb-xin-id");
+ if (mdata->ndma_pipes)
+ nxids += mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pipe-dma-xin-id");
+ if (npipes != nxids) {
+ pr_err("device tree err: unequal number of pipes and xin ids\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
+ &mdata->vig_pipes, mdata->nvig_pipes, 0);
+ if (IS_ERR_VALUE((unsigned long)rc))
+ goto parse_fail;
+ mdata->nvig_pipes = rc;
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
+ &mdata->rgb_pipes, mdata->nrgb_pipes,
+ mdata->nvig_pipes);
+ if (IS_ERR_VALUE((unsigned long)rc))
+ goto parse_fail;
+ mdata->nrgb_pipes = rc;
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
+ &mdata->dma_pipes, mdata->ndma_pipes,
+ mdata->nvig_pipes + mdata->nrgb_pipes);
+ if (IS_ERR_VALUE((unsigned long)rc))
+ goto parse_fail;
+ mdata->ndma_pipes = rc;
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
+ "cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
+ 0);
+ if (IS_ERR_VALUE((unsigned long)rc))
+ goto parse_fail;
+ mdata->ncursor_pipes = rc;
+
+ rc = 0;
+
+ mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
+ &sw_reset_offset, 1);
+ if (sw_reset_offset) {
+ if (mdata->vig_pipes)
+ mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
+ "qcom,mdss-pipe-vig-sw-reset-map",
+ mdata->vig_pipes, mdata->nvig_pipes);
+ if (mdata->rgb_pipes)
+ mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
+ "qcom,mdss-pipe-rgb-sw-reset-map",
+ mdata->rgb_pipes, mdata->nrgb_pipes);
+ if (mdata->dma_pipes)
+ mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
+ "qcom,mdss-pipe-dma-sw-reset-map",
+ mdata->dma_pipes, mdata->ndma_pipes);
+ }
+
+ mdata->has_panic_ctrl = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-panic-ctrl");
+ if (mdata->has_panic_ctrl) {
+ if (mdata->vig_pipes)
+ mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
+ "qcom,mdss-pipe-vig-panic-ctrl-offsets",
+ mdata->vig_pipes, mdata->nvig_pipes);
+ if (mdata->rgb_pipes)
+ mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
+ "qcom,mdss-pipe-rgb-panic-ctrl-offsets",
+ mdata->rgb_pipes, mdata->nrgb_pipes);
+ if (mdata->dma_pipes)
+ mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
+ "qcom,mdss-pipe-dma-panic-ctrl-offsets",
+ mdata->dma_pipes, mdata->ndma_pipes);
+ }
+
+ len = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-per-pipe-panic-luts");
+ if (len != 4) {
+ pr_debug("Unable to read per-pipe-panic-luts\n");
+ } else {
+ rc = mdss_mdp_parse_dt_handler(pdev,
+ "qcom,mdss-per-pipe-panic-luts", data, len);
+ mdata->default_panic_lut_per_pipe_linear = data[0];
+ mdata->default_panic_lut_per_pipe_tile = data[1];
+ mdata->default_robust_lut_per_pipe_linear = data[2];
+ mdata->default_robust_lut_per_pipe_tile = data[3];
+ pr_debug("per pipe panic lut [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
+ data[0], data[1], data[2], data[3]);
+ }
+
+parse_fail:
+ return rc;
+}
+
+static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
+{
+
+ u32 nmixers, npingpong;
+ int rc = 0;
+ u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
+ *pingpong_offsets = NULL;
+ u32 is_virtual_mixer_req = false;
+
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-mixer-intf-off");
+ mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-mixer-wb-off");
+ mdata->ndspp = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-dspp-off");
+ npingpong = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-pingpong-off");
+ nmixers = mdata->nmixers_intf + mdata->nmixers_wb;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-mixer-width", &mdata->max_mixer_width);
+ if (rc) {
+ pr_err("device tree err: failed to get max mixer width\n");
+ return -EINVAL;
+ }
+
+ if (mdata->nmixers_intf < mdata->ndspp) {
+ pr_err("device tree err: no of dspp are greater than intf mixers\n");
+ return -EINVAL;
+ }
+
+ if (mdata->nmixers_intf != npingpong) {
+ pr_err("device tree err: unequal no of pingpong and intf mixers\n");
+ return -EINVAL;
+ }
+
+ mixer_offsets = kcalloc(nmixers, sizeof(u32), GFP_KERNEL);
+ if (!mixer_offsets)
+ return -ENOMEM;
+
+ dspp_offsets = kcalloc(mdata->ndspp, sizeof(u32), GFP_KERNEL);
+ if (!dspp_offsets) {
+ rc = -ENOMEM;
+ goto dspp_alloc_fail;
+ }
+ pingpong_offsets = kcalloc(npingpong, sizeof(u32), GFP_KERNEL);
+ if (!pingpong_offsets) {
+ rc = -ENOMEM;
+ goto pingpong_alloc_fail;
+ }
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
+ mixer_offsets, mdata->nmixers_intf);
+ if (rc)
+ goto parse_done;
+
+ mdata->has_separate_rotator = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-separate-rotator");
+ if (mdata->nmixers_wb) {
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
+ mixer_offsets + mdata->nmixers_intf,
+ mdata->nmixers_wb);
+ if (rc)
+ goto parse_done;
+ } else if (!mdata->has_separate_rotator) {
+ /*
+ * If writeback mixers are not available, put the number of
+ * writeback mixers equal to number of DMA pipes so that
+ * later same number of virtual writeback mixers can be
+ * allocated.
+ */
+ mdata->nmixers_wb = mdata->ndma_pipes;
+ is_virtual_mixer_req = true;
+ }
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
+ dspp_offsets, mdata->ndspp);
+ if (rc)
+ goto parse_done;
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
+ pingpong_offsets, npingpong);
+ if (rc)
+ goto parse_done;
+
+ rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
+ dspp_offsets, pingpong_offsets,
+ MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
+ if (rc)
+ goto parse_done;
+
+ if (mdata->nmixers_wb) {
+ if (is_virtual_mixer_req) {
+ /*
+ * Replicate last interface mixers based on number of
+ * dma pipes available as virtual writeback mixers.
+ */
+ rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
+ mdata->nmixers_intf - mdata->ndma_pipes,
+ NULL, NULL, MDSS_MDP_MIXER_TYPE_WRITEBACK,
+ mdata->nmixers_wb);
+ if (rc)
+ goto parse_done;
+ } else {
+ rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
+ mdata->nmixers_intf, NULL, NULL,
+ MDSS_MDP_MIXER_TYPE_WRITEBACK,
+ mdata->nmixers_wb);
+ if (rc)
+ goto parse_done;
+ }
+ }
+
+parse_done:
+ kfree(pingpong_offsets);
+pingpong_alloc_fail:
+ kfree(dspp_offsets);
+dspp_alloc_fail:
+ kfree(mixer_offsets);
+
+ return rc;
+}
+
+static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
+ u32 *cdm_offsets, u32 len)
+{
+ struct mdss_mdp_cdm *head;
+ u32 i = 0;
+
+ head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_cdm) *
+ len, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ head[i].num = i;
+ head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
+ atomic_set(&head[i].kref.refcount, 0);
+ mutex_init(&head[i].lock);
+ init_completion(&head[i].free_comp);
+ pr_debug("%s: cdm off (%d) = %pK\n", __func__, i, head[i].base);
+ }
+
+ mdata->cdm_off = head;
+ mutex_init(&mdata->cdm_lock);
+ return 0;
+}
+
+static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 *cdm_offsets = NULL;
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ mdata->ncdm = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-cdm-off");
+
+ if (!mdata->ncdm) {
+ rc = 0;
+ pr_debug("%s: No CDM offsets present in DT\n", __func__);
+ goto end;
+ }
+ pr_debug("%s: cdm len == %d\n", __func__, mdata->ncdm);
+ cdm_offsets = kcalloc(mdata->ncdm, sizeof(u32), GFP_KERNEL);
+ if (!cdm_offsets) {
+ rc = -ENOMEM;
+ mdata->ncdm = 0;
+ goto end;
+ }
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-cdm-off", cdm_offsets,
+ mdata->ncdm);
+ if (rc) {
+ pr_err("device tree err: failed to get cdm offsets\n");
+ goto fail;
+ }
+
+ rc = mdss_mdp_cdm_addr_setup(mdata, cdm_offsets, mdata->ncdm);
+ if (rc) {
+ pr_err("%s: CDM address setup failed\n", __func__);
+ goto fail;
+ }
+
+fail:
+ kfree(cdm_offsets);
+ if (rc)
+ mdata->ncdm = 0;
+end:
+ return rc;
+}
+
+static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
+ u32 *dsc_offsets, u32 len)
+{
+ struct mdss_mdp_dsc *head;
+ u32 i = 0;
+
+ head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_dsc) *
+ len, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ head[i].num = i;
+ head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
+ pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
+ }
+
+ mdata->dsc_off = head;
+ return 0;
+}
+
+static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 *dsc_offsets = NULL;
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ mdata->ndsc = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dsc-off");
+ if (!mdata->ndsc) {
+ rc = 0;
+ pr_debug("No DSC offsets present in DT\n");
+ goto end;
+ }
+ pr_debug("dsc len == %d\n", mdata->ndsc);
+
+ dsc_offsets = kcalloc(mdata->ndsc, sizeof(u32), GFP_KERNEL);
+ if (!dsc_offsets) {
+ rc = -ENOMEM;
+ mdata->ndsc = 0;
+ goto end;
+ }
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dsc-off", dsc_offsets,
+ mdata->ndsc);
+ if (rc) {
+ pr_err("device tree err: failed to get cdm offsets\n");
+ goto fail;
+ }
+
+ rc = mdss_mdp_dsc_addr_setup(mdata, dsc_offsets, mdata->ndsc);
+ if (rc) {
+ pr_err("%s: DSC address setup failed\n", __func__);
+ goto fail;
+ }
+
+fail:
+ kfree(dsc_offsets);
+ if (rc)
+ mdata->ndsc = 0;
+end:
+ return rc;
+}
+
+static int mdss_mdp_parse_dt_wb(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 *wb_offsets = NULL;
+ u32 num_wb_mixer, nwb_offsets, num_intf_wb = 0;
+ const char *wfd_data;
+ struct mdss_data_type *mdata;
+
+ mdata = platform_get_drvdata(pdev);
+
+ num_wb_mixer = mdata->nmixers_wb;
+
+ wfd_data = of_get_property(pdev->dev.of_node,
+ "qcom,mdss-wfd-mode", NULL);
+ if (wfd_data && strcmp(wfd_data, "shared") != 0)
+ num_intf_wb = 1;
+
+ nwb_offsets = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-wb-off");
+
+ wb_offsets = kcalloc(nwb_offsets, sizeof(u32), GFP_KERNEL);
+ if (!wb_offsets)
+ return -ENOMEM;
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
+ wb_offsets, nwb_offsets);
+ if (rc)
+ goto wb_parse_done;
+
+ rc = mdss_mdp_wb_addr_setup(mdata, num_wb_mixer, num_intf_wb);
+ if (rc)
+ goto wb_parse_done;
+
+ mdata->nwb_offsets = nwb_offsets;
+ mdata->wb_offsets = wb_offsets;
+
+ return 0;
+
+wb_parse_done:
+ kfree(wb_offsets);
+ return rc;
+}
+
+static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 *ctl_offsets = NULL;
+
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-ctl-off");
+
+ if (mdata->nctl < mdata->nwb) {
+ pr_err("device tree err: number of ctl greater than wb\n");
+ rc = -EINVAL;
+ goto parse_done;
+ }
+
+ ctl_offsets = kcalloc(mdata->nctl, sizeof(u32), GFP_KERNEL);
+ if (!ctl_offsets)
+ return -ENOMEM;
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
+ ctl_offsets, mdata->nctl);
+ if (rc)
+ goto parse_done;
+
+ rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, mdata->nctl);
+ if (rc)
+ goto parse_done;
+
+parse_done:
+ kfree(ctl_offsets);
+
+ return rc;
+}
+
+static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ u32 count;
+ u32 *offsets;
+ int rc;
+
+
+ count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
+ if (count == 0)
+ return -EINVAL;
+
+ offsets = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!offsets)
+ return -ENOMEM;
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
+ offsets, count);
+ if (rc)
+ goto parse_fail;
+
+ rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
+ if (rc)
+ pr_err("unable to setup video interfaces\n");
+
+parse_fail:
+ kfree(offsets);
+
+ return rc;
+}
+
/*
 * mdss_mdp_update_smp_map() - reserve fixed SMP memory macro blocks (MMBs)
 * @pdev:     platform device; driver data holds the global MMB allocation map
 * @data:     raw big-endian DT property: per-pipe [count, mmb0, mmb1, ...]
 * @len:      property length in bytes (converted to u32 count below)
 * @pipe_cnt: number of pipes in @pipes
 * @pipes:    pipe array whose smp_map[0].fixed bitmaps are populated
 *
 * Walks the flattened DT array pipe by pipe, marks each listed MMB as
 * fixed for that pipe, and folds the result into the global allocation
 * map. Fails if the data describes more pipes than @pipe_cnt, an MMB
 * index is out of range, or two pipes claim the same MMB.
 *
 * Return: 0 on success, -EINVAL on malformed data.
 */
static int mdss_mdp_update_smp_map(struct platform_device *pdev,
		const u32 *data, int len, int pipe_cnt,
		struct mdss_mdp_pipe *pipes)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	int i, j, k;
	u32 cnt, mmb;

	/* property length arrives in bytes; convert to u32 element count */
	len /= sizeof(u32);
	for (i = 0, k = 0; i < len; k++) {
		struct mdss_mdp_pipe *pipe = NULL;

		if (k >= pipe_cnt) {
			pr_err("invalid fixed mmbs\n");
			return -EINVAL;
		}

		pipe = &pipes[k];

		/* first word per pipe is the number of MMBs that follow */
		cnt = be32_to_cpu(data[i++]);
		if (cnt == 0)
			continue;

		for (j = 0; j < cnt; j++) {
			mmb = be32_to_cpu(data[i++]);
			/*
			 * NOTE(review): this accepts mmb == smp_mb_cnt. If
			 * MMB indices are 0-based that is an off-by-one
			 * (set_bit past the last valid bit) — confirm the
			 * index base against the smp_map bitmap size.
			 */
			if (mmb > mdata->smp_mb_cnt) {
				pr_err("overflow mmb:%d pipe:%d: max:%d\n",
						mmb, k, mdata->smp_mb_cnt);
				return -EINVAL;
			}
			set_bit(mmb, pipe->smp_map[0].fixed);
		}
		/* a fixed MMB must not already be claimed by another pipe */
		if (bitmap_intersects(pipe->smp_map[0].fixed,
					mdata->mmb_alloc_map,
					mdata->smp_mb_cnt)) {
			pr_err("overlapping fixed mmb map\n");
			return -EINVAL;
		}
		bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed,
				mdata->mmb_alloc_map, mdata->smp_mb_cnt);
	}
	return 0;
}
+
/*
 * mdss_mdp_parse_dt_smp() - parse shared memory pool (SMP) configuration
 *
 * Reads the SMP block count and size, the per-pipe MMB quota, and any
 * fixed MMB assignments for RGB/VIG pipes. Targets with fixed pixel RAM
 * omit "qcom,mdss-smp-data" entirely, which is treated as success.
 *
 * Return: 0 on success or when SMP data is absent, negative errno on
 * malformed data or setup failure.
 */
static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 num;
	u32 data[2];
	int rc, len;
	const u32 *arr;

	num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
	/*
	 * This property is optional for targets with fix pixel ram. Rest
	 * must provide no. of smp and size of each block.
	 */
	if (!num)
		return 0;
	else if (num != 2)
		return -EINVAL;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
	if (rc)
		return rc;

	/* data[0] = number of SMP blocks, data[1] = size of each block */
	rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);

	if (rc) {
		pr_err("unable to setup smp data\n");
		return rc;
	}

	/* optional per-pipe MMB cap; 0 (unlimited/default) when absent */
	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-smp-mb-per-pipe", data);
	mdata->smp_mb_per_pipe = (!rc ? data[0] : 0);

	rc = 0;
	/* fixed MMB reservations are optional; failures only warn */
	arr = of_get_property(pdev->dev.of_node,
			"qcom,mdss-pipe-rgb-fixed-mmb", &len);
	if (arr) {
		rc = mdss_mdp_update_smp_map(pdev, arr, len,
				mdata->nrgb_pipes, mdata->rgb_pipes);

		if (rc)
			pr_warn("unable to update smp map for RGB pipes\n");
	}

	arr = of_get_property(pdev->dev.of_node,
			"qcom,mdss-pipe-vig-fixed-mmb", &len);
	if (arr) {
		rc = mdss_mdp_update_smp_map(pdev, arr, len,
				mdata->nvig_pipes, mdata->vig_pipes);

		if (rc)
			pr_warn("unable to update smp map for VIG pipes\n");
	}
	return rc;
}
+
+static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev,
+ char *prop_name, struct mult_factor *ff)
+{
+ int rc;
+ u32 data[2] = {1, 1};
+
+ rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2);
+ if (rc) {
+ pr_debug("err reading %s\n", prop_name);
+ } else {
+ ff->numer = data[0];
+ ff->denom = data[1];
+ }
+}
+
+static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ struct mdss_prefill_data *prefill = &mdata->prefill_data;
+ int rc;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-outstanding-buffer-bytes",
+ &prefill->ot_bytes);
+ if (rc) {
+ pr_err("prefill outstanding buffer bytes not specified\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes);
+ if (rc) {
+ pr_err("prefill y buffer bytes not specified\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-scaler-buffer-lines-bilinear",
+ &prefill->y_scaler_lines_bilinear);
+ if (rc) {
+ pr_err("prefill scaler lines for bilinear not specified\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-scaler-buffer-lines-caf",
+ &prefill->y_scaler_lines_caf);
+ if (rc) {
+ pr_debug("prefill scaler lines for caf not specified\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-post-scaler-buffer-pixels",
+ &prefill->post_scaler_pixels);
+ if (rc) {
+ pr_err("prefill post scaler buffer pixels not specified\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-pingpong-buffer-pixels",
+ &prefill->pp_pixels);
+ if (rc) {
+ pr_err("prefill pingpong buffer lines not specified\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines);
+ if (rc)
+ pr_debug("prefill FBC lines not specified\n");
+
+ return 0;
+}
+
+static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ int rc;
+
+ mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-vbif-qos-rt-setting");
+ if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
+ mdata->vbif_rt_qos = kcalloc(mdata->npriority_lvl,
+ sizeof(u32), GFP_KERNEL);
+ if (!mdata->vbif_rt_qos)
+ return;
+
+ rc = mdss_mdp_parse_dt_handler(pdev,
+ "qcom,mdss-vbif-qos-rt-setting",
+ mdata->vbif_rt_qos, mdata->npriority_lvl);
+ if (rc) {
+ pr_debug("rt setting not found\n");
+ return;
+ }
+ } else {
+ mdata->npriority_lvl = 0;
+ pr_debug("Invalid or no vbif qos rt setting\n");
+ return;
+ }
+
+ mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-vbif-qos-nrt-setting");
+ if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
+ mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
+ sizeof(u32), GFP_KERNEL);
+ if (!mdata->vbif_nrt_qos)
+ return;
+
+ rc = mdss_mdp_parse_dt_handler(pdev,
+ "qcom,mdss-vbif-qos-nrt-setting", mdata->vbif_nrt_qos,
+ mdata->npriority_lvl);
+ if (rc) {
+ pr_debug("nrt setting not found\n");
+ return;
+ }
+ } else {
+ mdata->npriority_lvl = 0;
+ pr_debug("Invalid or no vbif qos nrt seting\n");
+ }
+}
+
+static void mdss_mdp_parse_max_bw_array(const u32 *arr,
+ struct mdss_max_bw_settings *max_bw_settings, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ max_bw_settings->mdss_max_bw_mode = be32_to_cpu(arr[i*2]);
+ max_bw_settings->mdss_max_bw_val = be32_to_cpu(arr[(i*2)+1]);
+ max_bw_settings++;
+ }
+}
+
+static void mdss_mdp_parse_max_bandwidth(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ struct mdss_max_bw_settings *max_bw_settings;
+ int max_bw_settings_cnt = 0;
+ const u32 *max_bw;
+
+ max_bw = of_get_property(pdev->dev.of_node, "qcom,max-bw-settings",
+ &max_bw_settings_cnt);
+
+ if (!max_bw || !max_bw_settings_cnt) {
+ pr_debug("MDSS max bandwidth settings not found\n");
+ return;
+ }
+
+ max_bw_settings_cnt /= 2 * sizeof(u32);
+
+ max_bw_settings = devm_kzalloc(&pdev->dev, sizeof(*max_bw_settings)
+ * max_bw_settings_cnt, GFP_KERNEL);
+ if (!max_bw_settings)
+ return;
+
+ mdss_mdp_parse_max_bw_array(max_bw, max_bw_settings,
+ max_bw_settings_cnt);
+
+ mdata->max_bw_settings = max_bw_settings;
+ mdata->max_bw_settings_cnt = max_bw_settings_cnt;
+}
+
+static void mdss_mdp_parse_per_pipe_bandwidth(struct platform_device *pdev)
+{
+
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ struct mdss_max_bw_settings *max_bw_per_pipe_settings;
+ int max_bw_settings_cnt = 0;
+ const u32 *max_bw_settings;
+ u32 max_bw, min_bw, threshold, i = 0;
+
+ max_bw_settings = of_get_property(pdev->dev.of_node,
+ "qcom,max-bandwidth-per-pipe-kbps",
+ &max_bw_settings_cnt);
+
+ if (!max_bw_settings || !max_bw_settings_cnt) {
+ pr_debug("MDSS per pipe max bandwidth settings not found\n");
+ return;
+ }
+
+ /* Support targets where a common per pipe max bw is provided */
+ if ((max_bw_settings_cnt / sizeof(u32)) == 1) {
+ mdata->max_bw_per_pipe = be32_to_cpu(max_bw_settings[0]);
+ mdata->max_per_pipe_bw_settings = NULL;
+ pr_debug("Common per pipe max bandwidth provided\n");
+ return;
+ }
+
+ max_bw_settings_cnt /= 2 * sizeof(u32);
+
+ max_bw_per_pipe_settings = devm_kzalloc(&pdev->dev,
+ sizeof(struct mdss_max_bw_settings) * max_bw_settings_cnt,
+ GFP_KERNEL);
+ if (!max_bw_per_pipe_settings) {
+ pr_err("Memory allocation failed for max_bw_settings\n");
+ return;
+ }
+
+ mdss_mdp_parse_max_bw_array(max_bw_settings, max_bw_per_pipe_settings,
+ max_bw_settings_cnt);
+ mdata->max_per_pipe_bw_settings = max_bw_per_pipe_settings;
+ mdata->mdss_per_pipe_bw_cnt = max_bw_settings_cnt;
+
+ /* Calculate min and max allowed per pipe BW */
+ min_bw = mdata->max_bw_high;
+ max_bw = 0;
+
+ while (i < max_bw_settings_cnt) {
+ threshold = mdata->max_per_pipe_bw_settings[i].mdss_max_bw_val;
+ if (threshold > max_bw)
+ max_bw = threshold;
+ if (threshold < min_bw)
+ min_bw = threshold;
+ ++i;
+ }
+ mdata->max_bw_per_pipe = max_bw;
+ mdata->min_bw_per_pipe = min_bw;
+}
+
/*
 * mdss_mdp_parse_dt_misc() - parse miscellaneous MDSS capabilities from DT
 *
 * Collects optional feature flags (BWC, decimation, source split, ...),
 * outstanding-transaction limits, WFD mode, bus fudge factors, bandwidth
 * limits, clock levels, VBIF QoS and rotator-downscale limits. Most
 * properties are optional and fall back to documented defaults; only the
 * pingpong-split sub-properties can fail the probe.
 *
 * Return: 0 on success, negative errno on a hard failure (pingpong-split
 * offsets missing, or clock-level allocation failure).
 */
static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 data, slave_pingpong_off;
	const char *wfd_data;
	int rc;
	struct property *prop = NULL;

	/* rotator block size: default 128 when not specified */
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
		&data);
	mdata->rot_block_size = (!rc ? data : 128);

	/* default outstanding-transaction limits: 0 = no limit configured */
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-rd-limit", &data);
	mdata->default_ot_rd_limit = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-wr-limit", &data);
	mdata->default_ot_wr_limit = (!rc ? data : 0);

	/* boolean capability flags; absent means false */
	mdata->has_non_scalar_rgb = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-non-scalar-rgb");
	mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-bwc");
	mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-decimation");
	mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-lut-read");
	/* hist vote is needed unless DT explicitly opts out */
	mdata->needs_hist_vote = !(of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-hist-vote"));
	/* WFD (writeback for display) mode: intf/shared/dedicated,
	 * defaulting to shared for unknown or missing values */
	wfd_data = of_get_property(pdev->dev.of_node,
					"qcom,mdss-wfd-mode", NULL);
	if (wfd_data) {
		pr_debug("wfd mode: %s\n", wfd_data);
		if (!strcmp(wfd_data, "intf")) {
			mdata->wfd_mode = MDSS_MDP_WFD_INTERFACE;
		} else if (!strcmp(wfd_data, "shared")) {
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		} else if (!strcmp(wfd_data, "dedicated")) {
			mdata->wfd_mode = MDSS_MDP_WFD_DEDICATED;
		} else {
			pr_debug("wfd default mode: Shared\n");
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		}
	} else {
		pr_warn("wfd mode not configured. Set to default: Shared\n");
		mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
	}

	mdata->has_src_split = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-source-split");
	mdata->has_fixed_qos_arbiter_enabled =
			of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-fixed-qos-arbiter-enabled");
	mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-idle-power-collapse-enabled");

	/* batfet regulator is only required when a supply is declared */
	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
	mdata->batfet_required = prop ? true : false;
	mdata->en_svs_high = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-en-svs-high");
	if (!mdata->en_svs_high)
		pr_debug("%s: svs_high is not enabled\n", __func__);
	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
	if (rc)
		pr_debug("Could not read optional property: highest bank bit\n");

	/* pingpong split requires the slave pingpong and ppb offsets */
	mdata->has_pingpong_split = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-pingpong-split");

	if (mdata->has_pingpong_split) {
		rc = of_property_read_u32(pdev->dev.of_node,
				"qcom,mdss-slave-pingpong-off",
				&slave_pingpong_off);
		if (rc) {
			pr_err("Error in device tree: slave pingpong offset\n");
			return rc;
		}
		mdata->slave_pingpong_base = mdata->mdss_io.base +
			slave_pingpong_off;
		rc = mdss_mdp_parse_dt_ppb_off(pdev);
		if (rc) {
			pr_err("Error in device tree: ppb offset not configured\n");
			return rc;
		}
	}

	/*
	 * 2x factor on AB because bus driver will divide by 2
	 * due to 2x ports to BIMC
	 */
	mdata->ab_factor.numer = 2;
	mdata->ab_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor",
		&mdata->ab_factor);

	/*
	 * 1.2 factor on ib as default value. This value is
	 * experimentally determined and should be tuned in device
	 * tree.
	 */
	mdata->ib_factor.numer = 6;
	mdata->ib_factor.denom = 5;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor",
		&mdata->ib_factor);

	/*
	 * Set overlap ib value equal to ib by default. This value can
	 * be tuned in device tree to be different from ib.
	 * This factor apply when the max bandwidth per pipe
	 * is the overlap BW.
	 */
	mdata->ib_factor_overlap.numer = mdata->ib_factor.numer;
	mdata->ib_factor_overlap.denom = mdata->ib_factor.denom;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap",
		&mdata->ib_factor_overlap);

	mdata->clk_factor.numer = 1;
	mdata->clk_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
		&mdata->clk_factor);

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
	if (rc)
		pr_debug("max bandwidth (low) property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-high-kbps", &mdata->max_bw_high);
	if (rc)
		pr_debug("max bandwidth (high) property not specified\n");

	/* per-pipe parsing reads max_bw_high, so keep this ordering */
	mdss_mdp_parse_per_pipe_bandwidth(pdev);

	mdss_mdp_parse_max_bandwidth(pdev);

	mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
					"qcom,mdss-clk-levels");

	if (mdata->nclk_lvl) {
		mdata->clock_levels = kcalloc(mdata->nclk_lvl, sizeof(u32),
							GFP_KERNEL);
		if (!mdata->clock_levels)
			return -ENOMEM;

		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
			mdata->clock_levels, mdata->nclk_lvl);
		if (rc)
			pr_debug("clock levels not found\n");
	}

	mdss_mdp_parse_vbif_qos(pdev);
	mdata->traffic_shaper_en = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-traffic-shaper-enabled");
	mdata->has_rot_dwnscale = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-rotator-downscale");
	if (mdata->has_rot_dwnscale) {
		rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-rot-downscale-min",
			&mdata->rot_dwnscale_min);
		if (rc)
			pr_err("Min rotator downscale property not specified\n");

		rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-rot-downscale-max",
			&mdata->rot_dwnscale_max);
		if (rc)
			pr_err("Max rotator downscale property not specified\n");
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-dram-channels", &mdata->bus_channels);
	if (rc)
		pr_debug("number of channels property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,max-pipe-width", &mdata->max_pipe_width);
	if (rc) {
		pr_debug("max pipe width not specified. Using default value\n");
		mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
	}
	/* all trailing properties above are optional, so report success */
	return 0;
}
+
+static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ u32 *ad_offsets = NULL;
+ int rc;
+
+ mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off");
+
+ if (mdata->nad_cfgs == 0) {
+ mdata->ad_cfgs = NULL;
+ return 0;
+ }
+
+ if (mdata->nad_cfgs > mdata->nmixers_intf)
+ return -EINVAL;
+
+
+ mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-has-wb-ad");
+
+ ad_offsets = kcalloc(mdata->nad_cfgs, sizeof(u32), GFP_KERNEL);
+ if (!ad_offsets)
+ return -ENOMEM;
+
+ rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets,
+ mdata->nad_cfgs);
+ if (rc)
+ goto parse_done;
+
+ rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets);
+ if (rc)
+ pr_err("unable to setup assertive display\n");
+
+parse_done:
+ kfree(ad_offsets);
+ return rc;
+}
+
+static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ u32 len, index;
+ const u32 *arr;
+
+ arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-ctl-off", &len);
+ if (arr) {
+ mdata->nppb_ctl = len / sizeof(u32);
+ mdata->ppb_ctl = devm_kzalloc(&mdata->pdev->dev,
+ sizeof(u32) * mdata->nppb_ctl, GFP_KERNEL);
+
+ if (mdata->ppb_ctl == NULL)
+ return -ENOMEM;
+
+ for (index = 0; index < mdata->nppb_ctl; index++)
+ mdata->ppb_ctl[index] = be32_to_cpu(arr[index]);
+ }
+
+ arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-cfg-off", &len);
+ if (arr) {
+ mdata->nppb_cfg = len / sizeof(u32);
+ mdata->ppb_cfg = devm_kzalloc(&mdata->pdev->dev,
+ sizeof(u32) * mdata->nppb_cfg, GFP_KERNEL);
+
+ if (mdata->ppb_cfg == NULL)
+ return -ENOMEM;
+
+ for (index = 0; index < mdata->nppb_cfg; index++)
+ mdata->ppb_cfg[index] = be32_to_cpu(arr[index]);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_QCOM_BUS_SCALING
/*
 * mdss_mdp_parse_dt_bus_scale() - parse MSM bus-scaling tables from DT
 *
 * Reads the AXI path count, the optional NRT path count, and three bus
 * scaling tables: the main data bus (mandatory), the register bus and
 * the HW-RT bus (both optional; absence falls back to NULL and is not
 * an error).
 *
 * Return: 0 on success, negative errno when a mandatory property or the
 * main bus table is missing.
 */
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
{
	int rc, paths;
	struct device_node *node;
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,msm-bus,num-paths", &paths);
	if (rc) {
		pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n",
			rc);
		return rc;
	}
	mdss_res->axi_port_cnt = paths;

	/* NRT path count is only mandatory with the fixed QoS arbiter */
	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-num-nrt-paths", &mdata->nrt_axi_port_cnt);
	if (rc && mdata->has_fixed_qos_arbiter_enabled) {
		pr_err("Error. qcom,mdss-num-nrt-paths prop not found.rc=%d\n",
			rc);
		return rc;
	}
	rc = 0;

	/* main data-bus table is mandatory; NULL is mapped to -EINVAL */
	mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (IS_ERR_OR_NULL(mdata->bus_scale_table)) {
		rc = PTR_ERR(mdata->bus_scale_table);
		if (!rc)
			rc = -EINVAL;
		pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
		mdata->bus_scale_table = NULL;
		return rc;
	}

	/*
	 * if mdss-reg-bus is not found then default table is picked
	 * hence below code wont return error.
	 */
	node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus");
	if (node) {
		mdata->reg_bus_scale_table =
			msm_bus_pdata_from_node(pdev, node);
		if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) {
			rc = PTR_ERR(mdata->reg_bus_scale_table);
			/*
			 * NOTE(review): this logs only when PTR_ERR is 0
			 * (i.e. a NULL table), never for a real ERR_PTR —
			 * looks inverted relative to the pattern above;
			 * confirm intent before changing.
			 */
			if (!rc)
				pr_err("bus_pdata reg_bus failed rc=%d\n", rc);
			rc = 0;
			mdata->reg_bus_scale_table = NULL;
		}
	} else {
		rc = 0;
		mdata->reg_bus_scale_table = NULL;
		pr_debug("mdss-reg-bus not found\n");
	}

	node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-hw-rt-bus");
	if (node) {
		mdata->hw_rt_bus_scale_table =
			msm_bus_pdata_from_node(pdev, node);
		if (IS_ERR_OR_NULL(mdata->hw_rt_bus_scale_table)) {
			rc = PTR_ERR(mdata->hw_rt_bus_scale_table);
			/* NOTE(review): same only-logs-when-rc==0 pattern
			 * as the reg-bus branch above — confirm intent. */
			if (!rc)
				pr_err("hw_rt_bus_scale failed rc=%d\n", rc);
			rc = 0;
			mdata->hw_rt_bus_scale_table = NULL;
		}
	} else {
		rc = 0;
		mdata->hw_rt_bus_scale_table = NULL;
		pr_debug("mdss-hw-rt-bus not found\n");
	}

	return rc;
}
+#else
+__maybe_unused
/* Stub when CONFIG_QCOM_BUS_SCALING is disabled: no tables to parse */
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
{
	return 0;
}
+
+#endif
+
+static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
+ char *prop_name, u32 *offsets, int len)
+{
+ int rc;
+
+ rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
+ offsets, len);
+ if (rc) {
+ pr_err("Error from prop %s : u32 array read\n", prop_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
+ char *prop_name)
+{
+ int len = 0;
+
+ of_find_property(pdev->dev.of_node, prop_name, &len);
+
+ if (len < 1) {
+ pr_debug("prop %s : doesn't exist in device tree\n",
+ prop_name);
+ return 0;
+ }
+
+ len = len/sizeof(u32);
+
+ return len;
+}
+
/* Accessor for the global MDSS driver data (NULL until probe sets it) */
struct mdss_data_type *mdss_mdp_get_mdata(void)
{
	return mdss_res;
}
+
+void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable)
+{
+ int ret;
+
+ if (!mdata->batfet_required)
+ return;
+
+ if (!mdata->batfet) {
+ if (enable) {
+ mdata->batfet = devm_regulator_get(&mdata->pdev->dev,
+ "batfet");
+ if (IS_ERR_OR_NULL(mdata->batfet)) {
+ pr_debug("unable to get batfet reg. rc=%d\n",
+ PTR_RET(mdata->batfet));
+ mdata->batfet = NULL;
+ return;
+ }
+ } else {
+ pr_debug("Batfet regulator disable w/o enable\n");
+ return;
+ }
+ }
+
+ if (enable) {
+ ret = regulator_enable(mdata->batfet);
+ if (ret)
+ pr_err("regulator_enable failed\n");
+ } else {
+ regulator_disable(mdata->batfet);
+ }
+}
+
+/**
+ * mdss_is_ready() - checks if mdss is probed and ready
+ *
+ * Checks if mdss resources have been initialized
+ *
+ * returns true if mdss is ready, else returns false
+ */
+bool mdss_is_ready(void)
+{
+ return mdss_mdp_get_mdata() ? true : false;
+}
+EXPORT_SYMBOL(mdss_mdp_get_mdata);
+
+/**
+ * mdss_panel_intf_type() - checks if a given intf type is primary
+ * @intf_val: panel interface type of the individual controller
+ *
+ * Individual controller queries with MDP to check if it is
+ * configured as the primary interface.
+ *
+ * returns a pointer to the configured structure mdss_panel_cfg
+ * to the controller that's configured as the primary panel interface.
+ * returns NULL on error or if @intf_val is not the configured
+ * controller.
+ */
+struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
+{
+ if (!mdss_res || !mdss_res->pan_cfg.init_done)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (mdss_res->pan_cfg.pan_intf == intf_val)
+ return &mdss_res->pan_cfg;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(mdss_panel_intf_type);
+
+struct irq_info *mdss_intr_line()
+{
+ return mdss_mdp_hw.irq_info;
+}
+EXPORT_SYMBOL(mdss_intr_line);
+
/*
 * mdss_mdp_wait_for_xin_halt() - wait for a VBIF client (xin) to halt
 * @xin_id:      VBIF client id; its halt bit in XIN_HALT_CTRL1 is polled
 * @is_vbif_nrt: true to poll the non-real-time VBIF, false for the RT VBIF
 *
 * Polls MMSS_VBIF_XIN_HALT_CTRL1 every 1000us until the client's halt
 * status bit is set or XIN_HALT_TIMEOUT_US elapses. On timeout the
 * xlog timeout handler dumps the VBIF/debug-bus state and panics.
 *
 * Return: 0 when the client halted, -ETIMEDOUT otherwise.
 */
int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt)
{
	void __iomem *vbif_base;
	u32 status;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 idle_mask = BIT(xin_id);
	int rc;

	vbif_base = is_vbif_nrt ? mdata->vbif_nrt_io.base :
				mdata->vbif_io.base;

	rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
		status, (status & idle_mask),
		1000, XIN_HALT_TIMEOUT_US);
	if (rc == -ETIMEDOUT) {
		pr_err("VBIF client %d not halting. TIMEDOUT.\n",
			xin_id);
		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
			"dbg_bus", "vbif_dbg_bus", "panic");
	} else {
		pr_debug("VBIF client %d is halted\n", xin_id);
	}

	return rc;
}
+
+/**
+ * force_on_xin_clk() - enable/disable the force-on for the pipe clock
+ * @bit_off: offset of the bit to enable/disable the force-on.
+ * @clk_ctl_reg_off: register offset for the clock control.
+ * @enable: boolean to indicate if the force-on of the clock needs to be
+ * enabled or disabled.
+ *
+ * This function returns:
+ * true - if the clock is forced-on by this function
+ * false - if the clock was already forced on
+ * It is the caller responsibility to check if this function is forcing
+ * the clock on; if so, it will need to remove the force of the clock,
+ * otherwise it should avoid to remove the force-on.
+ * Clocks must be on when calling this function.
+ */
+bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
+{
+ u32 val;
+ u32 force_on_mask;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool clk_forced_on = false;
+
+ force_on_mask = BIT(bit_off);
+ val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
+
+ clk_forced_on = !(force_on_mask & val);
+
+ if (true == enable)
+ val |= force_on_mask;
+ else
+ val &= ~force_on_mask;
+
+ writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
+
+ return clk_forced_on;
+}
+
+static void apply_dynamic_ot_limit(u32 *ot_lim,
+ struct mdss_mdp_set_ot_params *params)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 res, read_vbif_ot;
+ u32 rot_ot = 4;
+
+ if (false == test_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map))
+ return;
+
+ /* Dynamic OT setting done only for rotator and WFD */
+ if (!((params->is_rot && params->is_yuv) || params->is_wb))
+ return;
+
+ res = params->width * params->height;
+
+ pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n",
+ params->width, params->height, params->is_rot,
+ params->is_yuv, params->is_wb, res, params->frame_rate);
+
+ switch (mdata->mdp_rev) {
+ case MDSS_MDP_HW_REV_114:
+ /*
+ * MDP rev is same for msm8937 and msm8940, but rotator OT
+ * recommendations are different. Setting it based on AXI OT.
+ */
+ read_vbif_ot = MDSS_VBIF_READ(mdata, MMSS_VBIF_OUT_RD_LIM_CONF0,
+ false);
+ rot_ot = (read_vbif_ot == 0x10) ? 4 : 8;
+ /* fall-through */
+ case MDSS_MDP_HW_REV_115:
+ case MDSS_MDP_HW_REV_116:
+ if ((res <= RES_1080p) && (params->frame_rate <= 30))
+ *ot_lim = 2;
+ else if (params->is_rot && params->is_yuv)
+ *ot_lim = rot_ot;
+ else
+ *ot_lim = 6;
+ break;
+ default:
+ if (res <= RES_1080p) {
+ *ot_lim = 2;
+ } else if (res <= RES_UHD) {
+ if (params->is_rot && params->is_yuv)
+ *ot_lim = 8;
+ else
+ *ot_lim = 16;
+ }
+ break;
+ }
+}
+
+static u32 get_ot_limit(u32 reg_off, u32 bit_off,
+ struct mdss_mdp_set_ot_params *params)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 ot_lim = 0;
+ u32 is_vbif_nrt, val;
+
+ if (mdata->default_ot_wr_limit &&
+ (params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF))
+ ot_lim = mdata->default_ot_wr_limit;
+ else if (mdata->default_ot_rd_limit &&
+ (params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF))
+ ot_lim = mdata->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ apply_dynamic_ot_limit(&ot_lim, params);
+
+ is_vbif_nrt = params->is_vbif_nrt;
+ val = MDSS_VBIF_READ(mdata, reg_off, is_vbif_nrt);
+ val &= (0xFF << bit_off);
+ val = val >> bit_off;
+
+ if (val == ot_lim)
+ ot_lim = 0;
+
+exit:
+ pr_debug("ot_lim=%d\n", ot_lim);
+ return ot_lim;
+}
+
+void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 ot_lim;
+ u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
+ params->reg_off_vbif_lim_conf;
+ u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
+ bool is_vbif_nrt = params->is_vbif_nrt;
+ u32 reg_val;
+ bool forced_on;
+
+ ot_lim = get_ot_limit(
+ reg_off_vbif_lim_conf,
+ bit_off_vbif_lim_conf,
+ params) & 0xFF;
+
+ if (ot_lim == 0)
+ goto exit;
+
+ trace_mdp_perf_set_ot(params->num, params->xin_id, ot_lim,
+ is_vbif_nrt);
+
+ mutex_lock(&mdata->reg_lock);
+
+ forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+ params->reg_off_mdp_clk_ctrl, true);
+
+ reg_val = MDSS_VBIF_READ(mdata, reg_off_vbif_lim_conf,
+ is_vbif_nrt);
+ reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
+ reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
+ MDSS_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val,
+ is_vbif_nrt);
+
+ reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+ is_vbif_nrt);
+ MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+ reg_val | BIT(params->xin_id), is_vbif_nrt);
+
+ mutex_unlock(&mdata->reg_lock);
+ mdss_mdp_wait_for_xin_halt(params->xin_id, is_vbif_nrt);
+ mutex_lock(&mdata->reg_lock);
+
+ reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+ is_vbif_nrt);
+ MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+ reg_val & ~BIT(params->xin_id), is_vbif_nrt);
+
+ if (forced_on)
+ force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+ params->reg_off_mdp_clk_ctrl, false);
+
+ mutex_unlock(&mdata->reg_lock);
+
+exit:
+ return;
+}
+
+#define RPM_MISC_REQ_TYPE 0x6373696d
+#define RPM_MISC_REQ_SVS_PLUS_KEY 0x2B737673
+
+static void mdss_mdp_config_cx_voltage(struct mdss_data_type *mdata, int enable)
+{
+ int ret = 0;
+ static struct msm_rpm_kvp rpm_kvp;
+ static uint8_t svs_en;
+
+ if (!mdata->en_svs_high)
+ return;
+
+ if (!rpm_kvp.key) {
+ rpm_kvp.key = RPM_MISC_REQ_SVS_PLUS_KEY;
+ rpm_kvp.length = sizeof(uint64_t);
+ pr_debug("%s: Initialized rpm_kvp structure\n", __func__);
+ }
+
+ if (enable) {
+ svs_en = 1;
+ rpm_kvp.data = &svs_en;
+ pr_debug("%s: voting for svs high\n", __func__);
+ ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+ RPM_MISC_REQ_TYPE, 0,
+ &rpm_kvp, 1);
+ if (ret)
+ pr_err("vote for active_set svs high failed: %d\n",
+ ret);
+ ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
+ RPM_MISC_REQ_TYPE, 0,
+ &rpm_kvp, 1);
+ if (ret)
+ pr_err("vote for sleep_set svs high failed: %d\n",
+ ret);
+ } else {
+ svs_en = 0;
+ rpm_kvp.data = &svs_en;
+ pr_debug("%s: Removing vote for svs high\n", __func__);
+ ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+ RPM_MISC_REQ_TYPE, 0,
+ &rpm_kvp, 1);
+ if (ret)
+ pr_err("Remove vote:active_set svs high failed: %d\n",
+ ret);
+ ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
+ RPM_MISC_REQ_TYPE, 0,
+ &rpm_kvp, 1);
+ if (ret)
+ pr_err("Remove vote:sleep_set svs high failed: %d\n",
+ ret);
+ }
+}
+
+static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable)
+{
+ int rc = 0;
+
+ if (!mdata->vdd_cx)
+ return rc;
+
+ if (enable) {
+ rc = regulator_set_voltage(
+ mdata->vdd_cx,
+ RPM_REGULATOR_CORNER_SVS_SOC,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc < 0)
+ goto vreg_set_voltage_fail;
+
+ pr_debug("Enabling CX power rail\n");
+ rc = regulator_enable(mdata->vdd_cx);
+ if (rc) {
+ pr_err("Failed to enable regulator.\n");
+ return rc;
+ }
+ } else {
+ pr_debug("Disabling CX power rail\n");
+ rc = regulator_disable(mdata->vdd_cx);
+ if (rc) {
+ pr_err("Failed to disable regulator.\n");
+ return rc;
+ }
+ rc = regulator_set_voltage(
+ mdata->vdd_cx,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc < 0)
+ goto vreg_set_voltage_fail;
+ }
+
+ return rc;
+
+vreg_set_voltage_fail:
+ pr_err("Set vltg fail\n");
+ return rc;
+}
+
+/**
+ * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
+ * @mdata: MDP private data
+ * @on: 1 to turn on footswitch, 0 to turn off footswitch
+ *
+ * When no active references to the MDP device node and its child nodes are
+ * held, MDSS GDSC can be turned off. However, if any panels are still
+ * active (but likely in an idle state), the vote for the CX and the batfet
+ * rails should not be released.
+ */
+static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
+{
+ int ret;
+ int active_cnt = 0;
+
+ if (!mdata->fs)
+ return;
+
+ MDSS_XLOG(on, mdata->fs_ena, mdata->idle_pc, mdata->en_svs_high,
+ atomic_read(&mdata->active_intf_cnt));
+
+ if (on) {
+ if (!mdata->fs_ena) {
+ pr_debug("Enable MDP FS\n");
+ if (mdata->venus) {
+ ret = regulator_enable(mdata->venus);
+ if (ret)
+ pr_err("venus failed to enable\n");
+ }
+
+ ret = regulator_enable(mdata->fs);
+ if (ret)
+ pr_warn("Footswitch failed to enable\n");
+ if (!mdata->idle_pc) {
+ mdss_mdp_cx_ctrl(mdata, true);
+ mdss_mdp_batfet_ctrl(mdata, true);
+ }
+ }
+ if (mdata->en_svs_high)
+ mdss_mdp_config_cx_voltage(mdata, true);
+ mdata->fs_ena = true;
+ } else {
+ if (mdata->fs_ena) {
+ pr_debug("Disable MDP FS\n");
+ active_cnt = atomic_read(&mdata->active_intf_cnt);
+ if (active_cnt != 0) {
+ /*
+ * Turning off GDSC while overlays are still
+ * active.
+ */
+ mdata->idle_pc = true;
+ pr_debug("idle pc. active overlays=%d\n",
+ active_cnt);
+ mdss_mdp_memory_retention_enter();
+ } else {
+ mdss_mdp_cx_ctrl(mdata, false);
+ mdss_mdp_batfet_ctrl(mdata, false);
+ }
+ if (mdata->en_svs_high)
+ mdss_mdp_config_cx_voltage(mdata, false);
+ regulator_disable(mdata->fs);
+ if (mdata->venus)
+ regulator_disable(mdata->venus);
+ }
+ mdata->fs_ena = false;
+ }
+}
+
+int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
+ unsigned int enable)
+{
+ struct sd_ctrl_req {
+ unsigned int enable;
+ } __attribute__ ((__packed__)) request;
+ unsigned int resp = -1;
+ int ret = 0;
+ struct scm_desc desc;
+
+ if ((enable && (mdss_get_sd_client_cnt() > 0)) ||
+ (!enable && (mdss_get_sd_client_cnt() > 1))) {
+ mdss_update_sd_client(mdata, enable);
+ return ret;
+ }
+
+ desc.args[0] = request.enable = enable;
+ desc.arginfo = SCM_ARGS(1);
+
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
+ &request, sizeof(request), &resp, sizeof(resp));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ mem_protect_sd_ctrl_id), &desc);
+ resp = desc.ret[0];
+ }
+
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
+ enable, ret, resp);
+ if (ret)
+ return ret;
+
+ mdss_update_sd_client(mdata, enable);
+ return resp;
+}
+
+static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
+{
+ mdata->suspend_fs_ena = mdata->fs_ena;
+ mdss_mdp_footswitch_ctrl(mdata, false);
+
+ pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
+
+ return 0;
+}
+
+static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
+{
+ if (mdata->suspend_fs_ena)
+ mdss_mdp_footswitch_ctrl(mdata, true);
+
+ pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mdss_mdp_pm_suspend(struct device *dev)
+{
+ struct mdss_data_type *mdata;
+
+ mdata = dev_get_drvdata(dev);
+ if (!mdata)
+ return -ENODEV;
+
+ dev_dbg(dev, "display pm suspend\n");
+
+ return mdss_mdp_suspend_sub(mdata);
+}
+
+static int mdss_mdp_pm_resume(struct device *dev)
+{
+ struct mdss_data_type *mdata;
+
+ mdata = dev_get_drvdata(dev);
+ if (!mdata)
+ return -ENODEV;
+
+ dev_dbg(dev, "display pm resume\n");
+
+ /*
+ * It is possible that the runtime status of the mdp device may
+ * have been active when the system was suspended. Reset the runtime
+ * status to suspended state after a complete system resume.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ return mdss_mdp_resume_sub(mdata);
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ if (!mdata)
+ return -ENODEV;
+
+ dev_dbg(&pdev->dev, "display suspend\n");
+
+ return mdss_mdp_suspend_sub(mdata);
+}
+
+static int mdss_mdp_resume(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ if (!mdata)
+ return -ENODEV;
+
+ dev_dbg(&pdev->dev, "display resume\n");
+
+ return mdss_mdp_resume_sub(mdata);
+}
+#else
+#define mdss_mdp_suspend NULL
+#define mdss_mdp_resume NULL
+#endif
+
+#ifdef CONFIG_PM
+static int mdss_mdp_runtime_resume(struct device *dev)
+{
+ struct mdss_data_type *mdata = dev_get_drvdata(dev);
+ bool device_on = true;
+
+ if (!mdata)
+ return -ENODEV;
+
+ dev_dbg(dev, "pm_runtime: resuming. active overlay cnt=%d\n",
+ atomic_read(&mdata->active_intf_cnt));
+
+ /* do not resume panels when coming out of idle power collapse */
+ if (!mdata->idle_pc)
+ device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+ mdss_mdp_footswitch_ctrl(mdata, true);
+
+ return 0;
+}
+
+static int mdss_mdp_runtime_idle(struct device *dev)
+{
+ struct mdss_data_type *mdata = dev_get_drvdata(dev);
+
+ if (!mdata)
+ return -ENODEV;
+
+ dev_dbg(dev, "pm_runtime: idling...\n");
+
+ return 0;
+}
+
+static int mdss_mdp_runtime_suspend(struct device *dev)
+{
+ struct mdss_data_type *mdata = dev_get_drvdata(dev);
+ bool device_on = false;
+
+ if (!mdata)
+ return -ENODEV;
+ dev_dbg(dev, "pm_runtime: suspending. active overlay cnt=%d\n",
+ atomic_read(&mdata->active_intf_cnt));
+
+ if (mdata->clk_ena) {
+ pr_err("MDP suspend failed\n");
+ return -EBUSY;
+ }
+
+ mdss_mdp_footswitch_ctrl(mdata, false);
+ /* do not suspend panels when going in to idle power collapse */
+ if (!mdata->idle_pc)
+ device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mdss_mdp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
+#ifdef CONFIG_PM
+ SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
+ mdss_mdp_runtime_resume,
+ mdss_mdp_runtime_idle)
+#endif
+};
+
+static int mdss_mdp_remove(struct platform_device *pdev)
+{
+ struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+ if (!mdata)
+ return -ENODEV;
+ pm_runtime_disable(&pdev->dev);
+ mdss_mdp_pp_term(&pdev->dev);
+ mdss_mdp_bus_scale_unregister(mdata);
+ mdss_debugfs_remove(mdata);
+ if (mdata->regulator_notif_register)
+ regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
+ return 0;
+}
+
+static const struct of_device_id mdss_mdp_dt_match[] = {
+ { .compatible = "qcom,mdss_mdp",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
+
+static struct platform_driver mdss_mdp_driver = {
+ .probe = mdss_mdp_probe,
+ .remove = mdss_mdp_remove,
+ .suspend = mdss_mdp_suspend,
+ .resume = mdss_mdp_resume,
+ .shutdown = NULL,
+ .driver = {
+ /*
+ * Driver name must match the device name added in
+ * platform.c.
+ */
+ .name = "mdp",
+ .of_match_table = mdss_mdp_dt_match,
+ .pm = &mdss_mdp_pm_ops,
+ },
+};
+
+static int mdss_mdp_register_driver(void)
+{
+ return platform_driver_register(&mdss_mdp_driver);
+}
+
+static int __init mdss_mdp_driver_init(void)
+{
+ int ret;
+
+ ret = mdss_mdp_register_driver();
+ if (ret) {
+ pr_err("mdp_register_driver() failed!\n");
+ return ret;
+ }
+
+ return 0;
+
+}
+
+module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0600);
+/*
+ * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>
+ * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
+ * config; <pan_intf> is dsi:<ctrl_id> or hdmi or edp
+ * <pan_intf_cfg> is panel interface specific string
+ * Ex: This string is panel's device node name from DT
+ * for DSI interface
+ * hdmi/edp interface does not use this string
+ * <panel_topology_cfg> is an optional string. Currently it is
+ * only valid for DSI panels. In dual-DSI case, it needs to be
+ * used on both panels or none. When used, format is config%d
+ * where %d is one of the configuration found in device node of
+ * panel selected by <pan_intf_cfg>
+ */
+MODULE_PARM_DESC(panel, "lk supplied panel selection string");
+MODULE_PARM_DESC(panel,
+ "panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>");
+module_init(mdss_mdp_driver_init);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
new file mode 100644
index 0000000..4307119
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -0,0 +1,1979 @@
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_H
+#define MDSS_MDP_H
+
+#include <linux/io.h>
+#include <linux/msm_mdp.h>
+#include <linux/msm_mdp_ext.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+#include <linux/irqreturn.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+
+#include "mdss.h"
+#include "mdss_mdp_hwio.h"
+#include "mdss_fb.h"
+#include "mdss_mdp_cdm.h"
+
+#define MDSS_MDP_DEFAULT_INTR_MASK 0
+
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+#define MAX_LINE_BUFFER_WIDTH 2048
+#define MAX_MIXER_HEIGHT 0xFFFF
+#define MAX_IMG_WIDTH 0x3FFF
+#define MAX_IMG_HEIGHT 0x3FFF
+#define AHB_CLK_OFFSET 0x2B4
+#define MAX_DST_H MAX_MIXER_HEIGHT
+#define MAX_DOWNSCALE_RATIO 4
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DECIMATION 4
+#define MDP_MIN_VBP 4
+#define MAX_FREE_LIST_SIZE 12
+#define OVERLAY_MAX 10
+
+#define VALID_ROT_WB_FORMAT BIT(0)
+#define VALID_MDP_WB_INTF_FORMAT BIT(1)
+#define VALID_MDP_CURSOR_FORMAT BIT(2)
+
+#define C3_ALPHA 3 /* alpha */
+#define C2_R_Cr 2 /* R/Cr */
+#define C1_B_Cb 1 /* B/Cb */
+#define C0_G_Y 0 /* G/luma */
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KOFF_TIMEOUT_MS 84
+#define KOFF_TIMEOUT msecs_to_jiffies(KOFF_TIMEOUT_MS)
+
+#define OVERFETCH_DISABLE_TOP BIT(0)
+#define OVERFETCH_DISABLE_BOTTOM BIT(1)
+#define OVERFETCH_DISABLE_LEFT BIT(2)
+#define OVERFETCH_DISABLE_RIGHT BIT(3)
+
+#define MDSS_MDP_CDP_ENABLE BIT(0)
+#define MDSS_MDP_CDP_ENABLE_UBWCMETA BIT(1)
+#define MDSS_MDP_CDP_AMORTIZED BIT(2)
+#define MDSS_MDP_CDP_AHEAD_64 BIT(3)
+
+#define PERF_STATUS_DONE 0
+#define PERF_STATUS_BUSY 1
+
+#define PERF_CALC_PIPE_APPLY_CLK_FUDGE BIT(0)
+#define PERF_CALC_PIPE_SINGLE_LAYER BIT(1)
+#define PERF_CALC_PIPE_CALC_SMP_SIZE BIT(2)
+
+#define PERF_SINGLE_PIPE_BW_FLOOR 1200000000
+#define CURSOR_PIPE_LEFT 0
+#define CURSOR_PIPE_RIGHT 1
+
+#define MASTER_CTX 0
+#define SLAVE_CTX 1
+
+#define XIN_HALT_TIMEOUT_US 0x4000
+
+#define MAX_LAYER_COUNT 0xC
+
+/* hw cursor can only be setup in highest mixer stage */
+#define HW_CURSOR_STAGE(mdata) \
+ (((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
+
+#define BITS_TO_BYTES(x) DIV_ROUND_UP(x, BITS_PER_BYTE)
+
+enum mdss_mdp_perf_state_type {
+ PERF_SW_COMMIT_STATE = 0,
+ PERF_HW_MDP_STATE,
+};
+
+enum mdss_mdp_block_power_state {
+ MDP_BLOCK_POWER_OFF = 0,
+ MDP_BLOCK_POWER_ON = 1,
+};
+
+enum mdss_mdp_mixer_type {
+ MDSS_MDP_MIXER_TYPE_UNUSED,
+ MDSS_MDP_MIXER_TYPE_INTF,
+ MDSS_MDP_MIXER_TYPE_WRITEBACK,
+};
+
+enum mdss_mdp_mixer_mux {
+ MDSS_MDP_MIXER_MUX_DEFAULT,
+ MDSS_MDP_MIXER_MUX_LEFT,
+ MDSS_MDP_MIXER_MUX_RIGHT,
+};
+
+enum mdss_sd_transition {
+ SD_TRANSITION_NONE,
+ SD_TRANSITION_SECURE_TO_NON_SECURE,
+ SD_TRANSITION_NON_SECURE_TO_SECURE
+};
+
+static inline enum mdss_mdp_sspp_index get_pipe_num_from_ndx(u32 ndx)
+{
+ u32 id;
+
+ if (unlikely(!ndx))
+ return MDSS_MDP_MAX_SSPP;
+
+ id = fls(ndx) - 1;
+
+ if (unlikely(ndx ^ BIT(id)))
+ return MDSS_MDP_MAX_SSPP;
+
+ return id;
+}
+
+static inline enum mdss_mdp_pipe_type
+get_pipe_type_from_num(enum mdss_mdp_sspp_index pnum)
+{
+ enum mdss_mdp_pipe_type ptype;
+
+ switch (pnum) {
+ case MDSS_MDP_SSPP_VIG0:
+ case MDSS_MDP_SSPP_VIG1:
+ case MDSS_MDP_SSPP_VIG2:
+ case MDSS_MDP_SSPP_VIG3:
+ ptype = MDSS_MDP_PIPE_TYPE_VIG;
+ break;
+ case MDSS_MDP_SSPP_RGB0:
+ case MDSS_MDP_SSPP_RGB1:
+ case MDSS_MDP_SSPP_RGB2:
+ case MDSS_MDP_SSPP_RGB3:
+ ptype = MDSS_MDP_PIPE_TYPE_RGB;
+ break;
+ case MDSS_MDP_SSPP_DMA0:
+ case MDSS_MDP_SSPP_DMA1:
+ case MDSS_MDP_SSPP_DMA2:
+ case MDSS_MDP_SSPP_DMA3:
+ ptype = MDSS_MDP_PIPE_TYPE_DMA;
+ break;
+ case MDSS_MDP_SSPP_CURSOR0:
+ case MDSS_MDP_SSPP_CURSOR1:
+ ptype = MDSS_MDP_PIPE_TYPE_CURSOR;
+ break;
+ default:
+ ptype = MDSS_MDP_PIPE_TYPE_INVALID;
+ break;
+ }
+
+ return ptype;
+}
+
+static inline enum mdss_mdp_pipe_type get_pipe_type_from_ndx(u32 ndx)
+{
+ enum mdss_mdp_sspp_index pnum;
+
+ pnum = get_pipe_num_from_ndx(ndx);
+
+ return get_pipe_type_from_num(pnum);
+}
+
+enum mdss_mdp_block_type {
+ MDSS_MDP_BLOCK_UNUSED,
+ MDSS_MDP_BLOCK_SSPP,
+ MDSS_MDP_BLOCK_MIXER,
+ MDSS_MDP_BLOCK_DSPP,
+ MDSS_MDP_BLOCK_WB,
+ MDSS_MDP_BLOCK_CDM,
+ MDSS_MDP_BLOCK_SSPP_10,
+ MDSS_MDP_BLOCK_MAX
+};
+
+enum mdss_mdp_csc_type {
+ MDSS_MDP_CSC_YUV2RGB_601L,
+ MDSS_MDP_CSC_YUV2RGB_601FR,
+ MDSS_MDP_CSC_YUV2RGB_709L,
+ MDSS_MDP_CSC_YUV2RGB_2020L,
+ MDSS_MDP_CSC_YUV2RGB_2020FR,
+ MDSS_MDP_CSC_RGB2YUV_601L,
+ MDSS_MDP_CSC_RGB2YUV_601FR,
+ MDSS_MDP_CSC_RGB2YUV_709L,
+ MDSS_MDP_CSC_RGB2YUV_2020L,
+ MDSS_MDP_CSC_RGB2YUV_2020FR,
+ MDSS_MDP_CSC_YUV2YUV,
+ MDSS_MDP_CSC_RGB2RGB,
+ MDSS_MDP_MAX_CSC
+};
+
+enum mdp_wfd_blk_type {
+ MDSS_MDP_WFD_SHARED = 0,
+ MDSS_MDP_WFD_INTERFACE,
+ MDSS_MDP_WFD_DEDICATED,
+};
+
+enum mdss_mdp_reg_bus_cfg {
+ REG_CLK_CFG_OFF,
+ REG_CLK_CFG_LOW,
+ REG_CLK_CFG_HIGH,
+};
+
+enum mdss_mdp_panic_signal_type {
+ MDSS_MDP_PANIC_NONE,
+ MDSS_MDP_PANIC_COMMON_REG_CFG,
+ MDSS_MDP_PANIC_PER_PIPE_CFG,
+};
+
+enum mdss_mdp_fetch_type {
+ MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_FETCH_TILE,
+ MDSS_MDP_FETCH_UBWC,
+};
+
+/**
+ * enum mdp_commit_stage_type - Indicate different commit stages
+ *
+ * @MDP_COMMIT_STAGE_SETUP_DONE: At the stage where the commit
+ * setup has been completed.
+ * @MDP_COMMIT_STAGE_READY_FOR_KICKOFF: At the stage where the
+ * commit is ready to be kicked off.
+ */
+enum mdp_commit_stage_type {
+ MDP_COMMIT_STAGE_SETUP_DONE,
+ MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
+};
+
+struct mdss_mdp_ctl;
+typedef void (*mdp_vsync_handler_t)(struct mdss_mdp_ctl *, ktime_t);
+
+struct mdss_mdp_vsync_handler {
+ bool enabled;
+ bool cmd_post_flush;
+ mdp_vsync_handler_t vsync_handler;
+ struct list_head list;
+};
+
+struct mdss_mdp_lineptr_handler {
+ bool enabled;
+ mdp_vsync_handler_t lineptr_handler;
+ struct list_head list;
+};
+
+enum mdss_mdp_wb_ctl_type {
+ MDSS_MDP_WB_CTL_TYPE_BLOCK = 1,
+ MDSS_MDP_WB_CTL_TYPE_LINE
+};
+
+enum mdss_mdp_bw_vote_mode {
+ MDSS_MDP_BW_MODE_SINGLE_LAYER,
+ MDSS_MDP_BW_MODE_SINGLE_IF,
+ MDSS_MDP_BW_MODE_MAX
+};
+
+enum mdp_wb_blk_caps {
+ MDSS_MDP_WB_WFD = BIT(0),
+ MDSS_MDP_WB_ROTATOR = BIT(1),
+ MDSS_MDP_WB_INTF = BIT(2),
+ MDSS_MDP_WB_UBWC = BIT(3),
+};
+
+/**
+ * enum perf_calc_vote_mode - enum to decide if mdss_mdp_get_bw_vote_mode
+ * function needs an extra efficiency factor.
+ *
+ * @PERF_CALC_VOTE_MODE_PER_PIPE: used to check if efficiency factor is needed
+ * based on the pipe properties.
+ * @PERF_CALC_VOTE_MODE_CTL: used to check if efficiency factor is needed based
+ * on the controller properties.
+ * @PERF_CALC_VOTE_MODE_MAX: used to check if efficiency factor is needed to vote
+ * max MDP bandwidth.
+ *
+ * Depending upon the properties of each specific object (determined
+ * by this enum), driver decides if the mode to vote needs an
+ * extra factor.
+ */
+enum perf_calc_vote_mode {
+ PERF_CALC_VOTE_MODE_PER_PIPE,
+ PERF_CALC_VOTE_MODE_CTL,
+ PERF_CALC_VOTE_MODE_MAX,
+};
+
+struct mdss_mdp_perf_params {
+ u64 bw_overlap;
+ u64 bw_overlap_nocr;
+ u64 bw_writeback;
+ u64 bw_prefill;
+ u64 max_per_pipe_ib;
+ u32 prefill_bytes;
+ u64 bw_ctl;
+ u32 mdp_clk_rate;
+ DECLARE_BITMAP(bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+};
+
+struct mdss_mdp_writeback {
+ u32 num;
+ char __iomem *base;
+ u32 caps;
+ struct kref kref;
+ u8 supported_input_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+ u8 supported_output_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+};
+
+struct mdss_mdp_ctl_intfs_ops {
+ int (*start_fnc)(struct mdss_mdp_ctl *ctl);
+ int (*stop_fnc)(struct mdss_mdp_ctl *ctl, int panel_power_state);
+ int (*prepare_fnc)(struct mdss_mdp_ctl *ctl, void *arg);
+ int (*display_fnc)(struct mdss_mdp_ctl *ctl, void *arg);
+ int (*wait_fnc)(struct mdss_mdp_ctl *ctl, void *arg);
+ int (*wait_vsync_fnc)(struct mdss_mdp_ctl *ctl);
+ int (*wait_pingpong)(struct mdss_mdp_ctl *ctl, void *arg);
+ u32 (*read_line_cnt_fnc)(struct mdss_mdp_ctl *);
+ int (*add_vsync_handler)(struct mdss_mdp_ctl *,
+ struct mdss_mdp_vsync_handler *);
+ int (*remove_vsync_handler)(struct mdss_mdp_ctl *,
+ struct mdss_mdp_vsync_handler *);
+ int (*config_fps_fnc)(struct mdss_mdp_ctl *ctl, int new_fps);
+ int (*restore_fnc)(struct mdss_mdp_ctl *ctl, bool locked);
+ int (*early_wake_up_fnc)(struct mdss_mdp_ctl *ctl);
+
+ /*
+ * reconfigure interface for new resolution, called before (pre=1)
+ * and after interface has been reconfigured (pre=0)
+ */
+ int (*reconfigure)(struct mdss_mdp_ctl *ctl,
+ enum dynamic_switch_modes mode, bool pre);
+ /* called before do any register programming from commit thread */
+ void (*pre_programming)(struct mdss_mdp_ctl *ctl);
+
+ /* to update lineptr, [1..yres] - enable, 0 - disable */
+ int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
+};
+
+/* FRC info used for Deterministic Frame Rate Control */
+#define FRC_CADENCE_22_RATIO 2000000000u /* 30fps -> 60fps, 29.97 -> 59.94 */
+#define FRC_CADENCE_22_RATIO_LOW 1940000000u
+#define FRC_CADENCE_22_RATIO_HIGH 2060000000u
+
+#define FRC_CADENCE_23_RATIO 2500000000u /* 24fps -> 60fps, 23.976 -> 59.94 */
+#define FRC_CADENCE_23_RATIO_LOW 2450000000u
+#define FRC_CADENCE_23_RATIO_HIGH 2550000000u
+
+#define FRC_CADENCE_23223_RATIO 2400000000u /* 25fps -> 60fps */
+#define FRC_CADENCE_23223_RATIO_LOW 2360000000u
+#define FRC_CADENCE_23223_RATIO_HIGH 2440000000u
+
+#define FRC_VIDEO_TS_DELTA_THRESHOLD_US (16666 * 10) /* 10 frames at 60fps */
+
+/*
+ * In current FRC design, the minimum video fps change we can support is 24fps
+ * to 25fps, so the timestamp delta per frame is 1667. Use this threshold to
+ * catch this case and ignore more trivial video fps variations.
+ */
+#define FRC_VIDEO_FPS_CHANGE_THRESHOLD_US 1667
+
+/* how many samples we need for video
+ * fps calculation
+ */
+#define FRC_VIDEO_FPS_DETECT_WINDOW 32
+
+/*
+ * Experimental value. Minimum vsync counts during video's single update could
+ * be thought of as pause. If video fps is 10fps and display is 60fps, every
+ * video frame should arrive per 6 vsync, and add 2 more vsync delay, each frame
+ * should arrive in at most 8 vsync interval, otherwise it's considered as a
+ * pause. This value might need tuning in some cases.
+ */
+#define FRC_VIDEO_PAUSE_THRESHOLD 8
+
+#define FRC_MAX_VIDEO_DROPPING_CNT 10 /* how many drops before we disable FRC */
+#define FRC_VIDEO_DROP_TOLERANCE_WINDOW 1000 /* how many frames to count drop */
+
+/* DONOT change the definition order. __check_known_cadence depends on it */
+enum {
+ FRC_CADENCE_NONE = 0, /* Waiting for samples to compute cadence */
+ FRC_CADENCE_23,
+ FRC_CADENCE_22,
+ FRC_CADENCE_23223,
+ FRC_CADENCE_FREE_RUN, /* No extra repeat, but wait for changes */
+ FRC_CADENCE_DISABLE, /* FRC disabled, no extra repeat */
+};
+#define FRC_MAX_SUPPORT_CADENCE FRC_CADENCE_FREE_RUN
+
+#define FRC_CADENCE_SEQUENCE_MAX_LEN 5 /* 5 -> 23223 */
+#define FRC_CADENCE_SEQUENCE_MAX_RETRY 5 /* max retry of matching sequence */
+
+/* sequence generator for pre-defined cadence */
+struct mdss_mdp_frc_seq_gen {
+ int seq[FRC_CADENCE_SEQUENCE_MAX_LEN];
+ int cache[FRC_CADENCE_SEQUENCE_MAX_LEN]; /* 0 -> this slot is empty */
+ int len;
+ int pos; /* current position in seq, < 0 -> pattern not matched */
+ int base;
+ int retry;
+};
+
+struct mdss_mdp_frc_data {
+ u32 frame_cnt; /* video frame count */
+ s64 timestamp; /* video timestamp in millisecond */
+};
+
+struct mdss_mdp_frc_video_stat {
+ u32 frame_cnt; /* video frame count */
+ s64 timestamp; /* video timestamp in millisecond */
+ s64 last_delta;
+};
+
+struct mdss_mdp_frc_drop_stat {
+ u32 drop_cnt; /* how many video buffer drop */
+ u32 frame_cnt; /* the first frame cnt where drop happens */
+};
+
+/* how many samples at least we need for
+ * cadence detection
+ */
+#define FRC_CADENCE_DETECT_WINDOW 6
+
+struct mdss_mdp_frc_cadence_calc {
+ struct mdss_mdp_frc_data samples[FRC_CADENCE_DETECT_WINDOW];
+ int sample_cnt;
+};
+
+struct mdss_mdp_frc_info {
+ u32 cadence_id; /* patterns such as 22/23/23223 */
+ u32 display_fp1000s;
+ u32 last_vsync_cnt; /* vsync when we kicked off last frame */
+ u32 last_repeat; /* how many times last frame was repeated */
+ u32 base_vsync_cnt;
+ struct mdss_mdp_frc_data cur_frc;
+ struct mdss_mdp_frc_data last_frc;
+ struct mdss_mdp_frc_data base_frc;
+ struct mdss_mdp_frc_video_stat video_stat;
+ struct mdss_mdp_frc_drop_stat drop_stat;
+ struct mdss_mdp_frc_cadence_calc calc;
+ struct mdss_mdp_frc_seq_gen gen;
+};
+
+/*
+ * FSM used in deterministic frame rate control:
+ *
+ * +----------------+ +----------------+
+ * | +------------+ | too many drops | +------------+ |
+ * +--------> | INIT | +----------------------> | DISABLE | |
+ * | | +------------+ <-----------+ | +------------+ |
+ * | +----------------+ | +----------------+
+ * | | | |
+ * | | | | change
+ * | frame| |change +----------------+
+ * | | | |
+ * | | | |
+ * | +--v--------+----+ +-----+----------+
+ * change| | | not supported | |
+ * | | CADENCE_DETECT +----------------------> FREE_RUN |
+ * | | | | |
+ * | +-------+--------+ +----------------+
+ * | |
+ * | |
+ * | |cadence detected
+ * | |
+ * | |
+ * | +-------v--------+ +----------------------------+
+ * | | | |Events: |
+ * +--------+ SEQ_MATCH | | 1. change: some changes |
+ * | | | | might change cadence like |
+ * | +-------+--------+ | video/display fps. |
+ * | | | 2. frame: video frame with|
+ * | |sequence matched | correct FRC info. |
+ * | | | 3. in other states than |
+ * | +-------v--------+ | INIT frame event doesn't |
+ * | | | | make any state change. |
+ * | | | +----------------------------+
+ * +--------+ READY |
+ * | |
+ * +----------------+
+ */
+enum mdss_mdp_frc_state_type {
+ FRC_STATE_INIT = 0, /* INIT state waiting for frames */
+ FRC_STATE_CADENCE_DETECT, /* state to detect cadence ID */
+ FRC_STATE_SEQ_MATCH, /* state to find start pos in cadence sequence */
+ FRC_STATE_FREERUN, /* state has no extra repeat but might be changed */
+ FRC_STATE_READY, /* state ready to do FRC */
+ FRC_STATE_DISABLE, /* state in which FRC is disabled */
+ FRC_STATE_MAX,
+};
+
+struct mdss_mdp_frc_fsm;
+
+struct mdss_mdp_frc_fsm_ops {
+ /* preprocess incoming FRC info like checking fps changes */
+ void (*pre_frc)(struct mdss_mdp_frc_fsm *frc_fsm, void *arg);
+ /* deterministic frame rate control like delaying frame's display */
+ void (*do_frc)(struct mdss_mdp_frc_fsm *frc_fsm, void *arg);
+ /* post-operations after FRC like saving past info */
+ void (*post_frc)(struct mdss_mdp_frc_fsm *frc_fsm, void *arg);
+};
+
+struct mdss_mdp_frc_fsm_cbs {
+ /* callback used once updating FRC FSM's state */
+ void (*update_state_cb)(struct mdss_mdp_frc_fsm *frc_fsm);
+};
+
+struct mdss_mdp_frc_fsm_state {
+ char *name; /* debug name of current state */
+ enum mdss_mdp_frc_state_type state; /* current state type */
+ struct mdss_mdp_frc_fsm_ops ops; /* operations of current state */
+};
+
+/* Top-level FRC FSM instance: current state, the pending state to switch
+ * to, callbacks and the cached per-frame FRC info.
+ */
+struct mdss_mdp_frc_fsm {
+ bool enable; /* whether FRC is running */
+ struct mdss_mdp_frc_fsm_state state; /* current state */
+ struct mdss_mdp_frc_fsm_state to_state; /* state to set */
+ struct mdss_mdp_frc_fsm_cbs cbs;
+ struct mdss_mdp_frc_info frc_info;
+};
+
+/*
+ * struct mdss_mdp_ctl - state for one MDP control (CTL) path
+ *
+ * A ctl ties a display interface (or writeback) to its layer mixers and
+ * carries per-path power, flush, performance and ROI bookkeeping.
+ */
+struct mdss_mdp_ctl {
+ u32 num;
+ char __iomem *base;
+
+ u32 ref_cnt;
+ int power_state;
+
+ u32 intf_num;
+ u32 slave_intf_num; /* ping-pong split */
+ u32 intf_type;
+
+ /*
+ * false: for sctl in DUAL_LM_DUAL_DISPLAY
+ * true: everything else
+ */
+ bool is_master;
+
+ u32 opmode;
+ u32 flush_bits;
+ u32 flush_reg_data;
+
+ bool split_flush_en;
+ bool is_video_mode;
+ u32 play_cnt;
+ u32 vsync_cnt;
+ u32 underrun_cnt;
+
+ struct work_struct cpu_pm_work;
+ int autorefresh_frame_cnt;
+
+ u16 width;
+ u16 height;
+ u16 border_x_off;
+ u16 border_y_off;
+ bool is_secure;
+
+ /* used for WFD */
+ u32 dst_format;
+ enum mdss_mdp_csc_type csc_type;
+ struct mult_factor dst_comp_ratio;
+
+ u32 clk_rate;
+ int force_screen_state;
+ struct mdss_mdp_perf_params cur_perf;
+ struct mdss_mdp_perf_params new_perf;
+ u32 perf_transaction_status;
+ bool perf_release_ctl_bw;
+ u64 bw_pending;
+ bool disable_prefill;
+
+ bool traffic_shaper_enabled;
+ u32 traffic_shaper_mdp_clk;
+
+ struct mdss_data_type *mdata;
+ struct msm_fb_data_type *mfd;
+ struct mdss_mdp_mixer *mixer_left;
+ struct mdss_mdp_mixer *mixer_right;
+ struct mdss_mdp_cdm *cdm;
+ struct mutex lock;
+ struct mutex offlock;
+ struct mutex flush_lock;
+ struct mutex *shared_lock;
+ struct mutex rsrc_lock;
+ spinlock_t spin_lock;
+
+ struct mdss_panel_data *panel_data;
+ struct mdss_mdp_vsync_handler vsync_handler;
+ struct mdss_mdp_vsync_handler recover_underrun_handler;
+ struct work_struct recover_work;
+ struct work_struct remove_underrun_handler;
+
+ struct mdss_mdp_lineptr_handler lineptr_handler;
+
+ /*
+ * This ROI is aligned to as per following guidelines and
+ * sent to the panel driver.
+ *
+ * 1. DUAL_LM_DUAL_DISPLAY
+ * Panel = 1440x2560
+ * CTL0 = 720x2560 (LM0=720x2560)
+ * CTL1 = 720x2560 (LM1=720x2560)
+ * Both CTL's ROI will be (0-719)x(0-2599)
+ * 2. DUAL_LM_SINGLE_DISPLAY
+ * Panel = 1440x2560
+ * CTL0 = 1440x2560 (LM0=720x2560 and LM1=720x2560)
+ * CTL0's ROI will be (0-1429)x(0-2599)
+ * 3. SINGLE_LM_SINGLE_DISPLAY
+ * Panel = 1080x1920
+ * CTL0 = 1080x1920 (LM0=1080x1920)
+ * CTL0's ROI will be (0-1079)x(0-1919)
+ */
+ struct mdss_rect roi;
+ struct mdss_rect roi_bkup;
+
+ struct blocking_notifier_head notifier_head;
+
+ void *priv_data;
+ void *intf_ctx[2];
+ u32 wb_type;
+
+ struct mdss_mdp_writeback *wb;
+
+ struct mdss_mdp_ctl_intfs_ops ops;
+ bool force_ctl_start;
+
+ u64 last_input_time;
+ int pending_mode_switch;
+ u16 frame_rate;
+
+ /* dynamic resolution switch during cont-splash handoff */
+ bool switch_with_handoff;
+
+ /* vsync handler for FRC */
+ struct mdss_mdp_vsync_handler frc_vsync_handler;
+};
+
+/*
+ * struct mdss_mdp_mixer - one layer mixer (LM) instance, including its
+ * blending stages, ROI and cursor state.
+ */
+struct mdss_mdp_mixer {
+ u32 num;
+ u32 ref_cnt;
+ char __iomem *base;
+ char __iomem *dspp_base;
+ char __iomem *pingpong_base;
+ u8 type;
+ u8 params_changed;
+ u16 width;
+ u16 height;
+
+ bool valid_roi;
+ bool roi_changed;
+ struct mdss_rect roi;
+
+ u8 cursor_enabled;
+ u16 cursor_hotx;
+ u16 cursor_hoty;
+ u8 rotator_mode;
+
+ /*
+ * src_split_req is valid only for right layer mixer.
+ *
+ * VIDEO mode panels: Always true if source split is enabled.
+ * CMD mode panels: Only true if source split is enabled and
+ * for a given commit left and right both ROIs
+ * are valid.
+ */
+ bool src_split_req;
+ bool is_right_mixer;
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_mdp_pipe *stage_pipe[MAX_PIPES_PER_LM];
+ u32 next_pipe_map;
+ u32 pipe_mapped;
+};
+
+/*
+ * struct mdss_mdp_format_params - pixel format descriptor: layout, chroma
+ * sampling, unpack ordering and fetch mode.
+ */
+struct mdss_mdp_format_params {
+ u32 format;
+ u32 flag;
+ u8 is_yuv;
+
+ u8 frame_format;
+ u8 chroma_sample;
+ u8 solid_fill;
+ u8 fetch_planes;
+ u8 unpack_align_msb; /* 0 to LSB, 1 to MSB */
+ u8 unpack_tight; /* 0 for loose, 1 for tight */
+ u8 unpack_count; /* 0 = 1 component, 1 = 2 component ... */
+ u8 bpp;
+ u8 alpha_enable; /* source has alpha */
+ u8 fetch_mode;
+ u8 bits[MAX_PLANES];
+ u8 element[MAX_PLANES];
+ u8 unpack_dx_format; /*1 for 10 bit format otherwise 0 */
+};
+
+/* Micro-tile geometry used by UBWC formats. */
+struct mdss_mdp_format_ubwc_tile_info {
+ u16 tile_height;
+ u16 tile_width;
+};
+
+/* UBWC format descriptor: base format plus its micro-tile geometry. */
+struct mdss_mdp_format_params_ubwc {
+ struct mdss_mdp_format_params mdp_format;
+ struct mdss_mdp_format_ubwc_tile_info micro;
+};
+
+/* Per-plane sizes and strides computed for a buffer of a given format. */
+struct mdss_mdp_plane_sizes {
+ u32 num_planes;
+ u32 plane_size[MAX_PLANES];
+ u32 total_size;
+ u32 ystride[MAX_PLANES];
+ u32 rau_cnt;
+ u32 rau_h[2];
+};
+
+/* One mapped plane of a source buffer (dma-buf backed). */
+struct mdss_mdp_img_data {
+ dma_addr_t addr;
+ unsigned long len;
+ u32 offset;
+ u32 flags;
+ u32 dir;
+ u32 domain;
+ bool mapped;
+ bool skip_detach;
+ struct fd srcp_f;
+ struct dma_buf *srcp_dma_buf;
+ struct dma_buf_attachment *srcp_attachment;
+ struct sg_table *srcp_table;
+};
+
+/* Lifecycle states of a struct mdss_mdp_data buffer. */
+enum mdss_mdp_data_state {
+ MDP_BUF_STATE_UNUSED,
+ MDP_BUF_STATE_READY,
+ MDP_BUF_STATE_ACTIVE,
+ MDP_BUF_STATE_CLEANUP,
+};
+
+/* A multi-plane source buffer tracked on pool/pipe/chunk lists, with
+ * alloc/free timestamps for debug.
+ */
+struct mdss_mdp_data {
+ enum mdss_mdp_data_state state;
+ u8 num_planes;
+ struct mdss_mdp_img_data p[MAX_PLANES];
+ struct list_head buf_list;
+ struct list_head pipe_list;
+ struct list_head chunk_list;
+ u64 last_alloc;
+ u64 last_freed;
+ struct mdss_mdp_pipe *last_pipe;
+};
+
+/* Histogram collection state for one histogram block; data[] holds the
+ * most recently read bins.
+ */
+struct pp_hist_col_info {
+ u32 col_state;
+ u32 col_en;
+ u32 hist_cnt_read;
+ u32 hist_cnt_sent;
+ u32 hist_cnt_time;
+ u32 frame_cnt;
+ u32 data[HIST_V_SIZE];
+ struct mutex hist_mutex;
+ spinlock_t hist_lock;
+ char __iomem *base;
+ u32 intr_shift;
+ u32 disp_num;
+ struct mdss_mdp_ctl *ctl;
+};
+
+/* Register base and index of one AD hardware block. */
+struct mdss_mdp_ad {
+ char __iomem *base;
+ u8 num;
+};
+
+/* Runtime state of one AD block: configuration, backlight linearization
+ * tables and the last computed/applied values.
+ */
+struct mdss_ad_info {
+ u8 num;
+ u8 calc_hw_num;
+ u32 ops;
+ u32 sts;
+ u32 reg_sts;
+ u32 state;
+ u32 ad_data;
+ u32 ad_data_mode;
+ struct mdss_ad_init init;
+ struct mdss_ad_cfg cfg;
+ struct mutex lock;
+ struct work_struct calc_work;
+ struct msm_fb_data_type *mfd;
+ struct msm_fb_data_type *bl_mfd;
+ struct mdss_mdp_vsync_handler handle;
+ u32 last_str;
+ u32 last_bl;
+ u32 last_ad_data;
+ u16 last_calib[4];
+ bool last_ad_data_valid;
+ bool last_calib_valid;
+ u32 ipc_frame_count;
+ u32 bl_data;
+ u32 calc_itr;
+ uint32_t bl_lin[AD_BL_LIN_LEN];
+ uint32_t bl_lin_inv[AD_BL_LIN_LEN];
+ uint32_t bl_att_lut[AD_BL_ATT_LUT_LEN];
+};
+
+/* Per-feature postprocessing status words (PA, PCC, IGC, gamut, etc.). */
+struct pp_sts_type {
+ u32 pa_sts;
+ u32 pcc_sts;
+ u32 igc_sts;
+ u32 igc_tbl_idx;
+ u32 argc_sts;
+ u32 enhist_sts;
+ u32 dither_sts;
+ u32 gamut_sts;
+ u32 pgc_sts;
+ u32 sharp_sts;
+ u32 hist_sts;
+ u32 side_sts;
+};
+
+/* Per-pipe postprocessing resources: LUT shadows, histogram state and
+ * cached configuration payloads.
+ */
+struct mdss_pipe_pp_res {
+ u32 igc_c0_c1[IGC_LUT_ENTRIES];
+ u32 igc_c2[IGC_LUT_ENTRIES];
+ u32 hist_lut[ENHIST_LUT_ENTRIES];
+ struct pp_hist_col_info hist;
+ struct pp_sts_type pp_sts;
+ void *pa_cfg_payload;
+ void *pcc_cfg_payload;
+ void *igc_cfg_payload;
+ void *hist_lut_cfg_payload;
+};
+
+/* SMP MMB allocation bitmaps (reserved/allocated/fixed) for one plane. */
+struct mdss_mdp_pipe_smp_map {
+ DECLARE_BITMAP(reserved, MAX_DRV_SUP_MMB_BLKS);
+ DECLARE_BITMAP(allocated, MAX_DRV_SUP_MMB_BLKS);
+ DECLARE_BITMAP(fixed, MAX_DRV_SUP_MMB_BLKS);
+};
+
+/* Location (register offset plus bit offset) of a bit in a shared
+ * control register.
+ */
+struct mdss_mdp_shared_reg_ctrl {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+/* Rectangle index for pipes capable of multirect fetch. */
+enum mdss_mdp_pipe_rect {
+ MDSS_MDP_PIPE_RECT0, /* default */
+ MDSS_MDP_PIPE_RECT1,
+ MDSS_MDP_PIPE_MAX_RECTS,
+};
+
+/**
+ * enum mdss_mdp_pipe_multirect_mode - pipe multirect mode
+ * @MDSS_MDP_PIPE_MULTIRECT_NONE: pipe is not working in multirect mode
+ * @MDSS_MDP_PIPE_MULTIRECT_PARALLEL: rectangles are being fetched at the
+ * same time in time multiplexed fashion
+ * @MDSS_MDP_PIPE_MULTIRECT_SERIAL: rectangles are fetched serially, where
+ * one is only fetched after the other one
+ * is complete
+ */
+enum mdss_mdp_pipe_multirect_mode {
+ MDSS_MDP_PIPE_MULTIRECT_NONE,
+ MDSS_MDP_PIPE_MULTIRECT_PARALLEL,
+ MDSS_MDP_PIPE_MULTIRECT_SERIAL,
+};
+
+/**
+ * struct mdss_mdp_pipe_multirect_params - multirect info for layer or pipe
+ * @num: rectangle being operated, default is RECT0 if pipe doesn't
+ * support multirect
+ * @max_rects: number of rectangles supported (presumably the pipe's
+ * hardware capability - confirm against pipe init code)
+ * @mode: mode of multirect operation, default is NONE
+ * @next: pointer to sibling pipe/layer which is also operating in
+ * multirect mode
+ */
+struct mdss_mdp_pipe_multirect_params {
+ enum mdss_mdp_pipe_rect num; /* RECT0 or RECT1 */
+ int max_rects;
+ enum mdss_mdp_pipe_multirect_mode mode;
+ void *next; /* pointer to next pipe or layer */
+};
+
+/*
+ * struct mdss_mdp_pipe - one MDP source pipe: hardware identity, source
+ * and destination geometry, blend configuration, SMP allocation and
+ * per-pipe postprocessing state.
+ */
+struct mdss_mdp_pipe {
+ u32 num;
+ u32 type;
+ u32 ndx;
+ u8 priority;
+ char __iomem *base;
+ u32 ftch_id;
+ u32 xin_id;
+ u32 panic_ctrl_ndx;
+ struct mdss_mdp_shared_reg_ctrl clk_ctrl;
+ struct mdss_mdp_shared_reg_ctrl clk_status;
+ struct mdss_mdp_shared_reg_ctrl sw_reset;
+
+ struct kref kref;
+
+ u32 play_cnt;
+ struct file *file;
+ bool is_handed_off;
+
+ u32 flags;
+ u32 bwc_mode;
+
+ /* valid only when pipe's output is crossing both layer mixers */
+ bool src_split_req;
+ bool is_right_blend;
+
+ u16 img_width;
+ u16 img_height;
+ u8 horz_deci;
+ u8 vert_deci;
+ struct mdss_rect src;
+ struct mdss_rect dst;
+ struct mdss_mdp_format_params *src_fmt;
+ struct mdss_mdp_plane_sizes src_planes;
+
+ /* compression ratio from the source format */
+ struct mult_factor comp_ratio;
+
+ enum mdss_mdp_stage_index mixer_stage;
+ u8 is_fg;
+ u8 alpha;
+ u8 blend_op;
+ u8 overfetch_disable;
+ u32 transp;
+ u32 bg_color;
+
+ struct msm_fb_data_type *mfd;
+ struct mdss_mdp_mixer *mixer_left;
+ struct mdss_mdp_mixer *mixer_right;
+
+ struct mdp_overlay req_data;
+ struct mdp_input_layer layer;
+ u32 params_changed;
+ bool dirty;
+ bool unhalted;
+ bool async_update;
+
+ struct mdss_mdp_pipe_smp_map smp_map[MAX_PLANES];
+
+ struct list_head buf_queue;
+ struct list_head list;
+
+ struct mdp_overlay_pp_params pp_cfg;
+ struct mdss_pipe_pp_res pp_res;
+ struct mdp_scale_data_v2 scaler;
+ u8 chroma_sample_h;
+ u8 chroma_sample_v;
+
+ wait_queue_head_t free_waitq;
+ u32 frame_rate;
+ u8 csc_coeff_set;
+ u8 supported_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+
+ struct mdss_mdp_pipe_multirect_params multirect;
+};
+
+/* Buffer plus opaque context passed to writeback handlers. */
+struct mdss_mdp_writeback_arg {
+ struct mdss_mdp_data *data;
+ void *priv_data;
+};
+
+struct mdss_mdp_wfd;
+
+/*
+ * struct mdss_overlay_private - per-framebuffer private data of the MDP5
+ * overlay driver (reached via mfd->mdp.private1); tracks pipe and buffer
+ * lists, sysfs event nodes, splash handoff and vsync plumbing.
+ */
+struct mdss_overlay_private {
+ ktime_t vsync_time;
+ ktime_t lineptr_time;
+ struct kernfs_node *vsync_event_sd;
+ struct kernfs_node *lineptr_event_sd;
+ struct kernfs_node *hist_event_sd;
+ struct kernfs_node *bl_event_sd;
+ struct kernfs_node *ad_event_sd;
+ struct kernfs_node *ad_bl_event_sd;
+ int borderfill_enable;
+ int hw_refresh;
+ void *cpu_pm_hdl;
+
+ struct mdss_data_type *mdata;
+ struct mutex ov_lock;
+ struct mutex dfps_lock;
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_mdp_wfd *wfd;
+
+ struct mutex list_lock;
+ struct list_head pipes_used;
+ struct list_head pipes_cleanup;
+ struct list_head pipes_destroy;
+ struct list_head rot_proc_list;
+ bool mixer_swap;
+ u32 resources_state;
+
+ /* list of buffers that can be reused */
+ struct list_head bufs_chunks;
+ struct list_head bufs_pool;
+ struct list_head bufs_used;
+ /* list of buffers which should be freed during cleanup stage */
+ struct list_head bufs_freelist;
+
+ int ad_state;
+ int dyn_pu_state;
+
+ bool handoff;
+ u32 splash_mem_addr;
+ u32 splash_mem_size;
+ u32 sd_enabled;
+
+ struct mdss_timeline *vsync_timeline;
+ struct mdss_mdp_vsync_handler vsync_retire_handler;
+ int retire_cnt;
+ bool kickoff_released;
+ u32 cursor_ndx[2];
+ u32 hist_events;
+ u32 bl_events;
+ u32 ad_events;
+ u32 ad_bl_events;
+
+ bool allow_kickoff;
+
+ /* video frame info used by deterministic frame rate control */
+ struct mdss_mdp_frc_fsm *frc_fsm;
+ u8 sd_transition_state;
+ struct kthread_worker worker;
+ struct kthread_work vsync_work;
+ struct task_struct *thread;
+};
+
+/* Parameters for programming per-client VBIF OT (presumably outstanding
+ * transaction - confirm) limits.
+ */
+struct mdss_mdp_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u16 frame_rate;
+ bool is_rot;
+ bool is_wb;
+ bool is_yuv;
+ bool is_vbif_nrt;
+ u32 reg_off_vbif_lim_conf;
+ u32 reg_off_mdp_clk_ctrl;
+ u32 bit_off_mdp_clk_ctrl;
+};
+
+/* Commit-stage callback; @data is handed back to commit_cb_fnc. */
+struct mdss_mdp_commit_cb {
+ void *data;
+ int (*commit_cb_fnc)(enum mdp_commit_stage_type commit_state,
+ void *data);
+};
+
+/**
+ * enum mdss_screen_state - Screen states that MDP can be forced into
+ *
+ * @MDSS_SCREEN_DEFAULT: Do not force MDP into any screen state.
+ * @MDSS_SCREEN_FORCE_BLANK: Force MDP to generate blank color fill screen.
+ *
+ * NOTE(review): presumably stored in mdss_mdp_ctl::force_screen_state -
+ * confirm against the ctl setup code.
+ */
+enum mdss_screen_state {
+ MDSS_SCREEN_DEFAULT,
+ MDSS_SCREEN_FORCE_BLANK,
+};
+
+/**
+ * enum mdss_mdp_clt_intf_event_flags - flags specifying how event to should
+ * be sent to panel drivers.
+ *
+ * @CTL_INTF_EVENT_FLAG_DEFAULT: this flag denotes default behaviour where
+ * event will be send to all panels attached this
+ * display, recursively in split-DSI.
+ * @CTL_INTF_EVENT_FLAG_SKIP_BROADCAST: this flag sends event only to panel
+ * associated with this ctl.
+ * @CTL_INTF_EVENT_FLAG_SLAVE_INTF: this flag sends event only to slave panel
+ * associated with this ctl, i.e pingpong-split
+ *
+ * Note: the flag bits start at BIT(1); BIT(0) is not used.
+ */
+enum mdss_mdp_clt_intf_event_flags {
+ CTL_INTF_EVENT_FLAG_DEFAULT = 0,
+ CTL_INTF_EVENT_FLAG_SKIP_BROADCAST = BIT(1),
+ CTL_INTF_EVENT_FLAG_SLAVE_INTF = BIT(2),
+};
+
+/* Accessors from msm_fb_data_type to the mdp5 overlay private data and
+ * its mdata/ctl/wb members (mfd->mdp.private1 holds a
+ * struct mdss_overlay_private).
+ */
+#define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
+#define mfd_to_mdata(mfd) (((struct mdss_overlay_private *)\
+ (mfd->mdp.private1))->mdata)
+#define mfd_to_ctl(mfd) (((struct mdss_overlay_private *)\
+ (mfd->mdp.private1))->ctl)
+#define mfd_to_wb(mfd) (((struct mdss_overlay_private *)\
+ (mfd->mdp.private1))->wb)
+
+/**
+ * - mdss_mdp_is_roi_changed
+ * @mfd - pointer to mfd, may be NULL
+ *
+ * Returns true when the ROI of any layer mixer of the given display has
+ * changed, false otherwise (or when @mfd is NULL).
+ */
+static inline bool mdss_mdp_is_roi_changed(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *master;
+
+	if (!mfd)
+		return false;
+
+	master = mfd_to_ctl(mfd); /* returns master ctl */
+
+	if (master->mixer_left->roi_changed)
+		return true;
+
+	/* right mixer only exists/matters for split-LM displays */
+	return is_split_lm(mfd) && master->mixer_right->roi_changed;
+}
+
+/**
+ * - mdss_mdp_is_both_lm_valid
+ * @main_ctl - pointer to a main ctl, may be NULL
+ *
+ * Checks whether both layer mixers of the master ctl carry a valid ROI.
+ * Useful when partial update is enabled on MDP_DUAL_LM_SINGLE_DISPLAY or
+ * MDP_DUAL_LM_DUAL_DISPLAY.
+ */
+static inline bool mdss_mdp_is_both_lm_valid(struct mdss_mdp_ctl *main_ctl)
+{
+	if (!main_ctl || !main_ctl->is_master)
+		return false;
+
+	if (!main_ctl->mixer_left || !main_ctl->mixer_left->valid_roi)
+		return false;
+
+	return main_ctl->mixer_right && main_ctl->mixer_right->valid_roi;
+}
+
+/* Partial-update type of a commit, as classified by
+ * mdss_mdp_get_pu_type() below.
+ */
+enum mdss_mdp_pu_type {
+ MDSS_MDP_INVALID_UPDATE = -1,
+ MDSS_MDP_DEFAULT_UPDATE,
+ MDSS_MDP_LEFT_ONLY_UPDATE, /* only valid for split_lm */
+ MDSS_MDP_RIGHT_ONLY_UPDATE, /* only valid for split_lm */
+};
+
+/* Classify the current commit's partial-update type.
+ * Only call from master ctl; returns INVALID for a non-master/NULL ctl.
+ */
+static inline enum mdss_mdp_pu_type mdss_mdp_get_pu_type(
+	struct mdss_mdp_ctl *mctl)
+{
+	if (!mctl || !mctl->is_master)
+		return MDSS_MDP_INVALID_UPDATE;
+
+	/* non-split display, or both mixers valid: default update */
+	if (!is_split_lm(mctl->mfd) || mdss_mdp_is_both_lm_valid(mctl))
+		return MDSS_MDP_DEFAULT_UPDATE;
+
+	if (mctl->mixer_left->valid_roi)
+		return MDSS_MDP_LEFT_ONLY_UPDATE;
+
+	if (mctl->mixer_right->valid_roi)
+		return MDSS_MDP_RIGHT_ONLY_UPDATE;
+
+	pr_err("%s: invalid pu_type\n", __func__);
+	return MDSS_MDP_INVALID_UPDATE;
+}
+
+/* Return the slave ctl that owns the right mixer, or NULL when the right
+ * mixer is absent or belongs to @ctl itself (no split display).
+ */
+static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
+	struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_mixer *right = ctl ? ctl->mixer_right : NULL;
+
+	if (right && right->ctl != ctl)
+		return right->ctl;
+
+	return NULL;
+}
+
+/* For a slave (right-mixer) ctl, return its master ctl via the mfd;
+ * NULL when @sctl is not a slave or lacks the required links.
+ */
+static inline struct mdss_mdp_ctl *mdss_mdp_get_main_ctl(
+	struct mdss_mdp_ctl *sctl)
+{
+	if (!sctl || !sctl->mfd || !sctl->mixer_left)
+		return NULL;
+
+	if (!sctl->mixer_left->is_right_mixer)
+		return NULL;
+
+	return mfd_to_ctl(sctl->mfd);
+}
+
+/* NULL-safe pipe-type predicates: VIG pipes carry YUV capability. */
+static inline bool mdss_mdp_pipe_is_yuv(struct mdss_mdp_pipe *pipe)
+{
+	return pipe ? (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) : false;
+}
+
+static inline bool mdss_mdp_pipe_is_rgb(struct mdss_mdp_pipe *pipe)
+{
+	return pipe ? (pipe->type == MDSS_MDP_PIPE_TYPE_RGB) : false;
+}
+
+static inline bool mdss_mdp_pipe_is_dma(struct mdss_mdp_pipe *pipe)
+{
+	return pipe ? (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) : false;
+}
+
+/*
+ * Relaxed MMIO accessors for the ctl, mixer and ping-pong register
+ * blocks. writel_relaxed()/readl_relaxed() impose no memory barriers;
+ * callers needing ordering against DMA must add their own barriers.
+ */
+static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, ctl->base + reg);
+}
+
+static inline u32 mdss_mdp_ctl_read(struct mdss_mdp_ctl *ctl, u32 reg)
+{
+ return readl_relaxed(ctl->base + reg);
+}
+
+static inline void mdp_mixer_write(struct mdss_mdp_mixer *mixer,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, mixer->base + reg);
+}
+
+static inline u32 mdp_mixer_read(struct mdss_mdp_mixer *mixer, u32 reg)
+{
+ return readl_relaxed(mixer->base + reg);
+}
+
+static inline void mdss_mdp_pingpong_write(char __iomem *pingpong_base,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, pingpong_base + reg);
+}
+
+static inline u32 mdss_mdp_pingpong_read(char __iomem *pingpong_base, u32 reg)
+{
+ return readl_relaxed(pingpong_base + reg);
+}
+
+/* Per-pipe SW reset is only wired up on these two MDP revisions. */
+static inline int mdss_mdp_pipe_is_sw_reset_available(
+	struct mdss_data_type *mdata)
+{
+	return (mdata->mdp_rev == MDSS_MDP_HW_REV_101_2) ||
+		(mdata->mdp_rev == MDSS_MDP_HW_REV_103_1);
+}
+
+/* Dynamic IOMMU attach is supported from MDSS_MDP_HW_REV_103 onward. */
+static inline int mdss_mdp_iommu_dyn_attach_supported(
+ struct mdss_data_type *mdata)
+{
+ return (mdata->mdp_rev >= MDSS_MDP_HW_REV_103);
+}
+
+/* Fixed line buffer width limit used for source-width validation. */
+static inline int mdss_mdp_line_buffer_width(void)
+{
+ return MAX_LINE_BUFFER_WIDTH;
+}
+
+/* Panel height including the top/bottom border pixels. */
+static inline u32 get_panel_yres(struct mdss_panel_info *pinfo)
+{
+	return pinfo->yres + pinfo->lcdc.border_top +
+		pinfo->lcdc.border_bottom;
+}
+
+/* Panel width including the left/right border pixels. */
+static inline u32 get_panel_xres(struct mdss_panel_info *pinfo)
+{
+	return pinfo->xres + pinfo->lcdc.border_left +
+		pinfo->lcdc.border_right;
+}
+
+/* Total width driven by @ctl; adds the next panel's width when running
+ * ping-pong split.
+ */
+static inline u32 get_panel_width(struct mdss_mdp_ctl *ctl)
+{
+	u32 total = get_panel_xres(&ctl->panel_data->panel_info);
+
+	if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+		total += get_panel_xres(&ctl->panel_data->next->panel_info);
+
+	return total;
+}
+
+/* True for the MDP major.minor revisions whose configuration must be
+ * restored after init.
+ */
+static inline bool mdss_mdp_req_init_restore_cfg(struct mdss_data_type *mdata)
+{
+	static const u32 revs[] = {
+		MDSS_MDP_HW_REV_106,
+		MDSS_MDP_HW_REV_108,
+		MDSS_MDP_HW_REV_112,
+		MDSS_MDP_HW_REV_114,
+		MDSS_MDP_HW_REV_115,
+		MDSS_MDP_HW_REV_116,
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(revs) / sizeof(revs[0]); i++)
+		if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, revs[i]))
+			return true;
+
+	return false;
+}
+
+/* Map the MDP major.minor revision to the panic-signal programming model
+ * it supports: common register, per-pipe register, or none.
+ */
+static inline int mdss_mdp_panic_signal_support_mode(
+	struct mdss_data_type *mdata)
+{
+	static const u32 common_revs[] = {
+		MDSS_MDP_HW_REV_105, MDSS_MDP_HW_REV_108,
+		MDSS_MDP_HW_REV_109, MDSS_MDP_HW_REV_110,
+	};
+	static const u32 per_pipe_revs[] = {
+		MDSS_MDP_HW_REV_107, MDSS_MDP_HW_REV_114,
+		MDSS_MDP_HW_REV_115, MDSS_MDP_HW_REV_116,
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(common_revs) / sizeof(common_revs[0]); i++)
+		if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, common_revs[i]))
+			return MDSS_MDP_PANIC_COMMON_REG_CFG;
+
+	for (i = 0; i < sizeof(per_pipe_revs) / sizeof(per_pipe_revs[0]); i++)
+		if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, per_pipe_revs[i]))
+			return MDSS_MDP_PANIC_PER_PIPE_CFG;
+
+	return MDSS_MDP_PANIC_NONE;
+}
+
+/* Fetch a cached MDP clock handle; NULL for an out-of-range index. */
+static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
+{
+	return (clk_idx < MDSS_MAX_CLK) ? mdss_res->mdp_clk[clk_idx] : NULL;
+}
+
+/*
+ * mdss_update_sd_client() - track the number of secure-display clients
+ * @mdata: MDP driver data holding the shared atomic counter
+ * @status: non-zero when a client attaches, zero when one detaches
+ *
+ * atomic_add_unless(.., -1, 0) keeps the counter from going below zero.
+ * Fix: the decrement path previously used the global mdss_res while the
+ * increment used the @mdata argument; both now use @mdata consistently.
+ * NOTE(review): this assumes mdss_res aliases the same mdss_data_type as
+ * @mdata at every call site - confirm before merging.
+ */
+static inline void mdss_update_sd_client(struct mdss_data_type *mdata,
+	unsigned int status)
+{
+	if (status)
+		atomic_inc(&mdata->sd_client_count);
+	else
+		atomic_add_unless(&mdata->sd_client_count, -1, 0);
+}
+
+/* Return the first ctl index a writeback session may use; rotator
+ * sessions start after the paths reserved for displays and WB.
+ */
+static inline int mdss_mdp_get_wb_ctl_support(struct mdss_data_type *mdata,
+ bool rotator_session)
+{
+ /*
+ * Any control path can be routed to any of the hardware datapaths.
+ * But there is a HW restriction for 3D Mux block. As the 3D Mux
+ * settings in the CTL registers are double buffered, if an interface
+ * uses it and disconnects, then the subsequent interface which gets
+ * connected should use the same control path in order to clear the
+ * 3D MUX settings.
+ * To handle this restriction, we are allowing WB also, to loop through
+ * all the available control paths, so that it can reuse the control
+ * path left by the external interface, thereby clearing the 3D Mux
+ * settings.
+ * The initial control paths can be used by Primary, External and WB.
+ * The rotator can use the remaining available control paths.
+ */
+ return rotator_session ? (mdata->nctl - mdata->nmixers_wb) :
+ MDSS_MDP_CTL0;
+}
+
+/* A pipe is a non-real-time VBIF client when an NRT VBIF is mapped and
+ * the pipe's left mixer runs in rotator mode.
+ */
+static inline bool mdss_mdp_is_nrt_vbif_client(struct mdss_data_type *mdata,
+	struct mdss_mdp_pipe *pipe)
+{
+	if (!mdata->vbif_nrt_io.base)
+		return false;
+
+	return pipe->mixer_left && pipe->mixer_left->rotator_mode;
+}
+
+/* A ctl path is non-real-time when it drives no interface, or its left
+ * mixer is in rotator mode.
+ */
+static inline bool mdss_mdp_is_nrt_ctl_path(struct mdss_mdp_ctl *ctl)
+{
+	if (ctl->intf_num == MDSS_MDP_NO_INTF)
+		return true;
+
+	return ctl->mixer_left && ctl->mixer_left->rotator_mode;
+}
+
+/* True when a dedicated non-real-time VBIF register space was mapped. */
+static inline bool mdss_mdp_is_nrt_vbif_base_defined(
+	struct mdss_data_type *mdata)
+{
+	return mdata->vbif_nrt_io.base != NULL;
+}
+
+/* Thin wrappers translating a ctl's cached power_state through the
+ * mdss_panel power-state predicates.
+ */
+static inline bool mdss_mdp_ctl_is_power_off(struct mdss_mdp_ctl *ctl)
+{
+ return mdss_panel_is_power_off(ctl->power_state);
+}
+
+static inline bool mdss_mdp_ctl_is_power_on_interactive(
+ struct mdss_mdp_ctl *ctl)
+{
+ return mdss_panel_is_power_on_interactive(ctl->power_state);
+}
+
+static inline bool mdss_mdp_ctl_is_power_on(struct mdss_mdp_ctl *ctl)
+{
+ return mdss_panel_is_power_on(ctl->power_state);
+}
+
+static inline bool mdss_mdp_ctl_is_power_on_lp(struct mdss_mdp_ctl *ctl)
+{
+ return mdss_panel_is_power_on_lp(ctl->power_state);
+}
+
+/* Width of the left layer mixer minus the panel's horizontal borders;
+ * 0 when the ctl or its left mixer is missing.
+ */
+static inline u32 left_lm_w_from_mfd(struct msm_fb_data_type *mfd)
+{
+	struct mdss_panel_info *pinfo = mfd->panel_info;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int w = 0;
+
+	if (ctl && ctl->mixer_left) {
+		w = ctl->mixer_left->width -
+			(pinfo->lcdc.border_left + pinfo->lcdc.border_right);
+		pr_debug("ctl=%d mw=%d l=%d r=%d w=%d\n",
+			ctl->num, ctl->mixer_left->width,
+			pinfo->lcdc.border_left, pinfo->lcdc.border_right,
+			w);
+	}
+	return w;
+}
+
+/* NULL-safe fetch-mode predicates on a format descriptor. */
+static inline bool mdss_mdp_is_tile_format(struct mdss_mdp_format_params *fmt)
+{
+	return fmt ? (fmt->fetch_mode == MDSS_MDP_FETCH_TILE) : false;
+}
+
+static inline bool mdss_mdp_is_ubwc_format(struct mdss_mdp_format_params *fmt)
+{
+	return fmt ? (fmt->fetch_mode == MDSS_MDP_FETCH_UBWC) : false;
+}
+
+static inline bool mdss_mdp_is_linear_format(struct mdss_mdp_format_params *fmt)
+{
+	return fmt ? (fmt->fetch_mode == MDSS_MDP_FETCH_LINEAR) : false;
+}
+
+/* NV12-style: 4:2:0 chroma in a pseudo-planar (two-plane) layout. */
+static inline bool mdss_mdp_is_nv12_format(struct mdss_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return (fmt->chroma_sample == MDSS_MDP_CHROMA_420) &&
+		(fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR);
+}
+
+/* Whether the target's MDP supports UBWC surfaces. */
+static inline bool mdss_mdp_is_ubwc_supported(struct mdss_data_type *mdata)
+{
+ return mdata->has_ubwc;
+}
+
+/* Writeback-based rotation is available only when there is no separate
+ * rotator block. Note: unlike the helper above, this one NULL-checks
+ * mdata.
+ */
+static inline bool mdss_mdp_is_wb_rotator_supported(
+ struct mdss_data_type *mdata)
+{
+ return mdata && !mdata->has_separate_rotator;
+}
+
+/* Non-zero when a CDM block may be used for the given output path. */
+static inline int mdss_mdp_is_cdm_supported(struct mdss_data_type *mdata,
+ u32 intf_type, u32 mixer_type)
+{
+ int support = mdata->ncdm;
+
+ /*
+ * CDM is supported when the device tree instantiated a CDM block and
+ * the output is either HDMI, or an interface-less path whose mixer is
+ * of INTF or WRITEBACK type.
+ * NOTE(review): an earlier comment described this as "HDMI OR WB2",
+ * but the code also accepts MIXER_TYPE_INTF with no interface -
+ * confirm the intended condition.
+ */
+ return support && ((intf_type == MDSS_INTF_HDMI) ||
+ ((intf_type == MDSS_MDP_NO_INTF) &&
+ ((mixer_type == MDSS_MDP_MIXER_TYPE_INTF) ||
+ (mixer_type == MDSS_MDP_MIXER_TYPE_WRITEBACK))));
+}
+
+/* Worst-case cursor frame: a square of max_cursor_size pixels at 4 bytes
+ * per pixel.
+ */
+static inline u32 mdss_mdp_get_cursor_frame_size(struct mdss_data_type *mdata)
+{
+	u32 side = mdata->max_cursor_size;
+
+	return side * side * 4;
+}
+
+/* Map the pipe's requested CSC coefficient set to the matching
+ * YUV-to-RGB matrix id; 709 limited-range is the fallback.
+ */
+static inline uint8_t pp_vig_csc_pipe_val(struct mdss_mdp_pipe *pipe)
+{
+	uint8_t val = MDSS_MDP_CSC_YUV2RGB_709L;
+
+	if (pipe->csc_coeff_set == MDP_CSC_ITU_R_601)
+		val = MDSS_MDP_CSC_YUV2RGB_601L;
+	else if (pipe->csc_coeff_set == MDP_CSC_ITU_R_601_FR)
+		val = MDSS_MDP_CSC_YUV2RGB_601FR;
+	else if (pipe->csc_coeff_set == MDP_CSC_ITU_R_2020)
+		val = MDSS_MDP_CSC_YUV2RGB_2020L;
+	else if (pipe->csc_coeff_set == MDP_CSC_ITU_R_2020_FR)
+		val = MDSS_MDP_CSC_YUV2RGB_2020FR;
+
+	return val;
+}
+
+/*
+ * when split_lm topology is used without 3D_Mux, either DSC_MERGE or
+ * split_panel is used during full frame updates. Now when we go from
+ * full frame update to right-only update, we need to disable DSC_MERGE or
+ * split_panel. However, those are controlled through DSC0_COMMON_MODE
+ * register which is double buffered, and this double buffer update is tied to
+ * LM0. Now for right-only update, LM0 will not get double buffer update signal.
+ * So DSC_MERGE or split_panel is not disabled for right-only update which is
+ * a wrong HW state and leads ping-pong timeout. Workaround for this is to use
+ * LM0->DSC0 pair for right-only update and disable DSC_MERGE or split_panel.
+ *
+ * However using LM0->DSC0 pair for right-only update requires many changes
+ * at various levels of SW. To lower the SW impact and still support
+ * right-only partial update, keep SW state as it is but swap mixer register
+ * writes such that we instruct HW to use LM0->DSC0 pair.
+ *
+ * This function will return true if such a swap is needed or not.
+ * Returns false when any of the required pointers is missing.
+ */
+static inline bool mdss_mdp_is_lm_swap_needed(struct mdss_data_type *mdata,
+ struct mdss_mdp_ctl *mctl)
+{
+ if (!mdata || !mctl || !mctl->is_master ||
+ !mctl->panel_data || !mctl->mfd)
+ return false;
+
+ return (is_dsc_compression(&mctl->panel_data->panel_info)) &&
+ (mctl->panel_data->panel_info.partial_update_enabled) &&
+ (mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU)) &&
+ ((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+ ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+ (mctl->panel_data->panel_info.dsc_enc_total == 2))) &&
+ (!mctl->mixer_left->valid_roi) &&
+ (mctl->mixer_right->valid_roi);
+}
+
+/* fb index of the display owning @pipe, or -1 when unattached. */
+static inline int mdss_mdp_get_display_id(struct mdss_mdp_pipe *pipe)
+{
+	if (pipe && pipe->mfd)
+		return pipe->mfd->index;
+
+	return -1;
+}
+
+/* A commit is "full frame" when it is a default (non-partial) update and
+ * every valid mixer ROI spans that mixer's entire width and height.
+ */
+static inline bool mdss_mdp_is_full_frame_update(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_mixer *mixers[2];
+	int i;
+
+	if (mdss_mdp_get_pu_type(ctl) != MDSS_MDP_DEFAULT_UPDATE)
+		return false;
+
+	mixers[0] = ctl->mixer_left;
+	mixers[1] = ctl->mixer_right;
+
+	for (i = 0; i < 2; i++) {
+		struct mdss_mdp_mixer *mixer = mixers[i];
+		struct mdss_rect *roi;
+
+		if (!mixer || !mixer->valid_roi)
+			continue;
+
+		roi = &mixer->roi;
+		if (roi->x || roi->y ||
+		    (roi->w != mixer->width) || (roi->h != mixer->height))
+			return false;
+	}
+
+	return true;
+}
+
+/* Lineptr interrupts are usable on video-mode paths, or on command-mode
+ * panels with tear-check enabled; only valid on a master ctl.
+ */
+static inline bool mdss_mdp_is_lineptr_supported(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!ctl || !ctl->mixer_left || !ctl->is_master)
+		return false;
+
+	if (ctl->is_video_mode)
+		return true;
+
+	pinfo = &ctl->panel_data->panel_info;
+	return (pinfo->type == MIPI_CMD_PANEL) && pinfo->te.tear_check_en;
+}
+
+/* SMMU mapping can be skipped for secure-display buffers, except on
+ * targets carrying the MDSS_QUIRK_NEED_SECURE_MAP quirk (e.g. ULT).
+ */
+static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
+	struct mdss_mdp_img_data *data)
+{
+	bool secure_ui = !!(data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION);
+
+	if (!secure_ui)
+		return true;
+
+	return mdss_has_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
+}
+
+/* Pick the output format the rotator produces for a given input format,
+ * taking 90-degree rotation and BWC into account; unknown formats pass
+ * through unchanged.
+ */
+static inline u32 mdss_mdp_get_rotator_dst_format(u32 in_format, u32 in_rot90,
+	u32 bwc)
+{
+	u32 out = in_format;
+
+	if (in_format == MDP_RGB_565 || in_format == MDP_BGR_565) {
+		if (in_rot90)
+			out = MDP_RGB_888;
+	} else if (in_format == MDP_RGBA_8888) {
+		if (bwc)
+			out = MDP_BGRA_8888;
+	} else if (in_format == MDP_Y_CBCR_H2V2_VENUS ||
+		   in_format == MDP_Y_CRCB_H2V2_VENUS ||
+		   in_format == MDP_Y_CBCR_H2V2) {
+		if (in_rot90)
+			out = MDP_Y_CRCB_H2V2;
+	} else if (in_format == MDP_Y_CB_CR_H2V2 ||
+		   in_format == MDP_Y_CR_CB_GH2V2 ||
+		   in_format == MDP_Y_CR_CB_H2V2) {
+		out = MDP_Y_CRCB_H2V2;
+	}
+
+	return out;
+}
+
+irqreturn_t mdss_mdp_isr(int irq, void *ptr);
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+ u32 intr_type, u32 intf_num);
+int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num);
+void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num);
+void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num);
+int mdss_mdp_hist_irq_enable(u32 irq);
+void mdss_mdp_hist_irq_disable(u32 irq);
+void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num);
+int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
+ void (*fnc_ptr)(void *), void *arg);
+int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
+ void (*fnc_ptr)(void *), void *arg);
+u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num);
+
+void mdss_mdp_footswitch_ctrl_splash(int on);
+void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable);
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
+unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked);
+int mdss_mdp_vsync_clk_enable(int enable, bool locked);
+void mdss_mdp_clk_ctrl(int enable);
+struct mdss_data_type *mdss_mdp_get_mdata(void);
+int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
+ unsigned int enable);
+
+int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
+int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
+ struct mdss_panel_data *pdata, struct dynamic_fps_data *data);
+int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
+ struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
+ struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+
+int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
+ struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
+ struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+bool mdss_mdp_wfd_is_config_same(struct msm_fb_data_type *mfd,
+ struct mdp_output_layer *layer);
+
+int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
+ struct mdp_position_update *update_pos);
+
+int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
+ struct mdp_overlay *req,
+ struct mdss_mdp_format_params *fmt);
+int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
+int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
+ struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
+ struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer);
+void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
+ u32 type);
+int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx);
+int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd);
+void mdss_mdp_overlay_set_chroma_sample(
+ struct mdss_mdp_pipe *pipe);
+int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
+ u32 flags);
+int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe);
+struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
+ struct mdss_mdp_mixer *mixer, u32 ndx,
+ enum mdss_mdp_pipe_rect rect_num);
+struct mdss_mdp_pipe *mdss_mdp_overlay_pipe_reuse(
+ struct msm_fb_data_type *mfd, int pipe_ndx);
+void mdss_mdp_pipe_position_update(struct mdss_mdp_pipe *pipe,
+ struct mdss_rect *src, struct mdss_rect *dst);
+int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
+ u32 *offsets, u32 count);
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_switch_roi_reset(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_switch_to_cmd_mode(struct mdss_mdp_ctl *ctl, int prep);
+void mdss_mdp_switch_to_vid_mode(struct mdss_mdp_ctl *ctl, int prep);
+void *mdss_mdp_get_intf_base_addr(struct mdss_data_type *mdata,
+ u32 interface_id);
+int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
+ struct mdp_display_commit *data);
+struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
+ struct mdss_mdp_pipe *pipe);
+void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
+ struct mdss_mdp_data *buf);
+
+int mdss_mdp_ctl_reconfig(struct mdss_mdp_ctl *ctl,
+ struct mdss_panel_data *pdata);
+struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
+ struct msm_fb_data_type *mfd);
+int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+ bool handoff);
+int mdss_mdp_cmd_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+ bool handoff);
+int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff);
+void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
+ struct mdss_panel_data *pdata);
+int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff);
+int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int panel_power_mode);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg,
+ u32 flags);
+int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo);
+int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_pipe **left_plist, int left_cnt,
+ struct mdss_mdp_pipe **right_plist, int right_cnt);
+int mdss_mdp_perf_bw_check_pipe(struct mdss_mdp_perf_params *perf,
+ struct mdss_mdp_pipe *pipe);
+int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
+ struct mdss_rect *roi, u64 *quota, u64 *quota_nocr, u32 flags);
+int mdss_mdp_get_panel_params(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer, u32 *fps, u32 *v_total,
+ u32 *h_total, u32 *xres);
+int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_perf_params *perf, struct mdss_rect *roi,
+ u32 flags);
+bool mdss_mdp_is_amortizable_pipe(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer, struct mdss_data_type *mdata);
+u32 mdss_mdp_calc_latency_buf_bytes(bool is_yuv, bool is_bwc,
+ bool is_tile, u32 src_w, u32 bpp, bool use_latency_buf_percentage,
+ u32 smp_bytes, bool is_ubwc, bool is_nv12, bool is_hflip);
+u32 mdss_mdp_get_mdp_clk_rate(struct mdss_data_type *mdata);
+int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event);
+void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
+ struct notifier_block *notifier);
+void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
+ struct notifier_block *notifier);
+u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl);
+u32 apply_comp_ratio_factor(u32 quota, struct mdss_mdp_format_params *fmt,
+ struct mult_factor *factor);
+
+int mdss_mdp_scan_pipes(void);
+
+int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
+ struct mdss_mdp_pipe *pipe);
+
+void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
+ enum mdss_mdp_perf_state_type component, bool new_status);
+void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
+ u32 flush_bits);
+int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe);
+struct mdss_mdp_mixer *mdss_mdp_block_mixer_alloc(void);
+int mdss_mdp_block_mixer_destroy(struct mdss_mdp_mixer *mixer);
+struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux);
+struct mdss_mdp_pipe *mdss_mdp_get_staged_pipe(struct mdss_mdp_ctl *ctl,
+ int mux, int stage, bool is_right_blend);
+int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer, int params_changed);
+int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer);
+void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer);
+void mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
+ struct mdss_mdp_commit_cb *commit_cb);
+int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl, bool use_lock);
+int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
+ ktime_t *wakeup_time);
+
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 csc_type);
+int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, struct mdp_csc_cfg *data);
+
+int mdss_mdp_pp_init(struct device *dev);
+void mdss_mdp_pp_term(struct device *dev);
+int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd);
+
+int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd);
+
+int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
+void mdss_mdp_pipe_pp_clear(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
+int mdss_mdp_pp_sspp_config(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer);
+void mdss_mdp_free_layer_pp_info(struct mdp_input_layer *layer);
+
+int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size);
+
+void mdss_hw_init(struct mdss_data_type *mdata);
+
+int mdss_mdp_mfd_valid_dspp(struct msm_fb_data_type *mfd);
+
+int mdss_mdp_pa_config(struct msm_fb_data_type *mfd,
+ struct mdp_pa_cfg_data *config, u32 *copyback);
+int mdss_mdp_pa_v2_config(struct msm_fb_data_type *mfd,
+ struct mdp_pa_v2_cfg_data *config, u32 *copyback);
+int mdss_mdp_pcc_config(struct msm_fb_data_type *mfd,
+ struct mdp_pcc_cfg_data *cfg_ptr, u32 *copyback);
+int mdss_mdp_igc_lut_config(struct msm_fb_data_type *mfd,
+ struct mdp_igc_lut_data *config, u32 *copyback,
+ u32 copy_from_kernel);
+int mdss_mdp_argc_config(struct msm_fb_data_type *mfd,
+ struct mdp_pgc_lut_data *config, u32 *copyback);
+int mdss_mdp_hist_lut_config(struct msm_fb_data_type *mfd,
+ struct mdp_hist_lut_data *config, u32 *copyback);
+int mdss_mdp_pp_default_overlay_config(struct msm_fb_data_type *mfd,
+ struct mdss_panel_data *pdata,
+ bool enable);
+int mdss_mdp_dither_config(struct msm_fb_data_type *mfd,
+ struct mdp_dither_cfg_data *config, u32 *copyback,
+ int copy_from_kernel);
+int mdss_mdp_gamut_config(struct msm_fb_data_type *mfd,
+ struct mdp_gamut_cfg_data *config, u32 *copyback);
+
+int mdss_mdp_hist_intr_req(struct mdss_intr *intr, u32 bits, bool en);
+int mdss_mdp_hist_intr_setup(struct mdss_intr *intr, int state);
+int mdss_mdp_hist_start(struct mdp_histogram_start_req *req);
+int mdss_mdp_hist_stop(u32 block);
+int mdss_mdp_hist_collect(struct mdp_histogram_data *hist);
+void mdss_mdp_hist_intr_done(u32 isr);
+
+int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
+ struct mdss_ad_init_cfg *init_cfg);
+int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
+ struct mdss_ad_input *input, int wait);
+int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_offsets);
+int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
+ struct mdss_calib_cfg *cfg);
+
+int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_smp_handoff(struct mdss_data_type *mdata);
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
+ u32 type, struct mdss_mdp_pipe *left_blend_pipe);
+struct mdss_mdp_pipe *mdss_mdp_pipe_get(u32 ndx,
+ enum mdss_mdp_pipe_rect rect_num);
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+ u32 ndx, enum mdss_mdp_pipe_rect rect_num);
+int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe);
+
+u32 mdss_mdp_smp_calc_num_blocks(struct mdss_mdp_pipe *pipe);
+u32 mdss_mdp_smp_get_size(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_smp_release(struct mdss_mdp_pipe *pipe);
+
+int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
+ struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id,
+ u32 type, const int *pnums, u32 len, u32 rects_per_sspp,
+ u8 priority_base);
+int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata, u32 *mixer_offsets,
+ u32 *dspp_offsets, u32 *pingpong_offsets, u32 type, u32 len);
+int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets,
+ u32 len);
+int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
+ u32 num_wb, u32 num_intf_wb);
+
+void mdss_mdp_pipe_clk_force_off(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe, bool is_recovery);
+int mdss_mdp_pipe_panic_signal_ctrl(struct mdss_mdp_pipe *pipe, bool enable);
+void mdss_mdp_bwcpanic_ctrl(struct mdss_data_type *mdata, bool enable);
+int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_data *src_data);
+
+int mdss_mdp_data_check(struct mdss_mdp_data *data,
+ struct mdss_mdp_plane_sizes *ps,
+ struct mdss_mdp_format_params *fmt);
+int mdss_mdp_get_plane_sizes(struct mdss_mdp_format_params *fmt, u32 w, u32 h,
+ struct mdss_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation);
+int mdss_mdp_get_rau_strides(u32 w, u32 h, struct mdss_mdp_format_params *fmt,
+ struct mdss_mdp_plane_sizes *ps);
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+ struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt);
+void mdss_mdp_format_flag_removal(u32 *table, u32 num, u32 remove_bits);
+struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format);
+int mdss_mdp_validate_offset_for_ubwc_format(
+ struct mdss_mdp_format_params *fmt, u16 x, u16 y);
+void mdss_mdp_get_v_h_subsample_rate(u8 chroma_samp,
+ u8 *v_sample, u8 *h_sample);
+struct mult_factor *mdss_mdp_get_comp_factor(u32 format,
+ bool rt_factor);
+int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir);
+void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir);
+int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
+ struct msmfb_data *planes, int num_planes, u32 flags,
+ struct device *dev, bool rotator, int dir,
+ struct mdp_layer_buffer *buffer);
+u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd);
+int mdss_mdp_calc_phase_step(u32 src, u32 dst, u32 *out_phase);
+
+void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
+ const struct mdss_rect *dst_rect,
+ const struct mdss_rect *sci_rect);
+void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
+ struct mdss_rect *dst_rect,
+ const struct mdss_rect *sci_rect);
+void rect_copy_mdss_to_mdp(struct mdp_rect *user, struct mdss_rect *kernel);
+void rect_copy_mdp_to_mdss(struct mdp_rect *user, struct mdss_rect *kernel);
+bool mdss_rect_overlap_check(struct mdss_rect *rect1, struct mdss_rect *rect2);
+void mdss_rect_split(struct mdss_rect *in_roi, struct mdss_rect *l_roi,
+ struct mdss_rect *r_roi, u32 splitpoint);
+
+
+int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
+bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_pipe *pipe);
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval);
+
+int mdss_mdp_calib_config(struct mdp_calib_config_data *cfg, u32 *copyback);
+int mdss_mdp_calib_config_buffer(struct mdp_calib_config_buffer *cfg,
+ u32 *copyback);
+int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pipe_is_staged(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_writeback_display_commit(struct mdss_mdp_ctl *ctl, void *arg);
+struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
+ u32 return_type);
+void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
+ struct mdss_rect *l_roi, struct mdss_rect *r_roi);
+void mdss_mdp_mixer_update_pipe_map(struct mdss_mdp_ctl *master_ctl,
+ int mixer_mux);
+
+void mdss_mdp_pipe_calc_pixel_extn(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_pipe_calc_qseed3_cfg(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_ctl_restore(bool locked);
+int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl, bool is_recovery);
+int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt);
+void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params);
+int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *ctl, int frame_cnt);
+int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt);
+int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_ctl_event_timer(void *data);
+int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version);
+
+struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
+ u32 off);
+int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl);
+
+struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb, bool rot);
+struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
+ struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator);
+int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
+
+bool mdss_mdp_is_wb_mdp_intf(u32 num, u32 reg_index);
+struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 id, u32 reg_index);
+struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index);
+void mdss_mdp_wb_free(struct mdss_mdp_writeback *wb);
+
+void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
+ struct mdss_panel_info *pinfo);
+
+void mdss_mdp_video_isr(void *ptr, u32 count);
+void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata);
+void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata);
+
+void mdss_mdp_set_supported_formats(struct mdss_data_type *mdata);
+
+void mdss_mdp_frc_fsm_init_state(struct mdss_mdp_frc_fsm *frc_fsm);
+void mdss_mdp_frc_fsm_change_state(struct mdss_mdp_frc_fsm *frc_fsm,
+ enum mdss_mdp_frc_state_type state,
+ void (*cb)(struct mdss_mdp_frc_fsm *frc_fsm));
+void mdss_mdp_frc_fsm_update_state(struct mdss_mdp_frc_fsm *frc_fsm);
+
+#ifdef CONFIG_FB_MSM_MDP_NONE
+/*
+ * Stub implementations used when the MDP driver is compiled out, so that
+ * callers can still link against these entry points.
+ * NOTE(review): these are non-static function definitions in a header; if
+ * this header were included by more than one translation unit with
+ * CONFIG_FB_MSM_MDP_NONE set, duplicate-symbol link errors would result —
+ * confirm whether "static inline" was intended.
+ */
+struct mdss_data_type *mdss_mdp_get_mdata(void)
+{
+	return NULL;
+}
+
+/* Stub: no pp info exists to copy; report failure. */
+int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer)
+{
+	return -EFAULT;
+}
+
+/* Stub: nothing was allocated, so nothing to free. */
+void mdss_mdp_free_layer_pp_info(struct mdp_input_layer *layer)
+{
+}
+
+#endif /* CONFIG_FB_MSM_MDP_NONE */
+#endif /* MDSS_MDP_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.c b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
new file mode 100644
index 0000000..ab680f5
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+static u32 cdm_cdwn2_cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+static u32 cdm_cdwn2_offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+static u32 cdm_cdwn2_cosite_v_coeff[] = {0x00080004};
+static u32 cdm_cdwn2_offsite_v_coeff[] = {0x00060002};
+
+#define VSYNC_TIMEOUT_US 16000
+
+/**
+ * mdss_mdp_cdm_alloc() - Allocates a cdm block by parsing the list of
+ *			  available cdm blocks.
+ * @mdata: structure containing the list of cdm blocks
+ *
+ * Scans mdata->cdm_off[0..ncdm) under cdm_lock and claims the first block
+ * whose refcount has dropped to zero by re-initializing its kref.
+ *
+ * Return: the claimed block, or NULL when every CDM block is in use.
+ */
+static struct mdss_mdp_cdm *mdss_mdp_cdm_alloc(struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_cdm *cdm = NULL;
+	u32 i = 0;
+
+	mutex_lock(&mdata->cdm_lock);
+
+	for (i = 0; i < mdata->ncdm; i++) {
+		cdm = mdata->cdm_off + i;
+		/* refcount == 0 means the block is free; claim it */
+		if (atomic_read(&cdm->kref.refcount) == 0) {
+			kref_init(&cdm->kref);
+			cdm->mdata = mdata;
+			pr_debug("alloc cdm=%d\n", cdm->num);
+			break;
+		}
+		/* reset so a full scan with no free block returns NULL */
+		cdm = NULL;
+	}
+
+	mutex_unlock(&mdata->cdm_lock);
+
+	return cdm;
+}
+
+/**
+ * mdss_mdp_cdm_free() - Adds the CDM block back to the available list
+ * @kref: Reference count structure embedded in the block being released
+ *
+ * Release callback invoked by kref_put() when the last reference is
+ * dropped.  Wakes any waiter blocked on free_comp (see
+ * mdss_mdp_cdm_init()) so the block can be re-allocated.  The block
+ * itself lives in mdata->cdm_off, so no memory is freed here.
+ */
+static void mdss_mdp_cdm_free(struct kref *kref)
+{
+	struct mdss_mdp_cdm *cdm = container_of(kref, struct mdss_mdp_cdm,
+						kref);
+
+	/*
+	 * No NULL check needed: kref_put() only calls the release
+	 * callback with a valid embedded kref, and container_of() of a
+	 * valid member pointer cannot yield NULL.
+	 */
+	complete_all(&cdm->free_comp);
+	pr_debug("free cdm_num = %d\n", cdm->num);
+}
+
+/**
+ * mdss_mdp_cdm_init() - Allocates a CDM block and initializes the hardware
+ *			 and software context. This should be called once
+ *			 when setting up the usecase and released when done.
+ * @ctl: Pointer to the control structure.
+ * @intf_type: Output interface which will be connected to CDM.
+ *
+ * Return: the allocated block, or ERR_PTR(-EBUSY) when none is available.
+ */
+struct mdss_mdp_cdm *mdss_mdp_cdm_init(struct mdss_mdp_ctl *ctl, u32 intf_type)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_cdm *cdm = NULL;
+
+	cdm = mdss_mdp_cdm_alloc(mdata);
+
+	/*
+	 * give hdmi interface priority to alloc the cdm block. It will wait
+	 * for one vsync cycle to allow wfd to finish its job and try to
+	 * reserve the block again.
+	 */
+	if (!cdm && (intf_type == MDP_CDM_CDWN_OUTPUT_HDMI)) {
+		/* always wait for first cdm block */
+		cdm = mdata->cdm_off;
+		if (cdm) {
+			reinit_completion(&cdm->free_comp);
+			/*
+			 * no need to check the return status of completion
+			 * timeout. Next cdm_alloc call will try to reserve
+			 * the cdm block and returns failure if allocation
+			 * fails.
+			 */
+			wait_for_completion_timeout(&cdm->free_comp,
+					usecs_to_jiffies(VSYNC_TIMEOUT_US));
+			cdm = mdss_mdp_cdm_alloc(mdata);
+		}
+	}
+
+	if (!cdm) {
+		pr_err("%s: Unable to allocate cdm\n", __func__);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* start in bypass with a clean cached configuration */
+	cdm->out_intf = intf_type;
+	cdm->is_bypassed = true;
+	memset(&cdm->setup, 0x0, sizeof(struct mdp_cdm_cfg));
+
+	return cdm;
+}
+
+/**
+ * mdss_mdp_cdm_csc_setup - Programs the CSC block.
+ * @cdm:  Pointer to the CDM structure.
+ * @data: Pointer to the structure containing configuration data.
+ *
+ * Loads the CSC matrix selected by @data->csc_type and enables the CSC
+ * stage only for RGB-to-YUV conversions; any other type leaves the CSC
+ * disabled and marks the whole CDM as bypassed.
+ *
+ * Return: always 0.
+ */
+static int mdss_mdp_cdm_csc_setup(struct mdss_mdp_cdm *cdm,
+				  struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+	u32 op_mode = 0;
+
+	/* program coefficient/bias tables for the requested conversion */
+	mdss_mdp_csc_setup(MDSS_MDP_BLOCK_CDM, cdm->num, data->csc_type);
+
+	if ((data->csc_type == MDSS_MDP_CSC_RGB2YUV_601L) ||
+	    (data->csc_type == MDSS_MDP_CSC_RGB2YUV_601FR) ||
+	    (data->csc_type == MDSS_MDP_CSC_RGB2YUV_709L)) {
+		op_mode |= BIT(2); /* DST_DATA_FORMAT = YUV */
+		op_mode &= ~BIT(1); /* SRC_DATA_FORMAT = RGB */
+		op_mode |= BIT(0); /* EN = 1 */
+		cdm->is_bypassed = false;
+	} else {
+		op_mode = 0;
+		cdm->is_bypassed = true;
+	}
+
+	writel_relaxed(op_mode, cdm->base + MDSS_MDP_REG_CDM_CSC_10_OPMODE);
+
+	return rc;
+}
+
+/**
+ * mdss_mdp_cdm_cdwn_setup - Programs the chroma down (CDWN2) block.
+ * @cdm:  Pointer to the CDM structure.
+ * @data: Pointer to the structure containing configuration data.
+ *
+ * Selects the horizontal and vertical down-sampling methods, loads the
+ * filter coefficients needed for the co-site/off-site modes, and programs
+ * the output size and clamp registers.
+ *
+ * Return: 0 on success, -EINVAL on an unknown down-sampling type.
+ */
+static int mdss_mdp_cdm_cdwn_setup(struct mdss_mdp_cdm *cdm,
+				   struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	/* BIT(7): input bit depth select (clear = 10-bit, set = 8-bit) */
+	if (data->mdp_csc_bit_depth == MDP_CDM_CSC_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (data->horz_downsampling_type) {
+	case MDP_CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case MDP_CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case MDP_CDM_CDWN_AVG:
+		/* Set METHOD_H field to average (0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case MDP_CDM_CDWN_COSITE:
+		/* Set METHOD_H field to co-site (0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		writel_relaxed(cdm_cdwn2_cosite_h_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_0);
+		writel_relaxed(cdm_cdwn2_cosite_h_coeff[1], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_1);
+		writel_relaxed(cdm_cdwn2_cosite_h_coeff[2], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_2);
+		break;
+	case MDP_CDM_CDWN_OFFSITE:
+		/* Set METHOD_H field to off-site (0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		writel_relaxed(cdm_cdwn2_offsite_h_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_0);
+		writel_relaxed(cdm_cdwn2_offsite_h_coeff[1], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_1);
+		writel_relaxed(cdm_cdwn2_offsite_h_coeff[2], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_2);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (data->vert_downsampling_type) {
+	case MDP_CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case MDP_CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case MDP_CDM_CDWN_AVG:
+		/* Set METHOD_V field to average (0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case MDP_CDM_CDWN_COSITE:
+		/* Set METHOD_V field to co-site (0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		writel_relaxed(cdm_cdwn2_cosite_v_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_V);
+		break;
+	case MDP_CDM_CDWN_OFFSITE:
+		/* Set METHOD_V field to off-site (0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		writel_relaxed(cdm_cdwn2_offsite_v_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_V);
+		break;
+	default:
+		pr_err("%s invalid vert down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* BIT(0): enable the CDWN module only if any axis is down-sampled */
+	if (data->vert_downsampling_type || data->horz_downsampling_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	/* pack width into [15:0] and height into [31:16] */
+	out_size = (data->output_width & 0xFFFF) |
+		   ((data->output_height & 0xFFFF) << 16);
+	writel_relaxed(out_size, cdm->base + MDSS_MDP_REG_CDM_CDWN2_OUT_SIZE);
+	writel_relaxed(opmode, cdm->base + MDSS_MDP_REG_CDM_CDWN2_OP_MODE);
+	writel_relaxed(((0x3FF << 16) | 0x0),
+		       cdm->base + MDSS_MDP_REG_CDM_CDWN2_CLAMP_OUT);
+	return rc;
+
+}
+
+/**
+ * mdss_mdp_cdm_out_packer_setup - Programs the output packer block.
+ * @cdm:  Pointer to the CDM structure.
+ * @data: Pointer to the structure containing configuration data.
+ *
+ * For the HDMI path the HDMI packer is enabled with the chroma sample
+ * layout of @data->out_format, and when the CDM is not bypassed the CDM
+ * output is enabled in MDP_OUT_CTL_0.  For the writeback path the HDMI
+ * packer stays disabled.
+ *
+ * Return: 0 on success, -EINVAL if @data->out_format is unsupported.
+ */
+static int mdss_mdp_cdm_out_packer_setup(struct mdss_mdp_cdm *cdm,
+					 struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+	u32 opmode = 0;
+	u32 cdm_enable = 0;
+	struct mdss_mdp_format_params *fmt;
+
+	if (cdm->out_intf == MDP_CDM_CDWN_OUTPUT_HDMI) {
+		/* Enable HDMI packer */
+		opmode |= BIT(0);
+		fmt = mdss_mdp_get_format_params(data->out_format);
+		if (!fmt) {
+			pr_err("cdm format = %d, not supported\n",
+			       data->out_format);
+			return -EINVAL;
+		}
+		/* bits [2:1]: chroma sample layout of the output format */
+		opmode &= ~0x6;
+		opmode |= (fmt->chroma_sample << 1);
+		if (!cdm->is_bypassed)
+			cdm_enable |= BIT(19);
+
+	} else {
+		/* Disable HDMI packer for WB */
+		opmode = 0;
+		if (!cdm->is_bypassed)
+			cdm_enable |= BIT(24);
+	}
+	writel_relaxed(cdm_enable, cdm->mdata->mdp_base +
+		       MDSS_MDP_MDP_OUT_CTL_0);
+	writel_relaxed(opmode, cdm->base + MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE);
+
+	return rc;
+}
+
+/**
+ * mdss_mdp_cdm_setup - Sets up the CDM block based on the usecase. The CDM
+ *			block should be initialized before calling this
+ *			function.
+ * @cdm:  Pointer to the CDM structure.
+ * @data: Pointer to the structure containing configuration data.
+ *
+ * Programs, in order, the CSC, chroma-down and output-packer stages while
+ * holding cdm->lock, with the MDP clocks forced on around the register
+ * writes.  On success the applied configuration is cached in cdm->setup.
+ *
+ * Return: 0 on success, or the negative error code of the failing stage.
+ */
+int mdss_mdp_cdm_setup(struct mdss_mdp_cdm *cdm, struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+
+	if (!cdm || !data) {
+		pr_err("%s: invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	/* keep MDP clocks up while touching CDM registers */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mutex_lock(&cdm->lock);
+	/* Setup CSC block */
+	rc = mdss_mdp_cdm_csc_setup(cdm, data);
+	if (rc) {
+		pr_err("%s: csc configuration failure\n", __func__);
+		goto fail;
+	}
+
+	/* Setup chroma down sampler */
+	rc = mdss_mdp_cdm_cdwn_setup(cdm, data);
+	if (rc) {
+		pr_err("%s: cdwn configuration failure\n", __func__);
+		goto fail;
+	}
+
+	/* Setup HDMI packer */
+	rc = mdss_mdp_cdm_out_packer_setup(cdm, data);
+	if (rc) {
+		pr_err("%s: out packer configuration failure\n", __func__);
+		goto fail;
+	}
+
+	/* cache the applied configuration for later reference */
+	memcpy(&cdm->setup, data, sizeof(struct mdp_cdm_cfg));
+
+fail:
+	mutex_unlock(&cdm->lock);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
+/**
+ * mdss_mdp_cdm_destroy - Destroys the CDM configuration and return it to
+ *			  default state.
+ * @cdm: Pointer to the CDM structure, as returned by mdss_mdp_cdm_init().
+ *
+ * Drops the caller's reference on the block; when the last reference is
+ * gone the kref release callback marks the block as free again.
+ *
+ * Return: 0 on success, -EINVAL when @cdm is NULL.
+ */
+int mdss_mdp_cdm_destroy(struct mdss_mdp_cdm *cdm)
+{
+	if (!cdm) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	kref_put(&cdm->kref, mdss_mdp_cdm_free);
+
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.h b/drivers/video/fbdev/msm/mdss_mdp_cdm.h
new file mode 100644
index 0000000..c494720
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014,2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_CDM_H
+#define MDSS_MDP_CDM_H
+
+#include <linux/msm_mdp.h>
+#include <linux/kref.h>
+
+/* Chroma down-sampling methods implemented by the CDWN2 block */
+enum mdp_cdm_cdwn_method_type {
+	MDP_CDM_CDWN_DISABLE,
+	MDP_CDM_CDWN_PIXEL_DROP,
+	MDP_CDM_CDWN_AVG,
+	MDP_CDM_CDWN_COSITE,
+	MDP_CDM_CDWN_OFFSITE,
+};
+
+/* Output interfaces the CDM block can feed */
+enum mdp_cdm_cdwn_output_type {
+	MDP_CDM_CDWN_OUTPUT_HDMI,
+	MDP_CDM_CDWN_OUTPUT_WB,
+};
+
+/* Bit depth handled by the CSC stage */
+enum mdp_cdm_csc_bit_depth {
+	MDP_CDM_CSC_8BIT,
+	MDP_CDM_CSC_10BIT,
+};
+
+/*
+ * struct mdp_cdm_cfg - complete configuration for one CDM usecase,
+ * consumed by mdss_mdp_cdm_setup().
+ */
+struct mdp_cdm_cfg {
+	/* CSC block configuration */
+	u32 mdp_csc_bit_depth;		/* enum mdp_cdm_csc_bit_depth */
+	u32 csc_type;			/* CSC matrix selector */
+	/* CDWN block configuration */
+	u32 horz_downsampling_type;	/* enum mdp_cdm_cdwn_method_type */
+	u32 vert_downsampling_type;	/* enum mdp_cdm_cdwn_method_type */
+	/* Output packer configuration */
+	u32 output_width;		/* output frame width in pixels */
+	u32 output_height;		/* output frame height in pixels */
+	u32 out_format;			/* MDP pixel format of the output */
+};
+
+/* Software context of one CDM hardware block */
+struct mdss_mdp_cdm {
+	u32 num;			/* hardware block index */
+	char __iomem *base;		/* mapped register base of this block */
+	struct kref kref;		/* 0 refs == block is free */
+	struct mutex lock;		/* serializes register programming */
+
+	struct mdss_data_type *mdata;	/* back-pointer to device data */
+	u32 out_intf;			/* enum mdp_cdm_cdwn_output_type */
+	bool is_bypassed;		/* true when CSC stage is disabled */
+	struct mdp_cdm_cfg setup;	/* last applied configuration */
+	struct completion free_comp;	/* signaled when block becomes free */
+};
+
+struct mdss_mdp_cdm *mdss_mdp_cdm_init(struct mdss_mdp_ctl *ctl,
+				       u32 intf_type);
+int mdss_mdp_cdm_destroy(struct mdss_mdp_cdm *cdm);
+int mdss_mdp_cdm_setup(struct mdss_mdp_cdm *cdm, struct mdp_cdm_cfg *data);
+
+#endif /* MDSS_MDP_CDM_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
new file mode 100644
index 0000000..4f17310
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -0,0 +1,5987 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include <soc/qcom/event_timer.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+#define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
+#define NUM_MIXERCFG_REGS 3
+#define MDSS_MDP_WB_OUTPUT_BPP 3
+struct mdss_mdp_mixer_cfg {
+ u32 config_masks[NUM_MIXERCFG_REGS];
+ bool border_enabled;
+ bool cursor_enabled;
+};
+
+static struct {
+ u32 flush_bit;
+ struct mdss_mdp_hwio_cfg base;
+ struct mdss_mdp_hwio_cfg ext;
+ struct mdss_mdp_hwio_cfg ext2;
+} mdp_pipe_hwio[MDSS_MDP_MAX_SSPP] = {
+ [MDSS_MDP_SSPP_VIG0] = { 0, { 0, 3, 0 }, { 0, 1, 3 } },
+ [MDSS_MDP_SSPP_VIG1] = { 1, { 3, 3, 0 }, { 2, 1, 3 } },
+ [MDSS_MDP_SSPP_VIG2] = { 2, { 6, 3, 0 }, { 4, 1, 3 } },
+ [MDSS_MDP_SSPP_VIG3] = { 18, { 26, 3, 0 }, { 6, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB0] = { 3, { 9, 3, 0 }, { 8, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB1] = { 4, { 12, 3, 0 }, { 10, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB2] = { 5, { 15, 3, 0 }, { 12, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB3] = { 19, { 29, 3, 0 }, { 14, 1, 3 } },
+ [MDSS_MDP_SSPP_DMA0] = { 11, { 18, 3, 0 }, { 16, 1, 3 } },
+ [MDSS_MDP_SSPP_DMA1] = { 12, { 21, 3, 0 }, { 18, 1, 3 } },
+ [MDSS_MDP_SSPP_DMA2] = { 24, .ext2 = { 0, 4, 0 } },
+ [MDSS_MDP_SSPP_DMA3] = { 25, .ext2 = { 4, 4, 0 } },
+ [MDSS_MDP_SSPP_CURSOR0] = { 22, .ext = { 20, 4, 0 } },
+ [MDSS_MDP_SSPP_CURSOR1] = { 23, .ext = { 26, 4, 0 } },
+};
+
+static struct {
+ struct mdss_mdp_hwio_cfg ext2;
+} mdp_pipe_rec1_hwio[MDSS_MDP_MAX_SSPP] = {
+ [MDSS_MDP_SSPP_DMA0] = { .ext2 = { 8, 4, 0 } },
+ [MDSS_MDP_SSPP_DMA1] = { .ext2 = { 12, 4, 0 } },
+ [MDSS_MDP_SSPP_DMA2] = { .ext2 = { 16, 4, 0 } },
+ [MDSS_MDP_SSPP_DMA3] = { .ext2 = { 20, 4, 0 } },
+};
+
+static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_mixer_cfg *cfg);
+
+static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
+{
+ u64 result = val;
+
+ if (val) {
+ u64 temp = -1UL;
+
+ do_div(temp, val);
+ if (temp > numer) {
+ /* no overflow, so we can do the operation*/
+ result = (val * (u64)numer);
+ do_div(result, denom);
+ }
+ }
+ return result;
+}
+
+static inline u64 apply_fudge_factor(u64 val,
+ struct mult_factor *factor)
+{
+ return fudge_factor(val, factor->numer, factor->denom);
+}
+
+static inline u64 apply_inverse_fudge_factor(u64 val,
+ struct mult_factor *factor)
+{
+ return fudge_factor(val, factor->denom, factor->numer);
+}
+
+static DEFINE_MUTEX(mdss_mdp_ctl_lock);
+
+static inline u64 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+ return (ctl->intf_type == MDSS_INTF_DSI) ?
+ pinfo->mipi.dsi_pclk_rate :
+ pinfo->clk_rate;
+}
+
+static inline u32 mdss_mdp_clk_fudge_factor(struct mdss_mdp_mixer *mixer,
+ u32 rate)
+{
+ struct mdss_panel_info *pinfo = &mixer->ctl->panel_data->panel_info;
+
+ rate = apply_fudge_factor(rate, &mdss_res->clk_factor);
+
+ /*
+ * If the panel is video mode and its back porch period is
+ * small, the workaround of increasing mdp clk is needed to
+ * avoid underrun.
+ */
+ if (mixer->ctl->is_video_mode && pinfo &&
+ (pinfo->lcdc.v_back_porch < MDP_MIN_VBP))
+ rate = apply_fudge_factor(rate, &mdss_res->clk_factor);
+
+ return rate;
+}
+
+struct mdss_mdp_prefill_params {
+ u32 smp_bytes;
+ u32 xres;
+ u32 src_w;
+ u32 dst_w;
+ u32 src_h;
+ u32 dst_h;
+ u32 dst_y;
+ u32 bpp;
+ u32 pnum;
+ bool is_yuv;
+ bool is_caf;
+ bool is_fbc;
+ bool is_bwc;
+ bool is_tile;
+ bool is_hflip;
+ bool is_cmd;
+ bool is_ubwc;
+ bool is_nv12;
+};
+
+static inline bool mdss_mdp_perf_is_caf(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/*
+	 * CAF mode filter is enabled when the format is yuv and
+	 * upscaling. Post processing makes the decision to use CAF
+	 * under these conditions.
+	 */
+ return ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
+ pipe->src_fmt->is_yuv && ((pipe->src.h >> pipe->vert_deci) <=
+ pipe->dst.h));
+}
+
+static inline u32 mdss_mdp_calc_y_scaler_bytes(struct mdss_mdp_prefill_params
+ *params, struct mdss_prefill_data *prefill)
+{
+ u32 y_scaler_bytes = 0, y_scaler_lines = 0;
+
+ if (params->is_yuv) {
+ if (params->src_h != params->dst_h) {
+ y_scaler_lines = (params->is_caf) ?
+ prefill->y_scaler_lines_caf :
+ prefill->y_scaler_lines_bilinear;
+ /*
+ * y is src_width, u is src_width/2 and v is
+ * src_width/2, so the total is scaler_lines *
+ * src_w * 2
+ */
+ y_scaler_bytes = y_scaler_lines * params->src_w * 2;
+ }
+ } else {
+ if (params->src_h != params->dst_h) {
+ y_scaler_lines = prefill->y_scaler_lines_bilinear;
+ y_scaler_bytes = y_scaler_lines * params->src_w *
+ params->bpp;
+ }
+ }
+
+ return y_scaler_bytes;
+}
+
+static inline u32 mdss_mdp_align_latency_buf_bytes(
+ u32 latency_buf_bytes, u32 percentage,
+ u32 smp_bytes)
+{
+ u32 aligned_bytes;
+
+ aligned_bytes = ((smp_bytes - latency_buf_bytes) * percentage) / 100;
+
+ pr_debug("percentage=%d, extra_bytes(per)=%d smp_bytes=%d latency=%d\n",
+ percentage, aligned_bytes, smp_bytes, latency_buf_bytes);
+ return latency_buf_bytes + aligned_bytes;
+}
+
+/**
+ * mdss_mdp_calc_latency_buf_bytes() -
+ * Get the number of bytes for the
+ * latency lines.
+ * @is_yuv - true if format is yuv
+ * @is_bwc - true if BWC is enabled
+ * @is_tile - true if it is Tile format
+ * @src_w - source rectangle width
+ * @bpp - Bytes per pixel of source rectangle
+ * @use_latency_buf_percentage - use an extra percentage for
+ * the latency bytes calculation.
+ * @smp_bytes - size of the smp for alignment
+ * @is_ubwc - true if UBWC is enabled
+ * @is_nv12 - true if NV12 format is used
+ * @is_hflip - true if HFLIP is enabled
+ *
+ * Return:
+ * The amount of bytes to consider for the latency lines, where:
+ *	If use_latency_buf_percentage is TRUE:
+ * Function will return the amount of bytes for the
+ * latency lines plus a percentage of the
+ * additional bytes allocated to align with the
+ * SMP size. Percentage is determined by
+ * "latency_buff_per", which can be modified
+ * through debugfs.
+ * If use_latency_buf_percentage is FALSE:
+ *		Function will return only the amount of bytes
+ * for the latency lines without any
+ * extra bytes.
+ */
+u32 mdss_mdp_calc_latency_buf_bytes(bool is_yuv, bool is_bwc,
+ bool is_tile, u32 src_w, u32 bpp, bool use_latency_buf_percentage,
+ u32 smp_bytes, bool is_ubwc, bool is_nv12, bool is_hflip)
+{
+ u32 latency_lines = 0, latency_buf_bytes;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (is_hflip && !mdata->hflip_buffer_reused)
+ latency_lines = 1;
+
+ if (is_yuv) {
+ if (is_ubwc) {
+ if (is_nv12)
+ latency_lines += 8;
+ else
+ latency_lines += 4;
+ latency_buf_bytes = src_w * bpp * latency_lines;
+ } else if (is_bwc) {
+ latency_lines += 4;
+ latency_buf_bytes = src_w * bpp * latency_lines;
+ } else {
+ if (!mdata->hflip_buffer_reused)
+ latency_lines += 1;
+ else
+ latency_lines = 2;
+ /* multiply * 2 for the two YUV planes */
+ latency_buf_bytes = mdss_mdp_align_latency_buf_bytes(
+ src_w * bpp * latency_lines,
+ use_latency_buf_percentage ?
+ mdata->latency_buff_per : 0, smp_bytes) * 2;
+ }
+ } else {
+ if (is_ubwc) {
+ latency_lines += 4;
+ latency_buf_bytes = src_w * bpp * latency_lines;
+ } else if (is_tile) {
+ latency_lines += 8;
+ latency_buf_bytes = src_w * bpp * latency_lines;
+ } else if (is_bwc) {
+ latency_lines += 4;
+ latency_buf_bytes = src_w * bpp * latency_lines;
+ } else {
+ if (!mdata->hflip_buffer_reused)
+ latency_lines += 1;
+ else
+ latency_lines = 2;
+ latency_buf_bytes = mdss_mdp_align_latency_buf_bytes(
+ src_w * bpp * latency_lines,
+ use_latency_buf_percentage ?
+ mdata->latency_buff_per : 0, smp_bytes);
+ }
+ }
+
+ return latency_buf_bytes;
+}
+
+static inline u32 mdss_mdp_calc_scaling_w_h(u32 val, u32 src_h, u32 dst_h,
+ u32 src_w, u32 dst_w)
+{
+ if (dst_h)
+ val = mult_frac(val, src_h, dst_h);
+ if (dst_w)
+ val = mult_frac(val, src_w, dst_w);
+
+ return val;
+}
+
+static u32 mdss_mdp_perf_calc_pipe_prefill_video(struct mdss_mdp_prefill_params
+ *params)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_prefill_data *prefill = &mdata->prefill_data;
+ u32 prefill_bytes = 0;
+ u32 latency_buf_bytes = 0;
+ u32 y_buf_bytes = 0;
+ u32 y_scaler_bytes = 0;
+ u32 pp_bytes = 0, pp_lines = 0;
+ u32 post_scaler_bytes = 0;
+ u32 fbc_bytes = 0;
+
+ prefill_bytes = prefill->ot_bytes;
+
+ latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params->is_yuv,
+ params->is_bwc, params->is_tile, params->src_w, params->bpp,
+ true, params->smp_bytes, params->is_ubwc, params->is_nv12,
+ params->is_hflip);
+ prefill_bytes += latency_buf_bytes;
+ pr_debug("latency_buf_bytes bw_calc=%d actual=%d\n", latency_buf_bytes,
+ params->smp_bytes);
+
+ if (params->is_yuv)
+ y_buf_bytes = prefill->y_buf_bytes;
+
+ y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
+
+ prefill_bytes += y_buf_bytes + y_scaler_bytes;
+
+ if (mdata->apply_post_scale_bytes || (params->src_h != params->dst_h) ||
+ (params->src_w != params->dst_w)) {
+ post_scaler_bytes = prefill->post_scaler_pixels * params->bpp;
+ post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes,
+ params->src_h, params->dst_h, params->src_w,
+ params->dst_w);
+ prefill_bytes += post_scaler_bytes;
+ }
+
+ if (params->xres)
+ pp_lines = DIV_ROUND_UP(prefill->pp_pixels, params->xres);
+ if (params->xres && params->dst_h && (params->dst_y <= pp_lines))
+ pp_bytes = ((params->src_w * params->bpp * prefill->pp_pixels /
+ params->xres) * params->src_h) / params->dst_h;
+ prefill_bytes += pp_bytes;
+
+ if (params->is_fbc) {
+ fbc_bytes = prefill->fbc_lines * params->bpp;
+ fbc_bytes = mdss_mdp_calc_scaling_w_h(fbc_bytes, params->src_h,
+ params->dst_h, params->src_w, params->dst_w);
+ }
+ prefill_bytes += fbc_bytes;
+
+ trace_mdp_perf_prefill_calc(params->pnum, latency_buf_bytes,
+ prefill->ot_bytes, y_buf_bytes, y_scaler_bytes, pp_lines,
+ pp_bytes, post_scaler_bytes, fbc_bytes, prefill_bytes);
+
+ pr_debug("ot=%d y_buf=%d pp_lines=%d pp=%d post_sc=%d fbc_bytes=%d\n",
+ prefill->ot_bytes, y_buf_bytes, pp_lines, pp_bytes,
+ post_scaler_bytes, fbc_bytes);
+
+ return prefill_bytes;
+}
+
+static u32 mdss_mdp_perf_calc_pipe_prefill_cmd(struct mdss_mdp_prefill_params
+ *params)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_prefill_data *prefill = &mdata->prefill_data;
+ u32 prefill_bytes;
+ u32 ot_bytes = 0;
+ u32 latency_lines, latency_buf_bytes;
+ u32 y_buf_bytes = 0;
+ u32 y_scaler_bytes;
+ u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;
+ u32 post_scaler_bytes = 0;
+
+ /* y_scaler_bytes are same for the first or non first line */
+ y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
+ prefill_bytes = y_scaler_bytes;
+
+ /* 1st line if fbc is not enabled and 2nd line if fbc is enabled */
+ if (((params->dst_y == 0) && !params->is_fbc) ||
+ ((params->dst_y <= 1) && params->is_fbc)) {
+ if (params->is_ubwc) {
+ if (params->is_nv12)
+ latency_lines = 8;
+ else
+ latency_lines = 4;
+ } else if (params->is_bwc || params->is_tile) {
+ latency_lines = 4;
+ } else if (params->is_hflip) {
+ latency_lines = 1;
+ } else {
+ latency_lines = 0;
+ }
+ latency_buf_bytes = params->src_w * params->bpp * latency_lines;
+ prefill_bytes += latency_buf_bytes;
+
+ fbc_cmd_lines++;
+ if (params->is_fbc)
+ fbc_cmd_lines++;
+ fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
+ fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
+ params->src_h, params->dst_h, params->src_w,
+ params->dst_w);
+ prefill_bytes += fbc_cmd_bytes;
+ } else {
+ ot_bytes = prefill->ot_bytes;
+ prefill_bytes += ot_bytes;
+
+ latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(
+ params->is_yuv, params->is_bwc, params->is_tile,
+ params->src_w, params->bpp, true, params->smp_bytes,
+ params->is_ubwc, params->is_nv12, params->is_hflip);
+ prefill_bytes += latency_buf_bytes;
+
+ if (params->is_yuv)
+ y_buf_bytes = prefill->y_buf_bytes;
+ prefill_bytes += y_buf_bytes;
+
+ if (mdata->apply_post_scale_bytes ||
+ (params->src_h != params->dst_h) ||
+ (params->src_w != params->dst_w)) {
+ post_scaler_bytes = prefill->post_scaler_pixels *
+ params->bpp;
+ post_scaler_bytes = mdss_mdp_calc_scaling_w_h(
+ post_scaler_bytes, params->src_h,
+ params->dst_h, params->src_w,
+ params->dst_w);
+ prefill_bytes += post_scaler_bytes;
+ }
+ }
+
+ pr_debug("ot=%d bwc=%d smp=%d y_buf=%d fbc=%d\n", ot_bytes,
+ params->is_bwc, latency_buf_bytes, y_buf_bytes, fbc_cmd_bytes);
+
+ return prefill_bytes;
+}
+
+u32 mdss_mdp_perf_calc_pipe_prefill_single(struct mdss_mdp_prefill_params
+ *params)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_prefill_data *prefill = &mdata->prefill_data;
+ u32 prefill_bytes;
+ u32 latency_lines, latency_buf_bytes;
+ u32 y_scaler_bytes;
+ u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;
+
+ if (params->is_ubwc) {
+ if (params->is_nv12)
+ latency_lines = 8;
+ else
+ latency_lines = 4;
+ } else if (params->is_bwc || params->is_tile)
+ /* can start processing after receiving 4 lines */
+ latency_lines = 4;
+ else if (params->is_hflip)
+		/* need one line before reading backwards */
+ latency_lines = 1;
+ else
+ latency_lines = 0;
+ latency_buf_bytes = params->src_w * params->bpp * latency_lines;
+ prefill_bytes = latency_buf_bytes;
+
+ y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
+ prefill_bytes += y_scaler_bytes;
+
+ if (params->is_cmd)
+ fbc_cmd_lines++;
+ if (params->is_fbc)
+ fbc_cmd_lines++;
+
+ if (fbc_cmd_lines) {
+ fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
+ fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
+ params->src_h, params->dst_h, params->src_w,
+ params->dst_w);
+ prefill_bytes += fbc_cmd_bytes;
+ }
+
+ return prefill_bytes;
+}
+
+u32 mdss_mdp_perf_calc_smp_size(struct mdss_mdp_pipe *pipe,
+ bool calc_smp_size)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 smp_bytes;
+
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
+ return 0;
+
+ /* Get allocated or fixed smp bytes */
+ smp_bytes = mdss_mdp_smp_get_size(pipe);
+
+ /*
+ * We need to calculate the SMP size for scenarios where
+ * allocation have not happened yet (i.e. during prepare IOCTL).
+ */
+ if (calc_smp_size && !mdata->has_pixel_ram) {
+ u32 calc_smp_total;
+
+ calc_smp_total = mdss_mdp_smp_calc_num_blocks(pipe);
+ calc_smp_total *= mdata->smp_mb_size;
+
+ /*
+ * If the pipe has fixed SMPs, then we must consider
+ * the max smp size.
+ */
+ if (calc_smp_total > smp_bytes)
+ smp_bytes = calc_smp_total;
+ }
+
+ pr_debug("SMP size (bytes) %d for pnum=%d calc=%d\n",
+ smp_bytes, pipe->num, calc_smp_size);
+ WARN_ON(smp_bytes == 0);
+
+ return smp_bytes;
+}
+
+static void mdss_mdp_get_bw_vote_mode(void *data,
+ u32 mdp_rev, struct mdss_mdp_perf_params *perf,
+ enum perf_calc_vote_mode calc_mode, u32 flags)
+{
+
+ if (!data)
+ goto exit;
+
+ switch (mdp_rev) {
+ case MDSS_MDP_HW_REV_105:
+ case MDSS_MDP_HW_REV_109:
+ if (calc_mode == PERF_CALC_VOTE_MODE_PER_PIPE) {
+ struct mdss_mdp_mixer *mixer =
+ (struct mdss_mdp_mixer *)data;
+
+ if ((flags & PERF_CALC_PIPE_SINGLE_LAYER) &&
+ !mixer->rotator_mode &&
+ (mixer->type == MDSS_MDP_MIXER_TYPE_INTF))
+ set_bit(MDSS_MDP_BW_MODE_SINGLE_LAYER,
+ perf->bw_vote_mode);
+ } else if (calc_mode == PERF_CALC_VOTE_MODE_CTL) {
+ struct mdss_mdp_ctl *ctl = (struct mdss_mdp_ctl *)data;
+
+ if (ctl->is_video_mode &&
+ (ctl->mfd->split_mode == MDP_SPLIT_MODE_NONE))
+ set_bit(MDSS_MDP_BW_MODE_SINGLE_IF,
+ perf->bw_vote_mode);
+ }
+ break;
+ default:
+ break;
+ };
+
+ pr_debug("mode=0x%lx\n", *(perf->bw_vote_mode));
+
+exit:
+ return;
+}
+
+static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
+ struct mdss_rect src, struct mdss_rect dst, u32 src_h,
+ u32 fps, u32 v_total)
+{
+ u32 active_line_cycle, backfill_cycle, total_cycle;
+ u32 ver_dwnscale;
+ u32 active_line;
+ u32 backfill_line;
+
+ ver_dwnscale = (src_h << PHASE_STEP_SHIFT) / dst.h;
+
+ if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
+ << PHASE_STEP_SHIFT)) {
+ active_line = MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
+ << PHASE_STEP_SHIFT;
+ backfill_line = ver_dwnscale - active_line;
+ } else {
+ /* active line same as downscale and no backfill */
+ active_line = ver_dwnscale;
+ backfill_line = 0;
+ }
+
+ active_line_cycle = mult_frac(active_line, src.w,
+ 4) >> PHASE_STEP_SHIFT; /* 4pix/clk */
+ if (active_line_cycle < dst.w)
+ active_line_cycle = dst.w;
+
+ backfill_cycle = mult_frac(backfill_line, src.w, 4) /* 4pix/clk */
+ >> PHASE_STEP_SHIFT;
+
+ total_cycle = active_line_cycle + backfill_cycle;
+
+ pr_debug("line: active=%d backfill=%d vds=%d\n",
+ active_line, backfill_line, ver_dwnscale);
+ pr_debug("cycle: total=%d active=%d backfill=%d\n",
+ total_cycle, active_line_cycle, backfill_cycle);
+
+ return total_cycle * (fps * v_total);
+}
+
+static inline bool __is_vert_downscaling(u32 src_h,
+ struct mdss_rect dst){
+
+ return (src_h > dst.h);
+}
+
+static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
+ struct mdss_rect src, struct mdss_rect dst,
+ u32 fps, u32 v_total, u32 flags)
+{
+ struct mdss_mdp_mixer *mixer;
+ u32 rate, src_h;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ /*
+ * when doing vertical decimation lines will be skipped, hence there is
+ * no need to account for these lines in MDP clock or request bus
+ * bandwidth to fetch them.
+ */
+ mixer = pipe->mixer_left;
+ src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
+
+ if (mixer->rotator_mode) {
+
+ rate = pipe->src.w * pipe->src.h * fps;
+ rate /= 4; /* block mode fetch at 4 pix/clk */
+ } else if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map) &&
+ pipe->scaler.enable && __is_vert_downscaling(src_h, dst)) {
+
+ rate = __calc_qseed3_mdp_clk_rate(pipe, src, dst, src_h,
+ fps, v_total);
+ } else {
+
+ rate = dst.w;
+ if (src_h > dst.h)
+ rate = (rate * src_h) / dst.h;
+
+ rate *= v_total * fps;
+
+ /* pipes decoding BWC content have different clk requirement */
+ if (pipe->bwc_mode && !pipe->src_fmt->is_yuv &&
+ pipe->src_fmt->bpp == 4) {
+ u32 bwc_rate =
+ mult_frac((src.w * src_h * fps), v_total, dst.h << 1);
+ pr_debug("src: w:%d h:%d fps:%d vtotal:%d dst h:%d\n",
+ src.w, src_h, fps, v_total, dst.h);
+ pr_debug("pipe%d: bwc_rate=%d normal_rate=%d\n",
+ pipe->num, bwc_rate, rate);
+ rate = max(bwc_rate, rate);
+ }
+ }
+
+ if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
+ rate = mdss_mdp_clk_fudge_factor(mixer, rate);
+
+ return rate;
+}
+
+static u32 mdss_mdp_get_rotator_fps(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 fps;
+
+ if (pipe->src.w >= 3840 || pipe->src.h >= 3840)
+ fps = ROTATOR_LOW_FRAME_RATE;
+ else if (mdata->traffic_shaper_en)
+ fps = DEFAULT_ROTATOR_FRAME_RATE;
+ else if (pipe->frame_rate)
+ fps = pipe->frame_rate;
+ else
+ fps = DEFAULT_FRAME_RATE;
+
+ pr_debug("rotator fps:%d\n", fps);
+
+ return fps;
+}
+
+int mdss_mdp_get_panel_params(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer, u32 *fps, u32 *v_total,
+ u32 *h_total, u32 *xres)
+{
+
+ if (mixer->rotator_mode) {
+ *fps = mdss_mdp_get_rotator_fps(pipe);
+ } else if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+ struct mdss_panel_info *pinfo;
+
+ if (!mixer->ctl)
+ return -EINVAL;
+
+ pinfo = &mixer->ctl->panel_data->panel_info;
+ if (pinfo->type == MIPI_VIDEO_PANEL) {
+ *fps = pinfo->panel_max_fps;
+ *v_total = pinfo->panel_max_vtotal;
+ } else {
+ *fps = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ *v_total = mdss_panel_get_vtotal(pinfo);
+ }
+ *xres = get_panel_width(mixer->ctl);
+ *h_total = mdss_panel_get_htotal(pinfo, false);
+
+ if (is_pingpong_split(mixer->ctl->mfd))
+ *h_total += mdss_panel_get_htotal(
+ &mixer->ctl->panel_data->next->panel_info,
+ false);
+ } else {
+ *v_total = mixer->height;
+ *xres = mixer->width;
+ *h_total = mixer->width;
+ *fps = DEFAULT_FRAME_RATE;
+ }
+
+ return 0;
+}
+
+int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
+ struct mdss_rect *roi, u64 *quota, u64 *quota_nocr, u32 flags)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_mixer *mixer = pipe->mixer_left;
+ struct mdss_rect src, dst;
+ u32 v_total = 0, h_total = 0, xres = 0, src_h = 0;
+ u32 fps = DEFAULT_FRAME_RATE;
+ *quota = 0;
+ *quota_nocr = 0;
+
+ if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+ &h_total, &xres)) {
+ pr_err(" error retreiving the panel params!\n");
+ return -EINVAL;
+ }
+
+ dst = pipe->dst;
+ src = pipe->src;
+
+ /* crop rectangles */
+ if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
+ mdss_mdp_crop_rect(&src, &dst, roi);
+
+ /*
+ * when doing vertical decimation lines will be skipped, hence there is
+ * no need to account for these lines in MDP clock or request bus
+ * bandwidth to fetch them.
+ */
+ src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
+
+ *quota = fps * src.w * src_h;
+
+ if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
+ /*
+ * with decimation, chroma is not downsampled, this means we
+ * need to allocate bw for extra lines that will be fetched
+ */
+ if (pipe->vert_deci)
+ *quota *= 2;
+ else
+ *quota = (*quota * 3) / 2;
+ else
+ *quota *= pipe->src_fmt->bpp;
+
+ if (mixer->rotator_mode) {
+ if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+ mdata->mdss_qos_map)) {
+ /* rotator read */
+ *quota_nocr += (*quota * 2);
+ *quota = apply_comp_ratio_factor(*quota,
+ pipe->src_fmt, &pipe->comp_ratio);
+ /*
+ * rotator write: here we are using src_fmt since
+ * current implementation only supports calculate
+ * bandwidth based in the source parameters.
+ * The correct fine-tuned calculation should use
+ * destination format and destination rectangles to
+ * calculate the bandwidth, but leaving this
+ * calculation as per current support.
+ */
+ *quota += apply_comp_ratio_factor(*quota,
+ pipe->src_fmt, &pipe->comp_ratio);
+ } else {
+ *quota *= 2; /* bus read + write */
+ }
+ } else {
+
+ *quota = DIV_ROUND_UP_ULL(*quota * v_total, dst.h);
+ if (!mixer->ctl->is_video_mode)
+ *quota = DIV_ROUND_UP_ULL(*quota * h_total, xres);
+
+ *quota_nocr = *quota;
+
+ if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+ mdata->mdss_qos_map))
+ *quota = apply_comp_ratio_factor(*quota,
+ pipe->src_fmt, &pipe->comp_ratio);
+ }
+
+
+ pr_debug("quota:%llu nocr:%llu src.w:%d src.h%d comp:[%d, %d]\n",
+ *quota, *quota_nocr, src.w, src_h, pipe->comp_ratio.numer,
+ pipe->comp_ratio.denom);
+
+ return 0;
+}
+
+static inline bool validate_comp_ratio(struct mult_factor *factor)
+{
+ return factor->numer && factor->denom;
+}
+
+u32 apply_comp_ratio_factor(u32 quota,
+ struct mdss_mdp_format_params *fmt,
+ struct mult_factor *factor)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata || !test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+ mdata->mdss_qos_map))
+ return quota;
+
+ /* apply compression ratio, only for compressed formats */
+ if (mdss_mdp_is_ubwc_format(fmt) &&
+ validate_comp_ratio(factor))
+ quota = apply_inverse_fudge_factor(quota, factor);
+
+ return quota;
+}
+
+static u32 mdss_mdp_get_vbp_factor(struct mdss_mdp_ctl *ctl)
+{
+ u32 fps, v_total, vbp, vbp_fac;
+ struct mdss_panel_info *pinfo;
+
+ if (!ctl || !ctl->panel_data)
+ return 0;
+
+ pinfo = &ctl->panel_data->panel_info;
+ fps = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ v_total = mdss_panel_get_vtotal(pinfo);
+ vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+ vbp += pinfo->prg_fet;
+
+ vbp_fac = (vbp) ? fps * v_total / vbp : 0;
+ pr_debug("vbp_fac=%d vbp=%d v_total=%d\n", vbp_fac, vbp, v_total);
+
+ return vbp_fac;
+}
+
+static u32 mdss_mdp_get_vbp_factor_max(struct mdss_mdp_ctl *ctl)
+{
+ u32 vbp_max = 0;
+ int i;
+ struct mdss_data_type *mdata;
+
+ if (!ctl || !ctl->mdata)
+ return 0;
+
+ mdata = ctl->mdata;
+ for (i = 0; i < mdata->nctl; i++) {
+ struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
+ u32 vbp_fac;
+
+ /* skip command mode interfaces */
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)
+ && !ctl->is_video_mode)
+ continue;
+
+ if (mdss_mdp_ctl_is_power_on(ctl)) {
+ vbp_fac = mdss_mdp_get_vbp_factor(ctl);
+ vbp_max = max(vbp_max, vbp_fac);
+ }
+ }
+
+ return vbp_max;
+}
+
+static u32 __calc_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
+{
+ u32 fps, v_total, vbp, vbp_fac;
+ struct mdss_panel_info *pinfo;
+
+ if (!ctl || !ctl->panel_data)
+ return 0;
+
+ pinfo = &ctl->panel_data->panel_info;
+ fps = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ v_total = mdss_panel_get_vtotal(pinfo);
+ vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+ vbp += pinfo->prg_fet;
+
+ vbp_fac = mult_frac(USEC_PER_SEC, vbp, fps * v_total); /* use uS */
+ pr_debug("vbp_fac=%d vbp=%d v_total=%d fps=%d\n",
+ vbp_fac, vbp, v_total, fps);
+
+ return vbp_fac;
+}
+
+static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
+{
+ u32 vbp_min = 0;
+ int i;
+ struct mdss_data_type *mdata;
+
+ if (!ctl || !ctl->mdata)
+ return 0;
+
+ mdata = ctl->mdata;
+ for (i = 0; i < mdata->nctl; i++) {
+ struct mdss_mdp_ctl *tmp_ctl = mdata->ctl_off + i;
+ u32 vbp_fac;
+
+ /* skip command mode interfaces */
+ if (!tmp_ctl->is_video_mode)
+ continue;
+
+ if (mdss_mdp_ctl_is_power_on(tmp_ctl)) {
+ vbp_fac = __calc_prefill_line_time_us(tmp_ctl);
+ vbp_min = min(vbp_min, vbp_fac);
+ }
+ }
+
+ return vbp_min;
+}
+
+static u32 mdss_mdp_calc_prefill_line_time(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_pipe *pipe)
+{
+ u32 prefill_us = 0;
+ u32 prefill_amortized = 0;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_mixer *mixer;
+ struct mdss_panel_info *pinfo;
+ u32 fps, v_total;
+
+ if (!ctl || !ctl->mdata)
+ return 0;
+
+ mdata = ctl->mdata;
+ mixer = pipe->mixer_left;
+ if (!mixer)
+ return -EINVAL;
+
+ pinfo = &ctl->panel_data->panel_info;
+ fps = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ v_total = mdss_panel_get_vtotal(pinfo);
+
+ /* calculate the minimum prefill */
+ prefill_us = __get_min_prefill_line_time_us(ctl);
+
+ /* if pipe is amortizable, add the amortized prefill contribution */
+ if (mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
+ prefill_amortized = mult_frac(USEC_PER_SEC, pipe->src.y,
+ fps * v_total);
+ prefill_us += prefill_amortized;
+ }
+
+ return prefill_us;
+}
+
+static inline bool __is_multirect_high_pipe(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_mdp_pipe *next_pipe = pipe->multirect.next;
+
+ return (pipe->src.y > next_pipe->src.y);
+}
+
+static u64 mdss_mdp_apply_prefill_factor(u64 prefill_bw,
+ struct mdss_mdp_ctl *ctl, struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u64 total_prefill_bw;
+ u32 prefill_time_us;
+
+ if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
+
+ /*
+ * for multi-rect serial mode, only take the contribution from
+ * pipe that belongs to the rect closest to the origin.
+ */
+ if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL &&
+ __is_multirect_high_pipe(pipe)) {
+ total_prefill_bw = 0;
+ goto exit;
+ }
+
+ prefill_time_us = mdss_mdp_calc_prefill_line_time(ctl, pipe);
+ total_prefill_bw = prefill_time_us ? DIV_ROUND_UP_ULL(
+ USEC_PER_SEC * prefill_bw, prefill_time_us) : 0;
+ } else {
+ total_prefill_bw = prefill_bw *
+ mdss_mdp_get_vbp_factor_max(ctl);
+ }
+
+exit:
+ return total_prefill_bw;
+}
+
+u64 mdss_mdp_perf_calc_simplified_prefill(struct mdss_mdp_pipe *pipe,
+ u32 v_total, u32 fps, struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct simplified_prefill_factors *pfactors =
+ &mdata->prefill_data.prefill_factors;
+ u64 prefill_per_pipe = 0;
+ u32 prefill_lines = pfactors->xtra_ff_factor;
+
+
+ /* do not calculate prefill for command mode */
+ if (!ctl->is_video_mode)
+ goto exit;
+
+ prefill_per_pipe = pipe->src.w * pipe->src_fmt->bpp;
+
+ /* format factors */
+ if (mdss_mdp_is_tile_format(pipe->src_fmt)) {
+ if (mdss_mdp_is_nv12_format(pipe->src_fmt))
+ prefill_lines += pfactors->fmt_mt_nv12_factor;
+ else
+ prefill_lines += pfactors->fmt_mt_factor;
+ } else {
+ prefill_lines += pfactors->fmt_linear_factor;
+ }
+
+ /* scaling factors */
+ if (pipe->src.h > pipe->dst.h) {
+ prefill_lines += pfactors->scale_factor;
+
+ prefill_per_pipe = fudge_factor(prefill_per_pipe,
+ DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci),
+ pipe->dst.h);
+ }
+
+ prefill_per_pipe *= prefill_lines;
+ prefill_per_pipe = mdss_mdp_apply_prefill_factor(prefill_per_pipe,
+ ctl, pipe);
+
+ pr_debug("pipe src: %dx%d bpp:%d\n",
+ pipe->src.w, pipe->src.h, pipe->src_fmt->bpp);
+ pr_debug("ff_factor:%d mt_nv12:%d mt:%d\n",
+ pfactors->xtra_ff_factor,
+ (mdss_mdp_is_tile_format(pipe->src_fmt) &&
+ mdss_mdp_is_nv12_format(pipe->src_fmt)) ?
+ pfactors->fmt_mt_nv12_factor : 0,
+ mdss_mdp_is_tile_format(pipe->src_fmt) ?
+ pfactors->fmt_mt_factor : 0);
+ pr_debug("pipe prefill:%llu lines:%d\n",
+ prefill_per_pipe, prefill_lines);
+
+exit:
+ return prefill_per_pipe;
+}
+
+/**
+ * mdss_mdp_perf_calc_pipe() - calculate performance numbers required by pipe
+ * @pipe: Source pipe struct containing updated pipe params
+ * @perf: Structure containing values that should be updated for
+ * performance tuning
+ * @flags: flags to determine how to perform some of the
+ * calculations, supported flags:
+ *
+ * PERF_CALC_PIPE_APPLY_CLK_FUDGE:
+ * Determine if mdp clock fudge is applicable.
+ * PERF_CALC_PIPE_SINGLE_LAYER:
+ * Indicate if the calculation is for a single pipe staged
+ * in the layer mixer
+ * PERF_CALC_PIPE_CALC_SMP_SIZE:
+ * Indicate if the smp size needs to be calculated, this is
+ * for the cases where SMP haven't been allocated yet, so we need
+ * to estimate here the smp size (i.e. PREPARE IOCTL).
+ *
+ * Function calculates the minimum required performance calculations in order
+ * to avoid MDP underflow. The calculations are based on the way MDP
+ * fetches (bandwidth requirement) and processes data through MDP pipeline
+ * (MDP clock requirement) based on frame size and scaling requirements.
+ */
+
+/*
+ * mdss_mdp_perf_calc_pipe() - compute per-pipe performance requirements
+ * @pipe:  source pipe (SSPP) staged on a layer mixer
+ * @perf:  output: overlap/prefill bandwidth and MDP clock requirement
+ * @roi:   optional region of interest used to crop src/dst rectangles
+ * @flags: PERF_CALC_PIPE_* modifiers (single layer, SMP size calc, ...)
+ *
+ * Returns 0 on success, -EINVAL if mandatory state is missing.
+ */
+int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_perf_params *perf, struct mdss_rect *roi,
+ u32 flags)
+{
+ struct mdss_mdp_mixer *mixer;
+ int fps = DEFAULT_FRAME_RATE;
+ u32 v_total = 0, src_h, xres = 0, h_total = 0;
+ struct mdss_rect src, dst;
+ bool is_fbc = false;
+ struct mdss_mdp_prefill_params prefill_params;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool calc_smp_size = false;
+
+ if (!pipe || !perf || !pipe->mixer_left)
+ return -EINVAL;
+
+ mixer = pipe->mixer_left;
+
+ /* work on local copies so ROI cropping does not mutate the pipe */
+ dst = pipe->dst;
+ src = pipe->src;
+
+ /*
+ * when doing vertical decimation lines will be skipped, hence there is
+ * no need to account for these lines in MDP clock or request bus
+ * bandwidth to fetch them.
+ */
+ src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
+
+ if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+ &h_total, &xres)) {
+ pr_err(" error retreiving the panel params!\n");
+ return -EINVAL;
+ }
+
+ if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+ if (!mixer->ctl)
+ return -EINVAL;
+ is_fbc = mixer->ctl->panel_data->panel_info.fbc.enabled;
+ }
+
+ /*
+ * NOTE(review): mixer->ctl is only NULL-checked on the INTF path
+ * above; this unconditional dereference assumes non-INTF mixers
+ * always carry a valid ctl -- confirm for writeback/rotator paths.
+ */
+ mixer->ctl->frame_rate = fps;
+
+ /* crop rectangles */
+ if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
+ mdss_mdp_crop_rect(&src, &dst, roi);
+
+ pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps);
+ pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n",
+ pipe->src.w, src_h, pipe->dst.w, pipe->dst.h, pipe->dst.y,
+ pipe->src_fmt->bpp, pipe->src_fmt->is_yuv);
+
+ if (mdss_mdp_get_pipe_overlap_bw(pipe, roi, &perf->bw_overlap,
+ &perf->bw_overlap_nocr, flags))
+ pr_err("failure calculating overlap bw!\n");
+
+ perf->mdp_clk_rate = get_pipe_mdp_clk_rate(pipe, src, dst,
+ fps, v_total, flags);
+
+ pr_debug("bw:%llu bw_nocr:%llu clk:%d\n", perf->bw_overlap,
+ perf->bw_overlap_nocr, perf->mdp_clk_rate);
+
+ /* solid fill fetches no pixel data, so no fetch bandwidth is needed */
+ if (pipe->flags & MDP_SOLID_FILL)
+ perf->bw_overlap = 0;
+
+ /* prefill is only meaningful for real-time interfaces with it enabled */
+ if (mixer->ctl->intf_num == MDSS_MDP_NO_INTF ||
+ mdata->disable_prefill ||
+ mixer->ctl->disable_prefill ||
+ (pipe->flags & MDP_SOLID_FILL)) {
+ perf->prefill_bytes = 0;
+ perf->bw_prefill = 0;
+ goto exit;
+ }
+
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
+ perf->bw_prefill = mdss_mdp_perf_calc_simplified_prefill(pipe,
+ v_total, fps, mixer->ctl);
+ goto exit;
+ }
+
+ calc_smp_size = (flags & PERF_CALC_PIPE_CALC_SMP_SIZE) ? true : false;
+ prefill_params.smp_bytes = mdss_mdp_perf_calc_smp_size(pipe,
+ calc_smp_size);
+ prefill_params.xres = xres;
+ prefill_params.src_w = src.w;
+ prefill_params.src_h = src_h;
+ prefill_params.dst_w = dst.w;
+ prefill_params.dst_h = dst.h;
+ prefill_params.dst_y = dst.y;
+ prefill_params.bpp = pipe->src_fmt->bpp;
+ prefill_params.is_yuv = pipe->src_fmt->is_yuv;
+ prefill_params.is_caf = mdss_mdp_perf_is_caf(pipe);
+ prefill_params.is_fbc = is_fbc;
+ prefill_params.is_bwc = pipe->bwc_mode;
+ prefill_params.is_tile = mdss_mdp_is_tile_format(pipe->src_fmt);
+ prefill_params.is_hflip = pipe->flags & MDP_FLIP_LR;
+ prefill_params.is_cmd = !mixer->ctl->is_video_mode;
+ prefill_params.pnum = pipe->num;
+ prefill_params.is_ubwc = mdss_mdp_is_ubwc_format(pipe->src_fmt);
+ prefill_params.is_nv12 = mdss_mdp_is_nv12_format(pipe->src_fmt);
+
+ mdss_mdp_get_bw_vote_mode(mixer, mdata->mdp_rev, perf,
+ PERF_CALC_VOTE_MODE_PER_PIPE, flags);
+
+ /* pick the prefill model: single layer, video mode, or command mode */
+ if (flags & PERF_CALC_PIPE_SINGLE_LAYER)
+ perf->prefill_bytes =
+ mdss_mdp_perf_calc_pipe_prefill_single(&prefill_params);
+ else if (!prefill_params.is_cmd)
+ perf->prefill_bytes =
+ mdss_mdp_perf_calc_pipe_prefill_video(&prefill_params);
+ else
+ perf->prefill_bytes =
+ mdss_mdp_perf_calc_pipe_prefill_cmd(&prefill_params);
+
+exit:
+ pr_debug("mixer=%d pnum=%d clk_rate=%u bw_overlap=%llu bw_prefill=%llu (%d) %s\n",
+ mixer->num, pipe->num, perf->mdp_clk_rate, perf->bw_overlap,
+ perf->bw_prefill, perf->prefill_bytes, mdata->disable_prefill ?
+ "prefill is disabled" : "");
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_perf_is_overlap() - test whether vertical span [y10, y11)
+ * overlaps the reference region [y00, y01): either it fully contains
+ * the region, or its start falls inside the region.
+ */
+static inline int mdss_mdp_perf_is_overlap(u32 y00, u32 y01, u32 y10, u32 y11)
+{
+ return (y10 < y00 && y11 >= y01) || (y10 >= y00 && y10 < y01);
+}
+
+/*
+ * cmpu32() - u32 comparator for sort(). Note it never returns a
+ * positive value (greater-than and equal both map to 0); this is
+ * sufficient for the kernel heapsort, which only tests cmp() < 0
+ * and cmp() >= 0, but it is not a general qsort-style comparator.
+ */
+static inline int cmpu32(const void *a, const void *b)
+{
+ return (*(u32 *)a < *(u32 *)b) ? -1 : 0;
+}
+
+/*
+ * mdss_mdp_perf_calc_mixer() - aggregate performance numbers for all
+ * pipes staged on one layer mixer.
+ * @mixer:     layer mixer to evaluate
+ * @perf:      output; zeroed here, then filled with clock and bw votes
+ * @pipe_list: pipes staged on this mixer (may contain NULL slots)
+ * @num_pipes: number of entries in @pipe_list (<= MAX_PIPES_PER_LM)
+ * @flags:     PERF_CALC_PIPE_* modifiers forwarded to per-pipe calc
+ *
+ * Overlap bandwidth is the worst case over vertical regions where
+ * multiple pipes fetch concurrently; async pipes are assumed to
+ * overlap everything and are added on top.
+ */
+static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_perf_params *perf,
+ struct mdss_mdp_pipe **pipe_list, int num_pipes,
+ u32 flags)
+{
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_panel_info *pinfo = NULL;
+ int fps = DEFAULT_FRAME_RATE;
+ u32 v_total = 0, bpp = MDSS_MDP_WB_OUTPUT_BPP;
+ int i;
+ u32 max_clk_rate = 0;
+ u64 bw_overlap_max = 0;
+ u64 bw_overlap[MAX_PIPES_PER_LM] = { 0 };
+ u64 bw_overlap_async = 0;
+ u32 v_region[MAX_PIPES_PER_LM * 2] = { 0 };
+ u32 prefill_val = 0;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool apply_fudge = true;
+ struct mdss_mdp_format_params *fmt = NULL;
+
+ WARN_ON(num_pipes > MAX_PIPES_PER_LM);
+
+ memset(perf, 0, sizeof(*perf));
+
+ if (!mixer->rotator_mode) {
+ pinfo = &mixer->ctl->panel_data->panel_info;
+ /*
+ * NOTE(review): pinfo is the address of an embedded member,
+ * so this NULL check can never fire; kept for safety only.
+ */
+ if (!pinfo) {
+ pr_err("pinfo is NULL\n");
+ goto exit;
+ }
+
+ if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+ if (pinfo->type == MIPI_VIDEO_PANEL) {
+ fps = pinfo->panel_max_fps;
+ v_total = pinfo->panel_max_vtotal;
+ } else {
+ fps = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ v_total = mdss_panel_get_vtotal(pinfo);
+ }
+ } else {
+ v_total = mixer->height;
+ }
+
+ /* For writeback panel, mixer type can be other than intf */
+ if (pinfo->type == WRITEBACK_PANEL) {
+ fmt = mdss_mdp_get_format_params(
+ mixer->ctl->dst_format);
+ if (fmt)
+ bpp = fmt->bpp;
+ /* pinfo cleared so the writeback branch below runs */
+ pinfo = NULL;
+ }
+
+ perf->mdp_clk_rate = mixer->width * v_total * fps;
+ perf->mdp_clk_rate =
+ mdss_mdp_clk_fudge_factor(mixer, perf->mdp_clk_rate);
+
+ if (!pinfo) { /* perf for bus writeback */
+ perf->bw_writeback =
+ fps * mixer->width * mixer->height * bpp;
+
+ if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+ mdata->mdss_qos_map))
+ perf->bw_writeback = apply_comp_ratio_factor(
+ perf->bw_writeback, fmt,
+ &mixer->ctl->dst_comp_ratio);
+
+ } else if (pinfo->type == MIPI_CMD_PANEL) {
+ u32 dsi_transfer_rate = mixer->width * v_total;
+
+ /* adjust transfer time from micro seconds */
+ dsi_transfer_rate = mult_frac(dsi_transfer_rate,
+ 1000000, pinfo->mdp_transfer_time_us);
+
+ if (dsi_transfer_rate > perf->mdp_clk_rate)
+ perf->mdp_clk_rate = dsi_transfer_rate;
+ }
+
+ if (is_dsc_compression(pinfo) &&
+ mixer->ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE)
+ perf->mdp_clk_rate *= 2;
+ }
+
+ /*
+ * In case of border color, we still need enough mdp clock
+ * to avoid under-run. Clock requirement for border color is
+ * based on mixer width.
+ */
+ if (num_pipes == 0)
+ goto exit;
+
+ memset(bw_overlap, 0, sizeof(u64) * MAX_PIPES_PER_LM);
+ memset(v_region, 0, sizeof(u32) * MAX_PIPES_PER_LM * 2);
+
+ /*
+ * Apply this logic only for 8x26 to reduce clock rate
+ * for single video playback use case
+ */
+ if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_101)
+ && mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+ u32 npipes = 0;
+
+ for (i = 0; i < num_pipes; i++) {
+ pipe = pipe_list[i];
+ if (pipe) {
+ /* more than one pipe: keep the fudge factor */
+ if (npipes) {
+ apply_fudge = true;
+ break;
+ }
+ npipes++;
+ apply_fudge = !(pipe->src_fmt->is_yuv)
+ || !(pipe->flags
+ & MDP_SOURCE_ROTATED_90);
+ }
+ }
+ }
+
+ if (apply_fudge)
+ flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE;
+ if (num_pipes == 1)
+ flags |= PERF_CALC_PIPE_SINGLE_LAYER;
+
+ for (i = 0; i < num_pipes; i++) {
+ struct mdss_mdp_perf_params tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ pipe = pipe_list[i];
+ if (pipe == NULL)
+ continue;
+
+ /*
+ * if is pipe used across two LMs in source split configuration
+ * then it is staged on both LMs. In such cases skip BW calc
+ * for such pipe on right LM to prevent adding BW twice.
+ */
+ if (pipe->src_split_req && mixer->is_right_mixer)
+ continue;
+
+ if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi,
+ flags))
+ continue;
+
+ if (!mdss_mdp_is_nrt_ctl_path(mixer->ctl)) {
+ u64 per_pipe_ib =
+ test_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map) ?
+ tmp.bw_overlap_nocr : tmp.bw_overlap;
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ per_pipe_ib);
+ }
+
+ bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
+ tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+ /*
+ * for async layers, the overlap calculation is skipped
+ * and the bandwidth is added at the end, accounting for
+ * worst case, that async layer might overlap with
+ * all the other layers.
+ */
+ if (pipe->async_update) {
+ bw_overlap[i] = 0;
+ v_region[2*i] = 0;
+ v_region[2*i + 1] = 0;
+ bw_overlap_async += tmp.bw_overlap;
+ } else {
+ bw_overlap[i] = tmp.bw_overlap;
+ v_region[2*i] = pipe->dst.y;
+ v_region[2*i + 1] = pipe->dst.y + pipe->dst.h;
+ }
+
+ if (tmp.mdp_clk_rate > max_clk_rate)
+ max_clk_rate = tmp.mdp_clk_rate;
+
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+ prefill_val += tmp.bw_prefill;
+ else
+ prefill_val += tmp.prefill_bytes;
+ }
+
+ /*
+ * Sort the v_region array so the total display area can be
+ * divided into individual regions. Check how many pipes fetch
+ * data for each region and sum them up, then the worst case
+ * of all regions is ib request.
+ */
+ sort(v_region, num_pipes * 2, sizeof(u32), cmpu32, NULL);
+ for (i = 1; i < num_pipes * 2; i++) {
+ int j;
+ u64 bw_max_region = 0;
+ u32 y0, y1;
+
+ pr_debug("v_region[%d]%d\n", i, v_region[i]);
+ if (v_region[i] == v_region[i-1])
+ continue;
+ y0 = v_region[i-1];
+ y1 = v_region[i];
+ for (j = 0; j < num_pipes; j++) {
+ if (!bw_overlap[j])
+ continue;
+ pipe = pipe_list[j];
+ if (mdss_mdp_perf_is_overlap(y0, y1, pipe->dst.y,
+ (pipe->dst.y + pipe->dst.h)))
+ bw_max_region += bw_overlap[j];
+ pr_debug("pipe%d rect%d: v[%d](%d,%d)pipe[%d](%d,%d)bw(%llu %llu)\n",
+ pipe->num, pipe->multirect.num,
+ i, y0, y1, j, pipe->dst.y,
+ pipe->dst.y + pipe->dst.h, bw_overlap[j],
+ bw_max_region);
+ }
+ bw_overlap_max = max(bw_overlap_max, bw_max_region);
+ }
+
+ perf->bw_overlap += bw_overlap_max + bw_overlap_async;
+
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+ perf->bw_prefill += prefill_val;
+ else
+ perf->prefill_bytes += prefill_val;
+
+ if (max_clk_rate > perf->mdp_clk_rate)
+ perf->mdp_clk_rate = max_clk_rate;
+
+exit:
+ pr_debug("final mixer=%d video=%d clk_rate=%u bw=%llu prefill=%d mode=0x%lx\n",
+ mixer->num, mixer->ctl->is_video_mode, perf->mdp_clk_rate,
+ perf->bw_overlap, prefill_val,
+ *(perf->bw_vote_mode));
+}
+
+/*
+ * is_mdp_prefetch_needed() - decide whether MDP prefetch (fetch start
+ * in vertical front porch) should be programmed for this panel.
+ *
+ * Prefetch is enabled only on HW revision >= 105, and only when the
+ * blanking (vbp + vpw) leaves room below MDSS_MDP_MAX_PREFILL_FETCH.
+ * On older HW we merely warn about low blanking periods.
+ */
+static bool is_mdp_prefetch_needed(struct mdss_panel_info *pinfo)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool enable_prefetch = false;
+
+ if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105) {
+ if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
+ pinfo->lcdc.v_front_porch) < mdata->min_prefill_lines)
+ pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+
+ enable_prefetch = true;
+
+ /* blanking alone already covers max prefill: no prefetch */
+ if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width) >=
+ MDSS_MDP_MAX_PREFILL_FETCH)
+ enable_prefetch = false;
+ } else {
+ if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width) <
+ mdata->min_prefill_lines)
+ pr_warn_once("low vbp may lead to display performance issues");
+ }
+
+ return enable_prefetch;
+}
+
+/**
+ * mdss_mdp_get_prefetch_lines: - Number of fetch lines in vertical front porch
+ * @pinfo: Pointer to the panel information.
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * In some cases, vertical front porch is too high. In such cases limit
+ * the mdp fetch lines as the last (25 - vbp - vpw) lines of vertical
+ * front porch.
+ */
+int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo)
+{
+ int prefetch_avail = 0;
+ int v_total, vfp_start;
+ u32 prefetch_needed;
+
+ if (!is_mdp_prefetch_needed(pinfo))
+ return 0;
+
+ v_total = mdss_panel_get_vtotal(pinfo);
+ /* first line of the vertical front porch */
+ vfp_start = (pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
+ pinfo->yres);
+
+ prefetch_avail = v_total - vfp_start;
+ /* lines still needed after vbp + vpw to reach max prefill fetch */
+ prefetch_needed = MDSS_MDP_MAX_PREFILL_FETCH -
+ pinfo->lcdc.v_back_porch -
+ pinfo->lcdc.v_pulse_width;
+
+ /* clamp to what is actually required */
+ if (prefetch_avail > prefetch_needed)
+ prefetch_avail = prefetch_needed;
+
+ return prefetch_avail;
+}
+
+/*
+ * mdss_mdp_video_mode_intf_connected() - true if any powered-on ctl in
+ * the system drives a video-mode interface. @ctl is only used to reach
+ * the global mdata; the scan covers all ctls.
+ */
+static bool mdss_mdp_video_mode_intf_connected(struct mdss_mdp_ctl *ctl)
+{
+ int i;
+ struct mdss_data_type *mdata;
+
+ /* NOTE(review): returns 0 (i.e. false) from a bool function */
+ if (!ctl || !ctl->mdata)
+ return 0;
+
+ mdata = ctl->mdata;
+ for (i = 0; i < mdata->nctl; i++) {
+ /* NOTE(review): this local intentionally shadows the param */
+ struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
+
+ if (ctl->is_video_mode && mdss_mdp_ctl_is_power_on(ctl)) {
+ pr_debug("video interface connected ctl:%d\n",
+ ctl->num);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * __mdss_mdp_perf_calc_ctl_helper() - combine left/right mixer perf
+ * numbers into a single per-ctl result.
+ * @ctl:         controller whose mixers are evaluated
+ * @perf:        output; zeroed here, then accumulated
+ * @left_plist:  pipes staged on the left mixer, @left_cnt entries
+ * @right_plist: pipes staged on the right mixer, @right_cnt entries
+ * @flags:       PERF_CALC_PIPE_* modifiers forwarded to mixer calc
+ *
+ * Bandwidths add across mixers; the clock requirement is the max.
+ * A minimum SZ_16M overlap vote keeps the bus clock alive while the
+ * display is on, and bw_ctl is the max of prefill and overlap votes.
+ */
+static void __mdss_mdp_perf_calc_ctl_helper(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_perf_params *perf,
+ struct mdss_mdp_pipe **left_plist, int left_cnt,
+ struct mdss_mdp_pipe **right_plist, int right_cnt,
+ u32 flags)
+{
+ struct mdss_mdp_perf_params tmp;
+ struct mdss_data_type *mdata = ctl->mdata;
+
+ memset(perf, 0, sizeof(*perf));
+
+ if (ctl->mixer_left) {
+ mdss_mdp_perf_calc_mixer(ctl->mixer_left, &tmp,
+ left_plist, left_cnt, flags);
+
+ bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
+ tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+ perf->max_per_pipe_ib = tmp.max_per_pipe_ib;
+ perf->bw_overlap += tmp.bw_overlap;
+ perf->mdp_clk_rate = tmp.mdp_clk_rate;
+ perf->bw_writeback += tmp.bw_writeback;
+
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+ perf->bw_prefill += tmp.bw_prefill;
+ else
+ perf->prefill_bytes += tmp.prefill_bytes;
+ }
+
+ if (ctl->mixer_right) {
+ mdss_mdp_perf_calc_mixer(ctl->mixer_right, &tmp,
+ right_plist, right_cnt, flags);
+
+ bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
+ tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ tmp.max_per_pipe_ib);
+ perf->bw_overlap += tmp.bw_overlap;
+ perf->bw_writeback += tmp.bw_writeback;
+ if (tmp.mdp_clk_rate > perf->mdp_clk_rate)
+ perf->mdp_clk_rate = tmp.mdp_clk_rate;
+
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+ perf->bw_prefill += tmp.bw_prefill;
+ else
+ perf->prefill_bytes += tmp.prefill_bytes;
+
+ if (ctl->intf_type) {
+ u64 clk_rate = mdss_mdp_get_pclk_rate(ctl);
+ /* minimum clock rate due to inefficiency in 3dmux */
+ clk_rate = DIV_ROUND_UP_ULL((clk_rate >> 1) * 9, 8);
+ if (clk_rate > perf->mdp_clk_rate)
+ perf->mdp_clk_rate = clk_rate;
+ }
+ }
+
+ /* request minimum bandwidth to have bus clock on when display is on */
+ if (perf->bw_overlap == 0)
+ perf->bw_overlap = SZ_16M;
+
+ if (!test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map) &&
+ (ctl->intf_type != MDSS_MDP_NO_INTF)) {
+ u32 vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);
+
+ perf->bw_prefill = perf->prefill_bytes;
+ /*
+ * Prefill bandwidth equals the amount of data (number
+ * of prefill_bytes) divided by the the amount time
+ * available (blanking period). It is equivalent that
+ * prefill bytes times a factor in unit Hz, which is
+ * the reciprocal of time.
+ */
+ perf->bw_prefill *= vbp_fac;
+ }
+
+ perf->bw_ctl = max(perf->bw_prefill, perf->bw_overlap);
+ pr_debug("ctl=%d prefill bw=%llu overlap bw=%llu mode=0x%lx writeback:%llu\n",
+ ctl->num, perf->bw_prefill, perf->bw_overlap,
+ *(perf->bw_vote_mode), perf->bw_writeback);
+}
+
+/*
+ * mdss_check_for_flip() - collect H/V flip limit flags for this ctl.
+ *
+ * Starts from the panel orientation, then ORs in flip flags from every
+ * pipe staged on either mixer. Returns a bitmask of
+ * MDSS_MAX_BW_LIMIT_HFLIP / MDSS_MAX_BW_LIMIT_VFLIP; exits early once
+ * both bits are set.
+ */
+static u32 mdss_check_for_flip(struct mdss_mdp_ctl *ctl)
+{
+ u32 i, panel_orientation;
+ struct mdss_mdp_pipe *pipe;
+ u32 flags = 0;
+
+ panel_orientation = ctl->mfd->panel_orientation;
+ if (panel_orientation & MDP_FLIP_LR)
+ flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+ if (panel_orientation & MDP_FLIP_UD)
+ flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+
+ for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+ /* both limits already set: nothing more can be added */
+ if ((flags & MDSS_MAX_BW_LIMIT_HFLIP) &&
+ (flags & MDSS_MAX_BW_LIMIT_VFLIP))
+ return flags;
+
+ if (ctl->mixer_left && ctl->mixer_left->stage_pipe[i]) {
+ pipe = ctl->mixer_left->stage_pipe[i];
+ if (pipe->flags & MDP_FLIP_LR)
+ flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+ if (pipe->flags & MDP_FLIP_UD)
+ flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+ }
+
+ if (ctl->mixer_right && ctl->mixer_right->stage_pipe[i]) {
+ pipe = ctl->mixer_right->stage_pipe[i];
+ if (pipe->flags & MDP_FLIP_LR)
+ flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+ if (pipe->flags & MDP_FLIP_UD)
+ flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+ }
+ }
+
+ return flags;
+}
+
+/*
+ * mdss_mdp_set_threshold_max_bandwidth() - look up the strictest
+ * (smallest) max-bandwidth limit applicable to this ctl, based on the
+ * DT-provided max_bw_settings table and the active flip modes.
+ *
+ * Returns 0 when no settings table exists. Otherwise returns the
+ * minimum matching limit, or INT_MAX when no entry matches.
+ * NOTE(review): despite the names, "max" accumulates the *minimum*
+ * of the matching thresholds.
+ */
+static int mdss_mdp_set_threshold_max_bandwidth(struct mdss_mdp_ctl *ctl)
+{
+ u32 mode, threshold = 0, max = INT_MAX;
+ u32 i = 0;
+ struct mdss_max_bw_settings *max_bw_settings =
+ ctl->mdata->max_bw_settings;
+
+ if (!ctl->mdata->max_bw_settings_cnt && !ctl->mdata->max_bw_settings)
+ return 0;
+
+ mode = ctl->mdata->bw_mode_bitmap;
+
+ /* only scan pipes when not all flip bits are already known */
+ if (!((mode & MDSS_MAX_BW_LIMIT_HFLIP) &&
+ (mode & MDSS_MAX_BW_LIMIT_VFLIP)))
+ mode |= mdss_check_for_flip(ctl);
+
+ pr_debug("final mode = %d, bw_mode_bitmap = %d\n", mode,
+ ctl->mdata->bw_mode_bitmap);
+
+ /* Return minimum bandwidth limit */
+ for (i = 0; i < ctl->mdata->max_bw_settings_cnt; i++) {
+ if (max_bw_settings[i].mdss_max_bw_mode & mode) {
+ threshold = max_bw_settings[i].mdss_max_bw_val;
+ if (threshold < max)
+ max = threshold;
+ }
+ }
+
+ return max;
+}
+
+/*
+ * mdss_mdp_perf_bw_check() - validate that the summed bandwidth of all
+ * powered-on real-time interfaces stays below the platform threshold.
+ *
+ * Computes this ctl's pending bandwidth from the given pipe lists,
+ * adds the pending votes of the other interface ctls, and compares
+ * against max_bw_low/high (further capped by the max_bw_settings
+ * table). Returns 0 if acceptable, -E2BIG if the vote is too large
+ * (in which case bw_pending is reset).
+ */
+int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_pipe **left_plist, int left_cnt,
+ struct mdss_mdp_pipe **right_plist, int right_cnt)
+{
+ struct mdss_data_type *mdata = ctl->mdata;
+ struct mdss_mdp_perf_params perf;
+ u32 bw, threshold, i, mode_switch, max_bw;
+ u64 bw_sum_of_intfs = 0;
+ bool is_video_mode;
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (ctl->intf_type == MDSS_MDP_NO_INTF)
+ return 0;
+
+ __mdss_mdp_perf_calc_ctl_helper(ctl, &perf,
+ left_plist, left_cnt, right_plist, right_cnt,
+ PERF_CALC_PIPE_CALC_SMP_SIZE);
+ ctl->bw_pending = perf.bw_ctl;
+
+ for (i = 0; i < mdata->nctl; i++) {
+ struct mdss_mdp_ctl *temp = mdata->ctl_off + i;
+
+ if (temp->power_state == MDSS_PANEL_POWER_ON &&
+ (temp->intf_type != MDSS_MDP_NO_INTF))
+ bw_sum_of_intfs += temp->bw_pending;
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ pr_debug("calculated bandwidth=%uk\n", bw);
+
+ /* mfd validation happens in func */
+ mode_switch = mdss_fb_get_mode_switch(ctl->mfd);
+ if (mode_switch)
+ is_video_mode = (mode_switch == MIPI_VIDEO_PANEL);
+ else
+ is_video_mode = ctl->is_video_mode;
+ /* video mode anywhere in the system forces the lower threshold */
+ threshold = (is_video_mode ||
+ mdss_mdp_video_mode_intf_connected(ctl)) ?
+ mdata->max_bw_low : mdata->max_bw_high;
+
+ max_bw = mdss_mdp_set_threshold_max_bandwidth(ctl);
+
+ if (max_bw && (max_bw < threshold))
+ threshold = max_bw;
+
+ pr_debug("final threshold bw limit = %d\n", threshold);
+
+ if (bw > threshold) {
+ ctl->bw_pending = 0;
+ pr_debug("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_get_max_pipe_bw() - strictest per-pipe bandwidth limit for
+ * this pipe, from the DT max_per_pipe_bw_settings table.
+ *
+ * Flip flags come from panel orientation, the pipe's own flags, and
+ * the global bw_mode_bitmap. Returns 0 when no table exists, the
+ * minimum matching limit otherwise (INT_MAX if nothing matches).
+ */
+static u32 mdss_mdp_get_max_pipe_bw(struct mdss_mdp_pipe *pipe)
+{
+
+ struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+ struct mdss_max_bw_settings *max_per_pipe_bw_settings;
+ u32 flags = 0, threshold = 0, panel_orientation;
+ u32 i, max = INT_MAX;
+
+ if (!ctl->mdata->mdss_per_pipe_bw_cnt
+ && !ctl->mdata->max_per_pipe_bw_settings)
+ return 0;
+
+ /* NOTE(review): this read is repeated just below; harmless */
+ panel_orientation = ctl->mfd->panel_orientation;
+ max_per_pipe_bw_settings = ctl->mdata->max_per_pipe_bw_settings;
+
+ /* Check for panel orienatation */
+ panel_orientation = ctl->mfd->panel_orientation;
+ if (panel_orientation & MDP_FLIP_LR)
+ flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+ if (panel_orientation & MDP_FLIP_UD)
+ flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+
+ /* check for Hflip/Vflip in pipe */
+ if (pipe->flags & MDP_FLIP_LR)
+ flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+ if (pipe->flags & MDP_FLIP_UD)
+ flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+
+ flags |= ctl->mdata->bw_mode_bitmap;
+
+ /* pick the smallest limit whose mode matches */
+ for (i = 0; i < ctl->mdata->mdss_per_pipe_bw_cnt; i++) {
+ if (max_per_pipe_bw_settings[i].mdss_max_bw_mode & flags) {
+ threshold = max_per_pipe_bw_settings[i].mdss_max_bw_val;
+ if (threshold < max)
+ max = threshold;
+ }
+ }
+
+ return max;
+}
+
+/*
+ * mdss_mdp_perf_bw_check_pipe() - validate one pipe's bandwidth vote
+ * against the per-pipe limit (max_bw_per_pipe, further capped by the
+ * per-pipe settings table).
+ * @perf: per-pipe perf numbers previously computed for @pipe
+ * @pipe: pipe under validation
+ *
+ * Returns 0 if within limits (or no limit configured), -E2BIG if the
+ * pipe requests too much bandwidth.
+ */
+int mdss_mdp_perf_bw_check_pipe(struct mdss_mdp_perf_params *perf,
+ struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
+ struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+ u32 vbp_fac = 0, threshold = 0;
+ u64 prefill_bw, pipe_bw, max_pipe_bw;
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (ctl->intf_type == MDSS_MDP_NO_INTF)
+ return 0;
+
+ if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
+ prefill_bw = perf->bw_prefill;
+ } else {
+ /* legacy model: prefill bytes scaled by blanking factor */
+ vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);
+ prefill_bw = perf->prefill_bytes * vbp_fac;
+ }
+ pipe_bw = max(prefill_bw, perf->bw_overlap);
+ pr_debug("prefill=%llu, vbp_fac=%u, overlap=%llu\n",
+ prefill_bw, vbp_fac, perf->bw_overlap);
+
+ /* convert bandwidth to kb */
+ pipe_bw = DIV_ROUND_UP_ULL(pipe_bw, 1000);
+
+ threshold = mdata->max_bw_per_pipe;
+ max_pipe_bw = mdss_mdp_get_max_pipe_bw(pipe);
+
+ if (max_pipe_bw && (max_pipe_bw < threshold))
+ threshold = max_pipe_bw;
+
+ pr_debug("bw=%llu threshold=%u\n", pipe_bw, threshold);
+
+ /* threshold == 0 means no per-pipe limit is configured */
+ if (threshold && pipe_bw > threshold) {
+ pr_debug("pipe exceeds bandwidth: %llukb > %ukb\n", pipe_bw,
+ threshold);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_perf_calc_ctl() - compute the full perf vote for a ctl from
+ * its currently staged pipes.
+ *
+ * Gathers the staged pipes of both mixers, runs the common helper, and
+ * for real-time (or video-connected) paths applies the ib fudge
+ * factors to the overlap/prefill/writeback votes.
+ */
+static void mdss_mdp_perf_calc_ctl(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_perf_params *perf)
+{
+ struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM];
+ struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM];
+ int i, left_cnt = 0, right_cnt = 0;
+
+ /* compact the staged pipes into dense per-mixer lists */
+ for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+ if (ctl->mixer_left && ctl->mixer_left->stage_pipe[i]) {
+ left_plist[left_cnt] =
+ ctl->mixer_left->stage_pipe[i];
+ left_cnt++;
+ }
+
+ if (ctl->mixer_right && ctl->mixer_right->stage_pipe[i]) {
+ right_plist[right_cnt] =
+ ctl->mixer_right->stage_pipe[i];
+ right_cnt++;
+ }
+ }
+
+ __mdss_mdp_perf_calc_ctl_helper(ctl, perf,
+ left_plist, left_cnt, right_plist, right_cnt, 0);
+
+ if (ctl->is_video_mode || ((ctl->intf_type != MDSS_MDP_NO_INTF) &&
+ mdss_mdp_video_mode_intf_connected(ctl))) {
+ perf->bw_ctl =
+ max(apply_fudge_factor(perf->bw_overlap,
+ &mdss_res->ib_factor_overlap),
+ apply_fudge_factor(perf->bw_prefill,
+ &mdss_res->ib_factor));
+ perf->bw_writeback = apply_fudge_factor(perf->bw_writeback,
+ &mdss_res->ib_factor);
+ }
+ pr_debug("ctl=%d clk_rate=%u\n", ctl->num, perf->mdp_clk_rate);
+ pr_debug("bw_overlap=%llu bw_prefill=%llu prefill_bytes=%d\n",
+ perf->bw_overlap, perf->bw_prefill, perf->prefill_bytes);
+}
+
+/* set_status() - set (status=true) or clear bit @bit_num in @value */
+static void set_status(u32 *value, bool status, u32 bit_num)
+{
+ if (status)
+ *value |= BIT(bit_num);
+ else
+ *value &= ~BIT(bit_num);
+}
+
+/**
+ * @ mdss_mdp_ctl_perf_set_transaction_status() -
+ * Set the status of the on-going operations
+ * for the command mode panels.
+ * @ctl - pointer to a ctl
+ *
+ * This function is called to set the status bit in the perf_transaction_status
+ * according to the operation that it is on-going for the command mode
+ * panels, where:
+ *
+ * PERF_SW_COMMIT_STATE:
+ * 1 - If SW operation has been committed and bw
+ * has been requested (HW transaction have not started yet).
+ * 0 - If there is no SW operation pending
+ * PERF_HW_MDP_STATE:
+ * 1 - If HW transaction is on-going
+ * 0 - If there is no HW transaction on going (ping-pong interrupt
+ * has finished)
+ * Only if both states are zero there are no pending operations and
+ * BW could be released.
+ * State can be queried calling "mdss_mdp_ctl_perf_get_transaction_status"
+ */
+void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
+ enum mdss_mdp_perf_state_type component, bool new_status)
+{
+ u32 previous_transaction;
+ bool previous_status;
+ unsigned long flags;
+
+ /* only command-mode panels track transaction state */
+ if (!ctl || !ctl->panel_data ||
+ (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
+ return;
+
+ spin_lock_irqsave(&ctl->spin_lock, flags);
+
+ previous_transaction = ctl->perf_transaction_status;
+ previous_status = previous_transaction & BIT(component) ?
+ PERF_STATUS_BUSY : PERF_STATUS_DONE;
+
+ /*
+ * If we set "done" state when previous state was not "busy",
+ * we want to print a warning since maybe there is a state
+ * that we are not considering
+ */
+ WARN((new_status == PERF_STATUS_DONE) &&
+ (previous_status != PERF_STATUS_BUSY),
+ "unexpected previous state for component: %d\n", component);
+
+ set_status(&ctl->perf_transaction_status, new_status,
+ (u32)component);
+
+ pr_debug("ctl:%d component:%d previous:%d status:%d\n",
+ ctl->num, component, previous_transaction,
+ ctl->perf_transaction_status);
+ pr_debug("ctl:%d new_status:%d prev_status:%d\n",
+ ctl->num, new_status, previous_status);
+
+ spin_unlock_irqrestore(&ctl->spin_lock, flags);
+}
+
+/**
+ * @ mdss_mdp_ctl_perf_get_transaction_status() -
+ * Get the status of the on-going operations
+ * for the command mode panels.
+ * @ctl - pointer to a ctl
+ *
+ * Return:
+ * The status of the transactions for the command mode panels,
+ * note that the bandwidth can be released only if all transaction
+ * status bits are zero.
+ */
+u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl)
+{
+ unsigned long flags;
+ u32 transaction_status;
+
+ if (!ctl)
+ return PERF_STATUS_BUSY;
+
+ /*
+ * If Rotator mode and bandwidth has been released; return STATUS_DONE
+ * so the bandwidth is re-calculated.
+ */
+ if (ctl->mixer_left && ctl->mixer_left->rotator_mode &&
+ !ctl->perf_release_ctl_bw)
+ return PERF_STATUS_DONE;
+
+ /*
+ * If Video Mode or not valid data to determine the status, return busy
+ * status, so the bandwidth cannot be freed by the caller
+ */
+ /* NOTE(review): !ctl is redundant here; already checked above */
+ if (!ctl || !ctl->panel_data ||
+ (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) {
+ return PERF_STATUS_BUSY;
+ }
+
+ /* snapshot under the lock; status may change right after release */
+ spin_lock_irqsave(&ctl->spin_lock, flags);
+ transaction_status = ctl->perf_transaction_status;
+ spin_unlock_irqrestore(&ctl->spin_lock, flags);
+ pr_debug("ctl:%d status:%d\n", ctl->num,
+ transaction_status);
+
+ return transaction_status;
+}
+
+/**
+ * @ mdss_mdp_ctl_perf_update_traffic_shaper_bw -
+ * Apply BW fudge factor to rotator
+ * if mdp clock increased during
+ * rotation session.
+ * @ctl - pointer to the controller
+ * @mdp_clk - new mdp clock
+ *
+ * If mdp clock increased and traffic shaper is enabled, we need to
+ * account for the additional bandwidth that will be requested by
+ * the rotator when running at a higher clock, so we apply a fudge
+ * factor proportional to the mdp clock increment.
+ */
+static void mdss_mdp_ctl_perf_update_traffic_shaper_bw(struct mdss_mdp_ctl *ctl,
+ u32 mdp_clk)
+{
+ if ((mdp_clk > 0) && (mdp_clk > ctl->traffic_shaper_mdp_clk)) {
+ /* scale the current vote by new_clk / shaper_clk */
+ ctl->cur_perf.bw_ctl = fudge_factor(ctl->cur_perf.bw_ctl,
+ mdp_clk, ctl->traffic_shaper_mdp_clk);
+ pr_debug("traffic shaper bw:%llu, clk: %d, mdp_clk:%d\n",
+ ctl->cur_perf.bw_ctl, ctl->traffic_shaper_mdp_clk,
+ mdp_clk);
+ }
+}
+
+/*
+ * mdss_mdp_ctl_calc_client_vote() - sum bandwidth votes over all
+ * powered-on ctls belonging to one bus client class.
+ * @mdata:      global driver data (list of ctls)
+ * @perf:       output; max_per_pipe_ib is accumulated here
+ * @nrt_client: true to aggregate non-real-time paths (rotator/WB
+ *              output), false for real-time interface paths
+ * @mdp_clk:    new mdp clock, used to scale traffic-shaper votes
+ *
+ * Returns the total ab bandwidth vote for the selected client class.
+ */
+static u64 mdss_mdp_ctl_calc_client_vote(struct mdss_data_type *mdata,
+ struct mdss_mdp_perf_params *perf, bool nrt_client, u32 mdp_clk)
+{
+ u64 bw_sum_of_intfs = 0;
+ int i;
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_mdp_mixer *mixer;
+ struct mdss_mdp_perf_params perf_temp;
+
+ bitmap_zero(perf_temp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+ for (i = 0; i < mdata->nctl; i++) {
+ ctl = mdata->ctl_off + i;
+ mixer = ctl->mixer_left;
+ if (mdss_mdp_ctl_is_power_on(ctl) &&
+ /* RealTime clients */
+ ((!nrt_client && ctl->mixer_left &&
+ !ctl->mixer_left->rotator_mode) ||
+ /* Non-RealTime clients */
+ (nrt_client && mdss_mdp_is_nrt_ctl_path(ctl)))) {
+ /* Skip rotation layers as bw calc by rot driver */
+ if (ctl->mixer_left && ctl->mixer_left->rotator_mode)
+ continue;
+ /*
+ * If traffic shaper is enabled we must check
+ * if additional bandwidth is required.
+ */
+ if (ctl->traffic_shaper_enabled)
+ mdss_mdp_ctl_perf_update_traffic_shaper_bw
+ (ctl, mdp_clk)#
+
+ mdss_mdp_get_bw_vote_mode(ctl, mdata->mdp_rev,
+ &perf_temp, PERF_CALC_VOTE_MODE_CTL, 0);
+
+ bitmap_or(perf_temp.bw_vote_mode,
+ perf_temp.bw_vote_mode,
+ ctl->cur_perf.bw_vote_mode,
+ MDSS_MDP_BW_MODE_MAX);
+
+ /* WB-output path of an nrt client votes writeback bw */
+ if (nrt_client && ctl->mixer_left &&
+ !ctl->mixer_left->rotator_mode) {
+ bw_sum_of_intfs += ctl->cur_perf.bw_writeback;
+ continue;
+ }
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ ctl->cur_perf.max_per_pipe_ib);
+
+ bw_sum_of_intfs += ctl->cur_perf.bw_ctl;
+
+ pr_debug("ctl_num=%d bw=%llu mode=0x%lx\n", ctl->num,
+ ctl->cur_perf.bw_ctl,
+ *(ctl->cur_perf.bw_vote_mode));
+ }
+ }
+
+ return bw_sum_of_intfs;
+}
+
+/*
+ * mdss_mdp_ctl_update_client_vote() - convert an aggregated bandwidth
+ * vote into ab/ib bus quotas and submit them to the bus scaling layer.
+ * @mdata:      global driver data (QoS capability map, fudge factors)
+ * @perf:       aggregated perf (per-pipe ib, vote-mode bitmap)
+ * @nrt_client: true when voting for the non-real-time bus client
+ * @bw_vote:    summed ab bandwidth for this client class
+ */
+static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
+ struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
+{
+ u64 bus_ab_quota, bus_ib_quota;
+
+ bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
+
+ if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map)) {
+ /* per-pipe ib QoS: rt uses worst pipe ib, nrt votes none */
+ if (!nrt_client)
+ bus_ib_quota = perf->max_per_pipe_ib;
+ else
+ bus_ib_quota = 0;
+ } else {
+ bus_ib_quota = bw_vote;
+ }
+
+ /* double the ib floor for the single-layer vscaling mode */
+ if (test_bit(MDSS_MDP_BW_MODE_SINGLE_LAYER,
+ perf->bw_vote_mode) &&
+ (bus_ib_quota >= PERF_SINGLE_PIPE_BW_FLOOR)) {
+ struct mult_factor ib_factor_vscaling;
+
+ ib_factor_vscaling.numer = 2;
+ ib_factor_vscaling.denom = 1;
+ bus_ib_quota = apply_fudge_factor(bus_ib_quota,
+ &ib_factor_vscaling);
+ }
+
+ if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map) &&
+ !nrt_client)
+ bus_ib_quota = apply_fudge_factor(bus_ib_quota,
+ &mdata->per_pipe_ib_factor);
+
+ bus_ab_quota = apply_fudge_factor(bus_ab_quota, &mdss_res->ab_factor);
+ ATRACE_INT("bus_quota", bus_ib_quota);
+
+ mdss_bus_scale_set_quota(nrt_client ? MDSS_MDP_NRT : MDSS_MDP_RT,
+ bus_ab_quota, bus_ib_quota);
+ pr_debug("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
+ bus_ab_quota, bus_ib_quota);
+}
+
+/*
+ * mdss_mdp_ctl_perf_update_bus() - recompute and submit the bus votes
+ * for the client class(es) affected by @ctl.
+ * @mdata:   global driver data
+ * @ctl:     ctl that triggered the update (classifies rt vs nrt)
+ * @mdp_clk: current mdp clock, forwarded for traffic-shaper scaling
+ */
+static void mdss_mdp_ctl_perf_update_bus(struct mdss_data_type *mdata,
+ struct mdss_mdp_ctl *ctl, u32 mdp_clk)
+{
+ u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
+ struct mdss_mdp_perf_params perf = {0};
+
+ ATRACE_BEGIN(__func__);
+
+ /*
+ * non-real time client
+ * 1. rotator path
+ * 2. writeback output path
+ */
+ if (mdss_mdp_is_nrt_ctl_path(ctl)) {
+ bitmap_zero(perf.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+ bw_sum_of_nrt_intfs = mdss_mdp_ctl_calc_client_vote(mdata,
+ &perf, true, mdp_clk);
+ mdss_mdp_ctl_update_client_vote(mdata, &perf, true,
+ bw_sum_of_nrt_intfs);
+ }
+
+ /*
+ * real time client
+ * 1. any realtime interface - primary or secondary interface
+ * 2. writeback input path
+ */
+ if (!mdss_mdp_is_nrt_ctl_path(ctl) ||
+ (ctl->intf_num == MDSS_MDP_NO_INTF)) {
+ bitmap_zero(perf.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+ bw_sum_of_rt_intfs = mdss_mdp_ctl_calc_client_vote(mdata,
+ &perf, false, mdp_clk);
+ mdss_mdp_ctl_update_client_vote(mdata, &perf, false,
+ bw_sum_of_rt_intfs);
+ }
+
+ ATRACE_END(__func__);
+}
+
+/**
+ * @mdss_mdp_ctl_perf_release_bw() - request zero bandwidth
+ * @ctl - pointer to a ctl
+ *
+ * Function checks a state variable for the ctl, if all pending commit
+ * requests are done, meaning no more bandwidth is needed, release
+ * bandwidth request.
+ */
+void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl)
+{
+ int transaction_status;
+ struct mdss_data_type *mdata;
+ int i;
+
+ /* only do this for command panel */
+ if (!ctl || !ctl->mdata || !ctl->panel_data ||
+ (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
+ return;
+
+ mutex_lock(&mdss_mdp_ctl_lock);
+ mdata = ctl->mdata;
+ /*
+ * If video interface present, cmd panel bandwidth cannot be
+ * released.
+ */
+ for (i = 0; i < mdata->nctl; i++) {
+ struct mdss_mdp_ctl *ctl_local = mdata->ctl_off + i;
+
+ if (mdss_mdp_ctl_is_power_on(ctl_local) &&
+ ctl_local->is_video_mode)
+ goto exit;
+ }
+
+ transaction_status = mdss_mdp_ctl_perf_get_transaction_status(ctl);
+ pr_debug("transaction_status=0x%x\n", transaction_status);
+
+ /*Release the bandwidth only if there are no transactions pending*/
+ if (!transaction_status && mdata->enable_bw_release) {
+ /*
+ * for splitdisplay if release_bw is called using secondary
+ * then find the main ctl and release BW for main ctl because
+ * BW is always calculated/stored using main ctl.
+ */
+ struct mdss_mdp_ctl *ctl_local =
+ mdss_mdp_get_main_ctl(ctl) ? : ctl;
+
+ trace_mdp_cmd_release_bw(ctl_local->num);
+ ctl_local->cur_perf.bw_ctl = 0;
+ ctl_local->new_perf.bw_ctl = 0;
+ pr_debug("Release BW ctl=%d\n", ctl_local->num);
+ mdss_mdp_ctl_perf_update_bus(mdata, ctl, 0);
+ }
+exit:
+ mutex_unlock(&mdss_mdp_ctl_lock);
+}
+
+/*
+ * mdss_mdp_select_clk_lvl() - round @clk_rate up to the next supported
+ * clock level from mdata->clock_levels (assumed ascending). If the
+ * rate exceeds every level, it is returned unchanged.
+ */
+static int mdss_mdp_select_clk_lvl(struct mdss_data_type *mdata,
+ u32 clk_rate)
+{
+ int i;
+
+ for (i = 0; i < mdata->nclk_lvl; i++) {
+ if (clk_rate > mdata->clock_levels[i]) {
+ continue;
+ } else {
+ clk_rate = mdata->clock_levels[i];
+ break;
+ }
+ }
+
+ return clk_rate;
+}
+
+/*
+ * mdss_mdp_perf_release_ctl_bw() - zero out the controller's perf vote
+ * and clear the one-shot release request flag.
+ */
+static void mdss_mdp_perf_release_ctl_bw(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_perf_params *perf)
+{
+ /* Set to zero controller bandwidth. */
+ memset(perf, 0, sizeof(*perf));
+ ctl->perf_release_ctl_bw = false;
+}
+
+/*
+ * mdss_mdp_get_mdp_clk_rate() - highest mdp core clock rate required
+ * by any powered-on ctl, rounded via clk_round_rate() and then snapped
+ * to a supported clock level.
+ */
+u32 mdss_mdp_get_mdp_clk_rate(struct mdss_data_type *mdata)
+{
+ u32 clk_rate = 0;
+ uint i;
+ struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+
+ for (i = 0; i < mdata->nctl; i++) {
+ struct mdss_mdp_ctl *ctl;
+
+ ctl = mdata->ctl_off + i;
+ if (mdss_mdp_ctl_is_power_on(ctl)) {
+ clk_rate = max(ctl->cur_perf.mdp_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(clk, clk_rate);
+ }
+ }
+ clk_rate = mdss_mdp_select_clk_lvl(mdata, clk_rate);
+
+ pr_debug("clk:%u nctl:%d\n", clk_rate, mdata->nctl);
+ return clk_rate;
+}
+
+/*
+ * is_traffic_shaper_enabled() - true if any powered-on ctl currently
+ * has the traffic shaper active.
+ */
+static bool is_traffic_shaper_enabled(struct mdss_data_type *mdata)
+{
+ uint i;
+
+ for (i = 0; i < mdata->nctl; i++) {
+ struct mdss_mdp_ctl *ctl;
+
+ ctl = mdata->ctl_off + i;
+ if (mdss_mdp_ctl_is_power_on(ctl))
+ if (ctl->traffic_shaper_enabled)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Re-evaluate and apply the bus bandwidth and MDP clock votes for @ctl.
+ *
+ * @params_changed: non-zero when a new commit changed perf parameters,
+ *                  allowing votes to increase.
+ * @stop_req:       true on path stop; drops bandwidth votes to zero.
+ *
+ * Entire update runs under mdss_mdp_ctl_lock. Controllers in rotator
+ * mode are skipped; powered-off controllers get both votes cleared.
+ */
+static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
+		int params_changed, bool stop_req)
+{
+	struct mdss_mdp_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	struct mdss_data_type *mdata;
+	bool is_bw_released;
+	u32 clk_rate = 0;
+
+	if (!ctl || !ctl->mdata)
+		return;
+	ATRACE_BEGIN(__func__);
+	mutex_lock(&mdss_mdp_ctl_lock);
+
+	mdata = ctl->mdata;
+	old = &ctl->cur_perf;
+	new = &ctl->new_perf;
+
+	/*
+	 * We could have released the bandwidth if there were no transactions
+	 * pending, so we want to re-calculate the bandwidth in this situation.
+	 */
+	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
+
+	if (mdss_mdp_ctl_is_power_on(ctl)) {
+		/* Skip perf update if ctl is used for rotation */
+		if (ctl->mixer_left && ctl->mixer_left->rotator_mode)
+			goto end;
+
+		if (ctl->perf_release_ctl_bw &&
+			mdata->enable_rotator_bw_release)
+			mdss_mdp_perf_release_ctl_bw(ctl, new);
+		else if (is_bw_released || params_changed)
+			mdss_mdp_perf_calc_ctl(ctl, new);
+
+		/*
+		 * three cases for bus bandwidth update.
+		 * 1. new bandwidth vote or writeback output vote
+		 *    are higher than current vote for update request.
+		 * 2. new bandwidth vote or writeback output vote are
+		 *    lower than current vote at end of commit or stop.
+		 * 3. end of writeback/rotator session - last chance to
+		 *    non-realtime remove vote.
+		 */
+		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+			(new->bw_writeback > old->bw_writeback))) ||
+		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+			(new->bw_writeback < old->bw_writeback))) ||
+			(stop_req && mdss_mdp_is_nrt_ctl_path(ctl))) {
+
+			pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n",
+				ctl->num, params_changed, new->bw_ctl,
+				old->bw_ctl);
+			if (stop_req) {
+				old->bw_writeback = 0;
+				old->bw_ctl = 0;
+				old->max_per_pipe_ib = 0;
+			} else {
+				old->bw_ctl = new->bw_ctl;
+				old->max_per_pipe_ib = new->max_per_pipe_ib;
+				old->bw_writeback = new->bw_writeback;
+			}
+			bitmap_copy(old->bw_vote_mode, new->bw_vote_mode,
+				MDSS_MDP_BW_MODE_MAX);
+			update_bus = 1;
+		}
+
+		/*
+		 * If traffic shaper is enabled, we do not decrease the clock,
+		 * otherwise we would increase traffic shaper latency. Clock
+		 * would be decreased after traffic shaper is done.
+		 */
+		if ((params_changed && (new->mdp_clk_rate > old->mdp_clk_rate))
+			 || (!params_changed &&
+			 (new->mdp_clk_rate < old->mdp_clk_rate) &&
+			(false == is_traffic_shaper_enabled(mdata)))) {
+			old->mdp_clk_rate = new->mdp_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		/* powered off: drop both votes entirely */
+		memset(old, 0, sizeof(*old));
+		memset(new, 0, sizeof(*new));
+		update_bus = 1;
+		update_clk = 1;
+	}
+
+	/*
+	 * Calculate mdp clock before bandwidth calculation. If traffic shaper
+	 * is enabled and clock increased, the bandwidth calculation can
+	 * use the new clock for the rotator bw calculation.
+	 */
+	if (update_clk)
+		clk_rate = mdss_mdp_get_mdp_clk_rate(mdata);
+
+	if (update_bus)
+		mdss_mdp_ctl_perf_update_bus(mdata, ctl, clk_rate);
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		ATRACE_INT("mdp_clk", clk_rate);
+		mdss_mdp_set_clk_rate(clk_rate);
+		pr_debug("update clk rate = %d HZ\n", clk_rate);
+	}
+
+end:
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	ATRACE_END(__func__);
+}
+
+/*
+ * Reserve the first free controller from the mdata->ctl_off pool,
+ * starting the search at index @off. In shared-WFD mode one extra
+ * (virtual) ctl slot past nctl is eligible. Returns the reserved ctl
+ * with ref_cnt taken, or NULL if none is free.
+ */
+struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
+					       u32 off)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	u32 cnum;
+	u32 nctl = mdata->nctl;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED)
+		nctl++;
+
+	for (cnum = off; cnum < nctl; cnum++) {
+		ctl = mdata->ctl_off + cnum;
+		if (ctl->ref_cnt == 0) {
+			/* claim this slot and (re)initialize its locks */
+			ctl->ref_cnt++;
+			ctl->mdata = mdata;
+			mutex_init(&ctl->lock);
+			mutex_init(&ctl->offlock);
+			mutex_init(&ctl->flush_lock);
+			mutex_init(&ctl->rsrc_lock);
+			spin_lock_init(&ctl->spin_lock);
+			BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
+			pr_debug("alloc ctl_num=%d\n", ctl->num);
+			break;
+		}
+		ctl = NULL;
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return ctl;
+}
+
+/*
+ * Return a controller to the pool: release attached mixers/writeback,
+ * drop the reference count and reset per-ctl state. No memory is
+ * freed - ctls live in the static mdata->ctl_off array.
+ * Returns 0, -ENODEV for a NULL ctl, or -EINVAL if ref_cnt is already 0.
+ */
+int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl)
+{
+	if (!ctl)
+		return -ENODEV;
+
+	pr_debug("free ctl_num=%d ref_cnt=%d\n", ctl->num, ctl->ref_cnt);
+
+	if (!ctl->ref_cnt) {
+		pr_err("called with ref_cnt=0\n");
+		return -EINVAL;
+	}
+
+	if (ctl->mixer_left && ctl->mixer_left->ref_cnt)
+		mdss_mdp_mixer_free(ctl->mixer_left);
+
+	if (ctl->mixer_right && ctl->mixer_right->ref_cnt)
+		mdss_mdp_mixer_free(ctl->mixer_right);
+
+	if (ctl->wb)
+		mdss_mdp_wb_free(ctl->wb);
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	ctl->ref_cnt--;
+	ctl->intf_num = MDSS_MDP_NO_INTF;
+	ctl->intf_type = MDSS_MDP_NO_INTF;
+	ctl->is_secure = false;
+	ctl->power_state = MDSS_PANEL_POWER_OFF;
+	ctl->mixer_left = NULL;
+	ctl->mixer_right = NULL;
+	ctl->wb = NULL;
+	ctl->cdm = NULL;
+	memset(&ctl->ops, 0, sizeof(ctl->ops));
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_mixer_alloc() - allocate mdp mixer.
+ * @ctl: mdp controller.
+ * @type: specifying type of mixer requested. interface or writeback.
+ * @mux: specifies if mixer allocation is for split_fb cases.
+ * @rotator: specifies if the mixer requested for rotator operations.
+ *
+ * This function is called to request allocation of mdp mixer
+ * during mdp controller path setup.
+ *
+ * Return: mdp mixer structure that is allocated.
+ *	   NULL if mixer allocation fails.
+ */
+struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
+		struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator)
+{
+	struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL;
+	u32 nmixers_intf;
+	u32 nmixers_wb;
+	u32 i;
+	u32 nmixers;
+	struct mdss_mdp_mixer *mixer_pool = NULL;
+
+	if (!ctl || !ctl->mdata)
+		return NULL;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	nmixers_intf = ctl->mdata->nmixers_intf;
+	nmixers_wb = ctl->mdata->nmixers_wb;
+
+	/* choose which mixer pool and how many entries to search */
+	switch (type) {
+	case MDSS_MDP_MIXER_TYPE_INTF:
+		mixer_pool = ctl->mdata->mixer_intf;
+		nmixers = nmixers_intf;
+
+		/*
+		 * try to reserve first layer mixer for write back if
+		 * assertive display needs to be supported through wfd
+		 */
+		if (ctl->mdata->has_wb_ad && ctl->intf_num &&
+			((ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
+			!mux)) {
+			alt_mixer = mixer_pool;
+			mixer_pool++;
+			nmixers--;
+		} else if ((ctl->panel_data->panel_info.type == WRITEBACK_PANEL)
+			&& (ctl->mdata->ndspp < nmixers)) {
+			/* skip DSPP-backed mixers for writeback panels */
+			mixer_pool += ctl->mdata->ndspp;
+			nmixers -= ctl->mdata->ndspp;
+		}
+		break;
+
+	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
+		mixer_pool = ctl->mdata->mixer_wb;
+		nmixers = nmixers_wb;
+		if ((ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) && rotator)
+			mixer_pool = mixer_pool + nmixers;
+		break;
+
+	default:
+		nmixers = 0;
+		pr_err("invalid pipe type %d\n", type);
+		break;
+	}
+
+	/*Allocate virtual wb mixer if no dedicated wfd wb blk is present*/
+	if ((ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED) &&
+			(type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
+		nmixers += 1;
+
+	/* first-fit scan for an unreferenced mixer */
+	for (i = 0; i < nmixers; i++) {
+		mixer = mixer_pool + i;
+		if (mixer->ref_cnt == 0)
+			break;
+		mixer = NULL;
+	}
+
+	/* fall back to the mixer held back for AD/wfd if nothing else free */
+	if (!mixer && alt_mixer && (alt_mixer->ref_cnt == 0))
+		mixer = alt_mixer;
+
+	if (mixer) {
+		mixer->ref_cnt++;
+		mixer->params_changed++;
+		mixer->ctl = ctl;
+		mixer->next_pipe_map = 0;
+		mixer->pipe_mapped = 0;
+		pr_debug("alloc mixer num %d for ctl=%d\n",
+				mixer->num, ctl->num);
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return mixer;
+}
+
+/*
+ * Reserve a specific mixer by index. @wb selects the writeback pool,
+ * @rot (with dedicated WFD) selects the rotator mixer past the wb pool.
+ * Returns the mixer with ref_cnt taken, or NULL if it is already in use
+ * or the index is out of range.
+ */
+struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb, bool rot)
+{
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+
+	if (rot && (mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED))
+		mixer = mdata->mixer_wb + mdata->nmixers_wb;
+	else if (wb && id < mdata->nmixers_wb)
+		mixer = mdata->mixer_wb + id;
+	else if (!wb && id < mdata->nmixers_intf)
+		mixer = mdata->mixer_intf + id;
+
+	if (mixer && mixer->ref_cnt == 0) {
+		mixer->ref_cnt++;
+		mixer->params_changed++;
+	} else {
+		pr_err("mixer is in use already = %d\n", id);
+		mixer = NULL;
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	return mixer;
+}
+
+/*
+ * Drop one reference on a mixer and clear its right-mixer flag.
+ * Returns 0, -ENODEV for NULL, or -EINVAL if the ref count is already 0.
+ */
+int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
+{
+	if (!mixer)
+		return -ENODEV;
+
+	pr_debug("free mixer_num=%d ref_cnt=%d\n", mixer->num, mixer->ref_cnt);
+
+	if (!mixer->ref_cnt) {
+		pr_err("called with ref_cnt=0\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	mixer->ref_cnt--;
+	mixer->is_right_mixer = false;
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+/*
+ * Allocate and start a ctl + writeback-mixer + wb block combination for
+ * block-mode (rotator) usage. On success the started mixer is returned;
+ * on any failure all partially-acquired resources are released and NULL
+ * is returned.
+ */
+struct mdss_mdp_mixer *mdss_mdp_block_mixer_alloc(void)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_mdp_writeback *wb = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 offset = mdss_mdp_get_wb_ctl_support(mdata, true);
+	int ret = 0;
+
+	ctl = mdss_mdp_ctl_alloc(mdss_res, offset);
+	if (!ctl) {
+		pr_debug("unable to allocate wb ctl\n");
+		return NULL;
+	}
+
+	mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
+							false, true);
+	if (!mixer) {
+		pr_debug("unable to allocate wb mixer\n");
+		goto error;
+	}
+
+	mixer->rotator_mode = 1;
+
+	/* pick the ctl op mode matching the wb layer mixer we got */
+	switch (mixer->num) {
+	case MDSS_MDP_WB_LAYERMIXER0:
+		ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
+		break;
+	case MDSS_MDP_WB_LAYERMIXER1:
+		ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE;
+		break;
+	default:
+		pr_err("invalid layer mixer=%d\n", mixer->num);
+		goto error;
+	}
+
+	wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, ctl->num);
+	if (!wb) {
+		pr_err("Unable to allocate writeback block\n");
+		goto error;
+	}
+
+	ctl->mixer_left = mixer;
+
+	ctl->ops.start_fnc = mdss_mdp_writeback_start;
+	ctl->power_state = MDSS_PANEL_POWER_ON;
+	ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
+	mixer->ctl = ctl;
+	ctl->wb = wb;
+
+	if (ctl->ops.start_fnc)
+		ret = ctl->ops.start_fnc(ctl);
+
+	if (!ret)
+		return mixer;
+error:
+	/* unwind in reverse acquisition order */
+	if (wb)
+		mdss_mdp_wb_free(wb);
+	if (mixer)
+		mdss_mdp_mixer_free(mixer);
+	if (ctl)
+		mdss_mdp_ctl_free(ctl);
+
+	return NULL;
+}
+
+/*
+ * Stop and release a block-mode (rotator) ctl/mixer pair created by
+ * mdss_mdp_block_mixer_alloc(), then drop its perf votes.
+ */
+int mdss_mdp_block_mixer_destroy(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mixer || !mixer->ctl) {
+		pr_err("invalid ctl handle\n");
+		return -ENODEV;
+	}
+
+	ctl = mixer->ctl;
+	mixer->rotator_mode = 0;
+
+	pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);
+
+	if (ctl->ops.stop_fnc)
+		ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);
+
+	mdss_mdp_ctl_free(ctl);
+
+	/*
+	 * NOTE(review): ctl is used after mdss_mdp_ctl_free(). This is not
+	 * a use-after-free only because ctls live in the static
+	 * mdata->ctl_off pool and ctl_free merely drops the refcount -
+	 * confirm the ordering is intentional.
+	 */
+	mdss_mdp_ctl_perf_update(ctl, 0, true);
+
+	return 0;
+}
+
+/*
+ * Estimate the absolute time of the next vsync for @ctl from the
+ * current hardware line counter and panel timing, returning it in
+ * @wakeup_time. Returns -ENOTSUPP when the interface cannot report a
+ * line count, -ENODEV/-EINVAL on invalid panel data or timings.
+ */
+int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
+				 ktime_t *wakeup_time)
+{
+	struct mdss_panel_info *pinfo;
+	u64 clk_rate;
+	u32 clk_period;
+	u32 current_line, total_line;
+	u32 time_of_line, time_to_vsync, adjust_line_ns;
+
+	ktime_t current_time = ktime_get();
+
+	if (!ctl->ops.read_line_cnt_fnc)
+		return -ENOTSUPP;
+
+	/*
+	 * NOTE(review): the address of an embedded member can never be
+	 * NULL, so this check is ineffective; the real guard would be
+	 * on ctl->panel_data.
+	 */
+	pinfo = &ctl->panel_data->panel_info;
+	if (!pinfo)
+		return -ENODEV;
+
+	clk_rate = mdss_mdp_get_pclk_rate(ctl);
+
+	clk_rate = DIV_ROUND_UP_ULL(clk_rate, 1000); /* in kHz */
+	if (!clk_rate)
+		return -EINVAL;
+
+	/*
+	 * calculate clk_period as pico second to maintain good
+	 * accuracy with high pclk rate and this number is in 17 bit
+	 * range.
+	 */
+	clk_period = DIV_ROUND_UP_ULL(1000000000, clk_rate);
+	if (!clk_period)
+		return -EINVAL;
+
+	time_of_line = (pinfo->lcdc.h_back_porch +
+		 pinfo->lcdc.h_front_porch +
+		 pinfo->lcdc.h_pulse_width +
+		 pinfo->xres) * clk_period;
+
+	time_of_line /= 1000;	/* in nano second */
+	if (!time_of_line)
+		return -EINVAL;
+
+	current_line = ctl->ops.read_line_cnt_fnc(ctl);
+
+	total_line = pinfo->lcdc.v_back_porch +
+		pinfo->lcdc.v_front_porch +
+		pinfo->lcdc.v_pulse_width +
+		pinfo->yres;
+
+	/* counter past the frame: wait a whole frame for the next vsync */
+	if (current_line >= total_line)
+		time_to_vsync = time_of_line * total_line;
+	else
+		time_to_vsync = time_of_line * (total_line - current_line);
+
+	if (pinfo->adjust_timer_delay_ms) {
+		adjust_line_ns = pinfo->adjust_timer_delay_ms
+			* 1000000; /* convert to ns */
+
+		/* Ignore large values of adjust_line_ns */
+		if (time_to_vsync > adjust_line_ns)
+			time_to_vsync -= adjust_line_ns;
+	}
+
+	if (!time_to_vsync)
+		return -EINVAL;
+
+	*wakeup_time = ktime_add_ns(current_time, time_to_vsync);
+
+	pr_debug("clk_rate=%lldkHz clk_period=%d cur_line=%d tot_line=%d\n",
+		clk_rate, clk_period, current_line, total_line);
+	pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n",
+		 time_to_vsync, (int)ktime_to_ms(current_time),
+		 (int)ktime_to_ms(*wakeup_time));
+
+	return 0;
+}
+
+/*
+ * Workqueue handler: compute the next-vsync wakeup time and arm the
+ * CPU PM event timer with it. Bails out silently if the wakeup time
+ * cannot be determined.
+ */
+static void __cpu_pm_work_handler(struct work_struct *work)
+{
+	struct mdss_mdp_ctl *ctl =
+		container_of(work, typeof(*ctl), cpu_pm_work);
+	ktime_t wakeup_time;
+	struct mdss_overlay_private *mdp5_data;
+
+	/* defensive only: container_of() of a valid work cannot be NULL */
+	if (!ctl)
+		return;
+
+	if (mdss_mdp_display_wakeup_time(ctl, &wakeup_time))
+		return;
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
+}
+
+/*
+ * Schedule the CPU PM wakeup work when autorefresh is active and a
+ * PM event-timer handle exists. @data is the display's
+ * mdss_overlay_private.
+ */
+void mdss_mdp_ctl_event_timer(void *data)
+{
+	struct mdss_overlay_private *mdp5_data =
+				(struct mdss_overlay_private *)data;
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+
+	if (mdp5_data->cpu_pm_hdl && ctl && ctl->autorefresh_frame_cnt)
+		schedule_work(&ctl->cpu_pm_work);
+}
+
+/*
+ * Enable/disable panel autorefresh (command-mode panels only).
+ * @frame_cnt: refresh interval in frames; 0 disables. On success the
+ * count is cached on the ctl and the PM event timer is (re)armed.
+ * Returns -EINVAL for non command-mode panels.
+ */
+int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+
+	if (ctl->panel_data->panel_info.type == MIPI_CMD_PANEL) {
+		ret = mdss_mdp_cmd_set_autorefresh_mode(ctl, frame_cnt);
+		if (!ret) {
+			ctl->autorefresh_frame_cnt = frame_cnt;
+			if (frame_cnt)
+				mdss_mdp_ctl_event_timer(mdp5_data);
+		}
+	} else {
+		pr_err("Mode not supported for this panel\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * Query the current autorefresh mode; non command-mode panels always
+ * report 0 (disabled).
+ */
+int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl)
+{
+	if (ctl->panel_data->panel_info.type == MIPI_CMD_PANEL)
+		return mdss_mdp_cmd_get_autorefresh_mode(ctl);
+	else
+		return 0;
+}
+
+/*
+ * Hand over / tear down the bootloader splash configuration, dispatched
+ * by panel type. Panel types with no splash handling return 0.
+ */
+int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
+{
+	switch (ctl->panel_data->panel_info.type) {
+	case MIPI_VIDEO_PANEL:
+	case EDP_PANEL:
+	case DTV_PANEL:
+		return mdss_mdp_video_reconfigure_splash_done(ctl, handoff);
+	case MIPI_CMD_PANEL:
+		return mdss_mdp_cmd_reconfigure_splash_done(ctl, handoff);
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Link a slave (split) ctl to the master: the slave's left mixer
+ * becomes the master's right mixer, and the slave's x border offset is
+ * shifted by the master panel's border widths.
+ */
+static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_ctl *split_ctl)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_panel_info *pinfo;
+
+
+	if (!ctl || !split_ctl || !mdata)
+		return -ENODEV;
+
+	/* setup split ctl mixer as right mixer of original ctl so that
+	 * original ctl can work the same way as dual pipe solution
+	 */
+	ctl->mixer_right = split_ctl->mixer_left;
+	pinfo = &ctl->panel_data->panel_info;
+
+	/* add x offset from left ctl's border */
+	split_ctl->border_x_off += (pinfo->lcdc.border_left +
+			pinfo->lcdc.border_right);
+
+	return 0;
+}
+
+/* Turn on DSC in the mixer's pingpong block (PP_DSC_MODE = 1). */
+static inline void __dsc_enable(struct mdss_mdp_mixer *mixer)
+{
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DSC_MODE, 1);
+}
+
+/*
+ * Turn off DSC for the mixer: clear PP_DSC_MODE in the pingpong block
+ * and zero the common-mode register of the matching DSC encoder.
+ * Only layer mixers 0 and 1 have DSC encoders.
+ */
+static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *offset = mdata->mdp_base;
+
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DSC_MODE, 0);
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		offset += MDSS_MDP_DSC_0_OFFSET;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		offset += MDSS_MDP_DSC_1_OFFSET;
+	} else {
+		pr_err("invalid mixer numer=%d\n", mixer->num);
+		return;
+	}
+	writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+}
+
+/*
+ * Program one DSC encoder (selected by @mixer) with the parameters in
+ * @dsc.
+ * @mode: common-mode bits from __dsc_get_common_mode() (BIT(2) = video
+ *        mode, BIT(1) = dsc merge, BIT(0) = split).
+ * @ich_reset_override: force ich_reset position (writes 3 << 28 into
+ *        the ENC register), see __is_ich_reset_override_needed().
+ */
+static void __dsc_config(struct mdss_mdp_mixer *mixer,
+	struct dsc_desc *dsc, u32 mode, bool ich_reset_override)
+{
+	u32 data;
+	int bpp, lsb;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *offset = mdata->mdp_base;
+	u32 initial_lines = dsc->initial_lines;
+	bool is_cmd_mode = !(mode & BIT(2));
+
+	data = mdss_mdp_pingpong_read(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
+	data |= BIT(18); /* endian flip */
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP, data);
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		offset += MDSS_MDP_DSC_0_OFFSET;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		offset += MDSS_MDP_DSC_1_OFFSET;
+	} else {
+		pr_err("invalid mixer numer=%d\n", mixer->num);
+		return;
+	}
+
+	writel_relaxed(mode, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+
+	data = 0;
+	if (ich_reset_override)
+		data = 3 << 28;
+
+	/* command mode needs one extra initial line */
+	if (is_cmd_mode)
+		initial_lines += 1;
+
+	data |= (initial_lines << 20);
+	data |= ((dsc->slice_last_group_size - 1) << 18);
+	/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
+	/*
+	 * NOTE(review): '% 4' / '/ 4' below keep only 2 fractional bits,
+	 * which does not match the 6.4 (4 fractional bits) note above -
+	 * confirm against the DSC HW programming guide.
+	 */
+	lsb = dsc->bpp % 4;
+	bpp = dsc->bpp / 4;
+	bpp *= 4;	/* either 8 or 12 */
+	bpp <<= 4;
+	bpp |= lsb;
+	data |= (bpp << 8);
+	data |= (dsc->block_pred_enable << 7);
+	data |= (dsc->line_buf_depth << 3);
+	data |= (dsc->enable_422 << 2);
+	data |= (dsc->convert_rgb << 1);
+	data |= dsc->input_10_bits;
+
+	pr_debug("%d %d %d %d %d %d %d %d %d, data=%x\n",
+		ich_reset_override,
+		initial_lines, dsc->slice_last_group_size,
+		dsc->bpp, dsc->block_pred_enable, dsc->line_buf_depth,
+		dsc->enable_422, dsc->convert_rgb, dsc->input_10_bits, data);
+
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_ENC);
+
+	data = dsc->pic_width << 16;
+	data |= dsc->pic_height;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_PICTURE);
+
+	data = dsc->slice_width << 16;
+	data |= dsc->slice_height;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SLICE);
+
+	data = dsc->chunk_size << 16;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_CHUNK_SIZE);
+
+	pr_debug("mix%d pic_w=%d pic_h=%d, slice_w=%d slice_h=%d, chunk=%d\n",
+		mixer->num, dsc->pic_width, dsc->pic_height,
+		dsc->slice_width, dsc->slice_height, dsc->chunk_size);
+	MDSS_XLOG(mixer->num, dsc->pic_width, dsc->pic_height,
+		dsc->slice_width, dsc->slice_height, dsc->chunk_size);
+
+	data = dsc->initial_dec_delay << 16;
+	data |= dsc->initial_xmit_delay;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_DELAY);
+
+	data = dsc->initial_scale_value;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_INITIAL);
+
+	data = dsc->scale_decrement_interval;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_DEC_INTERVAL);
+
+	data = dsc->scale_increment_interval;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_INC_INTERVAL);
+
+	data = dsc->first_line_bpg_offset;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_FIRST_LINE_BPG_OFFSET);
+
+	data = dsc->nfl_bpg_offset << 16;
+	data |= dsc->slice_bpg_offset;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_BPG_OFFSET);
+
+	data = dsc->initial_offset << 16;
+	data |= dsc->final_offset;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_DSC_OFFSET);
+
+	data = dsc->det_thresh_flatness << 10;
+	data |= dsc->max_qp_flatness << 5;
+	data |= dsc->min_qp_flatness;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_FLATNESS);
+	/*
+	 * NOTE(review): this hard-coded 0x983 immediately overwrites the
+	 * flatness value assembled just above - confirm this override is
+	 * intentional and not dead code.
+	 */
+	writel_relaxed(0x983, offset + MDSS_MDP_REG_DSC_FLATNESS);
+
+	data = dsc->rc_model_size;	/* rate_buffer_size */
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_RC_MODEL_SIZE);
+
+	data = dsc->tgt_offset_lo << 18;
+	data |= dsc->tgt_offset_hi << 14;
+	data |= dsc->quant_incr_limit1 << 9;
+	data |= dsc->quant_incr_limit0 << 4;
+	data |= dsc->edge_factor;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_RC);
+}
+
+/*
+ * Program the DSC rate-control threshold tables (buf_thresh and the
+ * per-range min/max QP and bpg-offset tables) into the encoder that
+ * matches @mixer. Registers are 32-bit-spaced, one entry each.
+ */
+static void __dsc_config_thresh(struct mdss_mdp_mixer *mixer,
+	struct dsc_desc *dsc)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *offset, *off;
+	u32 *lp;
+	char *cp;
+	int i;
+
+	offset = mdata->mdp_base;
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		offset += MDSS_MDP_DSC_0_OFFSET;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		offset += MDSS_MDP_DSC_1_OFFSET;
+	} else {
+		pr_err("invalid mixer numer=%d\n", mixer->num);
+		return;
+	}
+
+	/* 14 buffer thresholds */
+	lp = dsc->buf_thresh;
+	off = offset + MDSS_MDP_REG_DSC_RC_BUF_THRESH;
+	for (i = 0; i < 14; i++) {
+		writel_relaxed(*lp++, off);
+		off += 4;
+	}
+
+	/* 15 RC ranges: min QP, max QP, bpg offset */
+	cp = dsc->range_min_qp;
+	off = offset + MDSS_MDP_REG_DSC_RANGE_MIN_QP;
+	for (i = 0; i < 15; i++) {
+		writel_relaxed(*cp++, off);
+		off += 4;
+	}
+
+	cp = dsc->range_max_qp;
+	off = offset + MDSS_MDP_REG_DSC_RANGE_MAX_QP;
+	for (i = 0; i < 15; i++) {
+		writel_relaxed(*cp++, off);
+		off += 4;
+	}
+
+	cp = dsc->range_bpg_offset;
+	off = offset + MDSS_MDP_REG_DSC_RANGE_BPG_OFFSET;
+	for (i = 0; i < 15; i++) {
+		writel_relaxed(*cp++, off);
+		off += 4;
+	}
+}
+
+/* BIT(1) of the DSC common mode selects DSC merge. */
+static bool __is_dsc_merge_enabled(u32 common_mode)
+{
+	return common_mode & BIT(1);
+}
+
+/*
+ * 3D mux topology: dual layer mixers feeding a single display with
+ * only one DSC encoder in total.
+ */
+static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	return ctl && is_dual_lm_single_display(ctl->mfd) &&
+	       pinfo && (pinfo->dsc_enc_total == 1);
+}
+
+/*
+ * Build the DSC common-mode bitmask for the master ctl:
+ *   BIT(2) - video mode (clear means command mode)
+ *   BIT(0) - split, set when both layer mixers are valid
+ *   BIT(1) - dsc merge, dual-LM single display without 3D mux
+ * must be called from master ctl
+ */
+static u32 __dsc_get_common_mode(struct mdss_mdp_ctl *ctl, bool mux_3d)
+{
+	u32 common_mode = 0;
+
+	if (ctl->is_video_mode)
+		common_mode = BIT(2);
+
+	if (mdss_mdp_is_both_lm_valid(ctl))
+		common_mode |= BIT(0);
+
+	if (is_dual_lm_single_display(ctl->mfd)) {
+		if (mux_3d)
+			common_mode &= ~BIT(0);
+		else if (mdss_mdp_is_both_lm_valid(ctl)) /* dsc_merge */
+			common_mode |= BIT(1);
+	}
+
+	return common_mode;
+}
+
+/*
+ * Derive the total picture dimensions from the valid mixer ROIs:
+ * widths of valid left/right mixers add up, height is taken from
+ * whichever valid mixer is seen last. Either mixer may be NULL.
+ */
+static void __dsc_get_pic_dim(struct mdss_mdp_mixer *mixer_l,
+	struct mdss_mdp_mixer *mixer_r, u32 *pic_w, u32 *pic_h)
+{
+	bool valid_l = mixer_l && mixer_l->valid_roi;
+	bool valid_r = mixer_r && mixer_r->valid_roi;
+
+	*pic_w = 0;
+	*pic_h = 0;
+
+	if (valid_l) {
+		*pic_w = mixer_l->roi.w;
+		*pic_h = mixer_l->roi.h;
+	}
+
+	if (valid_r) {
+		*pic_w += mixer_r->roi.w;
+		*pic_h = mixer_r->roi.h;
+	}
+}
+
+/*
+ * Decide whether the HW's automatic ich_reset placement must be
+ * overridden: needed when partial update is enabled, the panel has
+ * multiple full-frame slices, and the current slice spans the whole
+ * picture width.
+ */
+static bool __is_ich_reset_override_needed(bool pu_en, struct dsc_desc *dsc)
+{
+	/*
+	 * As per the DSC spec, ICH_RESET can be either end of the slice line
+	 * or at the end of the slice. HW internally generates ich_reset at
+	 * end of the slice line if DSC_MERGE is used or encoder has two
+	 * soft slices. However, if encoder has only 1 soft slice and DSC_MERGE
+	 * is not used then it will generate ich_reset at the end of slice.
+	 *
+	 * Now as per the spec, during one PPS session, position where
+	 * ich_reset is generated should not change. Now if full-screen frame
+	 * has more than 1 soft slice then HW will automatically generate
+	 * ich_reset at the end of slice_line. But for the same panel, if
+	 * partial frame is enabled and only 1 encoder is used with 1 slice,
+	 * then HW will generate ich_reset at end of the slice. This is a
+	 * mismatch. Prevent this by overriding HW's decision.
+	 */
+	return pu_en && dsc && (dsc->full_frame_slices > 1) &&
+		(dsc->slice_width == dsc->pic_width);
+}
+
+/*
+ * Configure DSC for the dual-LM single-display topology (one panel
+ * driven by two layer mixers, master ctl only). Handles 3D mux vs
+ * dsc-merge, partial-update slice selection (pp_split / merge
+ * programming through ppb_ctl and DCE_SEL), and enables/disables each
+ * encoder based on which mixer ROIs are valid.
+ */
+static void __dsc_setup_dual_lm_single_display(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	u32 pic_width = 0, pic_height = 0;
+	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
+	bool valid_l, valid_r;
+	bool enable_right_dsc;
+	bool mux_3d, ich_reset_override;
+	struct dsc_desc *dsc;
+	struct mdss_mdp_mixer *mixer_l, *mixer_r;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!pinfo || !ctl || !ctl->is_master ||
+	    !is_dual_lm_single_display(ctl->mfd))
+		return;
+
+	dsc = &pinfo->dsc;
+	mixer_l = ctl->mixer_left;
+	mixer_r = ctl->mixer_right;
+
+	mux_3d = __dsc_is_3d_mux_enabled(ctl, pinfo);
+	common_mode = __dsc_get_common_mode(ctl, mux_3d);
+	__dsc_get_pic_dim(mixer_l, mixer_r, &pic_width, &pic_height);
+
+	valid_l = mixer_l->valid_roi;
+	valid_r = mixer_r->valid_roi;
+	if (mdss_mdp_is_lm_swap_needed(mdata, ctl)) {
+		/* LM swap: left path drives everything */
+		valid_l = true;
+		valid_r = false;
+	}
+
+	this_frame_slices = pic_width / dsc->slice_width;
+
+	/* enable or disable pp_split + DSC_Merge based on partial update */
+	if ((pinfo->partial_update_enabled) && !mux_3d &&
+	    (dsc->full_frame_slices == 4) &&
+	    (mdss_has_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT))) {
+
+		if (valid_l && valid_r) {
+			/* left + right */
+			pr_debug("full line (4 slices) or middle 2 slice partial update\n");
+			writel_relaxed(0x0,
+				mdata->mdp_base + mdata->ppb_ctl[0]);
+			writel_relaxed(0x0,
+				mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
+		} else if (valid_l || valid_r) {
+			/* left-only or right-only */
+			if (this_frame_slices == 2) {
+				pr_debug("2 slice parital update, use merge\n");
+
+				/* tandem + merge */
+				common_mode = BIT(1) | BIT(0);
+
+				valid_r = true;
+				valid_l = true;
+
+				writel_relaxed(0x2 << 4, mdata->mdp_base +
+					mdata->ppb_ctl[0]);
+				writel_relaxed(BIT(0),
+					mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
+			} else {
+				pr_debug("only one slice partial update\n");
+				writel_relaxed(0x0, mdata->mdp_base +
+					mdata->ppb_ctl[0]);
+				writel_relaxed(0x0, mdata->mdp_base +
+					MDSS_MDP_REG_DCE_SEL);
+			}
+		}
+	} else {
+		writel_relaxed(0x0, mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
+	}
+
+	mdss_panel_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+	mdss_panel_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	enc_ip_w = intf_ip_w;
+	/* if dsc_merge, both encoders work on same number of slices */
+	if (__is_dsc_merge_enabled(common_mode))
+		enc_ip_w /= 2;
+	mdss_panel_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	/*
+	 * __is_ich_reset_override_needed should be called only after
+	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
+	 */
+	ich_reset_override = __is_ich_reset_override_needed(
+					pinfo->partial_update_enabled, dsc);
+	if (valid_l) {
+		__dsc_config(mixer_l, dsc, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_l, dsc);
+		__dsc_enable(mixer_l);
+	} else {
+		__dsc_disable(mixer_l);
+	}
+
+	enable_right_dsc = valid_r;
+	if (mux_3d && valid_l)
+		enable_right_dsc = false;
+
+	if (enable_right_dsc) {
+		__dsc_config(mixer_r, dsc, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_r, dsc);
+		__dsc_enable(mixer_r);
+	} else {
+		__dsc_disable(mixer_r);
+	}
+
+	pr_debug("mix%d: valid_l=%d mix%d: valid_r=%d mode=%d, pic_dim:%dx%d mux_3d=%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
+		mixer_l->num, valid_l, mixer_r->num, valid_r,
+		common_mode, pic_width, pic_height,
+		mux_3d, intf_ip_w, enc_ip_w, ich_reset_override);
+
+	MDSS_XLOG(mixer_l->num, valid_l, mixer_r->num, valid_r,
+		common_mode, pic_width, pic_height,
+		mux_3d, intf_ip_w, enc_ip_w, ich_reset_override);
+}
+
+/*
+ * Configure DSC for the dual-LM dual-display topology (master +
+ * slave ctl, one DSC per interface, no dsc merge). Both encoders are
+ * programmed with identical picture dimensions; each is enabled only
+ * when its mixer ROI is valid. Master ctl only.
+ */
+static void __dsc_setup_dual_lm_dual_display(
+	struct mdss_mdp_ctl *ctl, struct mdss_panel_info *pinfo,
+	struct mdss_mdp_ctl *sctl, struct mdss_panel_info *spinfo)
+{
+	u32 pic_width = 0, pic_height = 0;
+	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
+	bool valid_l, valid_r;
+	bool ich_reset_override;
+	struct dsc_desc *dsc_l, *dsc_r;
+	struct mdss_mdp_mixer *mixer_l, *mixer_r;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!pinfo || !ctl || !sctl || !spinfo ||
+	    !ctl->is_master || !ctl->mfd ||
+	    (ctl->mfd->split_mode != MDP_DUAL_LM_DUAL_DISPLAY))
+		return;
+
+	dsc_l = &pinfo->dsc;
+	dsc_r = &spinfo->dsc;
+
+	mixer_l = ctl->mixer_left;
+	mixer_r = ctl->mixer_right;
+
+	common_mode = __dsc_get_common_mode(ctl, false);
+	/*
+	 * In this topology, both DSC use same pic dimension. So no need to
+	 * maintain two separate local copies.
+	 */
+	__dsc_get_pic_dim(mixer_l, mixer_r, &pic_width, &pic_height);
+
+	valid_l = mixer_l->valid_roi;
+	valid_r = mixer_r->valid_roi;
+	if (mdss_mdp_is_lm_swap_needed(mdata, ctl)) {
+		/* LM swap: left path drives everything */
+		valid_l = true;
+		valid_r = false;
+	}
+
+	/*
+	 * Since both DSC use same pic dimension, set same pic dimension
+	 * to both DSC structures.
+	 */
+	mdss_panel_dsc_update_pic_dim(dsc_l, pic_width, pic_height);
+	mdss_panel_dsc_update_pic_dim(dsc_r, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc_l->slice_width;
+	intf_ip_w = this_frame_slices * dsc_l->slice_width;
+	if (valid_l && valid_r)
+		intf_ip_w /= 2;
+	/*
+	 * In this topology when both interfaces are active, they have same
+	 * load so intf_ip_w will be same.
+	 */
+	mdss_panel_dsc_pclk_param_calc(dsc_l, intf_ip_w);
+	mdss_panel_dsc_pclk_param_calc(dsc_r, intf_ip_w);
+
+	/*
+	 * In this topology, since there is no dsc_merge, uncompressed input
+	 * to encoder and interface is same.
+	 */
+	enc_ip_w = intf_ip_w;
+	mdss_panel_dsc_initial_line_calc(dsc_l, enc_ip_w);
+	mdss_panel_dsc_initial_line_calc(dsc_r, enc_ip_w);
+
+	/*
+	 * __is_ich_reset_override_needed should be called only after
+	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
+	 */
+	ich_reset_override = __is_ich_reset_override_needed(
+					pinfo->partial_update_enabled, dsc_l);
+
+	if (valid_l) {
+		__dsc_config(mixer_l, dsc_l, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_l, dsc_l);
+		__dsc_enable(mixer_l);
+	} else {
+		__dsc_disable(mixer_l);
+	}
+
+	if (valid_r) {
+		__dsc_config(mixer_r, dsc_r, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_r, dsc_r);
+		__dsc_enable(mixer_r);
+	} else {
+		__dsc_disable(mixer_r);
+	}
+
+	pr_debug("mix%d: valid_l=%d mix%d: valid_r=%d mode=%d, pic_dim:%dx%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
+		mixer_l->num, valid_l, mixer_r->num, valid_r,
+		common_mode, pic_width, pic_height,
+		intf_ip_w, enc_ip_w, ich_reset_override);
+
+	MDSS_XLOG(mixer_l->num, valid_l, mixer_r->num, valid_r,
+		common_mode, pic_width, pic_height,
+		intf_ip_w, enc_ip_w, ich_reset_override);
+}
+
+/*
+ * Configure DSC for the single-LM single-display topology: one mixer,
+ * one encoder, enabled only when the mixer ROI is valid. Master ctl
+ * only.
+ */
+static void __dsc_setup_single_lm_single_display(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	u32 pic_width = 0, pic_height = 0;
+	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
+	bool valid;
+	bool ich_reset_override;
+	struct dsc_desc *dsc;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!pinfo || !ctl || !ctl->is_master)
+		return;
+
+	dsc = &pinfo->dsc;
+	mixer = ctl->mixer_left;
+	valid = mixer->valid_roi;
+
+	common_mode = __dsc_get_common_mode(ctl, false);
+	__dsc_get_pic_dim(mixer, NULL, &pic_width, &pic_height);
+
+	mdss_panel_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc->slice_width;
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+	mdss_panel_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	/* no dsc merge here: encoder input equals interface input */
+	enc_ip_w = intf_ip_w;
+	mdss_panel_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	/*
+	 * __is_ich_reset_override_needed should be called only after
+	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
+	 */
+	ich_reset_override = __is_ich_reset_override_needed(
+					pinfo->partial_update_enabled, dsc);
+	if (valid) {
+		__dsc_config(mixer, dsc, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer, dsc);
+		__dsc_enable(mixer);
+	} else {
+		__dsc_disable(mixer);
+	}
+
+	pr_debug("mix%d: valid=%d mode=%d, pic_dim:%dx%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
+		mixer->num, valid, common_mode, pic_width, pic_height,
+		intf_ip_w, enc_ip_w, ich_reset_override);
+
+	MDSS_XLOG(mixer->num, valid, common_mode, pic_width, pic_height,
+		intf_ip_w, enc_ip_w, ich_reset_override);
+}
+
+/*
+ * Top-level DSC setup entry point: dispatch to the topology-specific
+ * configuration routine based on the framebuffer split mode. No-op for
+ * non-DSC panels; slave ctls are skipped because the master programs
+ * both paths.
+ */
+void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_panel_info *spinfo;
+
+	if (!is_dsc_compression(pinfo))
+		return;
+
+	if (!ctl->is_master) {
+		pr_debug("skip slave ctl because master will program for both\n");
+		return;
+	}
+
+	switch (ctl->mfd->split_mode) {
+	case MDP_DUAL_LM_SINGLE_DISPLAY:
+		__dsc_setup_dual_lm_single_display(ctl, pinfo);
+		break;
+	case MDP_DUAL_LM_DUAL_DISPLAY:
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl) {
+			spinfo = &sctl->panel_data->panel_info;
+			__dsc_setup_dual_lm_dual_display(ctl, pinfo, sctl,
+					spinfo);
+		}
+		break;
+	default:
+		/* pp_split is not supported yet */
+		__dsc_setup_single_lm_single_display(ctl, pinfo);
+		break;
+	}
+}
+
+/*
+ * Program (or clear, when @enable is 0) the FBC compression registers
+ * in the mixer's pingpong block from the panel's fbc settings.
+ * Only layer mixers 0 and 1 support FBC. Returns 0 on success,
+ * -EINVAL for missing pdata, FBC disabled in panel info, or an
+ * unsupported mixer.
+ */
+static int mdss_mdp_ctl_fbc_enable(int enable,
+		struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata)
+{
+	struct fbc_panel_info *fbc;
+	u32 mode = 0, budget_ctl = 0, lossy_mode = 0, width;
+
+	if (!pdata) {
+		pr_err("Invalid pdata\n");
+		return -EINVAL;
+	}
+
+	fbc = &pdata->fbc;
+
+	if (!fbc->enabled) {
+		pr_debug("FBC not enabled\n");
+		return -EINVAL;
+	}
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0 ||
+			mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		pr_debug("Mixer supports FBC.\n");
+	} else {
+		pr_debug("Mixer doesn't support FBC.\n");
+		return -EINVAL;
+	}
+
+	if (enable) {
+		if (fbc->enc_mode && pdata->bpp) {
+			/* width is the compressed width */
+			width = mult_frac(pdata->xres, fbc->target_bpp,
+					pdata->bpp);
+		} else {
+			/* width is the source width */
+			width = pdata->xres;
+		}
+
+		mode = ((width) << 16) | ((fbc->slice_height) << 11) |
+			((fbc->pred_mode) << 10) | ((fbc->enc_mode) << 9) |
+			((fbc->comp_mode) << 8) | ((fbc->qerr_enable) << 7) |
+			((fbc->cd_bias) << 4) | ((fbc->pat_enable) << 3) |
+			((fbc->vlc_enable) << 2) | ((fbc->bflc_enable) << 1) |
+			enable;
+
+		budget_ctl = ((fbc->line_x_budget) << 12) |
+			((fbc->block_x_budget) << 8) | fbc->block_budget;
+
+		lossy_mode = ((fbc->max_pred_err) << 28) |
+			((fbc->lossless_mode_thd) << 16) |
+			((fbc->lossy_mode_thd) << 8) |
+			((fbc->lossy_rgb_thd) << 4) | fbc->lossy_mode_idx;
+	}
+
+	/* when !enable, all three registers are written back to zero */
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_FBC_MODE, mode);
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_FBC_BUDGET_CTL, budget_ctl);
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_FBC_LOSSY_MODE, lossy_mode);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ctl_setup() - size the ctl and allocate its layer mixers.
+ * @ctl: mdp controller to set up; requires valid panel_data.
+ *
+ * Derives panel width/height, validates them against the max mixer
+ * width (at most two mixers wide), allocates the left mixer (and the
+ * right mixer for split-fb / wide panels), sizes their ROIs, and
+ * programs the 3D-pack opmode bits when a right mixer is present
+ * without per-LM DSC encoders. Writeback panels are ignored.
+ *
+ * Returns 0 on success; -ENODEV for an invalid handle, -ENOTSUPP for
+ * an unsupported resolution, -ENOMEM/-EPERM on mixer allocation
+ * problems. A post-processing config failure is logged but does not
+ * fail the setup.
+ */
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *split_ctl;
+	u32 width, height;
+	int split_fb, rc = 0;
+	u32 max_mixer_width;
+	struct mdss_panel_info *pinfo;
+
+	if (!ctl || !ctl->panel_data) {
+		pr_err("invalid ctl handle\n");
+		return -ENODEV;
+	}
+
+	pinfo = &ctl->panel_data->panel_info;
+	if (pinfo->type == WRITEBACK_PANEL) {
+		pr_err("writeback panel, ignore\n");
+		return 0;
+	}
+
+	split_ctl = mdss_mdp_get_split_ctl(ctl);
+
+	width = get_panel_width(ctl);
+	height = get_panel_yres(pinfo);
+
+	max_mixer_width = ctl->mdata->max_mixer_width;
+
+	/* split-fb only if each half fits in one mixer */
+	split_fb = ((is_dual_lm_single_display(ctl->mfd)) &&
+		    (ctl->mfd->split_fb_left <= max_mixer_width) &&
+		    (ctl->mfd->split_fb_right <= max_mixer_width)) ? 1 : 0;
+	pr_debug("max=%d xres=%d left=%d right=%d\n", max_mixer_width,
+		 width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);
+
+	if ((split_ctl && (width > max_mixer_width)) ||
+			(width > (2 * max_mixer_width))) {
+		pr_err("Unsupported panel resolution: %dx%d\n", width, height);
+		return -ENOTSUPP;
+	}
+
+	ctl->width = width;
+	ctl->height = height;
+	ctl->roi = (struct mdss_rect) {0, 0, width, height};
+
+	if (!ctl->mixer_left) {
+		ctl->mixer_left =
+			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
+			 ((width > max_mixer_width) || split_fb), 0);
+		if (!ctl->mixer_left) {
+			pr_err("unable to allocate layer mixer\n");
+			return -ENOMEM;
+		} else if (split_fb && ctl->mixer_left->num >= 1 &&
+			(ctl->panel_data->panel_info.type == MIPI_CMD_PANEL)) {
+			pr_err("use only DSPP0 and DSPP1 with cmd split\n");
+			return -EPERM;
+		}
+	}
+
+	if (split_fb) {
+		width = ctl->mfd->split_fb_left;
+		width += (pinfo->lcdc.border_left +
+				pinfo->lcdc.border_right);
+	} else if (width > max_mixer_width) {
+		width /= 2;
+	}
+
+	ctl->mixer_left->width = width;
+	ctl->mixer_left->height = height;
+	ctl->mixer_left->roi = (struct mdss_rect) {0, 0, width, height};
+	ctl->mixer_left->valid_roi = true;
+	ctl->mixer_left->roi_changed = true;
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		pr_debug("dual display detected\n");
+	} else {
+		if (split_fb)
+			width = ctl->mfd->split_fb_right;
+
+		/* remaining width needs a right mixer on this ctl */
+		if (width < ctl->width) {
+			if (ctl->mixer_right == NULL) {
+				ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
+					MDSS_MDP_MIXER_TYPE_INTF, true, 0);
+				if (!ctl->mixer_right) {
+					pr_err("unable to allocate right mixer\n");
+					if (ctl->mixer_left)
+						mdss_mdp_mixer_free(
+							ctl->mixer_left);
+					return -ENOMEM;
+				}
+			}
+			ctl->mixer_right->is_right_mixer = true;
+			ctl->mixer_right->width = width;
+			ctl->mixer_right->height = height;
+			ctl->mixer_right->roi = (struct mdss_rect)
+						{0, 0, width, height};
+			ctl->mixer_right->valid_roi = true;
+			ctl->mixer_right->roi_changed = true;
+		} else if (ctl->mixer_right) {
+			ctl->mixer_right->valid_roi = false;
+			ctl->mixer_right->roi_changed = false;
+			mdss_mdp_mixer_free(ctl->mixer_right);
+			ctl->mixer_right = NULL;
+		}
+
+		if (ctl->mixer_right) {
+			if (!is_dsc_compression(pinfo) ||
+			    (pinfo->dsc_enc_total == 1))
+				ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+					       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+		} else {
+			ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				  MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+		}
+	}
+
+	rc = mdss_mdp_pp_default_overlay_config(ctl->mfd, ctl->panel_data,
+						true);
+	/*
+	 * Ignore failure of PP config, ctl set-up can succeed.
+	 */
+	if (rc) {
+		pr_err("failed to set the pp config rc=%d fb=%d\n", rc,
+			ctl->mfd->index);
+		rc = 0;
+	}
+	return 0;
+}
+
+/**
+ * mdss_mdp_ctl_reconfig() - re-configure ctl for new mode
+ * @ctl: mdp controller.
+ * @pdata: panel data
+ *
+ * This function is called when we are trying to dynamically change
+ * the DSI mode. We need to change various mdp_ctl properties to
+ * the new mode of operation. Only MIPI video and command panels are
+ * supported; other panel types return -EINVAL. Returns 0 on success.
+ */
+int mdss_mdp_ctl_reconfig(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata)
+{
+	void *tmp;
+	int ret = 0;
+
+	/*
+	 * Switch first to prevent deleting important data in the case
+	 * where panel type is not supported in reconfig
+	 */
+	if ((pdata->panel_info.type != MIPI_VIDEO_PANEL) &&
+			(pdata->panel_info.type != MIPI_CMD_PANEL)) {
+		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
+		return -EINVAL;
+	}
+
+	/* if only changing resolution there is no need for intf reconfig */
+	if (!ctl->is_video_mode == (pdata->panel_info.type == MIPI_CMD_PANEL))
+		goto skip_intf_reconfig;
+
+	/*
+	 * Intentionally not clearing stop function, as stop will
+	 * be called after panel is instructed mode switch is happening
+	 */
+	tmp = ctl->ops.stop_fnc;
+	memset(&ctl->ops, 0, sizeof(ctl->ops));
+	ctl->ops.stop_fnc = tmp;
+
+	switch (pdata->panel_info.type) {
+	case MIPI_VIDEO_PANEL:
+		ctl->is_video_mode = true;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_CMD_PANEL:
+		ctl->is_video_mode = false;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
+		ctl->ops.start_fnc = mdss_mdp_cmd_start;
+		break;
+	}
+
+	ctl->is_secure = false;
+	ctl->split_flush_en = false;
+	ctl->perf_release_ctl_bw = false;
+	ctl->play_cnt = 0;
+
+	/* encode the interface number into the ctl opmode word */
+	ctl->opmode |= (ctl->intf_num << 4);
+
+skip_intf_reconfig:
+	ctl->width = get_panel_xres(&pdata->panel_info);
+	ctl->height = get_panel_yres(&pdata->panel_info);
+
+	/* resize mixers to the new panel dimensions per split mode */
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) {
+		if (ctl->mixer_left) {
+			ctl->mixer_left->width = ctl->width / 2;
+			ctl->mixer_left->height = ctl->height;
+		}
+		if (ctl->mixer_right) {
+			ctl->mixer_right->width = ctl->width / 2;
+			ctl->mixer_right->height = ctl->height;
+		}
+	} else {
+		/*
+		 * Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
+		 * MDP_PINGPONG_SPLIT case.
+		 */
+		if (ctl->mixer_left) {
+			ctl->mixer_left->width = ctl->width;
+			ctl->mixer_left->height = ctl->height;
+		}
+	}
+	ctl->roi = (struct mdss_rect) {0, 0, ctl->width, ctl->height};
+
+	ctl->border_x_off = pdata->panel_info.lcdc.border_left;
+	ctl->border_y_off = pdata->panel_info.lcdc.border_top;
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_init() - allocate and initialize a ctl for a panel.
+ * @pdata: panel data describing the panel to drive.
+ * @mfd: framebuffer device the ctl belongs to.
+ *
+ * Allocates a ctl (writeback panels pick the WB ctl offset) and fills
+ * in interface number/type, opmode, start function and destination
+ * format based on the panel type and bpp. For DSI panels the intf is
+ * chosen by display destination and may be swapped via mixer_swap.
+ *
+ * Returns the new ctl, or ERR_PTR(-EINVAL)/ERR_PTR(-ENOMEM) on
+ * failure — callers must check with IS_ERR(), never for NULL.
+ */
+struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
+				       struct msm_fb_data_type *mfd)
+{
+	int ret = 0, offset;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_panel_info *pinfo;
+
+	if (pdata->panel_info.type == WRITEBACK_PANEL)
+		offset = mdss_mdp_get_wb_ctl_support(mdata, false);
+	else
+		offset = MDSS_MDP_CTL0;
+
+	if (is_pingpong_split(mfd) && !mdata->has_pingpong_split) {
+		pr_err("Error: pp_split cannot be enabled on fb%d if HW doesn't support it\n",
+			mfd->index);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctl = mdss_mdp_ctl_alloc(mdata, offset);
+	if (!ctl) {
+		pr_err("unable to allocate ctl\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pinfo = &pdata->panel_info;
+	ctl->mfd = mfd;
+	ctl->panel_data = pdata;
+	ctl->is_video_mode = false;
+	ctl->perf_release_ctl_bw = false;
+	ctl->border_x_off = pinfo->lcdc.border_left;
+	ctl->border_y_off = pinfo->lcdc.border_top;
+	ctl->disable_prefill = false;
+
+	switch (pdata->panel_info.type) {
+	case EDP_PANEL:
+		ctl->is_video_mode = true;
+		ctl->intf_num = MDSS_MDP_INTF0;
+		ctl->intf_type = MDSS_INTF_EDP;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_VIDEO_PANEL:
+		ctl->is_video_mode = true;
+		/* DISPLAY_1 -> INTF1, DISPLAY_2 -> INTF2 (or swapped) */
+		if (pdata->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF2 :
+				MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF1 :
+				MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_CMD_PANEL:
+		if (pdata->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF2 :
+				MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF1 :
+				MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
+		ctl->ops.start_fnc = mdss_mdp_cmd_start;
+		INIT_WORK(&ctl->cpu_pm_work, __cpu_pm_work_handler);
+		break;
+	case DTV_PANEL:
+		ctl->is_video_mode = true;
+		ctl->intf_num = MDSS_MDP_INTF3;
+		ctl->intf_type = MDSS_INTF_HDMI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case WRITEBACK_PANEL:
+		ctl->intf_num = MDSS_MDP_NO_INTF;
+		ctl->ops.start_fnc = mdss_mdp_writeback_start;
+		break;
+	default:
+		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
+		ret = -EINVAL;
+		goto ctl_init_fail;
+	}
+
+	/* encode the interface number into the ctl opmode word */
+	ctl->opmode |= (ctl->intf_num << 4);
+
+	if (ctl->intf_num == MDSS_MDP_NO_INTF) {
+		ctl->dst_format = pdata->panel_info.out_format;
+	} else {
+		switch (pdata->panel_info.bpp) {
+		case 18:
+			if (ctl->intf_type == MDSS_INTF_DSI)
+				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666 |
+					MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB;
+			else
+				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
+			break;
+		case 24:
+		default:
+			ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888;
+			break;
+		}
+	}
+
+	return ctl;
+ctl_init_fail:
+	mdss_mdp_ctl_free(ctl);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * mdss_mdp_ctl_split_display_setup() - set up the slave ctl/mixer for
+ * a dual-DSI split display.
+ * @ctl: master mdp controller.
+ * @pdata: panel data for the second (right) panel.
+ *
+ * Creates a slave ctl via mdss_mdp_ctl_init(), sizes it from the
+ * second panel, allocates its left mixer as the right half of the
+ * display, and links it to @ctl as the split ctl.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_data *pdata)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl || !pdata)
+		return -ENODEV;
+
+	if (pdata->panel_info.xres > ctl->mdata->max_mixer_width) {
+		pr_err("Unsupported second panel resolution: %dx%d\n",
+			pdata->panel_info.xres, pdata->panel_info.yres);
+		return -ENOTSUPP;
+	}
+
+	if (ctl->mixer_right) {
+		pr_err("right mixer already setup for ctl=%d\n", ctl->num);
+		return -EPERM;
+	}
+
+	sctl = mdss_mdp_ctl_init(pdata, ctl->mfd);
+	/*
+	 * mdss_mdp_ctl_init() reports failure via ERR_PTR(), never NULL,
+	 * so a plain NULL check would let an error pointer through and
+	 * crash on the first dereference below.
+	 */
+	if (IS_ERR_OR_NULL(sctl)) {
+		pr_err("unable to setup split display\n");
+		return -ENODEV;
+	}
+
+	sctl->width = get_panel_xres(&pdata->panel_info);
+	sctl->height = get_panel_yres(&pdata->panel_info);
+
+	sctl->roi = (struct mdss_rect){0, 0, sctl->width, sctl->height};
+
+	if (!ctl->mixer_left) {
+		ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
+				MDSS_MDP_MIXER_TYPE_INTF,
+				false, 0);
+		if (!ctl->mixer_left) {
+			pr_err("unable to allocate layer mixer\n");
+			mdss_mdp_ctl_destroy(sctl);
+			return -ENOMEM;
+		}
+	}
+
+	mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false, 0);
+	if (!mixer) {
+		pr_err("unable to allocate layer mixer\n");
+		mdss_mdp_ctl_destroy(sctl);
+		return -ENOMEM;
+	}
+
+	/* slave ctl's left mixer drives the right half of the display */
+	mixer->is_right_mixer = true;
+	mixer->width = sctl->width;
+	mixer->height = sctl->height;
+	mixer->roi = (struct mdss_rect)
+				{0, 0, mixer->width, mixer->height};
+	mixer->valid_roi = true;
+	mixer->roi_changed = true;
+	sctl->mixer_left = mixer;
+
+	return mdss_mdp_set_split_ctl(ctl, sctl);
+}
+
+/*
+ * mdss_mdp_ctl_split_display_enable() - program the split-display
+ * trigger control registers.
+ * @enable: non-zero to enable split display, zero to clear it.
+ * @main_ctl: master ctl (used for register access and intf number).
+ * @slave_ctl: slave ctl, may be NULL (e.g. pingpong split); only used
+ *	       for the debug print.
+ *
+ * Builds the UPPER/LOWER pipe control words according to cmd vs video
+ * mode and which DSI intf is the master, then writes them together
+ * with the split-display enable bit. On HW rev >= 103 in video mode it
+ * additionally enables split flush (unless the mixers are swapped).
+ */
+static void mdss_mdp_ctl_split_display_enable(int enable,
+	struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl)
+{
+	u32 upper = 0, lower = 0;
+
+	pr_debug("split main ctl=%d intf=%d\n",
+			main_ctl->num, main_ctl->intf_num);
+
+	if (slave_ctl)
+		pr_debug("split slave ctl=%d intf=%d\n",
+			slave_ctl->num, slave_ctl->intf_num);
+
+	if (enable) {
+		if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) {
+			/* interface controlling sw trigger (cmd mode) */
+			lower |= BIT(1);
+			if (main_ctl->intf_num == MDSS_MDP_INTF2)
+				lower |= BIT(4);
+			else
+				lower |= BIT(8);
+			/*
+			 * Enable SMART_PANEL_FREE_RUN if ping pong split
+			 * is enabled.
+			 */
+			if (is_pingpong_split(main_ctl->mfd))
+				lower |= BIT(2);
+			upper = lower;
+		} else {
+			/* interface controlling sw trigger (video mode) */
+			if (main_ctl->intf_num == MDSS_MDP_INTF2) {
+				lower |= BIT(4);
+				upper |= BIT(8);
+			} else {
+				lower |= BIT(8);
+				upper |= BIT(4);
+			}
+		}
+	}
+	writel_relaxed(upper, main_ctl->mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL);
+	writel_relaxed(lower, main_ctl->mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL);
+	writel_relaxed(enable, main_ctl->mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
+
+	if ((main_ctl->mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
+		&& main_ctl->is_video_mode) {
+		struct mdss_overlay_private *mdp5_data;
+		bool mixer_swap = false;
+
+		if (main_ctl->mfd) {
+			mdp5_data = mfd_to_mdp5_data(main_ctl->mfd);
+			mixer_swap = mdp5_data->mixer_swap;
+		}
+
+		/* split flush only works with the default mixer mapping */
+		main_ctl->split_flush_en = !mixer_swap;
+		if (main_ctl->split_flush_en)
+			writel_relaxed(enable ? 0x1 : 0x0,
+				main_ctl->mdata->mdp_base +
+				MMSS_MDP_MDP_SSPP_SPARE_0);
+	}
+}
+
+/*
+ * mdss_mdp_ctl_pp_split_display_enable() - enable/disable pingpong
+ * split on @ctl.
+ * @enable: true to enable pp split, false to clear the PPB registers.
+ * @ctl: mdp controller driving the pp-split display.
+ *
+ * Warns (but still proceeds) if the HW exposes no PPB block. Programs
+ * the split trigger registers, then the first PPB config/control pair:
+ * slave intf number, horizontal split and dst-split enable.
+ */
+static void mdss_mdp_ctl_pp_split_display_enable(bool enable,
+	struct mdss_mdp_ctl *ctl)
+{
+	u32 cfg = 0, cntl = 0;
+
+	if (!ctl->mdata->nppb_ctl || !ctl->mdata->nppb_cfg) {
+		pr_err("No PPB to enable PP split\n");
+		WARN_ON(1);
+	}
+
+	mdss_mdp_ctl_split_display_enable(enable, ctl, NULL);
+
+	if (enable) {
+		cfg = ctl->slave_intf_num << 20; /* Set slave intf */
+		cfg |= BIT(16);			 /* Set horizontal split */
+		cntl = BIT(5);			 /* enable dst split */
+	}
+
+	writel_relaxed(cfg, ctl->mdata->mdp_base + ctl->mdata->ppb_cfg[0]);
+	writel_relaxed(cntl, ctl->mdata->mdp_base + ctl->mdata->ppb_ctl[0]);
+}
+
+/*
+ * mdss_mdp_ctl_destroy() - tear down a ctl and its split companion.
+ * @ctl: mdp controller to destroy.
+ *
+ * Sends MDSS_EVENT_CLOSE to the panel (failure only warns), removes
+ * the default PP overlay config, frees the split ctl if one exists,
+ * then frees @ctl itself. Always returns 0.
+ */
+int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *sctl;
+	int rc;
+
+	rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL,
+		CTL_INTF_EVENT_FLAG_DEFAULT);
+	WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num);
+
+	/* best-effort: ignore PP config removal result */
+	(void) mdss_mdp_pp_default_overlay_config(ctl->mfd, ctl->panel_data,
+						false);
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl) {
+		pr_debug("destroying split display ctl=%d\n", sctl->num);
+		mdss_mdp_ctl_free(sctl);
+	}
+
+	mdss_mdp_ctl_free(ctl);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ctl_intf_event() - broadcast an event to the panel chain.
+ * @ctl: mdp controller whose panel_data list receives the event.
+ * @event: MDSS_EVENT_* code to deliver.
+ * @arg: opaque event argument passed to each handler.
+ * @flags: CTL_INTF_EVENT_FLAG_* modifiers; SLAVE_INTF starts at the
+ *	   next (slave) panel, SKIP_BROADCAST stops after the first
+ *	   handler.
+ *
+ * Walks the panel_data->next chain while handlers return 0 and panels
+ * are active. Returns the first non-zero handler result, or 0.
+ */
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg,
+	u32 flags)
+{
+	struct mdss_panel_data *pdata;
+	int rc = 0;
+
+	if (!ctl || !ctl->panel_data)
+		return -ENODEV;
+
+	pdata = ctl->panel_data;
+
+	if (flags & CTL_INTF_EVENT_FLAG_SLAVE_INTF) {
+		pdata = pdata->next;
+		if (!pdata) {
+			pr_err("Error: event=%d flags=0x%x, ctl%d slave intf is not present\n",
+				event, flags, ctl->num);
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("sending ctl=%d event=%d flag=0x%x\n", ctl->num, event, flags);
+
+	do {
+		if (pdata->event_handler)
+			rc = pdata->event_handler(pdata, event, arg);
+		pdata = pdata->next;
+	} while (rc == 0 && pdata && pdata->active &&
+		!(flags & CTL_INTF_EVENT_FLAG_SKIP_BROADCAST));
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_ctl_restore_sub() - re-program one ctl after power collapse.
+ * @ctl: mdp controller to restore.
+ *
+ * Re-selects the interface in DISP_INTF_SEL (read-modify-write so
+ * other intfs are preserved), resumes post-processing, and restores
+ * the panel compression setup: DSC (unless a resolution switch is in
+ * flight, which programs it itself) or FBC.
+ */
+static void mdss_mdp_ctl_restore_sub(struct mdss_mdp_ctl *ctl)
+{
+	u32 temp;
+	int ret = 0;
+
+	temp = readl_relaxed(ctl->mdata->mdp_base +
+			MDSS_MDP_REG_DISP_INTF_SEL);
+	/* each intf gets one byte in the INTF_SEL register */
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
+	writel_relaxed(temp, ctl->mdata->mdp_base +
+			MDSS_MDP_REG_DISP_INTF_SEL);
+
+	if (ctl->mfd && ctl->panel_data) {
+		ctl->mfd->ipc_resume = true;
+		mdss_mdp_pp_resume(ctl->mfd);
+
+		if (is_dsc_compression(&ctl->panel_data->panel_info)) {
+			/*
+			 * Avoid redundant call to dsc_setup when mode switch
+			 * is in progress. During the switch, dsc_setup is
+			 * handled in mdss_mode_switch() function.
+			 */
+			if (ctl->pending_mode_switch != SWITCH_RESOLUTION)
+				mdss_mdp_ctl_dsc_setup(ctl,
+					&ctl->panel_data->panel_info);
+		} else if (ctl->panel_data->panel_info.compression_mode ==
+				COMPRESSION_FBC) {
+			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
+					&ctl->panel_data->panel_info);
+			if (ret)
+				pr_err("Failed to restore FBC mode\n");
+		}
+	}
+}
+
+/*
+ * mdss_mdp_ctl_restore() - restore mdp ctl path
+ * @locked - boolean to signal that clock lock is already acquired
+ *
+ * This function is called whenever MDP comes out of a power collapse as
+ * a result of a screen update. It restores the MDP controller's software
+ * state to the hardware registers.
+ * Function does not enable the clocks, so caller must make sure
+ * clocks are enabled before calling.
+ * The locked boolean in the parameters signals that synchronization
+ * with mdp clocks access is not required downstream.
+ * Only call this function setting this value to true if the clocks access
+ * synchronization is guaranteed by the caller.
+ */
+void mdss_mdp_ctl_restore(bool locked)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 cnum;
+
+	/* walk every ctl and restore only the ones that are powered on */
+	for (cnum = MDSS_MDP_CTL0; cnum < mdata->nctl; cnum++) {
+		ctl = mdata->ctl_off + cnum;
+		if (!mdss_mdp_ctl_is_power_on(ctl))
+			continue;
+
+		pr_debug("restoring ctl%d, intf_type=%d\n", cnum,
+			ctl->intf_type);
+		ctl->play_cnt = 0;
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		mdss_mdp_ctl_restore_sub(ctl);
+		if (sctl) {
+			mdss_mdp_ctl_restore_sub(sctl);
+			mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
+		} else if (is_pingpong_split(ctl->mfd)) {
+			mdss_mdp_ctl_pp_split_display_enable(1, ctl);
+		}
+
+		/* interface-specific restore (video/cmd), if registered */
+		if (ctl->ops.restore_fnc)
+			ctl->ops.restore_fnc(ctl, locked);
+	}
+}
+
+/*
+ * mdss_mdp_ctl_start_sub() - start one ctl's interface and mixers.
+ * @ctl: mdp controller to start.
+ * @handoff: true when taking over from bootloader continuous splash.
+ *
+ * Invokes the interface start function (for handoff, or once splash
+ * has finished), clears all LAYER registers unless splash is still up,
+ * selects the interface in DISP_INTF_SEL, sizes the left mixer output
+ * and programs DSC or FBC compression as the panel requires.
+ *
+ * Returns 0 on success or the start/FBC error code.
+ */
+static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
+{
+	struct mdss_mdp_mixer *mixer;
+	u32 outsize, temp;
+	int ret = 0;
+	int i, nmixers;
+
+	pr_debug("ctl_num=%d\n", ctl->num);
+
+	/*
+	 * Need start_fnc in 2 cases:
+	 * (1) handoff
+	 * (2) continuous splash finished.
+	 */
+	if (handoff || !ctl->panel_data->panel_info.cont_splash_enabled) {
+		if (ctl->ops.start_fnc)
+			ret = ctl->ops.start_fnc(ctl);
+		else
+			pr_warn("no start function for ctl=%d type=%d\n",
+					ctl->num,
+					ctl->panel_data->panel_info.type);
+
+		if (ret) {
+			pr_err("unable to start intf\n");
+			return ret;
+		}
+	}
+
+	/* clear stale mixer cfg unless splash content is still on screen */
+	if (!ctl->panel_data->panel_info.cont_splash_enabled) {
+		nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER +
+			MDSS_MDP_WB_MAX_LAYERMIXER;
+		for (i = 0; i < nmixers; i++)
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0);
+	}
+
+	temp = readl_relaxed(ctl->mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
+	/* pp split drives the adjacent intf with the same type */
+	if (is_pingpong_split(ctl->mfd))
+		temp |= (ctl->intf_type << (ctl->intf_num * 8));
+
+	writel_relaxed(temp, ctl->mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+
+	mixer = ctl->mixer_left;
+	if (mixer) {
+		struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+		mixer->params_changed++;
+
+		/* LM_OUT_SIZE packs height in the upper 16 bits */
+		outsize = (mixer->height << 16) | mixer->width;
+		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
+
+		if (is_dsc_compression(pinfo)) {
+			mdss_mdp_ctl_dsc_setup(ctl, pinfo);
+		} else if (pinfo->compression_mode == COMPRESSION_FBC) {
+			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
+					pinfo);
+		}
+	}
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_start() - power on and start a ctl path.
+ * @ctl: mdp controller to start.
+ * @handoff: true when handing off from bootloader splash; power_state
+ *	     is then left unchanged to keep overlay ops blocked.
+ *
+ * Early-outs if already interactive (and no mode switch pending); runs
+ * mdss_mdp_ctl_setup() when coming from power-off. Under ctl->lock and
+ * with MDP clocks voted on, starts the ctl and then the companion path:
+ * split ctl, right mixer (with 3D pack cleared), or pingpong split.
+ * Resumes histogram interrupts before releasing the clock vote.
+ *
+ * Returns 0 on success or a negative errno from the start path.
+ */
+int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+
+	pr_debug("ctl_num=%d, power_state=%d\n", ctl->num, ctl->power_state);
+
+	if (mdss_mdp_ctl_is_power_on_interactive(ctl)
+			&& !(ctl->pending_mode_switch)) {
+		pr_debug("%d: panel already on!\n", __LINE__);
+		return 0;
+	}
+
+	if (mdss_mdp_ctl_is_power_off(ctl)) {
+		ret = mdss_mdp_ctl_setup(ctl);
+		if (ret)
+			return ret;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	mutex_lock(&ctl->lock);
+
+	/* fresh start: discard stale perf accounting */
+	if (mdss_mdp_ctl_is_power_off(ctl))
+		memset(&ctl->cur_perf, 0, sizeof(ctl->cur_perf));
+
+	/*
+	 * keep power_on false during handoff to avoid unexpected
+	 * operations to overlay.
+	 */
+	if (!handoff || ctl->pending_mode_switch)
+		ctl->power_state = MDSS_PANEL_POWER_ON;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	ret = mdss_mdp_ctl_start_sub(ctl, handoff);
+	if (ret == 0) {
+		if (sctl && ctl->mfd &&
+		    ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+			/*split display available */
+			ret = mdss_mdp_ctl_start_sub(sctl, handoff);
+			if (!ret)
+				mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
+		} else if (ctl->mixer_right) {
+			struct mdss_mdp_mixer *mixer = ctl->mixer_right;
+			u32 out;
+
+			mixer->params_changed++;
+			out = (mixer->height << 16) | mixer->width;
+			mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, out);
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
+		} else if (is_pingpong_split(ctl->mfd)) {
+			ctl->slave_intf_num = (ctl->intf_num + 1);
+			mdss_mdp_ctl_pp_split_display_enable(true, ctl);
+		}
+	}
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_stop() - stop a ctl path and enter @power_state.
+ * @ctl: mdp controller to stop.
+ * @power_state: target MDSS_PANEL_POWER_* state; if the panel stays
+ *		 partially on, registers are left programmed.
+ *
+ * Under ctl->lock with clocks voted on: suspends histogram interrupts,
+ * calls the interface stop functions for ctl and its split ctl
+ * (disabling FBC where in use), and — only when the panel is really
+ * going off — tears down split display, clears CTL_TOP and the mixer
+ * configs. On success records the new power state and releases
+ * bandwidth unless a mode switch is pending.
+ *
+ * Returns 0 on success or the stop-function error.
+ */
+int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int power_state)
+{
+	struct mdss_mdp_ctl *sctl;
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_debug("ctl_num=%d, power_state=%d\n", ctl->num, ctl->power_state);
+
+	if (!ctl->mfd->panel_reconfig && !mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_debug("%s %d already off!\n", __func__, __LINE__);
+		return 0;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	mutex_lock(&ctl->lock);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
+
+	if (ctl->ops.stop_fnc) {
+		ret = ctl->ops.stop_fnc(ctl, power_state);
+		if (ctl->panel_data->panel_info.compression_mode ==
+				COMPRESSION_FBC) {
+			mdss_mdp_ctl_fbc_enable(0, ctl->mixer_left,
+					&ctl->panel_data->panel_info);
+		}
+	} else {
+		pr_warn("no stop func for ctl=%d\n", ctl->num);
+	}
+
+	if (sctl && sctl->ops.stop_fnc) {
+		ret = sctl->ops.stop_fnc(sctl, power_state);
+		if (sctl->panel_data->panel_info.compression_mode ==
+				COMPRESSION_FBC) {
+			mdss_mdp_ctl_fbc_enable(0, sctl->mixer_left,
+					&sctl->panel_data->panel_info);
+		}
+	}
+	if (ret) {
+		pr_warn("error powering off intf ctl=%d\n", ctl->num);
+		goto end;
+	}
+
+	/* low-power (e.g. doze) keeps the ctl programmed */
+	if (mdss_panel_is_power_on(power_state)) {
+		pr_debug("panel is not off, leaving ctl power on\n");
+		goto end;
+	}
+
+	if (sctl)
+		mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
+	if (sctl) {
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);
+		mdss_mdp_reset_mixercfg(sctl);
+	}
+
+	mdss_mdp_reset_mixercfg(ctl);
+
+	ctl->play_cnt = 0;
+
+end:
+	if (!ret) {
+		ctl->power_state = power_state;
+		if (!ctl->pending_mode_switch)
+			mdss_mdp_ctl_perf_update(ctl, 0, true);
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_pipe_reset() - Halts all the pipes during ctl reset.
+ * @mixer: Mixer from which to reset all pipes.
+ * @is_recovery: propagated to the fetch-halt so it knows whether this
+ *		 is an underflow-recovery path.
+ *
+ * This function called during control path reset and will halt
+ * all the pipes staged on the mixer, forcing pipe clocks off where
+ * the HW provides a per-pipe sw reset.
+ */
+static void mdss_mdp_pipe_reset(struct mdss_mdp_mixer *mixer, bool is_recovery)
+{
+	unsigned long pipe_map;
+	u32 bit = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool sw_rst_avail = mdss_mdp_pipe_is_sw_reset_available(mdata);
+
+	if (!mixer)
+		return;
+
+	pipe_map = mixer->pipe_mapped;
+	pr_debug("pipe_map=0x%lx\n", pipe_map);
+	for_each_set_bit_from(bit, &pipe_map, MAX_PIPES_PER_LM) {
+		struct mdss_mdp_pipe *pipe;
+
+		/*
+		 * this assumes that within lm there can be either rect0+rect1
+		 * or rect0 only. Thus to find the hardware pipe to halt only
+		 * check for rect 0 is sufficient.
+		 */
+		pipe = mdss_mdp_pipe_search(mdata, 1 << bit,
+				MDSS_MDP_PIPE_RECT0);
+		if (pipe) {
+			mdss_mdp_pipe_fetch_halt(pipe, is_recovery);
+			if (sw_rst_avail)
+				mdss_mdp_pipe_clk_force_off(pipe);
+		}
+	}
+}
+
+/*
+ * mdss_mdp_poll_ctl_reset_status() - poll CTL_SW_RESET until clear.
+ * @ctl: mdp controller being reset.
+ * @cnt: maximum number of 50us polls.
+ *
+ * Returns the final status bit: 0 when the reset completed, non-zero
+ * if it was still pending after @cnt polls.
+ */
+static u32 mdss_mdp_poll_ctl_reset_status(struct mdss_mdp_ctl *ctl, u32 cnt)
+{
+	u32 status;
+	/*
+	 * it takes around 30us to have mdp finish resetting its ctl path
+	 * poll every 50us so that reset should be completed at 1st poll
+	 */
+	do {
+		udelay(50);
+		status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
+		status &= 0x01;
+		pr_debug("status=%x, count=%d\n", status, cnt);
+		cnt--;
+	} while (cnt > 0 && status);
+
+	return status;
+}
+
+/*
+ * mdss_mdp_check_ctl_reset_status() - checks ctl reset status
+ * @ctl: mdp controller
+ *
+ * This function checks the ctl reset status before every frame update.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete. And does a panic if hw fails to complete the reset
+ * within the max poll interval.
+ */
+void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl)
+{
+	u32 status;
+
+	if (!ctl)
+		return;
+
+	status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
+	status &= 0x01;
+	if (!status)
+		return;
+
+	pr_debug("hw ctl reset is set for ctl:%d\n", ctl->num);
+	/* up to 5 x 50us polls before declaring the recovery stuck */
+	status = mdss_mdp_poll_ctl_reset_status(ctl, 5);
+	if (status) {
+		pr_err("hw recovery is not complete for ctl:%d\n", ctl->num);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus",
+			"vbif_dbg_bus", "panic");
+	}
+}
+
+/*
+ * mdss_mdp_ctl_reset() - reset mdp ctl path.
+ * @ctl: mdp controller.
+ * @is_recovery: true when invoked from underflow recovery.
+ * this function called when underflow happen,
+ * it will reset mdp ctl path and poll for its completion, then halt
+ * all pipes staged on the ctl's mixers.
+ *
+ * Returns 0 on success, -EINVAL for a NULL ctl, -EAGAIN on timeout.
+ *
+ * Note: called within atomic context.
+ */
+int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl, bool is_recovery)
+{
+	u32 status;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl) {
+		pr_err("ctl not initialized\n");
+		return -EINVAL;
+	}
+
+	mixer = ctl->mixer_left;
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_SW_RESET, 1);
+
+	status = mdss_mdp_poll_ctl_reset_status(ctl, 20);
+	if (status)
+		pr_err("sw ctl:%d reset timedout\n", ctl->num);
+
+	if (mixer) {
+		mdss_mdp_pipe_reset(mixer, is_recovery);
+
+		if (is_dual_lm_single_display(ctl->mfd) &&
+		    ctl->mixer_right)
+			mdss_mdp_pipe_reset(ctl->mixer_right, is_recovery);
+	}
+
+	return (status) ? -EAGAIN : 0;
+}
+
+/*
+ * mdss_mdp_mixer_update_pipe_map() - keep track of pipe configuration in mixer
+ * @master_ctl: mdp controller.
+ * @mixer_mux: which mixer (left/right mux) of the master ctl to update.
+ *
+ * This function keeps track of the current mixer configuration in the hardware.
+ * It's callers responsibility to call with master control. Commits
+ * next_pipe_map (what was just programmed) into pipe_mapped (what HW
+ * is now running).
+ */
+void mdss_mdp_mixer_update_pipe_map(struct mdss_mdp_ctl *master_ctl,
+		       int mixer_mux)
+{
+	struct mdss_mdp_mixer *mixer = mdss_mdp_mixer_get(master_ctl,
+			mixer_mux);
+
+	if (!mixer)
+		return;
+
+	pr_debug("mixer%d pipe_mapped=0x%x next_pipes=0x%x\n",
+		mixer->num, mixer->pipe_mapped, mixer->next_pipe_map);
+
+	mixer->pipe_mapped = mixer->next_pipe_map;
+}
+
+/*
+ * mdss_mdp_set_mixer_roi() - apply a new ROI to one mixer.
+ * @mixer: layer mixer to update.
+ * @roi: new region of interest; a zero-area rect marks the mixer
+ *	 invalid for this frame.
+ *
+ * Records whether the ROI actually changed (bumping params_changed so
+ * the mixer gets re-programmed) and logs the result.
+ */
+static void mdss_mdp_set_mixer_roi(struct mdss_mdp_mixer *mixer,
+	struct mdss_rect *roi)
+{
+	mixer->valid_roi = (roi->w && roi->h);
+	mixer->roi_changed = false;
+
+	if (!mdss_rect_cmp(roi, &mixer->roi)) {
+		mixer->roi = *roi;
+		mixer->params_changed++;
+		mixer->roi_changed = true;
+	}
+
+	pr_debug("mixer%d ROI %s: [%d, %d, %d, %d]\n",
+		mixer->num, mixer->roi_changed ? "changed" : "not changed",
+		mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h);
+	MDSS_XLOG(mixer->num, mixer->roi_changed, mixer->valid_roi,
+		mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h);
+}
+
+/*
+ * mdss_mdp_set_roi() - set partial-update ROIs for a frame.
+ * @ctl: master mdp controller (only call from master ctl).
+ * @l_roi: requested left-mixer ROI; may be rewritten to full screen.
+ * @r_roi: requested right-mixer ROI; may be rewritten to full screen.
+ *
+ * Falls back to full-mixer ROIs when either rect is degenerate (one
+ * dimension zero), both are empty, or partial update is disabled.
+ * Propagates the ROIs to the left mixer, then to either the split
+ * ctl's mixer (dual display) or this ctl's right mixer (dual-LM
+ * single display), where CTL ROI becomes the union of both halves.
+ * If the partial-update type changed since the previous frame on a
+ * split-LM target with source split, forces roi_changed so the HW is
+ * fully re-programmed.
+ */
+void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
+	struct mdss_rect *l_roi, struct mdss_rect *r_roi)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	enum mdss_mdp_pu_type previous_frame_pu_type, current_frame_pu_type;
+
+	/* Reset ROI when we have (1) invalid ROI (2) feature disabled */
+	if ((!l_roi->w && l_roi->h) || (l_roi->w && !l_roi->h) ||
+	    (!r_roi->w && r_roi->h) || (r_roi->w && !r_roi->h) ||
+	    (!l_roi->w && !l_roi->h && !r_roi->w && !r_roi->h) ||
+	    !ctl->panel_data->panel_info.partial_update_enabled) {
+
+		if (ctl->mixer_left) {
+			*l_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_left->width,
+					ctl->mixer_left->height};
+		}
+
+		if (ctl->mixer_right) {
+			*r_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_right->width,
+					ctl->mixer_right->height};
+		}
+	}
+
+	previous_frame_pu_type = mdss_mdp_get_pu_type(ctl);
+	if (ctl->mixer_left) {
+		mdss_mdp_set_mixer_roi(ctl->mixer_left, l_roi);
+		ctl->roi = ctl->mixer_left->roi;
+	}
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+		if (sctl && sctl->mixer_left) {
+			mdss_mdp_set_mixer_roi(sctl->mixer_left, r_roi);
+			sctl->roi = sctl->mixer_left->roi;
+		}
+	} else if (is_dual_lm_single_display(ctl->mfd) && ctl->mixer_right) {
+
+		mdss_mdp_set_mixer_roi(ctl->mixer_right, r_roi);
+
+		/* in this case, CTL_ROI is a union of left+right ROIs. */
+		ctl->roi.w += ctl->mixer_right->roi.w;
+
+		/* right_only, update roi.x as per CTL ROI guidelines */
+		if (ctl->mixer_left && !ctl->mixer_left->valid_roi) {
+			ctl->roi = ctl->mixer_right->roi;
+			ctl->roi.x = left_lm_w_from_mfd(ctl->mfd) +
+				ctl->mixer_right->roi.x;
+		}
+	}
+
+	current_frame_pu_type = mdss_mdp_get_pu_type(ctl);
+
+	/*
+	 * Force HW programming whenever partial update type changes
+	 * between two consecutive frames to avoid incorrect HW programming.
+	 */
+	if (is_split_lm(ctl->mfd) && mdata->has_src_split &&
+	    (previous_frame_pu_type != current_frame_pu_type)) {
+		if (ctl->mixer_left)
+			ctl->mixer_left->roi_changed = true;
+		if (ctl->mixer_right)
+			ctl->mixer_right->roi_changed = true;
+	}
+}
+
+/*
+ * __mdss_mdp_mixer_update_cfg_masks() - accumulate the LAYER register
+ * bits that stage pipe @pnum at blend @stage.
+ * @pnum: source pipe number (bounds-checked against MDSS_MDP_MAX_SSPP).
+ * @rect_num: RECT0 uses the base/ext/ext2 fields; RECT1 only ext2.
+ * @stage: blend stage to encode into the per-pipe HWIO fields.
+ * @cfg: mixer config whose config_masks[] are OR-ed with the result.
+ */
+static void __mdss_mdp_mixer_update_cfg_masks(u32 pnum,
+		enum mdss_mdp_pipe_rect rect_num,
+		u32 stage, struct mdss_mdp_mixer_cfg *cfg)
+{
+	u32 masks[NUM_MIXERCFG_REGS] = { 0 };
+	int i;
+
+	if (pnum >= MDSS_MDP_MAX_SSPP)
+		return;
+
+	if (rect_num == MDSS_MDP_PIPE_RECT0) {
+		masks[0] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].base, stage);
+		masks[1] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext, stage);
+		masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext2, stage);
+	} else { /* RECT1 */
+		masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_rec1_hwio[pnum].ext2,
+				stage);
+	}
+
+	for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+		cfg->config_masks[i] |= masks[i];
+
+	/*
+	 * Log all three mask words: RECT1 pipes only populate ext2, so
+	 * omitting masks[2] would make the trace useless for them.
+	 */
+	pr_debug("pnum=%d stage=%d cfg=0x%08x ext=0x%08x ext2=0x%08x\n",
+			pnum, stage, masks[0], masks[1], masks[2]);
+}
+
+/*
+ * __mdss_mdp_mixer_get_offsets() - fill the three CTL LAYER register
+ * offsets (base/EXTN/EXTN2) for hw mixer @mixer_num into @offsets.
+ * @count must be at least NUM_MIXERCFG_REGS.
+ */
+static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
+		u32 *offsets, size_t count)
+{
+	WARN_ON(count < NUM_MIXERCFG_REGS);
+
+	offsets[0] = MDSS_MDP_REG_CTL_LAYER(mixer_num);
+	offsets[1] = MDSS_MDP_REG_CTL_LAYER_EXTN(mixer_num);
+	offsets[2] = MDSS_MDP_REG_CTL_LAYER_EXTN2(mixer_num);
+}
+
+/*
+ * __mdss_mdp_mixer_get_hw_num() - map a logical mixer to the hw
+ * register index used by the CTL LAYER registers.
+ */
+static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
+{
+	/*
+	 * mapping to hardware expectation of actual mixer programming to
+	 * happen on following registers:
+	 *	INTF: 0, 1, 2, 5
+	 *	WB: 3, 4
+	 * With some exceptions on certain revisions
+	 */
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
+		u32 wb_offset;
+
+		if (test_bit(MDSS_CAPS_MIXER_1_FOR_WB,
+					mixer->ctl->mdata->mdss_caps_map))
+			wb_offset = MDSS_MDP_INTF_LAYERMIXER1;
+		else
+			wb_offset = MDSS_MDP_INTF_LAYERMIXER3;
+
+		return mixer->num + wb_offset;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3) {
+		return 5;
+	} else {
+		return mixer->num;
+	}
+}
+
+/*
+ * __mdss_mdp_mixer_write_layer() - write @values to the hw mixer's
+ * LAYER/EXTN/EXTN2 registers through @ctl.
+ * @count must be at least NUM_MIXERCFG_REGS and @values non-NULL.
+ */
+static inline void __mdss_mdp_mixer_write_layer(struct mdss_mdp_ctl *ctl,
+		u32 mixer_num, u32 *values, size_t count)
+{
+	u32 off[NUM_MIXERCFG_REGS];
+	int i;
+
+	WARN_ON(!values || count < NUM_MIXERCFG_REGS);
+
+	__mdss_mdp_mixer_get_offsets(mixer_num, off, ARRAY_SIZE(off));
+
+	for (i = 0; i < count; i++)
+		mdss_mdp_ctl_write(ctl, off[i], values[i]);
+}
+
+/*
+ * __mdss_mdp_mixer_write_cfg() - commit a mixer config to hardware.
+ * @mixer: layer mixer to program; no-op when NULL.
+ * @cfg: staged pipe masks plus border/cursor flags; NULL writes zeros
+ *	 (clears the mixer).
+ */
+static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_mixer_cfg *cfg)
+{
+	u32 vals[NUM_MIXERCFG_REGS] = {0};
+	int i, mixer_num;
+
+	if (!mixer)
+		return;
+
+	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+
+	if (cfg) {
+		for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+			vals[i] = cfg->config_masks[i];
+
+		/* border color and cursor flags live in the base register */
+		if (cfg->border_enabled)
+			vals[0] |= MDSS_MDP_LM_BORDER_COLOR;
+		if (cfg->cursor_enabled)
+			vals[0] |= MDSS_MDP_LM_CURSOR_OUT;
+	}
+
+	__mdss_mdp_mixer_write_layer(mixer->ctl, mixer_num,
+			vals, ARRAY_SIZE(vals));
+
+	pr_debug("mixer=%d cfg=0%08x cfg_extn=0x%08x cfg_extn2=0x%08x\n",
+		mixer->num, vals[0], vals[1], vals[2]);
+	MDSS_XLOG(mixer->num, vals[0], vals[1], vals[2]);
+}
+
+/*
+ * mdss_mdp_reset_mixercfg() - clear the layer registers of every mixer
+ * @ctl: control path used for the register writes (no-op when NULL)
+ *
+ * Zeroes the LAYER/EXTN/EXTN2 registers of all interface and writeback
+ * mixers, detaching any stale pipe staging left in hardware.
+ */
+void mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl)
+{
+	u32 zero_cfg[NUM_MIXERCFG_REGS] = {0};
+	int total = MDSS_MDP_INTF_MAX_LAYERMIXER + MDSS_MDP_WB_MAX_LAYERMIXER;
+	int mixer_id;
+
+	if (!ctl)
+		return;
+
+	for (mixer_id = 0; mixer_id < total; mixer_id++)
+		__mdss_mdp_mixer_write_layer(ctl, mixer_id, zero_cfg,
+				ARRAY_SIZE(zero_cfg));
+}
+
+/*
+ * mdss_mdp_mixer_reg_has_pipe() - check if a pipe is staged in hardware
+ * @mixer: mixer whose layer registers are read back
+ * @pipe: pipe to look for
+ *
+ * Reads the mixer's CTL layer registers and tests them against the pipe's
+ * full stage mask (stage -1 selects all stage bits for this pipe/rect).
+ *
+ * Return: true when any of the pipe's config bits are set in hardware.
+ */
+bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_pipe *pipe)
+{
+	u32 offs[NUM_MIXERCFG_REGS];
+	u32 cfgs[NUM_MIXERCFG_REGS];
+	struct mdss_mdp_mixer_cfg mixercfg;
+	int i, mixer_num;
+
+	if (!mixer)
+		return false;
+
+	memset(&mixercfg, 0, sizeof(mixercfg));
+
+	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+	__mdss_mdp_mixer_get_offsets(mixer_num, offs, NUM_MIXERCFG_REGS);
+
+	/* snapshot the current hardware staging state */
+	for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+		cfgs[i] = mdss_mdp_ctl_read(mixer->ctl, offs[i]);
+
+	__mdss_mdp_mixer_update_cfg_masks(pipe->num, pipe->multirect.num, -1,
+			&mixercfg);
+	for (i = 0; i < NUM_MIXERCFG_REGS; i++) {
+		if (cfgs[i] & mixercfg.config_masks[i]) {
+			MDSS_XLOG(mixer->num, cfgs[0], cfgs[1]);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * mdss_mdp_mixer_setup() - program blend stages and layer config of a mixer
+ * @master_ctl: master control path of the display
+ * @mixer_mux: MDSS_MDP_MIXER_MUX_LEFT or MDSS_MDP_MIXER_MUX_RIGHT
+ * @lm_swap: when true, take staging data from the opposite-side mixer while
+ *           still programming this side's hardware (layer-mixer swap)
+ *
+ * Walks all staged pipes of the selected mixer, programs per-stage blend
+ * operation and alpha registers, accumulates the layer config masks and
+ * finally writes them out, updating the ctl flush bits accordingly.
+ */
+static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
+	int mixer_mux, bool lm_swap)
+{
+	int i, mixer_num;
+	int stage, screen_state, outsize;
+	u32 off, blend_op, blend_stage;
+	u32 mixer_op_mode = 0, bg_alpha_enable = 0;
+	struct mdss_mdp_mixer_cfg mixercfg;
+	u32 fg_alpha = 0, bg_alpha = 0;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl, *ctl_hw;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *mixer_hw = mdss_mdp_mixer_get(master_ctl,
+		mixer_mux);
+	struct mdss_mdp_mixer *mixer;
+
+	if (!mixer_hw)
+		return;
+
+	ctl = mixer_hw->ctl;
+	if (!ctl)
+		return;
+
+	ctl_hw = ctl;
+	mixer_hw->params_changed = 0;
+
+	/* check if mixer setup for rotator is needed */
+	if (mixer_hw->rotator_mode) {
+		__mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
+		return;
+	}
+
+	memset(&mixercfg, 0, sizeof(mixercfg));
+
+	/* under lm_swap, staging data comes from the opposite-side mixer */
+	if (lm_swap) {
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+			mixer = mdss_mdp_mixer_get(master_ctl,
+					MDSS_MDP_MIXER_MUX_LEFT);
+		else
+			mixer = mdss_mdp_mixer_get(master_ctl,
+					MDSS_MDP_MIXER_MUX_RIGHT);
+		ctl_hw = mixer->ctl;
+	} else {
+		mixer = mixer_hw;
+	}
+
+	/*
+	 * if lm_swap was used on MDP_DUAL_LM_DUAL_DISPLAY then we need to
+	 * reset mixercfg every frame because there might be a stale value
+	 * in mixerfcfg register.
+	 */
+	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) &&
+	    is_dsc_compression(&ctl->panel_data->panel_info) &&
+	    ctl->panel_data->panel_info.partial_update_enabled &&
+	    mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU))
+		mdss_mdp_reset_mixercfg(ctl_hw);
+
+	if (!mixer->valid_roi) {
+		/*
+		 * resetting mixer config is specifically needed when split
+		 * mode is MDP_DUAL_LM_SINGLE_DISPLAY but update is only on
+		 * one side.
+		 */
+		__mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
+
+		MDSS_XLOG(mixer->num, mixer_hw->num, XLOG_FUNC_EXIT);
+		return;
+	}
+
+	trace_mdp_mixer_update(mixer_hw->num);
+	pr_debug("setup mixer=%d hw=%d\n", mixer->num, mixer_hw->num);
+	screen_state = ctl->force_screen_state;
+
+	outsize = (mixer->roi.h << 16) | mixer->roi.w;
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
+
+	/* forced blank: show border color only, skip all pipe staging */
+	if (screen_state == MDSS_SCREEN_FORCE_BLANK) {
+		mixercfg.border_enabled = true;
+		goto update_mixer;
+	}
+
+	pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE * MAX_PIPES_PER_STAGE];
+	if (pipe == NULL) {
+		mixercfg.border_enabled = true;
+	} else {
+		__mdss_mdp_mixer_update_cfg_masks(pipe->num,
+				pipe->multirect.num, MDSS_MDP_STAGE_BASE,
+				&mixercfg);
+
+		if (pipe->src_fmt->alpha_enable)
+			bg_alpha_enable = 1;
+	}
+
+	/* program blend op and alpha for every staged pipe above base */
+	i = MDSS_MDP_STAGE_0 * MAX_PIPES_PER_STAGE;
+	for (; i < MAX_PIPES_PER_LM; i++) {
+		pipe = mixer->stage_pipe[i];
+		if (pipe == NULL)
+			continue;
+
+		stage = i / MAX_PIPES_PER_STAGE;
+		if (stage != pipe->mixer_stage) {
+			pr_warn("pipe%d rec%d mixer:%d stage mismatch. pipe->mixer_stage=%d, mixer->stage_pipe=%d multirect_mode=%d. skip staging it\n",
+			    pipe->num, pipe->multirect.num, mixer->num,
+			    pipe->mixer_stage, stage, pipe->multirect.mode);
+			mixer->stage_pipe[i] = NULL;
+			continue;
+		}
+
+		/*
+		 * pipe which is staged on both LMs will be tracked through
+		 * left mixer only.
+		 */
+		if (!pipe->src_split_req || !mixer->is_right_mixer)
+			mixer->next_pipe_map |= pipe->ndx;
+
+		blend_stage = stage - MDSS_MDP_STAGE_0;
+		off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);
+
+		/*
+		 * Account for additional blending stages
+		 * from MDP v1.5 onwards
+		 */
+		if (blend_stage > 3)
+			off += MDSS_MDP_REG_LM_BLEND_STAGE4;
+		blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+			    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+		fg_alpha = pipe->alpha;
+		bg_alpha = 0xFF - pipe->alpha;
+		/* keep fg alpha */
+		mixer_op_mode |= 1 << (blend_stage + 1);
+
+		switch (pipe->blend_op) {
+		case BLEND_OP_OPAQUE:
+
+			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+
+			pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num,
+					stage);
+			break;
+
+		case BLEND_OP_PREMULTIPLIED:
+			if (pipe->src_fmt->alpha_enable) {
+				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
+				if (fg_alpha != 0xff) {
+					bg_alpha = fg_alpha;
+					blend_op |=
+						MDSS_MDP_BLEND_BG_MOD_ALPHA |
+						MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
+				} else {
+					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
+				}
+			}
+			pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num,
+					stage);
+			break;
+
+		case BLEND_OP_COVERAGE:
+			if (pipe->src_fmt->alpha_enable) {
+				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL |
+					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
+				if (fg_alpha != 0xff) {
+					bg_alpha = fg_alpha;
+					blend_op |=
+					       MDSS_MDP_BLEND_FG_MOD_ALPHA |
+					       MDSS_MDP_BLEND_FG_INV_MOD_ALPHA |
+					       MDSS_MDP_BLEND_BG_MOD_ALPHA |
+					       MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
+				} else {
+					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
+				}
+			}
+			pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num,
+					stage);
+			break;
+
+		default:
+			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+			pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num,
+					stage);
+			break;
+		}
+
+		if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
+			mixer_op_mode = 0;
+
+		__mdss_mdp_mixer_update_cfg_masks(pipe->num,
+				pipe->multirect.num, stage, &mixercfg);
+
+		trace_mdp_sspp_change(pipe);
+
+		pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
+					blend_op, fg_alpha, bg_alpha);
+		mdp_mixer_write(mixer_hw,
+			off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
+		mdp_mixer_write(mixer_hw,
+			off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA, fg_alpha);
+		mdp_mixer_write(mixer_hw,
+			off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA, bg_alpha);
+	}
+
+	if (mixer->cursor_enabled)
+		mixercfg.cursor_enabled = true;
+
+update_mixer:
+	/* mixers 0..4 flush via bits 6..10, anything higher via bit 20 */
+	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer_hw);
+	ctl_hw->flush_bits |= BIT(mixer_num < 5 ? 6 + mixer_num : 20);
+
+	/* Read GC enable/disable status on LM */
+	mixer_op_mode |=
+		(mdp_mixer_read(mixer_hw, MDSS_MDP_REG_LM_OP_MODE) & BIT(0));
+
+	if (mixer->src_split_req && mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+		mixer_op_mode |= BIT(31);
+
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OP_MODE, mixer_op_mode);
+
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_0,
+		(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16));
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_1,
+		mdata->bcolor2 & 0xFFF);
+
+	__mdss_mdp_mixer_write_cfg(mixer_hw, &mixercfg);
+
+	pr_debug("mixer=%d hw=%d op_mode=0x%08x w=%d h=%d bc0=0x%x bc1=0x%x\n",
+		mixer->num, mixer_hw->num,
+		mixer_op_mode, mixer->roi.w, mixer->roi.h,
+		(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16),
+		mdata->bcolor2 & 0xFFF);
+	MDSS_XLOG(mixer->num, mixer_hw->num,
+		mixer_op_mode, mixer->roi.h, mixer->roi.w);
+}
+
+/*
+ * mdss_mdp_mixer_addr_setup() - allocate and map the mixer descriptor array
+ * @mdata: MDSS global data
+ * @mixer_offsets: register offsets of each mixer, @len entries
+ * @dspp_offsets: DSPP register offsets (INTF mixers only, may be NULL)
+ * @pingpong_offsets: pingpong register offsets (INTF mixers only, may be NULL)
+ * @type: MDSS_MDP_MIXER_TYPE_INTF or MDSS_MDP_MIXER_TYPE_WRITEBACK
+ * @len: number of mixers of this type
+ *
+ * In WFD-shared mode one extra writeback mixer slot is allocated and
+ * initialized as a copy of the last one, for concurrent line/block mode.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL on bad type.
+ */
+int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata,
+	 u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets,
+	 u32 type, u32 len)
+{
+	struct mdss_mdp_mixer *head;
+	u32 i;
+	int rc = 0;
+	u32 size = len;
+
+	if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+			(mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
+		size++;
+
+	/* devm_kcalloc checks the count * size multiplication for overflow */
+	head = devm_kcalloc(&mdata->pdev->dev, size,
+			sizeof(struct mdss_mdp_mixer), GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup mixer type=%d :kzalloc fail\n",
+			type);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].type = type;
+		head[i].base = mdata->mdss_io.base + mixer_offsets[i];
+		head[i].ref_cnt = 0;
+		head[i].num = i;
+		if (type == MDSS_MDP_MIXER_TYPE_INTF && dspp_offsets
+				&& pingpong_offsets) {
+			if (mdata->ndspp > i)
+				head[i].dspp_base = mdata->mdss_io.base +
+						dspp_offsets[i];
+			head[i].pingpong_base = mdata->mdss_io.base +
+					pingpong_offsets[i];
+		}
+	}
+
+	/*
+	 * Duplicate the last writeback mixer for concurrent line and block mode
+	 * operations
+	 */
+	if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+			(mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
+		head[len] = head[len - 1];
+
+	switch (type) {
+
+	case MDSS_MDP_MIXER_TYPE_INTF:
+		mdata->mixer_intf = head;
+		break;
+
+	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
+		mdata->mixer_wb = head;
+		break;
+
+	default:
+		pr_err("Invalid mixer type=%d\n", type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_ctl_addr_setup() - allocate and map the ctl descriptor array
+ * @mdata: MDSS global data
+ * @ctl_offsets: register offsets of each ctl block, @len entries
+ * @len: number of hardware ctl blocks
+ *
+ * In WFD-shared mode an additional virtual ctl (a copy of the last real
+ * one, guarded by a shared mutex) is allocated so line-mode and block-mode
+ * writeback can be driven concurrently on the same hardware.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata,
+	u32 *ctl_offsets,  u32 len)
+{
+	struct mdss_mdp_ctl *head;
+	struct mutex *shared_lock = NULL;
+	u32 i;
+	u32 size = len;
+
+	if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED) {
+		size++;
+		shared_lock = devm_kzalloc(&mdata->pdev->dev,
+					   sizeof(struct mutex),
+					   GFP_KERNEL);
+		if (!shared_lock) {
+			pr_err("unable to allocate mem for mutex\n");
+			return -ENOMEM;
+		}
+		mutex_init(shared_lock);
+	}
+
+	/* devm_kcalloc checks the count * size multiplication for overflow */
+	head = devm_kcalloc(&mdata->pdev->dev, size,
+			sizeof(struct mdss_mdp_ctl), GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup ctl and wb: kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].num = i;
+		head[i].base = (mdata->mdss_io.base) + ctl_offsets[i];
+		head[i].ref_cnt = 0;
+	}
+
+	if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED) {
+		head[len - 1].shared_lock = shared_lock;
+		/*
+		 * Allocate a virtual ctl to be able to perform simultaneous
+		 * line mode and block mode operations on the same
+		 * writeback block
+		 */
+		head[len] = head[len - 1];
+		head[len].num = head[len - 1].num;
+	}
+	mdata->ctl_off = head;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_wb_addr_setup() - allocate and describe the writeback blocks
+ * @mdata: MDSS global data
+ * @num_block_wb: number of block-mode (rotator/WFD) writeback blocks
+ * @num_intf_wb: number of interface-mode writeback blocks
+ *
+ * Block-mode entries come first and advertise rotator + WFD (plus UBWC
+ * when supported); the remaining entries are interface writebacks.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
+	u32 num_block_wb, u32 num_intf_wb)
+{
+	struct mdss_mdp_writeback *wb;
+	u32 total, i;
+
+	total = num_block_wb + num_intf_wb;
+	/* devm_kcalloc checks the count * size multiplication for overflow */
+	wb = devm_kcalloc(&mdata->pdev->dev, total,
+			sizeof(struct mdss_mdp_writeback), GFP_KERNEL);
+	if (!wb)
+		return -ENOMEM;
+
+	for (i = 0; i < total; i++) {
+		wb[i].num = i;
+		if (i < num_block_wb) {
+			wb[i].caps = MDSS_MDP_WB_ROTATOR | MDSS_MDP_WB_WFD;
+			if (mdss_mdp_is_ubwc_supported(mdata))
+				wb[i].caps |= MDSS_MDP_WB_UBWC;
+		} else {
+			wb[i].caps = MDSS_MDP_WB_WFD | MDSS_MDP_WB_INTF;
+		}
+	}
+
+	mdata->wb = wb;
+	mdata->nwb = total;
+	mutex_init(&mdata->wb_lock);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_mixer_get() - fetch a ctl's mixer by mux position
+ * @ctl: control path owning the mixers
+ * @mux: MDSS_MDP_MIXER_MUX_DEFAULT/LEFT for the left mixer,
+ *       MDSS_MDP_MIXER_MUX_RIGHT for the right mixer
+ *
+ * Return: the selected mixer, or NULL when @ctl is NULL or @mux is unknown.
+ */
+struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
+{
+	if (!ctl) {
+		pr_err("ctl not initialized\n");
+		return NULL;
+	}
+
+	if (mux == MDSS_MDP_MIXER_MUX_RIGHT)
+		return ctl->mixer_right;
+
+	if ((mux == MDSS_MDP_MIXER_MUX_DEFAULT) ||
+			(mux == MDSS_MDP_MIXER_MUX_LEFT))
+		return ctl->mixer_left;
+
+	return NULL;
+}
+
+/*
+ * mdss_mdp_get_staged_pipe() - look up the pipe staged at a blend slot
+ * @ctl: control path to query
+ * @mux: mixer selector (left/right/default)
+ * @stage: blend stage index
+ * @is_right_blend: true for the right blend container of the stage
+ *
+ * Return: the staged pipe, or NULL when nothing is staged there or the
+ * arguments are out of range.
+ */
+struct mdss_mdp_pipe *mdss_mdp_get_staged_pipe(struct mdss_mdp_ctl *ctl,
+	int mux, int stage, bool is_right_blend)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_mdp_mixer *mixer;
+	int index = (stage * MAX_PIPES_PER_STAGE) + (int)is_right_blend;
+
+	if (!ctl)
+		return NULL;
+
+	/* index == MAX_PIPES_PER_LM is already out of range, so warn on >= */
+	WARN_ON(index >= MAX_PIPES_PER_LM);
+
+	mixer = mdss_mdp_mixer_get(ctl, mux);
+	if (mixer && (index < MAX_PIPES_PER_LM))
+		pipe = mixer->stage_pipe[index];
+
+	pr_debug("%pS index=%d pipe%d\n", __builtin_return_address(0),
+		index, pipe ? pipe->num : -1);
+	return pipe;
+}
+
+/*
+ * mdss_mdp_get_pipe_flush_bits() - CTL flush bit for a pipe
+ * @pipe: pipe whose flush bit is wanted
+ *
+ * Return: the pipe's flush bitmask from the hwio table, or 0 (with a
+ * warning) for a NULL or out-of-range pipe.
+ */
+int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe)
+{
+	if (WARN_ON(!pipe || pipe->num >= MDSS_MDP_MAX_SSPP))
+		return 0;
+
+	return BIT(mdp_pipe_hwio[pipe->num].flush_bit);
+}
+
+/*
+ * mdss_mdp_async_ctl_flush() - write flush bits outside the commit path
+ * @mfd: framebuffer device whose ctl is flushed
+ * @flush_bits: bits to write to the CTL_FLUSH register
+ *
+ * Mirrors the flush to the split ctl when split-flush is not handled in
+ * hardware. Serialized against commit via ctl->flush_lock.
+ *
+ * Return: always 0.
+ */
+int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
+		u32 flush_bits)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	int ret = 0;
+
+	mutex_lock(&ctl->flush_lock);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+	if ((!ctl->split_flush_en) && sctl)
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+
+	mutex_unlock(&ctl->flush_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_mixer_pipe_update() - (re)stage a pipe on a mixer
+ * @pipe: pipe to stage
+ * @mixer: mixer to stage it on
+ * @params_changed: non-zero when staging parameters changed and the
+ *                  stage_pipe bookkeeping must be refreshed
+ *
+ * Clears the pipe from all blend containers, then re-stages it on the
+ * blend container matching its mixer_stage and blend side, and finally
+ * accumulates the pipe's flush bit on the ctl.
+ *
+ * Return: 0 on success, -EINVAL on NULL arguments or invalid stage.
+ */
+int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe,
+			 struct mdss_mdp_mixer *mixer, int params_changed)
+{
+	struct mdss_mdp_ctl *ctl;
+	int i, j, k;
+
+	if (!pipe)
+		return -EINVAL;
+	if (!mixer)
+		return -EINVAL;
+	ctl = mixer->ctl;
+	if (!ctl)
+		return -EINVAL;
+
+	if (pipe->mixer_stage >= MDSS_MDP_MAX_STAGE) {
+		pr_err("invalid mixer stage\n");
+		return -EINVAL;
+	}
+
+	pr_debug("pnum=%x mixer=%d stage=%d\n", pipe->num, mixer->num,
+			pipe->mixer_stage);
+
+	mutex_lock(&ctl->flush_lock);
+
+	if (params_changed) {
+		mixer->params_changed++;
+		for (i = MDSS_MDP_STAGE_UNUSED; i < MDSS_MDP_MAX_STAGE; i++) {
+			j = i * MAX_PIPES_PER_STAGE;
+
+			/*
+			 * this could lead to cases where left blend index is
+			 * not populated. For instance, where pipe is spanning
+			 * across layer mixers. But this is handled properly
+			 * within mixer programming code.
+			 */
+			if (pipe->is_right_blend)
+				j++;
+
+			/* First clear all blend containers for current stage */
+			for (k = 0; k < MAX_PIPES_PER_STAGE; k++) {
+				u32 ndx = (i * MAX_PIPES_PER_STAGE) + k;
+
+				if (mixer->stage_pipe[ndx] == pipe)
+					mixer->stage_pipe[ndx] = NULL;
+			}
+
+			/* then stage actual pipe on specific blend container */
+			if (i == pipe->mixer_stage)
+				mixer->stage_pipe[j] = pipe;
+		}
+	}
+
+	ctl->flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);
+
+	mutex_unlock(&ctl->flush_lock);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_mixer_unstage_all() - Unstage all pipes from mixer
+ * @mixer: Mixer from which to unstage all pipes
+ *
+ * Detaches every pipe currently attached to @mixer, bumping the
+ * params_changed counters of both the mixer and each detached pipe.
+ *
+ * NOTE: this will not update the pipe structure, and thus a full
+ * deinitialization or reconfiguration of all pipes is expected after this call.
+ */
+void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer)
+{
+	int idx;
+
+	if (!mixer)
+		return;
+
+	for (idx = 0; idx < MAX_PIPES_PER_LM; idx++) {
+		struct mdss_mdp_pipe *staged = mixer->stage_pipe[idx];
+
+		if (!staged)
+			continue;
+
+		mixer->stage_pipe[idx] = NULL;
+		mixer->params_changed++;
+		staged->params_changed++;
+	}
+}
+
+/*
+ * mdss_mdp_mixer_pipe_unstage() - remove a single pipe from a mixer
+ * @pipe: pipe to unstage
+ * @mixer: mixer it is staged on
+ *
+ * Looks for the pipe at its expected blend container first; if it is not
+ * there, scans all containers (warning about the mismatch) before
+ * clearing it. A pipe that is not staged at all is not an error.
+ *
+ * Return: 0 on success (including pipe-not-found), -EINVAL on NULL args.
+ */
+int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe,
+			 struct mdss_mdp_mixer *mixer)
+{
+	int i, right_blend;
+
+	if (!pipe)
+		return -EINVAL;
+	if (!mixer)
+		return -EINVAL;
+
+	right_blend = pipe->is_right_blend ? 1 : 0;
+	i = (pipe->mixer_stage * MAX_PIPES_PER_STAGE) + right_blend;
+	if ((i < MAX_PIPES_PER_LM) && (pipe == mixer->stage_pipe[i])) {
+		pr_debug("unstage p%d from %s side of stage=%d lm=%d ndx=%d\n",
+			pipe->num, right_blend ? "right" : "left",
+			pipe->mixer_stage, mixer->num, i);
+	} else {
+		int stage;
+
+		/* pipe not where expected; search every blend container */
+		for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+			if (pipe != mixer->stage_pipe[i])
+				continue;
+
+			stage = i / MAX_PIPES_PER_STAGE;
+			right_blend = i & 1;
+
+			pr_warn("lm=%d pipe #%d stage=%d with %s blend, unstaged from %s side of stage=%d!\n",
+				mixer->num, pipe->num, pipe->mixer_stage,
+				pipe->is_right_blend ? "right" : "left",
+				right_blend ? "right" : "left", stage);
+			break;
+		}
+
+		/* pipe not found, not a failure */
+		if (i == MAX_PIPES_PER_LM)
+			return 0;
+	}
+
+	mixer->params_changed++;
+	mixer->stage_pipe[i] = NULL;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ctl_update_fps() - apply a pending dynamic-fps change
+ * @ctl: control path whose panel fps may need reconfiguring
+ *
+ * No-op unless the panel supports dynamic fps, a config_fps_fnc is wired
+ * up and a new fps request has been recorded in panel_info. The target
+ * fps is derived from the panel timing for porch/clk based dfps modes,
+ * otherwise taken directly from pinfo->new_fps.
+ *
+ * Return: 0 on success or nothing-to-do, -ENODEV when panel data or mfd
+ * is missing, or the error from config_fps_fnc.
+ */
+int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_overlay_private *mdp5_data;
+	int ret = 0;
+	int new_fps;
+
+	if (!ctl->panel_data || !ctl->mfd)
+		return -ENODEV;
+
+	pinfo = &ctl->panel_data->panel_info;
+
+	if (!pinfo->dynamic_fps || !ctl->ops.config_fps_fnc)
+		return 0;
+
+	if (!pinfo->default_fps) {
+		/* we haven't got any call to update the fps */
+		return 0;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data)
+		return -ENODEV;
+
+	/*
+	 * Panel info is already updated with the new fps info,
+	 * so we need to lock the data to make sure the panel info
+	 * is not updated while we reconfigure the HW.
+	 */
+	mutex_lock(&mdp5_data->dfps_lock);
+
+	if ((pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) ||
+		(pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) ||
+		(pinfo->dfps_update ==
+			DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) ||
+		(pinfo->dfps_update ==
+			DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) ||
+		pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
+		new_fps = mdss_panel_get_framerate(pinfo,
+				FPS_RESOLUTION_DEFAULT);
+	} else {
+		new_fps = pinfo->new_fps;
+	}
+
+	pr_debug("fps new:%d old:%d\n", new_fps,
+		pinfo->current_fps);
+
+	if (new_fps == pinfo->current_fps) {
+		pr_debug("FPS is already %d\n", new_fps);
+		ret = 0;
+		goto exit;
+	}
+
+	ret = ctl->ops.config_fps_fnc(ctl, new_fps);
+	if (!ret)
+		pr_debug("fps set to %d\n", new_fps);
+	else
+		pr_err("Failed to configure %d fps rc=%d\n",
+			new_fps, ret);
+
+exit:
+	mutex_unlock(&mdp5_data->dfps_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_display_wait4comp() - wait for the current frame to complete
+ * @ctl: control path to wait on
+ *
+ * Blocks on the ctl's wait_fnc, then updates perf state. On MDP rev 1.3.x
+ * it additionally applies an AHB clock-gating workaround: the AHB bit is
+ * only cleared once all previously flushed blocks have latched.
+ *
+ * Return: 0 on success, -ENODEV for a NULL ctl, or the wait_fnc /
+ * mutex_lock_interruptible() error.
+ */
+int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
+{
+	int ret;
+	u32 reg_data, flush_data;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!ctl) {
+		pr_err("invalid ctl\n");
+		return -ENODEV;
+	}
+
+	ret = mutex_lock_interruptible(&ctl->lock);
+	if (ret)
+		return ret;
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	ATRACE_BEGIN("wait_fnc");
+	if (ctl->ops.wait_fnc)
+		ret = ctl->ops.wait_fnc(ctl, NULL);
+	ATRACE_END("wait_fnc");
+
+	trace_mdp_commit(ctl);
+
+	mdss_mdp_ctl_perf_update(ctl, 0, false);
+	mdata->bw_limit_pending = false;
+
+	/* rev 1.3.x AHB clock workaround: clear BIT(28) once flush latched */
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103)) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		reg_data = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH);
+		flush_data = readl_relaxed(mdata->mdp_base + AHB_CLK_OFFSET);
+		if ((flush_data & BIT(28)) &&
+		    !(ctl->flush_reg_data & reg_data)) {
+
+			flush_data &= ~(BIT(28));
+			writel_relaxed(flush_data,
+					 mdata->mdp_base + AHB_CLK_OFFSET);
+			ctl->flush_reg_data = 0;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	}
+
+	mutex_unlock(&ctl->lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_display_wait4pingpong() - wait for pingpong done on cmd panels
+ * @ctl: control path to wait on
+ * @use_lock: take ctl->lock around the wait (false when the caller
+ *            already holds it)
+ *
+ * Waits on both the main and (if present) split ctl's pingpong handlers.
+ * If either wait times out, both ctls are reset and the DSI write pointer
+ * is re-initialized as recovery.
+ *
+ * Return: 0 on success, or the last wait/lock error.
+ */
+int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl, bool use_lock)
+{
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret;
+	bool recovery_needed = false;
+
+	if (use_lock) {
+		ret = mutex_lock_interruptible(&ctl->lock);
+		if (ret)
+			return ret;
+	}
+
+	if (!mdss_mdp_ctl_is_power_on(ctl) || !ctl->ops.wait_pingpong) {
+		if (use_lock)
+			mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	ATRACE_BEGIN("wait_pingpong");
+	ret = ctl->ops.wait_pingpong(ctl, NULL);
+	ATRACE_END("wait_pingpong");
+	if (ret)
+		recovery_needed = true;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	if (sctl && sctl->ops.wait_pingpong) {
+		ATRACE_BEGIN("wait_pingpong sctl");
+		ret = sctl->ops.wait_pingpong(sctl, NULL);
+		ATRACE_END("wait_pingpong sctl");
+		if (ret)
+			recovery_needed = true;
+	}
+
+	ctl->mdata->bw_limit_pending = false;
+	/* timeout on either ctl: reset hardware and DSI write pointer */
+	if (recovery_needed) {
+		mdss_mdp_ctl_reset(ctl, true);
+		if (sctl)
+			mdss_mdp_ctl_reset(sctl, true);
+
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RESET_WRITE_PTR,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		pr_debug("pingpong timeout recovery finished\n");
+	}
+
+	if (use_lock)
+		mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_force_border_color() - push border color to both mixers once
+ * @ctl: control path whose mixers are forced to border color
+ *
+ * Used when fence waits time out: temporarily forces the blank screen
+ * state so mixer setup programs border color instead of potentially
+ * invalid buffers, then restores the default state and marks both mixers
+ * dirty so the next frame reprograms them fully.
+ */
+static void mdss_mdp_force_border_color(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool lm_swap = mdss_mdp_is_lm_swap_needed(mdata, ctl);
+
+	ctl->force_screen_state = MDSS_SCREEN_FORCE_BLANK;
+
+	if (sctl)
+		sctl->force_screen_state = MDSS_SCREEN_FORCE_BLANK;
+
+	mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_LEFT, lm_swap);
+	mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_RIGHT, lm_swap);
+
+	ctl->force_screen_state = MDSS_SCREEN_DEFAULT;
+	if (sctl)
+		sctl->force_screen_state = MDSS_SCREEN_DEFAULT;
+
+	/*
+	 * Update the params changed for mixer for the next frame to
+	 * configure the mixer setup properly.
+	 */
+	if (ctl->mixer_left)
+		ctl->mixer_left->params_changed++;
+	if (ctl->mixer_right)
+		ctl->mixer_right->params_changed++;
+}
+
+/*
+ * mdss_mdp_display_commit() - program and kick off one display frame
+ * @ctl: control path to commit
+ * @arg: opaque argument forwarded to prepare_fnc/display_fnc
+ * @commit_cb: optional per-stage callbacks (setup done, ready for kickoff)
+ *
+ * Full commit sequence: perf/bandwidth bookkeeping, mixer programming,
+ * postprocessing setup, ROI propagation to the panel, flush-bit writes
+ * and finally the panel-specific kickoff. Split-display (sctl),
+ * pingpong-split and lm-swap topologies are all handled here.
+ *
+ * Return: 0 on success, -ENODEV for a NULL ctl, or a prepare/display error.
+ */
+int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
+	struct mdss_mdp_commit_cb *commit_cb)
+{
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret = 0;
+	bool is_bw_released, split_lm_valid;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 ctl_flush_bits = 0, sctl_flush_bits = 0;
+
+	if (!ctl) {
+		pr_err("display function not set\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctl->lock);
+	pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	split_lm_valid = mdss_mdp_is_both_lm_valid(ctl);
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mutex_lock(&ctl->flush_lock);
+
+	/*
+	 * We could have released the bandwidth if there were no transactions
+	 * pending, so we want to re-calculate the bandwidth in this situation
+	 */
+	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
+	if (is_bw_released) {
+		if (sctl)
+			is_bw_released =
+				!mdss_mdp_ctl_perf_get_transaction_status(sctl);
+	}
+
+	/*
+	 * left update on any topology or
+	 * any update on MDP_DUAL_LM_SINGLE_DISPLAY topology.
+	 */
+	if (ctl->mixer_left->valid_roi ||
+	    (is_dual_lm_single_display(ctl->mfd) &&
+	     ctl->mixer_right->valid_roi))
+		mdss_mdp_ctl_perf_set_transaction_status(ctl,
+				PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY);
+
+	/* right update on MDP_DUAL_LM_DUAL_DISPLAY */
+	if (sctl && sctl->mixer_left->valid_roi)
+		mdss_mdp_ctl_perf_set_transaction_status(sctl,
+			PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY);
+
+	if (ctl->mixer_right)
+		ctl->mixer_right->src_split_req =
+			mdata->has_src_split && split_lm_valid;
+
+	/* reprogram mixers only when something actually changed */
+	if (is_bw_released || ctl->force_screen_state ||
+	    (ctl->mixer_left->params_changed) ||
+	    (ctl->mixer_right && ctl->mixer_right->params_changed)) {
+		bool lm_swap = mdss_mdp_is_lm_swap_needed(mdata, ctl);
+
+		ATRACE_BEGIN("prepare_fnc");
+		if (ctl->ops.prepare_fnc)
+			ret = ctl->ops.prepare_fnc(ctl, arg);
+		ATRACE_END("prepare_fnc");
+		if (ret) {
+			pr_err("error preparing display\n");
+			mutex_unlock(&ctl->flush_lock);
+			goto done;
+		}
+
+		ATRACE_BEGIN("mixer_programming");
+		mdss_mdp_ctl_perf_update(ctl, 1, false);
+
+		mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_LEFT, lm_swap);
+		mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_RIGHT, lm_swap);
+
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode);
+		ctl->flush_bits |= BIT(17);	/* CTL */
+
+		if (sctl) {
+			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP,
+					sctl->opmode);
+			sctl->flush_bits |= BIT(17);
+			sctl_flush_bits = sctl->flush_bits;
+		}
+		ATRACE_END("mixer_programming");
+	}
+
+	/*
+	 * With partial frame update, enable split display bit only
+	 * when validity of ROI's on both the DSI's are identical.
+	 */
+	if (sctl)
+		mdss_mdp_ctl_split_display_enable(split_lm_valid, ctl, sctl);
+
+	ATRACE_BEGIN("postproc_programming");
+	if (ctl->is_video_mode && ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER)
+		/* postprocessing setup, including dspp */
+		mdss_mdp_pp_setup_locked(ctl);
+
+	/* hardware split-flush folds sctl's bits into ctl's single flush */
+	if (sctl) {
+		if (ctl->split_flush_en) {
+			ctl->flush_bits |= sctl->flush_bits;
+			sctl->flush_bits = 0;
+			sctl_flush_bits = 0;
+		} else {
+			sctl_flush_bits = sctl->flush_bits;
+		}
+	}
+	ctl_flush_bits = ctl->flush_bits;
+
+	ATRACE_END("postproc_programming");
+
+	mutex_unlock(&ctl->flush_lock);
+
+	ATRACE_BEGIN("frame_ready");
+	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CFG_DONE);
+	if (commit_cb)
+		commit_cb->commit_cb_fnc(
+			MDP_COMMIT_STAGE_SETUP_DONE,
+			commit_cb->data);
+	ret = mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+
+	/*
+	 * When wait for fence timed out, driver ignores the fences
+	 * for signalling. Hardware needs to access only on the buffers
+	 * that are valid and driver needs to ensure it. This function
+	 * would set the mixer state to border when there is timeout.
+	 */
+	if (ret == NOTIFY_BAD) {
+		mdss_mdp_force_border_color(ctl);
+		ctl_flush_bits |= (ctl->flush_bits | BIT(17));
+		if (sctl && (!ctl->split_flush_en))
+			sctl_flush_bits |= (sctl->flush_bits | BIT(17));
+		ret = 0;
+	}
+
+	ATRACE_END("frame_ready");
+
+	if (ctl->ops.wait_pingpong && !mdata->serialize_wait4pp)
+		mdss_mdp_display_wait4pingpong(ctl, false);
+
+	/* Moved pp programming to post ping pong */
+	if (!ctl->is_video_mode && ctl->mfd &&
+	    ctl->mfd->dcm_state != DTM_ENTER) {
+		/* postprocessing setup, including dspp */
+		mutex_lock(&ctl->flush_lock);
+		mdss_mdp_pp_setup_locked(ctl);
+		if (sctl) {
+			if (ctl->split_flush_en) {
+				ctl->flush_bits |= sctl->flush_bits;
+				sctl->flush_bits = 0;
+				sctl_flush_bits = 0;
+			} else {
+				sctl_flush_bits = sctl->flush_bits;
+			}
+		}
+		ctl_flush_bits = ctl->flush_bits;
+		mutex_unlock(&ctl->flush_lock);
+	}
+	/*
+	 * if serialize_wait4pp is false then roi_bkup used in wait4pingpong
+	 * will be of previous frame as expected.
+	 */
+	ctl->roi_bkup.w = ctl->roi.w;
+	ctl->roi_bkup.h = ctl->roi.h;
+
+	/*
+	 * update roi of panel_info which will be
+	 * used by dsi to set col_page addr of panel.
+	 */
+	if (ctl->panel_data &&
+	    ctl->panel_data->panel_info.partial_update_enabled) {
+
+		if (is_pingpong_split(ctl->mfd)) {
+			bool pp_split = false;
+			struct mdss_rect l_roi, r_roi, temp = {0};
+			u32 opmode = mdss_mdp_ctl_read(ctl,
+			     MDSS_MDP_REG_CTL_TOP) & ~0xF0; /* clear OUT_SEL */
+			/*
+			 * with pp split enabled, it is a requirement that both
+			 * panels share equal load, so split-point is center.
+			 */
+			u32 left_panel_w = left_lm_w_from_mfd(ctl->mfd) / 2;
+
+			mdss_rect_split(&ctl->roi, &l_roi, &r_roi,
+				left_panel_w);
+
+			/*
+			 * If update is only on left panel then we still send
+			 * zeroed out right panel ROIs to DSI driver. Based on
+			 * zeroed ROI, DSI driver identifies which panel is not
+			 * transmitting.
+			 */
+			ctl->panel_data->panel_info.roi = l_roi;
+			ctl->panel_data->next->panel_info.roi = r_roi;
+
+			/* based on the roi, update ctl topology */
+			if (!mdss_rect_cmp(&temp, &l_roi) &&
+			    !mdss_rect_cmp(&temp, &r_roi)) {
+				/* left + right */
+				opmode |= (ctl->intf_num << 4);
+				pp_split = true;
+			} else if (mdss_rect_cmp(&temp, &l_roi)) {
+				/* right only */
+				opmode |= (ctl->slave_intf_num << 4);
+				pp_split = false;
+			} else {
+				/* left only */
+				opmode |= (ctl->intf_num << 4);
+				pp_split = false;
+			}
+
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
+
+			mdss_mdp_ctl_pp_split_display_enable(pp_split, ctl);
+		} else {
+			/*
+			 * if single lm update on 3D mux topology, clear it.
+			 */
+			if ((is_dual_lm_single_display(ctl->mfd)) &&
+			    (ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE) &&
+			    (!mdss_mdp_is_both_lm_valid(ctl))) {
+
+				u32 opmode = mdss_mdp_ctl_read(ctl,
+					MDSS_MDP_REG_CTL_TOP);
+				opmode &= ~(0xF << 19); /* clear 3D Mux */
+
+				mdss_mdp_ctl_write(ctl,
+					MDSS_MDP_REG_CTL_TOP, opmode);
+			}
+
+			ctl->panel_data->panel_info.roi = ctl->roi;
+			if (sctl && sctl->panel_data)
+				sctl->panel_data->panel_info.roi = sctl->roi;
+		}
+	}
+
+	if (commit_cb)
+		commit_cb->commit_cb_fnc(MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
+			commit_cb->data);
+
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC) &&
+	    !bitmap_empty(mdata->bwc_enable_map, MAX_DRV_SUP_PIPES))
+		mdss_mdp_bwcpanic_ctrl(mdata, true);
+
+	ATRACE_BEGIN("flush_kickoff");
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
+	if (sctl && sctl_flush_bits) {
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
+			sctl_flush_bits);
+		sctl->flush_bits = 0;
+	}
+
+	MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
+		split_lm_valid);
+	wmb(); /* ensure write is finished before progressing */
+	ctl->flush_reg_data = ctl_flush_bits;
+	ctl->flush_bits = 0;
+
+	mdss_mdp_mixer_update_pipe_map(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	mdss_mdp_mixer_update_pipe_map(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+
+	/* right-only kickoff */
+	if (!ctl->mixer_left->valid_roi &&
+	    sctl && sctl->mixer_left->valid_roi) {
+		/*
+		 * Separate kickoff on DSI1 is needed only when we have
+		 * ONLY right half updating on a dual DSI panel
+		 */
+		if (sctl->ops.display_fnc)
+			ret = sctl->ops.display_fnc(sctl, arg);
+	} else {
+		if (ctl->ops.display_fnc)
+			ret = ctl->ops.display_fnc(ctl, arg); /* DSI0 kickoff */
+	}
+
+	if (ret)
+		pr_warn("ctl %d error displaying frame\n", ctl->num);
+
+	ctl->play_cnt++;
+	ATRACE_END("flush_kickoff");
+
+done:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_notifier_register() - register a frame-event notifier
+ * @ctl: control path to attach the notifier to
+ * @notifier: notifier block to register
+ *
+ * Also registers on the split-display peer ctl when one exists.
+ */
+void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
+	struct notifier_block *notifier)
+{
+	struct mdss_mdp_ctl *split_ctl = mdss_mdp_get_split_ctl(ctl);
+
+	blocking_notifier_chain_register(&ctl->notifier_head, notifier);
+	if (split_ctl)
+		blocking_notifier_chain_register(&split_ctl->notifier_head,
+				notifier);
+}
+
+/*
+ * mdss_mdp_ctl_notifier_unregister() - unregister a frame-event notifier
+ * @ctl: control path to detach the notifier from
+ * @notifier: notifier block to unregister
+ *
+ * Also unregisters from the split-display peer ctl when one exists.
+ */
+void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
+	struct notifier_block *notifier)
+{
+	struct mdss_mdp_ctl *split_ctl = mdss_mdp_get_split_ctl(ctl);
+
+	blocking_notifier_chain_unregister(&ctl->notifier_head, notifier);
+	if (split_ctl)
+		blocking_notifier_chain_unregister(&split_ctl->notifier_head,
+				notifier);
+}
+
+/*
+ * mdss_mdp_ctl_notify() - broadcast a frame event on the ctl's chain
+ * @ctl: control path whose notifier chain is called
+ * @event: MDP_NOTIFY_FRAME_* event code
+ *
+ * Return: notifier_call_chain() result (e.g. NOTIFY_OK / NOTIFY_BAD).
+ */
+int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event)
+{
+	return blocking_notifier_call_chain(&ctl->notifier_head, event, ctl);
+}
+
+/*
+ * mdss_mdp_get_ctl_mixers() - list mixer numbers driving a framebuffer
+ * @fb_num: framebuffer index to look up
+ * @mixer_id: output array filled with up to two mixer numbers
+ *
+ * Scans the powered-on ctls for one attached to @fb_num and reports its
+ * left (and right, if present) mixer numbers.
+ *
+ * Return: number of mixers written to @mixer_id (0 when none found).
+ */
+int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id)
+{
+	int i;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_data_type *mdata;
+	u32 mixer_cnt = 0;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	mdata = mdss_mdp_get_mdata();
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		if ((mdss_mdp_ctl_is_power_on(ctl)) && (ctl->mfd) &&
+			(ctl->mfd->index == fb_num)) {
+			if (ctl->mixer_left) {
+				mixer_id[mixer_cnt] = ctl->mixer_left->num;
+				mixer_cnt++;
+			}
+			if (mixer_cnt && ctl->mixer_right) {
+				mixer_id[mixer_cnt] = ctl->mixer_right->num;
+				mixer_cnt++;
+			}
+			if (mixer_cnt)
+				break;
+		}
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	return mixer_cnt;
+}
+
+/**
+ * @mdss_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
+ * @ctl: Pointer to ctl structure to be switched.
+ * @return_type: wb_type of the ctl to be switched to.
+ *
+ * Virtual mixer switch should be performed only when there is no
+ * dedicated wfd block and writeback block is shared.
+ */
+struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
+					       u32 return_type)
+{
+	int i;
+	struct mdss_data_type *mdata = ctl->mdata;
+
+	if (ctl->wb_type == return_type) {
+		mdata->mixer_switched = false;
+		return ctl;
+	}
+	/*
+	 * NOTE(review): the inclusive bound (i <= nctl) reaches the extra
+	 * virtual ctl that mdss_mdp_ctl_addr_setup() allocates only in
+	 * WFD-shared mode — presumably this path is never taken otherwise;
+	 * confirm callers honor the shared-writeback precondition above.
+	 */
+	for (i = 0; i <= mdata->nctl; i++) {
+		if (mdata->ctl_off[i].wb_type == return_type) {
+			pr_debug("switching mixer from ctl=%d to ctl=%d\n",
+				 ctl->num, mdata->ctl_off[i].num);
+			mdata->mixer_switched = true;
+			return mdata->ctl_off + i;
+		}
+	}
+	pr_err("unable to switch mixer to type=%d\n", return_type);
+	return NULL;
+}
+
+/*
+ * __mdss_mdp_mixer_handoff_helper() - stage a bootloader pipe on a mixer
+ * @mixer: mixer to stage the pipe on (stage MDSS_MDP_STAGE_UNUSED)
+ * @pipe: handed-off pipe
+ *
+ * MDP rev >= 1.3 allows two pipes at the unused stage (left and right
+ * blend containers); older revisions allow only one.
+ *
+ * Return: 0 on success, -EINVAL when the mixer is NULL or full.
+ */
+static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer,
+	struct mdss_mdp_pipe *pipe)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 right_blend = 0;
+
+	if (!mixer) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * It is possible to have more the one pipe staged on a single
+	 * layer mixer at same staging level.
+	 */
+	if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] != NULL) {
+		if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
+			pr_err("More than one pipe staged on mixer num %d\n",
+				mixer->num);
+			rc = -EINVAL;
+			goto error;
+		} else if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED + 1] !=
+			NULL) {
+			pr_err("More than two pipe staged on mixer num %d\n",
+				mixer->num);
+			rc = -EINVAL;
+			goto error;
+		} else {
+			right_blend = 1;
+		}
+	}
+
+	pr_debug("Staging pipe num %d on mixer num %d\n",
+		pipe->num, mixer->num);
+	mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED + right_blend] = pipe;
+	pipe->mixer_left = mixer;
+	pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+
+error:
+	return rc;
+}
+
+/**
+ * mdss_mdp_mixer_handoff() - Stages a given pipe on the appropriate mixer
+ * @ctl: pointer to the control structure associated with the overlay device.
+ * @num: the mixer number on which the pipe needs to be staged.
+ * @pipe: pointer to the pipe to be staged.
+ *
+ * Function stages a given pipe on either the left mixer or the right mixer
+ * for the control structure based on the mixer number. If the input mixer
+ * number does not match either of the mixers then an error is returned.
+ * This function is called during overlay handoff when certain pipes are
+ * already staged by the bootloader.
+ *
+ * Returns 0 on success or -EINVAL if @num matches neither mixer.
+ */
+int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
+	struct mdss_mdp_pipe *pipe)
+{
+	int rc = 0;
+	struct mdss_mdp_mixer *mx_left = ctl->mixer_left;
+	struct mdss_mdp_mixer *mx_right = ctl->mixer_right;
+
+	/*
+	 * For performance calculations, stage the handed off pipe
+	 * as MDSS_MDP_STAGE_UNUSED
+	 */
+	if (mx_left && (mx_left->num == num)) {
+		rc = __mdss_mdp_mixer_handoff_helper(mx_left, pipe);
+	} else if (mx_right && (mx_right->num == num)) {
+		rc = __mdss_mdp_mixer_handoff_helper(mx_right, pipe);
+	} else {
+		pr_err("pipe num %d staged on unallocated mixer num %d\n",
+			pipe->num, num);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * mdss_mdp_wb_alloc() - claim any free writeback block matching @caps
+ * @caps: capability bits the writeback block must advertise
+ * @reg_index: register-offset index used when each ctl has its own
+ *	wb offset (nctl == nwb_offsets, "virtual" writeback mode)
+ *
+ * Scans mdata->wb under wb_lock for an unreferenced block with the
+ * requested capabilities, takes the first reference via kref_init()
+ * and maps its register base. Returns the block, or NULL if none is
+ * free. Pair with mdss_mdp_wb_free().
+ */
+struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_writeback *wb = NULL;
+	int i;
+	bool wb_virtual_on;
+
+	/* virtual mode: one wb register offset per ctl */
+	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
+
+	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
+		return NULL;
+
+	mutex_lock(&mdata->wb_lock);
+
+	for (i = 0; i < mdata->nwb; i++) {
+		wb = mdata->wb + i;
+		/* refcount of zero means the block is free; claim it */
+		if ((wb->caps & caps) &&
+			(atomic_read(&wb->kref.refcount) == 0)) {
+			kref_init(&wb->kref);
+			break;
+		}
+		wb = NULL;
+	}
+	mutex_unlock(&mdata->wb_lock);
+
+	if (wb) {
+		wb->base = mdata->mdss_io.base;
+		if (wb_virtual_on)
+			wb->base += mdata->wb_offsets[reg_index];
+		else
+			wb->base += mdata->wb_offsets[i];
+	}
+
+	return wb;
+}
+
+/**
+ * mdss_mdp_is_wb_mdp_intf() - check whether writeback block @num drives
+ * an MDP interface
+ * @num: index into the mdata->wb array
+ * @reg_index: register-offset index, validated only in virtual wb mode
+ *
+ * Returns true when the block exists and advertises MDSS_MDP_WB_INTF.
+ */
+bool mdss_mdp_is_wb_mdp_intf(u32 num, u32 reg_index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_writeback *wb;
+	bool wb_virtual_on;
+
+	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
+
+	if (num >= mdata->nwb || (wb_virtual_on && reg_index >=
+			mdata->nwb_offsets))
+		return false;
+
+	/*
+	 * Pointer arithmetic on the array base cannot yield NULL, so the
+	 * previous "if (!wb)" check here was dead code and is dropped.
+	 */
+	wb = mdata->wb + num;
+
+	return (wb->caps & MDSS_MDP_WB_INTF) ? true : false;
+}
+
+/**
+ * mdss_mdp_wb_assign() - claim a specific writeback block by index
+ * @num: index of the wanted block in mdata->wb
+ * @reg_index: register-offset index used in virtual wb mode
+ *	(nctl == nwb_offsets)
+ *
+ * Like mdss_mdp_wb_alloc() but for one particular block: takes the
+ * first reference under wb_lock if the block is currently unreferenced
+ * and maps its register base. Returns NULL if @num/@reg_index is out
+ * of range or the block is already in use.
+ */
+struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 num, u32 reg_index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_writeback *wb = NULL;
+	bool wb_virtual_on;
+
+	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
+
+	if (num >= mdata->nwb)
+		return NULL;
+
+	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
+		return NULL;
+
+	mutex_lock(&mdata->wb_lock);
+	wb = mdata->wb + num;
+	/* refcount of zero means the block is free; claim it */
+	if (atomic_read(&wb->kref.refcount) == 0)
+		kref_init(&wb->kref);
+	else
+		wb = NULL;
+	mutex_unlock(&mdata->wb_lock);
+
+	if (!wb)
+		return NULL;
+
+	wb->base = mdata->mdss_io.base;
+	if (wb_virtual_on)
+		wb->base += mdata->wb_offsets[reg_index];
+	else
+		wb->base += mdata->wb_offsets[num];
+
+	return wb;
+}
+
+/* kref release callback; runs with mdata->wb_lock held (kref_put_mutex). */
+static void mdss_mdp_wb_release(struct kref *kref)
+{
+	struct mdss_mdp_writeback *wb =
+		container_of(kref, struct mdss_mdp_writeback, kref);
+
+	/*
+	 * container_of() is pointer arithmetic on a valid kref and can
+	 * never produce NULL, so no NULL check is needed; just drop the
+	 * mapped base to mark the block free.
+	 */
+	wb->base = NULL;
+}
+
+/**
+ * mdss_mdp_wb_free() - drop a reference on a writeback block
+ * @wb: block previously obtained via mdss_mdp_wb_alloc()/_assign()
+ *
+ * If this was the last reference, mdss_mdp_wb_release() is invoked with
+ * wb_lock held (kref_put_mutex) and the lock is released here.
+ */
+void mdss_mdp_wb_free(struct mdss_mdp_writeback *wb)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (kref_put_mutex(&wb->kref, mdss_mdp_wb_release,
+		&mdata->wb_lock))
+		mutex_unlock(&mdata->wb_lock);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
new file mode 100644
index 0000000..d24ff53
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -0,0 +1,1515 @@
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_debug.h"
+
+#define BUF_DUMP_LAST_N 10
+
+static struct debug_bus dbg_bus_8996[] = {
+
+ /*
+ * sspp0 - 0x188
+ * sspp1 - 0x298
+ * dspp - 0x348
+ * periph - 0x418
+ */
+
+ /* Unpack 0 sspp 0*/
+ { 0x188, 50, 2 },
+ { 0x188, 60, 2 },
+ { 0x188, 54, 2 },
+ { 0x188, 64, 2 },
+ { 0x188, 70, 2 },
+ { 0x188, 85, 2 },
+	/* Unpack 0 sspp 1 */
+ { 0x298, 50, 2 },
+ { 0x298, 60, 2 },
+ { 0x298, 54, 2 },
+ { 0x298, 64, 2 },
+ { 0x298, 70, 2 },
+ { 0x298, 85, 2 },
+ /* scheduler */
+ { 0x348, 130, 0 },
+ { 0x348, 130, 1 },
+ { 0x348, 130, 2 },
+ { 0x348, 130, 3 },
+ { 0x348, 130, 4 },
+ { 0x348, 130, 5 },
+
+ /* qseed */
+ {0x188, 6, 0},
+ {0x188, 6, 1},
+ {0x188, 26, 0},
+ {0x188, 26, 1},
+ {0x298, 6, 0},
+ {0x298, 6, 1},
+ {0x298, 26, 0},
+ {0x298, 26, 1},
+
+ /* scale */
+ {0x188, 16, 0},
+ {0x188, 16, 1},
+ {0x188, 36, 0},
+ {0x188, 36, 1},
+ {0x298, 16, 0},
+ {0x298, 16, 1},
+ {0x298, 36, 0},
+ {0x298, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { 0x188, 0, 0 },
+ { 0x188, 0, 1 },
+ { 0x188, 0, 2 },
+ { 0x188, 0, 3 },
+ { 0x188, 0, 4 },
+ { 0x188, 0, 5 },
+ { 0x188, 0, 6 },
+ { 0x188, 0, 7 },
+
+ { 0x188, 1, 0 },
+ { 0x188, 1, 1 },
+ { 0x188, 1, 2 },
+ { 0x188, 1, 3 },
+ { 0x188, 1, 4 },
+ { 0x188, 1, 5 },
+ { 0x188, 1, 6 },
+ { 0x188, 1, 7 },
+
+ { 0x188, 2, 0 },
+ { 0x188, 2, 1 },
+ { 0x188, 2, 2 },
+ { 0x188, 2, 3 },
+ { 0x188, 2, 4 },
+ { 0x188, 2, 5 },
+ { 0x188, 2, 6 },
+ { 0x188, 2, 7 },
+
+ { 0x188, 4, 0 },
+ { 0x188, 4, 1 },
+ { 0x188, 4, 2 },
+ { 0x188, 4, 3 },
+ { 0x188, 4, 4 },
+ { 0x188, 4, 5 },
+ { 0x188, 4, 6 },
+ { 0x188, 4, 7 },
+
+ { 0x188, 5, 0 },
+ { 0x188, 5, 1 },
+ { 0x188, 5, 2 },
+ { 0x188, 5, 3 },
+ { 0x188, 5, 4 },
+ { 0x188, 5, 5 },
+ { 0x188, 5, 6 },
+ { 0x188, 5, 7 },
+
+ /* vig 2 */
+ { 0x188, 20, 0 },
+ { 0x188, 20, 1 },
+ { 0x188, 20, 2 },
+ { 0x188, 20, 3 },
+ { 0x188, 20, 4 },
+ { 0x188, 20, 5 },
+ { 0x188, 20, 6 },
+ { 0x188, 20, 7 },
+
+ { 0x188, 21, 0 },
+ { 0x188, 21, 1 },
+ { 0x188, 21, 2 },
+ { 0x188, 21, 3 },
+ { 0x188, 21, 4 },
+ { 0x188, 21, 5 },
+ { 0x188, 21, 6 },
+ { 0x188, 21, 7 },
+
+ { 0x188, 22, 0 },
+ { 0x188, 22, 1 },
+ { 0x188, 22, 2 },
+ { 0x188, 22, 3 },
+ { 0x188, 22, 4 },
+ { 0x188, 22, 5 },
+ { 0x188, 22, 6 },
+ { 0x188, 22, 7 },
+
+ { 0x188, 24, 0 },
+ { 0x188, 24, 1 },
+ { 0x188, 24, 2 },
+ { 0x188, 24, 3 },
+ { 0x188, 24, 4 },
+ { 0x188, 24, 5 },
+ { 0x188, 24, 6 },
+ { 0x188, 24, 7 },
+
+ { 0x188, 25, 0 },
+ { 0x188, 25, 1 },
+ { 0x188, 25, 2 },
+ { 0x188, 25, 3 },
+ { 0x188, 25, 4 },
+ { 0x188, 25, 5 },
+ { 0x188, 25, 6 },
+ { 0x188, 25, 7 },
+
+ /* rgb 0 */
+ { 0x188, 10, 0 },
+ { 0x188, 10, 1 },
+ { 0x188, 10, 2 },
+ { 0x188, 10, 3 },
+ { 0x188, 10, 4 },
+ { 0x188, 10, 5 },
+ { 0x188, 10, 6 },
+ { 0x188, 10, 7 },
+
+ { 0x188, 11, 0 },
+ { 0x188, 11, 1 },
+ { 0x188, 11, 2 },
+ { 0x188, 11, 3 },
+ { 0x188, 11, 4 },
+ { 0x188, 11, 5 },
+ { 0x188, 11, 6 },
+ { 0x188, 11, 7 },
+
+ { 0x188, 12, 0 },
+ { 0x188, 12, 1 },
+ { 0x188, 12, 2 },
+ { 0x188, 12, 3 },
+ { 0x188, 12, 4 },
+ { 0x188, 12, 5 },
+ { 0x188, 12, 6 },
+ { 0x188, 12, 7 },
+
+ { 0x188, 14, 0 },
+ { 0x188, 14, 1 },
+ { 0x188, 14, 2 },
+ { 0x188, 14, 3 },
+ { 0x188, 14, 4 },
+ { 0x188, 14, 5 },
+ { 0x188, 14, 6 },
+ { 0x188, 14, 7 },
+
+ { 0x188, 15, 0 },
+ { 0x188, 15, 1 },
+ { 0x188, 15, 2 },
+ { 0x188, 15, 3 },
+ { 0x188, 15, 4 },
+ { 0x188, 15, 5 },
+ { 0x188, 15, 6 },
+ { 0x188, 15, 7 },
+
+ /* rgb 2 */
+ { 0x188, 30, 0 },
+ { 0x188, 30, 1 },
+ { 0x188, 30, 2 },
+ { 0x188, 30, 3 },
+ { 0x188, 30, 4 },
+ { 0x188, 30, 5 },
+ { 0x188, 30, 6 },
+ { 0x188, 30, 7 },
+
+ { 0x188, 31, 0 },
+ { 0x188, 31, 1 },
+ { 0x188, 31, 2 },
+ { 0x188, 31, 3 },
+ { 0x188, 31, 4 },
+ { 0x188, 31, 5 },
+ { 0x188, 31, 6 },
+ { 0x188, 31, 7 },
+
+ { 0x188, 32, 0 },
+ { 0x188, 32, 1 },
+ { 0x188, 32, 2 },
+ { 0x188, 32, 3 },
+ { 0x188, 32, 4 },
+ { 0x188, 32, 5 },
+ { 0x188, 32, 6 },
+ { 0x188, 32, 7 },
+
+ { 0x188, 34, 0 },
+ { 0x188, 34, 1 },
+ { 0x188, 34, 2 },
+ { 0x188, 34, 3 },
+ { 0x188, 34, 4 },
+ { 0x188, 34, 5 },
+ { 0x188, 34, 6 },
+ { 0x188, 34, 7 },
+
+ { 0x188, 35, 0 },
+ { 0x188, 35, 1 },
+ { 0x188, 35, 2 },
+ { 0x188, 35, 3 },
+ { 0x188, 35, 4 },
+ { 0x188, 35, 5 },
+ { 0x188, 35, 6 },
+ { 0x188, 35, 7 },
+
+ /* dma 0 */
+ { 0x188, 40, 0 },
+ { 0x188, 40, 1 },
+ { 0x188, 40, 2 },
+ { 0x188, 40, 3 },
+ { 0x188, 40, 4 },
+ { 0x188, 40, 5 },
+ { 0x188, 40, 6 },
+ { 0x188, 40, 7 },
+
+ { 0x188, 41, 0 },
+ { 0x188, 41, 1 },
+ { 0x188, 41, 2 },
+ { 0x188, 41, 3 },
+ { 0x188, 41, 4 },
+ { 0x188, 41, 5 },
+ { 0x188, 41, 6 },
+ { 0x188, 41, 7 },
+
+ { 0x188, 42, 0 },
+ { 0x188, 42, 1 },
+ { 0x188, 42, 2 },
+ { 0x188, 42, 3 },
+ { 0x188, 42, 4 },
+ { 0x188, 42, 5 },
+ { 0x188, 42, 6 },
+ { 0x188, 42, 7 },
+
+ { 0x188, 44, 0 },
+ { 0x188, 44, 1 },
+ { 0x188, 44, 2 },
+ { 0x188, 44, 3 },
+ { 0x188, 44, 4 },
+ { 0x188, 44, 5 },
+ { 0x188, 44, 6 },
+ { 0x188, 44, 7 },
+
+ { 0x188, 45, 0 },
+ { 0x188, 45, 1 },
+ { 0x188, 45, 2 },
+ { 0x188, 45, 3 },
+ { 0x188, 45, 4 },
+ { 0x188, 45, 5 },
+ { 0x188, 45, 6 },
+ { 0x188, 45, 7 },
+
+ /* cursor 0 */
+ { 0x188, 80, 0 },
+ { 0x188, 80, 1 },
+ { 0x188, 80, 2 },
+ { 0x188, 80, 3 },
+ { 0x188, 80, 4 },
+ { 0x188, 80, 5 },
+ { 0x188, 80, 6 },
+ { 0x188, 80, 7 },
+
+ { 0x188, 81, 0 },
+ { 0x188, 81, 1 },
+ { 0x188, 81, 2 },
+ { 0x188, 81, 3 },
+ { 0x188, 81, 4 },
+ { 0x188, 81, 5 },
+ { 0x188, 81, 6 },
+ { 0x188, 81, 7 },
+
+ { 0x188, 82, 0 },
+ { 0x188, 82, 1 },
+ { 0x188, 82, 2 },
+ { 0x188, 82, 3 },
+ { 0x188, 82, 4 },
+ { 0x188, 82, 5 },
+ { 0x188, 82, 6 },
+ { 0x188, 82, 7 },
+
+ { 0x188, 83, 0 },
+ { 0x188, 83, 1 },
+ { 0x188, 83, 2 },
+ { 0x188, 83, 3 },
+ { 0x188, 83, 4 },
+ { 0x188, 83, 5 },
+ { 0x188, 83, 6 },
+ { 0x188, 83, 7 },
+
+ { 0x188, 84, 0 },
+ { 0x188, 84, 1 },
+ { 0x188, 84, 2 },
+ { 0x188, 84, 3 },
+ { 0x188, 84, 4 },
+ { 0x188, 84, 5 },
+ { 0x188, 84, 6 },
+ { 0x188, 84, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { 0x298, 0, 0 },
+ { 0x298, 0, 1 },
+ { 0x298, 0, 2 },
+ { 0x298, 0, 3 },
+ { 0x298, 0, 4 },
+ { 0x298, 0, 5 },
+ { 0x298, 0, 6 },
+ { 0x298, 0, 7 },
+
+ { 0x298, 1, 0 },
+ { 0x298, 1, 1 },
+ { 0x298, 1, 2 },
+ { 0x298, 1, 3 },
+ { 0x298, 1, 4 },
+ { 0x298, 1, 5 },
+ { 0x298, 1, 6 },
+ { 0x298, 1, 7 },
+
+ { 0x298, 2, 0 },
+ { 0x298, 2, 1 },
+ { 0x298, 2, 2 },
+ { 0x298, 2, 3 },
+ { 0x298, 2, 4 },
+ { 0x298, 2, 5 },
+ { 0x298, 2, 6 },
+ { 0x298, 2, 7 },
+
+ { 0x298, 4, 0 },
+ { 0x298, 4, 1 },
+ { 0x298, 4, 2 },
+ { 0x298, 4, 3 },
+ { 0x298, 4, 4 },
+ { 0x298, 4, 5 },
+ { 0x298, 4, 6 },
+ { 0x298, 4, 7 },
+
+ { 0x298, 5, 0 },
+ { 0x298, 5, 1 },
+ { 0x298, 5, 2 },
+ { 0x298, 5, 3 },
+ { 0x298, 5, 4 },
+ { 0x298, 5, 5 },
+ { 0x298, 5, 6 },
+ { 0x298, 5, 7 },
+
+ /* vig 3 */
+ { 0x298, 20, 0 },
+ { 0x298, 20, 1 },
+ { 0x298, 20, 2 },
+ { 0x298, 20, 3 },
+ { 0x298, 20, 4 },
+ { 0x298, 20, 5 },
+ { 0x298, 20, 6 },
+ { 0x298, 20, 7 },
+
+ { 0x298, 21, 0 },
+ { 0x298, 21, 1 },
+ { 0x298, 21, 2 },
+ { 0x298, 21, 3 },
+ { 0x298, 21, 4 },
+ { 0x298, 21, 5 },
+ { 0x298, 21, 6 },
+ { 0x298, 21, 7 },
+
+ { 0x298, 22, 0 },
+ { 0x298, 22, 1 },
+ { 0x298, 22, 2 },
+ { 0x298, 22, 3 },
+ { 0x298, 22, 4 },
+ { 0x298, 22, 5 },
+ { 0x298, 22, 6 },
+ { 0x298, 22, 7 },
+
+ { 0x298, 24, 0 },
+ { 0x298, 24, 1 },
+ { 0x298, 24, 2 },
+ { 0x298, 24, 3 },
+ { 0x298, 24, 4 },
+ { 0x298, 24, 5 },
+ { 0x298, 24, 6 },
+ { 0x298, 24, 7 },
+
+ { 0x298, 25, 0 },
+ { 0x298, 25, 1 },
+ { 0x298, 25, 2 },
+ { 0x298, 25, 3 },
+ { 0x298, 25, 4 },
+ { 0x298, 25, 5 },
+ { 0x298, 25, 6 },
+ { 0x298, 25, 7 },
+
+ /* rgb 1 */
+ { 0x298, 10, 0 },
+ { 0x298, 10, 1 },
+ { 0x298, 10, 2 },
+ { 0x298, 10, 3 },
+ { 0x298, 10, 4 },
+ { 0x298, 10, 5 },
+ { 0x298, 10, 6 },
+ { 0x298, 10, 7 },
+
+ { 0x298, 11, 0 },
+ { 0x298, 11, 1 },
+ { 0x298, 11, 2 },
+ { 0x298, 11, 3 },
+ { 0x298, 11, 4 },
+ { 0x298, 11, 5 },
+ { 0x298, 11, 6 },
+ { 0x298, 11, 7 },
+
+ { 0x298, 12, 0 },
+ { 0x298, 12, 1 },
+ { 0x298, 12, 2 },
+ { 0x298, 12, 3 },
+ { 0x298, 12, 4 },
+ { 0x298, 12, 5 },
+ { 0x298, 12, 6 },
+ { 0x298, 12, 7 },
+
+ { 0x298, 14, 0 },
+ { 0x298, 14, 1 },
+ { 0x298, 14, 2 },
+ { 0x298, 14, 3 },
+ { 0x298, 14, 4 },
+ { 0x298, 14, 5 },
+ { 0x298, 14, 6 },
+ { 0x298, 14, 7 },
+
+ { 0x298, 15, 0 },
+ { 0x298, 15, 1 },
+ { 0x298, 15, 2 },
+ { 0x298, 15, 3 },
+ { 0x298, 15, 4 },
+ { 0x298, 15, 5 },
+ { 0x298, 15, 6 },
+ { 0x298, 15, 7 },
+
+ /* rgb 3 */
+ { 0x298, 30, 0 },
+ { 0x298, 30, 1 },
+ { 0x298, 30, 2 },
+ { 0x298, 30, 3 },
+ { 0x298, 30, 4 },
+ { 0x298, 30, 5 },
+ { 0x298, 30, 6 },
+ { 0x298, 30, 7 },
+
+ { 0x298, 31, 0 },
+ { 0x298, 31, 1 },
+ { 0x298, 31, 2 },
+ { 0x298, 31, 3 },
+ { 0x298, 31, 4 },
+ { 0x298, 31, 5 },
+ { 0x298, 31, 6 },
+ { 0x298, 31, 7 },
+
+ { 0x298, 32, 0 },
+ { 0x298, 32, 1 },
+ { 0x298, 32, 2 },
+ { 0x298, 32, 3 },
+ { 0x298, 32, 4 },
+ { 0x298, 32, 5 },
+ { 0x298, 32, 6 },
+ { 0x298, 32, 7 },
+
+ { 0x298, 34, 0 },
+ { 0x298, 34, 1 },
+ { 0x298, 34, 2 },
+ { 0x298, 34, 3 },
+ { 0x298, 34, 4 },
+ { 0x298, 34, 5 },
+ { 0x298, 34, 6 },
+ { 0x298, 34, 7 },
+
+ { 0x298, 35, 0 },
+ { 0x298, 35, 1 },
+ { 0x298, 35, 2 },
+ { 0x298, 35, 3 },
+ { 0x298, 35, 4 },
+ { 0x298, 35, 5 },
+ { 0x298, 35, 6 },
+ { 0x298, 35, 7 },
+
+ /* dma 1 */
+ { 0x298, 40, 0 },
+ { 0x298, 40, 1 },
+ { 0x298, 40, 2 },
+ { 0x298, 40, 3 },
+ { 0x298, 40, 4 },
+ { 0x298, 40, 5 },
+ { 0x298, 40, 6 },
+ { 0x298, 40, 7 },
+
+ { 0x298, 41, 0 },
+ { 0x298, 41, 1 },
+ { 0x298, 41, 2 },
+ { 0x298, 41, 3 },
+ { 0x298, 41, 4 },
+ { 0x298, 41, 5 },
+ { 0x298, 41, 6 },
+ { 0x298, 41, 7 },
+
+ { 0x298, 42, 0 },
+ { 0x298, 42, 1 },
+ { 0x298, 42, 2 },
+ { 0x298, 42, 3 },
+ { 0x298, 42, 4 },
+ { 0x298, 42, 5 },
+ { 0x298, 42, 6 },
+ { 0x298, 42, 7 },
+
+ { 0x298, 44, 0 },
+ { 0x298, 44, 1 },
+ { 0x298, 44, 2 },
+ { 0x298, 44, 3 },
+ { 0x298, 44, 4 },
+ { 0x298, 44, 5 },
+ { 0x298, 44, 6 },
+ { 0x298, 44, 7 },
+
+ { 0x298, 45, 0 },
+ { 0x298, 45, 1 },
+ { 0x298, 45, 2 },
+ { 0x298, 45, 3 },
+ { 0x298, 45, 4 },
+ { 0x298, 45, 5 },
+ { 0x298, 45, 6 },
+ { 0x298, 45, 7 },
+
+ /* cursor 1 */
+ { 0x298, 80, 0 },
+ { 0x298, 80, 1 },
+ { 0x298, 80, 2 },
+ { 0x298, 80, 3 },
+ { 0x298, 80, 4 },
+ { 0x298, 80, 5 },
+ { 0x298, 80, 6 },
+ { 0x298, 80, 7 },
+
+ { 0x298, 81, 0 },
+ { 0x298, 81, 1 },
+ { 0x298, 81, 2 },
+ { 0x298, 81, 3 },
+ { 0x298, 81, 4 },
+ { 0x298, 81, 5 },
+ { 0x298, 81, 6 },
+ { 0x298, 81, 7 },
+
+ { 0x298, 82, 0 },
+ { 0x298, 82, 1 },
+ { 0x298, 82, 2 },
+ { 0x298, 82, 3 },
+ { 0x298, 82, 4 },
+ { 0x298, 82, 5 },
+ { 0x298, 82, 6 },
+ { 0x298, 82, 7 },
+
+ { 0x298, 83, 0 },
+ { 0x298, 83, 1 },
+ { 0x298, 83, 2 },
+ { 0x298, 83, 3 },
+ { 0x298, 83, 4 },
+ { 0x298, 83, 5 },
+ { 0x298, 83, 6 },
+ { 0x298, 83, 7 },
+
+ { 0x298, 84, 0 },
+ { 0x298, 84, 1 },
+ { 0x298, 84, 2 },
+ { 0x298, 84, 3 },
+ { 0x298, 84, 4 },
+ { 0x298, 84, 5 },
+ { 0x298, 84, 6 },
+ { 0x298, 84, 7 },
+
+ /* dspp */
+ { 0x348, 13, 0 },
+ { 0x348, 19, 0 },
+ { 0x348, 14, 0 },
+ { 0x348, 14, 1 },
+ { 0x348, 14, 3 },
+ { 0x348, 20, 0 },
+ { 0x348, 20, 1 },
+ { 0x348, 20, 3 },
+
+ /* dither */
+ { 0x348, 18, 1 },
+ { 0x348, 24, 1 },
+
+ /* ppb_0 */
+ { 0x348, 31, 0 },
+ { 0x348, 33, 0 },
+ { 0x348, 35, 0 },
+ { 0x348, 42, 0 },
+
+ /* ppb_1 */
+ { 0x348, 32, 0 },
+ { 0x348, 34, 0 },
+ { 0x348, 36, 0 },
+ { 0x348, 43, 0 },
+
+ /* lm_lut */
+ { 0x348, 109, 0 },
+ { 0x348, 105, 0 },
+ { 0x348, 103, 0 },
+ { 0x348, 101, 0 },
+ { 0x348, 99, 0 },
+
+ /* tear-check */
+ { 0x418, 63, 0 },
+ { 0x418, 64, 0 },
+ { 0x418, 65, 0 },
+ { 0x418, 73, 0 },
+ { 0x418, 74, 0 },
+
+ /* crossbar */
+ { 0x348, 0, 0},
+
+ /* blend */
+ /* LM0 */
+ { 0x348, 63, 0},
+ { 0x348, 63, 1},
+ { 0x348, 63, 2},
+ { 0x348, 63, 3},
+ { 0x348, 63, 4},
+ { 0x348, 63, 5},
+ { 0x348, 63, 6},
+ { 0x348, 63, 7},
+
+ { 0x348, 64, 0},
+ { 0x348, 64, 1},
+ { 0x348, 64, 2},
+ { 0x348, 64, 3},
+ { 0x348, 64, 4},
+ { 0x348, 64, 5},
+ { 0x348, 64, 6},
+ { 0x348, 64, 7},
+
+ { 0x348, 65, 0},
+ { 0x348, 65, 1},
+ { 0x348, 65, 2},
+ { 0x348, 65, 3},
+ { 0x348, 65, 4},
+ { 0x348, 65, 5},
+ { 0x348, 65, 6},
+ { 0x348, 65, 7},
+
+ { 0x348, 66, 0},
+ { 0x348, 66, 1},
+ { 0x348, 66, 2},
+ { 0x348, 66, 3},
+ { 0x348, 66, 4},
+ { 0x348, 66, 5},
+ { 0x348, 66, 6},
+ { 0x348, 66, 7},
+
+ { 0x348, 67, 0},
+ { 0x348, 67, 1},
+ { 0x348, 67, 2},
+ { 0x348, 67, 3},
+ { 0x348, 67, 4},
+ { 0x348, 67, 5},
+ { 0x348, 67, 6},
+ { 0x348, 67, 7},
+
+ { 0x348, 68, 0},
+ { 0x348, 68, 1},
+ { 0x348, 68, 2},
+ { 0x348, 68, 3},
+ { 0x348, 68, 4},
+ { 0x348, 68, 5},
+ { 0x348, 68, 6},
+ { 0x348, 68, 7},
+
+ { 0x348, 69, 0},
+ { 0x348, 69, 1},
+ { 0x348, 69, 2},
+ { 0x348, 69, 3},
+ { 0x348, 69, 4},
+ { 0x348, 69, 5},
+ { 0x348, 69, 6},
+ { 0x348, 69, 7},
+
+ /* LM1 */
+ { 0x348, 70, 0},
+ { 0x348, 70, 1},
+ { 0x348, 70, 2},
+ { 0x348, 70, 3},
+ { 0x348, 70, 4},
+ { 0x348, 70, 5},
+ { 0x348, 70, 6},
+ { 0x348, 70, 7},
+
+ { 0x348, 71, 0},
+ { 0x348, 71, 1},
+ { 0x348, 71, 2},
+ { 0x348, 71, 3},
+ { 0x348, 71, 4},
+ { 0x348, 71, 5},
+ { 0x348, 71, 6},
+ { 0x348, 71, 7},
+
+ { 0x348, 72, 0},
+ { 0x348, 72, 1},
+ { 0x348, 72, 2},
+ { 0x348, 72, 3},
+ { 0x348, 72, 4},
+ { 0x348, 72, 5},
+ { 0x348, 72, 6},
+ { 0x348, 72, 7},
+
+ { 0x348, 73, 0},
+ { 0x348, 73, 1},
+ { 0x348, 73, 2},
+ { 0x348, 73, 3},
+ { 0x348, 73, 4},
+ { 0x348, 73, 5},
+ { 0x348, 73, 6},
+ { 0x348, 73, 7},
+
+ { 0x348, 74, 0},
+ { 0x348, 74, 1},
+ { 0x348, 74, 2},
+ { 0x348, 74, 3},
+ { 0x348, 74, 4},
+ { 0x348, 74, 5},
+ { 0x348, 74, 6},
+ { 0x348, 74, 7},
+
+ { 0x348, 75, 0},
+ { 0x348, 75, 1},
+ { 0x348, 75, 2},
+ { 0x348, 75, 3},
+ { 0x348, 75, 4},
+ { 0x348, 75, 5},
+ { 0x348, 75, 6},
+ { 0x348, 75, 7},
+
+ { 0x348, 76, 0},
+ { 0x348, 76, 1},
+ { 0x348, 76, 2},
+ { 0x348, 76, 3},
+ { 0x348, 76, 4},
+ { 0x348, 76, 5},
+ { 0x348, 76, 6},
+ { 0x348, 76, 7},
+
+ /* LM2 */
+ { 0x348, 77, 0},
+ { 0x348, 77, 1},
+ { 0x348, 77, 2},
+ { 0x348, 77, 3},
+ { 0x348, 77, 4},
+ { 0x348, 77, 5},
+ { 0x348, 77, 6},
+ { 0x348, 77, 7},
+
+ { 0x348, 78, 0},
+ { 0x348, 78, 1},
+ { 0x348, 78, 2},
+ { 0x348, 78, 3},
+ { 0x348, 78, 4},
+ { 0x348, 78, 5},
+ { 0x348, 78, 6},
+ { 0x348, 78, 7},
+
+ { 0x348, 79, 0},
+ { 0x348, 79, 1},
+ { 0x348, 79, 2},
+ { 0x348, 79, 3},
+ { 0x348, 79, 4},
+ { 0x348, 79, 5},
+ { 0x348, 79, 6},
+ { 0x348, 79, 7},
+
+ { 0x348, 80, 0},
+ { 0x348, 80, 1},
+ { 0x348, 80, 2},
+ { 0x348, 80, 3},
+ { 0x348, 80, 4},
+ { 0x348, 80, 5},
+ { 0x348, 80, 6},
+ { 0x348, 80, 7},
+
+ { 0x348, 81, 0},
+ { 0x348, 81, 1},
+ { 0x348, 81, 2},
+ { 0x348, 81, 3},
+ { 0x348, 81, 4},
+ { 0x348, 81, 5},
+ { 0x348, 81, 6},
+ { 0x348, 81, 7},
+
+ { 0x348, 82, 0},
+ { 0x348, 82, 1},
+ { 0x348, 82, 2},
+ { 0x348, 82, 3},
+ { 0x348, 82, 4},
+ { 0x348, 82, 5},
+ { 0x348, 82, 6},
+ { 0x348, 82, 7},
+
+ { 0x348, 83, 0},
+ { 0x348, 83, 1},
+ { 0x348, 83, 2},
+ { 0x348, 83, 3},
+ { 0x348, 83, 4},
+ { 0x348, 83, 5},
+ { 0x348, 83, 6},
+ { 0x348, 83, 7},
+
+ /* csc */
+ {0x188, 7, 0},
+ {0x188, 7, 1},
+ {0x188, 27, 0},
+ {0x188, 27, 1},
+ {0x298, 7, 0},
+ {0x298, 7, 1},
+ {0x298, 27, 0},
+ {0x298, 27, 1},
+
+ /* pcc */
+ { 0x188, 3, 3},
+ { 0x188, 23, 3},
+ { 0x188, 13, 3},
+ { 0x188, 33, 3},
+ { 0x188, 43, 3},
+ { 0x298, 3, 3},
+ { 0x298, 23, 3},
+ { 0x298, 13, 3},
+ { 0x298, 33, 3},
+ { 0x298, 43, 3},
+
+ /* spa */
+ { 0x188, 8, 0},
+ { 0x188, 28, 0},
+ { 0x298, 8, 0},
+ { 0x298, 28, 0},
+ { 0x348, 13, 0},
+ { 0x348, 19, 0},
+
+ /* igc */
+ { 0x188, 9, 0},
+ { 0x188, 9, 1},
+ { 0x188, 9, 3},
+ { 0x188, 29, 0},
+ { 0x188, 29, 1},
+ { 0x188, 29, 3},
+ { 0x188, 17, 0},
+ { 0x188, 17, 1},
+ { 0x188, 17, 3},
+ { 0x188, 37, 0},
+ { 0x188, 37, 1},
+ { 0x188, 37, 3},
+ { 0x188, 46, 0},
+ { 0x188, 46, 1},
+ { 0x188, 46, 3},
+
+ { 0x298, 9, 0},
+ { 0x298, 9, 1},
+ { 0x298, 9, 3},
+ { 0x298, 29, 0},
+ { 0x298, 29, 1},
+ { 0x298, 29, 3},
+ { 0x298, 17, 0},
+ { 0x298, 17, 1},
+ { 0x298, 17, 3},
+ { 0x298, 37, 0},
+ { 0x298, 37, 1},
+ { 0x298, 37, 3},
+ { 0x298, 46, 0},
+ { 0x298, 46, 1},
+ { 0x298, 46, 3},
+
+ { 0x348, 14, 0},
+ { 0x348, 14, 1},
+ { 0x348, 14, 3},
+ { 0x348, 20, 0},
+ { 0x348, 20, 1},
+ { 0x348, 20, 3},
+
+ { 0x418, 60, 0},
+};
+
+/*
+ * Real-time VBIF debug bus test points for the 8996 family.
+ * NOTE(review): field order looks like {disable reg, block reg, bit
+ * offset, block count, test point count} - confirm against
+ * struct vbif_debug_bus in mdss_mdp_debug.h.
+ */
+static struct vbif_debug_bus vbif_dbg_bus_8996[] = {
+	{0x214, 0x21c, 16, 2, 0x10}, /* arb clients */
+	{0x214, 0x21c, 0, 14, 0x13}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 14, 0xc}, /* xin blocks - clock side */
+};
+
+/* Non-real-time VBIF debug bus test points for the 8996 family. */
+static struct vbif_debug_bus nrt_vbif_dbg_bus_8996[] = {
+	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
+	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * mdss_mdp_hw_rev_debug_caps_init() - attach debug/VBIF bus tables for
+ * the detected MDP hardware revision
+ * @mdata: MDSS data with mdp_rev already probed
+ *
+ * Only the HW rev 1.7.x (8996) family has tables here; every other
+ * revision leaves dbg_bus NULL and dbg_bus_size 0 so dump code skips it.
+ */
+void mdss_mdp_hw_rev_debug_caps_init(struct mdss_data_type *mdata)
+{
+	mdata->dbg_bus = NULL;
+	mdata->dbg_bus_size = 0;
+
+	switch (mdata->mdp_rev) {
+	case MDSS_MDP_HW_REV_107:
+	case MDSS_MDP_HW_REV_107_1:
+	case MDSS_MDP_HW_REV_107_2:
+		mdata->dbg_bus = dbg_bus_8996;
+		mdata->dbg_bus_size = ARRAY_SIZE(dbg_bus_8996);
+		mdata->vbif_dbg_bus = vbif_dbg_bus_8996;
+		mdata->vbif_dbg_bus_size = ARRAY_SIZE(vbif_dbg_bus_8996);
+		mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_8996;
+		mdata->nrt_vbif_dbg_bus_size =
+			ARRAY_SIZE(nrt_vbif_dbg_bus_8996);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * mdss_mdp_debug_mid() - dump all registered register ranges whose
+ * xin_id matches @mid
+ * @mid: master/xin id to look up across every registered debug base
+ *
+ * Walks the debug base list and each base's range-dump list, printing
+ * and dumping any range tagged with @mid. Called from interrupt
+ * context, so MDP clocks are assumed to already be on.
+ */
+void mdss_mdp_debug_mid(u32 mid)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct range_dump_node *xlog_node;
+	struct mdss_debug_base *blk_base;
+	char *addr;
+	u32 len;
+
+	list_for_each_entry(blk_base, &mdd->base_list, head) {
+		list_for_each_entry(xlog_node, &blk_base->dump_list, head) {
+			if (xlog_node->xin_id != mid)
+				continue;
+
+			len = get_dump_range(&xlog_node->offset,
+				blk_base->max_offset);
+			addr = blk_base->base + xlog_node->offset.start;
+			pr_info("%s: mid:%d range_base=0x%pK start=0x%x end=0x%x\n",
+				xlog_node->range_name, mid, addr,
+				xlog_node->offset.start, xlog_node->offset.end);
+
+			/*
+			 * Next instruction assumes that MDP clocks are ON
+			 * because it is called from interrupt context
+			 */
+			mdss_dump_reg((const char *)xlog_node->range_name,
+				MDSS_DBG_DUMP_IN_LOG, addr, len,
+				&xlog_node->reg_dump, true);
+		}
+	}
+}
+
+/*
+ * Format a nanosecond timestamp as "seconds.microseconds".
+ *
+ * do_div() divides @ts in place (leaving whole seconds) and returns the
+ * remainder in nanoseconds. That remainder can be up to 999999999, so it
+ * must be scaled to microseconds before being printed into the six-digit
+ * "%06lu" fractional field; printing it raw overflows the field and
+ * mislabels the fraction.
+ */
+static void __print_time(char *buf, u32 size, u64 ts)
+{
+	unsigned long rem_ns = do_div(ts, NSEC_PER_SEC);
+
+	snprintf(buf, size, "%llu.%06lu", ts, rem_ns / NSEC_PER_USEC);
+}
+
+/*
+ * Print one mdss_mdp_data buffer entry to the seq_file: owning pipe
+ * (optional), state, address/size of plane 0, allocation time, and
+ * either the free time (for UNUSED buffers) or per-plane iommu
+ * domain/permission details.
+ */
+static void __print_buf(struct seq_file *s, struct mdss_mdp_data *buf,
+	bool show_pipe)
+{
+	char tmpbuf[20];
+	int i;
+	const char * const buf_stat_stmap[] = {
+		[MDP_BUF_STATE_UNUSED] = "UNUSED ",
+		[MDP_BUF_STATE_READY] = "READY  ",
+		[MDP_BUF_STATE_ACTIVE] = "ACTIVE ",
+		[MDP_BUF_STATE_CLEANUP] = "CLEANUP",
+	};
+	const char * const domain_stmap[] = {
+		[MDSS_IOMMU_DOMAIN_UNSECURE] = "mdp_unsecure",
+		[MDSS_IOMMU_DOMAIN_ROT_UNSECURE] = "rot_unsecure",
+		[MDSS_IOMMU_DOMAIN_SECURE] = "mdp_secure",
+		[MDSS_IOMMU_DOMAIN_ROT_SECURE] = "rot_secure",
+		[MDSS_IOMMU_MAX_DOMAIN] = "undefined",
+	};
+	/*
+	 * NOTE(review): DMA_FROM_DEVICE maps to "read/write", the same
+	 * label as DMA_BIDIRECTIONAL - confirm this is intentional and
+	 * not meant to be "write".
+	 */
+	const char * const dma_data_dir_stmap[] = {
+		[DMA_BIDIRECTIONAL] = "read/write",
+		[DMA_TO_DEVICE] = "read",
+		[DMA_FROM_DEVICE] = "read/write",
+		[DMA_NONE] = "????",
+	};
+
+	seq_puts(s, "\t");
+	if (show_pipe && buf->last_pipe)
+		seq_printf(s, "pnum=%d ", buf->last_pipe->num);
+
+	/* unknown states fall back to "?" instead of indexing out of range */
+	seq_printf(s, "state=%s addr=%pa size=%lu ",
+		buf->state < ARRAY_SIZE(buf_stat_stmap) &&
+		buf_stat_stmap[buf->state] ? buf_stat_stmap[buf->state] : "?",
+		&buf->p[0].addr, buf->p[0].len);
+
+	__print_time(tmpbuf, sizeof(tmpbuf), buf->last_alloc);
+	seq_printf(s, "alloc_time=%s ", tmpbuf);
+	if (buf->state == MDP_BUF_STATE_UNUSED) {
+		__print_time(tmpbuf, sizeof(tmpbuf), buf->last_freed);
+		seq_printf(s, "freed_time=%s ", tmpbuf);
+	} else {
+		for (i = 0; i < buf->num_planes; i++) {
+			seq_puts(s, "\n\t\t");
+			seq_printf(s, "plane[%d] domain=%s ", i,
+				domain_stmap[buf->p[i].domain]);
+			seq_printf(s, "permission=%s ",
+				dma_data_dir_stmap[buf->p[i].dir]);
+		}
+	}
+	seq_puts(s, "\n");
+}
+
+/*
+ * Print the full state of one SSPP pipe: identity/blend setup, multirect
+ * mode (when supported), source format and rectangles, SMP usage,
+ * supported-format bitmap, and every buffer queued on the pipe.
+ */
+static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_data *buf;
+	int format;
+	int smps[4];
+	int i;
+
+	seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%08x play_cnt=%u xin_id=%d\n",
+			pipe->num, mdss_mdp_pipetype2str(pipe->type),
+			pipe->ndx, pipe->flags, pipe->play_cnt, pipe->xin_id);
+	seq_printf(s, "\tstage=%d alpha=0x%x transp=0x%x blend_op=%d\n",
+			pipe->mixer_stage, pipe->alpha,
+			pipe->transp, pipe->blend_op);
+	/* multirect details only exist on hw that supports >1 rect per pipe */
+	if (pipe->multirect.max_rects > 1) {
+		const char * const fmodes[] = {
+			[MDSS_MDP_PIPE_MULTIRECT_PARALLEL] = "parallel",
+			[MDSS_MDP_PIPE_MULTIRECT_SERIAL] = "serial",
+			[MDSS_MDP_PIPE_MULTIRECT_NONE] = "single",
+		};
+		const char *mode = NULL;
+
+		if (pipe->multirect.mode < ARRAY_SIZE(fmodes))
+			mode = fmodes[pipe->multirect.mode];
+		if (!mode)
+			mode = "invalid";
+
+		seq_printf(s, "\trect=%d/%d fetch_mode=%s\n",
+				pipe->multirect.num, pipe->multirect.max_rects,
+				mode);
+	}
+
+	format = pipe->src_fmt->format;
+	seq_printf(s, "\tsrc w=%d h=%d format=%d (%s)\n",
+			pipe->img_width, pipe->img_height, format,
+			mdss_mdp_format2str(format));
+	seq_printf(s, "\tsrc_rect x=%d y=%d w=%d h=%d H.dec=%d V.dec=%d\n",
+			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+			pipe->horz_deci, pipe->vert_deci);
+	seq_printf(s, "\tdst_rect x=%d y=%d w=%d h=%d\n",
+			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	/* count allocated/reserved shared-memory-pool blocks per plane map */
+	smps[0] = bitmap_weight(pipe->smp_map[0].allocated,
+			MAX_DRV_SUP_MMB_BLKS);
+	smps[1] = bitmap_weight(pipe->smp_map[1].allocated,
+			MAX_DRV_SUP_MMB_BLKS);
+	smps[2] = bitmap_weight(pipe->smp_map[0].reserved,
+			MAX_DRV_SUP_MMB_BLKS);
+	smps[3] = bitmap_weight(pipe->smp_map[1].reserved,
+			MAX_DRV_SUP_MMB_BLKS);
+
+	seq_printf(s, "\tSMP allocated=[%d %d] reserved=[%d %d]\n",
+			smps[0], smps[1], smps[2], smps[3]);
+
+	seq_puts(s, "\tSupported formats = ");
+	for (i = 0; i < BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1); i++)
+		seq_printf(s, "0x%02X ", pipe->supported_formats[i]);
+	seq_puts(s, "\n");
+
+	seq_puts(s, "Data:\n");
+
+	list_for_each_entry(buf, &pipe->buf_queue, pipe_list)
+		__print_buf(s, buf, false);
+}
+
+/*
+ * Print one layer mixer's configuration and recursively dump every pipe
+ * staged on it. Silently ignores a NULL mixer so callers can pass
+ * ctl->mixer_right unconditionally.
+ */
+static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_pipe *pipe;
+	int i, cnt = 0;
+
+	if (!mixer)
+		return;
+
+	seq_printf(s, "\n%s Mixer #%d res=%dx%d roi[%d, %d, %d, %d] %s\n",
+		mixer->type == MDSS_MDP_MIXER_TYPE_INTF ? "Intf" : "Writeback",
+		mixer->num, mixer->width, mixer->height,
+		mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h,
+		mixer->cursor_enabled ? "w/cursor" : "");
+
+	for (i = 0; i < ARRAY_SIZE(mixer->stage_pipe); i++) {
+		pipe = mixer->stage_pipe[i];
+		if (pipe) {
+			__dump_pipe(s, pipe);
+			cnt++;
+		}
+	}
+
+	seq_printf(s, "\nTotal pipes=%d\n", cnt);
+}
+
+/*
+ * Print the panel timing parameters (resolution, frame rate, porches,
+ * pulse widths and optional border/offset info) for @ctl's panel.
+ * No-op when @ctl or its panel data is missing, so callers can pass a
+ * possibly-NULL split ctl.
+ */
+static void __dump_timings(struct seq_file *s, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!ctl || !ctl->panel_data)
+		return;
+
+	pinfo = &ctl->panel_data->panel_info;
+	seq_printf(s, "Panel #%d %dx%dp%d\n",
+		pinfo->pdest, pinfo->xres, pinfo->yres,
+		mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_HZ));
+	seq_printf(s, "\tvbp=%d vfp=%d vpw=%d hbp=%d hfp=%d hpw=%d\n",
+		pinfo->lcdc.v_back_porch,
+		pinfo->lcdc.v_front_porch,
+		pinfo->lcdc.v_pulse_width,
+		pinfo->lcdc.h_back_porch,
+		pinfo->lcdc.h_front_porch,
+		pinfo->lcdc.h_pulse_width);
+
+	/* border info only printed when any border dimension is non-zero */
+	if (pinfo->lcdc.border_bottom || pinfo->lcdc.border_top ||
+		pinfo->lcdc.border_left ||
+		pinfo->lcdc.border_right) {
+		seq_printf(s, "\tborder (l,t,r,b):[%d,%d,%d,%d] off xy:%d,%d\n",
+			pinfo->lcdc.border_left,
+			pinfo->lcdc.border_top,
+			pinfo->lcdc.border_right,
+			pinfo->lcdc.border_bottom,
+			ctl->border_x_off,
+			ctl->border_y_off);
+	}
+}
+
+/*
+ * Print one powered-on control path: panel timings (both halves for a
+ * split display) or the writeback/rotator identity for panel-less ctls,
+ * current performance numbers, and both mixers with their pipes.
+ * Powered-off ctls are skipped.
+ */
+static void __dump_ctl(struct seq_file *s, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_perf_params *perf;
+
+	if (!mdss_mdp_ctl_is_power_on(ctl))
+		return;
+
+	seq_printf(s, "\n--[ Control path #%d - ", ctl->num);
+
+	if (ctl->panel_data) {
+		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+		seq_printf(s, "%s%s]--\n",
+			sctl && sctl->panel_data ? "DUAL " : "",
+			mdss_panel2str(ctl->panel_data->panel_info.type));
+		__dump_timings(s, ctl);
+		__dump_timings(s, sctl);
+	} else {
+		/* no panel: this ctl drives a writeback or rotator path */
+		struct mdss_mdp_mixer *mixer;
+
+		mixer = ctl->mixer_left;
+		if (mixer) {
+			seq_printf(s, "%s%d",
+				(mixer->rotator_mode ? "rot" : "wb"),
+				mixer->num);
+		} else {
+			seq_puts(s, "unknown");
+		}
+		seq_puts(s, "]--\n");
+	}
+	perf = &ctl->cur_perf;
+	seq_printf(s, "MDP Clk=%u Final BW=%llu\n",
+		perf->mdp_clk_rate,
+		perf->bw_ctl);
+	seq_printf(s, "Play Count=%u Underrun Count=%u\n",
+		ctl->play_cnt, ctl->underrun_cnt);
+
+	__dump_mixer(s, ctl->mixer_left);
+	__dump_mixer(s, ctl->mixer_right);
+}
+
+/*
+ * Dump every control path, skipping the slave ctl of a split display
+ * (detected when a ctl's right mixer belongs to a different ctl, whose
+ * number is then remembered in ignore_ndx). Always returns 0 so it can
+ * serve directly as a seq_file show body.
+ */
+static int __dump_mdp(struct seq_file *s, struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_ctl *ctl;
+	int i, ignore_ndx = -1;
+
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		/* ignore slave ctl in split display case */
+		if (ctl->num == ignore_ndx)
+			continue;
+		if (ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+			ignore_ndx = ctl->mixer_right->ctl->num;
+		__dump_ctl(s, ctl);
+	}
+	return 0;
+}
+
+#define DUMP_CHUNK 256
+#define DUMP_SIZE SZ_32K
+/**
+ * mdss_mdp_dump() - dump all MDP state to the kernel log
+ * @mdata: MDSS data to dump
+ *
+ * Renders __dump_mdp() into a temporary 32K buffer via an on-stack
+ * seq_file (size is DUMP_SIZE-1 so there is always room for a NUL),
+ * then emits it in DUMP_CHUNK pieces because a single printk line has
+ * a limited length. Silently returns if the buffer cannot be allocated.
+ */
+void mdss_mdp_dump(struct mdss_data_type *mdata)
+{
+	struct seq_file s = {
+		.size = DUMP_SIZE - 1,
+	};
+	int i;
+
+	s.buf = kzalloc(DUMP_SIZE, GFP_KERNEL);
+	if (!s.buf)
+		return;
+
+	__dump_mdp(&s, mdata);
+	seq_puts(&s, "\n");
+
+	pr_info("MDP DUMP\n------------------------\n");
+	for (i = 0; i < s.count; i += DUMP_CHUNK) {
+		if ((s.count - i) > DUMP_CHUNK) {
+			/* temporarily terminate the chunk, then restore */
+			char c = s.buf[i + DUMP_CHUNK];
+
+			s.buf[i + DUMP_CHUNK] = 0;
+			pr_cont("%s", s.buf + i);
+			s.buf[i + DUMP_CHUNK] = c;
+		} else {
+			s.buf[s.count] = 0;
+			pr_cont("%s", s.buf + i);
+		}
+	}
+
+	kfree(s.buf);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Print all buffers tracked for one framebuffer device: the in-use
+ * list, the free list, and (from the pool, newest first) up to
+ * BUF_DUMP_LAST_N recently freed buffers. Holds the mdp5 list_lock
+ * while walking the lists.
+ */
+static void __dump_buf_data(struct seq_file *s, struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf;
+	int i = 0;
+
+	seq_printf(s, "List of buffers for fb%d\n", mfd->index);
+
+	mutex_lock(&mdp5_data->list_lock);
+	if (!list_empty(&mdp5_data->bufs_used)) {
+		seq_puts(s, " Buffers used:\n");
+		list_for_each_entry(buf, &mdp5_data->bufs_used, buf_list)
+			__print_buf(s, buf, true);
+	}
+
+	if (!list_empty(&mdp5_data->bufs_freelist)) {
+		seq_puts(s, " Buffers in free list:\n");
+		list_for_each_entry(buf, &mdp5_data->bufs_freelist, buf_list)
+			__print_buf(s, buf, true);
+	}
+
+	if (!list_empty(&mdp5_data->bufs_pool)) {
+		seq_printf(s, " Last %d buffers used:\n", BUF_DUMP_LAST_N);
+
+		/* walk backwards: most recently freed entries come last */
+		list_for_each_entry_reverse(buf, &mdp5_data->bufs_pool,
+				buf_list) {
+			if (buf->last_freed == 0 || i == BUF_DUMP_LAST_N)
+				break;
+			__print_buf(s, buf, true);
+			i++;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+/*
+ * Dump buffer lists for every fb device with an active ctl, using the
+ * same slave-ctl skip logic as __dump_mdp(). Always returns 0 so it
+ * can serve directly as a seq_file show body.
+ */
+static int __dump_buffers(struct seq_file *s, struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_ctl *ctl;
+	int i, ignore_ndx = -1;
+
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		/* ignore slave ctl in split display case */
+		if (ctl->num == ignore_ndx)
+			continue;
+		if (ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+			ignore_ndx = ctl->mixer_right->ctl->num;
+
+		if (ctl->mfd)
+			__dump_buf_data(s, ctl->mfd);
+	}
+	return 0;
+}
+
+/* debugfs "dump" file: full MDP state via __dump_mdp(). */
+static int mdss_debugfs_dump_show(struct seq_file *s, void *v)
+{
+	struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+
+	return __dump_mdp(s, mdata);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_dump);
+
+/* debugfs "buffers" file: per-fb buffer lists via __dump_buffers(). */
+static int mdss_debugfs_buffers_show(struct seq_file *s, void *v)
+{
+	struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+
+	return __dump_buffers(s, mdata);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_buffers);
+
+static int __danger_safe_signal_status(struct seq_file *s, bool danger_status)
+{
+ struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+ u32 status;
+ int i, j;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ status = readl_relaxed(mdata->mdp_base +
+ MDSS_MDP_DANGER_STATUS);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ status = readl_relaxed(mdata->mdp_base +
+ MDSS_MDP_SAFE_STATUS);
+ }
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+ seq_printf(s, "MDP : 0x%lx\n",
+ DANGER_SAFE_STATUS(status, MDP_DANGER_SAFE_BIT_OFFSET));
+
+ for (i = 0, j = VIG_DANGER_SAFE_BIT_OFFSET; i < mdata->nvig_pipes;
+ i++, j += 2)
+ seq_printf(s, "VIG%d : 0x%lx \t", i,
+ DANGER_SAFE_STATUS(status, j));
+ seq_puts(s, "\n");
+
+ for (i = 0, j = RGB_DANGER_SAFE_BIT_OFFSET; i < mdata->nrgb_pipes;
+ i++, j += 2)
+ seq_printf(s, "RGB%d : 0x%lx \t", i,
+ DANGER_SAFE_STATUS(status, j));
+ seq_puts(s, "\n");
+ for (i = 0, j = DMA_DANGER_SAFE_BIT_OFFSET; i < mdata->ndma_pipes;
+ i++, j += 2)
+ seq_printf(s, "DMA%d : 0x%lx \t", i,
+ DANGER_SAFE_STATUS(status, j));
+ seq_puts(s, "\n");
+
+ for (i = 0, j = CURSOR_DANGER_SAFE_BIT_OFFSET; i < mdata->ncursor_pipes;
+ i++, j += 2)
+ seq_printf(s, "CURSOR%d : 0x%lx \t", i,
+ DANGER_SAFE_STATUS(status, j));
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int mdss_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return __danger_safe_signal_status(s, true);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_danger_stats);
+
+static int mdss_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return __danger_safe_signal_status(s, false);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_safe_stats);
+
+static void __stats_ctl_dump(struct mdss_mdp_ctl *ctl, struct seq_file *s)
+{
+ if (!ctl->ref_cnt)
+ return;
+
+ if (ctl->intf_num) {
+ seq_printf(s, "intf%d: play: %08u \t",
+ ctl->intf_num, ctl->play_cnt);
+ seq_printf(s, "vsync: %08u \tunderrun: %08u\n",
+ ctl->vsync_cnt, ctl->underrun_cnt);
+ if (ctl->mfd) {
+ seq_printf(s, "user_bl: %08u \tmod_bl: %08u\n",
+ ctl->mfd->bl_level, ctl->mfd->bl_level_scaled);
+ }
+ } else {
+ seq_printf(s, "wb: \tmode=%x \tplay: %08u\n",
+ ctl->opmode, ctl->play_cnt);
+ }
+}
+
+static void __dump_stat(struct seq_file *s, char *ptypestr,
+ struct mdss_mdp_pipe *pipe_list, int count)
+{
+ struct mdss_mdp_pipe *pipe;
+ int i = 0, ndx = 0;
+ u32 rects_per_pipe = 1;
+
+ while (i < count) {
+ pipe = pipe_list + ndx;
+ rects_per_pipe = pipe->multirect.max_rects;
+
+ if (rects_per_pipe == 1)
+ seq_printf(s, "%s%d", ptypestr, i);
+ else
+ seq_printf(s, "%s%d.%d", ptypestr, i,
+ ndx % rects_per_pipe);
+
+ seq_printf(s, " : %08u\t", pipe->play_cnt);
+
+ if ((++ndx % rects_per_pipe) == 0)
+ i++;
+
+ if ((ndx % 4) == 0)
+ seq_puts(s, "\n");
+ }
+
+ if ((ndx % 4) != 0)
+ seq_puts(s, "\n");
+}
+
+static int mdss_debugfs_stats_show(struct seq_file *s, void *v)
+{
+ struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+ int i;
+
+ seq_puts(s, "\nmdp:\n");
+
+ for (i = 0; i < mdata->nctl; i++)
+ __stats_ctl_dump(mdata->ctl_off + i, s);
+ seq_puts(s, "\n");
+
+ __dump_stat(s, "VIG", mdata->vig_pipes, mdata->nvig_pipes);
+ __dump_stat(s, "RGB", mdata->rgb_pipes, mdata->nrgb_pipes);
+ __dump_stat(s, "DMA", mdata->dma_pipes, mdata->ndma_pipes);
+ __dump_stat(s, "CURSOR", mdata->cursor_pipes, mdata->ncursor_pipes);
+
+ return 0;
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_stats);
+
+int mdss_mdp_debugfs_init(struct mdss_data_type *mdata)
+{
+ struct mdss_debug_data *mdd;
+
+ if (!mdata)
+ return -ENODEV;
+
+ mdd = mdata->debug_inf.debug_data;
+ if (!mdd)
+ return -ENOENT;
+
+ debugfs_create_file("dump", 0644, mdd->root, mdata,
+ &mdss_debugfs_dump_fops);
+ debugfs_create_file("buffers", 0644, mdd->root, mdata,
+ &mdss_debugfs_buffers_fops);
+ debugfs_create_file("stat", 0644, mdd->root, mdata,
+ &mdss_debugfs_stats_fops);
+ debugfs_create_file("danger_stat", 0644, mdd->root, mdata,
+ &mdss_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_stat", 0644, mdd->root, mdata,
+ &mdss_debugfs_safe_stats_fops);
+ debugfs_create_bool("serialize_wait4pp", 0644, mdd->root,
+ (bool *)&mdata->serialize_wait4pp);
+ debugfs_create_bool("wait4autorefresh", 0644, mdd->root,
+ (bool *)&mdata->wait4autorefresh);
+ debugfs_create_bool("enable_gate", 0644, mdd->root,
+ (bool *)&mdata->enable_gate);
+
+ debugfs_create_u32("color0", 0644, mdd->bordercolor,
+ (u32 *)&mdata->bcolor0);
+ debugfs_create_u32("color1", 0644, mdd->bordercolor,
+ (u32 *)&mdata->bcolor1);
+ debugfs_create_u32("color2", 0644, mdd->bordercolor,
+ (u32 *)&mdata->bcolor2);
+ debugfs_create_u32("ad_debugen", 0644, mdd->postproc,
+ (u32 *)&mdata->ad_debugen);
+
+ return 0;
+}
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.h b/drivers/video/fbdev/msm/mdss_mdp_debug.h
new file mode 100644
index 0000000..fe7ff09
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_DEBUG_H
+#define MDSS_MDP_DEBUG_H
+
+#include <linux/msm_mdp.h>
+#include <linux/stringify.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+
+#define MDP_DANGER_SAFE_BIT_OFFSET 0
+#define VIG_DANGER_SAFE_BIT_OFFSET 4
+#define RGB_DANGER_SAFE_BIT_OFFSET 12
+#define DMA_DANGER_SAFE_BIT_OFFSET 20
+#define CURSOR_DANGER_SAFE_BIT_OFFSET 24
+
+#define DANGER_SAFE_STATUS(X, Y) (((X) & (BIT(Y) | BIT((Y)+1))) >> (Y))
+
+static inline const char *mdss_mdp_pipetype2str(u32 ptype)
+{
+ static const char * const strings[] = {
+#define PIPE_TYPE(t)[MDSS_MDP_PIPE_TYPE_ ## t] = __stringify(t)
+ PIPE_TYPE(VIG),
+ PIPE_TYPE(RGB),
+ PIPE_TYPE(DMA),
+ PIPE_TYPE(CURSOR),
+#undef PIPE_TYPE
+ };
+
+ if (ptype >= ARRAY_SIZE(strings) || !strings[ptype])
+ return "UNKNOWN";
+
+ return strings[ptype];
+}
+
+static inline const char *mdss_mdp_format2str(u32 format)
+{
+ static const char * const strings[] = {
+#define FORMAT_NAME(f)[MDP_ ## f] = __stringify(f)
+ FORMAT_NAME(RGB_565),
+ FORMAT_NAME(BGR_565),
+ FORMAT_NAME(RGB_888),
+ FORMAT_NAME(BGR_888),
+ FORMAT_NAME(RGBX_8888),
+ FORMAT_NAME(RGBA_8888),
+ FORMAT_NAME(ARGB_8888),
+ FORMAT_NAME(XRGB_8888),
+ FORMAT_NAME(BGRA_8888),
+ FORMAT_NAME(BGRX_8888),
+ FORMAT_NAME(Y_CBCR_H2V2_VENUS),
+ FORMAT_NAME(Y_CBCR_H2V2),
+ FORMAT_NAME(Y_CRCB_H2V2),
+ FORMAT_NAME(Y_CB_CR_H2V2),
+ FORMAT_NAME(Y_CR_CB_H2V2),
+ FORMAT_NAME(Y_CR_CB_GH2V2),
+ FORMAT_NAME(YCBYCR_H2V1),
+ FORMAT_NAME(YCRYCB_H2V1),
+ FORMAT_NAME(RGBA_8888_UBWC),
+ FORMAT_NAME(RGBX_8888_UBWC),
+ FORMAT_NAME(RGB_565_UBWC),
+ FORMAT_NAME(Y_CBCR_H2V2_UBWC)
+#undef FORMAT_NAME
+ };
+
+ if (format >= ARRAY_SIZE(strings) || !strings[format])
+ return "UNKNOWN";
+
+ return strings[format];
+}
+void mdss_mdp_dump(struct mdss_data_type *mdata);
+void mdss_mdp_hw_rev_debug_caps_init(struct mdss_data_type *mdata);
+
+
+#ifdef CONFIG_DEBUG_FS
+int mdss_mdp_debugfs_init(struct mdss_data_type *mdata);
+#else
+static inline int mdss_mdp_debugfs_init(struct mdss_data_type *mdata)
+{
+ return 0;
+}
+#endif
+
+#endif /* MDSS_MDP_DEBUG_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_formats.h b/drivers/video/fbdev/msm/mdss_mdp_formats.h
new file mode 100644
index 0000000..cdb9547
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_formats.h
@@ -0,0 +1,504 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_FORMATS_H
+#define MDSS_MDP_FORMATS_H
+
+#include <linux/msm_mdp.h>
+
+#include "mdss_mdp.h"
+
+ /*
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_4BIT,
+ COLOR_5BIT,
+ COLOR_6BIT,
+ COLOR_8BIT,
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+};
+
+#define UBWC_META_MACRO_W_H 16
+#define UBWC_META_BLOCK_SIZE 256
+
+#define FMT_RGB_565(fmt, fetch_type, flag_arg, e0, e1, e2) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = 0, \
+ .unpack_count = 3, \
+ .bpp = 2, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1), (e2) }, \
+ .bits = { \
+ [C2_R_Cr] = COLOR_5BIT, \
+ [C0_G_Y] = COLOR_6BIT, \
+ [C1_B_Cb] = COLOR_5BIT, \
+ }, \
+ }
+
+#define FMT_RGB_888(fmt, fetch_type, flag_arg, e0, e1, e2) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = 0, \
+ .unpack_count = 3, \
+ .bpp = 3, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1), (e2) }, \
+ .bits = { \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ }
+
+#define FMT_RGB_8888(fmt, fetch_type, flag_arg, \
+ alpha_en, e0, e1, e2, e3) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 4, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_8BIT, \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ }
+
+#define FMT_YUV_COMMON(fmt) \
+ .format = (fmt), \
+ .is_yuv = 1, \
+ .bits = { \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ .alpha_enable = 0, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0
+
+#define FMT_YUV_PSEUDO(fmt, fetch_type, samp, \
+ flag_arg, e0, e1) \
+ { \
+ FMT_YUV_COMMON(fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR, \
+ .chroma_sample = samp, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1) }, \
+ }
+
+#define FMT_YUV_PLANR(fmt, fetch_type, samp, \
+ flag_arg, e0, e1) \
+ { \
+ FMT_YUV_COMMON(fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_PLANAR, \
+ .chroma_sample = samp, \
+ .bpp = 1, \
+ .unpack_count = 1, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1) } \
+ }
+
+#define FMT_RGB_1555(fmt, alpha_en, flag_arg, e0, e1, e2, e3) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 2, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_ALPHA_1BIT, \
+ [C2_R_Cr] = COLOR_5BIT, \
+ [C0_G_Y] = COLOR_5BIT, \
+ [C1_B_Cb] = COLOR_5BIT, \
+ }, \
+ }
+
+#define FMT_RGB_4444(fmt, alpha_en, flag_arg, e0, e1, e2, e3) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 2, \
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_ALPHA_4BIT, \
+ [C2_R_Cr] = COLOR_4BIT, \
+ [C0_G_Y] = COLOR_4BIT, \
+ [C1_B_Cb] = COLOR_4BIT, \
+ }, \
+ }
+
+#define FMT_RGB_2101010(fmt, fetch_type, flag_arg, \
+ alpha_en, e0, e1, e2, e3) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 4, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_8BIT, \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ .unpack_dx_format = 1, \
+ }
+
+#define FMT_YUV_PSEUDO_10(fmt, fetch_type, samp, \
+ flag_arg, e0, e1, unpack_type, unpack_align) \
+ { \
+ FMT_YUV_COMMON(fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR, \
+ .chroma_sample = samp, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = (fetch_type), \
+ .element = { (e0), (e1) }, \
+ .unpack_dx_format = 1, \
+ .unpack_tight = unpack_type, \
+ .unpack_align_msb = unpack_align, \
+ }
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static struct mdss_mdp_format_params_ubwc mdss_mdp_format_ubwc_map[] = {
+ {
+ .mdp_format = FMT_RGB_565(MDP_RGB_565_UBWC,
+ MDSS_MDP_FETCH_UBWC,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_8888(MDP_RGBA_8888_UBWC,
+ MDSS_MDP_FETCH_UBWC,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_8888(MDP_RGBX_8888_UBWC,
+ MDSS_MDP_FETCH_UBWC,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 0,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_UBWC,
+ MDSS_MDP_FETCH_UBWC, MDSS_MDP_CHROMA_420,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C2_R_Cr),
+ .micro = {
+ .tile_height = 8,
+ .tile_width = 32,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_2101010(MDP_RGBA_1010102_UBWC,
+ MDSS_MDP_FETCH_UBWC,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_2101010(MDP_RGBX_1010102_UBWC,
+ MDSS_MDP_FETCH_UBWC,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_YUV_PSEUDO_10(MDP_Y_CBCR_H2V2_TP10_UBWC,
+ MDSS_MDP_FETCH_UBWC, MDSS_MDP_CHROMA_420,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C2_R_Cr, 1, 0),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 48,
+ },
+ },
+};
+
+static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
+ FMT_RGB_565(MDP_RGB_565, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_565(MDP_BGR_565, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_565(MDP_RGB_565_TILE, MDSS_MDP_FETCH_TILE, VALID_ROT_WB_FORMAT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_565(MDP_BGR_565_TILE, MDSS_MDP_FETCH_TILE, VALID_ROT_WB_FORMAT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_888(MDP_RGB_888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_888(MDP_BGR_888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr),
+
+ FMT_RGB_8888(MDP_XRGB_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT | VALID_MDP_CURSOR_FORMAT, 0,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_8888(MDP_ARGB_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_8888(MDP_ABGR_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, 1,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_8888(MDP_RGBA_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_8888(MDP_RGBX_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, 0, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ C3_ALPHA),
+ FMT_RGB_8888(MDP_BGRA_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_8888(MDP_BGRX_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, 0, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ C3_ALPHA),
+ FMT_RGB_8888(MDP_XBGR_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, 0, C3_ALPHA, C1_B_Cb, C0_G_Y,
+ C2_R_Cr),
+ FMT_RGB_8888(MDP_RGBA_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_8888(MDP_ARGB_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_8888(MDP_ABGR_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_8888(MDP_BGRA_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_8888(MDP_RGBX_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_8888(MDP_XRGB_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_8888(MDP_XBGR_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_8888(MDP_BGRX_8888_TILE, MDSS_MDP_FETCH_TILE,
+ VALID_ROT_WB_FORMAT, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+ FMT_YUV_PSEUDO(MDP_Y_CRCB_H1V1, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_RGB, 0, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PSEUDO(MDP_Y_CBCR_H1V1, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_RGB, 0, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V1, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_H2V1, VALID_ROT_WB_FORMAT, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V1, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_H2V1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PSEUDO(MDP_Y_CRCB_H1V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_H1V2, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PSEUDO(MDP_Y_CBCR_H1V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_H1V2, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_VENUS, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V2_VENUS, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+
+ FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+
+ {
+ FMT_YUV_COMMON(MDP_YCBCR_H1V1),
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_RGB,
+ .unpack_count = 3,
+ .bpp = 3,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C2_R_Cr, C1_B_Cb, C0_G_Y },
+ },
+ {
+ FMT_YUV_COMMON(MDP_YCRCB_H1V1),
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_RGB,
+ .unpack_count = 3,
+ .bpp = 3,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C1_B_Cb, C2_R_Cr, C0_G_Y },
+ },
+ {
+ FMT_YUV_COMMON(MDP_YCRYCB_H2V1),
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_H2V1,
+ .unpack_count = 4,
+ .bpp = 2,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y },
+ },
+ {
+ FMT_YUV_COMMON(MDP_YCBYCR_H2V1),
+ .flag = VALID_MDP_WB_INTF_FORMAT,
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_H2V1,
+ .unpack_count = 4,
+ .bpp = 2,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
+ },
+ {
+ FMT_YUV_COMMON(MDP_CRYCBY_H2V1),
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_H2V1,
+ .unpack_count = 4,
+ .bpp = 2,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr },
+ },
+ {
+ FMT_YUV_COMMON(MDP_CBYCRY_H2V1),
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = MDSS_MDP_CHROMA_H2V1,
+ .unpack_count = 4,
+ .bpp = 2,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y},
+ },
+ FMT_RGB_1555(MDP_RGBA_5551, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_1555(MDP_ARGB_1555, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_1555(MDP_ABGR_1555, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_1555(MDP_BGRA_5551, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_1555(MDP_BGRX_5551, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_1555(MDP_RGBX_5551, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_1555(MDP_XBGR_1555, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_1555(MDP_XRGB_1555, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_4444(MDP_ABGR_4444, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_4444(MDP_BGRA_4444, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_4444(MDP_BGRX_4444, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_4444(MDP_RGBX_4444, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_4444(MDP_XBGR_4444, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_4444(MDP_XRGB_4444, 0, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+ FMT_RGB_4444(MDP_RGBA_4444, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_4444(MDP_ARGB_4444, 1, VALID_ROT_WB_FORMAT |
+ VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+ FMT_RGB_2101010(MDP_RGBA_1010102, MDSS_MDP_FETCH_LINEAR,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_2101010(MDP_ARGB_2101010, MDSS_MDP_FETCH_LINEAR, 0, 1,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_2101010(MDP_RGBX_1010102, MDSS_MDP_FETCH_LINEAR,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 0,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+ FMT_RGB_2101010(MDP_XRGB_2101010, MDSS_MDP_FETCH_LINEAR, 0, 0,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+ FMT_RGB_2101010(MDP_BGRA_1010102, MDSS_MDP_FETCH_LINEAR,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_2101010(MDP_ABGR_2101010, MDSS_MDP_FETCH_LINEAR, 0, 1,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_2101010(MDP_BGRX_1010102, MDSS_MDP_FETCH_LINEAR,
+ VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 0,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_2101010(MDP_XBGR_2101010, MDSS_MDP_FETCH_LINEAR, 0, 0,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+
+ FMT_YUV_PSEUDO_10(MDP_Y_CBCR_H2V2_P010, MDSS_MDP_FETCH_LINEAR,
+ MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT,
+ C1_B_Cb, C2_R_Cr, 0, 1),
+
+};
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
new file mode 100644
index 0000000..54ceb19
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -0,0 +1,844 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_HWIO_H
+#define MDSS_MDP_HWIO_H
+
+#include <linux/bitops.h>
+
+/*
+ * struct mdss_mdp_hwio_cfg - used to define a register bitfield
+ * @start: bitfield offset start from lsb
+ * @len: number of lsb bits that can be taken from field value
+ * @shift: number of lsb bits to truncate from field value
+ */
+struct mdss_mdp_hwio_cfg {
+ u32 start, len, shift;
+};
+
+static inline u32 mdss_mdp_hwio_mask(struct mdss_mdp_hwio_cfg *cfg, u32 val)
+{
+ u32 mask = (1 << cfg->len) - 1;
+
+ return ((val >> cfg->shift) & mask) << cfg->start;
+}
+
+#define IGC_LUT_ENTRIES 256
+#define GC_LUT_SEGMENTS 16
+#define ENHIST_LUT_ENTRIES 256
+#define HIST_V_SIZE 256
+
+/* QSEED3 LUT sizes */
+#define DIR_LUT_IDX 1
+#define DIR_LUT_COEFFS 200
+#define CIR_LUT_IDX 9
+#define CIR_LUT_COEFFS 60
+#define SEP_LUT_IDX 10
+#define SEP_LUT_COEFFS 60
+
+
+#define MDSS_MDP_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+#define MDSS_REG_HW_VERSION 0x0
+#define MDSS_REG_HW_INTR_STATUS 0x10
+
+#define MDSS_INTR_MDP BIT(0)
+#define MDSS_INTR_DSI0 BIT(4)
+#define MDSS_INTR_DSI1 BIT(5)
+#define MDSS_INTR_HDMI BIT(8)
+#define MDSS_INTR_EDP BIT(12)
+
+#define MDSS_MDP_REG_HW_VERSION 0x0
+#define MDSS_MDP_REG_DISP_INTF_SEL 0x00004
+#define MDSS_MDP_REG_INTR2_EN 0x00008
+#define MDSS_MDP_REG_INTR2_STATUS 0x0000C
+#define MDSS_MDP_REG_INTR2_CLEAR 0x0002C
+#define MDSS_MDP_REG_INTR_EN 0x00010
+#define MDSS_MDP_REG_INTR_STATUS 0x00014
+#define MDSS_MDP_REG_INTR_CLEAR 0x00018
+#define MDSS_MDP_REG_HIST_INTR_EN 0x0001C
+#define MDSS_MDP_REG_HIST_INTR_STATUS 0x00020
+#define MDSS_MDP_REG_HIST_INTR_CLEAR 0x00024
+#define MMSS_MDP_MDP_SSPP_SPARE_0 0x00028
+
+#define MMSS_MDP_PANIC_ROBUST_CTRL 0x00178
+#define MMSS_MDP_PANIC_LUT0 0x0017C
+#define MMSS_MDP_PANIC_LUT1 0x00180
+#define MMSS_MDP_ROBUST_LUT 0x00184
+#define MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL 0x00190
+
+#define MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL 0x002E0
+#define MDSS_MDP_REG_SPLIT_DISPLAY_EN 0x002F4
+#define MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x002F8
+#define MDSS_MDP_DANGER_STATUS 0x00360
+#define MDSS_MDP_SAFE_STATUS 0x00364
+#define MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x003F0
+#define MDSS_MDP_REG_DCE_SEL 0x00450
+
+#define MDSS_INTF_DSI 0x1
+#define MDSS_INTF_HDMI 0x3
+#define MDSS_INTF_LCDC 0x5
+#define MDSS_INTF_EDP 0x9
+
+#define MDSS_MDP_INTR_WB_0_DONE BIT(0)
+#define MDSS_MDP_INTR_WB_1_DONE BIT(1)
+#define MDSS_MDP_INTR_WB_2_DONE BIT(4)
+#define MDSS_MDP_INTR_PING_PONG_0_DONE BIT(8)
+#define MDSS_MDP_INTR_PING_PONG_1_DONE BIT(9)
+#define MDSS_MDP_INTR_PING_PONG_2_DONE BIT(10)
+#define MDSS_MDP_INTR_PING_PONG_3_DONE BIT(11)
+#define MDSS_MDP_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define MDSS_MDP_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define MDSS_MDP_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define MDSS_MDP_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define MDSS_MDP_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define MDSS_MDP_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define MDSS_MDP_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define MDSS_MDP_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+#define MDSS_MDP_INTR_INTF_0_UNDERRUN BIT(24)
+#define MDSS_MDP_INTR_INTF_0_VSYNC BIT(25)
+#define MDSS_MDP_INTR_INTF_1_UNDERRUN BIT(26)
+#define MDSS_MDP_INTR_INTF_1_VSYNC BIT(27)
+#define MDSS_MDP_INTR_INTF_2_UNDERRUN BIT(28)
+#define MDSS_MDP_INTR_INTF_2_VSYNC BIT(29)
+#define MDSS_MDP_INTR_INTF_3_UNDERRUN BIT(30)
+#define MDSS_MDP_INTR_INTF_3_VSYNC BIT(31)
+
+#define MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW BIT(14)
+#define MDSS_MDP_INTR2_PING_PONG_3_CWB_OVERFLOW BIT(15)
+
+#define MDSS_MDP_HIST_INTR_VIG_0_DONE BIT(0)
+#define MDSS_MDP_HIST_INTR_VIG_0_RESET_DONE BIT(1)
+#define MDSS_MDP_HIST_INTR_VIG_1_DONE BIT(4)
+#define MDSS_MDP_HIST_INTR_VIG_1_RESET_DONE BIT(5)
+#define MDSS_MDP_HIST_INTR_VIG_2_DONE BIT(8)
+#define MDSS_MDP_HIST_INTR_VIG_2_RESET_DONE BIT(9)
+#define MDSS_MDP_HIST_INTR_VIG_3_DONE BIT(10)
+#define MDSS_MDP_HIST_INTR_VIG_3_RESET_DONE BIT(11)
+#define MDSS_MDP_HIST_INTR_DSPP_0_DONE BIT(12)
+#define MDSS_MDP_HIST_INTR_DSPP_0_RESET_DONE BIT(13)
+#define MDSS_MDP_HIST_INTR_DSPP_1_DONE BIT(16)
+#define MDSS_MDP_HIST_INTR_DSPP_1_RESET_DONE BIT(17)
+#define MDSS_MDP_HIST_INTR_DSPP_2_DONE BIT(20)
+#define MDSS_MDP_HIST_INTR_DSPP_2_RESET_DONE BIT(21)
+#define MDSS_MDP_HIST_INTR_DSPP_3_DONE BIT(22)
+#define MDSS_MDP_HIST_INTR_DSPP_3_RESET_DONE BIT(23)
+
+enum mdss_mdp_intr_type {
+ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+ MDSS_MDP_IRQ_TYPE_WB_WFD_COMP,
+ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+ MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW,
+};
+
+#define MDSS_MDP_INTF_INTR_PROG_LINE BIT(8)
+
+enum mdss_mdp_intf_intr_type {
+ MDSS_MDP_INTF_IRQ_PROG_LINE = 8,
+};
+
+#define MDSS_MDP_REG_IGC_VIG_BASE 0x200
+#define MDSS_MDP_REG_IGC_RGB_BASE 0x210
+#define MDSS_MDP_REG_IGC_DMA_BASE 0x220
+#define MDSS_MDP_REG_IGC_DSPP_BASE 0x300
+
+enum mdss_mdp_ctl_index {
+ MDSS_MDP_CTL0,
+ MDSS_MDP_CTL1,
+ MDSS_MDP_CTL2,
+ MDSS_MDP_CTL3,
+ MDSS_MDP_CTL4,
+ MDSS_MDP_CTL5,
+ MDSS_MDP_MAX_CTL
+};
+
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET 0x40
+#define MDSS_MDP_REG_CTL_LAYER_EXTN2_OFFSET 0x70
+#define MDSS_MDP_CTL_X_LAYER_5 0x24
+
+/* mixer 5 has different offset than others */
+#define MDSS_MDP_REG_CTL_LAYER(lm) \
+ (((lm) == 5) ? MDSS_MDP_CTL_X_LAYER_5 : ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN(lm) \
+ (MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET + ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN2(lm) \
+ (MDSS_MDP_REG_CTL_LAYER_EXTN2_OFFSET + ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_TOP 0x014
+#define MDSS_MDP_REG_CTL_FLUSH 0x018
+#define MDSS_MDP_REG_CTL_START 0x01C
+#define MDSS_MDP_REG_CTL_PACK_3D 0x020
+#define MDSS_MDP_REG_CTL_SW_RESET 0x030
+
+#define MDSS_MDP_CTL_OP_VIDEO_MODE (0 << 17)
+#define MDSS_MDP_CTL_OP_CMD_MODE (1 << 17)
+
+#define MDSS_MDP_CTL_OP_ROT0_MODE 0x1
+#define MDSS_MDP_CTL_OP_ROT1_MODE 0x2
+#define MDSS_MDP_CTL_OP_WB0_MODE 0x3
+#define MDSS_MDP_CTL_OP_WB1_MODE 0x4
+#define MDSS_MDP_CTL_OP_WFD_MODE 0x5
+
+#define MDSS_MDP_CTL_OP_PACK_3D_ENABLE BIT(19)
+#define MDSS_MDP_CTL_OP_PACK_3D_FRAME_INT (0 << 20)
+#define MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT (1 << 20)
+#define MDSS_MDP_CTL_OP_PACK_3D_V_ROW_INT (2 << 20)
+#define MDSS_MDP_CTL_OP_PACK_3D_COL_INT (3 << 20)
+
+enum mdss_mdp_sspp_index {
+ MDSS_MDP_SSPP_VIG0,
+ MDSS_MDP_SSPP_VIG1,
+ MDSS_MDP_SSPP_VIG2,
+ MDSS_MDP_SSPP_RGB0,
+ MDSS_MDP_SSPP_RGB1,
+ MDSS_MDP_SSPP_RGB2,
+ MDSS_MDP_SSPP_DMA0,
+ MDSS_MDP_SSPP_DMA1,
+ MDSS_MDP_SSPP_VIG3,
+ MDSS_MDP_SSPP_RGB3,
+ MDSS_MDP_SSPP_CURSOR0,
+ MDSS_MDP_SSPP_CURSOR1,
+ MDSS_MDP_SSPP_DMA2,
+ MDSS_MDP_SSPP_DMA3,
+ MDSS_MDP_MAX_SSPP,
+};
+
+enum mdss_mdp_sspp_fetch_type {
+ MDSS_MDP_PLANE_INTERLEAVED,
+ MDSS_MDP_PLANE_PLANAR,
+ MDSS_MDP_PLANE_PSEUDO_PLANAR,
+};
+
+enum mdss_mdp_sspp_chroma_samp_type {
+ MDSS_MDP_CHROMA_RGB,
+ MDSS_MDP_CHROMA_H2V1,
+ MDSS_MDP_CHROMA_H1V2,
+ MDSS_MDP_CHROMA_420
+};
+
+#define MDSS_MDP_REG_SSPP_SRC_SIZE 0x000
+#define MDSS_MDP_REG_SSPP_SRC_IMG_SIZE 0x004
+#define MDSS_MDP_REG_SSPP_SRC_XY 0x008
+#define MDSS_MDP_REG_SSPP_OUT_SIZE 0x00C
+#define MDSS_MDP_REG_SSPP_OUT_XY 0x010
+#define MDSS_MDP_REG_SSPP_SRC0_ADDR 0x014
+#define MDSS_MDP_REG_SSPP_SRC1_ADDR 0x018
+#define MDSS_MDP_REG_SSPP_SRC2_ADDR 0x01C
+#define MDSS_MDP_REG_SSPP_SRC3_ADDR 0x020
+#define MDSS_MDP_REG_SSPP_SRC_YSTRIDE0 0x024
+#define MDSS_MDP_REG_SSPP_SRC_YSTRIDE1 0x028
+#define MDSS_MDP_REG_SSPP_STILE_FRAME_SIZE 0x02C
+#define MDSS_MDP_REG_SSPP_SRC_FORMAT 0x030
+#define MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN 0x034
+#define MDSS_MDP_REG_SSPP_SRC_OP_MODE 0x038
+#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0 0x050
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1 0x054
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2 0x058
+#define MDSS_MDP_REG_SSPP_DANGER_LUT 0x060
+#define MDSS_MDP_REG_SSPP_SAFE_LUT 0x064
+#define MDSS_MDP_REG_SSPP_CREQ_LUT 0x068
+#define MDSS_MDP_REG_SSPP_QOS_CTRL 0x06C
+#define MDSS_MDP_REG_SSPP_CDP_CTRL 0x134
+#define MDSS_MDP_REG_SSPP_UBWC_ERROR_STATUS 0x138
+#define MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER 0x130
+#define MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+
+#define MDSS_MDP_REG_SSPP_MULTI_REC_OP_MODE 0x170
+#define MDSS_MDP_REG_SSPP_OUT_SIZE_REC1 0x160
+#define MDSS_MDP_REG_SSPP_OUT_XY_REC1 0x164
+#define MDSS_MDP_REG_SSPP_SRC_XY_REC1 0x168
+#define MDSS_MDP_REG_SSPP_SRC_SIZE_REC1 0x16C
+#define MDSS_MDP_REG_SSPP_SRC_FORMAT_REC1 0x174
+#define MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define MDSS_MDP_REG_SSPP_SRC_OP_MODE_REC1 0x17C
+#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
+#define MDSS_MDP_REG_SSPP_FETCH_CONFIG 0x048
+#define MDSS_MDP_REG_SSPP_VC1_RANGE 0x04C
+#define MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS 0x070
+
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC0_ADDR 0x0A4
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC2_ADDR 0x0AC
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC3_ADDR 0x0B0
+#define MDSS_MDP_REG_SSPP_DECIMATION_CONFIG 0x0B4
+#define MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_LR 0x100
+#define MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_TB 0x104
+#define MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+
+#define MDSS_MDP_REG_VIG_OP_MODE 0x200
+#define MDSS_MDP_REG_VIG_QSEED2_CONFIG 0x204
+#define MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX 0x210
+#define MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY 0x214
+#define MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX 0x218
+#define MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY 0x21C
+#define MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX 0x220
+#define MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY 0x224
+#define MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX 0x228
+#define MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY 0x22C
+#define MDSS_MDP_REG_VIG_QSEED2_SHARP 0x230
+#define MDSS_MDP_REG_VIG_MEM_COL_BASE 0x288
+#define MDSS_MDP_REG_VIG_PA_BASE 0x310
+
+/* QSEED3 registers shared by VIG and Destination Scaler */
+#define MDSS_MDP_REG_SCALER_HW_VERSION 0x00
+#define MDSS_MDP_REG_SCALER_OP_MODE 0x04
+#define MDSS_MDP_REG_SCALER_RGB2Y_COEFF 0x08
+#define MDSS_MDP_REG_SCALER_PHASE_INIT 0x0C
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_Y_H 0x10
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_Y_V 0x14
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_UV_H 0x18
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_UV_V 0x1C
+#define MDSS_MDP_REG_SCALER_PRELOAD 0x20
+#define MDSS_MDP_REG_SCALER_DE_SHARPEN 0x24
+#define MDSS_MDP_REG_SCALER_DE_SHARPEN_CTL 0x28
+#define MDSS_MDP_REG_SCALER_DE_SHAPE_CTL 0x2C
+#define MDSS_MDP_REG_SCALER_DE_THRESHOLD 0x30
+#define MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_0 0x34
+#define MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_1 0x38
+#define MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_2 0x3C
+#define MDSS_MDP_REG_SCALER_SRC_SIZE_Y_RGB_A 0x40
+#define MDSS_MDP_REG_SCALER_SRC_SIZE_UV 0x44
+#define MDSS_MDP_REG_SCALER_DST_SIZE 0x48
+#define MDSS_MDP_REG_SCALER_COEF_LUT_CTRL 0x4C
+#define MDSS_MDP_REG_SCALER_BUFFER_CTRL 0x50
+#define MDSS_MDP_REG_SCALER_CLK_CTRL0 0x54
+#define MDSS_MDP_REG_SCALER_CLK_CTRL1 0x58
+#define MDSS_MDP_REG_SCALER_CLK_STATUS 0x5C
+#define MDSS_MDP_REG_SCALER_MISR_CTRL 0x70
+#define MDSS_MDP_REG_SCALER_MISR_SIGNATURE_0 0x74
+#define MDSS_MDP_REG_SCALER_MISR_SIGNATURE_1 0x78
+
+#define SCALER_EN BIT(0)
+#define SCALER_DIR_EN BIT(4)
+#define SCALER_DE_EN BIT(8)
+#define SCALER_ALPHA_EN BIT(10)
+#define SCALER_COLOR_SPACE 12
+#define SCALER_BIT_WIDTH 14
+#define Y_FILTER_CFG 16
+#define UV_FILTER_CFG 24
+#define ALPHA_FILTER_CFG 30
+#define SCALER_BLEND_CFG 31
+
+#define PHASE_BITS 0x3F
+#define PHASE_STEP_BITS 0xFFFFFF
+#define PRELOAD_BITS 0x7F
+
+#define Y_PHASE_INIT_H 0
+#define Y_PHASE_INIT_V 8
+#define UV_PHASE_INIT_H 16
+#define UV_PHASE_INIT_V 24
+#define Y_PRELOAD_H 0
+#define Y_PRELOAD_V 8
+#define UV_PRELOAD_H 16
+#define UV_PRELOAD_V 24
+/* supported filters */
+#define EDGE_DIRECTED_2D 0x0
+#define CIRCULAR_2D 0x1
+#define SEPERABLE_1D 0x2
+#define BILINEAR 0x3
+#define ALPHA_DROP_REPEAT 0x0
+#define ALPHA_BILINEAR 0x1
+
+
+/* in mpq product */
+#define MDSS_MDP_REG_VIG_FLUSH_SEL 0x204
+
+#define MDSS_MDP_VIG_OP_PA_SAT_ZERO_EXP_EN BIT(2)
+#define MDSS_MDP_VIG_OP_PA_MEM_PROTECT_EN BIT(3)
+#define MDSS_MDP_VIG_OP_PA_EN BIT(4)
+#define MDSS_MDP_VIG_OP_PA_MEM_COL_SKIN_MASK BIT(5)
+#define MDSS_MDP_VIG_OP_PA_MEM_COL_FOL_MASK BIT(6)
+#define MDSS_MDP_VIG_OP_PA_MEM_COL_SKY_MASK BIT(7)
+#define MDSS_MDP_VIG_OP_HIST_LUTV_EN BIT(10)
+#define MDSS_MDP_VIG_OP_PA_HUE_MASK BIT(25)
+#define MDSS_MDP_VIG_OP_PA_SAT_MASK BIT(26)
+#define MDSS_MDP_VIG_OP_PA_VAL_MASK BIT(27)
+#define MDSS_MDP_VIG_OP_PA_CONT_MASK BIT(28)
+
+#define MDSS_MDP_REG_SCALE_CONFIG 0x204
+#define MDSS_MDP_REG_SCALE_PHASE_STEP_X 0x210
+#define MDSS_MDP_REG_SCALE_PHASE_STEP_Y 0x214
+#define MDSS_MDP_REG_SCALE_INIT_PHASE_X 0x220
+#define MDSS_MDP_REG_SCALE_INIT_PHASE_Y 0x224
+
+#define MDSS_MDP_REG_VIG_CSC_1_BASE 0x320
+
+#define MDSS_MDP_REG_VIG_CSC_10_BASE 0x1A04
+#define MDSS_MDP_REG_VIG_CSC_10_OP_MODE 0x1A00
+
+#define MDSS_MDP_REG_VIG_HIST_CTL_BASE 0x2C4
+#define MDSS_MDP_REG_VIG_HIST_DATA_BASE 0x2E0
+#define MDSS_MDP_REG_VIG_HIST_LUT_BASE 0x2F0
+
+#define MDSS_MDP_SCALE_FILTER_NEAREST 0x0
+#define MDSS_MDP_SCALE_FILTER_BIL 0x1
+#define MDSS_MDP_SCALE_FILTER_PCMN 0x2
+#define MDSS_MDP_SCALE_FILTER_CA 0x3
+#define MDSS_MDP_SCALEY_EN BIT(1)
+#define MDSS_MDP_SCALEX_EN BIT(0)
+#define MDSS_MDP_FMT_SOLID_FILL 0x4037FF
+
+#define MDSS_MDP_INTF_EDP_SEL (BIT(3) | BIT(1))
+#define MDSS_MDP_INTF_HDMI_SEL (BIT(25) | BIT(24))
+#define MDSS_MDP_INTF_DSI0_SEL BIT(8)
+#define MDSS_MDP_INTF_DSI1_SEL BIT(16)
+
+enum mdss_mdp_mixer_intf_index {
+ MDSS_MDP_INTF_LAYERMIXER0,
+ MDSS_MDP_INTF_LAYERMIXER1,
+ MDSS_MDP_INTF_LAYERMIXER2,
+ MDSS_MDP_INTF_LAYERMIXER3,
+ MDSS_MDP_INTF_MAX_LAYERMIXER,
+};
+
+enum mdss_mdp_mixer_wb_index {
+ MDSS_MDP_WB_LAYERMIXER0,
+ MDSS_MDP_WB_LAYERMIXER1,
+ MDSS_MDP_WB_MAX_LAYERMIXER,
+};
+
+enum mdss_mdp_stage_index {
+ MDSS_MDP_STAGE_UNUSED,
+ MDSS_MDP_STAGE_BASE,
+ MDSS_MDP_STAGE_0,
+ MDSS_MDP_STAGE_1,
+ MDSS_MDP_STAGE_2,
+ MDSS_MDP_STAGE_3,
+ MDSS_MDP_STAGE_4,
+ MDSS_MDP_STAGE_5,
+ MDSS_MDP_STAGE_6,
+ MDSS_MDP_MAX_STAGE
+};
+#define MAX_PIPES_PER_STAGE 0x2
+#define MAX_PIPES_PER_LM (MDSS_MDP_MAX_STAGE*MAX_PIPES_PER_STAGE)
+
+#define MDSS_MDP_REG_LM_OP_MODE 0x000
+#define MDSS_MDP_REG_LM_OUT_SIZE 0x004
+#define MDSS_MDP_REG_LM_BORDER_COLOR_0 0x008
+#define MDSS_MDP_REG_LM_BORDER_COLOR_1 0x010
+
+#define MDSS_MDP_REG_LM_BLEND_OFFSET(stage) (0x20 + ((stage) * 0x30))
+#define MDSS_MDP_REG_LM_BLEND_OP 0x00
+#define MDSS_MDP_REG_LM_BLEND_FG_ALPHA 0x04
+#define MDSS_MDP_REG_LM_BLEND_BG_ALPHA 0x08
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_LOW0 0x0C
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_LOW1 0x10
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_HIGH0 0x14
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_HIGH1 0x18
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_LOW0 0x1C
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_LOW1 0x20
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_HIGH0 0x24
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_HIGH1 0x28
+#define MDSS_MDP_REG_LM_BLEND_STAGE4 0x150
+
+#define MDSS_MDP_REG_LM_CURSOR_IMG_SIZE 0xE0
+#define MDSS_MDP_REG_LM_CURSOR_SIZE 0xE4
+#define MDSS_MDP_REG_LM_CURSOR_XY 0xE8
+#define MDSS_MDP_REG_LM_CURSOR_STRIDE 0xDC
+#define MDSS_MDP_REG_LM_CURSOR_FORMAT 0xEC
+#define MDSS_MDP_REG_LM_CURSOR_BASE_ADDR 0xF0
+#define MDSS_MDP_REG_LM_CURSOR_START_XY 0xF4
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG 0xF8
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM 0xFC
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0 0x100
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1 0x104
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0 0x108
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1 0x10C
+
+#define MDSS_MDP_REG_LM_GC_LUT_BASE 0x110
+
+#define MDSS_MDP_LM_BORDER_COLOR (1 << 24)
+#define MDSS_MDP_LM_CURSOR_OUT (1 << 25)
+#define MDSS_MDP_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define MDSS_MDP_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define MDSS_MDP_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define MDSS_MDP_BLEND_FG_INV_ALPHA (1 << 2)
+#define MDSS_MDP_BLEND_FG_MOD_ALPHA (1 << 3)
+#define MDSS_MDP_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define MDSS_MDP_BLEND_FG_TRANSP_EN (1 << 5)
+#define MDSS_MDP_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define MDSS_MDP_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define MDSS_MDP_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define MDSS_MDP_BLEND_BG_INV_ALPHA (1 << 10)
+#define MDSS_MDP_BLEND_BG_MOD_ALPHA (1 << 11)
+#define MDSS_MDP_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define MDSS_MDP_BLEND_BG_TRANSP_EN (1 << 13)
+
+enum mdss_mdp_writeback_index {
+ MDSS_MDP_WRITEBACK0,
+ MDSS_MDP_WRITEBACK1,
+ MDSS_MDP_WRITEBACK2,
+ MDSS_MDP_WRITEBACK3,
+ MDSS_MDP_WRITEBACK4,
+ MDSS_MDP_MAX_WRITEBACK
+};
+
+#define MDSS_MDP_REG_WB_DST_FORMAT 0x000
+#define MDSS_MDP_REG_WB_DST_OP_MODE 0x004
+#define MDSS_MDP_REG_WB_DST_PACK_PATTERN 0x008
+#define MDSS_MDP_REG_WB_DST0_ADDR 0x00C
+#define MDSS_MDP_REG_WB_DST1_ADDR 0x010
+#define MDSS_MDP_REG_WB_DST2_ADDR 0x014
+#define MDSS_MDP_REG_WB_DST3_ADDR 0x018
+#define MDSS_MDP_REG_WB_DST_YSTRIDE0 0x01C
+#define MDSS_MDP_REG_WB_DST_YSTRIDE1 0x020
+#define MDSS_MDP_REG_WB_DST_YSTRIDE1 0x020
+#define MDSS_MDP_REG_WB_DST_DITHER_BITDEPTH 0x024
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW0 0x030
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW1 0x034
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW2 0x038
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW3 0x03C
+#define MDSS_MDP_REG_WB_DST_WRITE_CONFIG 0x048
+#define MDSS_MDP_REG_WB_ROTATION_DNSCALER 0x050
+#define MDSS_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER 0x054
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_X_C03 0x060
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_X_C12 0x064
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C03 0x068
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12 0x06C
+#define MDSS_MDP_REG_WB_OUT_SIZE 0x074
+#define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078
+#define MDSS_MDP_REG_WB_CSC_BASE 0x260
+#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
+#define MDSS_MDP_REG_WB_CDP_CTRL 0x2B4
+
+#define MDSS_MDP_MAX_AD_AL 65535
+#define MDSS_MDP_MAX_AD_STR 255
+#define MDSS_MDP_AD_BL_SCALE 4095
+
+#define MDSS_MDP_REG_AD_BYPASS 0x000
+#define MDSS_MDP_REG_AD_CTRL_0 0x004
+#define MDSS_MDP_REG_AD_CTRL_1 0x008
+#define MDSS_MDP_REG_AD_FRAME_SIZE 0x00C
+#define MDSS_MDP_REG_AD_CON_CTRL_0 0x010
+#define MDSS_MDP_REG_AD_CON_CTRL_1 0x014
+#define MDSS_MDP_REG_AD_STR_MAN 0x018
+#define MDSS_MDP_REG_AD_VAR 0x01C
+#define MDSS_MDP_REG_AD_DITH 0x020
+#define MDSS_MDP_REG_AD_DITH_CTRL 0x024
+#define MDSS_MDP_REG_AD_AMP_LIM 0x028
+#define MDSS_MDP_REG_AD_SLOPE 0x02C
+#define MDSS_MDP_REG_AD_BW_LVL 0x030
+#define MDSS_MDP_REG_AD_LOGO_POS 0x034
+#define MDSS_MDP_REG_AD_LUT_FI 0x038
+#define MDSS_MDP_REG_AD_LUT_CC 0x07C
+#define MDSS_MDP_REG_AD_STR_LIM 0x0C8
+#define MDSS_MDP_REG_AD_CALIB_AB 0x0CC
+#define MDSS_MDP_REG_AD_CALIB_CD 0x0D0
+#define MDSS_MDP_REG_AD_MODE_SEL 0x0D4
+#define MDSS_MDP_REG_AD_TFILT_CTRL 0x0D8
+#define MDSS_MDP_REG_AD_BL_MINMAX 0x0DC
+#define MDSS_MDP_REG_AD_BL 0x0E0
+#define MDSS_MDP_REG_AD_BL_MAX 0x0E8
+#define MDSS_MDP_REG_AD_AL 0x0EC
+#define MDSS_MDP_REG_AD_AL_MIN 0x0F0
+#define MDSS_MDP_REG_AD_AL_FILT 0x0F4
+#define MDSS_MDP_REG_AD_CFG_BUF 0x0F8
+#define MDSS_MDP_REG_AD_LUT_AL 0x100
+#define MDSS_MDP_REG_AD_TARG_STR 0x144
+#define MDSS_MDP_REG_AD_START_CALC 0x148
+#define MDSS_MDP_REG_AD_STR_OUT 0x14C
+#define MDSS_MDP_REG_AD_BL_OUT 0x154
+#define MDSS_MDP_REG_AD_CALC_DONE 0x158
+#define MDSS_MDP_REG_AD_FRAME_END 0x15C
+#define MDSS_MDP_REG_AD_PROCS_END 0x160
+#define MDSS_MDP_REG_AD_FRAME_START 0x164
+#define MDSS_MDP_REG_AD_PROCS_START 0x168
+#define MDSS_MDP_REG_AD_TILE_CTRL 0x16C
+
+enum mdss_mdp_dspp_index {
+ MDSS_MDP_DSPP0,
+ MDSS_MDP_DSPP1,
+ MDSS_MDP_DSPP2,
+ MDSS_MDP_DSPP3,
+ MDSS_MDP_MAX_DSPP
+};
+
+#define MDSS_MDP_REG_DSPP_OP_MODE 0x000
+#define MDSS_MDP_REG_DSPP_PCC_BASE 0x030
+#define MDSS_MDP_REG_DSPP_DITHER_DEPTH 0x150
+#define MDSS_MDP_REG_DSPP_HIST_CTL_BASE 0x210
+#define MDSS_MDP_REG_DSPP_HIST_DATA_BASE 0x22C
+#define MDSS_MDP_REG_DSPP_HIST_LUT_BASE 0x230
+#define MDSS_MDP_REG_DSPP_PA_BASE 0x238
+#define MDSS_MDP_REG_DSPP_SIX_ZONE_BASE 0x248
+#define MDSS_MDP_REG_DSPP_GAMUT_BASE 0x2DC
+#define MDSS_MDP_REG_DSPP_GC_BASE 0x2B0
+
+#define MDSS_MDP_DSPP_OP_IGC_LUT_EN BIT(0)
+#define MDSS_MDP_DSPP_OP_PA_SAT_ZERO_EXP_EN BIT(1)
+#define MDSS_MDP_DSPP_OP_PA_MEM_PROTECT_EN BIT(2)
+#define MDSS_MDP_DSPP_OP_PCC_EN BIT(4)
+#define MDSS_MDP_DSPP_OP_PA_MEM_COL_SKIN_MASK BIT(5)
+#define MDSS_MDP_DSPP_OP_PA_MEM_COL_FOL_MASK BIT(6)
+#define MDSS_MDP_DSPP_OP_PA_MEM_COL_SKY_MASK BIT(7)
+#define MDSS_MDP_DSPP_OP_DST_DITHER_EN BIT(8)
+#define MDSS_MDP_DSPP_OP_HIST_EN BIT(16)
+#define MDSS_MDP_DSPP_OP_HIST_LUTV_EN BIT(19)
+#define MDSS_MDP_DSPP_OP_PA_EN BIT(20)
+#define MDSS_MDP_DSPP_OP_ARGC_LUT_EN BIT(22)
+#define MDSS_MDP_DSPP_OP_GAMUT_EN BIT(23)
+#define MDSS_MDP_DSPP_OP_GAMUT_PCC_ORDER BIT(24)
+#define MDSS_MDP_DSPP_OP_PA_HUE_MASK BIT(25)
+#define MDSS_MDP_DSPP_OP_PA_SAT_MASK BIT(26)
+#define MDSS_MDP_DSPP_OP_PA_VAL_MASK BIT(27)
+#define MDSS_MDP_DSPP_OP_PA_CONT_MASK BIT(28)
+#define MDSS_MDP_DSPP_OP_PA_SIX_ZONE_HUE_MASK BIT(29)
+#define MDSS_MDP_DSPP_OP_PA_SIX_ZONE_SAT_MASK BIT(30)
+#define MDSS_MDP_DSPP_OP_PA_SIX_ZONE_VAL_MASK BIT(31)
+
+enum mdss_mpd_intf_index {
+ MDSS_MDP_NO_INTF,
+ MDSS_MDP_INTF0,
+ MDSS_MDP_INTF1,
+ MDSS_MDP_INTF2,
+ MDSS_MDP_INTF3,
+ MDSS_MDP_MAX_INTF
+};
+
+#define MDSS_MDP_REG_INTF_TIMING_ENGINE_EN 0x000
+#define MDSS_MDP_REG_INTF_CONFIG 0x004
+#define MDSS_MDP_REG_INTF_HSYNC_CTL 0x008
+#define MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0 0x00C
+#define MDSS_MDP_REG_INTF_VSYNC_PERIOD_F1 0x010
+#define MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define MDSS_MDP_REG_INTF_DISPLAY_V_START_F0 0x01C
+#define MDSS_MDP_REG_INTF_DISPLAY_V_START_F1 0x020
+#define MDSS_MDP_REG_INTF_DISPLAY_V_END_F0 0x024
+#define MDSS_MDP_REG_INTF_DISPLAY_V_END_F1 0x028
+#define MDSS_MDP_REG_INTF_ACTIVE_V_START_F0 0x02C
+#define MDSS_MDP_REG_INTF_ACTIVE_V_START_F1 0x030
+#define MDSS_MDP_REG_INTF_ACTIVE_V_END_F0 0x034
+#define MDSS_MDP_REG_INTF_ACTIVE_V_END_F1 0x038
+#define MDSS_MDP_REG_INTF_DISPLAY_HCTL 0x03C
+#define MDSS_MDP_REG_INTF_ACTIVE_HCTL 0x040
+#define MDSS_MDP_REG_INTF_BORDER_COLOR 0x044
+#define MDSS_MDP_REG_INTF_UNDERFLOW_COLOR 0x048
+#define MDSS_MDP_REG_INTF_HSYNC_SKEW 0x04C
+#define MDSS_MDP_REG_INTF_POLARITY_CTL 0x050
+#define MDSS_MDP_REG_INTF_TEST_CTL 0x054
+#define MDSS_MDP_REG_INTF_TP_COLOR0 0x058
+#define MDSS_MDP_REG_INTF_TP_COLOR1 0x05C
+#define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define MDSS_MDP_REG_INTF_FRAME_COUNT 0x0AC
+#define MDSS_MDP_REG_INTF_LINE_COUNT 0x0B0
+
+#define MDSS_MDP_REG_INTF_DEFLICKER_CONFIG 0x0F0
+#define MDSS_MDP_REG_INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define MDSS_MDP_REG_INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define MDSS_MDP_REG_INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define MDSS_MDP_REG_INTF_PANEL_FORMAT 0x090
+#define MDSS_MDP_REG_INTF_TPG_ENABLE 0x100
+#define MDSS_MDP_REG_INTF_TPG_MAIN_CONTROL 0x104
+#define MDSS_MDP_REG_INTF_TPG_VIDEO_CONFIG 0x108
+#define MDSS_MDP_REG_INTF_TPG_COMPONENT_LIMITS 0x10C
+#define MDSS_MDP_REG_INTF_TPG_RECTANGLE 0x110
+#define MDSS_MDP_REG_INTF_TPG_INITIAL_VALUE 0x114
+#define MDSS_MDP_REG_INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define MDSS_MDP_REG_INTF_TPG_RGB_MAPPING 0x11C
+#define MDSS_MDP_REG_INTF_PROG_FETCH_START 0x170
+#define MDSS_MDP_REG_INTF_INTR_EN 0x1C0
+#define MDSS_MDP_REG_INTF_INTR_STATUS 0x1C4
+#define MDSS_MDP_REG_INTF_INTR_CLEAR 0x1C8
+#define MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF 0x250
+#define MDSS_MDP_REG_INTF_VBLANK_END_CONF 0x264
+
+#define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define MDSS_MDP_REG_INTF_FRAME_COUNT 0x0AC
+#define MDSS_MDP_REG_INTF_LINE_COUNT 0x0B0
+#define MDSS_MDP_PANEL_FORMAT_RGB888 0x213F
+#define MDSS_MDP_PANEL_FORMAT_RGB666 0x212A
+
+#define MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB BIT(7)
+
+enum mdss_mdp_pingpong_index {
+ MDSS_MDP_PINGPONG0,
+ MDSS_MDP_PINGPONG1,
+ MDSS_MDP_PINGPONG2,
+ MDSS_MDP_PINGPONG3,
+ MDSS_MDP_MAX_PINGPONG
+};
+
+#define MDSS_MDP_REG_PP_TEAR_CHECK_EN 0x000
+#define MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC 0x004
+#define MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT 0x008
+#define MDSS_MDP_REG_PP_SYNC_WRCOUNT 0x00C
+#define MDSS_MDP_REG_PP_VSYNC_INIT_VAL 0x010
+#define MDSS_MDP_REG_PP_INT_COUNT_VAL 0x014
+#define MDSS_MDP_REG_PP_SYNC_THRESH 0x018
+#define MDSS_MDP_REG_PP_START_POS 0x01C
+#define MDSS_MDP_REG_PP_RD_PTR_IRQ 0x020
+#define MDSS_MDP_REG_PP_WR_PTR_IRQ 0x024
+#define MDSS_MDP_REG_PP_OUT_LINE_COUNT 0x028
+#define MDSS_MDP_REG_PP_LINE_COUNT 0x02C
+#define MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG 0x030
+
+#define MDSS_MDP_REG_PP_FBC_MODE 0x034
+#define MDSS_MDP_REG_PP_FBC_BUDGET_CTL 0x038
+#define MDSS_MDP_REG_PP_FBC_LOSSY_MODE 0x03C
+#define MDSS_MDP_REG_PP_DSC_MODE 0x0a0
+#define MDSS_MDP_REG_PP_DCE_DATA_IN_SWAP 0x0ac
+#define MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP 0x0c8
+
+#define MDSS_MDP_DSC_0_OFFSET 0x80000
+#define MDSS_MDP_DSC_1_OFFSET 0x80400
+
+#define MDSS_MDP_REG_DSC_COMMON_MODE 0x000
+#define MDSS_MDP_REG_DSC_ENC 0x004
+#define MDSS_MDP_REG_DSC_PICTURE 0x008
+#define MDSS_MDP_REG_DSC_SLICE 0x00c
+#define MDSS_MDP_REG_DSC_CHUNK_SIZE 0x010
+#define MDSS_MDP_REG_DSC_DELAY 0x014
+#define MDSS_MDP_REG_DSC_SCALE_INITIAL 0x018
+#define MDSS_MDP_REG_DSC_SCALE_DEC_INTERVAL 0x01c
+#define MDSS_MDP_REG_DSC_SCALE_INC_INTERVAL 0x020
+#define MDSS_MDP_REG_DSC_FIRST_LINE_BPG_OFFSET 0x024
+#define MDSS_MDP_REG_DSC_BPG_OFFSET 0x028
+#define MDSS_MDP_REG_DSC_DSC_OFFSET 0x02c
+#define MDSS_MDP_REG_DSC_FLATNESS 0x030
+#define MDSS_MDP_REG_DSC_RC_MODEL_SIZE 0x034
+#define MDSS_MDP_REG_DSC_RC 0x038
+#define MDSS_MDP_REG_DSC_RC_BUF_THRESH 0x03c
+#define MDSS_MDP_REG_DSC_RANGE_MIN_QP 0x074
+#define MDSS_MDP_REG_DSC_RANGE_MAX_QP 0x0b0
+#define MDSS_MDP_REG_DSC_RANGE_BPG_OFFSET 0x0ec
+
+#define MDSS_MDP_REG_SMP_ALLOC_W0 0x00080
+#define MDSS_MDP_REG_SMP_ALLOC_R0 0x00130
+
+#define MDSS_MDP_UP_MISR_SEL 0x2A0
+#define MDSS_MDP_UP_MISR_CTRL_MDP 0x2A4
+#define MDSS_MDP_UP_MISR_SIGN_MDP 0x2A8
+#define MDSS_MDP_UP_MISR_LMIX_SEL_OFFSET 0x4C
+
+#define MDSS_MDP_LP_MISR_SEL 0x350
+#define MDSS_MDP_LP_MISR_CTRL_MDP 0x354
+#define MDSS_MDP_LP_MISR_CTRL_HDMI 0x358
+#define MDSS_MDP_LP_MISR_CTRL_EDP 0x35C
+#define MDSS_MDP_LP_MISR_CTRL_DSI0 0x360
+#define MDSS_MDP_LP_MISR_CTRL_DSI1 0x364
+
+#define MDSS_MDP_LP_MISR_SIGN_MDP 0x368
+#define MDSS_MDP_LP_MISR_SIGN_EDP 0x36C
+#define MDSS_MDP_LP_MISR_SIGN_HDMI 0x370
+#define MDSS_MDP_LP_MISR_SIGN_DSI0 0x374
+#define MDSS_MDP_LP_MISR_SIGN_DSI1 0x378
+
+#define MDSS_MDP_MISR_CTRL_FRAME_COUNT_MASK 0xFF
+#define MDSS_MDP_MISR_CTRL_ENABLE BIT(8)
+#define MDSS_MDP_MISR_CTRL_STATUS BIT(9)
+#define MDSS_MDP_MISR_CTRL_STATUS_CLEAR BIT(10)
+#define MDSS_MDP_LP_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+#define MDSS_MDP_LP_MISR_SEL_LMIX0_BLEND 0x08
+#define MDSS_MDP_LP_MISR_SEL_LMIX0_GC 0x09
+#define MDSS_MDP_LP_MISR_SEL_LMIX1_BLEND 0x0A
+#define MDSS_MDP_LP_MISR_SEL_LMIX1_GC 0x0B
+#define MDSS_MDP_LP_MISR_SEL_LMIX2_BLEND 0x0C
+#define MDSS_MDP_LP_MISR_SEL_LMIX2_GC 0x0D
+#define MDSS_MDP_LP_MISR_SEL_LMIX3_BLEND 0x0E
+#define MDSS_MDP_LP_MISR_SEL_LMIX3_GC 0x0F
+#define MDSS_MDP_LP_MISR_SEL_LMIX4_BLEND 0x10
+#define MDSS_MDP_LP_MISR_SEL_LMIX4_GC 0x11
+
+#define MDSS_MDP_LAYER_MIXER_MISR_CTRL 0x380
+#define MDSS_MDP_LAYER_MIXER_MISR_SIGNATURE \
+ (MDSS_MDP_LAYER_MIXER_MISR_CTRL + 0x4)
+
+#define MDSS_MDP_INTF_MISR_CTRL 0x180
+#define MDSS_MDP_INTF_MISR_SIGNATURE (MDSS_MDP_INTF_MISR_CTRL + 0x4)
+#define MDSS_MDP_INTF_CMD_MISR_CTRL (MDSS_MDP_INTF_MISR_CTRL + 0x8)
+#define MDSS_MDP_INTF_CMD_MISR_SIGNATURE (MDSS_MDP_INTF_MISR_CTRL + 0xC)
+
+#define MDSS_MDP_REG_CDM_CSC_10_OPMODE 0x000
+#define MDSS_MDP_REG_CDM_CSC_10_BASE 0x004
+
+#define MDSS_MDP_REG_CDM_CDWN2_OP_MODE 0x100
+#define MDSS_MDP_REG_CDM_CDWN2_CLAMP_OUT 0x104
+#define MDSS_MDP_REG_CDM_CDWN2_PARAMS_3D_0 0x108
+#define MDSS_MDP_REG_CDM_CDWN2_PARAMS_3D_1 0x10C
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_V 0x128
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define MDSS_MDP_REG_CDM_CDWN2_OUT_SIZE 0x130
+
+#define MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE 0x200
+
+/* Following offsets are with respect to MDP base */
+#define MDSS_MDP_MDP_OUT_CTL_0 0x410
+#define MDSS_MDP_INTF_CMD_MISR_CTRL (MDSS_MDP_INTF_MISR_CTRL + 0x8)
+#define MDSS_MDP_INTF_CMD_MISR_SIGNATURE (MDSS_MDP_INTF_MISR_CTRL + 0xC)
+/* following offsets are with respect to MDP VBIF base */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_RD_LIM_CONF 0x0B0
+#define MMSS_VBIF_WR_LIM_CONF 0x0C0
+#define MMSS_VBIF_OUT_RD_LIM_CONF0 0x0D0
+
+#define MMSS_VBIF_XIN_HALT_CTRL0 0x200
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_AXI_HALT_CTRL0 0x208
+#define MMSS_VBIF_AXI_HALT_CTRL1 0x20C
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+#define MDSS_VBIF_QOS_REMAP_BASE 0x020
+#define MDSS_VBIF_QOS_REMAP_ENTRIES 0x4
+
+#define MDSS_VBIF_QOS_RP_REMAP_BASE 0x550
+#define MDSS_VBIF_QOS_LVL_REMAP_BASE 0x570
+
+#define MDSS_VBIF_FIXED_SORT_EN 0x30
+#define MDSS_VBIF_FIXED_SORT_SEL0 0x34
+
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_EN BIT(31)
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
new file mode 100644
index 0000000..7a0542a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -0,0 +1,3527 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_dsi_clk.h"
+#include <linux/interrupt.h>
+
+#define MAX_RECOVERY_TRIALS 10
+#define MAX_SESSIONS 2
+
+#define SPLIT_MIXER_OFFSET 0x800
+
+/* worst-case stop wait: (6 + 2) frame periods at a refresh rate of hz */
+#define STOP_TIMEOUT(hz) msecs_to_jiffies((1000 / hz) * (6 + 2))
+#define POWER_COLLAPSE_TIME msecs_to_jiffies(100)
+/* idle time (64 ms, ~4 frame times at 60 fps) before resources power off */
+#define CMD_MODE_IDLE_TIMEOUT msecs_to_jiffies(16 * 4)
+#define INPUT_EVENT_HANDLER_DELAY_USECS (16000 * 4)
+#define AUTOREFRESH_MAX_FRAME_CNT 6
+
+/* NOTE(review): serializes clock work across sessions — confirm at call sites
+ * (usage of both mutexes is outside this chunk).
+ */
+static DEFINE_MUTEX(cmd_clk_mtx);
+
+static DEFINE_MUTEX(cmd_off_mtx);
+
+/* autorefresh state machine states; maintained under ctx->autorefresh_lock */
+enum mdss_mdp_cmd_autorefresh_state {
+	MDP_AUTOREFRESH_OFF,
+	MDP_AUTOREFRESH_ON_REQUESTED,
+	MDP_AUTOREFRESH_ON,
+	MDP_AUTOREFRESH_OFF_REQUESTED
+};
+
+/*
+ * Per-session state for a DSI command-mode interface; one entry per session
+ * lives in mdss_mdp_cmd_ctx_list[] (MAX_SESSIONS).
+ */
+struct mdss_mdp_cmd_ctx {
+	struct mdss_mdp_ctl *ctl;
+
+	u32 default_pp_num;
+	u32 current_pp_num;
+	/*
+	 * aux_pp_num will be set only when topology is using split-lm.
+	 * aux_pp_num will be used only when MDSS_QUIRK_DSC_RIGHT_ONLY_PU
+	 * quirk is set and on following partial updates.
+	 *
+	 * right-only update on DUAL_LM_SINGLE_DISPLAY with DSC_MERGE
+	 * right-only update on DUAL_LM_DUAL_DISPLAY with DSC
+	 */
+	u32 aux_pp_num;
+
+	/* NOTE(review): in-use refcount for ctx reuse — confirm at start/stop */
+	u8 ref_cnt;
+	struct completion stop_comp;
+	/* outstanding read-pointer waiters, drained by the rd_ptr irq */
+	atomic_t rdptr_cnt;
+	wait_queue_head_t rdptr_waitq;
+	struct completion pp_done;
+	wait_queue_head_t pp_waitq;
+	struct list_head vsync_handlers;
+	struct list_head lineptr_handlers;
+	int panel_power_state;
+	/* kickoffs in flight, awaiting ping-pong done */
+	atomic_t koff_cnt;
+	/* non-zero while interface is stopped; blocks the DSI_WAIT callback */
+	u32 intf_stopped;
+	struct mutex mdp_rdptr_lock;
+	struct mutex mdp_wrptr_lock;
+	struct mutex clk_mtx;
+	spinlock_t clk_lock;
+	spinlock_t koff_lock;
+	struct work_struct gate_clk_work;
+	struct delayed_work delayed_off_clk_work;
+	struct work_struct pp_done_work;
+	struct work_struct early_wakeup_clk_work;
+	atomic_t pp_done_cnt;
+	struct completion rdptr_done;
+
+	/*
+	 * While autorefresh is on, partial update is not supported. So
+	 * autorefresh state machine is always maintained through master ctx.
+	 */
+	struct mutex autorefresh_lock;
+	struct completion autorefresh_ppdone;
+	enum mdss_mdp_cmd_autorefresh_state autorefresh_state;
+	int autorefresh_frame_cnt;
+	bool ignore_external_te;
+	struct completion autorefresh_done;
+
+	int vsync_irq_cnt;
+	int lineptr_irq_cnt;
+	bool lineptr_enabled;
+	u32 prev_wr_ptr_irq;
+
+	struct mdss_intf_recovery intf_recovery;
+	struct mdss_intf_recovery intf_mdp_callback;
+	struct mdss_mdp_cmd_ctx *sync_ctx; /* for partial update */
+	u32 pp_timeout_report_cnt;
+	/* true when this ctx drives the slave ping-pong in pp-split mode */
+	bool pingpong_split_slave;
+};
+
+/* one command-mode context per session
+ * NOTE(review): file-scope but not static — confirm no external users before
+ * narrowing linkage.
+ */
+struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
+
+/* forward declarations */
+static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx);
+static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx);
+static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx);
+static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg);
+static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl);
+static int mdss_mdp_setup_vsync(struct mdss_mdp_cmd_ctx *ctx, bool enable);
+
+/*
+ * True when a right-only partial update on a DSC topology needs the
+ * auxiliary ping-pong (MDSS_QUIRK_DSC_RIGHT_ONLY_PU): only on the master
+ * ctl, with compression enabled, on dual-lm dual-display or dual-lm
+ * single-display with a single DSC encoder, and only when the left mixer
+ * has no valid ROI while the right one does.
+ */
+static bool __mdss_mdp_cmd_is_aux_pp_needed(struct mdss_data_type *mdata,
+	struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!mdata || !mctl || !mctl->is_master)
+		return false;
+
+	if (!mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU))
+		return false;
+
+	pinfo = &mctl->panel_data->panel_info;
+	if (!is_dsc_compression(pinfo))
+		return false;
+
+	if ((mctl->mfd->split_mode != MDP_DUAL_LM_DUAL_DISPLAY) &&
+	    !((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+	      (pinfo->dsc_enc_total == 1)))
+		return false;
+
+	/* right-only update: no left ROI, valid right ROI */
+	return !mctl->mixer_left->valid_roi && mctl->mixer_right->valid_roi;
+}
+
+/* true when the panel for this ctx is fully powered off */
+static bool __mdss_mdp_cmd_is_panel_power_off(struct mdss_mdp_cmd_ctx *ctx)
+{
+	return mdss_panel_is_power_off(ctx->panel_power_state);
+}
+
+/* true when the panel for this ctx is powered on in interactive mode */
+static bool __mdss_mdp_cmd_is_panel_power_on_interactive(
+	struct mdss_mdp_cmd_ctx *ctx)
+{
+	return mdss_panel_is_power_on_interactive(ctx->panel_power_state);
+}
+
+/*
+ * mdss_mdp_cmd_line_count() - sample the tear-check line counter
+ * @ctl: control path whose mixer's ping-pong block is read
+ *
+ * Returns the line count normalized against the programmed VSYNC_INIT_VAL,
+ * compensating for wrap-around at SYNC_CONFIG_HEIGHT, or 0xffff when the
+ * count cannot be read (no mixer, or inconsistent init/height programming).
+ *
+ * Fix: the original duplicated the MDP_BLOCK_POWER_OFF call before each of
+ * its three exits; use a single goto-cleanup label so the POWER_ON vote is
+ * always balanced in one place.
+ */
+static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_mixer *mixer;
+	u32 cnt = 0xffff; /* init it to an invalid value */
+	u32 init;
+	u32 height;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/* prefer the left mixer, fall back to the right one */
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer)
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+	if (!mixer)
+		goto done;
+
+	init = mdss_mdp_pingpong_read(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_VSYNC_INIT_VAL) & 0xffff;
+	height = mdss_mdp_pingpong_read(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
+
+	/* programmed height must be at least the init value */
+	if (height < init)
+		goto done;
+
+	cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
+
+	if (cnt < init) /* wrap around happened at height */
+		cnt += (height - init);
+	else
+		cnt -= init;
+
+	pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
+done:
+	/* single cleanup point: balance the POWER_ON vote taken above */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return cnt;
+}
+
+/*
+ * mdss_mdp_tearcheck_enable() - enable/disable tear check on active PPs
+ * @ctl: master control path
+ * @enable: requested state; effective only if the panel has tear_check_en
+ *
+ * Writes TEAR_CHECK_EN for the master's left mixer, the split ctl's mixer
+ * (if present), the ping-pong split slave block, and - for dual-lm
+ * single-display with partial update - the right mixer as well.
+ *
+ * Fix: mdss_mdp_mixer_get() can return NULL; the original checked only the
+ * first (left) mixer and dereferenced the sctl/right-mixer results blindly.
+ *
+ * Return: 0 on success, -ENODEV when panel data or the left mixer is missing.
+ */
+static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_pp_tear_check *te;
+	struct mdss_mdp_mixer *mixer =
+		mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+	if (IS_ERR_OR_NULL(ctl->panel_data)) {
+		pr_err("no panel data\n");
+		return -ENODEV;
+	}
+
+	if (IS_ERR_OR_NULL(mixer)) {
+		pr_err("mixer not configured\n");
+		return -ENODEV;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	te = &ctl->panel_data->panel_info.te;
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+		(te ? te->tear_check_en : 0) && enable);
+
+	/*
+	 * When there are two controls, driver needs to enable
+	 * tear check configuration for both.
+	 */
+	if (sctl) {
+		mixer = mdss_mdp_mixer_get(sctl, MDSS_MDP_MIXER_MUX_LEFT);
+		te = &sctl->panel_data->panel_info.te;
+		if (mixer)
+			mdss_mdp_pingpong_write(mixer->pingpong_base,
+				MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+				(te ? te->tear_check_en : 0) && enable);
+	}
+
+	/*
+	 * In the case of pingpong split, there is no second
+	 * control and enables only slave tear check block as
+	 * defined in slave_pingpong_base.
+	 */
+	if (is_pingpong_split(ctl->mfd))
+		mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
+			MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+			(te ? te->tear_check_en : 0) && enable);
+
+	/*
+	 * In case of DUAL_LM_SINGLE_DISPLAY, always keep right PP enabled
+	 * if partial update is enabled. So when right-only update comes then
+	 * by changing CTL topology, HW switches directly to right PP.
+	 */
+	if (ctl->panel_data->panel_info.partial_update_enabled &&
+	    is_dual_lm_single_display(ctl->mfd)) {
+
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+		if (mixer)
+			mdss_mdp_pingpong_write(mixer->pingpong_base,
+				MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+				(te ? te->tear_check_en : 0) && enable);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_tearcheck_cfg() - program tear-check for one ping-pong block
+ * @mixer: mixer whose ping-pong register block gets programmed
+ * @ctx: command-mode context; for a ping-pong split slave the writes are
+ *	 redirected to mdata->slave_pingpong_base
+ * @locked: forwarded to the vsync clock helpers
+ *	 (NOTE(review): exact locking contract defined by
+ *	 mdss_mdp_vsync_clk_enable(), outside this chunk)
+ *
+ * Derives the vsync counter increment per line from the vsync clock rate
+ * and the panel vertical total/frame rate, then writes sync config, init
+ * value, rd/wr pointer irq lines, start position and thresholds.
+ */
+static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
+	struct mdss_mdp_cmd_ctx *ctx, bool locked)
+{
+	struct mdss_mdp_pp_tear_check *te = NULL;
+	struct mdss_panel_info *pinfo;
+	u32 vsync_clk_speed_hz, total_lines, vclks_line, cfg = 0;
+	char __iomem *pingpong_base;
+	struct mdss_mdp_ctl *ctl = ctx->ctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (IS_ERR_OR_NULL(ctl->panel_data)) {
+		pr_err("no panel data\n");
+		return -ENODEV;
+	}
+
+	pinfo = &ctl->panel_data->panel_info;
+	te = &ctl->panel_data->panel_info.te;
+
+	mdss_mdp_vsync_clk_enable(1, locked);
+
+	vsync_clk_speed_hz =
+		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_VSYNC, locked);
+
+	total_lines = mdss_panel_get_vtotal(pinfo);
+
+	total_lines *= pinfo->mipi.frame_rate;
+
+	/* vsync clock ticks per display line; guard against div-by-zero */
+	vclks_line = (total_lines) ? vsync_clk_speed_hz/total_lines : 0;
+
+	/*
+	 * BIT(20) is set for hw vsync mode (external TE); BIT(19) presumably
+	 * enables the counter — NOTE(review): confirm bit meanings vs HW spec.
+	 */
+	cfg = BIT(19);
+	if (pinfo->mipi.hw_vsync_mode)
+		cfg |= BIT(20);
+
+	/* rescale to the panel's measured refresh (refx100 = refresh * 100) */
+	if (te->refx100) {
+		vclks_line = vclks_line * pinfo->mipi.frame_rate *
+			100 / te->refx100;
+	} else {
+		pr_warn("refx100 cannot be zero! Use 6000 as default\n");
+		vclks_line = vclks_line * pinfo->mipi.frame_rate *
+			100 / 6000;
+	}
+
+	cfg |= vclks_line;
+
+	pr_debug("%s: yres=%d vclks=%x height=%d init=%d rd=%d start=%d wr=%d\n",
+		__func__, pinfo->yres, vclks_line, te->sync_cfg_height,
+		te->vsync_init_val, te->rd_ptr_irq, te->start_pos,
+		te->wr_ptr_irq);
+	pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d\n",
+		te->sync_threshold_start, te->sync_threshold_continue,
+		ctx->pingpong_split_slave);
+
+	pingpong_base = mixer->pingpong_base;
+
+	if (ctx->pingpong_split_slave)
+		pingpong_base = mdata->slave_pingpong_base;
+
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
+		te ? te->sync_cfg_height : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_VSYNC_INIT_VAL,
+		te ? te->vsync_init_val : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_RD_PTR_IRQ,
+		te ? te->rd_ptr_irq : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_WR_PTR_IRQ,
+		te ? te->wr_ptr_irq : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_START_POS,
+		te ? te->start_pos : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_THRESH,
+		te ? ((te->sync_threshold_continue << 16) |
+		 te->sync_threshold_start) : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_WRCOUNT,
+		te ? (te->start_pos + te->sync_threshold_start + 1) : 0);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_tearcheck_setup() - configure tear check for all active PPs
+ * @ctx: command-mode context
+ * @locked: forwarded to mdss_mdp_cmd_tearcheck_cfg()
+ *
+ * First clears any autorefresh left enabled by the bootloader/splash
+ * (master, right mixer in split-lm, and the pp-split slave block) to avoid
+ * corruption, then programs tear check on the left mixer and - for dual-lm
+ * single-display with partial update - on the right mixer too.
+ */
+static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
+		bool locked)
+{
+	int rc = 0;
+	struct mdss_mdp_mixer *mixer = NULL, *mixer_right = NULL;
+	struct mdss_mdp_ctl *ctl = ctx->ctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 offset = 0;
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (mixer) {
+		/*
+		 * Disable auto refresh mode, if enabled in splash to
+		 * avoid corruption.
+		 */
+		if (mdss_mdp_pingpong_read(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG) & BIT(31)) {
+			offset = MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG;
+			if (is_pingpong_split(ctl->mfd))
+				writel_relaxed(0x0,
+					(mdata->slave_pingpong_base + offset));
+			if (is_split_lm(ctl->mfd)) {
+				mixer_right =
+					mdss_mdp_mixer_get(ctl,
+						MDSS_MDP_MIXER_MUX_RIGHT);
+				if (mixer_right)
+					writel_relaxed(0x0,
+					(mixer_right->pingpong_base + offset));
+			}
+			mdss_mdp_pingpong_write(mixer->pingpong_base,
+				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
+			pr_debug("%s: disabling auto refresh\n", __func__);
+		}
+		rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
+		if (rc)
+			goto err;
+	}
+
+	/*
+	 * In case of DUAL_LM_SINGLE_DISPLAY, always keep right PP enabled
+	 * if partial update is enabled. So when right-only update comes then
+	 * by changing CTL topology, HW switches directly to right PP.
+	 */
+	if (ctl->panel_data->panel_info.partial_update_enabled &&
+	    is_dual_lm_single_display(ctl->mfd)) {
+
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+		if (mixer)
+			rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
+	}
+err:
+	return rc;
+}
+
+/**
+ * enum mdp_rsrc_ctl_events - events for the resource control state machine
+ * @MDP_RSRC_CTL_EVENT_KICKOFF:
+ *	This event happens at NORMAL priority.
+ *	Event that signals the start of the transfer, regardless of the
+ *	state at which we enter this state (ON/OFF or GATE),
+ *	we must ensure that power state is ON when we return from this
+ *	event.
+ *
+ * @MDP_RSRC_CTL_EVENT_PP_DONE:
+ *	This event happens at INTERRUPT level.
+ *	Event signals the end of the data transfer, when getting this
+ *	event we should have been in ON state, since a transfer was
+ *	ongoing (if this is not the case, then
+ *	there is a bug).
+ *	Since this event is received at interrupt level, by the end of
+ *	the event we haven't changed the power state, but scheduled
+ *	work items to do the transition, so by the end of this event:
+ *	1. A work item is scheduled to go to gate state as soon as
+ *		possible (as soon as scheduler give us the chance)
+ *	2. A delayed work is scheduled to go to OFF after
+ *		CMD_MODE_IDLE_TIMEOUT time. Power State will be updated
+ *		at the end of each work item, to make sure we update
+ *		the status once the transition is fully done.
+ *
+ * @MDP_RSRC_CTL_EVENT_STOP:
+ *	This event happens at NORMAL priority.
+ *	When we get this event, we are expected to wait to finish any
+ *	pending data transfer and turn off all the clocks/resources,
+ *	so after return from this event we must be in off
+ *	state.
+ *
+ * @MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP:
+ *	This event happens at NORMAL priority from a work item.
+ *	Event signals that there will be a frame update soon and mdp should wake
+ *	up early to update the frame with little latency.
+ */
+enum mdp_rsrc_ctl_events {
+	MDP_RSRC_CTL_EVENT_KICKOFF = 1,
+	MDP_RSRC_CTL_EVENT_PP_DONE,
+	MDP_RSRC_CTL_EVENT_STOP,
+	MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP
+};
+
+/* power states for the shared mdp resources (clocks, bw, mmu) */
+enum {
+	MDP_RSRC_CTL_STATE_OFF,
+	MDP_RSRC_CTL_STATE_ON,
+	MDP_RSRC_CTL_STATE_GATE,
+};
+
+/* debugging helper: printable name for a MDP_RSRC_CTL_EVENT_* code */
+static char *get_sw_event_name(u32 sw_event)
+{
+	if (sw_event == MDP_RSRC_CTL_EVENT_KICKOFF)
+		return "KICKOFF";
+	if (sw_event == MDP_RSRC_CTL_EVENT_PP_DONE)
+		return "PP_DONE";
+	if (sw_event == MDP_RSRC_CTL_EVENT_STOP)
+		return "STOP";
+	if (sw_event == MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP)
+		return "EARLY_WAKE_UP";
+	return "UNKNOWN";
+}
+
+/* debugging helper: printable name for a MDP_RSRC_CTL_STATE_* code */
+static char *get_clk_pwr_state_name(u32 pwr_state)
+{
+	if (pwr_state == MDP_RSRC_CTL_STATE_ON)
+		return "STATE_ON";
+	if (pwr_state == MDP_RSRC_CTL_STATE_OFF)
+		return "STATE_OFF";
+	if (pwr_state == MDP_RSRC_CTL_STATE_GATE)
+		return "STATE_GATE";
+	return "UNKNOWN";
+}
+
+/**
+ * mdss_mdp_get_split_display_ctls() - get the display controllers
+ * @ctl: Pointer to pointer to the controller used to do the operation.
+ *	This can be the pointer to the master or slave of a display with
+ *	the MDP_DUAL_LM_DUAL_DISPLAY split mode.
+ * @sctl: Pointer to pointer where it is expected to be set the slave
+ *	controller. Function does not expect any input parameter here.
+ *
+ * This function will populate the pointers to pointers with the controllers of
+ * the split display ordered such way that the first input parameter will be
+ * populated with the master controller and second parameter will be populated
+ * with the slave controller, so the caller can assume both controllers are set
+ * in the right order after return.
+ *
+ * This function can only be called for split configuration that uses two
+ * controllers, it expects that first pointer is the one passed to do the
+ * operation and it can be either the pointer of the master or slave,
+ * since is the job of this function to find and accommodate the master/slave
+ * controllers accordingly.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+int mdss_mdp_get_split_display_ctls(struct mdss_mdp_ctl **ctl,
+	struct mdss_mdp_ctl **sctl)
+{
+	int rc = 0;
+	*sctl = NULL;
+
+	if (*ctl == NULL) {
+		pr_err("%s invalid ctl\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if ((*ctl)->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		*sctl = mdss_mdp_get_split_ctl(*ctl);
+		if (*sctl) {
+			/* pointers are in the correct order */
+			pr_debug("%s ctls in correct order ctl:%d sctl:%d\n",
+				__func__, (*ctl)->num, (*sctl)->num);
+			goto exit;
+		}
+
+		/*
+		 * If we have a split display and we didn't find the
+		 * slave controller from the master, this means that
+		 * ctl is the slave controller, so look for the master.
+		 */
+		*sctl = mdss_mdp_get_main_ctl(*ctl);
+		if (!(*sctl)) {
+			/*
+			 * Bad state, this shouldn't happen: we should
+			 * have both controllers since we are in
+			 * dual-lm, dual-display. Fix: bail out instead
+			 * of swapping a NULL into *ctl, which would
+			 * make callers dereference a NULL master.
+			 */
+			pr_err("%s cannot find master ctl\n",
+				__func__);
+			WARN_ON(1);
+			rc = -EINVAL;
+			goto exit;
+		}
+		/*
+		 * We have both controllers but sctl has the Master,
+		 * swap the pointers so we can keep the master in the
+		 * ctl pointer and control the order in the power
+		 * sequence.
+		 */
+		pr_debug("ctl is not the master, swap pointers\n");
+		swap(*ctl, *sctl);
+	} else {
+		pr_debug("%s no split mode:%d\n", __func__,
+			(*ctl)->mfd->split_mode);
+	}
+exit:
+	return rc;
+}
+
+/**
+ * mdss_mdp_resource_control() - control the state of mdp resources
+ * @ctl: pointer to controller to notify the event.
+ * @sw_event: software event to modify the state of the resources.
+ *
+ * This function implements a state machine to control the state of
+ * the mdp resources (clocks, bw, mmu), the states can be ON, OFF and GATE,
+ * transition between each state is controlled through the MDP_RSRC_CTL_EVENT_
+ * events.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+int mdss_mdp_resource_control(struct mdss_mdp_ctl *ctl, u32 sw_event)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	u32 status;
+	int rc = 0;
+	bool schedule_off = false;
+
+	/* Get both controllers in the correct order for dual displays */
+	mdss_mdp_get_split_display_ctls(&ctl, &sctl);
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s invalid ctx\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* In pingpong split we have single controller, dual context */
+	if (is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+
+	pr_debug("%pS-->%s: task:%s ctl:%d pwr_state:%s event:%s\n",
+		__builtin_return_address(0), __func__,
+		current->group_leader->comm, ctl->num,
+		get_clk_pwr_state_name(mdp5_data->resources_state),
+		get_sw_event_name(sw_event));
+
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event,
+		XLOG_FUNC_ENTRY);
+
+	switch (sw_event) {
+	case MDP_RSRC_CTL_EVENT_KICKOFF:
+		/*
+		 * Cancel any work item pending:
+		 * If POWER-OFF was canceled:
+		 *	Only UNGATE the clocks (resources should be ON)
+		 * If GATE && POWER-OFF were canceled:
+		 *	UNGATE and POWER-ON
+		 * If only GATE was canceled:
+		 *	something can be wrong, OFF should have been
+		 *	canceled as well.
+		 */
+
+		/* update the active only vote */
+		mdata->ao_bw_uc_idx = mdata->curr_bw_uc_idx;
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work)) {
+			pr_debug("%s gate work canceled\n", __func__);
+
+			if (mdp5_data->resources_state !=
+					MDP_RSRC_CTL_STATE_ON)
+				pr_debug("%s unexpected power state\n",
+					__func__);
+		}
+
+		/* Cancel OFF Work Item */
+		if (cancel_delayed_work_sync(&ctx->delayed_off_clk_work)) {
+			pr_debug("%s off work canceled\n", __func__);
+
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_OFF)
+				pr_debug("%s unexpected OFF state\n",
+					__func__);
+		}
+
+		mutex_lock(&ctl->rsrc_lock);
+		MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event, 0x11);
+		/* Transition OFF->ON || GATE->ON (enable clocks) */
+		if ((mdp5_data->resources_state == MDP_RSRC_CTL_STATE_OFF) ||
+			(mdp5_data->resources_state ==
+			MDP_RSRC_CTL_STATE_GATE)) {
+			u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+			/* Enable/Ungate DSI clocks and resources */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+			clk_ctrl.state = MDSS_DSI_CLK_ON;
+			clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+			mdss_mdp_ctl_intf_event /* enable master */
+				(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+				(void *)&clk_ctrl, flags);
+
+			if (sctx) { /* then slave */
+				if (sctx->pingpong_split_slave)
+					flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+				mdss_mdp_ctl_intf_event(sctx->ctl,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl, flags);
+			}
+
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_GATE)
+				mdp5_data->resources_state =
+					MDP_RSRC_CTL_STATE_ON;
+		}
+
+		/* Transition OFF->ON (enable resources)*/
+		if (mdp5_data->resources_state ==
+				MDP_RSRC_CTL_STATE_OFF) {
+			/* Add an extra vote for the ahb bus */
+			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_LOW);
+
+			/* Enable MDP resources */
+			mdss_mdp_cmd_clk_on(ctx);
+			if (sctx)
+				mdss_mdp_cmd_clk_on(sctx);
+
+			mdp5_data->resources_state = MDP_RSRC_CTL_STATE_ON;
+		}
+
+		if (mdp5_data->resources_state != MDP_RSRC_CTL_STATE_ON) {
+			/* we must be ON by the end of kickoff */
+			pr_err("%s unexpected power state during:%s\n",
+				__func__, get_sw_event_name(sw_event));
+			WARN_ON(1);
+		}
+		mutex_unlock(&ctl->rsrc_lock);
+		break;
+	case MDP_RSRC_CTL_EVENT_PP_DONE:
+		if (mdp5_data->resources_state != MDP_RSRC_CTL_STATE_ON) {
+			pr_err("%s unexpected power state during:%s\n",
+				__func__, get_sw_event_name(sw_event));
+			WARN_ON(1);
+		}
+
+		/* Check that no pending kickoff is on-going */
+		status = mdss_mdp_ctl_perf_get_transaction_status(ctl);
+
+		/*
+		 * Same for the slave controller, for cases where
+		 * transaction is only pending in the slave controller.
+		 */
+		if (sctl)
+			status |= mdss_mdp_ctl_perf_get_transaction_status(
+				sctl);
+
+		/*
+		 * Schedule the work items to shut down only if
+		 * 1. no kickoff has been scheduled
+		 * 2. no stop command has been started
+		 * 3. no autorefresh is enabled
+		 * 4. no commit is pending
+		 */
+		if ((status == PERF_STATUS_DONE) &&
+			!ctx->intf_stopped &&
+			(ctx->autorefresh_state == MDP_AUTOREFRESH_OFF) &&
+			!ctl->mfd->atomic_commit_pending) {
+			pr_debug("schedule release after:%d ms\n",
+				jiffies_to_msecs
+				(CMD_MODE_IDLE_TIMEOUT));
+
+			MDSS_XLOG(ctl->num, mdp5_data->resources_state,
+				sw_event, 0x22);
+
+			/* start work item to gate */
+			if (mdata->enable_gate)
+				schedule_work(&ctx->gate_clk_work);
+
+			/* start work item to shut down after delay */
+			schedule_delayed_work(
+					&ctx->delayed_off_clk_work,
+					CMD_MODE_IDLE_TIMEOUT);
+		}
+
+		break;
+	case MDP_RSRC_CTL_EVENT_STOP:
+
+		/* Cancel early wakeup Work Item */
+		if (cancel_work_sync(&ctx->early_wakeup_clk_work))
+			pr_debug("early wakeup work canceled\n");
+
+		/* If we are already OFF, just return */
+		if (mdp5_data->resources_state ==
+				MDP_RSRC_CTL_STATE_OFF) {
+			pr_debug("resources already off\n");
+			goto exit;
+		}
+
+		/* If pp_done is on-going, wait for it to finish */
+		mdss_mdp_cmd_wait4pingpong(ctl, NULL);
+		if (sctl)
+			mdss_mdp_cmd_wait4pingpong(sctl, NULL);
+
+		mutex_lock(&ctx->autorefresh_lock);
+		if (ctx->autorefresh_state != MDP_AUTOREFRESH_OFF) {
+			pr_debug("move autorefresh to disable state\n");
+			mdss_mdp_disable_autorefresh(ctl, sctl);
+		}
+		mutex_unlock(&ctx->autorefresh_lock);
+
+		/*
+		 * If a pp_done happened just before the stop,
+		 * we can still have some work items running;
+		 * cancel any pending works.
+		 */
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work)) {
+			pr_debug("gate work canceled\n");
+
+			if (mdp5_data->resources_state !=
+					MDP_RSRC_CTL_STATE_ON)
+				pr_debug("%s power state is not ON\n",
+					__func__);
+		}
+
+		/* Cancel OFF Work Item */
+		if (cancel_delayed_work_sync(&ctx->delayed_off_clk_work)) {
+			pr_debug("off work canceled\n");
+
+
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_OFF)
+				pr_debug("%s unexpected OFF state\n",
+					__func__);
+		}
+
+		mutex_lock(&ctl->rsrc_lock);
+		MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event, 0x33);
+		if ((mdp5_data->resources_state == MDP_RSRC_CTL_STATE_ON) ||
+				(mdp5_data->resources_state
+				== MDP_RSRC_CTL_STATE_GATE)) {
+
+			/* Enable MDP clocks if gated */
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_GATE)
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+			/* First Power off slave DSI (if present) */
+			if (sctx)
+				mdss_mdp_cmd_clk_off(sctx);
+
+			/* Now Power off master DSI */
+			mdss_mdp_cmd_clk_off(ctx);
+
+			/* Remove extra vote for the ahb bus */
+			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+
+
+			/* we are done accessing the resources */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+			/* update the state, now we are in off */
+			mdp5_data->resources_state = MDP_RSRC_CTL_STATE_OFF;
+		}
+		mutex_unlock(&ctl->rsrc_lock);
+		break;
+	case MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP:
+		/*
+		 * Cancel any work item pending and:
+		 * 1. If the current state is ON, stay in ON.
+		 * 2. If the current state is GATED, stay at GATED.
+		 * 3. If the current state is POWER-OFF, POWER-ON and
+		 *	schedule a work item to POWER-OFF if no
+		 *	kickoffs get scheduled.
+		 */
+
+		/* if panels are off, do not process early wake up */
+		if ((ctx && __mdss_mdp_cmd_is_panel_power_off(ctx)) ||
+			(sctx && __mdss_mdp_cmd_is_panel_power_off(sctx)))
+			break;
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work)) {
+			pr_debug("%s: %s - gate_work cancelled\n",
+				__func__, get_sw_event_name(sw_event));
+			schedule_off = true;
+		}
+
+		/* Cancel OFF Work Item */
+		if (cancel_delayed_work_sync(
+				&ctx->delayed_off_clk_work)) {
+			pr_debug("%s: %s - off work cancelled\n",
+				__func__, get_sw_event_name(sw_event));
+			schedule_off = true;
+		}
+
+		mutex_lock(&ctl->rsrc_lock);
+		MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event,
+			schedule_off, 0x44);
+		if (mdp5_data->resources_state == MDP_RSRC_CTL_STATE_OFF) {
+			u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			clk_ctrl.state = MDSS_DSI_CLK_ON;
+			clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+			mdss_mdp_ctl_intf_event(ctx->ctl,
+				MDSS_EVENT_PANEL_CLK_CTRL,
+				(void *)&clk_ctrl, flags);
+
+			if (sctx) { /* then slave */
+				if (sctx->pingpong_split_slave)
+					flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+				mdss_mdp_ctl_intf_event(sctx->ctl,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl, flags);
+			}
+
+			mdss_mdp_cmd_clk_on(ctx);
+			if (sctx)
+				mdss_mdp_cmd_clk_on(sctx);
+
+			mdp5_data->resources_state = MDP_RSRC_CTL_STATE_ON;
+			schedule_off = true;
+		}
+
+		/*
+		 * Driver will schedule off work under three cases:
+		 * 1. Early wakeup cancelled the gate work.
+		 * 2. Early wakeup cancelled the off work.
+		 * 3. Early wakeup changed the state to ON.
+		 *
+		 * Driver will not allow off work under one condition:
+		 * 1. Kickoff is pending.
+		 */
+		if (schedule_off && !ctl->mfd->atomic_commit_pending) {
+			/*
+			 * Schedule off work after cmd mode idle timeout is
+			 * reached. This is to prevent the case where early wake
+			 * up is called but no frame update is sent.
+			 */
+			schedule_delayed_work(&ctx->delayed_off_clk_work,
+				CMD_MODE_IDLE_TIMEOUT);
+			pr_debug("off work scheduled\n");
+		}
+		mutex_unlock(&ctl->rsrc_lock);
+		break;
+	default:
+		pr_warn("%s unexpected event (%d)\n", __func__, sw_event);
+		break;
+	}
+	MDSS_XLOG(sw_event, mdp5_data->resources_state, XLOG_FUNC_EXIT);
+
+exit:
+	return rc;
+}
+
+/*
+ * mdss_mdp_cmd_is_autorefresh_enabled() - query the autorefresh state
+ * @mctl: master control path
+ *
+ * Return: true only when autorefresh is fully ON (not merely requested).
+ */
+static bool mdss_mdp_cmd_is_autorefresh_enabled(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	bool enabled = false;
+
+	/* check the ctx to make sure the lock was initialized */
+	if (!ctx || !ctx->ctl)
+		return false; /* fix: bool function returned integer 0 */
+
+	mutex_lock(&ctx->autorefresh_lock);
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON)
+		enabled = true;
+	mutex_unlock(&ctx->autorefresh_lock);
+
+	return enabled;
+}
+
+/*
+ * mdss_mdp_cmd_clk_on() - raise shared MDP resources for this ctx
+ *
+ * Votes for bus bandwidth and resumes histogram interrupts under
+ * ctx->clk_mtx. Despite the name, the DSI clock events are issued by the
+ * caller (see mdss_mdp_resource_control), not here.
+ */
+static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_debug("%pS-->%s: task:%s ctx%d\n", __builtin_return_address(0),
+		__func__, current->group_leader->comm, ctx->current_pp_num);
+
+	mutex_lock(&ctx->clk_mtx);
+	MDSS_XLOG(ctx->current_pp_num, atomic_read(&ctx->koff_cnt),
+		mdata->bus_ref_cnt);
+
+	mdss_bus_bandwidth_ctrl(true);
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);
+
+	mutex_unlock(&ctx->clk_mtx);
+}
+
+/*
+ * mdss_mdp_cmd_clk_off() - drop shared MDP/DSI resources for this ctx
+ *
+ * Suspends histogram interrupts, sends MDSS_EVENT_PANEL_CLK_CTRL(OFF) to
+ * the panel, then drops the bus bandwidth vote - all under ctx->clk_mtx.
+ * When both contexts are active, the caller must power off the slave
+ * before the master (see mdss_mdp_resource_control STOP handling).
+ */
+static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct dsi_panel_clk_ctrl clk_ctrl;
+
+	pr_debug("%pS-->%s: task:%s ctx%d\n", __builtin_return_address(0),
+		__func__, current->group_leader->comm, ctx->current_pp_num);
+
+	mutex_lock(&ctx->clk_mtx);
+	MDSS_XLOG(ctx->current_pp_num, atomic_read(&ctx->koff_cnt),
+		mdata->bus_ref_cnt);
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
+
+	/* Power off DSI, is caller responsibility to do slave then master */
+	if (ctx->ctl) {
+		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+		if (ctx->pingpong_split_slave)
+			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+		clk_ctrl.state = MDSS_DSI_CLK_OFF;
+		clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+		mdss_mdp_ctl_intf_event
+			(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, flags);
+	} else {
+		pr_err("OFF with ctl:NULL\n");
+	}
+
+	mdss_bus_bandwidth_ctrl(false);
+
+	mutex_unlock(&ctx->clk_mtx);
+}
+
+/*
+ * mdss_mdp_cmd_readptr_done() - read-pointer (vsync) interrupt handler
+ * @arg: struct mdss_mdp_ctl that owns the interrupt
+ *
+ * Runs in interrupt context: bumps the vsync count, completes rdptr_done,
+ * releases one waiter from rdptr_waitq (if any) and invokes the registered
+ * pre-flush vsync handlers under clk_lock.
+ */
+static void mdss_mdp_cmd_readptr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	vsync_time = ktime_get();
+	ctl->vsync_cnt++;
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+	complete_all(&ctx->rdptr_done);
+
+	/* If caller is waiting for the read pointer, notify. */
+	if (atomic_read(&ctx->rdptr_cnt)) {
+		if (atomic_add_unless(&ctx->rdptr_cnt, -1, 0)) {
+			MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+			if (atomic_read(&ctx->rdptr_cnt))
+				pr_warn("%s: too many rdptrs=%d!\n",
+				  __func__, atomic_read(&ctx->rdptr_cnt));
+		}
+		wake_up_all(&ctx->rdptr_waitq);
+	}
+
+	spin_lock(&ctx->clk_lock);
+	/* only pre-flush handlers run from the rd_ptr irq */
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		if (tmp->enabled && !tmp->cmd_post_flush)
+			tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->clk_lock);
+}
+
+/*
+ * Block (up to KOFF_TIMEOUT) until all pending read-pointer waits have been
+ * drained to zero by the rd_ptr interrupt (mdss_mdp_cmd_readptr_done).
+ * Returns a positive value on success, 0 or negative on timeout/error.
+ */
+static int mdss_mdp_cmd_wait4readptr(struct mdss_mdp_cmd_ctx *ctx)
+{
+	int ret;
+
+	ret = wait_event_timeout(ctx->rdptr_waitq,
+			atomic_read(&ctx->rdptr_cnt) == 0,
+			KOFF_TIMEOUT);
+	if (ret > 0)
+		return ret;
+
+	if (atomic_read(&ctx->rdptr_cnt)) {
+		pr_err("timed out waiting for rdptr irq\n");
+		return ret;
+	}
+
+	/* counter drained right at the deadline: report success */
+	return 1;
+}
+
+/*
+ * mdss_mdp_cmd_intf_callback() - DSI-to-MDP callback for cmd mode
+ * @data: struct mdss_mdp_cmd_ctx registered with the callback
+ * @event: MDP_INTF_CALLBACK_* event code
+ *
+ * For MDP_INTF_CALLBACK_DSI_WAIT (ping-pong split only): waits for the next
+ * read pointer and then polls the line counter until the scan position is
+ * past start_pos + sync_threshold_start, so the caller proceeds within the
+ * safe region of the frame. Other events are ignored.
+ */
+static int mdss_mdp_cmd_intf_callback(void *data, int event)
+{
+	struct mdss_mdp_cmd_ctx *ctx = data;
+	struct mdss_mdp_pp_tear_check *te = NULL;
+	u32 timeout_us = 3000, val = 0;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!data) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctx->ctl)
+		return -EINVAL;
+
+	switch (event) {
+	case MDP_INTF_CALLBACK_DSI_WAIT:
+		pr_debug("%s: wait for frame cnt:%d event:%d\n",
+			__func__, atomic_read(&ctx->rdptr_cnt), event);
+
+		/*
+		 * if we are going to suspended or pp split is not enabled,
+		 * just return
+		 */
+		if (ctx->intf_stopped || !is_pingpong_split(ctx->ctl->mfd))
+			return -EINVAL;
+		atomic_inc(&ctx->rdptr_cnt);
+
+		/* enable clks and rd_ptr interrupt */
+		mdss_mdp_setup_vsync(ctx, true);
+
+		mixer = mdss_mdp_mixer_get(ctx->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+		if (!mixer) {
+			pr_err("%s: null mixer\n", __func__);
+			return -EINVAL;
+		}
+
+		/* wait for read pointer */
+		MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+		pr_debug("%s: wait for frame cnt:%d\n",
+			__func__, atomic_read(&ctx->rdptr_cnt));
+		mdss_mdp_cmd_wait4readptr(ctx);
+
+		/* wait for 3ms to make sure we are within the frame */
+		te = &ctx->ctl->panel_data->panel_info.te;
+		readl_poll_timeout(mixer->pingpong_base +
+			MDSS_MDP_REG_PP_INT_COUNT_VAL, val,
+			(val & 0xffff) > (te->start_pos +
+			te->sync_threshold_start), 10, timeout_us);
+
+		/* disable rd_ptr interrupt */
+		mdss_mdp_setup_vsync(ctx, false);
+
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Line-pointer interrupt handler: timestamps the event and invokes every
+ * enabled handler on the ctx's lineptr list under clk_lock.
+ */
+static void mdss_mdp_cmd_lineptr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_lineptr_handler *handler;
+	ktime_t ts;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	ts = ktime_get();
+	pr_debug("intr lineptr_time=%lld\n", ktime_to_ms(ts));
+
+	spin_lock(&ctx->clk_lock);
+	list_for_each_entry(handler, &ctx->lineptr_handlers, list) {
+		if (handler->enabled)
+			handler->lineptr_handler(ctl, ts);
+	}
+	spin_unlock(&ctx->clk_lock);
+}
+
+/*
+ * mdss_mdp_cmd_intf_recovery() - recovery hook registered with the DSI
+ * interface for command mode panels.
+ * @data:  mdss_mdp_cmd_ctx registered with the interface.
+ * @event: recovery event; only MDP_INTF_DSI_CMD_FIFO_UNDERFLOW is handled.
+ *
+ * If a kickoff is pending, resets the ctl hardware, then (under koff_lock)
+ * retires the pending kickoff: disables the pingpong-done irq, clears its
+ * callback and, when the sync partner is also idle, notifies a frame
+ * timeout. Returns 0 on success, -EINVAL/-EPERM on bad input/event.
+ */
+static int mdss_mdp_cmd_intf_recovery(void *data, int event)
+{
+	struct mdss_mdp_cmd_ctx *ctx = data;
+	unsigned long flags;
+	bool reset_done = false, notify_frame_timeout = false;
+
+	if (!data) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctx->ctl)
+		return -EINVAL;
+
+	/*
+	 * Currently, only intf_fifo_underflow is
+	 * supported for recovery sequence for command
+	 * mode DSI interface
+	 */
+	if (event != MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
+		pr_warn("%s: unsupported recovery event:%d\n",
+					__func__, event);
+		return -EPERM;
+	}
+
+	/* reset outside the spinlock; mdss_mdp_ctl_reset may block */
+	if (atomic_read(&ctx->koff_cnt)) {
+		mdss_mdp_ctl_reset(ctx->ctl, true);
+		reset_done = true;
+	}
+
+	spin_lock_irqsave(&ctx->koff_lock, flags);
+	if (reset_done && atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
+		pr_debug("%s: intf_num=%d\n", __func__, ctx->ctl->intf_num);
+		mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			ctx->current_pp_num);
+		mdss_mdp_set_intr_callback_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num, NULL, NULL);
+		if (mdss_mdp_cmd_do_notifier(ctx))
+			notify_frame_timeout = true;
+	}
+	spin_unlock_irqrestore(&ctx->koff_lock, flags);
+
+	/* notify outside the lock */
+	if (notify_frame_timeout)
+		mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_pingpong_done() - pingpong-done (frame transfer complete)
+ * interrupt callback for command mode.
+ * @arg: mdss_mdp_ctl pointer registered with the interrupt.
+ *
+ * Marks the MDP transaction done for perf accounting, runs the post-flush
+ * vsync handlers, then (under koff_lock) disables the pingpong-done irq,
+ * decrements koff_cnt and — when the synchronized partner context (if any)
+ * is also done — schedules frame-done work and releases resources.
+ */
+static void mdss_mdp_cmd_pingpong_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+	bool sync_ppdone;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_ctl_perf_set_transaction_status(ctl,
+		PERF_HW_MDP_STATE, PERF_STATUS_DONE);
+
+	/*
+	 * Fix: vsync_time was previously passed to the handlers without ever
+	 * being assigned (uninitialized read). Timestamp the event here, the
+	 * same way mdss_mdp_cmd_lineptr_done() does.
+	 */
+	vsync_time = ktime_get();
+
+	spin_lock(&ctx->clk_lock);
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		if (tmp->enabled && tmp->cmd_post_flush)
+			tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->clk_lock);
+
+	spin_lock(&ctx->koff_lock);
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+
+	/*
+	 * check state of sync ctx before decrementing koff_cnt to avoid race
+	 * condition. That is, once both koff_cnt have been served and new koff
+	 * can be triggered (sctx->koff_cnt could change)
+	 */
+	sync_ppdone = mdss_mdp_cmd_do_notifier(ctx);
+
+	if (atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
+		if (atomic_read(&ctx->koff_cnt))
+			pr_err("%s: too many kickoffs=%d!\n", __func__,
+			       atomic_read(&ctx->koff_cnt));
+		if (sync_ppdone) {
+			atomic_inc(&ctx->pp_done_cnt);
+			schedule_work(&ctx->pp_done_work);
+
+			mdss_mdp_resource_control(ctl,
+				MDP_RSRC_CTL_EVENT_PP_DONE);
+		}
+		wake_up_all(&ctx->pp_waitq);
+	} else {
+		pr_err("%s: should not have pingpong interrupt!\n", __func__);
+	}
+
+	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
+			ctl->num, ctl->intf_num, ctx->current_pp_num,
+			atomic_read(&ctx->koff_cnt));
+
+	trace_mdp_cmd_pingpong_done(ctl, ctx->current_pp_num,
+		atomic_read(&ctx->koff_cnt));
+
+	spin_unlock(&ctx->koff_lock);
+}
+
+/*
+ * mdss_mdp_setup_lineptr() - refcounted enable/disable of the wr_ptr
+ * (lineptr) interrupt and the clocks it needs.
+ * @ctx:    command mode context.
+ * @enable: true to take a reference, false to drop one.
+ *
+ * Clocks/irq are only toggled on the 0->1 and 1->0 transitions of
+ * lineptr_irq_cnt, serialized by mdp_wrptr_lock. Returns the resulting
+ * reference count.
+ */
+static int mdss_mdp_setup_lineptr(struct mdss_mdp_cmd_ctx *ctx,
+	bool enable)
+{
+	int changed = 0;
+
+	mutex_lock(&ctx->mdp_wrptr_lock);
+
+	if (enable) {
+		if (ctx->lineptr_irq_cnt == 0)
+			changed++;
+		ctx->lineptr_irq_cnt++;
+	} else {
+		if (ctx->lineptr_irq_cnt) {
+			ctx->lineptr_irq_cnt--;
+			if (ctx->lineptr_irq_cnt == 0)
+				changed++;
+		} else {
+			pr_warn("%pS->%s: wr_ptr can not be turned off\n",
+				__builtin_return_address(0), __func__);
+		}
+	}
+
+	if (changed)
+		MDSS_XLOG(ctx->lineptr_irq_cnt, enable, current->pid);
+
+	pr_debug("%pS->%s: lineptr_irq_cnt=%d changed=%d enable=%d ctl:%d pp:%d\n",
+			__builtin_return_address(0), __func__,
+			ctx->lineptr_irq_cnt, changed, enable,
+			ctx->ctl->num, ctx->default_pp_num);
+
+	if (changed) {
+		if (enable) {
+			/* enable clocks and irq */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+				ctx->default_pp_num);
+		} else {
+			/* disable clocks and irq */
+			mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+				ctx->default_pp_num);
+			/*
+			 * check the intr status and clear the irq before
+			 * disabling the clocks
+			 */
+			mdss_mdp_intr_check_and_clear(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+				ctx->default_pp_num);
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+
+	mutex_unlock(&ctx->mdp_wrptr_lock);
+	return ctx->lineptr_irq_cnt;
+}
+
+/*
+ * mdss_mdp_cmd_add_lineptr_handler() - register a lineptr (wr_ptr) handler
+ * on the master ctl and enable the lineptr interrupt.
+ * @ctl:    master control (non-master ctls are rejected).
+ * @handle: handler to add; added to lineptr_handlers once, under clk_lock.
+ *
+ * Returns 0 on success, -EINVAL for invalid/non-master ctl.
+ */
+static int mdss_mdp_cmd_add_lineptr_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_lineptr_handler *handle)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	mutex_lock(&cmd_off_mtx);
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%pS->%s: ctl=%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->lineptr_handlers);
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	/* dual-LM dual-display shares clock state; serialize with cmd_clk_mtx */
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+		mutex_lock(&cmd_clk_mtx);
+
+	mdss_mdp_setup_lineptr(ctx, true);
+	ctx->lineptr_enabled = true;
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+		mutex_unlock(&cmd_clk_mtx);
+done:
+	mutex_unlock(&cmd_off_mtx);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_cmd_remove_lineptr_handler() - unregister a lineptr handler and
+ * drop the lineptr interrupt reference taken when it was added.
+ * @ctl:    master control with lineptr currently enabled.
+ * @handle: handler to remove from lineptr_handlers (under clk_lock).
+ *
+ * Returns 0 on success, -EINVAL for invalid/non-master ctl or when lineptr
+ * was never enabled. Note: lineptr_enabled and prev_wr_ptr_irq are cleared
+ * even when the handle was already disabled.
+ */
+static int mdss_mdp_cmd_remove_lineptr_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_lineptr_handler *handle)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	unsigned long flags;
+	bool disabled = true;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master || !ctx->lineptr_enabled)
+		return -EINVAL;
+
+	pr_debug("%pS->%s: ctl=%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+	} else {
+		disabled = false;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	/* only drop the irq refcount if we actually disabled the handle */
+	if (disabled)
+		mdss_mdp_setup_lineptr(ctx, false);
+	ctx->lineptr_enabled = false;
+	ctx->prev_wr_ptr_irq = 0;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_lineptr_ctrl() - enable/disable lineptr reporting based on
+ * the panel's wr_ptr_irq line setting.
+ * @ctl:    master control.
+ * @enable: request to enable (subject to wr_ptr_irq being non-zero).
+ *
+ * On enable, writes the wr_ptr line to the pingpong WR_PTR_IRQ register
+ * only when the value changed, then adds or removes the ctl's lineptr
+ * handler depending on whether wr_ptr_irq is non-zero. On disable, removes
+ * the handler if currently enabled. Returns 0 or a negative errno.
+ */
+static int mdss_mdp_cmd_lineptr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	struct mdss_mdp_pp_tear_check *te;
+	struct mdss_mdp_cmd_ctx *ctx;
+	int rc = 0;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master)
+		return -EINVAL;
+
+	te = &ctl->panel_data->panel_info.te;
+	pr_debug("%pS->%s: ctl=%d en=%d, prev_lineptr=%d, lineptr=%d\n",
+			__builtin_return_address(0), __func__, ctl->num,
+			enable, ctx->prev_wr_ptr_irq, te->wr_ptr_irq);
+
+	if (enable) {
+		/* update reg only if the value has changed */
+		if (ctx->prev_wr_ptr_irq != te->wr_ptr_irq) {
+			ctx->prev_wr_ptr_irq = te->wr_ptr_irq;
+			mdss_mdp_pingpong_write(ctl->mixer_left->pingpong_base,
+				MDSS_MDP_REG_PP_WR_PTR_IRQ, te->wr_ptr_irq);
+		}
+
+		/*
+		 * add handler only when lineptr is not enabled
+		 * and wr ptr is non zero
+		 */
+		if (!ctx->lineptr_enabled && te->wr_ptr_irq)
+			rc = mdss_mdp_cmd_add_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+		/* Disable handler when the value is zero */
+		else if (ctx->lineptr_enabled && !te->wr_ptr_irq)
+			rc = mdss_mdp_cmd_remove_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+	} else {
+		if (ctx->lineptr_enabled)
+			rc = mdss_mdp_cmd_remove_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+	}
+
+	return rc;
+}
+
+/*
+ * Interface used to update the new lineptr value set by the sysfs node.
+ * Value is instantly updated only when autorefresh is enabled, else
+ * new value would be set in the next kickoff.
+ */
+static int mdss_mdp_cmd_update_lineptr(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	/* without autorefresh the new value takes effect at next kickoff */
+	if (!mdss_mdp_cmd_is_autorefresh_enabled(ctl))
+		return 0;
+
+	return mdss_mdp_cmd_lineptr_ctrl(ctl, enable);
+}
+
+/**
+ * mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
+ * @arg: void pointer to the controller context.
+ *
+ * This function is the pp_done interrupt callback while disabling
+ * autorefresh. This function does not modify the kickoff count (koff_cnt);
+ * it only disables/clears the pingpong-done interrupt and completes
+ * autorefresh_ppdone so the waiter in the disable path can proceed.
+ */
+static void mdss_mdp_cmd_autorefresh_pp_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+	complete_all(&ctx->autorefresh_ppdone);
+
+	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
+		ctl->num, ctl->intf_num, ctx->current_pp_num,
+		atomic_read(&ctx->koff_cnt));
+}
+
+/*
+ * pingpong_done_work() - work item scheduled from the pingpong-done irq.
+ * @work: embedded in mdss_mdp_cmd_ctx as pp_done_work.
+ *
+ * Drains pp_done_cnt, emitting one MDP_NOTIFY_FRAME_DONE per completed
+ * kickoff, releases bandwidth when no transaction is pending, and turns
+ * lineptr off (via the main ctl) unless autorefresh keeps it needed.
+ */
+static void pingpong_done_work(struct work_struct *work)
+{
+	u32 status;
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(work, typeof(*ctx), pp_done_work);
+	struct mdss_mdp_ctl *ctl = ctx->ctl;
+
+	if (ctl) {
+		while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
+			mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
+
+		status = mdss_mdp_ctl_perf_get_transaction_status(ctx->ctl);
+		if (status == 0)
+			mdss_mdp_ctl_perf_release_bw(ctx->ctl);
+
+		/* lineptr is managed by the master; resolve it if needed */
+		if (!ctl->is_master)
+			ctl = mdss_mdp_get_main_ctl(ctl);
+
+		/* do not disable lineptr when autorefresh is enabled */
+		if (mdss_mdp_is_lineptr_supported(ctl)
+			&& !mdss_mdp_cmd_is_autorefresh_enabled(ctl))
+			mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+	}
+}
+
+/*
+ * clk_ctrl_delayed_off_work() - delayed work that fully powers off command
+ * mode clocks after an idle period.
+ * @work: delayed work embedded in mdss_mdp_cmd_ctx (delayed_off_clk_work).
+ *
+ * Bails out early if a kickoff is queued or autorefresh is active. For
+ * split-display and pingpong-split configurations, resolves both contexts
+ * under cmd_clk_mtx. Turns the slave DSI off first, then the master, drops
+ * the ahb bus vote and MDP clocks, and moves the resource state machine to
+ * MDP_RSRC_CTL_STATE_OFF. All of this runs under ctl->rsrc_lock.
+ */
+static void clk_ctrl_delayed_off_work(struct work_struct *work)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct mdss_mdp_cmd_ctx *ctx = container_of(dw,
+		struct mdss_mdp_cmd_ctx, delayed_off_clk_work);
+	struct mdss_mdp_ctl *ctl, *sctl;
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ctl = ctx->ctl;
+	if (!ctl || !ctl->panel_data) {
+		pr_err("NULL ctl||panel_data\n");
+		return;
+	}
+
+	/* a commit is about to run; keep clocks on for it */
+	if (ctl->mfd->atomic_commit_pending) {
+		pr_debug("leave clocks on for queued kickoff\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	ATRACE_BEGIN(__func__);
+
+	/*
+	 * Ideally we should not wait for the gate work item to finish, since
+	 * this work happens CMD_MODE_IDLE_TIMEOUT time after,
+	 * but if the system is laggy, prevent from a race condition
+	 * between both work items by waiting for the gate to finish.
+	 */
+	if (mdata->enable_gate)
+		flush_work(&ctx->gate_clk_work);
+
+	pr_debug("ctl:%d pwr_state:%s\n", ctl->num,
+		get_clk_pwr_state_name
+		(mdp5_data->resources_state));
+
+	mutex_lock(&ctl->rsrc_lock);
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_ENTRY);
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		mutex_lock(&cmd_clk_mtx);
+
+		if (mdss_mdp_get_split_display_ctls(&ctl, &sctl)) {
+			/* error when getting both controllers, just return */
+			pr_err("cannot get both controllers for the split display\n");
+			goto exit;
+		}
+
+		/* re-assign to have the correct order in the context */
+		ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+		if (!ctx || !sctx) {
+			pr_err("invalid %s %s\n",
+				ctx?"":"ctx", sctx?"":"sctx");
+			goto exit;
+		}
+	} else if (is_pingpong_split(ctl->mfd)) {
+		mutex_lock(&cmd_clk_mtx);
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx) {
+			pr_err("invalid sctx\n");
+			goto exit;
+		}
+	}
+
+	if (ctx->autorefresh_state != MDP_AUTOREFRESH_OFF) {
+		/*
+		 * Driver shouldn't have scheduled this work item if
+		 * autorefresh was enabled, but if any race
+		 * condition happens between this work queue and
+		 * the enable of the feature, make sure we do not
+		 * process this request and mark this error.
+		 */
+		pr_err("cannot disable clks while autorefresh is not off\n");
+		goto exit;
+	}
+
+	/* Enable clocks if Gate feature is enabled and we are in this state */
+	if (mdata->enable_gate && (mdp5_data->resources_state
+			== MDP_RSRC_CTL_STATE_GATE))
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/* first power off the slave DSI (if present) */
+	if (sctx)
+		mdss_mdp_cmd_clk_off(sctx);
+
+	/* now power off the master DSI */
+	mdss_mdp_cmd_clk_off(ctx);
+
+	/* Remove extra vote for the ahb bus */
+	mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+		VOTE_INDEX_DISABLE);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	/* update state machine that power off transition is done */
+	mdp5_data->resources_state = MDP_RSRC_CTL_STATE_OFF;
+
+exit:
+	/* do this at the end, so we can also protect the global power state*/
+	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+	    is_pingpong_split(ctl->mfd))
+		mutex_unlock(&cmd_clk_mtx);
+
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_EXIT);
+	mutex_unlock(&ctl->rsrc_lock);
+
+	ATRACE_END(__func__);
+}
+
+/*
+ * clk_ctrl_gate_work() - work item that gates (early clock gate, not full
+ * power off) the DSI and MDP clocks while the display is idle.
+ * @work: embedded in mdss_mdp_cmd_ctx as gate_clk_work.
+ *
+ * Mirrors clk_ctrl_delayed_off_work(): resolves slave contexts for
+ * split-display/pingpong-split under cmd_clk_mtx, refuses to gate while
+ * autorefresh is active, sends MDSS_DSI_CLK_EARLY_GATE to the slave DSI
+ * first and then the master, gates MDP clocks, and moves the resource
+ * state machine to MDP_RSRC_CTL_STATE_GATE. Runs under ctl->rsrc_lock.
+ */
+static void clk_ctrl_gate_work(struct work_struct *work)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(work, typeof(*ctx), gate_clk_work);
+	struct mdss_mdp_ctl *ctl, *sctl;
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct dsi_panel_clk_ctrl clk_ctrl;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ATRACE_BEGIN(__func__);
+	ctl = ctx->ctl;
+	if (!ctl) {
+		pr_err("%s: invalid ctl\n", __func__);
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data) {
+		pr_err("%s: invalid mdp data\n", __func__);
+		return;
+	}
+
+	pr_debug("%s ctl:%d pwr_state:%s\n", __func__,
+		ctl->num, get_clk_pwr_state_name
+		(mdp5_data->resources_state));
+
+	mutex_lock(&ctl->rsrc_lock);
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_ENTRY);
+
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		mutex_lock(&cmd_clk_mtx);
+
+		if (mdss_mdp_get_split_display_ctls(&ctl, &sctl)) {
+			/* error when getting both controllers, just return */
+			pr_err("%s cannot get both cts for the split display\n",
+				__func__);
+			goto exit;
+		}
+
+		/* re-assign to have the correct order in the context */
+		ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+		if (!ctx || !sctx) {
+			pr_err("%s ERROR invalid %s %s\n", __func__,
+				ctx?"":"ctx", sctx?"":"sctx");
+			goto exit;
+		}
+	} else if (is_pingpong_split(ctl->mfd)) {
+		mutex_lock(&cmd_clk_mtx);
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx) {
+			pr_err("invalid sctx\n");
+			goto exit;
+		}
+	}
+
+	if (ctx->autorefresh_state != MDP_AUTOREFRESH_OFF) {
+		/*
+		 * Driver shouldn't have scheduled this work item if
+		 * autorefresh was enabled, but if any race
+		 * condition happens between this work queue and
+		 * the enable of the feature, make sure we do not
+		 * process this request and mark this error.
+		 */
+		pr_err("cannot gate clocks with autorefresh\n");
+		goto exit;
+	}
+
+	clk_ctrl.state = MDSS_DSI_CLK_EARLY_GATE;
+	clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+	/* First gate the DSI clocks for the slave controller (if present) */
+	if (sctx) {
+		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+		if (sctx->pingpong_split_slave)
+			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+		mdss_mdp_ctl_intf_event(sctx->ctl,
+			MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, flags);
+	}
+
+	/* Now gate DSI clocks for the master */
+	mdss_mdp_ctl_intf_event
+		(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+		(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+	/* Gate mdp clocks */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	/* update state machine that gate transition is done */
+	mdp5_data->resources_state = MDP_RSRC_CTL_STATE_GATE;
+
+exit:
+	/* unlock mutex needed for split display */
+	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+	    is_pingpong_split(ctl->mfd))
+		mutex_unlock(&cmd_clk_mtx);
+
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_EXIT);
+	mutex_unlock(&ctl->rsrc_lock);
+
+	ATRACE_END(__func__);
+}
+
+/*
+ * mdss_mdp_setup_vsync() - refcounted enable/disable of the rd_ptr (vsync)
+ * interrupt and the clocks it needs.
+ * @ctx:    command mode context.
+ * @enable: true to take a reference, false to drop one.
+ *
+ * Counterpart of mdss_mdp_setup_lineptr() for the RD_PTR interrupt.
+ * Clocks/irq are only toggled on the 0->1 and 1->0 transitions of
+ * vsync_irq_cnt, serialized by mdp_rdptr_lock. Returns the resulting
+ * reference count.
+ */
+static int mdss_mdp_setup_vsync(struct mdss_mdp_cmd_ctx *ctx,
+	bool enable)
+{
+	int changed = 0;
+
+	mutex_lock(&ctx->mdp_rdptr_lock);
+
+	if (enable) {
+		if (ctx->vsync_irq_cnt == 0)
+			changed++;
+		ctx->vsync_irq_cnt++;
+	} else {
+		if (ctx->vsync_irq_cnt) {
+			ctx->vsync_irq_cnt--;
+			if (ctx->vsync_irq_cnt == 0)
+				changed++;
+		} else {
+			pr_warn("%pS->%s: rd_ptr can not be turned off\n",
+				__builtin_return_address(0), __func__);
+		}
+	}
+
+	if (changed)
+		MDSS_XLOG(ctx->vsync_irq_cnt, enable, current->pid);
+
+	pr_debug("%pS->%s: vsync_cnt=%d changed=%d enable=%d ctl:%d pp:%d\n",
+			__builtin_return_address(0), __func__,
+			ctx->vsync_irq_cnt, changed, enable,
+			ctx->ctl->num, ctx->default_pp_num);
+
+	if (changed) {
+		if (enable) {
+			/* enable clocks and irq */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+				ctx->default_pp_num);
+		} else {
+			/* disable clocks and irq */
+			mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+				ctx->default_pp_num);
+			/*
+			 * check the intr status and clear the irq before
+			 * disabling the clocks
+			 */
+			mdss_mdp_intr_check_and_clear(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+				ctx->default_pp_num);
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+
+	mutex_unlock(&ctx->mdp_rdptr_lock);
+	return ctx->vsync_irq_cnt;
+}
+
+/*
+ * mdss_mdp_cmd_add_vsync_handler() - register a vsync (rd_ptr) handler.
+ * @ctl:    control for the interface.
+ * @handle: handler to add to vsync_handlers (once, under clk_lock).
+ *
+ * The rd_ptr interrupt itself is only enabled for handlers that are not
+ * post-flush (enable_rdptr). Returns 0 on success, -ENODEV for a missing
+ * context.
+ */
+static int mdss_mdp_cmd_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	unsigned long flags;
+	bool enable_rdptr = false;
+	int ret = 0;
+
+	mutex_lock(&cmd_off_mtx);
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	pr_debug("%pS->%s ctl:%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->vsync_handlers);
+
+		enable_rdptr = !handle->cmd_post_flush;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	if (enable_rdptr) {
+		/* clock state is shared in dual-display; serialize */
+		if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+			mutex_lock(&cmd_clk_mtx);
+
+		/* enable rd_ptr interrupt and clocks */
+		mdss_mdp_setup_vsync(ctx, true);
+
+		if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+			mutex_unlock(&cmd_clk_mtx);
+	}
+
+done:
+	mutex_unlock(&cmd_off_mtx);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_cmd_remove_vsync_handler() - unregister a vsync (rd_ptr)
+ * handler added by mdss_mdp_cmd_add_vsync_handler().
+ * @ctl:    control for the interface.
+ * @handle: handler to remove from vsync_handlers (under clk_lock).
+ *
+ * Drops the rd_ptr irq reference for non-post-flush handlers and signals
+ * stop_comp for anyone waiting on vsync teardown. Returns 0 on success,
+ * -ENODEV for a missing context.
+ */
+static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	unsigned long flags;
+	bool disable_vsync_irq = false;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -ENODEV;
+	}
+
+	pr_debug("%pS->%s ctl:%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888);
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+		disable_vsync_irq = !handle->cmd_post_flush;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	if (disable_vsync_irq) {
+		/* disable rd_ptr interrupt and clocks */
+		mdss_mdp_setup_vsync(ctx, false);
+		complete(&ctx->stop_comp);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_reconfigure_splash_done() - hand over from the bootloader
+ * continuous-splash configuration to the kernel driver.
+ * @ctl:     control for the interface.
+ * @handoff: handoff flag from the caller (unused in this body).
+ *
+ * Sends MDSS_DSI_CLK_OFF to the split/slave controller (if any) and then
+ * the master, and clears cont_splash_enabled on all the panel_info
+ * structures involved (split ctl, or next pdata for pingpong split).
+ * Always returns 0.
+ */
+int mdss_mdp_cmd_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+	bool handoff)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	int ret = 0;
+
+	pdata = ctl->panel_data;
+
+	clk_ctrl.state = MDSS_DSI_CLK_OFF;
+	clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+	if (sctl) {
+		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+		if (is_pingpong_split(sctl->mfd))
+			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+		mdss_mdp_ctl_intf_event(sctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, flags);
+	}
+
+	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+		(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+	pdata->panel_info.cont_splash_enabled = 0;
+	if (sctl)
+		sctl->panel_data->panel_info.cont_splash_enabled = 0;
+	else if (pdata->next && is_pingpong_split(ctl->mfd))
+		pdata->next->panel_info.cont_splash_enabled = 0;
+
+	return ret;
+}
+
+/*
+ * __mdss_mdp_wait4pingpong() - wait for all pending kickoffs to complete.
+ * @ctx: command mode context whose koff_cnt is being drained.
+ *
+ * Sleeps on pp_waitq until koff_cnt reaches 0, retrying spurious timeouts
+ * while wall-clock time is still within KOFF_TIMEOUT_MS of entry. Returns
+ * the last wait_event_timeout() result: >0 on completion, 0 on timeout.
+ */
+static int __mdss_mdp_wait4pingpong(struct mdss_mdp_cmd_ctx *ctx)
+{
+	int rc = 0;
+	s64 expected_time = ktime_to_ms(ktime_get()) + KOFF_TIMEOUT_MS;
+	s64 time;
+
+	do {
+		rc = wait_event_timeout(ctx->pp_waitq,
+				atomic_read(&ctx->koff_cnt) == 0,
+				KOFF_TIMEOUT);
+		time = ktime_to_ms(ktime_get());
+
+		MDSS_XLOG(rc, time, expected_time, atomic_read(&ctx->koff_cnt));
+		/*
+		 * If we time out, counter is valid and time is less,
+		 * wait again.
+		 */
+	} while (atomic_read(&ctx->koff_cnt) && (rc == 0) &&
+			(time < expected_time));
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_cmd_wait4pingpong() - wait for the current kickoff's pingpong
+ * done, with timeout recovery.
+ * @ctl: control for the interface.
+ * @arg: unused.
+ *
+ * Waits via __mdss_mdp_wait4pingpong(). On timeout: first checks whether
+ * the pp_done irq was latched but never delivered and, if so, runs the
+ * callback by hand. If the kickoff is still pending, verifies the panel TE
+ * is alive via the TE gpio irq, reports the panel dead or dumps debug
+ * state depending on pp_timeout_report_cnt, retires the kickoff and
+ * notifies a frame timeout, returning -EPERM. On success returns 0.
+ * In all cases pending frame-done notifications are drained.
+ */
+static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_panel_data *pdata;
+	unsigned long flags;
+	int rc = 0, te_irq;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	pdata = ctl->panel_data;
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctl->roi_bkup.w,
+			ctl->roi_bkup.h);
+
+	pr_debug("%s: intf_num=%d ctx=%pK koff_cnt=%d\n", __func__,
+			ctl->intf_num, ctx, atomic_read(&ctx->koff_cnt));
+
+	rc = __mdss_mdp_wait4pingpong(ctx);
+
+	trace_mdp_cmd_wait_pingpong(ctl->num,
+				atomic_read(&ctx->koff_cnt));
+
+	if (rc <= 0) {
+		u32 status, mask;
+
+		/* irq may be latched in HW but never serviced; check */
+		mask = mdss_mdp_get_irq_mask(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num);
+		status = mask & readl_relaxed(ctl->mdata->mdp_base +
+				MDSS_MDP_REG_INTR_STATUS);
+		MDSS_XLOG(status, rc, atomic_read(&ctx->koff_cnt));
+		if (status) {
+			pr_warn("pp done but irq not triggered\n");
+			mdss_mdp_irq_clear(ctl->mdata,
+				MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num);
+			/* run the callback with irqs off, as the irq would */
+			local_irq_save(flags);
+			mdss_mdp_cmd_pingpong_done(ctl);
+			local_irq_restore(flags);
+			rc = 1;
+		}
+
+		rc = atomic_read(&ctx->koff_cnt) == 0;
+	}
+
+	if (rc <= 0) {
+		pr_err("%s:wait4pingpong timed out ctl=%d rc=%d cnt=%d koff_cnt=%d\n",
+			__func__,
+			ctl->num, rc, ctx->pp_timeout_report_cnt,
+			atomic_read(&ctx->koff_cnt));
+
+		/* enable TE irq to check if it is coming from the panel */
+		te_irq = gpio_to_irq(pdata->panel_te_gpio);
+		enable_irq(te_irq);
+
+		/* wait for 20ms to ensure we are getting the next TE */
+		usleep_range(20000, 20010);
+
+		reinit_completion(&pdata->te_done);
+		rc = wait_for_completion_timeout(&pdata->te_done, KOFF_TIMEOUT);
+
+		if (!rc) {
+			/* no TE at all: panel is dead */
+			MDSS_XLOG(0xbac);
+			mdss_fb_report_panel_dead(ctl->mfd);
+		} else if (ctx->pp_timeout_report_cnt == 0) {
+			/* first timeout: dump debug state only */
+			MDSS_XLOG(0xbad);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+				"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+				"dbg_bus", "vbif_dbg_bus", "panic");
+		} else if (ctx->pp_timeout_report_cnt == MAX_RECOVERY_TRIALS) {
+			/* too many retries: dump and declare the panel dead */
+			MDSS_XLOG(0xbad2);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+				"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+				"dbg_bus", "vbif_dbg_bus", "panic");
+			mdss_fb_report_panel_dead(ctl->mfd);
+		}
+
+		/* disable te irq */
+		disable_irq_nosync(te_irq);
+
+		ctx->pp_timeout_report_cnt++;
+		rc = -EPERM;
+
+		mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			ctx->current_pp_num);
+		mdss_mdp_set_intr_callback_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num, NULL, NULL);
+		if (atomic_add_unless(&ctx->koff_cnt, -1, 0)
+			&& mdss_mdp_cmd_do_notifier(ctx))
+			mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+
+	} else {
+		rc = 0;
+		ctx->pp_timeout_report_cnt = 0;
+	}
+
+	cancel_work_sync(&ctx->pp_done_work);
+
+	/* signal any pending ping pong done events */
+	while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
+		mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), rc);
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_cmd_do_notifier() - decide whether frame-done can be notified.
+ * @ctx: context whose synchronized partner (sync_ctx) is consulted.
+ *
+ * Returns 1 when there is no sync partner, or the partner has no kickoff
+ * pending; 0 otherwise.
+ */
+static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_mdp_cmd_ctx *partner = ctx->sync_ctx;
+
+	if (partner && atomic_read(&partner->koff_cnt))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * mdss_mdp_cmd_set_sync_ctx() - link or unlink the master and slave
+ * command contexts for synchronized kickoff.
+ * @ctl:  master control.
+ * @sctl: slave control, or NULL to unlink.
+ *
+ * When the slave ROI is empty (left-only update) both contexts are
+ * unlinked; otherwise they point at each other via sync_ctx.
+ */
+static void mdss_mdp_cmd_set_sync_ctx(
+		struct mdss_mdp_ctl *ctl, struct mdss_mdp_ctl *sctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx, *sctx;
+
+	ctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[MASTER_CTX];
+
+	if (!sctl) {
+		ctx->sync_ctx = NULL;
+		return;
+	}
+
+	sctx = (struct mdss_mdp_cmd_ctx *)sctl->intf_ctx[MASTER_CTX];
+
+	if (!sctl->roi.w && !sctl->roi.h) {
+		/* left only */
+		ctx->sync_ctx = NULL;
+		sctx->sync_ctx = NULL;
+	} else  {
+		/* left + right */
+		ctx->sync_ctx = sctx;
+		sctx->sync_ctx = ctx;
+	}
+}
+
+/* only master ctl is valid and pingpong split with DSC is pending */
+/*
+ * mdss_mdp_cmd_dsc_reconfig() - reprogram DSC when the mixer ROI changed.
+ * @ctl: master control (non-master or NULL is ignored).
+ *
+ * No-op unless the panel uses DSC compression. Re-runs
+ * mdss_mdp_ctl_dsc_setup() if the left (or, for split-LM, right) mixer
+ * ROI changed since the last commit.
+ */
+static void mdss_mdp_cmd_dsc_reconfig(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+	bool changed = false;
+
+	if (!ctl || !ctl->is_master)
+		return;
+
+	pinfo = &ctl->panel_data->panel_info;
+	if (pinfo->compression_mode != COMPRESSION_DSC)
+		return;
+
+	changed = ctl->mixer_left->roi_changed;
+	if (is_split_lm(ctl->mfd))
+		changed |= ctl->mixer_right->roi_changed;
+
+	if (changed)
+		mdss_mdp_ctl_dsc_setup(ctl, pinfo);
+}
+
+/*
+ * mdss_mdp_cmd_set_partial_roi() - program the panel's partial-update
+ * column/page address window.
+ * @ctl: control for the interface.
+ *
+ * Returns -EINVAL when partial update is not enabled for the panel,
+ * otherwise the result of the ENABLE_PARTIAL_ROI interface event.
+ */
+static int mdss_mdp_cmd_set_partial_roi(struct mdss_mdp_ctl *ctl)
+{
+	if (!ctl->panel_data->panel_info.partial_update_enabled)
+		return -EINVAL;
+
+	/* set panel col and page addr */
+	return mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_ENABLE_PARTIAL_ROI,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+}
+
+/*
+ * mdss_mdp_cmd_set_stream_size() - program the DSI controller stream size
+ * for partial update.
+ * @ctl: control for the interface.
+ *
+ * Returns -EINVAL when partial update is not enabled for the panel,
+ * otherwise the result of the DSI_STREAM_SIZE interface event.
+ */
+static int mdss_mdp_cmd_set_stream_size(struct mdss_mdp_ctl *ctl)
+{
+	if (!ctl->panel_data->panel_info.partial_update_enabled)
+		return -EINVAL;
+
+	/* set dsi controller stream size */
+	return mdss_mdp_ctl_intf_event(ctl,
+		MDSS_EVENT_DSI_STREAM_SIZE, NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+}
+
+/*
+ * mdss_mdp_cmd_panel_on() - bring the command mode panel to interactive
+ * power state.
+ * @ctl:  master control.
+ * @sctl: split control, or NULL; for pingpong split the slave context is
+ *        taken from ctl->intf_ctx[SLAVE_CTX] instead.
+ *
+ * Unless this is a resolution switch, sends LINK_READY, UNBLANK and
+ * PANEL_ON to the interface, enables tearcheck, marks both contexts
+ * powered on, registers the recovery and MDP callbacks with the
+ * interface, and clears intf_stopped. Returns 0 or the last event error.
+ */
+static int mdss_mdp_cmd_panel_on(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	int rc = 0;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* In pingpong split we have single controller, dual context */
+	if (is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+
+	if (!__mdss_mdp_cmd_is_panel_power_on_interactive(ctx)) {
+		if (ctl->pending_mode_switch != SWITCH_RESOLUTION) {
+			rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_LINK_READY,
+					NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d link ready error (%d)\n",
+					ctl->intf_num, rc);
+
+			rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK,
+					NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d unblank error (%d)\n",
+					ctl->intf_num, rc);
+
+			rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON,
+					NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d panel on error (%d)\n",
+					ctl->intf_num, rc);
+
+		}
+
+		rc = mdss_mdp_tearcheck_enable(ctl, true);
+		WARN(rc, "intf %d tearcheck enable error (%d)\n",
+				ctl->intf_num, rc);
+
+		ctx->panel_power_state = MDSS_PANEL_POWER_ON;
+		if (sctx)
+			sctx->panel_power_state = MDSS_PANEL_POWER_ON;
+
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+			(void *)&ctx->intf_recovery,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_MDP_CALLBACK,
+			(void *)&ctx->intf_mdp_callback,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		ctx->intf_stopped = 0;
+		if (sctx)
+			sctx->intf_stopped = 0;
+	} else {
+		pr_err("%s: Panel already on\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * This function will be called from the sysfs node to enable and disable the
+ * feature with master ctl only.
+ *
+ * mdss_mdp_cmd_set_autorefresh_mode() - request an autorefresh state
+ * change; the actual HW enable/disable happens at the next kickoff.
+ * @mctl:      master control (requires hw_vsync_mode and an active intf).
+ * @frame_cnt: refresh period in frames; 0 requests autorefresh off.
+ *
+ * Runs the autorefresh state machine under autorefresh_lock. Returns 0 on
+ * success, -ENODEV/-EINVAL/-EPERM on invalid ctl, range or display state.
+ */
+int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
+{
+	int rc = 0;
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_panel_info *pinfo;
+
+	if (!mctl || !mctl->is_master || !mctl->panel_data) {
+		pr_err("invalid ctl mctl:%pK pdata:%pK\n",
+			mctl, mctl ? mctl->panel_data : 0);
+		return -ENODEV;
+	}
+
+	ctx = mctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	pinfo = &mctl->panel_data->panel_info;
+	if (!pinfo->mipi.hw_vsync_mode) {
+		pr_err("hw vsync disabled, cannot handle autorefresh\n");
+		return -ENODEV;
+	}
+
+	if (frame_cnt < 0 || frame_cnt >= AUTOREFRESH_MAX_FRAME_CNT) {
+		pr_err("frame cnt %d is out of range (16 bits).\n", frame_cnt);
+		return -EINVAL;
+	}
+
+	if (ctx->intf_stopped) {
+		pr_debug("autorefresh cannot be changed when display is off\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ctx->autorefresh_lock);
+
+	if (frame_cnt == ctx->autorefresh_frame_cnt) {
+		pr_debug("No change to the refresh count\n");
+		goto exit;
+	}
+
+	MDSS_XLOG(ctx->autorefresh_state,
+		  ctx->autorefresh_frame_cnt, frame_cnt);
+
+	/* fixed log message typos: "curent autorfresh"/"frmae_cnt" */
+	pr_debug("current autorefresh state=%d, frame_cnt: old=%d new=%d\n",
+			ctx->autorefresh_state,
+			ctx->autorefresh_frame_cnt, frame_cnt);
+
+	switch (ctx->autorefresh_state) {
+	case MDP_AUTOREFRESH_OFF:
+		if (frame_cnt == 0) {
+			pr_debug("oops autorefresh is already disabled. We shouldn't get here\n");
+			rc = -EINVAL;
+			goto exit;
+		}
+
+		/*
+		 * actual enable will happen in commit context when
+		 * next update is kicked off.
+		 */
+		ctx->autorefresh_state = MDP_AUTOREFRESH_ON_REQUESTED;
+		ctx->autorefresh_frame_cnt = frame_cnt;
+		mctl->mdata->serialize_wait4pp = true;
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work))
+			pr_debug("%s: gate work canceled\n", __func__);
+
+		/* Cancel OFF Work Item */
+		if (cancel_delayed_work_sync(&ctx->delayed_off_clk_work))
+			pr_debug("%s: off work canceled\n", __func__);
+		break;
+	case MDP_AUTOREFRESH_ON_REQUESTED:
+		if (frame_cnt == 0) {
+			/* not enabled in HW yet; just drop the request */
+			ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+			ctx->autorefresh_frame_cnt = 0;
+			mctl->mdata->serialize_wait4pp = false;
+		} else {
+			ctx->autorefresh_frame_cnt = frame_cnt;
+			mctl->mdata->serialize_wait4pp = true;
+		}
+		break;
+	case MDP_AUTOREFRESH_ON:
+		if (frame_cnt == 0) {
+			/*
+			 * actual disable will happen in commit context when
+			 * next update is kicked off.
+			 */
+			ctx->autorefresh_state = MDP_AUTOREFRESH_OFF_REQUESTED;
+		} else {
+			ctx->autorefresh_frame_cnt = frame_cnt;
+			mctl->mdata->serialize_wait4pp = true;
+		}
+		break;
+	case MDP_AUTOREFRESH_OFF_REQUESTED:
+		if (frame_cnt == 0) {
+			pr_debug("autorefresh off is already requested\n");
+		} else {
+			pr_debug("cancelling autorefresh off request\n");
+			ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
+			ctx->autorefresh_frame_cnt = frame_cnt;
+			mctl->mdata->serialize_wait4pp = true;
+		}
+		break;
+	default:
+		pr_err("invalid autorefresh state\n");
+	}
+
+	MDSS_XLOG(ctx->autorefresh_state,
+		ctx->autorefresh_frame_cnt);
+
+exit:
+	mutex_unlock(&ctx->autorefresh_lock);
+	return rc;
+}
+
+/*
+ * mdss_mdp_cmd_get_autorefresh_mode() - read the configured autorefresh
+ * frame count.
+ * @mctl: master control.
+ *
+ * Returns the frame count sampled under autorefresh_lock, or 0 when the
+ * context is not initialized.
+ */
+int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	int autorefresh_frame_cnt;
+
+	/* check the ctl to make sure the lock was initialized */
+	if (!ctx || !ctx->ctl)
+		return 0;
+
+	mutex_lock(&ctx->autorefresh_lock);
+	autorefresh_frame_cnt = ctx->autorefresh_frame_cnt;
+	mutex_unlock(&ctx->autorefresh_lock);
+
+	/*
+	 * Fix: return the snapshot taken under the lock; the old code
+	 * re-read ctx->autorefresh_frame_cnt after unlocking, making the
+	 * locked copy pointless and the read racy.
+	 */
+	return autorefresh_frame_cnt;
+}
+
+/*
+ * mdss_mdp_cmd_pre_programming() - prepare the pingpong block before a
+ * commit while autorefresh is (or is about to stop being) active.
+ *
+ * When autorefresh is ON or an OFF was requested, clear BIT(20) of
+ * PP_SYNC_CONFIG_VSYNC so the hw stops triggering on the external panel
+ * TE; ignore_external_te is set so post_programming re-enables it.
+ * Master-only; runs under autorefresh_lock.
+ */
+static void mdss_mdp_cmd_pre_programming(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	char __iomem *pp_base;
+	u32 autorefresh_state;
+	u32 cfg;
+
+	/* only the master ctl owns the autorefresh programming */
+	if (!mctl->is_master)
+		return;
+
+	mutex_lock(&ctx->autorefresh_lock);
+
+	autorefresh_state = ctx->autorefresh_state;
+	MDSS_XLOG(autorefresh_state);
+	pr_debug("pre_programming state: %d\n", autorefresh_state);
+
+	if ((autorefresh_state == MDP_AUTOREFRESH_ON) ||
+		(autorefresh_state == MDP_AUTOREFRESH_OFF_REQUESTED)) {
+
+		pp_base = mctl->mixer_left->pingpong_base;
+
+		/*
+		 * instruct MDP to ignore the panel TE so the next auto-refresh
+		 * is delayed until flush bits are set.
+		 */
+		cfg = mdss_mdp_pingpong_read(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+		cfg &= ~BIT(20);
+		mdss_mdp_pingpong_write(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		ctx->ignore_external_te = true;
+
+	}
+	mutex_unlock(&ctx->autorefresh_lock);
+}
+
+/*
+ * mdss_mdp_cmd_post_programming() - undo pre_programming after kickoff:
+ * if external panel TE listening was disabled (ignore_external_te set),
+ * set BIT(20) of PP_SYNC_CONFIG_VSYNC again so MDP listens to the TE.
+ * Master-only.
+ *
+ * this function assumes that autorefresh_lock is held by the caller
+ */
+static void mdss_mdp_cmd_post_programming(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	char __iomem *pp_base;
+	u32 cfg;
+
+	if (!mctl->is_master)
+		return;
+
+	/*
+	 * If listening to the external panel TE was disabled
+	 * (this happens when we get a kickoff with
+	 * autorefresh enabled), enable the panel TE back.
+	 */
+	if (ctx->ignore_external_te) {
+
+		MDSS_XLOG(ctx->ignore_external_te);
+		pr_debug("post_programming TE status: %d\n",
+			ctx->ignore_external_te);
+
+		pp_base = mctl->mixer_left->pingpong_base;
+
+		/* enable MDP to listen to the TE */
+		cfg = mdss_mdp_pingpong_read(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+		cfg |= BIT(20);
+		mdss_mdp_pingpong_write(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		ctx->ignore_external_te = false;
+	}
+}
+
+/*
+ * mdss_mdp_cmd_wait4_autorefresh_pp() - if an autorefresh-triggered
+ * transfer is mid-frame (line counter between 1 and ROI height), arm the
+ * pingpong-done interrupt and wait for it so the frame finishes before
+ * autorefresh is disabled.
+ *
+ * On timeout, re-read the line counter: if it equals the ROI height the
+ * frame actually completed and the irq is cleaned up quietly; otherwise
+ * dump debug buses and panic via MDSS_XLOG_TOUT_HANDLER.
+ */
+static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
+{
+	int rc;
+	u32 val, line_out, intr_type = MDSS_MDP_IRQ_TYPE_PING_PONG_COMP;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+
+	/* a transfer is in flight only while 0 < line_out < roi.h */
+	if ((line_out < ctl->mixer_left->roi.h) && line_out) {
+		reinit_completion(&ctx->autorefresh_ppdone);
+
+		/* enable ping pong done */
+		mdss_mdp_set_intr_callback(intr_type, ctx->current_pp_num,
+					mdss_mdp_cmd_autorefresh_pp_done, ctl);
+		mdss_mdp_irq_enable(intr_type, ctx->current_pp_num);
+
+		/* wait for ping pong done */
+		rc = wait_for_completion_timeout(&ctx->autorefresh_ppdone,
+				KOFF_TIMEOUT);
+		if (rc <= 0) {
+			val = mdss_mdp_pingpong_read(pp_base,
+				MDSS_MDP_REG_PP_LINE_COUNT);
+			if (val == ctl->mixer_left->roi.h) {
+				/* frame done but irq missed: tidy up silently */
+				mdss_mdp_irq_clear(ctl->mdata,
+					MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+					ctx->current_pp_num);
+				mdss_mdp_irq_disable_nosync(intr_type,
+					ctx->current_pp_num);
+				mdss_mdp_set_intr_callback(intr_type,
+					ctx->current_pp_num, NULL, NULL);
+			} else {
+				pr_err("timedout waiting for ctl%d autorefresh pp done\n",
+					ctl->num);
+				MDSS_XLOG(0xbad3);
+				MDSS_XLOG_TOUT_HANDLER("mdp",
+					"vbif", "dbg_bus", "vbif_dbg_bus",
+					"panic");
+			}
+		}
+	}
+}
+
+/*
+ * mdss_mdp_cmd_autorefresh_done() - irq handler for the pingpong
+ * AUTO_REF interrupt: disables/unhooks the irq (nosync, irq context)
+ * and completes autorefresh_done for waiters.
+ */
+static void mdss_mdp_cmd_autorefresh_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+	complete_all(&ctx->autorefresh_done);
+}
+
+/*
+ * get_autorefresh_timeout() - compute a jiffies timeout long enough to
+ * cover "frame_cnt" autorefresh periods (line_count * frame_cnt lines at
+ * the panel line rate), doubled for margin and floored at
+ * CMD_MODE_IDLE_TIMEOUT.
+ *
+ * NOTE(review): return type is u32 but -EINVAL is returned on error, and
+ * the caller stores the result into an unsigned long timeout — an error
+ * becomes a huge positive timeout. Consider returning the floor value
+ * (or making the return signed) instead.
+ *
+ * NOTE(review): pinfo = &...->panel_info is an address-of, so the
+ * "!pinfo" test can never fire; only the mixer check is effective.
+ */
+static u32 get_autorefresh_timeout(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_cmd_ctx *ctx, u32 frame_cnt)
+{
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_panel_info *pinfo;
+	u32 line_count;
+	u32 fps, v_total;
+	unsigned long autorefresh_timeout;
+
+	pinfo = &ctl->panel_data->panel_info;
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+	if (!mixer || !pinfo)
+		return -EINVAL;
+
+	/* with TE ignored the hw counts the full sync height, not the ROI */
+	if (!ctx->ignore_external_te)
+		line_count = ctl->mixer_left->roi.h;
+	else
+		line_count = mdss_mdp_pingpong_read(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
+
+	fps = mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_HZ);
+	v_total = mdss_panel_get_vtotal(pinfo);
+
+	/*
+	 * calculate the expected delay for the autorefresh to happen,
+	 * this should be:
+	 * autorefresh_done = line_count * frame_cnt * line_time
+	 */
+	frame_cnt *= 1000; /* to use mS */
+	autorefresh_timeout = mult_frac(line_count, frame_cnt,
+		(fps * v_total));
+
+	/* multiply by two to consider worst case scenario */
+	autorefresh_timeout *= 2;
+	autorefresh_timeout = msecs_to_jiffies(autorefresh_timeout);
+
+	/* NOTE(review): "frames" below prints frame_cnt already scaled x1000 */
+	pr_debug("lines:%d fps:%d v_total:%d frames:%d timeout=%lu\n",
+		line_count, fps, v_total, frame_cnt, autorefresh_timeout);
+
+	autorefresh_timeout = (autorefresh_timeout > CMD_MODE_IDLE_TIMEOUT) ?
+		autorefresh_timeout : CMD_MODE_IDLE_TIMEOUT;
+
+	return autorefresh_timeout;
+}
+
+/*
+ * mdss_mdp_cmd_wait4_autorefresh_done() - arm the pingpong AUTO_REF
+ * interrupt and block until the hardware signals that the programmed
+ * number of autorefresh frames has elapsed (RD_PTR reset count reaches
+ * autorefresh_frame_cnt).
+ *
+ * On timeout, first check the raw interrupt status: if the event fired
+ * but the irq was not delivered, clean up manually and treat it as
+ * success; otherwise log and dump the debug buses.
+ */
+static void mdss_mdp_cmd_wait4_autorefresh_done(struct mdss_mdp_ctl *ctl)
+{
+	int rc;
+	u32 val, line_out;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	unsigned long flags;
+	unsigned long autorefresh_timeout;
+
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+
+	reinit_completion(&ctx->autorefresh_done);
+
+	/* enable autorefresh done */
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num, mdss_mdp_cmd_autorefresh_done, ctl);
+	mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num);
+
+	/*
+	 * Wait for autorefresh done, note that this interrupt would happen
+	 * once the RD_PTR is reset to init value for the number of frames
+	 * programmed with "autorefresh_frame_cnt", so this wait would take
+	 * one RD_PTR reset, if autorefresh_frame_cnt = 1
+	 * or the number of RD_PTR resets set by "autorefresh_frame_cnt".
+	 */
+	autorefresh_timeout = get_autorefresh_timeout(ctl,
+		ctx, ctx->autorefresh_frame_cnt);
+	rc = wait_for_completion_timeout(&ctx->autorefresh_done,
+			autorefresh_timeout);
+
+	if (rc <= 0) {
+		u32 status, mask;
+
+		/* did the event fire without the irq being serviced? */
+		mask = mdss_mdp_get_irq_mask(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num);
+		status = mask & readl_relaxed(ctl->mdata->mdp_base +
+				MDSS_MDP_REG_INTR_STATUS);
+
+		if (status) {
+			pr_warn("autorefresh done but irq not triggered\n");
+			mdss_mdp_irq_clear(ctl->mdata,
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num);
+			local_irq_save(flags);
+			mdss_mdp_irq_disable_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num);
+			mdss_mdp_set_intr_callback_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num, NULL, NULL);
+			local_irq_restore(flags);
+			/* mark the wait as successful */
+			rc = 1;
+		}
+	}
+
+	if (rc <= 0) {
+		val = mdss_mdp_pingpong_read(pp_base,
+			MDSS_MDP_REG_PP_LINE_COUNT);
+
+		pr_err("timedout waiting for ctl%d autorefresh done line_cnt:%d frames:%d\n",
+			ctl->num, val, ctx->autorefresh_frame_cnt);
+		MDSS_XLOG(0xbad4, val);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+			"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus", "panic");
+	}
+}
+
+/* caller needs to hold autorefresh_lock before calling this function */
+/*
+ * mdss_mdp_disable_autorefresh() - safely turn hardware autorefresh off.
+ * Sequence: ignore panel TE (clear BIT(20)) so no new transfer starts,
+ * drain any in-flight transfer on master and slave, clear the
+ * AUTOREFRESH_CONFIG enable bit, then restore TE listening.
+ */
+static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl)
+{
+	u32 cfg;
+	struct mdss_mdp_cmd_ctx *ctx;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	MDSS_XLOG(ctx->autorefresh_state, ctx->autorefresh_frame_cnt);
+
+	/*
+	 * This can happen if driver gets sysfs request to enable autorefresh,
+	 * and a CMD_STOP is received before autorefresh is turned on by
+	 * the atomic commit.
+	 */
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON_REQUESTED) {
+		ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+		ctx->autorefresh_frame_cnt = 0;
+		return 0;
+	}
+
+	pr_debug("%pS->%s: disabling autorefresh\n",
+		__builtin_return_address(0), __func__);
+
+	/*
+	 * Wait for autorefresh done before disabling it.
+	 * This is intended for debug only; if enabled it would cause a large
+	 * delay during disable due RD_PTR is program to wait for
+	 * wrapping around, which can take hundreds of ms
+	 */
+	if (mdata->wait4autorefresh)
+		mdss_mdp_cmd_wait4_autorefresh_done(ctl);
+
+	/*
+	 * To disable auto-refresh we need to make sure that no transfer
+	 * is on-going when we write the bit to disable it.
+	 * But since when autorefresh is enabled the HW automatically
+	 * will trigger a transfer whenever external TE is received and
+	 * the hw frame_cnt matches the programmed autorefresh frame_cnt,
+	 * in order to have enough time to disable the feature we will instruct
+	 * MDP to ignore the panel TE first; when doing this, the hw frame_cnt
+	 * will be increased only when the internal counter wraps-around
+	 * (instead of each time that the external panel TE is genarated),
+	 * this gives us enough margin to disable autorefresh.
+	 */
+	cfg = mdss_mdp_pingpong_read(pp_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+	cfg &= ~BIT(20);
+	mdss_mdp_pingpong_write(pp_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+	MDSS_XLOG(cfg);
+
+	/* wait for previous transfer to finish */
+	mdss_mdp_cmd_wait4_autorefresh_pp(ctl);
+	if (sctl)
+		mdss_mdp_cmd_wait4_autorefresh_pp(sctl);
+
+	/* disable autorefresh */
+	mdss_mdp_pingpong_write(pp_base, MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+
+	/* pingpong-split slaves have their own pingpong block to clear */
+	if (is_pingpong_split(ctl->mfd))
+		mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
+				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+
+	ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+	ctx->autorefresh_frame_cnt = 0;
+
+	/* enable MDP to listen to the TE */
+	cfg = mdss_mdp_pingpong_read(pp_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+	cfg |= BIT(20);
+	mdss_mdp_pingpong_write(pp_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+
+	ctl->mdata->serialize_wait4pp = false;
+	return 0;
+}
+
+
+/*
+ * __mdss_mdp_kickoff() - start the frame transfer. If autorefresh is
+ * requested/active, program AUTOREFRESH_CONFIG (BIT(31) enable + frame
+ * count) so the hardware kicks itself; otherwise write CTL_START for a
+ * software kickoff. Caller holds autorefresh_lock.
+ */
+static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool is_pp_split = is_pingpong_split(ctl->mfd);
+
+	MDSS_XLOG(ctx->autorefresh_state);
+
+	if ((ctx->autorefresh_state == MDP_AUTOREFRESH_ON_REQUESTED) ||
+		(ctx->autorefresh_state == MDP_AUTOREFRESH_ON)) {
+
+		pr_debug("enabling autorefresh for every %d frames state %d\n",
+			ctx->autorefresh_frame_cnt, ctx->autorefresh_state);
+
+		/* Program HW to take care of Kickoff */
+		mdss_mdp_pingpong_write(ctl->mixer_left->pingpong_base,
+			MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG,
+			BIT(31) | ctx->autorefresh_frame_cnt);
+
+		if (is_pp_split)
+			mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
+				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG,
+				BIT(31) | ctx->autorefresh_frame_cnt);
+
+		MDSS_XLOG(0x11, ctx->autorefresh_frame_cnt,
+			ctx->autorefresh_state, is_pp_split);
+		/* request satisfied: transition ON_REQUESTED -> ON */
+		ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
+
+	} else {
+		/* SW Kickoff */
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+		MDSS_XLOG(0x11, ctx->autorefresh_state);
+	}
+}
+
+/*
+ * There are 3 partial update possibilities
+ * left only ==> enable left pingpong_done
+ * left + right ==> enable both pingpong_done
+ * right only ==> enable right pingpong_done
+ *
+ * notification is triggered at pingpong_done which will
+ * signal timeline to release source buffer
+ *
+ * for left+right case, pingpong_done is enabled for both and
+ * only the last pingpong_done should trigger the notification
+ */
+/*
+ * mdss_mdp_cmd_kickoff() - display_fnc for command mode: powers the
+ * panel on first frame, picks the pingpong block, turns on resources,
+ * reconfigures DSC/ROI, sends pending DSI commands, arms pingpong-done
+ * irqs, then triggers the transfer via __mdss_mdp_kickoff.
+ * The statement order below is hardware-mandated; do not reorder.
+ */
+static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_ctl *sctl = NULL, *mctl = ctl;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (ctx->intf_stopped) {
+		pr_err("ctx=%d stopped already\n", ctx->current_pp_num);
+		return -EPERM;
+	}
+
+	if (!ctl->is_master) {
+		mctl = mdss_mdp_get_main_ctl(ctl);
+	} else {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl && (sctl->roi.w == 0 || sctl->roi.h == 0)) {
+			/* left update only */
+			sctl = NULL;
+		}
+	}
+
+	mdss_mdp_ctl_perf_set_transaction_status(ctl,
+		PERF_HW_MDP_STATE, PERF_STATUS_BUSY);
+
+	if (sctl) {
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+		mdss_mdp_ctl_perf_set_transaction_status(sctl,
+			PERF_HW_MDP_STATE, PERF_STATUS_BUSY);
+	}
+
+	/*
+	 * Turn on the panel, if not already. This is because the panel is
+	 * turned on only when we send the first frame and not during cmd
+	 * start. This is to ensure that no artifacts are seen on the panel.
+	 */
+	if (__mdss_mdp_cmd_is_panel_power_off(ctx))
+		mdss_mdp_cmd_panel_on(ctl, sctl);
+
+	/* default pp unless an aux pingpong block is required */
+	ctx->current_pp_num = ctx->default_pp_num;
+	if (sctx)
+		sctx->current_pp_num = sctx->default_pp_num;
+
+	if (__mdss_mdp_cmd_is_aux_pp_needed(mdata, mctl))
+		ctx->current_pp_num = ctx->aux_pp_num;
+
+	MDSS_XLOG(ctl->num, ctx->current_pp_num,
+		ctl->roi.x, ctl->roi.y, ctl->roi.w, ctl->roi.h);
+
+	atomic_inc(&ctx->koff_cnt);
+	if (sctx)
+		atomic_inc(&sctx->koff_cnt);
+
+	trace_mdp_cmd_kickoff(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	/*
+	 * Call state machine with kickoff event, we just do it for
+	 * current CTL, but internally state machine will check and
+	 * if this is a dual dsi, it will enable the power resources
+	 * for both DSIs
+	 */
+	mdss_mdp_resource_control(ctl, MDP_RSRC_CTL_EVENT_KICKOFF);
+
+	if (!ctl->is_master)
+		mctl = mdss_mdp_get_main_ctl(ctl);
+	mdss_mdp_cmd_dsc_reconfig(mctl);
+
+	mdss_mdp_cmd_set_partial_roi(ctl);
+
+	/*
+	 * tx dcs command if had any
+	 */
+	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_CMDLIST_KOFF, NULL,
+		CTL_INTF_EVENT_FLAG_DEFAULT);
+
+	mdss_mdp_cmd_set_stream_size(ctl);
+
+	mdss_mdp_cmd_set_sync_ctx(ctl, sctl);
+
+	/* autorefresh state may only change under this lock */
+	mutex_lock(&ctx->autorefresh_lock);
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_OFF_REQUESTED) {
+		pr_debug("%s: disable autorefresh ctl%d\n", __func__, ctl->num);
+		mdss_mdp_disable_autorefresh(ctl, sctl);
+	}
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, mdss_mdp_cmd_pingpong_done, ctl);
+	mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	if (sctx) {
+		mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			sctx->current_pp_num, mdss_mdp_cmd_pingpong_done, sctl);
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			sctx->current_pp_num);
+	}
+
+	mdss_mdp_ctl_perf_set_transaction_status(ctl,
+		PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
+	if (sctl) {
+		mdss_mdp_ctl_perf_set_transaction_status(sctl,
+			PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
+	}
+
+	/* lineptr irq only wanted for full-frame updates */
+	if (mdss_mdp_is_lineptr_supported(ctl)) {
+		if (mdss_mdp_is_full_frame_update(ctl))
+			mdss_mdp_cmd_lineptr_ctrl(ctl, true);
+		else if (ctx->lineptr_enabled)
+			mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+	}
+
+	/* Kickoff */
+	__mdss_mdp_kickoff(ctl, ctx);
+
+	mdss_mdp_cmd_post_programming(ctl);
+
+	/*
+	 * If auto-refresh is enabled, wait for an autorefresh done,
+	 * to make sure configuration has taken effect.
+	 * Do this after post-programming, so TE is enabled.
+	 */
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON)
+		mdss_mdp_cmd_wait4_autorefresh_done(ctl);
+
+	mb(); /* make sure everything is written before enable */
+	mutex_unlock(&ctx->autorefresh_lock);
+
+	MDSS_XLOG(ctl->num, ctx->current_pp_num,
+		sctx ? sctx->current_pp_num : -1, atomic_read(&ctx->koff_cnt));
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_restore() - re-apply tearcheck configuration for the
+ * master (and slave, when split/pingpong-split) contexts, then enable
+ * tearcheck. Used when resuming without a full power collapse.
+ * @locked: passed through to tearcheck_setup (caller holds needed lock).
+ *
+ * Fix: the master ctx was dereferenced by tearcheck_setup without a
+ * NULL check, unlike every other entry point in this file; bail out
+ * with -ENODEV if the interface context is not present.
+ */
+int mdss_mdp_cmd_restore(struct mdss_mdp_ctl *ctl, bool locked)
+{
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+
+	if (!ctl)
+		return -EINVAL;
+
+	pr_debug("%s: called for ctl%d\n", __func__, ctl->num);
+
+	ctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid master ctx\n", __func__);
+		return -ENODEV;
+	}
+
+	/* locate the slave context, if this is a split configuration */
+	if (is_pingpong_split(ctl->mfd)) {
+		sctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[SLAVE_CTX];
+	} else if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+		if (sctl)
+			sctx = (struct mdss_mdp_cmd_ctx *)
+				sctl->intf_ctx[MASTER_CTX];
+	}
+
+	/* enable tearcheck only when both contexts configured cleanly */
+	if (mdss_mdp_cmd_tearcheck_setup(ctx, locked)) {
+		pr_warn("%s: ctx%d tearcheck setup failed\n", __func__,
+			ctx->current_pp_num);
+	} else {
+		if (sctx && mdss_mdp_cmd_tearcheck_setup(sctx, locked))
+			pr_warn("%s: ctx%d tearcheck setup failed\n", __func__,
+				sctx->current_pp_num);
+		else
+			mdss_mdp_tearcheck_enable(ctl, true);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_ctx_stop() - quiesce one command-mode interface context:
+ * block new kickoffs, drain rd-ptr waiters, force vsync/lineptr irq
+ * refcounts to zero, unregister recovery callbacks, release MDP/DSI
+ * resources and flush pending pingpong work. If the panel stays on,
+ * the context is preserved; otherwise irq callbacks are unhooked and
+ * the context is wiped.
+ */
+int mdss_mdp_cmd_ctx_stop(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_cmd_ctx *ctx, int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct mdss_mdp_ctl *sctl = NULL;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* intf stopped, no more kickoff */
+	ctx->intf_stopped = 1;
+
+	/* Make sure any rd ptr for dsi callback is done before disable vsync */
+	if (is_pingpong_split(ctl->mfd)) {
+		pr_debug("%s will wait for rd ptr:%d\n", __func__,
+			atomic_read(&ctx->rdptr_cnt));
+		MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+		mdss_mdp_cmd_wait4readptr(ctx);
+	}
+
+	/*
+	 * if any vsyncs are still enabled, loop until the refcount
+	 * goes to zero, so the rd ptr interrupt is disabled.
+	 * Ideally this shouldn't be the case since vsync handlers
+	 * has been flushed by now, so issue a warning in case
+	 * that we hit this condition.
+	 */
+	if (ctx->vsync_irq_cnt) {
+		WARN(1, "vsync still enabled\n");
+		while (mdss_mdp_setup_vsync(ctx, false))
+			;
+	}
+	if (ctx->lineptr_irq_cnt) {
+		WARN(1, "lineptr irq still enabled\n");
+		while (mdss_mdp_setup_lineptr(ctx, false))
+			;
+	}
+
+	/* NULL handler data unregisters the recovery/MDP callbacks */
+	if (!ctl->pending_mode_switch) {
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_MDP_CALLBACK,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+	}
+
+	/* shut down the MDP/DSI resources if still enabled */
+	mdss_mdp_resource_control(ctl, MDP_RSRC_CTL_EVENT_STOP);
+
+	flush_work(&ctx->pp_done_work);
+
+	if (mdss_panel_is_power_off(panel_power_state) ||
+	    mdss_panel_is_power_on_ulp(panel_power_state))
+		mdss_mdp_tearcheck_enable(ctl, false);
+
+	if (mdss_panel_is_power_on(panel_power_state)) {
+		pr_debug("%s: intf stopped with panel on\n", __func__);
+		return 0;
+	}
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+		ctx->default_pp_num, NULL, NULL);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+		ctx->default_pp_num, NULL, NULL);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->default_pp_num, NULL, NULL);
+
+	/*
+	 * NOTE(review): memset also zeroes the embedded mutexes/spinlocks;
+	 * they are re-initialized in ctx_setup on the next start, but any
+	 * concurrent lock holder here would be undefined behavior — verify
+	 * callers serialize against ctx users before the wipe.
+	 */
+	memset(ctx, 0, sizeof(*ctx));
+	/* intf stopped, no more kickoff */
+	ctx->intf_stopped = 1;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_intfs_stop() - stop the master context for @session and,
+ * for pingpong-split panels, the slave context on session+1 as well.
+ * Sessions out of range are silently treated as success.
+ */
+static int mdss_mdp_cmd_intfs_stop(struct mdss_mdp_ctl *ctl, int session,
+	int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+
+	if (session >= MAX_SESSIONS)
+		return 0;
+
+	ctx = ctl->intf_ctx[MASTER_CTX];
+	if (!ctx->ref_cnt) {
+		pr_err("invalid ctx session: %d\n", session);
+		return -ENODEV;
+	}
+
+	mdss_mdp_cmd_ctx_stop(ctl, ctx, panel_power_state);
+
+	if (is_pingpong_split(ctl->mfd)) {
+		/* slave context lives in the next session slot */
+		session += 1;
+
+		if (session >= MAX_SESSIONS)
+			return 0;
+
+		ctx = ctl->intf_ctx[SLAVE_CTX];
+		if (!ctx->ref_cnt) {
+			pr_err("invalid ctx session: %d\n", session);
+			return -ENODEV;
+		}
+		mdss_mdp_cmd_ctx_stop(ctl, ctx, panel_power_state);
+	}
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_stop_sub() - remove all vsync handlers and disable the
+ * lineptr irq for a ctl, then stop its interface session(s).
+ */
+static int mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl,
+		int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_mdp_vsync_handler *tmp, *handle;
+	int session;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
+		mdss_mdp_cmd_remove_vsync_handler(ctl, handle);
+	if (mdss_mdp_is_lineptr_supported(ctl))
+		mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_ENTRY);
+
+	/* Command mode is supported only starting at INTF1 */
+	session = ctl->intf_num - MDSS_MDP_INTF1;
+	return mdss_mdp_cmd_intfs_stop(ctl, session, panel_power_state);
+}
+
+/*
+ * mdss_mdp_cmd_stop() - stop_fnc for command mode. Handles the
+ * interactive / low-power (LP) / ultra-low-power (ULP) / off state
+ * machine: decides whether to turn off interface clocks, whether to
+ * send panel blank/off events, and whether to tear down the interface
+ * contexts completely.
+ *
+ * Fixes vs. original:
+ *  - the slave stop's return value was discarded, so the following
+ *    IS_ERR_VALUE(ret) tested the stale master result; capture it.
+ *  - both WARN strings said "unblank error" on the blank/off path;
+ *    the messages now name the event that actually failed.
+ */
+int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	bool panel_off = false;
+	bool turn_off_clocks = false;
+	bool send_panel_events = false;
+	int ret = 0;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (__mdss_mdp_cmd_is_panel_power_off(ctx)) {
+		pr_debug("%s: panel already off\n", __func__);
+		return 0;
+	}
+
+	if (ctx->panel_power_state == panel_power_state) {
+		pr_debug("%s: no transition needed %d --> %d\n", __func__,
+			ctx->panel_power_state, panel_power_state);
+		return 0;
+	}
+
+	pr_debug("%s: transition from %d --> %d\n", __func__,
+		ctx->panel_power_state, panel_power_state);
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	MDSS_XLOG(ctx->panel_power_state, panel_power_state);
+
+	mutex_lock(&ctl->offlock);
+	mutex_lock(&cmd_off_mtx);
+	if (mdss_panel_is_power_off(panel_power_state)) {
+		/* Transition to display off */
+		send_panel_events = true;
+		turn_off_clocks = true;
+		panel_off = true;
+	} else if (__mdss_mdp_cmd_is_panel_power_on_interactive(ctx)) {
+		/*
+		 * If we are transitioning from interactive to low
+		 * power, then we need to send events to the interface
+		 * so that the panel can be configured in low power
+		 * mode.
+		 */
+		send_panel_events = true;
+		if (mdss_panel_is_power_on_ulp(panel_power_state))
+			turn_off_clocks = true;
+	} else {
+		/* Transitions between low power and ultra low power */
+		if (mdss_panel_is_power_on_ulp(panel_power_state)) {
+			/*
+			 * If we are transitioning from low power to ultra low
+			 * power mode, no more display updates are expected.
+			 * Turn off the interface clocks.
+			 */
+			pr_debug("%s: turn off clocks\n", __func__);
+			turn_off_clocks = true;
+		} else {
+			/*
+			 * Transition from ultra low power to low power does
+			 * not require any special handling. Just rest the
+			 * intf_stopped flag so that the clocks would
+			 * get turned on when the first update comes.
+			 */
+			pr_debug("%s: reset intf_stopped flag.\n", __func__);
+			mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+				(void *)&ctx->intf_recovery,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+
+			mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_REGISTER_MDP_CALLBACK,
+				(void *)&ctx->intf_mdp_callback,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+
+			ctx->intf_stopped = 0;
+			if (sctx)
+				sctx->intf_stopped = 0;
+			/*
+			 * Tearcheck was disabled while entering LP2 state.
+			 * Enable it back to allow updates in LP1 state.
+			 */
+			mdss_mdp_tearcheck_enable(ctl, true);
+			goto end;
+		}
+	}
+
+	if (!turn_off_clocks)
+		goto panel_events;
+
+	if (ctl->pending_mode_switch)
+		send_panel_events = false;
+
+	pr_debug("%s: turn off interface clocks\n", __func__);
+	ret = mdss_mdp_cmd_stop_sub(ctl, panel_power_state);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("%s: unable to stop interface: %d\n",
+				__func__, ret);
+		goto end;
+	}
+
+	if (sctl) {
+		/* capture the slave result so the check below is meaningful */
+		ret = mdss_mdp_cmd_stop_sub(sctl, panel_power_state);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_err("%s: unable to stop slave intf: %d\n",
+					__func__, ret);
+			goto end;
+		}
+	}
+
+panel_events:
+	if ((!is_panel_split(ctl->mfd) || is_pingpong_split(ctl->mfd) ||
+	    (is_panel_split(ctl->mfd) && sctl)) && send_panel_events) {
+		pr_debug("%s: send panel events\n", __func__);
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK,
+			(void *) (long int) panel_power_state,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(ret, "intf %d blank error (%d)\n", ctl->intf_num, ret);
+
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF,
+			(void *) (long int) panel_power_state,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(ret, "intf %d panel off error (%d)\n",
+			ctl->intf_num, ret);
+	}
+
+
+	if (!panel_off) {
+		pr_debug("%s: cmd_stop with panel always on\n", __func__);
+		goto end;
+	}
+
+	pr_debug("%s: turn off panel\n", __func__);
+	ctl->intf_ctx[MASTER_CTX] = NULL;
+	ctl->intf_ctx[SLAVE_CTX] = NULL;
+	ctl->ops.stop_fnc = NULL;
+	ctl->ops.display_fnc = NULL;
+	ctl->ops.wait_pingpong = NULL;
+	ctl->ops.add_vsync_handler = NULL;
+	ctl->ops.remove_vsync_handler = NULL;
+	ctl->ops.reconfigure = NULL;
+
+end:
+	if (!IS_ERR_VALUE((unsigned long)ret)) {
+		struct mdss_mdp_cmd_ctx *sctx = NULL;
+
+		ctx->panel_power_state = panel_power_state;
+		/* In pingpong split we have single controller, dual context */
+		if (is_pingpong_split(ctl->mfd))
+			sctx = (struct mdss_mdp_cmd_ctx *)
+					ctl->intf_ctx[SLAVE_CTX];
+		if (sctx)
+			sctx->panel_power_state = panel_power_state;
+	}
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_EXIT);
+	mutex_unlock(&cmd_off_mtx);
+	mutex_unlock(&ctl->offlock);
+	pr_debug("%s:-\n", __func__);
+
+	return ret;
+}
+
+/*
+ * early_wakeup_work() - work item scheduled from the early-wake-up
+ * input handler; wakes MDP/DSI resources ahead of the next commit via
+ * the EARLY_WAKE_UP resource-control event.
+ *
+ * NOTE(review): the !ctx check is dead code — container_of on a valid
+ * work pointer can never yield NULL here.
+ */
+static void early_wakeup_work(struct work_struct *work)
+{
+	int rc = 0;
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(work, typeof(*ctx), early_wakeup_clk_work);
+	struct mdss_mdp_ctl *ctl;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ATRACE_BEGIN(__func__);
+	ctl = ctx->ctl;
+
+	if (!ctl) {
+		pr_err("%s: invalid ctl\n", __func__);
+		goto fail;
+	}
+
+	rc = mdss_mdp_resource_control(ctl, MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP);
+	if (rc)
+		pr_err("%s: failed to control resources\n", __func__);
+
+fail:
+	ATRACE_END(__func__);
+}
+
+/*
+ * mdss_mdp_cmd_early_wake_up() - early_wake_up_fnc hook, called on
+ * input events. Debounces to at most one wake-up per
+ * INPUT_EVENT_HANDLER_DELAY_USECS, then defers the actual resource
+ * wake-up to a work item (we may be in interrupt context here).
+ */
+static int mdss_mdp_cmd_early_wake_up(struct mdss_mdp_ctl *ctl)
+{
+	u64 curr_time;
+	struct mdss_mdp_cmd_ctx *ctx;
+
+	curr_time = ktime_to_us(ktime_get());
+
+	/* rate-limit: ignore events arriving within the debounce window */
+	if ((curr_time - ctl->last_input_time) <
+			INPUT_EVENT_HANDLER_DELAY_USECS)
+		return 0;
+	ctl->last_input_time = curr_time;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	/*
+	 * Early wake up event is called from an interrupt context and
+	 * involves cancelling queued work items. So this will be
+	 * scheduled in a work item.
+	 * Only schedule if the interface has not been stopped.
+	 */
+	if (ctx && !ctx->intf_stopped)
+		schedule_work(&ctx->early_wakeup_clk_work);
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_ctx_setup() - initialize one command-mode interface
+ * context: locks, completions, work items, irq callbacks and tearcheck.
+ * @default_pp_num: pingpong block normally driven by this ctx.
+ * @aux_pp_num: alternate pingpong block for split-LM configurations.
+ * @pingpong_split_slave: true when this ctx is the pp-split slave.
+ */
+static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_cmd_ctx *ctx, int default_pp_num, int aux_pp_num,
+	bool pingpong_split_slave)
+{
+	int ret = 0;
+
+	/*
+	 * Initialize the mutex before the ctl is assigned,
+	 * so we can prevent any race condition with the
+	 * initialization of the the mutex and the autorefresh
+	 * sysfs.
+	 */
+	mutex_init(&ctx->autorefresh_lock);
+
+	ctx->ctl = ctl;
+	ctx->default_pp_num = default_pp_num;
+	ctx->aux_pp_num = aux_pp_num;
+	ctx->pingpong_split_slave = pingpong_split_slave;
+	ctx->pp_timeout_report_cnt = 0;
+	init_waitqueue_head(&ctx->pp_waitq);
+	init_waitqueue_head(&ctx->rdptr_waitq);
+	init_completion(&ctx->stop_comp);
+	init_completion(&ctx->autorefresh_ppdone);
+	init_completion(&ctx->rdptr_done);
+	init_completion(&ctx->pp_done);
+	init_completion(&ctx->autorefresh_done);
+	spin_lock_init(&ctx->clk_lock);
+	spin_lock_init(&ctx->koff_lock);
+	mutex_init(&ctx->clk_mtx);
+	mutex_init(&ctx->mdp_rdptr_lock);
+	mutex_init(&ctx->mdp_wrptr_lock);
+	INIT_WORK(&ctx->gate_clk_work, clk_ctrl_gate_work);
+	INIT_DELAYED_WORK(&ctx->delayed_off_clk_work,
+		clk_ctrl_delayed_off_work);
+	INIT_WORK(&ctx->pp_done_work, pingpong_done_work);
+	INIT_WORK(&ctx->early_wakeup_clk_work, early_wakeup_work);
+	atomic_set(&ctx->pp_done_cnt, 0);
+	ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+	ctx->autorefresh_frame_cnt = 0;
+	INIT_LIST_HEAD(&ctx->vsync_handlers);
+	INIT_LIST_HEAD(&ctx->lineptr_handlers);
+
+	/* hooks used by the DSI layer for error recovery and clk callbacks */
+	ctx->intf_recovery.fxn = mdss_mdp_cmd_intf_recovery;
+	ctx->intf_recovery.data = ctx;
+
+	ctx->intf_mdp_callback.fxn = mdss_mdp_cmd_intf_callback;
+	ctx->intf_mdp_callback.data = ctx;
+
+	ctx->intf_stopped = 0;
+
+	pr_debug("%s: ctx=%pK num=%d aux=%d\n", __func__, ctx,
+		default_pp_num, aux_pp_num);
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+		ctx->default_pp_num, mdss_mdp_cmd_readptr_done, ctl);
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+		ctx->default_pp_num, mdss_mdp_cmd_lineptr_done, ctl);
+
+	ret = mdss_mdp_cmd_tearcheck_setup(ctx, false);
+	if (ret)
+		pr_err("tearcheck setup failed\n");
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_cmd_intfs_setup() - claim and initialize the interface
+ * context(s) for @session. Handles the "panel always on" resume case
+ * (restore tearcheck and exit low power instead of a full setup), the
+ * split-LM aux pingpong selection, and the pingpong-split slave context
+ * on session+1.
+ *
+ * Fix: the mixer-lookup failure paths returned after ctx->ref_cnt was
+ * already incremented, leaking the refcount so every later setup on the
+ * session failed with "already in use"; drop the reference (and the
+ * intf_ctx pointer) on those paths.
+ */
+static int mdss_mdp_cmd_intfs_setup(struct mdss_mdp_ctl *ctl,
+			int session)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_mixer *mixer;
+	int ret;
+	u32 default_pp_num, aux_pp_num;
+
+	if (session >= MAX_SESSIONS)
+		return 0;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	ctx = &mdss_mdp_cmd_ctx_list[session];
+	if (ctx->ref_cnt) {
+		if (mdss_panel_is_power_on(ctx->panel_power_state)) {
+			pr_debug("%s: cmd_start with panel always on\n",
+				__func__);
+			/*
+			 * It is possible that the resume was called from the
+			 * panel always on state without MDSS every
+			 * power-collapsed (such as a case with any other
+			 * interfaces connected). In such cases, we need to
+			 * explicitly call the restore function to enable
+			 * tearcheck logic.
+			 */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_mdp_cmd_restore(ctl, false);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+			/* Turn on panel so that it can exit low power mode */
+			return mdss_mdp_cmd_panel_on(ctl, sctl);
+		}
+		pr_err("Intf %d already in use\n", session);
+		return -EBUSY;
+	}
+	ctx->ref_cnt++;
+	ctl->intf_ctx[MASTER_CTX] = ctx;
+
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_err("mixer not setup correctly\n");
+		ret = -ENODEV;
+		goto err_release_ctx;
+	}
+	default_pp_num = mixer->num;
+
+	if (is_split_lm(ctl->mfd)) {
+		if (is_dual_lm_single_display(ctl->mfd)) {
+			mixer = mdss_mdp_mixer_get(ctl,
+				MDSS_MDP_MIXER_MUX_RIGHT);
+			if (!mixer) {
+				pr_err("right mixer not setup correctly for dual_lm_single_display\n");
+				ret = -ENODEV;
+				goto err_release_ctx;
+			}
+			aux_pp_num = mixer->num;
+		} else { /* DUAL_LM_DUAL_DISPLAY */
+			struct mdss_mdp_ctl *mctl = ctl;
+
+			if (!mctl->is_master) {
+				mctl = mdss_mdp_get_main_ctl(ctl);
+				if (!mctl) {
+					pr_err("%s master ctl cannot be NULL\n",
+						__func__);
+					ret = -EINVAL;
+					goto err_release_ctx;
+				}
+			}
+
+			if (ctl->is_master) /* setup is called for master */
+				mixer = mdss_mdp_mixer_get(mctl,
+					MDSS_MDP_MIXER_MUX_RIGHT);
+			else
+				mixer = mdss_mdp_mixer_get(mctl,
+					MDSS_MDP_MIXER_MUX_LEFT);
+
+			if (!mixer) {
+				pr_err("right mixer not setup correctly for dual_lm_dual_display\n");
+				ret = -ENODEV;
+				goto err_release_ctx;
+			}
+			aux_pp_num = mixer->num;
+		}
+	} else {
+		aux_pp_num = default_pp_num;
+	}
+
+	ret = mdss_mdp_cmd_ctx_setup(ctl, ctx,
+		default_pp_num, aux_pp_num, false);
+	if (ret) {
+		pr_err("mdss_mdp_cmd_ctx_setup failed for default_pp:%d aux_pp:%d\n",
+			default_pp_num, aux_pp_num);
+		ret = -ENODEV;
+		goto err_release_ctx;
+	}
+
+	if (is_pingpong_split(ctl->mfd)) {
+		session += 1;
+		if (session >= MAX_SESSIONS)
+			return 0;
+		ctx = &mdss_mdp_cmd_ctx_list[session];
+		if (ctx->ref_cnt) {
+			if (mdss_panel_is_power_on(ctx->panel_power_state)) {
+				pr_debug("%s: cmd_start with panel always on\n",
+						__func__);
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+				mdss_mdp_cmd_restore(ctl, false);
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+				return mdss_mdp_cmd_panel_on(ctl, sctl);
+			}
+			pr_err("Intf %d already in use\n", session);
+			return -EBUSY;
+		}
+		ctx->ref_cnt++;
+
+		ctl->intf_ctx[SLAVE_CTX] = ctx;
+
+		ret = mdss_mdp_cmd_ctx_setup(ctl, ctx, session, session, true);
+		if (ret) {
+			pr_err("mdss_mdp_cmd_ctx_setup failed for slave ping pong block");
+			ctx->ref_cnt--;
+			ctl->intf_ctx[SLAVE_CTX] = NULL;
+			return -EPERM;
+		}
+	}
+	return 0;
+
+err_release_ctx:
+	/* undo the claim so the session can be set up again later */
+	ctx->ref_cnt--;
+	ctl->intf_ctx[MASTER_CTX] = NULL;
+	return ret;
+}
+
+/*
+ * mdss_mdp_switch_roi_reset() - during a mode switch, push the current
+ * ctl ROIs into panel_info, re-run DSC reconfiguration on the master
+ * and re-send the partial-update ROI. No-op unless the panel supports
+ * partial update.
+ */
+void mdss_mdp_switch_roi_reset(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *mctl = ctl;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+	if (!ctl->panel_data ||
+	  !ctl->panel_data->panel_info.partial_update_supported)
+		return;
+
+	ctl->panel_data->panel_info.roi = ctl->roi;
+	if (sctl && sctl->panel_data)
+		sctl->panel_data->panel_info.roi = sctl->roi;
+
+	/* DSC reconfig must run on the master ctl */
+	if (!ctl->is_master)
+		mctl = mdss_mdp_get_main_ctl(ctl);
+	mdss_mdp_cmd_dsc_reconfig(mctl);
+
+	mdss_mdp_cmd_set_partial_roi(ctl);
+}
+
+/*
+ * mdss_mdp_switch_to_vid_mode() - two-phase cmd->video mode switch.
+ * @prep true: rebalance DSI clock refcounts (clk ON for the DSI client)
+ * on both ctls before the switch. @prep false: tell DSI to reconfigure
+ * for MIPI video mode.
+ */
+void mdss_mdp_switch_to_vid_mode(struct mdss_mdp_ctl *ctl, int prep)
+{
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	long int mode = MIPI_VIDEO_PANEL;
+
+	pr_debug("%s start, prep = %d\n", __func__, prep);
+
+	if (prep) {
+		/*
+		 * In dsi_on there is an explicit decrement to dsi clk refcount
+		 * if we are in cmd mode, using the dsi client handle. We need
+		 * to rebalance clock in order to properly enable vid mode
+		 * compnents.
+		 */
+		clk_ctrl.state = MDSS_DSI_CLK_ON;
+		clk_ctrl.client = DSI_CLK_REQ_DSI_CLIENT;
+		if (sctl)
+			mdss_mdp_ctl_intf_event(sctl,
+				MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl,
+				CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+		return;
+	}
+
+	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RECONFIG_CMD,
+		(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+}
+
+/*
+ * mdss_mdp_cmd_reconfigure() - reconfigure hook for dynamic mode or
+ * resolution switch. SWITCH_TO_VIDEO_MODE delegates to
+ * mdss_mdp_switch_to_vid_mode; SWITCH_RESOLUTION in the prep phase
+ * reconfigures DSC, stops the ctl and notifies DSI, and in the
+ * non-prep phase only drops the clock vote taken during prep.
+ */
+static int mdss_mdp_cmd_reconfigure(struct mdss_mdp_ctl *ctl,
+		enum dynamic_switch_modes mode, bool prep)
+{
+	if (mdss_mdp_ctl_is_power_off(ctl))
+		return 0;
+
+	pr_debug("%s: ctl=%d mode=%d prep=%d\n", __func__,
+			ctl->num, mode, prep);
+
+	if (mode == SWITCH_TO_VIDEO_MODE) {
+		mdss_mdp_switch_to_vid_mode(ctl, prep);
+	} else if (mode == SWITCH_RESOLUTION) {
+		if (prep) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			/*
+			 * Setup DSC conifg early, as DSI configuration during
+			 * resolution switch would rely on DSC params for
+			 * stream configs.
+			 */
+			mdss_mdp_cmd_dsc_reconfig(ctl);
+
+			/*
+			 * Make explicit cmd_panel_on call, when dynamic
+			 * resolution switch request comes before cont-splash
+			 * handoff, to match the ctl_stop/ctl_start done
+			 * during the reconfiguration.
+			 */
+			if (ctl->switch_with_handoff) {
+				struct mdss_mdp_cmd_ctx *ctx;
+				struct mdss_mdp_ctl *sctl;
+
+				ctx = (struct mdss_mdp_cmd_ctx *)
+					ctl->intf_ctx[MASTER_CTX];
+				if (ctx &&
+				  __mdss_mdp_cmd_is_panel_power_off(ctx)) {
+					sctl = mdss_mdp_get_split_ctl(ctl);
+					mdss_mdp_cmd_panel_on(ctl, sctl);
+				}
+				ctl->switch_with_handoff = false;
+			}
+
+			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+			mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+				(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+		} else {
+			/* release the clock vote taken in the prep phase */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_start() - entry point that puts a ctl into command mode:
+ * sets up the interface session(s) and installs the command-mode ops
+ * vector on the ctl.
+ */
+int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
+{
+	int ret, session = 0;
+
+	pr_debug("%s:+\n", __func__);
+
+	/* Command mode is supported only starting at INTF1 */
+	session = ctl->intf_num - MDSS_MDP_INTF1;
+	ret = mdss_mdp_cmd_intfs_setup(ctl, session);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("unable to set cmd interface: %d\n", ret);
+		return ret;
+	}
+
+	/* install the command-mode operation table */
+	ctl->ops.stop_fnc = mdss_mdp_cmd_stop;
+	ctl->ops.display_fnc = mdss_mdp_cmd_kickoff;
+	ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
+	ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
+	ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
+	ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+	ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
+	ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
+	ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
+	ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming;
+	ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr;
+	pr_debug("%s:-\n", __func__);
+
+	return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
new file mode 100644
index 0000000..7b69aa3
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -0,0 +1,2195 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <video/msm_hdmi_modes.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
+
+/* wait for at least 2 vsyncs for lowest refresh rate (24hz) */
+#define VSYNC_TIMEOUT_US 100000
+
+/* Poll time to do recovery during active region */
+#define POLL_TIME_USEC_FOR_LN_CNT 500
+
+/* Filter out input events for 1 vsync time after receiving an input event */
+#define INPUT_EVENT_HANDLER_DELAY_USECS 16000
+
+enum {
+ MDP_INTF_INTR_PROG_LINE,
+ MDP_INTF_INTR_MAX,
+};
+
+struct intr_callback {
+ void (*func)(void *);
+ void *arg;
+};
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width;
+ u32 height;
+ u32 xres;
+ u32 yres;
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct mdss_mdp_video_ctx {
+ struct mdss_mdp_ctl *ctl;
+ u32 intf_num;
+ char __iomem *base;
+ u32 intf_type;
+ u8 ref_cnt;
+
+ u8 timegen_en;
+ bool polling_en;
+ u32 poll_cnt;
+ struct completion vsync_comp;
+ int wait_pending;
+
+ atomic_t vsync_ref;
+ spinlock_t vsync_lock;
+ spinlock_t dfps_lock;
+ struct mutex vsync_mtx;
+ struct list_head vsync_handlers;
+ struct mdss_intf_recovery intf_recovery;
+ struct work_struct early_wakeup_dfps_work;
+
+ atomic_t lineptr_ref;
+ spinlock_t lineptr_lock;
+ struct mutex lineptr_mtx;
+ struct list_head lineptr_handlers;
+
+ struct intf_timing_params itp;
+ bool lineptr_enabled;
+ u32 prev_wr_ptr_irq;
+
+ struct intr_callback mdp_intf_intr_cb[MDP_INTF_INTR_MAX];
+ u32 intf_irq_mask;
+ spinlock_t mdss_mdp_video_lock;
+ spinlock_t mdss_mdp_intf_intr_lock;
+};
+
+static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
+ struct mdss_mdp_ctl *ctl);
+
+static void mdss_mdp_fetch_end_config(struct mdss_mdp_video_ctx *ctx,
+ struct mdss_mdp_ctl *ctl);
+
+static void early_wakeup_dfps_update_work(struct work_struct *work);
+
+static inline void mdp_video_write(struct mdss_mdp_video_ctx *ctx,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, ctx->base + reg);
+}
+
+static inline u32 mdp_video_read(struct mdss_mdp_video_ctx *ctx,
+ u32 reg)
+{
+ return readl_relaxed(ctx->base + reg);
+}
+
+static inline u32 mdss_mdp_video_line_count(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ u32 line_cnt = 0;
+
+ if (!ctl || !ctl->intf_ctx[MASTER_CTX])
+ goto line_count_exit;
+ ctx = ctl->intf_ctx[MASTER_CTX];
+ line_cnt = mdp_video_read(ctx, MDSS_MDP_REG_INTF_LINE_COUNT);
+line_count_exit:
+ return line_cnt;
+}
+
+static int mdss_mdp_intf_intr2index(u32 intr_type)
+{
+ int index = -1;
+
+ switch (intr_type) {
+ case MDSS_MDP_INTF_IRQ_PROG_LINE:
+ index = MDP_INTF_INTR_PROG_LINE;
+ break;
+ }
+ return index;
+}
+
+int mdss_mdp_set_intf_intr_callback(struct mdss_mdp_video_ctx *ctx,
+ u32 intr_type, void (*fnc_ptr)(void *), void *arg)
+{
+ unsigned long flags;
+ int index;
+
+ index = mdss_mdp_intf_intr2index(intr_type);
+ if (index < 0) {
+ pr_warn("invalid intr type=%u\n", intr_type);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctx->mdss_mdp_intf_intr_lock, flags);
+ WARN(ctx->mdp_intf_intr_cb[index].func && fnc_ptr,
+ "replacing current intr callback for ndx=%d\n", index);
+ ctx->mdp_intf_intr_cb[index].func = fnc_ptr;
+ ctx->mdp_intf_intr_cb[index].arg = arg;
+ spin_unlock_irqrestore(&ctx->mdss_mdp_intf_intr_lock, flags);
+
+ return 0;
+}
+
+static inline void mdss_mdp_intf_intr_done(struct mdss_mdp_video_ctx *ctx,
+ int index)
+{
+ void (*fnc)(void *);
+ void *arg;
+
+ spin_lock(&ctx->mdss_mdp_intf_intr_lock);
+ fnc = ctx->mdp_intf_intr_cb[index].func;
+ arg = ctx->mdp_intf_intr_cb[index].arg;
+ spin_unlock(&ctx->mdss_mdp_intf_intr_lock);
+ if (fnc)
+ fnc(arg);
+}
+
+/*
+ * mdss_mdp_video_isr() - ISR handler for video mode interfaces
+ *
+ * @ptr: pointer to the array of video interface contexts
+ * @count: number of interfaces; must match the number of entries in @ptr
+ *
+ * The video ISR handles the interrupts enabled in the
+ * MDSS_MDP_REG_INTF_INTR_EN register. Currently only the
+ * programmable lineptr interrupt is handled.
+ */
+void mdss_mdp_video_isr(void *ptr, u32 count)
+{
+ struct mdss_mdp_video_ctx *head = (struct mdss_mdp_video_ctx *) ptr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct mdss_mdp_video_ctx *ctx = &head[i];
+ u32 intr, mask;
+
+ if (!ctx->intf_irq_mask)
+ continue;
+
+ intr = mdp_video_read(ctx, MDSS_MDP_REG_INTF_INTR_STATUS);
+ mask = mdp_video_read(ctx, MDSS_MDP_REG_INTF_INTR_EN);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_CLEAR, intr);
+
+ pr_debug("%s: intf=%d intr=%x mask=%x\n", __func__,
+ i, intr, mask);
+
+ if (!(intr & mask))
+ continue;
+
+ if (intr & MDSS_MDP_INTF_INTR_PROG_LINE)
+ mdss_mdp_intf_intr_done(ctx, MDP_INTF_INTR_PROG_LINE);
+ }
+}
+
+static int mdss_mdp_video_intf_irq_enable(struct mdss_mdp_ctl *ctl,
+ u32 intr_type)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ unsigned long irq_flags;
+ int ret = 0;
+ u32 irq;
+
+ if (!ctl || !ctl->intf_ctx[MASTER_CTX])
+ return -ENODEV;
+
+ ctx = ctl->intf_ctx[MASTER_CTX];
+
+ irq = 1 << intr_type;
+
+ spin_lock_irqsave(&ctx->mdss_mdp_video_lock, irq_flags);
+ if (ctx->intf_irq_mask & irq) {
+ pr_warn("MDSS MDP Intf IRQ-0x%x is already set, mask=%x\n",
+ irq, ctx->intf_irq_mask);
+ ret = -EBUSY;
+ } else {
+ pr_debug("MDSS MDP Intf IRQ mask old=%x new=%x\n",
+ ctx->intf_irq_mask, irq);
+ ctx->intf_irq_mask |= irq;
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_CLEAR, irq);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_EN,
+ ctx->intf_irq_mask);
+ ctl->mdata->mdp_intf_irq_mask |=
+ (1 << (ctx->intf_num - MDSS_MDP_INTF0));
+ mdss_mdp_enable_hw_irq(ctl->mdata);
+ }
+ spin_unlock_irqrestore(&ctx->mdss_mdp_video_lock, irq_flags);
+
+ return ret;
+}
+
+void mdss_mdp_video_intf_irq_disable(struct mdss_mdp_ctl *ctl, u32 intr_type)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ unsigned long irq_flags;
+ u32 irq;
+
+ if (!ctl || !ctl->intf_ctx[MASTER_CTX])
+ return;
+
+ ctx = ctl->intf_ctx[MASTER_CTX];
+
+ irq = 1 << intr_type;
+
+ spin_lock_irqsave(&ctx->mdss_mdp_video_lock, irq_flags);
+ if (!(ctx->intf_irq_mask & irq)) {
+ pr_warn("MDSS MDP Intf IRQ-%x is NOT set, mask=%x\n",
+ irq, ctx->intf_irq_mask);
+ } else {
+ ctx->intf_irq_mask &= ~irq;
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_CLEAR, irq);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_EN,
+ ctx->intf_irq_mask);
+ if (ctx->intf_irq_mask == 0) {
+ ctl->mdata->mdp_intf_irq_mask &=
+ ~(1 << (ctx->intf_num - MDSS_MDP_INTF0));
+ mdss_mdp_disable_hw_irq(ctl->mdata);
+ }
+ }
+ spin_unlock_irqrestore(&ctx->mdss_mdp_video_lock, irq_flags);
+}
+
+int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
+ u32 *offsets, u32 count)
+{
+ struct mdss_mdp_video_ctx *head;
+ u32 i;
+
+ head = devm_kzalloc(&mdata->pdev->dev,
+ sizeof(struct mdss_mdp_video_ctx) * count, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ head[i].base = mdata->mdss_io.base + offsets[i];
+ pr_debug("adding Video Intf #%d offset=0x%x virt=%pK\n", i,
+ offsets[i], head[i].base);
+ head[i].ref_cnt = 0;
+ head[i].intf_num = i + MDSS_MDP_INTF0;
+ INIT_LIST_HEAD(&head[i].vsync_handlers);
+ INIT_LIST_HEAD(&head[i].lineptr_handlers);
+ }
+
+ mdata->video_intf = head;
+ mdata->nintf = count;
+ return 0;
+}
+
+static int mdss_mdp_video_intf_recovery(void *data, int event)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ struct mdss_mdp_ctl *ctl = data;
+ struct mdss_panel_info *pinfo;
+ u32 line_cnt, min_ln_cnt, active_lns_cnt;
+ u64 clk_rate;
+ u32 clk_period, time_of_line;
+ u32 delay;
+
+ if (!data) {
+ pr_err("%s: invalid ctl\n", __func__);
+ return -EINVAL;
+ }
+
+	/*
+	 * Currently, only intf_fifo_overflow is
+	 * supported by the recovery sequence for the
+	 * video mode DSI interface.
+	 */
+ if (event != MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
+ pr_warn("%s: unsupported recovery event:%d\n",
+ __func__, event);
+ return -EPERM;
+ }
+
+ ctx = ctl->intf_ctx[MASTER_CTX];
+ pr_debug("%s: ctl num = %d, event = %d\n",
+ __func__, ctl->num, event);
+
+ pinfo = &ctl->panel_data->panel_info;
+ clk_rate = ((ctl->intf_type == MDSS_INTF_DSI) ?
+ pinfo->mipi.dsi_pclk_rate :
+ pinfo->clk_rate);
+
+ clk_rate = DIV_ROUND_UP_ULL(clk_rate, 1000); /* in kHz */
+ if (!clk_rate) {
+ pr_err("Unable to get proper clk_rate\n");
+ return -EINVAL;
+ }
+ /*
+ * calculate clk_period as pico second to maintain good
+ * accuracy with high pclk rate and this number is in 17 bit
+ * range.
+ */
+ clk_period = DIV_ROUND_UP_ULL(1000000000, clk_rate);
+ if (!clk_period) {
+ pr_err("Unable to calculate clock period\n");
+ return -EINVAL;
+ }
+ min_ln_cnt = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+ active_lns_cnt = pinfo->yres;
+ time_of_line = (pinfo->lcdc.h_back_porch +
+ pinfo->lcdc.h_front_porch +
+ pinfo->lcdc.h_pulse_width +
+ pinfo->xres) * clk_period;
+
+ /* delay in micro seconds */
+ delay = (time_of_line * (min_ln_cnt +
+ pinfo->lcdc.v_front_porch)) / 1000000;
+
+ /*
+ * Wait for max delay before
+ * polling to check active region
+ */
+ if (delay > POLL_TIME_USEC_FOR_LN_CNT)
+ delay = POLL_TIME_USEC_FOR_LN_CNT;
+
+ mutex_lock(&ctl->offlock);
+ while (1) {
+ if (!ctl || ctl->mfd->shutdown_pending || !ctx ||
+ !ctx->timegen_en) {
+ pr_warn("Target is in suspend or shutdown pending\n");
+ mutex_unlock(&ctl->offlock);
+ return -EPERM;
+ }
+
+ line_cnt = mdss_mdp_video_line_count(ctl);
+
+ if ((line_cnt >= min_ln_cnt) && (line_cnt <
+ (active_lns_cnt + min_ln_cnt))) {
+ pr_debug("%s, Needed lines left line_cnt=%d\n",
+ __func__, line_cnt);
+ mutex_unlock(&ctl->offlock);
+ return 0;
+ }
+ pr_warn("line count is less. line_cnt = %d\n",
+ line_cnt);
+ /* Add delay so that line count is in active region */
+ udelay(delay);
+ }
+}
+
+static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
+ struct intf_timing_params *p,
+ struct mdss_mdp_video_ctx *ctx)
+{
+ u32 hsync_period, vsync_period;
+ u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
+ u32 active_h_start, active_h_end, active_v_start, active_v_end;
+ u32 den_polarity, hsync_polarity, vsync_polarity;
+ u32 display_hctl, active_hctl, hsync_ctl, polarity_ctl;
+ struct mdss_data_type *mdata;
+
+ mdata = ctl->mdata;
+ hsync_period = p->hsync_pulse_width + p->h_back_porch +
+ p->width + p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch +
+ p->height + p->v_front_porch;
+
+ MDSS_XLOG(p->vsync_pulse_width, p->v_back_porch,
+ p->height, p->v_front_porch);
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->intf_type == MDSS_INTF_EDP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ /* TIMING_2 flush bit on 8939 is BIT 31 */
+ if (mdata->mdp_rev == MDSS_MDP_HW_REV_108 &&
+ ctx->intf_num == MDSS_MDP_INTF2)
+ ctl->flush_bits |= BIT(31);
+ else
+ ctl->flush_bits |= BIT(31) >>
+ (ctx->intf_num - MDSS_MDP_INTF0);
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ active_hctl |= BIT(31); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ active_v_start |= BIT(31); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->intf_type == MDSS_INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+ vsync_period * hsync_period);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_HCTL, display_hctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+ display_v_start);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_END_F0, display_v_end);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_START_F0,
+ active_v_start);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_END_F0, active_v_end);
+
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_BORDER_COLOR, p->border_clr);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_UNDERFLOW_COLOR,
+ p->underflow_clr);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_SKEW, p->hsync_skew);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_POLARITY_CTL, polarity_ctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN, 0x3);
+ MDSS_XLOG(hsync_period, vsync_period);
+
+ /*
+ * If CDM is present Interface should have destination
+ * format set to RGB
+ */
+ if (ctl->cdm) {
+ u32 reg = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+
+ reg &= ~BIT(18); /* CSC_DST_DATA_FORMAT = RGB */
+		reg &= ~BIT(17); /* CSC_SRC_DATA_FORMAT = RGB */
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, reg);
+ }
+ return 0;
+}
+
+static void mdss_mdp_video_timegen_flush(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_video_ctx *sctx)
+{
+ u32 ctl_flush;
+ struct mdss_data_type *mdata;
+
+ mdata = ctl->mdata;
+ ctl_flush = (BIT(31) >> (ctl->intf_num - MDSS_MDP_INTF0));
+ if (sctx) {
+ /* For 8939, sctx is always INTF2 and the flush bit is BIT 31 */
+ if (mdata->mdp_rev == MDSS_MDP_HW_REV_108)
+ ctl_flush |= BIT(31);
+ else
+ ctl_flush |= (BIT(31) >>
+ (sctx->intf_num - MDSS_MDP_INTF0));
+ }
+ mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush);
+ MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush);
+}
+
+static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl, bool clear)
+{
+ struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+ mutex_lock(&ctx->vsync_mtx);
+ if (atomic_inc_return(&ctx->vsync_ref) == 1)
+ mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+ ctl->intf_num);
+ else if (clear)
+ mdss_mdp_irq_clear(ctl->mdata, MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+ ctl->intf_num);
+ mutex_unlock(&ctx->vsync_mtx);
+}
+
+static inline void video_vsync_irq_disable(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+ mutex_lock(&ctx->vsync_mtx);
+ if (atomic_dec_return(&ctx->vsync_ref) == 0)
+ mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+ ctl->intf_num);
+ mutex_unlock(&ctx->vsync_mtx);
+}
+
+static int mdss_mdp_video_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_vsync_handler *handle)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ unsigned long flags;
+ int ret = 0;
+ bool irq_en = false;
+
+ if (!handle || !(handle->vsync_handler)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx) {
+ pr_err("invalid ctx for ctl=%d\n", ctl->num);
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ MDSS_XLOG(ctl->num, ctl->vsync_cnt, handle->enabled);
+
+ spin_lock_irqsave(&ctx->vsync_lock, flags);
+ if (!handle->enabled) {
+ handle->enabled = true;
+ list_add(&handle->list, &ctx->vsync_handlers);
+ irq_en = true;
+ }
+ spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+ if (irq_en) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ video_vsync_irq_enable(ctl, false);
+ }
+exit:
+ return ret;
+}
+
+static int mdss_mdp_video_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_vsync_handler *handle)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ unsigned long flags;
+ bool irq_dis = false;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx) {
+ pr_err("invalid ctx for ctl=%d\n", ctl->num);
+ return -ENODEV;
+ }
+
+ MDSS_XLOG(ctl->num, ctl->vsync_cnt, handle->enabled);
+
+ spin_lock_irqsave(&ctx->vsync_lock, flags);
+ if (handle->enabled) {
+ handle->enabled = false;
+ list_del_init(&handle->list);
+ irq_dis = true;
+ }
+ spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+ if (irq_dis) {
+ video_vsync_irq_disable(ctl);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ }
+ return 0;
+}
+
+static int mdss_mdp_video_add_lineptr_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_lineptr_handler *handle)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ unsigned long flags;
+ int ret = 0;
+ bool irq_en = false;
+
+ if (!handle || !(handle->lineptr_handler)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx) {
+ pr_err("invalid ctx for ctl=%d\n", ctl->num);
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ spin_lock_irqsave(&ctx->lineptr_lock, flags);
+ if (!handle->enabled) {
+ handle->enabled = true;
+ list_add(&handle->list, &ctx->lineptr_handlers);
+ irq_en = true;
+ }
+ spin_unlock_irqrestore(&ctx->lineptr_lock, flags);
+
+ if (irq_en) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ mutex_lock(&ctx->lineptr_mtx);
+ if (atomic_inc_return(&ctx->lineptr_ref) == 1)
+ mdss_mdp_video_intf_irq_enable(ctl,
+ MDSS_MDP_INTF_IRQ_PROG_LINE);
+ mutex_unlock(&ctx->lineptr_mtx);
+ }
+ ctx->lineptr_enabled = true;
+
+exit:
+ return ret;
+}
+
+static int mdss_mdp_video_remove_lineptr_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_lineptr_handler *handle)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ unsigned long flags;
+ bool irq_dis = false;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctx->lineptr_enabled)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctx->lineptr_lock, flags);
+ if (handle->enabled) {
+ handle->enabled = false;
+ list_del_init(&handle->list);
+ irq_dis = true;
+ }
+ spin_unlock_irqrestore(&ctx->lineptr_lock, flags);
+
+ if (irq_dis) {
+ mutex_lock(&ctx->lineptr_mtx);
+ if (atomic_dec_return(&ctx->lineptr_ref) == 0)
+ mdss_mdp_video_intf_irq_disable(ctl,
+ MDSS_MDP_INTF_IRQ_PROG_LINE);
+ mutex_unlock(&ctx->lineptr_mtx);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ }
+ ctx->lineptr_enabled = false;
+ ctx->prev_wr_ptr_irq = 0;
+
+ return 0;
+}
+
+static int mdss_mdp_video_set_lineptr(struct mdss_mdp_ctl *ctl,
+ u32 new_lineptr)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ u32 pixel_start, offset, hsync_period;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx) {
+ pr_err("invalid ctx for ctl=%d\n", ctl->num);
+ return -ENODEV;
+ }
+
+ if (new_lineptr == 0) {
+ mdp_video_write(ctx,
+ MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF, UINT_MAX);
+ } else if (new_lineptr <= ctx->itp.yres) {
+ hsync_period = ctx->itp.hsync_pulse_width
+ + ctx->itp.h_back_porch + ctx->itp.width
+ + ctx->itp.h_front_porch;
+
+ offset = ((ctx->itp.vsync_pulse_width + ctx->itp.v_back_porch)
+ * hsync_period) + ctx->itp.hsync_skew;
+
+ /* convert from line to pixel */
+ pixel_start = offset + (hsync_period * (new_lineptr - 1));
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF,
+ pixel_start);
+
+ mdss_mdp_video_timegen_flush(ctl, ctx);
+ } else {
+ pr_err("invalid new lineptr_value: new=%d yres=%d\n",
+ new_lineptr, ctx->itp.yres);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mdss_mdp_video_lineptr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
+{
+ struct mdss_mdp_pp_tear_check *te;
+ struct mdss_mdp_video_ctx *ctx;
+ int rc = 0;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctl->is_master)
+ return -EINVAL;
+
+ te = &ctl->panel_data->panel_info.te;
+ pr_debug("%pS->%s: ctl=%d en=%d, prev_lineptr=%d, lineptr=%d\n",
+ __builtin_return_address(0), __func__, ctl->num,
+ enable, ctx->prev_wr_ptr_irq, te->wr_ptr_irq);
+
+ if (enable) {
+ /* update reg only if the value has changed */
+ if (ctx->prev_wr_ptr_irq != te->wr_ptr_irq) {
+ if (mdss_mdp_video_set_lineptr(ctl,
+ te->wr_ptr_irq) < 0) {
+ /* invalid new value, so restore the previous */
+ te->wr_ptr_irq = ctx->prev_wr_ptr_irq;
+ goto end;
+ }
+ ctx->prev_wr_ptr_irq = te->wr_ptr_irq;
+ }
+
+ /*
+ * add handler only when lineptr is not enabled
+ * and wr ptr is non zero
+ */
+ if (!ctx->lineptr_enabled && te->wr_ptr_irq)
+ rc = mdss_mdp_video_add_lineptr_handler(ctl,
+ &ctl->lineptr_handler);
+ /* Disable handler when the value is zero */
+ else if (ctx->lineptr_enabled && !te->wr_ptr_irq)
+ rc = mdss_mdp_video_remove_lineptr_handler(ctl,
+ &ctl->lineptr_handler);
+ } else {
+ if (ctx->lineptr_enabled)
+ rc = mdss_mdp_video_remove_lineptr_handler(ctl,
+ &ctl->lineptr_handler);
+ }
+
+end:
+ return rc;
+}
+
+void mdss_mdp_turn_off_time_engine(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_video_ctx *ctx, u32 sleep_time)
+{
+ struct mdss_mdp_ctl *sctl;
+
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+ /* wait for at least one VSYNC for proper TG OFF */
+ msleep(sleep_time);
+
+ mdss_iommu_ctrl(0);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ ctx->timegen_en = false;
+
+ mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, ctl->intf_num);
+
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (sctl)
+ mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+ sctl->intf_num);
+}
+
+static int mdss_mdp_video_ctx_stop(struct mdss_mdp_ctl *ctl,
+ struct mdss_panel_info *pinfo, struct mdss_mdp_video_ctx *ctx)
+{
+ int rc = 0;
+ u32 frame_rate = 0;
+
+ mutex_lock(&ctl->offlock);
+ if (ctx->timegen_en) {
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL,
+ CTL_INTF_EVENT_FLAG_DEFAULT);
+ if (rc == -EBUSY) {
+ pr_debug("intf #%d busy don't turn off\n",
+ ctl->intf_num);
+ goto end;
+ }
+ WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
+ frame_rate = mdss_panel_get_framerate(pinfo,
+ FPS_RESOLUTION_HZ);
+ if (!(frame_rate >= 24 && frame_rate <= 240))
+ frame_rate = 24;
+
+ frame_rate = (1000/frame_rate) + 1;
+ mdss_mdp_turn_off_time_engine(ctl, ctx, frame_rate);
+
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL,
+ CTL_INTF_EVENT_FLAG_DEFAULT);
+ WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
+
+ mdss_bus_bandwidth_ctrl(false);
+ }
+
+ mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+ ctx->intf_num, NULL, NULL);
+ mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+ ctx->intf_num, NULL, NULL);
+ mdss_mdp_set_intf_intr_callback(ctx, MDSS_MDP_INTF_IRQ_PROG_LINE,
+ NULL, NULL);
+
+ ctx->ref_cnt--;
+end:
+ mutex_unlock(&ctl->offlock);
+ return rc;
+}
+
+static int mdss_mdp_video_intfs_stop(struct mdss_mdp_ctl *ctl,
+ struct mdss_panel_data *pdata, int inum)
+{
+ struct mdss_data_type *mdata;
+ struct mdss_panel_info *pinfo;
+ struct mdss_mdp_video_ctx *ctx, *sctx = NULL;
+ struct mdss_mdp_vsync_handler *tmp, *handle;
+ int ret = 0;
+
+ if (pdata == NULL)
+ return 0;
+
+ mdata = ctl->mdata;
+ pinfo = &pdata->panel_info;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx->ref_cnt) {
+ pr_err("Intf %d not in use\n", (inum + MDSS_MDP_INTF0));
+ return -ENODEV;
+ }
+ pr_debug("stop ctl=%d video Intf #%d base=%pK", ctl->num, ctx->intf_num,
+ ctx->base);
+
+ ret = mdss_mdp_video_ctx_stop(ctl, pinfo, ctx);
+ if (ret) {
+ pr_err("mdss_mdp_video_ctx_stop failed for intf: %d",
+ ctx->intf_num);
+ return -EPERM;
+ }
+
+ if (is_pingpong_split(ctl->mfd)) {
+ pinfo = &pdata->next->panel_info;
+
+ sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+ if (!sctx->ref_cnt) {
+ pr_err("Intf %d not in use\n", (inum + MDSS_MDP_INTF0));
+ return -ENODEV;
+ }
+ pr_debug("stop ctl=%d video Intf #%d base=%pK", ctl->num,
+ sctx->intf_num, sctx->base);
+
+ ret = mdss_mdp_video_ctx_stop(ctl, pinfo, sctx);
+ if (ret) {
+ pr_err("mdss_mdp_video_ctx_stop failed for intf: %d",
+ sctx->intf_num);
+ return -EPERM;
+ }
+ }
+
+ list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
+ mdss_mdp_video_remove_vsync_handler(ctl, handle);
+
+ if (mdss_mdp_is_lineptr_supported(ctl))
+ mdss_mdp_video_lineptr_ctrl(ctl, false);
+
+ return 0;
+}
+
+
+static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
+{
+ int intfs_num, ret = 0;
+
+ intfs_num = ctl->intf_num - MDSS_MDP_INTF0;
+ ret = mdss_mdp_video_intfs_stop(ctl, ctl->panel_data, intfs_num);
+ if (IS_ERR_VALUE((unsigned long)ret)) {
+ pr_err("unable to stop video interface: %d\n", ret);
+ return ret;
+ }
+
+ MDSS_XLOG(ctl->num, ctl->vsync_cnt);
+
+ mdss_mdp_ctl_reset(ctl, false);
+ ctl->intf_ctx[MASTER_CTX] = NULL;
+
+ if (ctl->cdm) {
+ mdss_mdp_cdm_destroy(ctl->cdm);
+ ctl->cdm = NULL;
+ }
+ return 0;
+}
+
+static void mdss_mdp_video_vsync_intr_done(void *arg)
+{
+ struct mdss_mdp_ctl *ctl = arg;
+ struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+ struct mdss_mdp_vsync_handler *tmp;
+ ktime_t vsync_time;
+
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return;
+ }
+
+ vsync_time = ktime_get();
+ ctl->vsync_cnt++;
+
+ mdss_debug_frc_add_vsync_sample(ctl, vsync_time);
+
+ MDSS_XLOG(ctl->num, ctl->vsync_cnt, ctl->vsync_cnt);
+
+ pr_debug("intr ctl=%d vsync cnt=%u vsync_time=%d\n",
+ ctl->num, ctl->vsync_cnt, (int)ktime_to_ms(vsync_time));
+
+ ctx->polling_en = false;
+ complete_all(&ctx->vsync_comp);
+ spin_lock(&ctx->vsync_lock);
+ list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+ tmp->vsync_handler(ctl, vsync_time);
+ }
+ spin_unlock(&ctx->vsync_lock);
+}
+
+static void mdss_mdp_video_lineptr_intr_done(void *arg)
+{
+ struct mdss_mdp_ctl *ctl = arg;
+ struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+ struct mdss_mdp_lineptr_handler *tmp;
+ ktime_t lineptr_time;
+
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return;
+ }
+
+ lineptr_time = ktime_get();
+ pr_debug("intr lineptr_time=%lld\n", ktime_to_ms(lineptr_time));
+
+ spin_lock(&ctx->lineptr_lock);
+ list_for_each_entry(tmp, &ctx->lineptr_handlers, list) {
+ tmp->lineptr_handler(ctl, lineptr_time);
+ }
+ spin_unlock(&ctx->lineptr_lock);
+}
+
+static int mdss_mdp_video_pollwait(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+ u32 mask, status;
+ int rc;
+
+ mask = mdss_mdp_get_irq_mask(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+ ctl->intf_num);
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ rc = readl_poll_timeout(ctl->mdata->mdp_base + MDSS_MDP_REG_INTR_STATUS,
+ status,
+ (status & mask) || try_wait_for_completion(&ctx->vsync_comp),
+ 1000,
+ VSYNC_TIMEOUT_US);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+ if (rc == 0) {
+ MDSS_XLOG(ctl->num, ctl->vsync_cnt);
+ pr_debug("vsync poll successful! rc=%d status=0x%x\n",
+ rc, status);
+ ctx->poll_cnt++;
+ if (status) {
+ struct mdss_mdp_vsync_handler *tmp;
+ unsigned long flags;
+ ktime_t vsync_time = ktime_get();
+
+ spin_lock_irqsave(&ctx->vsync_lock, flags);
+ list_for_each_entry(tmp, &ctx->vsync_handlers, list)
+ tmp->vsync_handler(ctl, vsync_time);
+ spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+ }
+ } else {
+ pr_warn("vsync poll timed out! rc=%d status=0x%x mask=0x%x\n",
+ rc, status, mask);
+ }
+
+ return rc;
+}
+
+static int mdss_mdp_video_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
+{
+ struct mdss_mdp_video_ctx *ctx;
+ int rc;
+
+ ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return -ENODEV;
+ }
+
+ WARN(!ctx->wait_pending, "waiting without commit! ctl=%d", ctl->num);
+
+ if (ctx->polling_en) {
+ rc = mdss_mdp_video_pollwait(ctl);
+ } else {
+ mutex_unlock(&ctl->lock);
+ rc = wait_for_completion_timeout(&ctx->vsync_comp,
+ usecs_to_jiffies(VSYNC_TIMEOUT_US));
+ mutex_lock(&ctl->lock);
+ if (rc == 0) {
+ pr_warn("vsync wait timeout %d, fallback to poll mode\n",
+ ctl->num);
+ ctx->polling_en++;
+ rc = mdss_mdp_video_pollwait(ctl);
+ } else {
+ rc = 0;
+ }
+ }
+ mdss_mdp_ctl_notify(ctl,
+ rc ? MDP_NOTIFY_FRAME_TIMEOUT : MDP_NOTIFY_FRAME_DONE);
+
+ if (ctx->wait_pending) {
+ ctx->wait_pending = 0;
+ video_vsync_irq_disable(ctl);
+ }
+
+ return rc;
+}
+
+static void recover_underrun_work(struct work_struct *work)
+{
+ struct mdss_mdp_ctl *ctl =
+ container_of(work, typeof(*ctl), recover_work);
+
+ if (!ctl || !ctl->ops.add_vsync_handler) {
+ pr_err("ctl or vsync handler is NULL\n");
+ return;
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ ctl->ops.add_vsync_handler(ctl, &ctl->recover_underrun_handler);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+static void mdss_mdp_video_underrun_intr_done(void *arg)
+{
+ struct mdss_mdp_ctl *ctl = arg;
+
+ if (unlikely(!ctl))
+ return;
+
+ ctl->underrun_cnt++;
+ MDSS_XLOG(ctl->num, ctl->underrun_cnt);
+ trace_mdp_video_underrun_done(ctl->num, ctl->underrun_cnt);
+ pr_debug("display underrun detected for ctl=%d count=%d\n", ctl->num,
+ ctl->underrun_cnt);
+
+ if (!test_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+ ctl->mdata->mdss_caps_map) &&
+ (ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE))
+ schedule_work(&ctl->recover_work);
+}
+
+/**
+ * mdss_mdp_video_hfp_fps_update() - configure mdp with new fps.
+ * @ctx: pointer to the master context.
+ * @pdata: panel information data.
+ *
+ * This function configures the hardware to modify the fps.
+ * within mdp for the hfp method.
+ * Function assumes that timings for the new fps configuration
+ * are already updated in the panel data passed as parameter.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+static int mdss_mdp_video_hfp_fps_update(struct mdss_mdp_video_ctx *ctx,
+ struct mdss_panel_data *pdata)
+{
+ u32 hsync_period, vsync_period;
+ u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
+ u32 display_hctl, hsync_ctl;
+ struct mdss_panel_info *pinfo = &pdata->panel_info;
+
+ hsync_period = mdss_panel_get_htotal(pinfo, true);
+ vsync_period = mdss_panel_get_vtotal(pinfo);
+
+ display_v_start = ((pinfo->lcdc.v_pulse_width +
+ pinfo->lcdc.v_back_porch) * hsync_period) +
+ pinfo->lcdc.hsync_skew;
+ display_v_end = ((vsync_period - pinfo->lcdc.v_front_porch) *
+ hsync_period) + pinfo->lcdc.hsync_skew - 1;
+
+ hsync_start_x = pinfo->lcdc.h_back_porch + pinfo->lcdc.h_pulse_width;
+ hsync_end_x = hsync_period - pinfo->lcdc.h_front_porch - 1;
+
+ hsync_ctl = (hsync_period << 16) | pinfo->lcdc.h_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+ vsync_period * hsync_period);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+ pinfo->lcdc.v_pulse_width * hsync_period);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_HCTL, display_hctl);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+ display_v_start);
+ mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_END_F0, display_v_end);
+ MDSS_XLOG(ctx->intf_num, hsync_ctl, vsync_period, hsync_period);
+
+ return 0;
+}
+
+/**
+ * mdss_mdp_video_vfp_fps_update() - configure mdp with new fps.
+ * @ctx: pointer to the master context.
+ * @pdata: panel information data.
+ *
+ * This function configures the hardware to modify the fps
+ * within MDP for the VFP method.
+ * Function assumes that timings for the new fps configuration
+ * are already updated in the panel data passed as parameter.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+static int mdss_mdp_video_vfp_fps_update(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_panel_data *pdata)
+{
+	u32 current_vsync_period_f0, new_vsync_period_f0;
+	int vsync_period, hsync_period;
+
+	/*
+	 * Change in the blanking times are already in the
+	 * panel info, so just get the vtotal and htotal expected
+	 * for this panel to configure those in hw.
+	 */
+	vsync_period = mdss_panel_get_vtotal(&pdata->panel_info);
+	hsync_period = mdss_panel_get_htotal(&pdata->panel_info, true);
+
+	current_vsync_period_f0 = mdp_video_read(ctx,
+		MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0);
+	new_vsync_period_f0 = (vsync_period * hsync_period);
+
+	/*
+	 * NOTE(review): bit 23 (0x800000) of VSYNC_PERIOD_F0 is toggled
+	 * around the period update; it appears to act as a latch/commit
+	 * flag for the new value — confirm against the MDP HW programming
+	 * guide. The final written value always preserves the period in
+	 * bits 22:0 of new_vsync_period_f0.
+	 */
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			current_vsync_period_f0 | 0x800000);
+	if (new_vsync_period_f0 & 0x800000) {
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			new_vsync_period_f0);
+	} else {
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			new_vsync_period_f0 | 0x800000);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			new_vsync_period_f0 & 0x7fffff);
+	}
+
+	pr_debug("if:%d vtotal:%d htotal:%d f0:0x%x nw_f0:0x%x\n",
+		ctx->intf_num, vsync_period, hsync_period,
+		current_vsync_period_f0, new_vsync_period_f0);
+
+	MDSS_XLOG(ctx->intf_num, current_vsync_period_f0,
+		hsync_period, vsync_period, new_vsync_period_f0);
+
+	return 0;
+}
+
+/*
+ * Dispatch a dynamic-fps update to the porch method selected in the
+ * panel info: VFP adjustment for DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP,
+ * HFP adjustment for every other mode that reaches this helper.
+ */
+static int mdss_mdp_video_fps_update(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_panel_data *pdata, int new_fps)
+{
+	bool use_vfp = (pdata->panel_info.dfps_update ==
+			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP);
+
+	return use_vfp ? mdss_mdp_video_vfp_fps_update(ctx, pdata) :
+			mdss_mdp_video_hfp_fps_update(ctx, pdata);
+}
+
+/*
+ * Block until the next vsync on the master interface, falling back to
+ * register polling if the vsync interrupt does not arrive in time.
+ * Returns 0 on success, -ENODEV for a missing context, or the poll
+ * error code on timeout.
+ */
+static int mdss_mdp_video_wait4vsync(struct mdss_mdp_ctl *ctl)
+{
+	int rc = 0;
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, XLOG_FUNC_ENTRY);
+
+	/* Re-arm the completion before enabling the irq to avoid a race. */
+	video_vsync_irq_enable(ctl, true);
+	reinit_completion(&ctx->vsync_comp);
+	rc = wait_for_completion_timeout(&ctx->vsync_comp,
+		usecs_to_jiffies(VSYNC_TIMEOUT_US));
+
+	if (rc <= 0) {
+		/* Irq missed or late: poll the line counter instead. */
+		pr_warn("vsync timeout %d fallback to poll mode\n",
+			ctl->num);
+		rc = mdss_mdp_video_pollwait(ctl);
+		MDSS_XLOG(ctl->num, ctl->vsync_cnt);
+		if (rc) {
+			pr_err("error polling for vsync\n");
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+				"dsi1_ctrl", "dsi1_phy", "vbif", "dbg_bus",
+				"vbif_dbg_bus", "panic");
+		}
+	} else {
+		/* wait_for_completion_timeout returns jiffies left; normalize. */
+		rc = 0;
+	}
+	video_vsync_irq_disable(ctl);
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, XLOG_FUNC_EXIT);
+
+	return rc;
+}
+
+/*
+ * Verify there is enough vertical time left in the current frame to
+ * reprogram timings: the scanout must still be in the top half of the
+ * panel. Returns 0 when safe, -EPERM when too late, -EINVAL on bad data.
+ */
+static int mdss_mdp_video_dfps_check_line_cnt(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_data *pdata = ctl->panel_data;
+	u32 line_cnt, half;
+
+	if (!pdata) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+
+	half = pdata->panel_info.yres / 2;
+	line_cnt = mdss_mdp_video_line_count(ctl);
+	if (line_cnt >= half) {
+		pr_debug("Too few lines left line_cnt=%d yres/2=%d\n",
+			line_cnt, half);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_video_config_fps() - modify the fps.
+ * @ctl: pointer to the master controller.
+ * @new_fps: new fps to be set.
+ *
+ * This function configures the hardware to modify the fps.
+ * Note that this function will flush the DSI and MDP
+ * to reconfigure the fps in VFP and HFP methods.
+ * Given the above, it is the caller's responsibility to call
+ * this function at the beginning of the frame, so it can be
+ * guaranteed that flush of both (DSI and MDP) happen within
+ * the same frame.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+static int mdss_mdp_video_config_fps(struct mdss_mdp_ctl *ctl, int new_fps)
+{
+	struct mdss_mdp_video_ctx *ctx, *sctx = NULL;
+	struct mdss_panel_data *pdata;
+	int rc = 0;
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_ctl *sctl = NULL;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctx->timegen_en || !ctx->ref_cnt) {
+		pr_err("invalid ctx or interface is powered off\n");
+		return -EINVAL;
+	}
+
+	/* Slave context comes from either split-display or pingpong split. */
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl) {
+		sctx = (struct mdss_mdp_video_ctx *) sctl->intf_ctx[MASTER_CTX];
+		if (!sctx) {
+			pr_err("invalid ctx\n");
+			return -ENODEV;
+		}
+	} else if (is_pingpong_split(ctl->mfd)) {
+		sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx || !sctx->ref_cnt) {
+			pr_err("invalid sctx or interface is powered off\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Serialize against interface power-off for the whole update. */
+	mutex_lock(&ctl->offlock);
+	pdata = ctl->panel_data;
+	if (pdata == NULL) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("ctl:%d dfps_update:%d fps:%d\n",
+		ctl->num, pdata->panel_info.dfps_update, new_fps);
+	MDSS_XLOG(ctl->num, pdata->panel_info.dfps_update,
+		new_fps, XLOG_FUNC_ENTRY);
+
+	if (pdata->panel_info.dfps_update
+			!= DFPS_SUSPEND_RESUME_MODE) {
+		if (pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
+			/* Clock-only update: panel driver does all the work. */
+			if (!ctx->timegen_en) {
+				pr_err("TG is OFF. DFPS mode invalid\n");
+				rc = -EINVAL;
+				goto end;
+			}
+			rc = mdss_mdp_ctl_intf_event(ctl,
+					MDSS_EVENT_PANEL_UPDATE_FPS,
+					(void *) (unsigned long) new_fps,
+					CTL_INTF_EVENT_FLAG_DEFAULT);
+
+			WARN(rc, "intf %d panel fps update error (%d)\n",
+							ctl->intf_num, rc);
+		} else if (pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP ||
+				pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+				pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
+				pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+			unsigned long flags;
+
+			if (!ctx->timegen_en) {
+				pr_err("TG is OFF. DFPS mode invalid\n");
+				rc = -EINVAL;
+				goto end;
+			}
+
+			/*
+			 * Porch-based update: keep clocks on and irqs off so
+			 * the register reprogramming lands within one frame.
+			 */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			spin_lock_irqsave(&ctx->dfps_lock, flags);
+
+			/*
+			 * Pre-1.5 HW has no double-buffered INTF registers, so
+			 * only proceed if enough scanout time remains.
+			 */
+			if (mdata->mdp_rev < MDSS_MDP_HW_REV_105) {
+				rc = mdss_mdp_video_dfps_check_line_cnt(ctl);
+				if (rc < 0)
+					goto exit_dfps;
+			}
+
+			rc = mdss_mdp_video_fps_update(ctx, pdata, new_fps);
+			if (rc < 0) {
+				pr_err("%s: Error during DFPS: %d\n", __func__,
+					new_fps);
+				goto exit_dfps;
+			}
+			if (sctx) {
+				rc = mdss_mdp_video_fps_update(sctx,
+						pdata->next, new_fps);
+				if (rc < 0) {
+					pr_err("%s: DFPS error fps:%d\n",
+						__func__, new_fps);
+					goto exit_dfps;
+				}
+			}
+			/* Let the panel driver update its clocks/porches too. */
+			rc = mdss_mdp_ctl_intf_event(ctl,
+					MDSS_EVENT_PANEL_UPDATE_FPS,
+					(void *) (unsigned long) new_fps,
+					CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d panel fps update error (%d)\n",
+							ctl->intf_num, rc);
+
+			rc = 0;
+			mdss_mdp_fetch_start_config(ctx, ctl);
+			if (sctx)
+				mdss_mdp_fetch_start_config(sctx, ctl);
+
+			if (test_bit(MDSS_QOS_VBLANK_PANIC_CTRL,
+					mdata->mdss_qos_map)) {
+				mdss_mdp_fetch_end_config(ctx, ctl);
+				if (sctx)
+					mdss_mdp_fetch_end_config(sctx, ctl);
+			}
+
+			/*
+			 * MDP INTF registers support DB on targets
+			 * starting from MDP v1.5.
+			 */
+			if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105)
+				mdss_mdp_video_timegen_flush(ctl, sctx);
+
+exit_dfps:
+			spin_unlock_irqrestore(&ctx->dfps_lock, flags);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+			/*
+			 * Wait for one vsync to make sure these changes
+			 * are applied as part of one single frame and
+			 * no mixer changes happen at the same time.
+			 * A potential optimization would be not to wait
+			 * here, but next mixer programming would need
+			 * to wait before programming the flush bits.
+			 */
+			if (!rc) {
+				rc = mdss_mdp_video_wait4vsync(ctl);
+				if (rc < 0)
+					pr_err("Error in dfps_wait: %d\n", rc);
+			}
+
+		} else {
+			pr_err("intf %d panel, unknown FPS mode\n",
+					ctl->intf_num);
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		/* Suspend/resume mode: forward the event and let PM handle it. */
+		rc = mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_PANEL_UPDATE_FPS,
+				(void *) (unsigned long) new_fps,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d panel fps update error (%d)\n",
+						ctl->intf_num, rc);
+	}
+
+end:
+	MDSS_XLOG(ctl->num, new_fps, XLOG_FUNC_EXIT);
+	mutex_unlock(&ctl->offlock);
+	return rc;
+}
+
+/*
+ * Kickoff handler for video-mode interfaces: arms the vsync wait for
+ * this commit and, on the first commit, brings the panel and timing
+ * engine up (link ready, unblank, iommu attach, underrun irqs, bus
+ * bandwidth, TIMING_ENGINE_EN).
+ */
+static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_panel_data *pdata = ctl->panel_data;
+	int rc;
+
+	pr_debug("kickoff ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* Each commit must be paired with a wait before the next one. */
+	if (!ctx->wait_pending) {
+		ctx->wait_pending++;
+		video_vsync_irq_enable(ctl, true);
+		reinit_completion(&ctx->vsync_comp);
+	} else {
+		WARN(1, "commit without wait! ctl=%d", ctl->num);
+	}
+
+	MDSS_XLOG(ctl->num, ctl->underrun_cnt);
+
+	/* First commit after power-on: full bring-up sequence. */
+	if (!ctx->timegen_en) {
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_LINK_READY, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		if (rc) {
+			pr_warn("intf #%d link ready error (%d)\n",
+					ctl->intf_num, rc);
+			video_vsync_irq_disable(ctl);
+			ctx->wait_pending = 0;
+			return rc;
+		}
+
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
+		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
+
+		/* With cont-splash the TG already runs; sync to its vsync. */
+		if (pdata->panel_info.cont_splash_enabled &&
+			!ctl->mfd->splash_info.splash_logo_enabled) {
+			rc = wait_for_completion_timeout(&ctx->vsync_comp,
+					usecs_to_jiffies(VSYNC_TIMEOUT_US));
+		}
+
+		/*
+		 * NOTE(review): on this failure path the vsync irq enabled
+		 * above stays on and wait_pending stays set, unlike the
+		 * LINK_READY failure path — confirm whether cleanup is
+		 * intentionally skipped here.
+		 */
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE((unsigned long)rc)) {
+			pr_err("IOMMU attach failed\n");
+			return rc;
+		}
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+				ctl->intf_num);
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl)
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+				sctl->intf_num);
+
+		mdss_bus_bandwidth_ctrl(true);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
+		/* make sure MDP timing engine is enabled */
+		wmb();
+
+		/* First vsync confirms the TG actually started. */
+		rc = wait_for_completion_timeout(&ctx->vsync_comp,
+				usecs_to_jiffies(VSYNC_TIMEOUT_US));
+		WARN(rc == 0, "timeout (%d) enabling timegen on ctl=%d\n",
+				rc, ctl->num);
+
+		ctx->timegen_en = true;
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_POST_PANEL_ON, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+	}
+
+	if (mdss_mdp_is_lineptr_supported(ctl))
+		mdss_mdp_video_lineptr_ctrl(ctl, true);
+
+	return 0;
+}
+
+/*
+ * Tear down the bootloader's continuous-splash configuration. When not
+ * handing off, blanks mixer0/mixer1 to border color, flushes, stops the
+ * timing engine, and notifies the panel driver via the CONT_SPLASH
+ * begin/finish events. Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+	bool handoff)
+{
+	struct mdss_panel_data *pdata;
+	int i, ret = 0, off;
+	u32 data, flush;
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *sctl;
+
+	if (!ctl) {
+		pr_err("invalid ctl\n");
+		return -ENODEV;
+	}
+
+	off = 0;
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	pdata = ctl->panel_data;
+	if (!pdata) {
+		pr_err("invalid pdata\n");
+		return -ENODEV;
+	}
+
+	/* Splash is over for this panel and any split/pingpong sibling. */
+	pdata->panel_info.cont_splash_enabled = 0;
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	if (sctl)
+		sctl->panel_data->panel_info.cont_splash_enabled = 0;
+	else if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+		ctl->panel_data->next->panel_info.cont_splash_enabled = 0;
+
+	if (!handoff) {
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN,
+				      NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+		if (ret) {
+			pr_err("%s: Failed to handle 'CONT_SPLASH_BEGIN' event\n"
+				, __func__);
+			return ret;
+		}
+
+		/* clear up mixer0 and mixer1 */
+		flush = 0;
+		for (i = 0; i < 2; i++) {
+			data = mdss_mdp_ctl_read(ctl,
+				MDSS_MDP_REG_CTL_LAYER(i));
+			if (data) {
+				/* Replace staged pipes with border color. */
+				mdss_mdp_ctl_write(ctl,
+					MDSS_MDP_REG_CTL_LAYER(i),
+					MDSS_MDP_LM_BORDER_COLOR);
+				flush |= (0x40 << i);
+			}
+		}
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+		/* wait for 1 VSYNC for the pipe to be unstaged */
+		msleep(20);
+
+		ret = mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_CONT_SPLASH_FINISH, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+	}
+
+	return ret;
+}
+
+/*
+ * Disable prefill accounting for this ctl when the panel's vertical
+ * blanking (programmable fetch + back porch + pulse width) already
+ * exceeds the minimum prefill line budget of the hardware.
+ */
+static void mdss_mdp_disable_prefill(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+	struct mdss_data_type *mdata = ctl->mdata;
+	u32 blank_lines = pinfo->prg_fet + pinfo->lcdc.v_back_porch +
+			pinfo->lcdc.v_pulse_width;
+
+	if (blank_lines <= mdata->min_prefill_lines)
+		return;
+
+	ctl->disable_prefill = true;
+	pr_debug("disable prefill vbp:%d vpw:%d prg_fet:%d\n",
+		pinfo->lcdc.v_back_porch, pinfo->lcdc.v_pulse_width,
+		pinfo->prg_fet);
+}
+
+/*
+ * Program the VBLANK_END (fetch stop) point so that fetching halts a
+ * configurable number of lines before the active region starts, and
+ * enable the feature via INTF_CONFIG bit 22. Silently skips panels
+ * whose vblank is too short to fit the margin.
+ */
+static void mdss_mdp_fetch_end_config(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_mdp_ctl *ctl)
+{
+	int fetch_stop, h_total;
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+	/* Default margin of 2 lines when the target does not override it. */
+	u32 lines_before_active = ctl->mdata->lines_before_active ? : 2;
+	u32 vblank_lines = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+	u32 vblank_end_enable;
+
+	if (vblank_lines <= lines_before_active) {
+		pr_debug("cannot support fetch end vblank:%d lines:%d\n",
+			vblank_lines, lines_before_active);
+		return;
+	}
+
+	/* Fetch should always be stopped before the active start */
+	h_total = mdss_panel_get_htotal(pinfo, true);
+	fetch_stop = (vblank_lines - lines_before_active) * h_total;
+
+	/* Read-modify-write: preserve other INTF_CONFIG bits. */
+	vblank_end_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+	vblank_end_enable |= BIT(22);
+
+	pr_debug("ctl:%d fetch_stop:%d lines:%d\n",
+		ctl->num, fetch_stop, lines_before_active);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VBLANK_END_CONF, fetch_stop);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, vblank_end_enable);
+	MDSS_XLOG(ctx->intf_num, fetch_stop, vblank_end_enable);
+}
+
+/*
+ * Program the programmable-fetch start point so the hardware begins
+ * fetching prg_fet lines before the active region, and enable it via
+ * INTF_CONFIG bit 31 (plus bit 23 for clock-based dynamic fps).
+ */
+static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_mdp_ctl *ctl)
+{
+	int fetch_start, fetch_enable, v_total, h_total;
+	struct mdss_data_type *mdata;
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+	mdata = ctl->mdata;
+
+	pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo);
+	if (!pinfo->prg_fet) {
+		pr_debug("programmable fetch is not needed/supported\n");
+		return;
+	}
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetching
+	 * is programmed within active region, hardware behavior is unknown.
+	 */
+	v_total = mdss_panel_get_vtotal(pinfo);
+	h_total = mdss_panel_get_htotal(pinfo, true);
+
+	/* Start point counted in pixel clocks from frame start (1-based). */
+	fetch_start = (v_total - pinfo->prg_fet) * h_total + 1;
+	fetch_enable = BIT(31);
+
+	if (pinfo->dynamic_fps && (pinfo->dfps_update ==
+			DFPS_IMMEDIATE_CLK_UPDATE_MODE))
+		fetch_enable |= BIT(23);
+
+	pr_debug("ctl:%d fetch_start:%d lines:%d\n",
+		ctl->num, fetch_start, pinfo->prg_fet);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_PROG_FETCH_START, fetch_start);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, fetch_enable);
+	MDSS_XLOG(ctx->intf_num, fetch_enable, fetch_start);
+}
+
+/*
+ * Return true for the HDMI video formats that require pixel-drop
+ * downsampling instead of cosite/offsite filtering.
+ */
+static inline bool mdss_mdp_video_need_pixel_drop(u32 vic)
+{
+	switch (vic) {
+	case HDMI_VFRMT_4096x2160p50_256_135:
+	case HDMI_VFRMT_4096x2160p60_256_135:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Build an mdp_cdm_cfg for the given panel/output format and hand it to
+ * mdss_mdp_cdm_setup(). Chooses the CSC type from is_yuv and the
+ * chroma downsampling mode from the format's subsampling.
+ * Returns -EINVAL for unsupported chroma sampling.
+ *
+ * NOTE(review): 'setup' is stack-allocated and only the fields below
+ * are assigned — confirm mdp_cdm_cfg has no other fields that
+ * mdss_mdp_cdm_setup() reads.
+ */
+static int mdss_mdp_video_cdm_setup(struct mdss_mdp_cdm *cdm,
+	struct mdss_panel_info *pinfo, struct mdss_mdp_format_params *fmt)
+{
+	struct mdp_cdm_cfg setup;
+
+	if (fmt->is_yuv)
+		setup.csc_type = MDSS_MDP_CSC_RGB2YUV_601FR;
+	else
+		setup.csc_type = MDSS_MDP_CSC_RGB2RGB;
+
+	switch (fmt->chroma_sample) {
+	case MDSS_MDP_CHROMA_RGB:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_H2V1:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_420:
+		/* 4K@50/60 256:135 formats need pixel drop, not filtering. */
+		if (mdss_mdp_video_need_pixel_drop(pinfo->vic)) {
+			setup.horz_downsampling_type = MDP_CDM_CDWN_PIXEL_DROP;
+			setup.vert_downsampling_type = MDP_CDM_CDWN_PIXEL_DROP;
+		} else {
+			setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+			setup.vert_downsampling_type = MDP_CDM_CDWN_OFFSITE;
+		}
+		break;
+	case MDSS_MDP_CHROMA_H1V2:
+	default:
+		pr_err("%s: unsupported chroma sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	setup.out_format = pinfo->out_format;
+	setup.mdp_csc_bit_depth = MDP_CDM_CSC_8BIT;
+	/* Output dimensions include any panel padding. */
+	setup.output_width = pinfo->xres + pinfo->lcdc.xres_pad;
+	setup.output_height = pinfo->yres + pinfo->lcdc.yres_pad;
+	return mdss_mdp_cdm_setup(cdm, &setup);
+}
+
+/*
+ * Recover the programmable-fetch line count from the registers that the
+ * bootloader programmed (cont-splash handoff). Leaves pinfo->prg_fet at
+ * 0 when programmable fetch is disabled or the register contents are
+ * unusable.
+ *
+ * Fix: the original divided by h_total_handoff *before* checking it for
+ * zero, which is a divide-by-zero (undefined behavior) when HSYNC_CTL
+ * reads back 0. All derived math is now guarded by the zero check.
+ */
+static void mdss_mdp_handoff_programmable_fetch(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_video_ctx *ctx)
+{
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+	u32 fetch_start_handoff, v_total_handoff, h_total_handoff;
+
+	pinfo->prg_fet = 0;
+	if (mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG) & BIT(31)) {
+		fetch_start_handoff = mdp_video_read(ctx,
+			MDSS_MDP_REG_INTF_PROG_FETCH_START);
+		h_total_handoff = mdp_video_read(ctx,
+			MDSS_MDP_REG_INTF_HSYNC_CTL) >> 16;
+		if (!h_total_handoff) {
+			/* Bogus register contents: avoid divide-by-zero. */
+			pr_debug("invalid htotal handoff, skip prg_fet\n");
+			return;
+		}
+		v_total_handoff = mdp_video_read(ctx,
+			MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0)/h_total_handoff;
+		pinfo->prg_fet = v_total_handoff -
+			((fetch_start_handoff - 1)/h_total_handoff);
+		pr_debug("programmable fetch lines %d start:%d\n",
+			pinfo->prg_fet, fetch_start_handoff);
+		MDSS_XLOG(pinfo->prg_fet, fetch_start_handoff,
+			h_total_handoff, v_total_handoff);
+	}
+}
+
+/*
+ * One-time setup of a video interface context for a ctl: initializes
+ * locks/completions/work items, registers DSI recovery and irq
+ * callbacks, optionally brings up CDM for YUV output, computes the
+ * interface timing parameters from the panel info, and programs the
+ * timing generator (or hands off from cont-splash).
+ * Returns 0 on success, -EINVAL on any setup failure.
+ */
+static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_video_ctx *ctx, struct mdss_panel_info *pinfo)
+{
+	struct intf_timing_params *itp = &ctx->itp;
+	u32 dst_bpp;
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct dsc_desc *dsc = NULL;
+
+	ctx->ctl = ctl;
+	ctx->intf_type = ctl->intf_type;
+	init_completion(&ctx->vsync_comp);
+	spin_lock_init(&ctx->vsync_lock);
+	spin_lock_init(&ctx->dfps_lock);
+	mutex_init(&ctx->vsync_mtx);
+	atomic_set(&ctx->vsync_ref, 0);
+	spin_lock_init(&ctx->lineptr_lock);
+	spin_lock_init(&ctx->mdss_mdp_video_lock);
+	spin_lock_init(&ctx->mdss_mdp_intf_intr_lock);
+	mutex_init(&ctx->lineptr_mtx);
+	atomic_set(&ctx->lineptr_ref, 0);
+	INIT_WORK(&ctl->recover_work, recover_underrun_work);
+
+	/* Underrun recovery is only meaningful for DSI panels. */
+	if (ctl->intf_type == MDSS_INTF_DSI) {
+		ctx->intf_recovery.fxn = mdss_mdp_video_intf_recovery;
+		ctx->intf_recovery.data = ctl;
+		if (mdss_mdp_ctl_intf_event(ctl,
+					MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+					(void *)&ctx->intf_recovery,
+					CTL_INTF_EVENT_FLAG_DEFAULT)) {
+			pr_err("Failed to register intf recovery handler\n");
+			return -EINVAL;
+		}
+	} else {
+		ctx->intf_recovery.fxn = NULL;
+		ctx->intf_recovery.data = NULL;
+	}
+
+	/* CDM (color down-convert) path, needed only for YUV output. */
+	if (mdss_mdp_is_cdm_supported(mdata, ctl->intf_type, 0)) {
+
+		fmt = mdss_mdp_get_format_params(pinfo->out_format);
+		if (!fmt) {
+			pr_err("%s: format %d not supported\n", __func__,
+			       pinfo->out_format);
+			return -EINVAL;
+		}
+		if (fmt->is_yuv) {
+			ctl->cdm =
+			mdss_mdp_cdm_init(ctl, MDP_CDM_CDWN_OUTPUT_HDMI);
+			if (!IS_ERR_OR_NULL(ctl->cdm)) {
+				if (mdss_mdp_video_cdm_setup(ctl->cdm,
+					pinfo, fmt)) {
+					pr_err("%s: setting up cdm failed\n",
+					       __func__);
+					return -EINVAL;
+				}
+				/* CDM flush bit. */
+				ctl->flush_bits |= BIT(26);
+			} else {
+				pr_err("%s: failed to initialize cdm\n",
+				       __func__);
+				return -EINVAL;
+			}
+		} else {
+			pr_debug("%s: Format is not YUV,cdm not required\n",
+				 __func__);
+		}
+	} else {
+		pr_debug("%s: cdm not supported\n", __func__);
+	}
+
+	if (pinfo->compression_mode == COMPRESSION_DSC)
+		dsc = &pinfo->dsc;
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+				ctx->intf_num, mdss_mdp_video_vsync_intr_done,
+				ctl);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+				ctx->intf_num,
+				mdss_mdp_video_underrun_intr_done, ctl);
+	mdss_mdp_set_intf_intr_callback(ctx, MDSS_MDP_INTF_IRQ_PROG_LINE,
+			mdss_mdp_video_lineptr_intr_done, ctl);
+
+	/* FBC changes the effective bits per pixel on the link. */
+	dst_bpp = pinfo->fbc.enabled ? (pinfo->fbc.target_bpp) : (pinfo->bpp);
+
+	memset(itp, 0, sizeof(struct intf_timing_params));
+	itp->width = mult_frac((pinfo->xres + pinfo->lcdc.border_left +
+			pinfo->lcdc.border_right), dst_bpp, pinfo->bpp);
+	itp->height = pinfo->yres + pinfo->lcdc.border_top +
+					pinfo->lcdc.border_bottom;
+	itp->border_clr = pinfo->lcdc.border_clr;
+	itp->underflow_clr = pinfo->lcdc.underflow_clr;
+	itp->hsync_skew = pinfo->lcdc.hsync_skew;
+
+	/* tg active area is not work, hence yres should equal to height */
+	itp->xres = mult_frac((pinfo->xres + pinfo->lcdc.border_left +
+			pinfo->lcdc.border_right), dst_bpp, pinfo->bpp);
+
+	itp->yres = pinfo->yres + pinfo->lcdc.border_top +
+				pinfo->lcdc.border_bottom;
+
+	if (dsc) {	/* compressed */
+		itp->width = dsc->pclk_per_line;
+		itp->xres = dsc->pclk_per_line;
+	}
+
+	itp->h_back_porch = pinfo->lcdc.h_back_porch;
+	itp->h_front_porch = pinfo->lcdc.h_front_porch;
+	itp->v_back_porch = pinfo->lcdc.v_back_porch;
+	itp->v_front_porch = pinfo->lcdc.v_front_porch;
+	itp->hsync_pulse_width = pinfo->lcdc.h_pulse_width;
+	itp->vsync_pulse_width = pinfo->lcdc.v_pulse_width;
+	/*
+	 * In case of YUV420 output, MDP outputs data at half the rate. So
+	 * reduce all horizontal parameters by half
+	 */
+	if (ctl->cdm && pinfo->out_format == MDP_Y_CBCR_H2V2) {
+		itp->width >>= 1;
+		itp->hsync_skew >>= 1;
+		itp->xres >>= 1;
+		itp->h_back_porch >>= 1;
+		itp->h_front_porch >>= 1;
+		itp->hsync_pulse_width >>= 1;
+	}
+	if (!ctl->panel_data->panel_info.cont_splash_enabled) {
+		if (mdss_mdp_video_timegen_setup(ctl, itp, ctx)) {
+			pr_err("unable to set timing parameters intfs: %d\n",
+				ctx->intf_num);
+			return -EINVAL;
+		}
+		mdss_mdp_fetch_start_config(ctx, ctl);
+
+		if (test_bit(MDSS_QOS_VBLANK_PANIC_CTRL, mdata->mdss_qos_map))
+			mdss_mdp_fetch_end_config(ctx, ctl);
+
+	} else {
+		/* TG already running from bootloader: read back prg_fet. */
+		mdss_mdp_handoff_programmable_fetch(ctl, ctx);
+	}
+
+	mdss_mdp_disable_prefill(ctl);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format);
+
+	return 0;
+}
+
+/*
+ * Claim and configure the video interface context(s) for a ctl: the
+ * master context at index @inum, plus the adjacent context when the fb
+ * is in pingpong split. Increments each context's ref_cnt on claim.
+ * Returns 0 on success, -EBUSY/-EINVAL/-EPERM on failure.
+ *
+ * NOTE(review): on the pingpong-split error paths below the master
+ * context's ref_cnt taken earlier is not released — confirm whether a
+ * caller unwinds it or this is a refcount leak.
+ */
+static int mdss_mdp_video_intfs_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_data *pdata, int inum)
+{
+	struct mdss_data_type *mdata;
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_video_ctx *ctx;
+	int ret = 0;
+
+	if (pdata == NULL)
+		return 0;
+
+	mdata = ctl->mdata;
+	pinfo = &pdata->panel_info;
+
+	if (inum < mdata->nintf) {
+		ctx = ((struct mdss_mdp_video_ctx *) mdata->video_intf) + inum;
+		if (ctx->ref_cnt) {
+			pr_err("Intf %d already in use\n",
+				(inum + MDSS_MDP_INTF0));
+			return -EBUSY;
+		}
+		pr_debug("video Intf #%d base=%pK", ctx->intf_num, ctx->base);
+		ctx->ref_cnt++;
+	} else {
+		pr_err("Invalid intf number: %d\n", (inum + MDSS_MDP_INTF0));
+		return -EINVAL;
+	}
+
+	ctl->intf_ctx[MASTER_CTX] = ctx;
+	ret = mdss_mdp_video_ctx_setup(ctl, ctx, pinfo);
+	if (ret) {
+		pr_err("Video context setup failed for interface: %d\n",
+				ctx->intf_num);
+		ctx->ref_cnt--;
+		return -EPERM;
+	}
+
+	/* Initialize early wakeup for the master ctx */
+	INIT_WORK(&ctx->early_wakeup_dfps_work, early_wakeup_dfps_update_work);
+
+	/* Pingpong split drives a second interface from the same ctl. */
+	if (is_pingpong_split(ctl->mfd)) {
+		if ((inum + 1) >= mdata->nintf) {
+			pr_err("Intf not available for ping pong split: (%d)\n",
+				(inum + 1 + MDSS_MDP_INTF0));
+			return -EINVAL;
+		}
+
+		ctx = ((struct mdss_mdp_video_ctx *) mdata->video_intf) +
+			inum + 1;
+		if (ctx->ref_cnt) {
+			pr_err("Intf %d already in use\n",
+				(inum + MDSS_MDP_INTF0));
+			return -EBUSY;
+		}
+		pr_debug("video Intf #%d base=%pK", ctx->intf_num, ctx->base);
+		ctx->ref_cnt++;
+
+		ctl->intf_ctx[SLAVE_CTX] = ctx;
+		/* Slave interface uses the chained panel's timings. */
+		pinfo = &pdata->next->panel_info;
+		ret = mdss_mdp_video_ctx_setup(ctl, ctx, pinfo);
+		if (ret) {
+			pr_err("Video context setup failed for interface: %d\n",
+					ctx->intf_num);
+			ctx->ref_cnt--;
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Switch a video-mode DSI interface to command mode. With @prep set,
+ * sends the dynamic-switch command, waits out any pending commit plus
+ * one extra frame, then stops the timing engine and drops the bus
+ * bandwidth vote; with @prep clear, only issues the reconfig event.
+ *
+ * Fix: the master context is now NULL-checked before it is
+ * dereferenced, matching every other user of intf_ctx[MASTER_CTX] in
+ * this file.
+ */
+void mdss_mdp_switch_to_cmd_mode(struct mdss_mdp_ctl *ctl, int prep)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	long int mode = MIPI_CMD_PANEL;
+	u32 frame_rate = 0;
+	int rc;
+
+	pr_debug("start, prep = %d\n", prep);
+
+	if (!prep) {
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RECONFIG_CMD,
+			(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+		return;
+	}
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	if (!ctx->timegen_en) {
+		pr_err("Time engine not enabled, cannot switch from vid\n");
+		return;
+	}
+
+	/* Start off by sending command to initial cmd mode */
+	rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+			(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+	if (rc) {
+		pr_err("intf #%d busy don't turn off, rc=%d\n",
+			 ctl->intf_num, rc);
+		return;
+	}
+
+	if (ctx->wait_pending) {
+		/* wait for at least the current commit to complete */
+		wait_for_completion_interruptible_timeout(&ctx->vsync_comp,
+				usecs_to_jiffies(VSYNC_TIMEOUT_US));
+	}
+	frame_rate = mdss_panel_get_framerate(&(ctl->panel_data->panel_info),
+			FPS_RESOLUTION_HZ);
+	/* Clamp to a sane range, then convert to a per-frame ms delay. */
+	if (!(frame_rate >= 24 && frame_rate <= 240))
+		frame_rate = 24;
+	frame_rate = ((1000/frame_rate) + 1);
+	/*
+	 * In order for panel to switch to cmd mode, we need
+	 * to wait for one more video frame to be sent after
+	 * issuing the switch command. We do this before
+	 * turning off the timing engine.
+	 */
+	msleep(frame_rate);
+	mdss_mdp_turn_off_time_engine(ctl, ctx, frame_rate);
+	mdss_bus_bandwidth_ctrl(false);
+}
+
+/*
+ * Workqueue handler scheduled on early-wakeup input events: restores
+ * the panel's cached default fps (pre-dfps value) ahead of the next
+ * commit so the first frame after wakeup renders at full rate.
+ */
+static void early_wakeup_dfps_update_work(struct work_struct *work)
+{
+	struct mdss_mdp_video_ctx *ctx =
+		container_of(work, typeof(*ctx), early_wakeup_dfps_work);
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	struct msm_fb_data_type *mfd;
+	struct mdss_mdp_ctl *ctl;
+	struct dynamic_fps_data data = {0};
+	int ret = 0;
+	int dfps;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ctl = ctx->ctl;
+
+	if (!ctl || !ctl->panel_data || !ctl->mfd || !ctl->mfd->fbi) {
+		pr_err("%s: invalid ctl\n", __func__);
+		return;
+	}
+
+	pdata = ctl->panel_data;
+	pinfo = &ctl->panel_data->panel_info;
+	mfd = ctl->mfd;
+
+	if (!pinfo->dynamic_fps || !ctl->ops.config_fps_fnc ||
+			!pdata->panel_info.default_fps) {
+		pr_debug("%s: dfps not enabled on this panel\n", __func__);
+		return;
+	}
+
+	/* get the default fps that was cached before any dfps update */
+	dfps = pdata->panel_info.default_fps;
+
+	ATRACE_BEGIN(__func__);
+
+	/* Nothing to do if no dfps update ever lowered the rate. */
+	if (dfps == pinfo->mipi.frame_rate) {
+		pr_debug("%s: FPS is already %d\n",
+			__func__, dfps);
+		goto exit;
+	}
+
+	data.fps = dfps;
+	if (mdss_mdp_dfps_update_params(mfd, pdata, &data))
+		pr_err("failed to set dfps params!\n");
+
+	/* update the HW with the new fps */
+	ATRACE_BEGIN("fps_update_wq");
+	ret = mdss_mdp_ctl_update_fps(ctl);
+	ATRACE_END("fps_update_wq");
+	if (ret)
+		pr_err("early wakeup failed to set %d fps ret=%d\n",
+			dfps, ret);
+
+exit:
+	ATRACE_END(__func__);
+}
+
+/*
+ * Input-event hook for video-mode panels. Rate-limits itself via
+ * last_input_time, pushes out the framebuffer idle timer so userspace
+ * does not see an idle notification while new frames are expected, and
+ * schedules the early-wakeup dfps work for dynamic-fps panels.
+ * Always returns 0.
+ */
+static int mdss_mdp_video_early_wake_up(struct mdss_mdp_ctl *ctl)
+{
+	u64 now = ktime_to_us(ktime_get());
+
+	/* Debounce: ignore events arriving too soon after the last one. */
+	if ((now - ctl->last_input_time) < INPUT_EVENT_HANDLER_DELAY_USECS)
+		return 0;
+	ctl->last_input_time = now;
+
+	/*
+	 * If the idle timer is running when input event happens, the timeout
+	 * will be delayed by idle_time again to ensure user space does not get
+	 * an idle event when new frames are expected.
+	 *
+	 * It would be nice to have this logic in mdss_fb.c itself by
+	 * implementing a new frame notification event. But input event handler
+	 * is called from interrupt context and scheduling a work item adds a
+	 * lot of latency rendering the input events useless in preventing the
+	 * idle time out.
+	 */
+	switch (ctl->mfd->idle_state) {
+	case MDSS_FB_IDLE_TIMER_RUNNING:
+	case MDSS_FB_IDLE:
+		if (ctl->mfd->idle_time) {
+			mod_delayed_work(system_wq, &ctl->mfd->idle_notify_work,
+					 msecs_to_jiffies(ctl->mfd->idle_time));
+		} else {
+			/*
+			 * Modify the idle time so that an idle fallback can be
+			 * triggered for those cases, where we have no update
+			 * despite of a touch event and idle time is 0.
+			 */
+			ctl->mfd->idle_time = 70;
+			schedule_delayed_work(&ctl->mfd->idle_notify_work,
+					      msecs_to_jiffies(200));
+		}
+		pr_debug("Delayed idle time\n");
+		break;
+	default:
+		pr_debug("Nothing to done for this state (%d)\n",
+			 ctl->mfd->idle_state);
+		break;
+	}
+
+	/*
+	 * Schedule an fps update, so we can go to default fps before
+	 * commit. Early wake up event is called from an interrupt
+	 * context, so do this from work queue
+	 */
+	if (ctl->panel_data && ctl->panel_data->panel_info.dynamic_fps) {
+		struct mdss_mdp_video_ctx *master_ctx =
+			ctl->intf_ctx[MASTER_CTX];
+
+		if (master_ctx)
+			schedule_work(&master_ctx->early_wakeup_dfps_work);
+	}
+
+	return 0;
+}
+
+/*
+ * Entry point that binds a ctl to the video-mode interface path: claims
+ * and configures the interface context(s), then installs the video-mode
+ * ctl operation callbacks. Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
+{
+	int ret;
+
+	ret = mdss_mdp_video_intfs_setup(ctl, ctl->panel_data,
+			ctl->intf_num - MDSS_MDP_INTF0);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("unable to set video interface: %d\n", ret);
+		return ret;
+	}
+
+	ctl->ops.stop_fnc = mdss_mdp_video_stop;
+	ctl->ops.display_fnc = mdss_mdp_video_display;
+	ctl->ops.wait_fnc = mdss_mdp_video_wait4comp;
+	ctl->ops.wait_vsync_fnc = mdss_mdp_video_wait4vsync;
+	ctl->ops.read_line_cnt_fnc = mdss_mdp_video_line_count;
+	ctl->ops.add_vsync_handler = mdss_mdp_video_add_vsync_handler;
+	ctl->ops.remove_vsync_handler = mdss_mdp_video_remove_vsync_handler;
+	ctl->ops.config_fps_fnc = mdss_mdp_video_config_fps;
+	ctl->ops.early_wake_up_fnc = mdss_mdp_video_early_wake_up;
+	ctl->ops.update_lineptr = mdss_mdp_video_lineptr_ctrl;
+
+	return 0;
+}
+
+/*
+ * Return the register base address of the video interface context at
+ * @interface_id within mdata's interface array.
+ */
+void *mdss_mdp_get_intf_base_addr(struct mdss_data_type *mdata,
+		u32 interface_id)
+{
+	struct mdss_mdp_video_ctx *intfs =
+		(struct mdss_mdp_video_ctx *) mdata->video_intf;
+
+	return (void *)(intfs[interface_id].base);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
new file mode 100644
index 0000000..46f25dd
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -0,0 +1,917 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_rotator_internal.h"
+#include "mdss_panel.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+/*
+ * if BWC enabled and format is H1V2 or 420, do not use site C or I.
+ * Hence, set the bits 29:26 in format register, as zero.
+ */
+#define BWC_FMT_MASK 0xC3FFFFFF
+#define MDSS_DEFAULT_OT_SETTING 0x10
+
+/* Kinds of writeback clients this interface driver can serve. */
+enum mdss_mdp_writeback_type {
+	MDSS_MDP_WRITEBACK_TYPE_ROTATOR,	/* offline rotator output */
+	MDSS_MDP_WRITEBACK_TYPE_LINE,		/* line (concurrent) mode */
+	MDSS_MDP_WRITEBACK_TYPE_WFD,		/* wireless display frame mode */
+};
+
+/*
+ * Per-writeback-block runtime state.  One instance per hardware WB
+ * engine lives in wb_ctx_list[] and is claimed via ref_cnt by a ctl.
+ */
+struct mdss_mdp_writeback_ctx {
+	u32 wb_num;			/* hardware writeback index */
+	char __iomem *base;		/* WB block register base */
+	u8 ref_cnt;			/* non-zero while owned by a ctl */
+	u8 type;			/* enum mdss_mdp_writeback_type */
+	struct completion wb_comp;	/* signalled on frame-done IRQ */
+	int comp_cnt;			/* kickoffs awaiting completion */
+
+	u32 intr_type;
+	u32 intf_num;
+
+	u32 xin_id;			/* VBIF client id for QoS/OT setup */
+	u32 wr_lim;
+	struct mdss_mdp_shared_reg_ctrl clk_ctrl;
+
+	u32 opmode;			/* cached WB_DST_OP_MODE value */
+	struct mdss_mdp_format_params *dst_fmt;
+	u16 img_width;			/* full output buffer dimensions */
+	u16 img_height;
+	u16 width;			/* active write region dimensions */
+	u16 height;
+	u16 frame_rate;
+	enum mdss_mdp_csc_type csc_type;
+	struct mdss_rect dst_rect;
+
+	u32 dnsc_factor_w;		/* rotator downscale factors */
+	u32 dnsc_factor_h;
+
+	u8 rot90;			/* non-zero if 90-degree rotation */
+	u32 bwc_mode;
+	int initialized;		/* WFD format setup already done */
+
+	struct mdss_mdp_plane_sizes dst_planes;
+
+	spinlock_t wb_lock;		/* protects vsync_handlers list */
+	struct list_head vsync_handlers;
+
+	ktime_t start_time;		/* kickoff/done timestamps for stats */
+	ktime_t end_time;
+};
+
+/*
+ * Static descriptors for every writeback engine: two rotator WBs, two
+ * line-mode WBs and one WFD WB.  xin_id and clk_ctrl values are the
+ * VBIF client id and clock-gating register/bit for each engine.
+ */
+static struct mdss_mdp_writeback_ctx wb_ctx_list[MDSS_MDP_MAX_WRITEBACK] = {
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 0,
+		.xin_id = 3,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0x8,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 1,
+		.xin_id = 11,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0xC,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_LINE,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 0,
+		.xin_id = 3,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0x8,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_LINE,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 1,
+		.xin_id = 11,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0xC,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_WFD,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_WFD_COMP,
+		.intf_num = 0,
+		.xin_id = 6,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0x10,
+	},
+};
+
+/* Write @val to WB register at offset @reg from the block's base. */
+static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
+				u32 reg, u32 val)
+{
+	writel_relaxed(val, ctx->base + reg);
+}
+
+/*
+ * mdss_mdp_writeback_addr_setup() - program destination plane addresses
+ * @ctx: writeback context with format/plane layout already configured
+ * @in_data: buffer description with per-plane addresses
+ *
+ * Validates the buffer against the destination planes, applies the
+ * dst_rect x/y offset, and writes the (up to four) plane addresses to
+ * the WB_DSTn_ADDR registers.
+ *
+ * Return: 0 on success, -EINVAL on NULL input, or the error from the
+ * data check.
+ */
+static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
+					 const struct mdss_mdp_data *in_data)
+{
+	int ret;
+	struct mdss_mdp_data data;
+
+	if (!in_data)
+		return -EINVAL;
+	/* local copy so the chroma swap below does not mutate the caller's */
+	data = *in_data;
+
+	pr_debug("wb_num=%d addr=0x%pa\n", ctx->wb_num, &data.p[0].addr);
+
+	ret = mdss_mdp_data_check(&data, &ctx->dst_planes, ctx->dst_fmt);
+	if (ret)
+		return ret;
+
+	mdss_mdp_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
+			&ctx->dst_planes, ctx->dst_fmt);
+
+	/* planar formats with Cb first need the chroma planes exchanged */
+	if ((ctx->dst_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) &&
+			(ctx->dst_fmt->element[0] == C1_B_Cb))
+		swap(data.p[1].addr, data.p[2].addr);
+
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_cdm_setup() - configure the CDM block for WB output
+ * @ctx: writeback context providing output dimensions and csc type
+ * @cdm: CDM block to configure
+ * @fmt: destination format, selects the chroma downsampling mode
+ *
+ * Maps the format's chroma sampling to CDM downsampler settings and
+ * hands the resulting config to mdss_mdp_cdm_setup().  H1V2 sampling is
+ * rejected.
+ *
+ * NOTE(review): only a subset of struct mdp_cdm_cfg fields is set here;
+ * presumably mdss_mdp_cdm_setup() ignores the rest — confirm, otherwise
+ * `setup` should be zero-initialized.
+ *
+ * Return: 0 on success, -EINVAL for unsupported chroma sampling, or the
+ * error from mdss_mdp_cdm_setup().
+ */
+static int mdss_mdp_writeback_cdm_setup(struct mdss_mdp_writeback_ctx *ctx,
+		struct mdss_mdp_cdm *cdm, struct mdss_mdp_format_params *fmt)
+{
+	struct mdp_cdm_cfg setup;
+
+	switch (fmt->chroma_sample) {
+	case MDSS_MDP_CHROMA_RGB:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_H2V1:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_420:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_OFFSITE;
+		break;
+	case MDSS_MDP_CHROMA_H1V2:
+	default:
+		pr_err("%s: unsupported chroma sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	setup.out_format = fmt->format;
+	setup.mdp_csc_bit_depth = MDP_CDM_CSC_8BIT;
+	setup.output_width = ctx->width;
+	setup.output_height = ctx->height;
+	setup.csc_type = ctx->csc_type;
+	return mdss_mdp_cdm_setup(cdm, &setup);
+}
+
+/*
+ * mdss_mdp_set_wb_cdp() - program client-driven prefetch for writeback
+ * @ctx: writeback context to program
+ * @fmt: destination format (UBWC formats enable metadata prefetch)
+ *
+ * Writes the CDP control register: disabled entirely for rotator WBs on
+ * hardware with the ROTCDP quirk, otherwise enabled with UBWC-metadata
+ * prefetch for non-linear formats and 64-deep transactions for
+ * non-rotator (line/WFD) clients.
+ */
+void mdss_mdp_set_wb_cdp(struct mdss_mdp_writeback_ctx *ctx,
+	struct mdss_mdp_format_params *fmt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 cdp_settings = 0x0;
+
+	/* Disable CDP for rotator in v1 */
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR &&
+			mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP))
+		goto exit;
+
+	cdp_settings = MDSS_MDP_CDP_ENABLE;
+
+	if (!mdss_mdp_is_linear_format(fmt))
+		cdp_settings |= MDSS_MDP_CDP_ENABLE_UBWCMETA;
+
+	/* 64-transactions for line mode otherwise we keep 32 */
+	if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		cdp_settings |= MDSS_MDP_CDP_AHEAD_64;
+
+exit:
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_CDP_CTRL, cdp_settings);
+}
+
+/*
+ * mdss_mdp_writeback_format_setup() - program WB destination format
+ * @ctx: writeback context (dimensions, opmode, downscale factors)
+ * @format: MDP pixel format identifier for the destination buffer
+ * @ctl: owning control path (CDM, flush bits)
+ *
+ * Resolves @format, computes plane sizes, and programs the full set of
+ * destination registers: format word, pack pattern, plane strides,
+ * output size, write config and (for rotators with downscale support)
+ * the downscaler factors.  When the destination is YUV it either routes
+ * through the CDM block (if attached to @ctl) or enables the WB-internal
+ * RGB-to-YUV601L CSC for line/WFD clients.
+ *
+ * Return: 0 on success, -EINVAL for unsupported formats or chroma
+ * sampling, or the error from CDM setup.
+ */
+static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx,
+		u32 format, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_format_params *fmt;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 dnsc_factor, write_config = 0;
+	u32 opmode = ctx->opmode;
+	bool rotation = false;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc;
+
+	pr_debug("wb_num=%d format=%d\n", ctx->wb_num, format);
+
+	if (ctx->rot90)
+		rotation = true;
+
+	fmt = mdss_mdp_get_format_params(format);
+	if (!fmt) {
+		pr_err("wb format=%d not supported\n", format);
+		return -EINVAL;
+	}
+
+	mdss_mdp_get_plane_sizes(fmt, ctx->img_width, ctx->img_height,
+				 &ctx->dst_planes,
+				 ctx->opmode & MDSS_MDP_OP_BWC_EN, rotation);
+
+	ctx->dst_fmt = fmt;
+
+	chroma_samp = fmt->chroma_sample;
+
+	/* YUV out via CDM when the ctl owns one; flush bit 26 covers CDM */
+	if (ctl->cdm) {
+		rc = mdss_mdp_writeback_cdm_setup(ctx, ctl->cdm, fmt);
+		if (rc) {
+			pr_err("%s: CDM config failed with error %d\n",
+				__func__, rc);
+			return rc;
+		}
+		ctl->flush_bits |= BIT(26);
+	}
+	/* otherwise use the WB-internal CSC for YUV line/WFD output */
+	if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR &&
+			fmt->is_yuv && !ctl->cdm) {
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_WB, ctx->wb_num,
+				   MDSS_MDP_CSC_RGB2YUV_601L);
+		opmode |= (1 << 8) |	/* CSC_EN */
+			  (0 << 9) |	/* SRC_DATA=RGB */
+			  (1 << 10);	/* DST_DATA=YCBCR */
+
+		switch (chroma_samp) {
+		case MDSS_MDP_CHROMA_RGB:
+		case MDSS_MDP_CHROMA_420:
+		case MDSS_MDP_CHROMA_H2V1:
+			opmode |= (chroma_samp << 11);
+			break;
+		case MDSS_MDP_CHROMA_H1V2:
+		default:
+			pr_err("unsupported wb chroma samp=%d\n", chroma_samp);
+			return -EINVAL;
+		}
+	}
+
+	dst_format = (chroma_samp << 23) |
+		     (fmt->fetch_planes << 19) |
+		     (fmt->bits[C3_ALPHA] << 6) |
+		     (fmt->bits[C2_R_Cr] << 4) |
+		     (fmt->bits[C1_B_Cb] << 2) |
+		     (fmt->bits[C0_G_Y] << 0);
+
+	/* see BWC_FMT_MASK: clear bits 29:26 so sites C/I are never used */
+	dst_format &= BWC_FMT_MASK;
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable)
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (fmt->is_yuv && test_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map))
+		dst_format |= BIT(15);
+
+	/* element[2] field width differs on quirky pack-pattern hardware */
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN)) {
+		pattern = (fmt->element[3] << 24) |
+			  (fmt->element[2] << 15) |
+			  (fmt->element[1] << 8) |
+			  (fmt->element[0] << 0);
+	} else {
+		pattern = (fmt->element[3] << 24) |
+			  (fmt->element[2] << 16) |
+			  (fmt->element[1] << 8) |
+			  (fmt->element[0] << 0);
+	}
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+		      (fmt->unpack_tight << 17) |
+		      ((fmt->unpack_count - 1) << 12) |
+		      ((fmt->bpp - 1) << 9);
+
+	dst_format |= (fmt->unpack_dx_format << 21);
+
+	ystride0 = (ctx->dst_planes.ystride[0]) |
+		   (ctx->dst_planes.ystride[1] << 16);
+	ystride1 = (ctx->dst_planes.ystride[2]) |
+		   (ctx->dst_planes.ystride[3] << 16);
+	outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
+
+	if (mdss_mdp_is_ubwc_format(fmt)) {
+		opmode |= BIT(0);
+		dst_format |= BIT(31);
+		if (mdata->highest_bank_bit)
+			write_config |= (mdata->highest_bank_bit << 8);
+		if (fmt->format == MDP_RGB_565_UBWC)
+			write_config |= 0x8;
+	}
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR
+			&& mdata->has_rot_dwnscale) {
+		dnsc_factor = (ctx->dnsc_factor_h) | (ctx->dnsc_factor_w << 16);
+		mdp_wb_write(ctx, MDSS_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER,
+			dnsc_factor);
+	}
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_ALPHA_X_VALUE, 0xFF);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_OUT_SIZE, outsize);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_WRITE_CONFIG, write_config);
+
+	/* configure CDP */
+	if (test_bit(MDSS_QOS_CDP, mdata->mdss_qos_map))
+		mdss_mdp_set_wb_cdp(ctx, fmt);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_prepare_wfd() - per-commit setup for WFD/line WB
+ * @ctl: control path owning the writeback context
+ * @arg: unused for WFD
+ *
+ * Copies the ctl's dimensions into the WB context and programs the
+ * destination format.  Skipped after the first call unless the ctl uses
+ * a shared lock (i.e. the WB block is shared and must be reconfigured).
+ *
+ * Return: 0 on success, -ENODEV without a context, or format setup
+ * errors.
+ */
+static int mdss_mdp_writeback_prepare_wfd(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	int ret;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+
+	if (ctx->initialized && !ctl->shared_lock) /* already set */
+		return 0;
+
+	pr_debug("wfd setup ctl=%d\n", ctl->num);
+
+	ctx->opmode = 0;
+	ctx->img_width = ctl->width;
+	ctx->img_height = ctl->height;
+	ctx->width = ctl->width;
+	ctx->height = ctl->height;
+	ctx->frame_rate = ctl->frame_rate;
+	ctx->csc_type = ctl->csc_type;
+	ctx->dst_rect.x = 0;
+	ctx->dst_rect.y = 0;
+	ctx->dst_rect.w = ctx->width;
+	ctx->dst_rect.h = ctx->height;
+
+	ret = mdss_mdp_writeback_format_setup(ctx, ctl->dst_format, ctl);
+	if (ret) {
+		pr_err("format setup failed\n");
+		return ret;
+	}
+
+	ctx->initialized = true;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_prepare_rot() - per-kickoff setup for rotator WB
+ * @ctl: control path owning the writeback context
+ * @arg: struct mdss_mdp_writeback_arg whose priv_data is the rotation
+ *       entry being processed
+ *
+ * Pulls dimensions, destination rect, downscale factors and rotation
+ * flags out of the rotation entry, builds the WB opmode (ROT_EN, block
+ * size, ROT90) and programs the destination format.
+ *
+ * Return: 0 on success; -ENODEV when context/entry/mdata is missing,
+ * -ENOENT without writeback args, or format setup errors.
+ */
+static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback_arg *wb_args;
+	struct mdss_rot_entry *entry;
+	struct mdp_rotation_item *item;
+	struct mdss_rot_perf *perf;
+	struct mdss_data_type *mdata;
+	u32 format;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	entry = (struct mdss_rot_entry *) wb_args->priv_data;
+	if (!entry) {
+		pr_err("unable to retrieve rot session ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+	item = &entry->item;
+	perf = entry->perf;
+	mdata = ctl->mdata;
+	if (!mdata) {
+		pr_err("no mdata attached to ctl=%d", ctl->num);
+		return -ENODEV;
+	}
+	pr_debug("rot setup wb_num=%d\n", ctx->wb_num);
+
+	ctx->opmode = BIT(6); /* ROT EN */
+	if (ctl->mdata->rot_block_size == 128)
+		ctx->opmode |= BIT(4); /* block size 128 */
+
+	/* BWC is not used on this rotator writeback path */
+	ctx->bwc_mode = 0;
+	ctx->opmode |= ctx->bwc_mode;
+
+	ctx->img_width = item->output.width;
+	ctx->img_height = item->output.height;
+	ctx->width = ctx->dst_rect.w = item->dst_rect.w;
+	ctx->height = ctx->dst_rect.h = item->dst_rect.h;
+	ctx->dst_rect.x = item->dst_rect.x;
+	ctx->dst_rect.y = item->dst_rect.y;
+	ctx->frame_rate = perf->config.frame_rate;
+	ctx->dnsc_factor_w = entry->dnsc_factor_w;
+	ctx->dnsc_factor_h = entry->dnsc_factor_h;
+
+	ctx->rot90 = !!(item->flags & MDP_ROTATION_90);
+
+	format = item->output.format;
+
+	if (ctx->rot90)
+		ctx->opmode |= BIT(5); /* ROT 90 */
+
+	return mdss_mdp_writeback_format_setup(ctx, format, ctl);
+}
+
+/*
+ * mdss_mdp_wb_add_vsync_handler() - register a frame-done callback
+ * @ctl: control path owning the writeback context
+ * @handle: handler to add; its vsync_handler must be non-NULL
+ *
+ * Adds @handle to the context's handler list under wb_lock; handlers
+ * are invoked from the writeback-done interrupt path.  Adding an
+ * already-enabled handler is a no-op.
+ *
+ * Return: 0 on success, -EINVAL for a bad handle, -ENODEV without ctx.
+ */
+static int mdss_mdp_wb_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!handle || !(handle->vsync_handler)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	spin_lock_irqsave(&ctx->wb_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->vsync_handlers);
+	}
+	spin_unlock_irqrestore(&ctx->wb_lock, flags);
+exit:
+	return ret;
+}
+
+/*
+ * mdss_mdp_wb_remove_vsync_handler() - unregister a frame-done callback
+ * @ctl: control path owning the writeback context
+ * @handle: handler previously added with mdss_mdp_wb_add_vsync_handler()
+ *
+ * Removes @handle from the handler list under wb_lock.  Removing a
+ * handler that is not enabled is a no-op.
+ *
+ * Return: 0 on success, -EINVAL for a bad handle, -ENODEV without ctx.
+ */
+static int mdss_mdp_wb_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!handle || !(handle->vsync_handler)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		ret = -ENODEV;
+		goto exit;
+	}
+	spin_lock_irqsave(&ctx->wb_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+	}
+	spin_unlock_irqrestore(&ctx->wb_lock, flags);
+exit:
+	return ret;
+}
+
+/*
+ * mdss_mdp_writeback_stop() - tear down a writeback session
+ * @ctl: control path to stop
+ * @panel_power_state: requested panel power state (unused here)
+ *
+ * Removes all vsync handlers, clears the interrupt callback, wakes any
+ * waiters on wb_comp, releases the context reference taken in
+ * mdss_mdp_writeback_start(), and destroys the CDM block if one was
+ * attached.  Always returns 0.
+ */
+static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl,
+	int panel_power_state)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_vsync_handler *t, *handle;
+
+	pr_debug("stop ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (ctx) {
+		list_for_each_entry_safe(handle, t, &ctx->vsync_handlers, list)
+			mdss_mdp_wb_remove_vsync_handler(ctl, handle);
+
+		mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+				NULL, NULL);
+
+		/* release anyone still blocked in wait4comp */
+		complete_all(&ctx->wb_comp);
+
+		ctl->priv_data = NULL;
+		ctx->ref_cnt--;
+	}
+
+	if (ctl->cdm) {
+		mdss_mdp_cdm_destroy(ctl->cdm);
+		ctl->cdm = NULL;
+	}
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_intr_done() - writeback frame-done IRQ callback
+ * @arg: the mdss_mdp_ctl that kicked off the frame
+ *
+ * Runs in interrupt context: disables the WB interrupt, invokes every
+ * registered vsync handler with the completion timestamp, and signals
+ * wb_comp so mdss_mdp_wb_wait4comp() can proceed.
+ */
+static void mdss_mdp_writeback_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_writeback_ctx *ctx = ctl->priv_data;
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+	vsync_time = ktime_get();
+
+	pr_debug("intr wb_num=%d\n", ctx->wb_num);
+
+	mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
+
+	spin_lock(&ctx->wb_lock);
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->wb_lock);
+
+	complete_all(&ctx->wb_comp);
+	MDSS_XLOG(ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
+}
+
+/*
+ * mdss_mdp_traffic_shaper_helper() - program per-pipe traffic shapers
+ * @ctl: control path whose left mixer feeds the rotator
+ * @ctx: writeback context (used only for the VBIF client index)
+ * @enable: true to program shaper rates, false to clear them
+ *
+ * Only acts in rotator mode.  For each staged pipe it derives a
+ * bytes-per-clock budget from the pipe's bandwidth vote (halved, since
+ * the vote covers read+write) in 4.4 fixed point and writes it, with
+ * the enable bit, to the per-client traffic shaper register.
+ *
+ * Return: true if at least one shaper was enabled.
+ */
+static bool mdss_mdp_traffic_shaper_helper(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_writeback_ctx *ctx,
+		bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool traffic_shaper_enabled = false;
+	struct mdss_mdp_mixer *mixer = ctl->mixer_left;
+	int i;
+	u32 clk_rate;
+	u64 bw_rate;
+
+	if (!mixer)
+		return traffic_shaper_enabled;
+
+	/* currently only for rotator pipes */
+	if (!mixer->rotator_mode)
+		return traffic_shaper_enabled;
+
+	for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
+		struct mdss_mdp_pipe *pipe;
+		struct mdss_mdp_perf_params perf;
+		u32 traffic_shaper;
+
+		pipe = mixer->stage_pipe[i];
+
+		memset(&perf, 0, sizeof(perf));
+
+		if (pipe == NULL)
+			continue;
+
+		if (enable) {
+			if (mdss_mdp_perf_calc_pipe(pipe, &perf, &mixer->roi,
+					PERF_CALC_PIPE_SINGLE_LAYER))
+				continue;
+
+			clk_rate = max(mdss_mdp_get_mdp_clk_rate(ctl->mdata),
+					perf.mdp_clk_rate);
+			ctl->traffic_shaper_mdp_clk = clk_rate;
+			bw_rate = perf.bw_overlap;
+
+			/*
+			 * Bandwidth vote accounts for both read and write
+			 * rotator, divide by 2 to get only the write bandwidth.
+			 */
+			do_div(bw_rate, 2);
+
+			/*
+			 * Calculating bytes per clock in 4.4 form
+			 * allowing up to 1/16 granularity.
+			 */
+			do_div(bw_rate,
+				(clk_rate >>
+				 MDSS_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR));
+
+			traffic_shaper = lower_32_bits(bw_rate) + 1;
+			traffic_shaper |= MDSS_MDP_REG_TRAFFIC_SHAPER_EN;
+			traffic_shaper_enabled = true;
+
+			pr_debug("pnum=%d inum:%d bw=%lld clk_rate=%u shaper=0x%x ena:%d\n",
+				pipe->num, ctx->intf_num, perf.bw_overlap,
+				clk_rate, traffic_shaper, enable);
+
+		} else {
+			traffic_shaper = 0;
+
+			pr_debug("inum:%d shaper=0x%x, ena:%d\n",
+				ctx->intf_num, traffic_shaper, enable);
+		}
+
+		writel_relaxed(traffic_shaper, mdata->mdp_base +
+			MDSS_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(ctx->intf_num));
+	}
+
+	return traffic_shaper_enabled;
+}
+
+/*
+ * mdss_mdp_traffic_shaper() - enable/disable rotator traffic shaping
+ * @ctl: control path to act on
+ * @ctx: writeback context forwarded to the helper
+ * @enable: desired shaper state
+ *
+ * Applies the shaper only while the ctl is powered on, and records the
+ * resulting state in ctl->traffic_shaper_enabled so wait4comp knows
+ * whether to disable it after the frame completes.
+ */
+static void mdss_mdp_traffic_shaper(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_writeback_ctx *ctx, bool enable)
+{
+	bool traffic_shaper_enabled = 0;
+
+	if (mdss_mdp_ctl_is_power_on(ctl)) {
+		traffic_shaper_enabled = mdss_mdp_traffic_shaper_helper
+			(ctl, ctx, enable);
+	}
+
+	ctl->traffic_shaper_enabled = traffic_shaper_enabled;
+
+	pr_debug("traffic shapper ctl:%d ena:%d\n", ctl->num,
+		ctl->traffic_shaper_enabled);
+}
+
+/*
+ * mdss_mdp_wb_wait4comp() - wait for a kicked-off writeback frame
+ * @ctl: control path that started the frame
+ * @arg: unused
+ *
+ * Blocks on wb_comp up to KOFF_TIMEOUT.  On timeout it re-reads the
+ * interrupt status register: if the done bit is pending (IRQ was lost),
+ * it clears the interrupt and runs the done handler manually; otherwise
+ * it reports a real frame timeout.  On success it notifies FRAME_DONE,
+ * then in all cases drops the traffic shaper, IOMMU/bus/clock votes
+ * taken at kickoff, flags ctl bandwidth for release, and decrements the
+ * pending-completion count.
+ *
+ * Return: 0 when the frame completed (or the lost IRQ was recovered),
+ * -ENODEV on missing context or a genuine timeout.
+ */
+static int mdss_mdp_wb_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	int rc = 0;
+	u64 rot_time;
+	u32 status, mask, isr;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* nothing in flight, nothing to wait for */
+	if (ctx->comp_cnt == 0)
+		return rc;
+
+	rc = wait_for_completion_timeout(&ctx->wb_comp,
+			KOFF_TIMEOUT);
+	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+		NULL, NULL);
+
+	if (rc == 0) {
+		/* timeout: check whether the IRQ fired but was not seen */
+		mask = BIT(ctx->intr_type + ctx->intf_num);
+
+		isr = readl_relaxed(ctl->mdata->mdp_base +
+					MDSS_MDP_REG_INTR_STATUS);
+		status = mask & isr;
+
+		pr_info_once("mask: 0x%x, isr: 0x%x, status: 0x%x\n",
+				mask, isr, status);
+
+		if (status) {
+			pr_warn_once("wb done but irq not triggered\n");
+			mdss_mdp_irq_clear(ctl->mdata,
+					ctx->intr_type,
+					ctx->intf_num);
+
+			mdss_mdp_writeback_intr_done(ctl);
+			rc = 0;
+		} else {
+			mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+			rc = -ENODEV;
+			WARN(1, "writeback kickoff timed out (%d) ctl=%d\n",
+						rc, ctl->num);
+		}
+	} else {
+		rc = 0;
+	}
+
+	if (rc == 0) {
+		ctx->end_time = ktime_get();
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_DONE);
+	}
+
+	/* once operation is done, disable traffic shaper */
+	if (ctl->traffic_shaper_enabled)
+		mdss_mdp_traffic_shaper(ctl, ctx, false);
+
+	/* drop the votes taken in mdss_mdp_writeback_display() */
+	mdss_iommu_ctrl(0);
+	mdss_bus_bandwidth_ctrl(false);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	/* Set flag to release Controller Bandwidth */
+	ctl->perf_release_ctl_bw = true;
+
+	ctx->comp_cnt--;
+
+	if (!rc) {
+		rot_time = (u64)ktime_to_us(ctx->end_time) -
+				(u64)ktime_to_us(ctx->start_time);
+		pr_debug("ctx%d type:%d xin_id:%d intf_num:%d took %llu microsecs\n",
+			ctx->wb_num, ctx->type, ctx->xin_id,
+				ctx->intf_num, rot_time);
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_set_ot_limit_wb() - apply outstanding-transaction limits
+ * @ctx: writeback context supplying dimensions, xin id and clk control
+ *
+ * Fills an ot_params descriptor from the context (write-client, rotator
+ * or not, YUV or not, NRT VBIF if available) and hands it to the common
+ * mdss_mdp_set_ot_limit() to program the VBIF write limit.
+ */
+static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx)
+{
+	struct mdss_mdp_set_ot_params ot_params;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ot_params.xin_id = ctx->xin_id;
+	ot_params.num = ctx->wb_num;
+	ot_params.width = ctx->width;
+	ot_params.height = ctx->height;
+	ot_params.frame_rate = ctx->frame_rate;
+	ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
+	ot_params.reg_off_mdp_clk_ctrl = ctx->clk_ctrl.reg_off;
+	ot_params.bit_off_mdp_clk_ctrl = ctx->clk_ctrl.bit_off;
+	ot_params.is_rot = (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR);
+	ot_params.is_wb = true;
+	ot_params.is_yuv = ctx->dst_fmt->is_yuv;
+	ot_params.is_vbif_nrt = mdss_mdp_is_nrt_vbif_base_defined(mdata);
+
+	mdss_mdp_set_ot_limit(&ot_params);
+
+}
+
+/*
+ * mdss_mdp_writeback_display() - kick off one writeback frame
+ * @ctl: control path to start
+ * @arg: struct mdss_mdp_writeback_arg with the destination buffer
+ *
+ * Refuses to start while a previous kickoff is still pending.  Applies
+ * OT limits and (for rotators) the traffic shaper, programs the
+ * destination addresses, arms the done-IRQ callback and completion,
+ * takes IOMMU/clock/bus votes (released in wait4comp), then writes
+ * CTL_FLUSH and CTL_START to launch the frame.
+ *
+ * Return: 0 on success; -ENODEV/-ENOENT for missing state, -EPERM when
+ * busy, or address-setup / IOMMU errors.
+ */
+static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback_arg *wb_args;
+	u32 flush_bits = 0;
+	int ret;
+
+	if (!ctl || !ctl->mdata)
+		return -ENODEV;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+
+	if (ctx->comp_cnt) {
+		pr_err("previous kickoff not completed yet, ctl=%d\n",
+					ctl->num);
+		return -EPERM;
+	}
+
+	if (ctl->mdata->default_ot_wr_limit ||
+			ctl->mdata->default_ot_rd_limit)
+		mdss_mdp_set_ot_limit_wb(ctx);
+
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR
+			&& ctl->mdata->traffic_shaper_en)
+		mdss_mdp_traffic_shaper(ctl, ctx, true);
+
+	ret = mdss_mdp_writeback_addr_setup(ctx, wb_args->data);
+	if (ret) {
+		pr_err("writeback data setup error ctl=%d\n", ctl->num);
+		return ret;
+	}
+
+	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+		   mdss_mdp_writeback_intr_done, ctl);
+
+	flush_bits |= ctl->flush_reg_data;
+	flush_bits |= BIT(16); /* WB */
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS, ctl->is_secure);
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+	MDSS_XLOG(ctl->intf_num, flush_bits);
+
+	reinit_completion(&ctx->wb_comp);
+	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("IOMMU attach failed\n");
+		return ret;
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mdss_bus_bandwidth_ctrl(true);
+	ctx->start_time = ktime_get();
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+	/* make sure MDP writeback is enabled */
+	wmb();
+
+	MDSS_XLOG(ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num,
+		ctx->dst_rect.w, ctx->dst_rect.h);
+	pr_debug("ctx%d type:%d xin_id:%d intf_num:%d start\n",
+		 ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
+
+	ctx->comp_cnt++;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_start() - bind a writeback context to a ctl
+ * @ctl: control path to initialize for writeback
+ *
+ * Selects the WB context from ctl->opmode, claims it via ref_cnt,
+ * optionally allocates a CDM block for YUV output, initializes the
+ * completion/lock/handler list, and installs the writeback ops
+ * (rotator prepare vs WFD prepare, plus stop/display/wait/vsync).
+ *
+ * NOTE(review): the error returns after ctx->ref_cnt++ (format lookup
+ * failure, CDM busy) leave ref_cnt elevated, so the WB engine would
+ * report -EBUSY forever after — looks like a refcount leak; confirm
+ * whether callers recover via mdss_mdp_writeback_stop().
+ *
+ * Return: 0 on success (including no-wb ctls), -EBUSY if the engine or
+ * CDM is in use, -EINVAL for a bad mode or format.
+ */
+int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback *wb;
+	u32 mem_sel;
+	u32 mixer_type = MDSS_MDP_MIXER_TYPE_UNUSED;
+	struct mdss_mdp_format_params *fmt = NULL;
+	bool is_rot;
+
+	pr_debug("start ctl=%d\n", ctl->num);
+
+	if (!ctl->wb) {
+		pr_debug("wb not setup in the ctl\n");
+		return 0;
+	}
+
+	wb = ctl->wb;
+	/* low nibble of opmode selects the WB engine (1-based) */
+	mem_sel = (ctl->opmode & 0xF) - 1;
+	if (mem_sel < MDSS_MDP_MAX_WRITEBACK) {
+		ctx = &wb_ctx_list[mem_sel];
+		if (ctx->ref_cnt) {
+			pr_err("writeback in use %d\n", mem_sel);
+			return -EBUSY;
+		}
+		ctx->ref_cnt++;
+	} else {
+		pr_err("invalid writeback mode %d\n", mem_sel);
+		return -EINVAL;
+	}
+
+	fmt = mdss_mdp_get_format_params(ctl->dst_format);
+	if (!fmt)
+		return -EINVAL;
+
+	is_rot = (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) ? true : false;
+
+	if (ctl->mixer_left) {
+		mixer_type = ctl->mixer_left->type;
+		/*
+		 * If the WB mixer is dedicated, the rotator uses a virtual
+		 * mixer. Mark the mixer_type as UNUSED in such cases.
+		 */
+		if ((mixer_type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && is_rot)
+			mixer_type = MDSS_MDP_MIXER_TYPE_UNUSED;
+	}
+
+	if (mdss_mdp_is_cdm_supported(ctl->mdata, ctl->intf_type,
+				      mixer_type) && fmt->is_yuv) {
+		ctl->cdm = mdss_mdp_cdm_init(ctl, MDP_CDM_CDWN_OUTPUT_WB);
+		if (IS_ERR_OR_NULL(ctl->cdm)) {
+			pr_err("cdm block already in use\n");
+			ctl->cdm = NULL;
+			return -EBUSY;
+		}
+	}
+	ctl->priv_data = ctx;
+	ctx->wb_num = wb->num;
+	ctx->base = wb->base;
+	ctx->initialized = false;
+	init_completion(&ctx->wb_comp);
+	spin_lock_init(&ctx->wb_lock);
+	INIT_LIST_HEAD(&ctx->vsync_handlers);
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		ctl->ops.prepare_fnc = mdss_mdp_writeback_prepare_rot;
+	else { /* wfd or line mode */
+		ctl->ops.prepare_fnc = mdss_mdp_writeback_prepare_wfd;
+
+		/* WB2 Intr Enable is BIT(2) in MDSS 1.8.0 */
+		if (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_108) {
+			ctx->intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP;
+			ctx->intf_num = 2;
+		}
+	}
+	ctl->ops.stop_fnc = mdss_mdp_writeback_stop;
+	ctl->ops.display_fnc = mdss_mdp_writeback_display;
+	ctl->ops.wait_fnc = mdss_mdp_wb_wait4comp;
+	ctl->ops.add_vsync_handler = mdss_mdp_wb_add_vsync_handler;
+	ctl->ops.remove_vsync_handler = mdss_mdp_wb_remove_vsync_handler;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_display_commit() - commit a writeback frame
+ * @ctl: control path to commit
+ * @arg: opaque argument forwarded to mdss_mdp_display_commit()
+ *
+ * Sanity-checks that the shared lock (when one exists) is held, bumps
+ * the mixers' params_changed counters after a mixer switch so they get
+ * reprogrammed, and delegates to the generic display commit.
+ *
+ * Return: 0 on success, -EINVAL if the shared lock is not held, or the
+ * result of mdss_mdp_display_commit().
+ */
+int mdss_mdp_writeback_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	if (ctl->shared_lock && !mutex_is_locked(ctl->shared_lock)) {
+		pr_err("shared mutex is not locked before commit on ctl=%d\n",
+			ctl->num);
+		return -EINVAL;
+	}
+
+	if (ctl->mdata->mixer_switched) {
+		if (ctl->mixer_left)
+			ctl->mixer_left->params_changed++;
+		if (ctl->mixer_right)
+			ctl->mixer_right->params_changed++;
+	}
+
+	return mdss_mdp_display_commit(ctl, arg, NULL);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
new file mode 100644
index 0000000..b1c8041
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -0,0 +1,2369 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/memblock.h>
+#include <linux/file.h>
+
+#include <soc/qcom/event_timer.h>
+#include "mdss.h"
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_wfd.h"
+#include "mdss_sync.h"
+
+#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
+ (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+#define SCALER_ENABLED \
+ (MDP_LAYER_ENABLE_PIXEL_EXT | MDP_LAYER_ENABLE_QSEED3_SCALE)
+
+/* Fence slots handed back to userspace for a commit. */
+enum {
+	MDSS_MDP_RELEASE_FENCE = 0,
+	MDSS_MDP_RETIRE_FENCE,
+};
+
+/* How a layer maps onto the pipe queues during validation. */
+enum layer_pipe_q {
+	LAYER_USES_NEW_PIPE_Q = 0,
+	LAYER_USES_USED_PIPE_Q,
+	LAYER_USES_DESTROY_PIPE_Q,
+};
+
+/* Bitmask of which LM halves a layer's z-order slot occupies. */
+enum layer_zorder_used {
+	LAYER_ZORDER_NONE = 0,
+	LAYER_ZORDER_LEFT = 1,
+	LAYER_ZORDER_RIGHT = 2,
+	LAYER_ZORDER_BOTH = 3,
+};
+
+/* Pairs an input layer with its resolved multirect parameters. */
+struct mdss_mdp_validate_info_t {
+	struct mdp_input_layer *layer;
+	struct mdss_mdp_pipe_multirect_params multirect;
+};
+
+/*
+ * __layer_needs_src_split() - check needs source split configuration
+ * @layer: input layer
+ *
+ * A layer is treated as source-split when it is flagged async or when
+ * the hardware always requires source split (quirk).
+ *
+ * Return: true if the layer should be used as source split.
+ */
+static bool __layer_needs_src_split(struct mdp_input_layer *layer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return (layer->flags & MDP_LAYER_ASYNC) ||
+		mdss_has_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
+}
+
+/*
+ * __async_update_position_check() - validate an async position update
+ * @mfd: framebuffer device providing the screen resolution
+ * @pipe: pipe being repositioned; must already be in async mode
+ * @src: new source origin, checked against the pipe's buffer size
+ * @dst: new destination origin, checked against the screen bounds
+ *
+ * Return: 0 when the update fits, -EINVAL otherwise.
+ */
+static int __async_update_position_check(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe, struct mdp_point *src,
+		struct mdp_point *dst)
+{
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	u32 xres = var->xres;
+	u32 yres = var->yres;
+
+	if (!pipe->async_update
+		|| CHECK_LAYER_BOUNDS(src->x, pipe->src.w, pipe->img_width)
+		|| CHECK_LAYER_BOUNDS(src->y, pipe->src.h, pipe->img_height)
+		|| CHECK_LAYER_BOUNDS(dst->x, pipe->dst.w, xres)
+		|| CHECK_LAYER_BOUNDS(dst->y, pipe->dst.h, yres)) {
+		pr_err("invalid configs: async_update=%d, src:{%d,%d}, dst:{%d,%d}\n",
+			pipe->async_update, src->x, src->y, dst->x, dst->y);
+		pr_err("pipe:- src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
+			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * __cursor_layer_check() - validate a hardware-cursor layer
+ * @mfd: framebuffer device (unused beyond the signature)
+ * @layer: candidate cursor layer
+ *
+ * A cursor layer must sit at the dedicated cursor z-order stage, fit
+ * within max_cursor_size, be unscaled (src == dst dimensions), and the
+ * hardware must actually have cursor pipes.
+ *
+ * Return: 0 when valid, -EINVAL otherwise.
+ */
+static int __cursor_layer_check(struct msm_fb_data_type *mfd,
+		struct mdp_input_layer *layer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if ((layer->z_order != HW_CURSOR_STAGE(mdata))
+			|| layer->src_rect.w > mdata->max_cursor_size
+			|| layer->src_rect.h > mdata->max_cursor_size
+			|| layer->src_rect.w != layer->dst_rect.w
+			|| layer->src_rect.h != layer->dst_rect.h
+			|| !mdata->ncursor_pipes) {
+		pr_err("Incorrect cursor configs for pipe:%d, cursor_pipes:%d, z_order:%d\n",
+				layer->pipe_ndx, mdata->ncursor_pipes,
+				layer->z_order);
+		pr_err("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
+				layer->src_rect.x, layer->src_rect.y,
+				layer->src_rect.w, layer->src_rect.h,
+				layer->dst_rect.x, layer->dst_rect.y,
+				layer->dst_rect.w, layer->dst_rect.h);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * __layer_xres_check() - validate a layer's horizontal placement
+ * @mfd: framebuffer device, used to find the left mixer width
+ * @layer: layer whose dst_rect is checked (and possibly adjusted)
+ *
+ * Determines the horizontal bound the layer must fit in based on which
+ * mixer(s) it lands on.  Without source split, a layer starting on the
+ * right mixer has its dst x rebased to that mixer's origin (mutates
+ * @layer).  With source split, the bound spans both mixers.
+ *
+ * Return: 0 when in bounds, -EPERM if the needed mixer is absent,
+ * -EINVAL when the rect exceeds the bound.
+ */
+static int __layer_xres_check(struct msm_fb_data_type *mfd,
+	struct mdp_input_layer *layer)
+{
+	u32 xres = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	if (layer->dst_rect.x >= left_lm_w) {
+		if (mdata->has_src_split)
+			xres = left_lm_w;
+		else
+			layer->dst_rect.x -= left_lm_w;
+
+		if (ctl->mixer_right) {
+			xres += ctl->mixer_right->width;
+		} else {
+			pr_err("ov cannot be placed on right mixer\n");
+			return -EPERM;
+		}
+	} else {
+		if (ctl->mixer_left) {
+			xres = ctl->mixer_left->width;
+		} else {
+			pr_err("ov cannot be placed on left mixer\n");
+			return -EPERM;
+		}
+
+		if (mdata->has_src_split && ctl->mixer_right)
+			xres += ctl->mixer_right->width;
+	}
+
+	if (CHECK_LAYER_BOUNDS(layer->dst_rect.x, layer->dst_rect.w, xres)) {
+		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
+			layer->dst_rect.x, layer->dst_rect.w, xres);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * __layer_param_check() - validate one input layer's parameters
+ * @mfd: framebuffer device (mixer geometry, panel type, secure state)
+ * @layer: layer to validate
+ * @fmt: resolved format of the layer's buffer
+ * @rect_num: multirect rectangle index the layer claims on its pipe
+ *
+ * Checks, in order: secure-session permission on writeback panels,
+ * z-order range, pipe existence, source rect vs buffer (accounting for
+ * deinterlace doubling width / halving height), minimum destination
+ * size, decimation constraints, vertical bounds, scaling limits in both
+ * directions, BWC restrictions, deinterlace alignment, and even
+ * coordinates/dimensions for YUV sources.
+ *
+ * Return: 0 when valid; -EINVAL/-EPERM/-EOVERFLOW/-E2BIG identifying
+ * the specific violation.
+ */
+static int __layer_param_check(struct msm_fb_data_type *mfd,
+	struct mdp_input_layer *layer, struct mdss_mdp_format_params *fmt,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	u32 yres;
+	u32 min_src_size, min_dst_size = 1;
+	int content_secure;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	u32 src_w, src_h, dst_w, dst_h, width, height;
+
+	if (!ctl) {
+		pr_err("ctl is null\n");
+		return -EINVAL;
+	}
+
+	if (ctl->mixer_left) {
+		yres = ctl->mixer_left->height;
+	} else {
+		pr_debug("Using fb var screen infor for height\n");
+		yres = mfd->fbi->var.yres;
+	}
+
+	content_secure = (layer->flags & MDP_LAYER_SECURE_SESSION);
+	if (!ctl->is_secure && content_secure &&
+				 (mfd->panel.type == WRITEBACK_PANEL)) {
+		pr_debug("return due to security concerns\n");
+		return -EPERM;
+	}
+	/* YUV needs at least a 2x2 source for chroma subsampling */
+	min_src_size = fmt->is_yuv ? 2 : 1;
+
+	if (layer->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
+		pr_err("zorder %d out of range\n", layer->z_order);
+		return -EINVAL;
+	}
+
+	if (!mdss_mdp_pipe_search(mdata, layer->pipe_ndx, rect_num)) {
+		pr_err("layer pipe is invalid: 0x%x rect:%d\n",
+				layer->pipe_ndx, rect_num);
+		return -EINVAL;
+	}
+
+	width = layer->buffer.width;
+	height = layer->buffer.height;
+	/* deinterlacing reads two fields: effective buffer is 2w x h/2 */
+	if (layer->flags & MDP_LAYER_DEINTERLACE) {
+		width *= 2;
+		height /= 2;
+	}
+
+	if (layer->buffer.width > MAX_IMG_WIDTH ||
+	    layer->buffer.height > MAX_IMG_HEIGHT ||
+	    layer->src_rect.w < min_src_size ||
+	    layer->src_rect.h < min_src_size ||
+	    CHECK_LAYER_BOUNDS(layer->src_rect.x, layer->src_rect.w, width) ||
+	    CHECK_LAYER_BOUNDS(layer->src_rect.y, layer->src_rect.h, height)) {
+		pr_err("invalid source image img flag=%d wh=%dx%d rect=%d,%d,%d,%d\n",
+		       layer->flags, width, height,
+		       layer->src_rect.x, layer->src_rect.y,
+		       layer->src_rect.w, layer->src_rect.h);
+		return -EINVAL;
+	}
+
+	if (layer->dst_rect.w < min_dst_size ||
+		layer->dst_rect.h < min_dst_size) {
+		pr_err("invalid destination resolution (%dx%d)",
+		       layer->dst_rect.w, layer->dst_rect.h);
+		return -EINVAL;
+	}
+
+	if (layer->horz_deci || layer->vert_deci) {
+		if (!mdata->has_decimation) {
+			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
+			return -EINVAL;
+		} else if ((layer->horz_deci > MAX_DECIMATION) ||
+				(layer->vert_deci > MAX_DECIMATION)) {
+			pr_err("Invalid decimation factors horz=%d vert=%d\n",
+					layer->horz_deci, layer->vert_deci);
+			return -EINVAL;
+		} else if (layer->flags & MDP_LAYER_BWC) {
+			pr_err("Decimation can't be enabled with BWC\n");
+			return -EINVAL;
+		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
+			pr_err("Decimation can't be enabled with MacroTile format\n");
+			return -EINVAL;
+		}
+	}
+
+	if (CHECK_LAYER_BOUNDS(layer->dst_rect.y, layer->dst_rect.h, yres)) {
+		pr_err("invalid vertical destination: y=%d, h=%d, yres=%d\n",
+			layer->dst_rect.y, layer->dst_rect.h, yres);
+		return -EOVERFLOW;
+	}
+
+	dst_w = layer->dst_rect.w;
+	dst_h = layer->dst_rect.h;
+
+	/* decimation reduces the effective source before scaling checks */
+	src_w = layer->src_rect.w >> layer->horz_deci;
+	src_h = layer->src_rect.h >> layer->vert_deci;
+
+	if (src_w > mdata->max_mixer_width) {
+		pr_err("invalid source width=%d HDec=%d\n",
+			layer->src_rect.w, layer->horz_deci);
+		return -EINVAL;
+	}
+
+	if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
+		pr_err("too much upscaling Width %d->%d\n",
+			layer->src_rect.w, layer->dst_rect.w);
+		return -E2BIG;
+	}
+
+	if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
+		pr_err("too much upscaling. Height %d->%d\n",
+			layer->src_rect.h, layer->dst_rect.h);
+		return -E2BIG;
+	}
+
+	if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+		pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
+			src_w, layer->dst_rect.w, layer->horz_deci);
+		return -E2BIG;
+	}
+
+	if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+		pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
+			src_h, layer->dst_rect.h, layer->vert_deci);
+		return -E2BIG;
+	}
+
+	if (layer->flags & MDP_LAYER_BWC) {
+		if ((layer->buffer.width != layer->src_rect.w) ||
+			(layer->buffer.height != layer->src_rect.h)) {
+			pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
+				layer->buffer.width, layer->buffer.height,
+				layer->src_rect.w, layer->src_rect.h);
+			return -EINVAL;
+		}
+
+		if (layer->horz_deci || layer->vert_deci) {
+			pr_err("Can't enable BWC decode && decimate\n");
+			return -EINVAL;
+		}
+	}
+
+	if ((layer->flags & MDP_LAYER_DEINTERLACE) &&
+		!(layer->flags & SCALER_ENABLED)) {
+		if (layer->flags & MDP_SOURCE_ROTATED_90) {
+			if ((layer->src_rect.w % 4) != 0) {
+				pr_err("interlaced rect not h/4\n");
+				return -EINVAL;
+			}
+		} else if ((layer->src_rect.h % 4) != 0) {
+			pr_err("interlaced rect not h/4\n");
+			return -EINVAL;
+		}
+	}
+
+	if (fmt->is_yuv) {
+		if ((layer->src_rect.x & 0x1) || (layer->src_rect.y & 0x1) ||
+		    (layer->src_rect.w & 0x1) || (layer->src_rect.h & 0x1)) {
+			pr_err("invalid odd src resolution or coordinates\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* compare all reconfiguration parameter validation in this API */
+static int __validate_layer_reconfig(struct mdp_input_layer *layer,
+	struct mdss_mdp_pipe *pipe)
+{
+	int status = 0;
+	struct mdss_mdp_format_params *src_fmt;
+
+	/*
+	 * csc registers are not double buffered. It is not permitted
+	 * to change them on staged pipe with YUV layer.
+	 */
+	if (pipe->csc_coeff_set != layer->color_space) {
+		src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
+		/* only reject when both current and requested formats are YUV */
+		if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
+			status = -EPERM;
+			pr_err("csc change is not permitted on used pipe\n");
+		}
+	}
+
+	return status;
+}
+
+/*
+ * __validate_single_layer() - validate one input layer against MDP limits
+ *
+ * Checks, in order: pipe type from pipe_ndx, destination rect limits,
+ * buffer format (re-resolved to the rotator destination format when BWC
+ * is requested), cursor-specific limits, xres and generic parameter
+ * limits, mixer availability for @mixer_mux, the VIG requirement for
+ * YUV or non-scalar-RGB scaling, and the no-scaling rule for DMA and
+ * cursor pipes. Returns 0 on success or a negative errno.
+ */
+static int __validate_single_layer(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_validate_info_t *layer_info, u32 mixer_mux)
+{
+	u32 bwc_enabled;
+	int ret;
+	bool is_vig_needed = false;
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdp_input_layer *layer = layer_info->layer;
+	int ptype = get_pipe_type_from_ndx(layer->pipe_ndx);
+
+	if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
+		pr_err("Invalid pipe ndx=%d\n", layer->pipe_ndx);
+		return -EINVAL;
+	}
+
+	if ((layer->dst_rect.w > mdata->max_mixer_width) ||
+		(layer->dst_rect.h > MAX_DST_H)) {
+		pr_err("exceeded max mixer supported resolution %dx%d\n",
+				layer->dst_rect.w, layer->dst_rect.h);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+	pr_debug("ctl=%u mux=%d z_order=%d flags=0x%x dst_x:%d\n",
+		mdp5_data->ctl->num, mixer_mux, layer->z_order,
+		layer->flags, layer->dst_rect.x);
+
+	fmt = mdss_mdp_get_format_params(layer->buffer.format);
+	if (!fmt) {
+		pr_err("invalid layer format %d\n", layer->buffer.format);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+	bwc_enabled = layer->flags & MDP_LAYER_BWC;
+
+	if (bwc_enabled) {
+		if (!mdp5_data->mdata->has_bwc) {
+			pr_err("layer uses bwc format but MDP does not support it\n");
+			ret = -EINVAL;
+			goto exit_fail;
+		}
+
+		/* BWC layers carry the rotator output format; re-resolve it */
+		layer->buffer.format =
+			mdss_mdp_get_rotator_dst_format(
+				layer->buffer.format, false, bwc_enabled);
+		fmt = mdss_mdp_get_format_params(layer->buffer.format);
+		if (!fmt) {
+			pr_err("invalid layer format %d\n",
+				layer->buffer.format);
+			ret = -EINVAL;
+			goto exit_fail;
+		}
+	}
+
+	if (ptype == MDSS_MDP_PIPE_TYPE_CURSOR) {
+		ret = __cursor_layer_check(mfd, layer);
+		if (ret)
+			goto exit_fail;
+	}
+
+	ret = __layer_xres_check(mfd, layer);
+	if (ret)
+		goto exit_fail;
+
+	ret = __layer_param_check(mfd, layer, fmt, layer_info->multirect.num);
+	if (ret)
+		goto exit_fail;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	if (!mixer) {
+		pr_err("unable to get %s mixer\n",
+			(mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
+			"right" : "left");
+		ret = -EPERM;
+		goto exit_fail;
+	}
+
+	/* YUV, or RGB scaling on targets without scalar RGB pipes, needs VIG */
+	if (fmt->is_yuv || (mdata->has_non_scalar_rgb &&
+		((layer->src_rect.w != layer->dst_rect.w) ||
+			(layer->src_rect.h != layer->dst_rect.h))))
+		is_vig_needed = true;
+
+	if (is_vig_needed && ptype != MDSS_MDP_PIPE_TYPE_VIG) {
+		pr_err("pipe is non-scalar ndx=%x\n", layer->pipe_ndx);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+	if (((ptype == MDSS_MDP_PIPE_TYPE_DMA) ||
+		(ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) &&
+		(layer->dst_rect.h != layer->src_rect.h ||
+		 layer->dst_rect.w != layer->src_rect.w)) {
+		pr_err("no scaling supported on dma/cursor pipe, pipe num:%d\n",
+				layer->pipe_ndx);
+		/*
+		 * NOTE(review): returns directly instead of "goto exit_fail";
+		 * harmless since the label only returns ret, but inconsistent
+		 * with the rest of this function.
+		 */
+		return -EINVAL;
+	}
+
+exit_fail:
+	return ret;
+}
+
+/*
+ * __configure_pipe_params() - program a SSPP pipe from an input layer
+ *
+ * Copies geometry, flags, decimation, alpha/blend and scaler state from
+ * the layer in @vinfo into @pipe, resolves right-blend/source-split
+ * staging for dual-LM targets, applies panel orientation flips, then
+ * sets up scaling/PP and reserves perf + SMP resources (the last two are
+ * skipped for cursor pipes). Returns 0 or a negative errno.
+ */
+static int __configure_pipe_params(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_validate_info_t *vinfo, struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer,
+	u32 mixer_mux)
+{
+	int ret = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 flags;
+	bool is_right_blend = false;
+
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdp_input_layer *layer = vinfo->layer;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	pipe->src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
+	if (!pipe->src_fmt || !mixer) {
+		pr_err("invalid layer format:%d or mixer:%pK\n",
+			layer->buffer.format, pipe->mixer_left);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	pipe->comp_ratio = layer->buffer.comp_ratio;
+
+	/* fold the panel's fixed orientation into the layer's flip flags */
+	if (mfd->panel_orientation)
+		layer->flags ^= mfd->panel_orientation;
+
+	pipe->mixer_left = mixer;
+	pipe->mfd = mfd;
+	pipe->play_cnt = 0;
+	pipe->flags = 0;
+
+	/* translate MDP_LAYER_* uapi flags into internal pipe flags */
+	if (layer->flags & MDP_LAYER_FLIP_LR)
+		pipe->flags = MDP_FLIP_LR;
+	if (layer->flags & MDP_LAYER_FLIP_UD)
+		pipe->flags |= MDP_FLIP_UD;
+	if (layer->flags & MDP_LAYER_SECURE_SESSION)
+		pipe->flags |= MDP_SECURE_OVERLAY_SESSION;
+	if (layer->flags & MDP_LAYER_SECURE_DISPLAY_SESSION)
+		pipe->flags |= MDP_SECURE_DISPLAY_OVERLAY_SESSION;
+	if (layer->flags & MDP_LAYER_SOLID_FILL)
+		pipe->flags |= MDP_SOLID_FILL;
+	if (layer->flags & MDP_LAYER_DEINTERLACE)
+		pipe->flags |= MDP_DEINTERLACE;
+	if (layer->flags & MDP_LAYER_BWC)
+		pipe->flags |= MDP_BWC_EN;
+	if (layer->flags & MDP_LAYER_PP)
+		pipe->flags |= MDP_OVERLAY_PP_CFG_EN;
+
+	pipe->is_fg = layer->flags & MDP_LAYER_FORGROUND;
+	/* hardware width/height fields are 14 bits wide */
+	pipe->img_width = layer->buffer.width & 0x3fff;
+	pipe->img_height = layer->buffer.height & 0x3fff;
+	pipe->src.x = layer->src_rect.x;
+	pipe->src.y = layer->src_rect.y;
+	pipe->src.w = layer->src_rect.w;
+	pipe->src.h = layer->src_rect.h;
+	pipe->dst.x = layer->dst_rect.x;
+	pipe->dst.y = layer->dst_rect.y;
+	pipe->dst.w = layer->dst_rect.w;
+	pipe->dst.h = layer->dst_rect.h;
+	pipe->horz_deci = layer->horz_deci;
+	pipe->vert_deci = layer->vert_deci;
+	pipe->bg_color = layer->bg_color;
+	pipe->alpha = layer->alpha;
+	pipe->transp = layer->transp_mask;
+	pipe->blend_op = layer->blend_op;
+	pipe->is_handed_off = false;
+	pipe->async_update = (layer->flags & MDP_LAYER_ASYNC) ? true : false;
+	pipe->csc_coeff_set = layer->color_space;
+
+	if (mixer->ctl) {
+		pipe->dst.x += mixer->ctl->border_x_off;
+		pipe->dst.y += mixer->ctl->border_y_off;
+		pr_debug("border{%d,%d}\n", mixer->ctl->border_x_off,
+				mixer->ctl->border_y_off);
+	}
+	pr_debug("src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n",
+		pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+		pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	if (layer->flags & SCALER_ENABLED)
+		memcpy(&pipe->scaler, layer->scale,
+				sizeof(struct mdp_scale_data_v2));
+
+	pipe->scaler.enable = (layer->flags & SCALER_ENABLED);
+
+	flags = pipe->flags;
+	if (is_single_layer)
+		flags |= PERF_CALC_PIPE_SINGLE_LAYER;
+
+	/*
+	 * async update is allowed only in video mode panels with single LM
+	 * or dual LM with src_split enabled.
+	 */
+	if (pipe->async_update && ((is_split_lm(mfd) && !mdata->has_src_split)
+			|| (!mdp5_data->ctl->is_video_mode))) {
+		pr_err("async update allowed only in video mode panel with src_split\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/*
+	 * unstage the pipe if it's current z_order does not match with new
+	 * z_order because client may only call the validate.
+	 */
+	if (pipe->mixer_stage != layer->z_order)
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+
+	/*
+	 * check if overlay span across two mixers and if source split is
+	 * available. If yes, enable src_split_req flag so that during mixer
+	 * staging, same pipe will be stagged on both layer mixers.
+	 */
+	if (mdata->has_src_split) {
+		is_right_blend = pipe->is_right_blend;
+		if (left_blend_pipe) {
+			if (pipe->priority <= left_blend_pipe->priority) {
+				pr_err("priority limitation. left:%d right%d\n",
+					left_blend_pipe->priority,
+					pipe->priority);
+				ret = -EPERM;
+				goto end;
+			} else {
+				pr_debug("pipe%d is a right_pipe\n", pipe->num);
+				is_right_blend = true;
+			}
+		} else if (pipe->is_right_blend) {
+			/*
+			 * pipe used to be right blend. So need to update mixer
+			 * configuration to remove it as a right blend.
+			 */
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+			is_right_blend = false;
+		}
+
+		if (is_split_lm(mfd) && __layer_needs_src_split(layer)) {
+			pipe->src_split_req = true;
+		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
+		    ((layer->dst_rect.x + layer->dst_rect.w) > mixer->width)) {
+			if (layer->dst_rect.x >= mixer->width) {
+				pr_err("%pS: err dst_x can't lie in right half",
+					__builtin_return_address(0));
+				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
+					layer->flags, layer->dst_rect.x,
+					layer->dst_rect.w, mixer->width);
+				ret = -EINVAL;
+				goto end;
+			} else {
+				pipe->src_split_req = true;
+			}
+		} else {
+			if (pipe->src_split_req) {
+				mdss_mdp_mixer_pipe_unstage(pipe,
+					pipe->mixer_right);
+				pipe->mixer_right = NULL;
+			}
+			pipe->src_split_req = false;
+		}
+		pipe->is_right_blend = is_right_blend;
+	}
+
+	pipe->multirect.mode = vinfo->multirect.mode;
+	pipe->mixer_stage = layer->z_order;
+
+	/* mirror destination rect for panels mounted flipped */
+	if (mfd->panel_orientation & MDP_FLIP_LR)
+		pipe->dst.x = pipe->mixer_left->width - pipe->dst.x -
+			pipe->dst.w;
+	if (mfd->panel_orientation & MDP_FLIP_UD)
+		pipe->dst.y = pipe->mixer_left->height - pipe->dst.y -
+			pipe->dst.h;
+
+	memcpy(&pipe->layer, layer, sizeof(struct mdp_input_layer));
+
+	mdss_mdp_overlay_set_chroma_sample(pipe);
+
+	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
+		pipe->blend_op = pipe->src_fmt->alpha_enable ?
+			BLEND_OP_PREMULTIPLIED : BLEND_OP_OPAQUE;
+
+	if (pipe->src_fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
+		!pipe->scaler.enable) {
+		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;
+
+		if (pipe->dst.x >= left_lm_w)
+			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
+		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
+	} else {
+		pipe->overfetch_disable = 0;
+	}
+
+	/*
+	 * When scaling is enabled src crop and image
+	 * width and height is modified by user
+	 */
+	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
+		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
+			pipe->src.x &= ~1;
+			pipe->src.w /= 2;
+			pipe->img_width /= 2;
+		} else {
+			pipe->src.h /= 2;
+			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
+			pipe->src.y &= ~1;
+		}
+	}
+
+	ret = mdss_mdp_overlay_setup_scaling(pipe);
+	if (ret) {
+		pr_err("scaling setup failed %d\n", ret);
+		goto end;
+	}
+
+	if (layer->flags & MDP_LAYER_PP) {
+		memcpy(&pipe->pp_cfg, layer->pp_info,
+				sizeof(struct mdp_overlay_pp_params));
+		ret = mdss_mdp_pp_sspp_config(pipe);
+		if (ret) {
+			pr_err("pp setup failed %d\n", ret);
+			goto end;
+		}
+	}
+
+	/* cursor pipes need no perf tuning or SMP reservation */
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
+		goto end;
+
+	ret = mdp_pipe_tune_perf(pipe, flags);
+	if (ret) {
+		pr_err("unable to satisfy performance. ret=%d\n", ret);
+		goto end;
+	}
+
+	ret = mdss_mdp_smp_reserve(pipe);
+	if (ret) {
+		pr_err("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
+			pipe->num, ret);
+		goto end;
+	}
+end:
+	return ret;
+}
+
+/*
+ * __create_fence() - create a release or retire fence for a commit
+ *
+ * @fence_type selects MDSS_MDP_RETIRE_FENCE or MDSS_MDP_RELEASE_FENCE.
+ * Retire fences on command-mode panels are taken from the vsync
+ * timeline (value derived from retire_cnt); all other fences come from
+ * the sync_pt_data timelines at @value. On success the fence fd is
+ * stored in *fence_fd. Returns the fence, an ERR_PTR() on setup
+ * failures, or NULL when the fd could not be allocated.
+ */
+static struct mdss_fence *__create_fence(struct msm_fb_data_type *mfd,
+	struct msm_sync_pt_data *sync_pt_data, u32 fence_type,
+	int *fence_fd, int value)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_fence *sync_fence = NULL;
+	char fence_name[32];
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	ctl = mdp5_data->ctl;
+	if (!ctl->ops.add_vsync_handler) {
+		pr_err("fb%d vsync pending first update\n", mfd->index);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_err("fb%d ctl power on failed\n", mfd->index);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (fence_type == MDSS_MDP_RETIRE_FENCE)
+		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
+			mfd->index);
+	else
+		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
+			mfd->index);
+
+	if ((fence_type == MDSS_MDP_RETIRE_FENCE) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)) {
+		/* cmd panels retire on vsync; use the vsync timeline */
+		if (mdp5_data->vsync_timeline) {
+			value = 1 + mdp5_data->retire_cnt++;
+			sync_fence = mdss_fb_sync_get_fence(
+				mdp5_data->vsync_timeline, fence_name,
+				value);
+		} else {
+			return ERR_PTR(-EPERM);
+		}
+	} else {
+		if (fence_type == MDSS_MDP_RETIRE_FENCE)
+			sync_fence = mdss_fb_sync_get_fence(
+					sync_pt_data->timeline_retire,
+					fence_name, value);
+		else
+			sync_fence = mdss_fb_sync_get_fence(
+					sync_pt_data->timeline,
+					fence_name, value);
+
+	}
+
+	if (IS_ERR_OR_NULL(sync_fence)) {
+		pr_err("%s: unable to retrieve release fence\n", fence_name);
+		goto end;
+	}
+
+	/* get fence fd */
+	*fence_fd = mdss_get_sync_fence_fd(sync_fence);
+	if (*fence_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
+			fence_name, *fence_fd);
+		mdss_put_sync_fence(sync_fence);
+		sync_fence = NULL;
+		goto end;
+	}
+	pr_debug("%s:val=%d\n", mdss_get_sync_fence_name(sync_fence), value);
+
+end:
+	return sync_fence;
+}
+
+/*
+ * __handle_buffer_fences() - copy sync fences and return release/retire
+ * fence to caller.
+ *
+ * This function copies all input sync fences to acquire fence array and
+ * returns release/retire fences to caller. It acts like buff_sync ioctl.
+ * On failure both commit fence fds are set to -1, all collected acquire
+ * fences are dropped, and a negative errno is returned.
+ */
+static int __handle_buffer_fences(struct msm_fb_data_type *mfd,
+	struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list)
+{
+	struct mdss_fence *fence, *release_fence, *retire_fence;
+	struct msm_sync_pt_data *sync_pt_data = NULL;
+	struct mdp_input_layer *layer;
+	int value;
+	/* "ret" holds negative error codes, so it must be signed (was u32) */
+	int ret = 0;
+	u32 acq_fen_count, i;
+	u32 layer_count = commit->input_layer_cnt;
+
+	sync_pt_data = &mfd->mdp_sync_pt_data;
+	if (!sync_pt_data) {
+		pr_err("sync point data are NULL\n");
+		return -EINVAL;
+	}
+
+	i = mdss_fb_wait_for_fence(sync_pt_data);
+	if (i > 0)
+		pr_warn("%s: waited on %d active fences\n",
+			sync_pt_data->fence_name, i);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	/* collect every layer's acquire fence fd into the acquire array */
+	for (i = 0, acq_fen_count = 0; i < layer_count; i++) {
+		layer = &layer_list[i];
+
+		if (layer->buffer.fence < 0)
+			continue;
+
+		fence = mdss_get_fd_sync_fence(layer->buffer.fence);
+		if (!fence) {
+			pr_err("%s: sync fence get failed! fd=%d\n",
+				sync_pt_data->fence_name, layer->buffer.fence);
+			ret = -EINVAL;
+			break;
+		}
+		sync_pt_data->acq_fen[acq_fen_count++] = fence;
+	}
+	sync_pt_data->acq_fen_cnt = acq_fen_count;
+	if (ret)
+		goto sync_fence_err;
+
+	value = sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+
+	release_fence = __create_fence(mfd, sync_pt_data,
+		MDSS_MDP_RELEASE_FENCE, &commit->release_fence, value);
+	if (IS_ERR_OR_NULL(release_fence)) {
+		pr_err("unable to retrieve release fence\n");
+		/* PTR_ERR(NULL) is 0; map a NULL fence to a real error code */
+		ret = release_fence ? PTR_ERR(release_fence) : -ENOMEM;
+		goto release_fence_err;
+	}
+
+	retire_fence = __create_fence(mfd, sync_pt_data,
+		MDSS_MDP_RETIRE_FENCE, &commit->retire_fence, value);
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		pr_err("unable to retrieve retire fence\n");
+		ret = retire_fence ? PTR_ERR(retire_fence) : -ENOMEM;
+		goto retire_fence_err;
+	}
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+	return ret;
+
+retire_fence_err:
+	put_unused_fd(commit->release_fence);
+	mdss_put_sync_fence(release_fence);
+release_fence_err:
+	commit->retire_fence = -1;
+	commit->release_fence = -1;
+sync_fence_err:
+	for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
+		mdss_put_sync_fence(sync_pt_data->acq_fen[i]);
+	sync_pt_data->acq_fen_cnt = 0;
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+
+	return ret;
+}
+
+/*
+ * __map_layer_buffer() - map input layer buffer
+ *
+ * This function maps input layer buffer. It supports only single layer
+ * buffer mapping right now. This is case for all formats including UBWC.
+ * Returns the mapped source data on success or an ERR_PTR() on failure.
+ */
+static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_validate_info_t *validate_info_list,
+	u32 layer_count)
+{
+	struct mdss_mdp_data *src_data;
+	struct mdp_input_layer *layer = NULL;
+	struct mdp_layer_buffer *buffer;
+	struct msmfb_data image;
+	int i, ret;
+	u32 flags;
+	struct mdss_mdp_validate_info_t *vitem;
+
+	/* locate the layer that was validated against this pipe/rect */
+	for (i = 0; i < layer_count; i++) {
+		vitem = &validate_info_list[i];
+		layer = vitem->layer;
+		if ((layer->pipe_ndx == pipe->ndx) &&
+		    (vitem->multirect.num == pipe->multirect.num))
+			break;
+	}
+
+	if (i == layer_count) {
+		pr_err("layer count index is out of bound\n");
+		src_data = ERR_PTR(-EINVAL);
+		goto end;
+	}
+
+	buffer = &layer->buffer;
+
+	/* solid fill pipes render a constant color and take no buffer */
+	if (pipe->flags & MDP_SOLID_FILL) {
+		pr_err("Unexpected buffer queue to a solid fill pipe\n");
+		src_data = ERR_PTR(-EINVAL);
+		goto end;
+	}
+
+	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+				MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+
+	if (buffer->planes[0].fd < 0) {
+		pr_err("invalid file descriptor for layer buffer\n");
+		src_data = ERR_PTR(-EINVAL);
+		goto end;
+	}
+
+	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	if (!src_data) {
+		pr_err("unable to allocate source buffer\n");
+		src_data = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+	memset(&image, 0, sizeof(image));
+
+	image.memory_id = buffer->planes[0].fd;
+	image.offset = buffer->planes[0].offset;
+	ret = mdss_mdp_data_get_and_validate_size(src_data, &image, 1,
+			flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
+			buffer);
+	if (ret)
+		goto end_buf_free;
+
+	src_data->num_planes = 1;
+	return src_data;
+
+end_buf_free:
+	mdss_mdp_overlay_buf_free(mfd, src_data);
+	src_data = ERR_PTR(ret);
+end:
+	return src_data;
+}
+
+/*
+ * __compare_layer_config() - check if a layer matches a staged pipe
+ *
+ * Compares every validation-relevant field of @validate against the
+ * layer previously stored on @pipe (buffer handle/offset/fences are
+ * deliberately excluded). Returns true when the configurations match,
+ * meaning re-validation of this layer can be skipped.
+ */
+static inline bool __compare_layer_config(struct mdp_input_layer *validate,
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_input_layer *layer = &pipe->layer;
+	bool status = true;
+
+	/* pipe->layer stores z_order offset by MDSS_MDP_STAGE_0 */
+	status = !memcmp(&validate->src_rect, &layer->src_rect,
+			sizeof(validate->src_rect)) &&
+		!memcmp(&validate->dst_rect, &layer->dst_rect,
+			sizeof(validate->dst_rect)) &&
+		validate->flags == layer->flags &&
+		validate->horz_deci == layer->horz_deci &&
+		validate->vert_deci == layer->vert_deci &&
+		validate->alpha == layer->alpha &&
+		validate->color_space == layer->color_space &&
+		validate->z_order == (layer->z_order - MDSS_MDP_STAGE_0) &&
+		validate->transp_mask == layer->transp_mask &&
+		validate->bg_color == layer->bg_color &&
+		validate->blend_op == layer->blend_op &&
+		validate->buffer.width == layer->buffer.width &&
+		validate->buffer.height == layer->buffer.height &&
+		validate->buffer.format == layer->buffer.format;
+
+	/* scaler data lives on the pipe, not in the stored layer copy */
+	if (status && (validate->flags & SCALER_ENABLED))
+		status = !memcmp(validate->scale, &pipe->scaler,
+				sizeof(pipe->scaler));
+
+	return status;
+}
+
+/*
+ * __find_layer_in_validate_q() - Search layer in validation queue
+ *
+ * This functions helps to skip validation for layers where only buffer is
+ * changing. For ex: video playback case. In order to skip validation, it
+ * compares all input layer params except buffer handle, offset, fences.
+ * Returns the matching staged pipe, or NULL if no identical config exists.
+ */
+static struct mdss_mdp_pipe *__find_layer_in_validate_q(
+	struct mdss_mdp_validate_info_t *vinfo,
+	struct mdss_overlay_private *mdp5_data)
+{
+	bool found = false;
+	struct mdss_mdp_pipe *pipe;
+	struct mdp_input_layer *layer = vinfo->layer;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		if ((pipe->ndx == layer->pipe_ndx) &&
+		    (pipe->multirect.num == vinfo->multirect.num)) {
+			/* same pipe/rect; usable only if config is identical */
+			if (__compare_layer_config(layer, pipe))
+				found = true;
+			break;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	return found ? pipe : NULL;
+}
+
+/*
+ * __find_pipe_in_list() - look up a pipe by ndx and multirect number
+ *
+ * Scans @head for a pipe matching @pipe_ndx and @rect_num; on a hit the
+ * pipe is stored in *out_pipe and true is returned. Caller must hold
+ * the list lock protecting @head.
+ */
+static bool __find_pipe_in_list(struct list_head *head,
+	int pipe_ndx, struct mdss_mdp_pipe **out_pipe,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe;
+
+	list_for_each_entry(pipe, head, list) {
+		if ((pipe_ndx == pipe->ndx) &&
+		    (rect_num == pipe->multirect.num)) {
+			*out_pipe = pipe;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Search pipe from destroy and cleanup list to avoid validation failure.
+ * It is caller responsibility to hold the list lock before calling this API.
+ * A pipe found on either list is moved back to the used list; cleanup
+ * pipes are additionally unstaged from both mixers first. Returns the
+ * reclaimed pipe or NULL if none matched.
+ */
+static struct mdss_mdp_pipe *__find_and_move_cleanup_pipe(
+	struct mdss_overlay_private *mdp5_data, u32 pipe_ndx,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	if (__find_pipe_in_list(&mdp5_data->pipes_destroy,
+				pipe_ndx, &pipe, rect_num)) {
+		pr_debug("reuse destroy pipe id:%d ndx:%d rect:%d\n",
+				pipe->num, pipe_ndx, rect_num);
+		list_move(&pipe->list, &mdp5_data->pipes_used);
+	} else if (__find_pipe_in_list(&mdp5_data->pipes_cleanup,
+				pipe_ndx, &pipe, rect_num)) {
+		pr_debug("reuse cleanup pipe id:%d ndx:%d rect:%d\n",
+				pipe->num, pipe_ndx, rect_num);
+		/* cleanup pipes may still be staged; detach before reuse */
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+		list_move(&pipe->list, &mdp5_data->pipes_used);
+	}
+
+	return pipe;
+}
+
+/*
+ * __assign_pipe_for_layer() - get a pipe for layer
+ *
+ * This function first searches the pipe from used list, cleanup list and
+ * destroy list. On successful search, it returns the same pipe for current
+ * layer. It also un-stage the pipe from current mixer for used, cleanup,
+ * destroy pipes if they switches the mixer. On failure search, it returns
+ * the null pipe. *pipe_q_type reports which queue (or a fresh assign)
+ * satisfied the request.
+ */
+static struct mdss_mdp_pipe *__assign_pipe_for_layer(
+	struct msm_fb_data_type *mfd,
+	struct mdss_mdp_mixer *mixer, u32 pipe_ndx,
+	enum layer_pipe_q *pipe_q_type,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	mutex_lock(&mdp5_data->list_lock);
+	__find_pipe_in_list(&mdp5_data->pipes_used, pipe_ndx, &pipe, rect_num);
+	if (IS_ERR_OR_NULL(pipe)) {
+		pipe = __find_and_move_cleanup_pipe(mdp5_data,
+				pipe_ndx, rect_num);
+		if (IS_ERR_OR_NULL(pipe))
+			*pipe_q_type = LAYER_USES_NEW_PIPE_Q;
+		else
+			*pipe_q_type = LAYER_USES_DESTROY_PIPE_Q;
+	} else {
+		*pipe_q_type = LAYER_USES_USED_PIPE_Q;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	/* found the pipe from used, destroy or cleanup list */
+	if (!IS_ERR_OR_NULL(pipe)) {
+		if (pipe->mixer_left != mixer) {
+			/* mixer switch is only legal within the same fb */
+			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
+				pr_err("Can't switch mixer %d->%d pnum %d!\n",
+					pipe->mixer_left->num, mixer->num,
+						pipe->num);
+				pipe = ERR_PTR(-EINVAL);
+				goto end;
+			}
+			pr_debug("switching pipe%d mixer %d->%d\n",
+					pipe->num,
+				pipe->mixer_left ? pipe->mixer_left->num : -1,
+					mixer->num);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			pipe->mixer_left = mixer;
+		}
+		goto end;
+	}
+
+	pipe = mdss_mdp_pipe_assign(mdata, mixer, pipe_ndx, rect_num);
+	if (IS_ERR_OR_NULL(pipe)) {
+		pr_err("error reserving pipe. pipe_ndx=0x%x rect_num=%d mfd ndx=%d\n",
+			pipe_ndx, rect_num, mfd->index);
+		goto end;
+	}
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_add(&pipe->list, &mdp5_data->pipes_used);
+	mutex_unlock(&mdp5_data->list_lock);
+
+end:
+	if (!IS_ERR_OR_NULL(pipe)) {
+		/* mark the pipe for reprogramming in this cycle */
+		pipe->dirty = false;
+		pipe->params_changed++;
+	}
+	return pipe;
+}
+
+/*
+ * __is_sd_state_valid() - validate secure display state
+ *
+ * This function checks if the current state of secrure display is valid,
+ * based on the new settings.
+ * For command mode panels, the sd state would be invalid if a non secure pipe
+ * comes and one of the below condition is met:
+ *	1) Secure Display is enabled for current client, and there is other
+		secure client.
+ *	2) Secure Display is disabled for current client, and there is other
+		secure client.
+ *	3) Secure pipes are already staged for the current client.
+ * For other panels, the sd state would be invalid if a non secure pipe comes
+ * and one of the below condition is met:
+ *	1) Secure Display is enabled for current or other client.
+ *	2) Secure pipes are already staged for the current client.
+ *
+ * Returns true when the requested mix of secure/non-secure pipes is
+ * allowed, false otherwise.
+ */
+static inline bool __is_sd_state_valid(uint32_t sd_pipes, uint32_t nonsd_pipes,
+	int panel_type, u32 sd_enabled)
+{
+	if (panel_type == MIPI_CMD_PANEL) {
+		if ((((mdss_get_sd_client_cnt() > 1) && sd_enabled) ||
+				(mdss_get_sd_client_cnt() && !sd_enabled) ||
+				sd_pipes)
+				&& nonsd_pipes)
+			return false;
+	} else {
+		/* non-cmd panels: any secure activity excludes non-secure */
+		if ((sd_pipes || mdss_get_sd_client_cnt()) && nonsd_pipes)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * __validate_secure_display() - validate secure display
+ *
+ * This function travers through used pipe list and checks if any pipe
+ * is with secure display enabled flag. It fails if client tries to stage
+ * unsecure content with secure display session. On success it also
+ * records the pending secure<->non-secure transition, if any, in
+ * mdp5_data->sd_transition_state.
+ */
+static int __validate_secure_display(struct mdss_overlay_private *mdp5_data)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	uint32_t sd_pipes = 0, nonsd_pipes = 0;
+	int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
+	int ret = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+			sd_pipes++;
+		else
+			nonsd_pipes++;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	pr_debug("pipe count:: secure display:%d non-secure:%d\n",
+		sd_pipes, nonsd_pipes);
+
+	mdp5_data->sd_transition_state = SD_TRANSITION_NONE;
+	if (!__is_sd_state_valid(sd_pipes, nonsd_pipes, panel_type,
+		mdp5_data->sd_enabled)) {
+		pr_err("non-secure layer validation request during secure display session\n");
+		pr_err(" secure client cnt:%d secure pipe cnt:%d non-secure pipe cnt:%d\n",
+			mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes);
+		ret = -EINVAL;
+	} else if (!mdp5_data->sd_enabled && sd_pipes) {
+		mdp5_data->sd_transition_state =
+			SD_TRANSITION_NON_SECURE_TO_SECURE;
+	} else if (mdp5_data->sd_enabled && !sd_pipes) {
+		mdp5_data->sd_transition_state =
+			SD_TRANSITION_SECURE_TO_NON_SECURE;
+	}
+	return ret;
+}
+
+/*
+ * __handle_free_list() - updates free pipe list
+ *
+ * This function travers through used pipe list and checks if any pipe
+ * is not staged in current validation cycle. It moves the pipe to cleanup
+ * list if no layer is attached for that pipe.
+ *
+ * This should be called after validation is successful for current cycle.
+ * Moving pipes before can affects staged pipe for previous cycle.
+ */
+static void __handle_free_list(struct mdss_overlay_private *mdp5_data,
+	struct mdss_mdp_validate_info_t *validate_info_list, u32 layer_count)
+{
+	int i;
+	struct mdp_input_layer *layer;
+	struct mdss_mdp_validate_info_t *vinfo;
+	struct mdss_mdp_pipe *pipe, *tmp;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		/* look for a validated layer targeting this pipe/rect */
+		for (i = 0; i < layer_count; i++) {
+			vinfo = &validate_info_list[i];
+			layer = vinfo->layer;
+
+			if ((pipe->ndx == layer->pipe_ndx) &&
+			    (pipe->multirect.num == vinfo->multirect.num))
+				break;
+		}
+
+		/*
+		 * if validate cycle is not attaching any layer for this
+		 * pipe then move it to cleanup list. It does overlay_unset
+		 * task.
+		 */
+		if (i == layer_count)
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+/*
+ * __multirect_validate_flip() - flip constraints for a multirect pair
+ *
+ * HFLIP is not supported at all with multirect, and both rects must
+ * agree on VFLIP. Returns true when the pair is acceptable.
+ */
+static bool __multirect_validate_flip(struct mdp_input_layer **layers,
+	size_t count)
+{
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	/* flip related validation */
+	if ((layers[0]->flags & MDP_LAYER_FLIP_LR) ||
+	    (layers[1]->flags & MDP_LAYER_FLIP_LR)) {
+		pr_err("multirect and HFLIP is not allowed. input layer flags=0x%x paired layer flags=0x%x\n",
+			layers[0]->flags, layers[1]->flags);
+		return false;
+	}
+	if ((layers[0]->flags & MDP_LAYER_FLIP_UD) !=
+	    (layers[1]->flags & MDP_LAYER_FLIP_UD)) {
+		/* fixed typo in diagnostic: "VLFIP" -> "VFLIP" */
+		pr_err("multirect VFLIP mismatch is not allowed\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * __multirect_validate_format() - format constraints for a multirect pair
+ *
+ * Both rects must be valid non-YUV formats with matching fetch mode;
+ * UBWC pairs must use the exact same format, linear pairs must match in
+ * bpp and 10-bit (dx) packing, and solid-fill must be set on both or
+ * neither. Returns true when the pair is acceptable.
+ */
+static bool __multirect_validate_format(struct mdp_input_layer **layers,
+	size_t count)
+{
+	struct mdss_mdp_format_params *rec0_fmt, *rec1_fmt;
+	bool is_ubwc;
+
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	/* format related validation */
+	rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
+	if (!rec0_fmt) {
+		pr_err("invalid input layer format %d\n",
+			layers[0]->buffer.format);
+		return false;
+	}
+	rec1_fmt = mdss_mdp_get_format_params(layers[1]->buffer.format);
+	if (!rec1_fmt) {
+		pr_err("invalid paired layer format %d\n",
+			layers[1]->buffer.format);
+		return false;
+	}
+	if (rec0_fmt->is_yuv || rec1_fmt->is_yuv) {
+		pr_err("multirect on YUV format is not supported. input=%d paired=%d\n",
+			rec0_fmt->is_yuv, rec1_fmt->is_yuv);
+		return false;
+	}
+	if (rec0_fmt->fetch_mode != rec1_fmt->fetch_mode) {
+		pr_err("multirect fetch_mode mismatch is not allowed. input=%d paired=%d\n",
+			rec0_fmt->fetch_mode, rec1_fmt->fetch_mode);
+		return false;
+	}
+	is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
+	if (is_ubwc && (rec0_fmt != rec1_fmt)) {
+		pr_err("multirect UBWC format mismatch is not allowed\n");
+		return false;
+	} else if (rec0_fmt->bpp != rec1_fmt->bpp) {
+		pr_err("multirect linear format bpp mismatch is not allowed. input=%d paired=%d\n",
+			rec0_fmt->bpp, rec1_fmt->bpp);
+		return false;
+	} else if (rec0_fmt->unpack_dx_format != rec1_fmt->unpack_dx_format) {
+		pr_err("multirect linear format 10bit vs 8bit mismatch is not allowed. input=%d paired=%d\n",
+			rec0_fmt->unpack_dx_format, rec1_fmt->unpack_dx_format);
+		return false;
+	}
+
+	if ((layers[0]->flags & MDP_LAYER_SOLID_FILL) !=
+	    (layers[1]->flags & MDP_LAYER_SOLID_FILL)) {
+		pr_err("solid fill mismatch between multirect layers\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * __multirect_validate_rects() - geometry constraints for a multirect pair
+ *
+ * Neither rect may scale (src and dst dimensions must match) and the
+ * two destination rects must not overlap. Returns true when the pair is
+ * acceptable.
+ */
+static bool __multirect_validate_rects(struct mdp_input_layer **layers,
+	size_t count)
+{
+	struct mdss_rect dst[MDSS_MDP_PIPE_MAX_RECTS];
+	int i;
+
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		if ((layers[i]->src_rect.w != layers[i]->dst_rect.w) ||
+		    (layers[i]->src_rect.h != layers[i]->dst_rect.h)) {
+			pr_err("multirect layers cannot have scaling: src: %dx%d dst: %dx%d\n",
+				layers[i]->src_rect.w, layers[i]->src_rect.h,
+				layers[i]->dst_rect.w, layers[i]->dst_rect.h);
+			return false;
+		}
+
+		dst[i] = (struct mdss_rect) {layers[i]->dst_rect.x,
+					     layers[i]->dst_rect.y,
+					     layers[i]->dst_rect.w,
+					     layers[i]->dst_rect.h};
+	}
+
+	/* resolution related validation */
+	if (mdss_rect_overlap_check(&dst[0], &dst[1])) {
+		/* fixed log args: last field of each rect is .h, not .y */
+		pr_err("multirect dst overlap is not allowed. input: %d,%d,%d,%d paired %d,%d,%d,%d\n",
+			dst[0].x, dst[0].y, dst[0].w, dst[0].h,
+			dst[1].x, dst[1].y, dst[1].w, dst[1].h);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * __multirect_validate_properties() - misc constraints for a multirect pair
+ *
+ * Async updates are not allowed with multirect, and the two rects must
+ * use distinct z_orders. Returns true when the pair is acceptable.
+ */
+static bool __multirect_validate_properties(struct mdp_input_layer **layers,
+	size_t count)
+{
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	if ((layers[0]->flags & MDP_LAYER_ASYNC) ||
+	    (layers[1]->flags & MDP_LAYER_ASYNC)) {
+		pr_err("ASYNC update is not allowed with multirect\n");
+		return false;
+	}
+
+	if (layers[0]->z_order == layers[1]->z_order) {
+		pr_err("multirect layers cannot have same z_order=%d\n",
+			layers[0]->z_order);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Table of pairwise checks a multirect layer pair must pass; every
+ * validator logs the specific violation and returns false on failure.
+ */
+static bool (*__multirect_validators[])(struct mdp_input_layer **layers,
+		size_t count) = {
+	__multirect_validate_flip,
+	__multirect_validate_format,
+	__multirect_validate_rects,
+	__multirect_validate_properties,
+};
+
+/*
+ * __multirect_layer_flags_to_mode() - map userspace layer flags to the
+ * internal multirect fetch mode.
+ *
+ * Returns MDSS_MDP_PIPE_MULTIRECT_{NONE,PARALLEL,SERIAL}, or -EINVAL when
+ * the parallel-mode flag is set without multirect being enabled.
+ */
+static inline int __multirect_layer_flags_to_mode(u32 flags)
+{
+	bool parallel = flags & MDP_LAYER_MULTIRECT_PARALLEL_MODE;
+
+	if (!(flags & MDP_LAYER_MULTIRECT_ENABLE)) {
+		if (parallel) {
+			pr_err("Invalid parallel mode flag set without multirect enabled\n");
+			return -EINVAL;
+		}
+		return MDSS_MDP_PIPE_MULTIRECT_NONE;
+	}
+
+	return parallel ? MDSS_MDP_PIPE_MULTIRECT_PARALLEL :
+			MDSS_MDP_PIPE_MULTIRECT_SERIAL;
+}
+
+/*
+ * __multirect_validate_mode() - mode-specific validation for a multirect pair
+ * @mfd:    framebuffer device the layers target
+ * @layers: array of exactly two paired input layers
+ * @count:  number of entries in @layers; only 2 is supported
+ *
+ * Serial (time-multiplexed) mode needs a vertical gap between the rects
+ * large enough to buffer two lines/tiles; parallel mode limits UBWC rect
+ * width and requires both rects to land on the same layer mixer.
+ *
+ * Returns 0 on success or -EINVAL on failure. (Previously some failure
+ * paths returned false, i.e. 0, which callers checking IS_ERR_VALUE()
+ * silently treated as success.)
+ */
+static int __multirect_validate_mode(struct msm_fb_data_type *mfd,
+		struct mdp_input_layer **layers,
+		size_t count)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_format_params *rec0_fmt;
+	bool is_ubwc;
+	int i, mode;
+	struct mdp_rect *dst[MDSS_MDP_PIPE_MAX_RECTS];
+
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++)
+		dst[i] = &layers[i]->dst_rect;
+
+	/* propagate flag-decoding errors instead of using -EINVAL as a mode */
+	mode = __multirect_layer_flags_to_mode(layers[0]->flags);
+	if (IS_ERR_VALUE((unsigned long)mode))
+		return mode;
+
+	/* format related validation */
+	rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
+	if (!rec0_fmt) {
+		pr_err("invalid input layer format %d\n",
+			layers[0]->buffer.format);
+		return -EINVAL;
+	}
+
+	is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
+
+	if (mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
+		int threshold, yoffset;
+
+		if (dst[0]->y < dst[1]->y)
+			yoffset = dst[1]->y - (dst[0]->y + dst[0]->h);
+		else if (dst[1]->y < dst[0]->y)
+			yoffset = dst[0]->y - (dst[1]->y + dst[1]->h);
+		else
+			yoffset = 0;
+
+		/*
+		 * time multiplexed is possible only if the y position of layers
+		 * is not overlapping and there is sufficient time to buffer
+		 * 2 lines/tiles. Otherwise use parallel fetch mode
+		 */
+		threshold = 2;
+		if (is_ubwc) {
+			struct mdss_mdp_format_params_ubwc *uf;
+
+			/* in ubwc all layers would need to be same format */
+			uf = (struct mdss_mdp_format_params_ubwc *)rec0_fmt;
+			threshold *= uf->micro.tile_height;
+		}
+
+		if (yoffset < threshold) {
+			pr_err("Unable to operate in serial fetch mode with yoffset=%d dst[0]=%d,%d dst[1]=%d,%d\n",
+				yoffset, dst[0]->y, dst[0]->h,
+				dst[1]->y, dst[1]->h);
+			return -EINVAL;
+		}
+	} else if (mode == MDSS_MDP_PIPE_MULTIRECT_PARALLEL) {
+		u32 left_lm_w, rec0_mixer, rec1_mixer;
+
+		/*
+		 * For UBWC, 5 lines worth of buffering is needed in to meet
+		 * the performance which requires 2560w*4bpp*5lines = 50KB,
+		 * where 2560 is max width. Now let's say pixel ram is fixed to
+		 * 50KB then in UBWC parellel fetch, maximum width of each
+		 * rectangle would be 2560/2 = 1280.
+		 *
+		 * For Linear, this restriction is avoided because maximum
+		 * buffering of 2 lines is enough which yields to
+		 * 2560w*4bpp*2lines=20KB. Based on this, we can have 2 max
+		 * width rectangles in parrellel fetch mode.
+		 */
+		if (is_ubwc &&
+			((dst[0]->w > (mdata->max_mixer_width / 2)) ||
+			(dst[1]->w > (mdata->max_mixer_width / 2)))) {
+			pr_err("in UBWC multirect parallel mode, max dst_w cannot be greater than %d. rec0_w=%d rec1_w=%d\n",
+				mdata->max_mixer_width / 2,
+				dst[0]->w, dst[1]->w);
+			return -EINVAL;
+		}
+
+		left_lm_w = left_lm_w_from_mfd(mfd);
+		if (dst[0]->x < left_lm_w) {
+			if (dst[0]->w > (left_lm_w - dst[0]->x)) {
+				pr_err("multirect parallel mode, rec0 dst (%d,%d) cannot cross lm boundary (%d)\n",
+					dst[0]->x, dst[0]->w, left_lm_w);
+				return -EINVAL;
+			}
+			rec0_mixer = MDSS_MDP_MIXER_MUX_LEFT;
+		} else {
+			rec0_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
+		}
+
+		if (dst[1]->x < left_lm_w) {
+			/* fixed: boundary check tested rec0 instead of rec1 */
+			if (dst[1]->w > (left_lm_w - dst[1]->x)) {
+				pr_err("multirect parallel mode, rec1 dst (%d,%d) cannot cross lm boundary (%d)\n",
+					dst[1]->x, dst[1]->w, left_lm_w);
+				return -EINVAL;
+			}
+			rec1_mixer = MDSS_MDP_MIXER_MUX_LEFT;
+		} else {
+			rec1_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
+		}
+
+		if (rec0_mixer != rec1_mixer) {
+			pr_err("multirect parallel mode mixer mismatch. rec0_mix=%d rec1_mix=%d\n",
+				rec0_mixer, rec1_mixer);
+			return -EINVAL;
+		}
+	} else {
+		/* NONE is invalid here: callers only reach this with a pair */
+		pr_err("Invalid multirect mode %d\n", mode);
+		return -EINVAL;
+	}
+
+	pr_debug("layer->pndx:%d mode=%d\n", layers[0]->pipe_ndx, mode);
+
+	return 0;
+}
+
+/*
+ * __update_multirect_info() - pair up layers sharing one pipe for multirect
+ * @mfd:                framebuffer device (currently unused here)
+ * @validate_info_list: per-layer validation info array to fill in
+ * @layer_list:         full layer list from userspace
+ * @ndx:                index of the layer to start pairing from
+ * @layer_cnt:          total number of layers in @layer_list
+ *
+ * Scans forward from @ndx for layers with the same pipe_ndx and links them
+ * into a rect chain via multirect.next, assigning rect numbers in order.
+ * Returns the number of rects found for this pipe (1 when multirect is
+ * disabled) or a negative error code.
+ */
+static int __update_multirect_info(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_validate_info_t *validate_info_list,
+		struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_validate_info_t *vinfo[MDSS_MDP_PIPE_MAX_RECTS];
+	int i, ptype, max_rects, mode;
+	int cnt = 1;
+
+	mode = __multirect_layer_flags_to_mode(layer_list[ndx].flags);
+	if (IS_ERR_VALUE((unsigned long)mode))
+		return mode;
+
+	pr_debug("layer #%d pipe_ndx=%d multirect mode=%d\n",
+		ndx, layer_list[ndx].pipe_ndx, mode);
+
+	/* rect 0 always describes the layer at @ndx itself */
+	vinfo[0] = &validate_info_list[ndx];
+	vinfo[0]->layer = &layer_list[ndx];
+	vinfo[0]->multirect.mode = mode;
+	vinfo[0]->multirect.num = MDSS_MDP_PIPE_RECT0;
+	vinfo[0]->multirect.next = NULL;
+
+	/* nothing to be done if multirect is disabled */
+	if (mode == MDSS_MDP_PIPE_MULTIRECT_NONE)
+		return cnt;
+
+	ptype = get_pipe_type_from_ndx(layer_list[ndx].pipe_ndx);
+	if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
+		pr_err("invalid pipe ndx %d\n", layer_list[ndx].pipe_ndx);
+		return -EINVAL;
+	}
+
+	/* pipes that don't advertise multirect support default to 1 rect */
+	max_rects = mdata->rects_per_sspp[ptype] ? : 1;
+
+	for (i = ndx + 1; i < layer_cnt; i++) {
+		if (layer_list[ndx].pipe_ndx == layer_list[i].pipe_ndx) {
+			if (cnt >= max_rects) {
+				pr_err("more than %d layers of type %d with same pipe_ndx=%d indexes=%d %d\n",
+					max_rects, ptype,
+					layer_list[ndx].pipe_ndx, ndx, i);
+				return -EINVAL;
+			}
+
+			mode = __multirect_layer_flags_to_mode(
+					layer_list[i].flags);
+			if (IS_ERR_VALUE((unsigned long)mode))
+				return mode;
+
+			/* all rects of one pipe must request the same mode */
+			if (mode != vinfo[0]->multirect.mode) {
+				pr_err("unable to set different multirect modes for pipe_ndx=%d (%d %d)\n",
+					layer_list[ndx].pipe_ndx, ndx, i);
+				return -EINVAL;
+			}
+
+			pr_debug("found matching pair for pipe_ndx=%d (%d %d)\n",
+				layer_list[i].pipe_ndx, ndx, i);
+
+			vinfo[cnt] = &validate_info_list[i];
+			vinfo[cnt]->multirect.num = cnt;
+			/* provisionally close the chain back to rect 0 ... */
+			vinfo[cnt]->multirect.next = vinfo[0]->layer;
+			vinfo[cnt]->multirect.mode = mode;
+			vinfo[cnt]->layer = &layer_list[i];
+
+			/* ... and link the previous rect forward to this one */
+			vinfo[cnt - 1]->multirect.next = vinfo[cnt]->layer;
+			cnt++;
+		}
+	}
+
+	/* multirect flags without a second rect in the list is a bad config */
+	if (cnt == 1) {
+		pr_err("multirect mode enabled but unable to find extra rects for pipe_ndx=%x\n",
+			layer_list[ndx].pipe_ndx);
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+/*
+ * __validate_multirect() - discover and validate a multirect pairing
+ * @mfd:                framebuffer device the layers target
+ * @validate_info_list: per-layer validation info array
+ * @layer_list:         full layer list from userspace
+ * @ndx:                index of the layer to pair from
+ * @layer_cnt:          total number of layers
+ *
+ * Builds the rect pairing via __update_multirect_info() and, when a pair
+ * exists, runs every pairwise validator plus the mode-specific checks.
+ * Returns 0 on success or a negative error code.
+ */
+static int __validate_multirect(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_validate_info_t *validate_info_list,
+	struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
+{
+	struct mdp_input_layer *pair[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	int rect_cnt, idx, ret;
+
+	rect_cnt = __update_multirect_info(mfd, validate_info_list,
+			layer_list, ndx, layer_cnt);
+	if (IS_ERR_VALUE((unsigned long)rect_cnt))
+		return rect_cnt;
+
+	/* single rect: nothing to cross-validate */
+	if (rect_cnt < 2)
+		return 0;
+
+	if (rect_cnt != 2) {
+		pr_err("unsupported multirect configuration, multirect cnt=%d\n",
+			rect_cnt);
+		return -EINVAL;
+	}
+
+	pair[0] = validate_info_list[ndx].layer;
+	pair[1] = validate_info_list[ndx].multirect.next;
+
+	for (idx = 0; idx < ARRAY_SIZE(__multirect_validators); idx++) {
+		if (!__multirect_validators[idx](pair, rect_cnt))
+			return -EINVAL;
+	}
+
+	ret = __multirect_validate_mode(mfd, pair, rect_cnt);
+	if (IS_ERR_VALUE((unsigned long)ret))
+		return ret;
+
+	return 0;
+}
+
+/*
+ * __validate_layers() - validate input layers
+ * @mfd: Framebuffer data structure for display
+ * @file: file handle to attach to successfully validated pipes
+ * @commit: Commit version-1 structure for display
+ *
+ * This function validates all input layers present in layer_list. In case
+ * of failure, it updates the "error_code" for failed layer. It is possible
+ * to find failed layer from layer_list based on "error_code".
+ */
+static int __validate_layers(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int ret, i = 0;
+	int rec_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	int rec_release_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	int rec_destroy_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	u32 left_lm_layers = 0, right_lm_layers = 0;
+	u32 left_cnt = 0, right_cnt = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 mixer_mux, dst_x;
+	int layer_count = commit->input_layer_cnt;
+
+	struct mdss_mdp_pipe *pipe = NULL, *tmp, *left_blend_pipe;
+	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = {0};
+	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = {0};
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdp_input_layer *layer, *prev_layer, *layer_list;
+	struct mdss_mdp_validate_info_t *validate_info_list = NULL;
+	bool is_single_layer = false, force_validate;
+	enum layer_pipe_q pipe_q_type;
+	enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
+	enum mdss_mdp_pipe_rect rect_num;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	/* null commit: skip straight to cleanup/secure-state handling */
+	if (!layer_count)
+		goto validate_skip;
+
+	layer_list = commit->input_layers;
+
+	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
+				     GFP_KERNEL);
+	if (!validate_info_list) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/*
+	 * First pass: count layers per mixer, resolve multirect pairing
+	 * and reject duplicate pipe usage within the same rect number.
+	 */
+	for (i = 0; i < layer_count; i++) {
+		if (layer_list[i].dst_rect.x >= left_lm_w)
+			right_lm_layers++;
+		else
+			left_lm_layers++;
+
+		if (right_lm_layers >= MAX_PIPES_PER_LM ||
+		    left_lm_layers >= MAX_PIPES_PER_LM) {
+			pr_err("too many pipes stagged mixer left: %d mixer right:%d\n",
+				left_lm_layers, right_lm_layers);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		/*
+		 * entries already filled in by an earlier layer's multirect
+		 * pairing are skipped; only unseen layers are resolved here
+		 */
+		if (!validate_info_list[i].layer) {
+			ret = __validate_multirect(mfd, validate_info_list,
+						   layer_list, i, layer_count);
+			if (ret) {
+				pr_err("error validating multirect config. ret=%d i=%d\n",
+					ret, i);
+				goto end;
+			}
+		}
+
+		rect_num = validate_info_list[i].multirect.num;
+		/* NOTE(review): WARN only; rect_num is still used as an index below */
+		WARN_ON(rect_num >= MDSS_MDP_PIPE_MAX_RECTS);
+
+		if (rec_ndx[rect_num] & layer_list[i].pipe_ndx) {
+			pr_err("duplicate layer found pipe_ndx=%d rect=%d (0x%x)\n",
+				layer_list[i].pipe_ndx, rect_num,
+				rec_ndx[rect_num]);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		rec_ndx[rect_num] |= layer_list[i].pipe_ndx;
+	}
+
+	/*
+	 * Force all layers to go through full validation after
+	 * dynamic resolution switch, immaterial of the configs in
+	 * the layer.
+	 */
+	mutex_lock(&mfd->switch_lock);
+	force_validate = (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED);
+	mutex_unlock(&mfd->switch_lock);
+
+	/* Second pass: assign and configure a pipe for every layer */
+	for (i = 0; i < layer_count; i++) {
+		enum layer_zorder_used z = LAYER_ZORDER_NONE;
+
+		layer = &layer_list[i];
+		dst_x = layer->dst_rect.x;
+		left_blend_pipe = NULL;
+
+		prev_layer = (i > 0) ? &layer_list[i - 1] : NULL;
+		/*
+		 * check if current layer is at same z_order as
+		 * previous one, and fail if any or both are async layers,
+		 * as async layers should have unique z_order.
+		 *
+		 * If it has same z_order and qualifies as a right blend,
+		 * pass a pointer to the pipe representing previous overlay or
+		 * in other terms left blend layer.
+		 *
+		 * Following logic of selecting left_blend has an inherent
+		 * assumption that layer list is sorted on dst_x within a
+		 * same z_order. Otherwise it will fail based on z_order checks.
+		 */
+		if (prev_layer && (prev_layer->z_order == layer->z_order)) {
+			struct mdp_rect *left = &prev_layer->dst_rect;
+			struct mdp_rect *right = &layer->dst_rect;
+
+			if ((layer->flags & MDP_LAYER_ASYNC)
+				|| (prev_layer->flags & MDP_LAYER_ASYNC)) {
+				ret = -EINVAL;
+				layer->error_code = ret;
+				pr_err("async layer should have unique z_order\n");
+				goto validate_exit;
+			}
+
+			/*
+			 * check if layer is right blend by checking it's
+			 * directly to the right.
+			 */
+			if (((left->x + left->w) == right->x) &&
+			    (left->y == right->y) && (left->h == right->h))
+				left_blend_pipe = pipe;
+
+			/*
+			 * if the layer is right at the left lm boundary and
+			 * src split is not required then right blend is not
+			 * required as it will lie only on the left mixer
+			 */
+			if (!__layer_needs_src_split(prev_layer) &&
+			    ((left->x + left->w) == left_lm_w))
+				left_blend_pipe = NULL;
+		}
+
+		/* classify which mixer(s) this layer's z-order slot occupies */
+		if (!is_split_lm(mfd) || __layer_needs_src_split(layer))
+			z = LAYER_ZORDER_BOTH;
+		else if (dst_x >= left_lm_w)
+			z = LAYER_ZORDER_RIGHT;
+		else if ((dst_x + layer->dst_rect.w) <= left_lm_w)
+			z = LAYER_ZORDER_LEFT;
+		else
+			z = LAYER_ZORDER_BOTH;
+
+		if (!left_blend_pipe && (layer->z_order >= MDSS_MDP_MAX_STAGE ||
+				(z & zorder_used[layer->z_order]))) {
+			pr_err("invalid z_order=%d or already in use %x\n",
+				layer->z_order, z);
+			ret = -EINVAL;
+			layer->error_code = ret;
+			goto validate_exit;
+		} else {
+			zorder_used[layer->z_order] |= z;
+		}
+
+		if ((layer->dst_rect.x < left_lm_w) ||
+		    __layer_needs_src_split(layer)) {
+			is_single_layer = (left_lm_layers == 1);
+			mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
+		} else {
+			is_single_layer = (right_lm_layers == 1);
+			mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
+		}
+
+		/**
+		 * search pipe in current used list to find if parameters
+		 * are same. validation can be skipped if only buffer handle
+		 * is changed.
+		 */
+		pipe = (force_validate) ? NULL :
+				__find_layer_in_validate_q(
+					&validate_info_list[i], mdp5_data);
+		if (pipe) {
+			if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+				right_plist[right_cnt++] = pipe;
+			else
+				left_plist[left_cnt++] = pipe;
+
+			if (layer->flags & MDP_LAYER_PP) {
+				memcpy(&pipe->pp_cfg, layer->pp_info,
+					sizeof(struct mdp_overlay_pp_params));
+				ret = mdss_mdp_pp_sspp_config(pipe);
+				if (ret)
+					pr_err("pp setup failed %d\n", ret);
+				else
+					pipe->params_changed++;
+			}
+			pipe->dirty = false;
+			continue;
+		}
+
+		mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+		if (!mixer) {
+			pr_err("unable to get %s mixer\n",
+				(mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
+				"right" : "left");
+			ret = -EINVAL;
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		/* convert to internal stage number; restored after config */
+		layer->z_order += MDSS_MDP_STAGE_0;
+		ret = __validate_single_layer(mfd, &validate_info_list[i],
+			mixer_mux);
+		if (ret) {
+			pr_err("layer:%d validation failed ret=%d\n", i, ret);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		rect_num = validate_info_list[i].multirect.num;
+
+		pipe = __assign_pipe_for_layer(mfd, mixer, layer->pipe_ndx,
+			&pipe_q_type, rect_num);
+		if (IS_ERR_OR_NULL(pipe)) {
+			pr_err("error assigning pipe id=0x%x rc:%ld\n",
+				layer->pipe_ndx, PTR_ERR(pipe));
+			ret = PTR_ERR(pipe);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		/* remember per-rect which pipes to unwind on failure */
+		if (pipe_q_type == LAYER_USES_NEW_PIPE_Q)
+			rec_release_ndx[rect_num] |= pipe->ndx;
+		if (pipe_q_type == LAYER_USES_DESTROY_PIPE_Q)
+			rec_destroy_ndx[rect_num] |= pipe->ndx;
+
+		ret = mdss_mdp_pipe_map(pipe);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_err("Unable to map used pipe%d ndx=%x\n",
+				pipe->num, pipe->ndx);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		if (pipe_q_type == LAYER_USES_USED_PIPE_Q) {
+			/*
+			 * reconfig is allowed on new/destroy pipes. Only used
+			 * pipe needs this extra validation.
+			 */
+			ret = __validate_layer_reconfig(layer, pipe);
+			if (ret) {
+				pr_err("layer reconfig validation failed=%d\n",
+					ret);
+				mdss_mdp_pipe_unmap(pipe);
+				layer->error_code = ret;
+				goto validate_exit;
+			}
+		}
+
+		ret = __configure_pipe_params(mfd, &validate_info_list[i], pipe,
+			left_blend_pipe, is_single_layer, mixer_mux);
+		if (ret) {
+			pr_err("configure pipe param failed: pipe index= %d\n",
+				pipe->ndx);
+			mdss_mdp_pipe_unmap(pipe);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		mdss_mdp_pipe_unmap(pipe);
+
+		/* keep the original copy of dst_x */
+		pipe->layer.dst_rect.x = layer->dst_rect.x = dst_x;
+
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+			right_plist[right_cnt++] = pipe;
+		else
+			left_plist[left_cnt++] = pipe;
+
+		pr_debug("id:0x%x flags:0x%x dst_x:%d\n",
+			layer->pipe_ndx, layer->flags, layer->dst_rect.x);
+		layer->z_order -= MDSS_MDP_STAGE_0;
+	}
+
+	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+		right_plist, right_cnt);
+	if (ret) {
+		pr_err("bw validation check failed: %d\n", ret);
+		goto validate_exit;
+	}
+
+validate_skip:
+	__handle_free_list(mdp5_data, validate_info_list, layer_count);
+
+	ret = __validate_secure_display(mdp5_data);
+
+validate_exit:
+	pr_debug("err=%d total_layer:%d left:%d right:%d rec0_rel_ndx=0x%x rec1_rel_ndx=0x%x rec0_destroy_ndx=0x%x rec1_destroy_ndx=0x%x processed=%d\n",
+		ret, layer_count, left_lm_layers, right_lm_layers,
+		rec_release_ndx[0], rec_release_ndx[1],
+		rec_destroy_ndx[0], rec_destroy_ndx[1], i);
+	MDSS_XLOG(rec_ndx[0], rec_ndx[1], layer_count,
+		left_lm_layers, right_lm_layers,
+		rec_release_ndx[0], rec_release_ndx[1],
+		rec_destroy_ndx[0], rec_destroy_ndx[1], ret);
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			/* failure: destroy newly acquired pipes ... */
+			if (((pipe->ndx & rec_release_ndx[0]) &&
+			     (pipe->multirect.num == 0)) ||
+			    ((pipe->ndx & rec_release_ndx[1]) &&
+			     (pipe->multirect.num == 1))) {
+				mdss_mdp_smp_unreserve(pipe);
+				pipe->params_changed = 0;
+				pipe->dirty = true;
+				if (!list_empty(&pipe->list))
+					list_del_init(&pipe->list);
+				mdss_mdp_pipe_destroy(pipe);
+			} else if (((pipe->ndx & rec_destroy_ndx[0]) &&
+				    (pipe->multirect.num == 0)) ||
+				   ((pipe->ndx & rec_destroy_ndx[1]) &&
+				    (pipe->multirect.num == 1))) {
+				/*
+				 * cleanup/destroy list pipes should move back
+				 * to destroy list. Next/current kickoff cycle
+				 * will release the pipe because validate also
+				 * acquires ov_lock.
+				 */
+				list_move(&pipe->list,
+					&mdp5_data->pipes_destroy);
+			}
+		} else {
+			/* success: tag the pipe with the owning file handle */
+			pipe->file = file;
+			pr_debug("file pointer attached with pipe is %pK\n",
+				file);
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+end:
+	kfree(validate_info_list);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	pr_debug("fb%d validated layers =%d\n", mfd->index, i);
+
+	return ret;
+}
+
+/*
+ * __parse_frc_info() - parse frc info from userspace
+ * @mdp5_data: mdss data per FB device
+ * @input_frc: frc info from user space
+ *
+ * This function fills the FRC info of current device which will be used
+ * during following kickoff.
+ */
+static void __parse_frc_info(struct mdss_overlay_private *mdp5_data,
+	struct mdp_frc_info *input_frc)
+{
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
+
+	if (input_frc->flags & MDP_VIDEO_FRC_ENABLE) {
+		struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+		/* frc_fsm->enable still holds the previous state here */
+		if (!frc_fsm->enable) {
+			/* init frc_fsm when first entry */
+			mdss_mdp_frc_fsm_init_state(frc_fsm);
+			/* keep vsync on when FRC is enabled */
+			ctl->ops.add_vsync_handler(ctl,
+				&ctl->frc_vsync_handler);
+		}
+
+		frc_info->cur_frc.frame_cnt = input_frc->frame_cnt;
+		frc_info->cur_frc.timestamp = input_frc->timestamp;
+	} else if (frc_fsm->enable) {
+		/* remove vsync handler when FRC is disabled */
+		ctl->ops.remove_vsync_handler(ctl, &ctl->frc_vsync_handler);
+	}
+
+	/* record the new state only after the edge transitions above */
+	frc_fsm->enable = input_frc->flags & MDP_VIDEO_FRC_ENABLE;
+
+	pr_debug("frc_enable=%d\n", frc_fsm->enable);
+}
+
+/*
+ * mdss_mdp_layer_pre_commit() - pre commit validation for input layers
+ * @mfd: Framebuffer data structure for display
+ * @file: file handle associated with the caller
+ * @commit: Commit version-1 structure for display
+ *
+ * This function checks if layers present in commit request are already
+ * validated or not. If there is mismatch in validate and commit layers
+ * then it validate all input layers again. On successful validation, it
+ * maps the input layer buffer and creates release/retire fences.
+ *
+ * This function is called from client context and can return the error.
+ */
+int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int ret, i;
+	int layer_count = commit->input_layer_cnt;
+	bool validate_failed = false;
+
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdp_input_layer *layer_list;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_data *src_data[MDSS_MDP_MAX_SSPP];
+	struct mdss_mdp_validate_info_t *validate_info_list;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -EINVAL;
+
+	layer_list = commit->input_layers;
+
+	/* handle null commit */
+	if (!layer_count) {
+		__handle_free_list(mdp5_data, NULL, layer_count);
+		/* Check for secure state transition. */
+		return __validate_secure_display(mdp5_data);
+	}
+
+	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
+				     GFP_KERNEL);
+	if (!validate_info_list)
+		return -ENOMEM;
+
+	/* rebuild multirect pairing for the committed layer list */
+	for (i = 0; i < layer_count; i++) {
+		/* entries already filled by an earlier pairing are skipped */
+		if (!validate_info_list[i].layer) {
+			ret = __update_multirect_info(mfd, validate_info_list,
+						      layer_list, i, layer_count);
+			if (IS_ERR_VALUE((unsigned long)ret)) {
+				pr_err("error updating multirect config. ret=%d i=%d\n",
+					ret, i);
+				goto end;
+			}
+		}
+	}
+
+	/* any layer without a matching validated pipe forces re-validation */
+	for (i = 0; i < layer_count; i++) {
+		pipe = __find_layer_in_validate_q(&validate_info_list[i],
+						  mdp5_data);
+		if (!pipe) {
+			validate_failed = true;
+			break;
+		}
+	}
+
+	if (validate_failed) {
+		ret = __validate_layers(mfd, file, commit);
+		if (ret) {
+			pr_err("__validate_layers failed. rc=%d\n", ret);
+			goto end;
+		}
+	} else {
+		/*
+		 * move unassigned pipes to cleanup list since commit
+		 * supports validate+commit operation.
+		 */
+		__handle_free_list(mdp5_data, validate_info_list, layer_count);
+	}
+
+	i = 0;
+
+	/* map layer buffers under list_lock; i counts mapped entries only */
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (pipe->flags & MDP_SOLID_FILL) {
+			/* solid fill has no buffer; slot is reused next pipe */
+			src_data[i] = NULL;
+			continue;
+		}
+		src_data[i] = __map_layer_buffer(mfd, pipe, validate_info_list,
+			layer_count);
+		if (IS_ERR_OR_NULL(src_data[i++])) {
+			i--;
+			mutex_unlock(&mdp5_data->list_lock);
+			ret = PTR_ERR(src_data[i]);
+			goto map_err;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto map_err;
+	}
+
+	if (commit->frc_info)
+		__parse_frc_info(mdp5_data, commit->frc_info);
+
+	ret = __handle_buffer_fences(mfd, commit, layer_list);
+
+map_err:
+	/* on failure, release every buffer mapped so far */
+	if (ret) {
+		mutex_lock(&mdp5_data->list_lock);
+		for (i--; i >= 0; i--)
+			if (src_data[i])
+				mdss_mdp_overlay_buf_free(mfd, src_data[i]);
+		mutex_unlock(&mdp5_data->list_lock);
+	}
+end:
+	kfree(validate_info_list);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_layer_atomic_validate() - validate input layers
+ * @mfd: Framebuffer data structure for display
+ * @file: file handle associated with the caller
+ * @commit: Commit version-1 structure for display
+ *
+ * Validates only the input layers received from the client; it does not
+ * perform any validation of the mdp_output_layer used by writeback
+ * displays.
+ */
+int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	struct mdss_overlay_private *mdp5_data;
+
+	/* reject a null framebuffer device or commit outright */
+	if (!mfd || !commit) {
+		pr_err("invalid input params\n");
+		return -EINVAL;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data || !mdp5_data->ctl) {
+		pr_err("invalid input params\n");
+		return -ENODEV;
+	}
+
+	/* validation requires the interface to be powered */
+	if (mdss_fb_is_power_off(mfd)) {
+		pr_err("display interface is in off state fb:%d\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	return __validate_layers(mfd, file, commit);
+}
+
+/*
+ * mdss_mdp_layer_pre_commit_wfd() - pre-commit for a writeback (WFD) display
+ * @mfd: Framebuffer data structure for display
+ * @file: file handle associated with the caller
+ * @commit: Commit version-1 structure for display
+ *
+ * Queues the output buffer (and its optional acquire fence), then runs the
+ * normal input-layer pre-commit. On failure the fence and queued output
+ * data are released in reverse order of acquisition.
+ */
+int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int rc, count;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_wfd *wfd = NULL;
+	struct mdp_output_layer *output_layer = NULL;
+	struct mdss_mdp_wfd_data *data = NULL;
+	struct mdss_fence *fence = NULL;
+	struct msm_sync_pt_data *sync_pt_data = NULL;
+
+	if (!mfd || !commit)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
+		pr_err("invalid wfd state\n");
+		return -ENODEV;
+	}
+
+	if (commit->output_layer) {
+		wfd = mdp5_data->wfd;
+		output_layer = commit->output_layer;
+
+		if (output_layer->buffer.plane_count > MAX_PLANES) {
+			pr_err("Output buffer plane_count exceeds MAX_PLANES limit:%d\n",
+				output_layer->buffer.plane_count);
+			return -EINVAL;
+		}
+
+		data = mdss_mdp_wfd_add_data(wfd, output_layer);
+		if (IS_ERR_OR_NULL(data))
+			return PTR_ERR(data);
+
+		/* negative fd means no acquire fence was supplied */
+		if (output_layer->buffer.fence >= 0) {
+			fence = mdss_get_fd_sync_fence(
+					output_layer->buffer.fence);
+			if (!fence) {
+				pr_err("fail to get output buffer fence\n");
+				rc = -EINVAL;
+				goto fence_get_err;
+			}
+		}
+	} else {
+		/* commit without output layer relies on a prior validate */
+		wfd = mdp5_data->wfd;
+		if (!wfd->ctl || !wfd->ctl->wb) {
+			pr_err("wfd commit with null out layer and no validate\n");
+			return -EINVAL;
+		}
+	}
+
+	rc = mdss_mdp_layer_pre_commit(mfd, file, commit);
+	if (rc) {
+		pr_err("fail to import input layer buffers. rc=%d\n", rc);
+		goto input_layer_err;
+	}
+
+	/* hand the acquire fence over to the sync-point machinery */
+	if (fence) {
+		sync_pt_data = &mfd->mdp_sync_pt_data;
+		mutex_lock(&sync_pt_data->sync_mutex);
+		count = sync_pt_data->acq_fen_cnt;
+
+		if (count >= MDP_MAX_FENCE_FD) {
+			pr_err("Reached maximum possible value for fence count\n");
+			mutex_unlock(&sync_pt_data->sync_mutex);
+			rc = -EINVAL;
+			goto input_layer_err;
+		}
+
+		sync_pt_data->acq_fen[count] = fence;
+		sync_pt_data->acq_fen_cnt++;
+		mutex_unlock(&sync_pt_data->sync_mutex);
+	}
+	return rc;
+
+input_layer_err:
+	if (fence)
+		mdss_put_sync_fence(fence);
+fence_get_err:
+	if (data)
+		mdss_mdp_wfd_remove_data(wfd, data);
+	return rc;
+}
+
+/*
+ * mdss_mdp_layer_atomic_validate_wfd() - validate a writeback (WFD) commit
+ * @mfd: Framebuffer data structure for display
+ * @file: file handle associated with the caller
+ * @commit: Commit version-1 structure for display
+ *
+ * Validates the mandatory output layer, prepares the writeback hardware,
+ * and then validates the input layers. Returns 0 on success or a negative
+ * error code.
+ */
+int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_wfd *wfd;
+	struct mdp_output_layer *output_layer;
+	int rc;
+
+	if (!mfd || !commit)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
+		pr_err("invalid wfd state\n");
+		return -ENODEV;
+	}
+
+	output_layer = commit->output_layer;
+	if (!output_layer) {
+		pr_err("no output layer defined\n");
+		return -EINVAL;
+	}
+
+	wfd = mdp5_data->wfd;
+
+	rc = mdss_mdp_wfd_validate(wfd, output_layer);
+	if (rc) {
+		pr_err("fail to validate the output layer = %d\n", rc);
+		return rc;
+	}
+
+	/* clocks must be on while programming writeback hardware */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	rc = mdss_mdp_wfd_setup(wfd, output_layer);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	if (rc) {
+		pr_err("fail to prepare wfd = %d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_mdp_layer_atomic_validate(mfd, file, commit);
+	if (rc)
+		pr_err("fail to validate the input layers = %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_async_position_update() - move already-staged pipes asynchronously
+ * @mfd: Framebuffer data structure for display
+ * @update_pos: list of per-pipe position updates from userspace
+ *
+ * For every async layer, looks up its pipe (RECT0 only) among the pipes in
+ * use, validates the new position, programs it, and finally flushes all
+ * touched pipes in one shot. Failing layers get their error_code set.
+ */
+int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
+		struct mdp_position_update *update_pos)
+{
+	int i, rc = 0;
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdp_async_layer *layer;
+	struct mdss_rect dst, src;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 flush_bits = 0, inputndx = 0;
+
+	/* keep MDP clocks on for the whole register-programming sequence */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	for (i = 0; i < update_pos->input_layer_cnt; i++) {
+		layer = &update_pos->input_layers[i];
+		mutex_lock(&mdp5_data->list_lock);
+		__find_pipe_in_list(&mdp5_data->pipes_used, layer->pipe_ndx,
+			&pipe, MDSS_MDP_PIPE_RECT0);
+		mutex_unlock(&mdp5_data->list_lock);
+		if (!pipe) {
+			pr_err("invalid pipe ndx=0x%x for async update\n",
+				layer->pipe_ndx);
+			rc = -ENODEV;
+			layer->error_code = rc;
+			goto done;
+		}
+
+		rc = __async_update_position_check(mfd, pipe, &layer->src,
+				&layer->dst);
+		if (rc) {
+			layer->error_code = rc;
+			goto done;
+		}
+
+		/* only the origin moves; width/height stay as staged */
+		src = (struct mdss_rect) {layer->src.x, layer->src.y,
+				pipe->src.w, pipe->src.h};
+		dst = (struct mdss_rect) {layer->dst.x, layer->dst.y,
+				pipe->src.w, pipe->src.h};
+
+		pr_debug("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
+				src.x, src.y, src.w, src.h,
+				dst.x, dst.y, dst.w, dst.h);
+
+		mdss_mdp_pipe_position_update(pipe, &src, &dst);
+
+		flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);
+		inputndx |= layer->pipe_ndx;
+	}
+	/* single flush for all updated pipes */
+	mdss_mdp_async_ctl_flush(mfd, flush_bits);
+
+done:
+	MDSS_XLOG(inputndx, update_pos->input_layer_cnt, flush_bits, rc);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
new file mode 100644
index 0000000..3144b6c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -0,0 +1,6899 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/kmemleak.h>
+#include <linux/kthread.h>
+#include <asm/div64.h>
+
+#include <soc/qcom/event_timer.h>
+#include <linux/msm-bus.h>
+#include "mdss.h"
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_smmu.h"
+#include "mdss_mdp_wfd.h"
+#include "mdss_dsi_clk.h"
+#include "mdss_sync.h"
+
+#define VSYNC_PERIOD 16
+#define BORDERFILL_NDX 0x0BF000BF
+#define CHECK_BOUNDS(offset, size, max_size) \
+ (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+#define IS_RIGHT_MIXER_OV(flags, dst_x, left_lm_w) \
+ ((flags & MDSS_MDP_RIGHT_MIXER) || (dst_x >= left_lm_w))
+
+#define BUF_POOL_SIZE 32
+
+#define DFPS_DATA_MAX_HFP 8192
+#define DFPS_DATA_MAX_HBP 8192
+#define DFPS_DATA_MAX_HPW 8192
+#define DFPS_DATA_MAX_FPS 0x7fffffff
+#define DFPS_DATA_MAX_CLK_RATE 250000
+
+static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
+static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
+static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
+static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd);
+static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
+ int mode, int dest_ctrl);
+static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
+ struct mdp_set_cfg *cfg);
+
+/*
+ * is_ov_right_blend() - check whether two rects form a left/right blend pair.
+ *
+ * True when @right_blend starts exactly where @left_blend ends, the pair
+ * does not split at the left layer-mixer boundary (@left_lm_w), the rects
+ * are distinct, and they share the same vertical extent.
+ */
+static inline bool is_ov_right_blend(struct mdp_rect *left_blend,
+	struct mdp_rect *right_blend, u32 left_lm_w)
+{
+	return (((left_blend->x + left_blend->w) == right_blend->x)	&&
+	       ((left_blend->x + left_blend->w) != left_lm_w)		&&
+	       (left_blend->x != right_blend->x)			&&
+	       (left_blend->y == right_blend->y)			&&
+	       (left_blend->h == right_blend->h));
+}
+
+/**
+ * __is_more_decimation_doable() -
+ * @pipe: pointer to pipe data structure
+ *
+ * if per pipe BW exceeds the limit and user
+ * has not requested decimation then return
+ * -E2BIG error back to user else try more
+ * decimation based on following table config.
+ *
+ * ----------------------------------------------------------
+ * error | split mode | src_split | v_deci |     action      |
+ * ------|------------|-----------|--------|-----------------|
+ *       |            |           |   00   | return error    |
+ *       |            |  enabled  |--------|-----------------|
+ *       |            |           |   >1   | more decimation |
+ *       |     yes    |-----------|--------|-----------------|
+ *       |            |           |   00   | return error    |
+ *       |            | disabled  |--------|-----------------|
+ *       |            |           |   >1   | return error    |
+ * E2BIG |------------|-----------|--------|-----------------|
+ *       |            |           |   00   | return error    |
+ *       |            |  enabled  |--------|-----------------|
+ *       |            |           |   >1   | more decimation |
+ *       |     no     |-----------|--------|-----------------|
+ *       |            |           |   00   | return error    |
+ *       |            | disabled  |--------|-----------------|
+ *       |            |           |   >1   | more decimation |
+ * ----------------------------------------------------------
+ */
+static inline bool __is_more_decimation_doable(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
+	struct msm_fb_data_type *mfd = pipe->mixer_left->ctl->mfd;
+
+	/* single mixer: decimation must already be in use to push further */
+	if (!mfd->split_mode && !pipe->vert_deci)
+		return false;
+	/* split mode needs src-split HW and existing vertical decimation */
+	else if (mfd->split_mode && (!mdata->has_src_split ||
+			(mdata->has_src_split && !pipe->vert_deci)))
+		return false;
+	else
+		return true;
+}
+
+/*
+ * __overlay_find_pipe() - look up a staged pipe by its index mask.
+ * @mfd: framebuffer device whose pipes_used list is searched
+ * @ndx: pipe index (ndx) to match
+ *
+ * Walks mdp5_data->pipes_used under list_lock. Returns the matching pipe
+ * or NULL if none is staged with that ndx (never an ERR_PTR).
+ */
+static struct mdss_mdp_pipe *__overlay_find_pipe(
+		struct msm_fb_data_type *mfd, u32 ndx)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *tmp, *pipe = NULL;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(tmp, &mdp5_data->pipes_used, list) {
+		if (tmp->ndx == ndx) {
+			pipe = tmp;
+			break;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	return pipe;
+}
+
+/*
+ * mdss_mdp_overlay_get() - return the stored request data for an overlay.
+ * @mfd: framebuffer device owning the overlay
+ * @req: in: req->id selects the pipe ndx; out: filled with the pipe's
+ *       last accepted request data
+ *
+ * Returns 0 on success or -ENODEV when no staged pipe matches req->id.
+ */
+static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	struct mdss_mdp_pipe *pipe;
+
+	pipe = __overlay_find_pipe(mfd, req->id);
+	if (!pipe) {
+		pr_err("invalid pipe ndx=%x\n", req->id);
+		/*
+		 * __overlay_find_pipe() returns NULL (never ERR_PTR) on a
+		 * miss, so -ENODEV is the only meaningful error here. The
+		 * previous "pipe ? PTR_ERR(pipe) : -ENODEV" ternary was
+		 * dead code inside this !pipe branch.
+		 */
+		return -ENODEV;
+	}
+
+	*req = pipe->req_data;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ov_xres_check() - validate/normalize the overlay's horizontal
+ * placement against the layer mixer(s).
+ * @mfd: framebuffer device
+ * @req: overlay request; dst_rect.x and flags may be rewritten here to
+ *       translate between legacy RIGHT_MIXER addressing and src-split
+ *       full-panel addressing
+ *
+ * Returns 0 on success, -EPERM when the needed mixer is absent, or
+ * -EOVERFLOW when dst_rect exceeds the computed horizontal extent.
+ */
+static int mdss_mdp_ov_xres_check(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req)
+{
+	u32 xres = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w)) {
+		if (mdata->has_src_split) {
+			xres = left_lm_w;
+
+			if (req->flags & MDSS_MDP_RIGHT_MIXER) {
+				pr_warn("invalid use of RIGHT_MIXER flag.\n");
+				/*
+				 * if chip-set is capable of source split then
+				 * all layers which are only on right LM should
+				 * have their x offset relative to left LM's
+				 * left-top or in other words relative to
+				 * panel width.
+				 * By modifying dst_x below, we are assuming
+				 * that client is running in legacy mode
+				 * chipset capable of source split.
+				 */
+				if (req->dst_rect.x < left_lm_w)
+					req->dst_rect.x += left_lm_w;
+
+				req->flags &= ~MDSS_MDP_RIGHT_MIXER;
+			}
+		} else if (req->dst_rect.x >= left_lm_w) {
+			/*
+			 * this is a step towards removing a reliance on
+			 * MDSS_MDP_RIGHT_MIXER flags. With the new src split
+			 * code, some clients of non-src-split chipsets have
+			 * stopped sending MDSS_MDP_RIGHT_MIXER flag and
+			 * modified their xres relative to full panel
+			 * dimensions. In such cases, we need to deduct left
+			 * layer mixer width before we program this HW.
+			 */
+			req->dst_rect.x -= left_lm_w;
+			req->flags |= MDSS_MDP_RIGHT_MIXER;
+		}
+
+		if (ctl->mixer_right) {
+			xres += ctl->mixer_right->width;
+		} else {
+			pr_err("ov cannot be placed on right mixer\n");
+			return -EPERM;
+		}
+	} else {
+		if (ctl->mixer_left) {
+			xres = ctl->mixer_left->width;
+		} else {
+			pr_err("ov cannot be placed on left mixer\n");
+			return -EPERM;
+		}
+
+		/* with src split a left-anchored layer may span both LMs */
+		if (mdata->has_src_split && ctl->mixer_right)
+			xres += ctl->mixer_right->width;
+	}
+
+	if (CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres)) {
+		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
+			req->dst_rect.x, req->dst_rect.w, xres);
+		return -EOVERFLOW;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_overlay_req_check() - validate an overlay request against panel
+ * and MDP hardware limits.
+ * @mfd: framebuffer device
+ * @req: overlay request to validate (not modified)
+ * @fmt: resolved source format parameters for req->src.format
+ *
+ * Checks security constraints, z-order range, cursor restrictions,
+ * src/dst rectangle bounds, decimation limits, scaling ratios, BWC and
+ * deinterlace constraints, and YUV alignment. Returns 0 when the request
+ * is acceptable or a negative errno describing the first violation.
+ */
+int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
+			       struct mdp_overlay *req,
+			       struct mdss_mdp_format_params *fmt)
+{
+	u32 yres;
+	u32 min_src_size, min_dst_size;
+	int content_secure;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	yres = mfd->fbi->var.yres;
+
+	/* secure content may not go to a non-secure writeback session */
+	content_secure = (req->flags & MDP_SECURE_OVERLAY_SESSION);
+	if (!ctl->is_secure && content_secure &&
+				 (mfd->panel.type == WRITEBACK_PANEL)) {
+		pr_debug("return due to security concerns\n");
+		return -EPERM;
+	}
+	/* older MDP revisions need larger minimum rect sizes */
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
+		min_src_size = fmt->is_yuv ? 2 : 1;
+		min_dst_size = 1;
+	} else {
+		min_src_size = fmt->is_yuv ? 10 : 5;
+		min_dst_size = 2;
+	}
+
+	if (req->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
+		pr_err("zorder %d out of range\n", req->z_order);
+		return -ERANGE;
+	}
+
+	/*
+	 * Cursor overlays are only supported for targets
+	 * with dedicated cursors within VP
+	 */
+	if ((req->pipe_type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
+		((req->z_order != HW_CURSOR_STAGE(mdata)) ||
+			!mdata->ncursor_pipes ||
+			(req->src_rect.w > mdata->max_cursor_size))) {
+		pr_err("Incorrect cursor overlay cursor_pipes=%d zorder=%d\n",
+			mdata->ncursor_pipes, req->z_order);
+		return -EINVAL;
+	}
+
+	if (req->src.width > MAX_IMG_WIDTH ||
+	    req->src.height > MAX_IMG_HEIGHT ||
+	    req->src_rect.w < min_src_size || req->src_rect.h < min_src_size ||
+	    CHECK_BOUNDS(req->src_rect.x, req->src_rect.w, req->src.width) ||
+	    CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height)) {
+		pr_err("invalid source image img wh=%dx%d rect=%d,%d,%d,%d\n",
+		       req->src.width, req->src.height,
+		       req->src_rect.x, req->src_rect.y,
+		       req->src_rect.w, req->src_rect.h);
+		return -EOVERFLOW;
+	}
+
+	if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
+		pr_err("invalid destination resolution (%dx%d)",
+		       req->dst_rect.w, req->dst_rect.h);
+		return -EOVERFLOW;
+	}
+
+	if (req->horz_deci || req->vert_deci) {
+		if (!mdata->has_decimation) {
+			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
+			return -EINVAL;
+		} else if ((req->horz_deci > MAX_DECIMATION) ||
+				(req->vert_deci > MAX_DECIMATION)) {
+			pr_err("Invalid decimation factors horz=%d vert=%d\n",
+					req->horz_deci, req->vert_deci);
+			return -EINVAL;
+		} else if (req->flags & MDP_BWC_EN) {
+			pr_err("Decimation can't be enabled with BWC\n");
+			return -EINVAL;
+		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
+			pr_err("Decimation can't be enabled with MacroTile format\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
+		u32 src_w, src_h, dst_w, dst_h;
+
+		if (CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres)) {
+			pr_err("invalid vertical destination: y=%d, h=%d\n",
+				req->dst_rect.y, req->dst_rect.h);
+			return -EOVERFLOW;
+		}
+
+		/* swap dst dimensions to compare against pre-rotation src */
+		if (req->flags & MDP_ROT_90) {
+			dst_h = req->dst_rect.w;
+			dst_w = req->dst_rect.h;
+		} else {
+			dst_w = req->dst_rect.w;
+			dst_h = req->dst_rect.h;
+		}
+
+		src_w = DECIMATED_DIMENSION(req->src_rect.w, req->horz_deci);
+		src_h = DECIMATED_DIMENSION(req->src_rect.h, req->vert_deci);
+
+		if (src_w > mdata->max_pipe_width) {
+			pr_err("invalid source width=%d HDec=%d\n",
+					req->src_rect.w, req->horz_deci);
+			return -EINVAL;
+		}
+
+		if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
+			pr_err("too much upscaling Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
+
+		if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
+			pr_err("too much upscaling. Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
+
+		if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
+			       src_w, req->dst_rect.w, req->horz_deci);
+			return -EINVAL;
+		}
+
+		if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
+			       src_h, req->dst_rect.h, req->vert_deci);
+			return -EINVAL;
+		}
+
+		if (req->flags & MDP_BWC_EN) {
+			if ((req->src.width != req->src_rect.w) ||
+			    (req->src.height != req->src_rect.h)) {
+				pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
+					req->src.width, req->src.height,
+					req->src_rect.w, req->src_rect.h);
+				return -EINVAL;
+			}
+
+			if ((req->flags & MDP_DECIMATION_EN) ||
+					req->vert_deci || req->horz_deci) {
+				pr_err("Can't enable BWC and decimation\n");
+				return -EINVAL;
+			}
+		}
+
+		if ((req->flags & MDP_DEINTERLACE) &&
+					!req->scale.enable_pxl_ext) {
+			if (req->flags & MDP_SOURCE_ROTATED_90) {
+				/* width here is the pre-rotation height */
+				if ((req->src_rect.w % 4) != 0) {
+					pr_err("interlaced rect not h/4\n");
+					return -EINVAL;
+				}
+			} else if ((req->src_rect.h % 4) != 0) {
+				pr_err("interlaced rect not h/4\n");
+				return -EINVAL;
+			}
+		}
+	} else {
+		if (req->flags & MDP_DEINTERLACE) {
+			if ((req->src_rect.h % 4) != 0) {
+				pr_err("interlaced rect h not multiple of 4\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (fmt->is_yuv) {
+		/* chroma subsampling requires even coordinates and sizes */
+		if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
+		    (req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
+			pr_err("invalid odd src resolution or coordinates\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdp_pipe_tune_perf() - iterate decimation until pipe perf fits HW budget.
+ * @pipe:  pipe whose clock/bandwidth requirements are evaluated
+ * @flags: extra PERF_CALC_PIPE_* flags from the caller
+ *
+ * Repeatedly computes the pipe's perf requirement; while it exceeds the
+ * max MDP clock or the per-pipe bandwidth limit, bumps vert_deci (when
+ * decimation is available and compatible with the pipe's mode) and tries
+ * again. Returns 0 once requirements fit, or -E2BIG (or the perf-check
+ * error) when no further decimation can help.
+ */
+int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
+	u32 flags)
+{
+	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
+	struct mdss_mdp_perf_params perf;
+	int rc;
+
+	memset(&perf, 0, sizeof(perf));
+
+	flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE |
+		PERF_CALC_PIPE_CALC_SMP_SIZE;
+
+	for (;;) {
+		rc = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL,
+			flags);
+
+		if (!rc && (perf.mdp_clk_rate <= mdata->max_mdp_clk_rate)) {
+			rc = mdss_mdp_perf_bw_check_pipe(&perf, pipe);
+			if (!rc) {
+				break;
+			} else if (rc == -E2BIG &&
+				   !__is_more_decimation_doable(pipe)) {
+				pr_debug("pipe%d exceeded per pipe BW\n",
+					 pipe->num);
+				return rc;
+			}
+		}
+
+		/*
+		 * if decimation is available try to reduce minimum clock rate
+		 * requirement by applying vertical decimation and reduce
+		 * mdp clock requirement
+		 */
+		if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
+			&& !pipe->bwc_mode && !pipe->scaler.enable &&
+			mdss_mdp_is_linear_format(pipe->src_fmt))
+			pipe->vert_deci++;
+		else
+			return -E2BIG;
+	}
+
+	return 0;
+}
+
+/*
+ * __mdss_mdp_validate_pxl_extn() - sanity-check user-supplied pixel
+ * extension data per plane.
+ * @pipe: pipe with scaler.enable set and QSEED2-style pixel extensions
+ *
+ * For each plane, verifies that the requested ROI plus left/right (and
+ * top/bottom) extension pixels exactly matches what the fetch/repeat
+ * programming will produce, and that overfetch stays within the source
+ * image. On mismatch disables the scaler and returns -EINVAL; returns 0
+ * when all planes are consistent.
+ */
+static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
+{
+	int plane;
+
+	for (plane = 0; plane < MAX_PLANES; plane++) {
+		u32 hor_req_pixels, hor_fetch_pixels;
+		u32 hor_ov_fetch, vert_ov_fetch;
+		u32 vert_req_pixels, vert_fetch_pixels;
+		u32 src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+		u32 src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+		/*
+		 * plane 1 and 2 are for chroma and are same. While configuring
+		 * HW, programming only one of the chroma components is
+		 * sufficient.
+		 */
+		if (plane == 2)
+			continue;
+
+		/*
+		 * For chroma plane, width is half for the following sub sampled
+		 * formats. Except in case of decimation, where hardware avoids
+		 * 1 line of decimation instead of downsampling.
+		 */
+		if (plane == 1 && !pipe->horz_deci &&
+		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))) {
+			src_w >>= 1;
+		}
+
+		if (plane == 1 && !pipe->vert_deci &&
+		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)))
+			src_h >>= 1;
+
+		hor_req_pixels = pipe->scaler.roi_w[plane] +
+			pipe->scaler.num_ext_pxls_left[plane] +
+			pipe->scaler.num_ext_pxls_right[plane];
+
+		/* fetch counts are in source lines, so scale by decimation */
+		hor_fetch_pixels = src_w +
+			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci) +
+			pipe->scaler.left_rpt[plane] +
+			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci) +
+			pipe->scaler.right_rpt[plane];
+
+		hor_ov_fetch = src_w +
+			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci)+
+			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci);
+
+		vert_req_pixels = pipe->scaler.num_ext_pxls_top[plane] +
+			pipe->scaler.num_ext_pxls_btm[plane];
+
+		vert_fetch_pixels =
+			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci) +
+			pipe->scaler.top_rpt[plane] +
+			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci)+
+			pipe->scaler.btm_rpt[plane];
+
+		vert_ov_fetch = src_h +
+			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci)+
+			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci);
+
+		if ((hor_req_pixels != hor_fetch_pixels) ||
+			(hor_ov_fetch > pipe->img_width) ||
+			(vert_req_pixels != vert_fetch_pixels) ||
+			(vert_ov_fetch > pipe->img_height)) {
+			pr_err("err: plane=%d h_req:%d h_fetch:%d v_req:%d v_fetch:%d\n",
+					plane,
+					hor_req_pixels, hor_fetch_pixels,
+					vert_req_pixels, vert_fetch_pixels);
+			pr_err("roi_w[%d]=%d, src_img:[%d, %d]\n",
+					plane, pipe->scaler.roi_w[plane],
+					pipe->img_width, pipe->img_height);
+			pipe->scaler.enable = 0;
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_overlay_setup_scaling() - configure scaler phase steps and
+ * pixel extension for a pipe.
+ * @pipe: pipe to configure; src/dst rects and decimation must be final
+ *
+ * If user-space already enabled the scaler, only validates its pixel
+ * extension data (non-QSEED3 HW). Otherwise clears the scaler state,
+ * computes horizontal/vertical phase steps and fills in the per-HW
+ * scaler configuration. Returns 0 on success, -ECANCELED when the
+ * vertical overflow should fall back to GPU composition, or another
+ * negative errno on calculation failure.
+ */
+int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
+{
+	u32 src;
+	int rc = 0;
+	struct mdss_data_type *mdata;
+
+	mdata = mdss_mdp_get_mdata();
+	if (pipe->scaler.enable) {
+		if (!test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
+			rc = __mdss_mdp_validate_pxl_extn(pipe);
+		return rc;
+	}
+
+	memset(&pipe->scaler, 0, sizeof(struct mdp_scale_data_v2));
+	src = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+	rc = mdss_mdp_calc_phase_step(src, pipe->dst.w,
+			&pipe->scaler.phase_step_x[0]);
+	if (rc == -EOVERFLOW) {
+		/* overflow on horizontal direction is acceptable */
+		rc = 0;
+	} else if (rc) {
+		pr_err("Horizontal scaling calculation failed=%d! %d->%d\n",
+				rc, src, pipe->dst.w);
+		return rc;
+	}
+
+	src = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+	rc = mdss_mdp_calc_phase_step(src, pipe->dst.h,
+			&pipe->scaler.phase_step_y[0]);
+
+	if ((rc == -EOVERFLOW) && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)) {
+		/* overflow on Qseed2 scaler is acceptable */
+		rc = 0;
+	} else if (rc == -EOVERFLOW) {
+		/* overflow expected and should fallback to GPU */
+		rc = -ECANCELED;
+	} else if (rc) {
+		pr_err("Vertical scaling calculation failed=%d! %d->%d\n",
+				rc, src, pipe->dst.h);
+	}
+
+	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
+		mdss_mdp_pipe_calc_qseed3_cfg(pipe);
+	else
+		mdss_mdp_pipe_calc_pixel_extn(pipe);
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_overlay_set_chroma_sample() - derive per-direction chroma
+ * subsampling flags from the pipe's source format.
+ *
+ * Sets chroma_sample_h/v for H2V1, H1V2 and 4:2:0 formats. Decimation in
+ * a direction cancels the corresponding chroma subsampling flag, since
+ * the decimated fetch already skips lines/pixels in that direction.
+ */
+inline void mdss_mdp_overlay_set_chroma_sample(
+	struct mdss_mdp_pipe *pipe)
+{
+	pipe->chroma_sample_v = pipe->chroma_sample_h = 0;
+
+	switch (pipe->src_fmt->chroma_sample) {
+	case MDSS_MDP_CHROMA_H1V2:
+		pipe->chroma_sample_v = 1;
+		break;
+	case MDSS_MDP_CHROMA_H2V1:
+		pipe->chroma_sample_h = 1;
+		break;
+	case MDSS_MDP_CHROMA_420:
+		pipe->chroma_sample_v = 1;
+		pipe->chroma_sample_h = 1;
+		break;
+	}
+	if (pipe->horz_deci)
+		pipe->chroma_sample_h = 0;
+	if (pipe->vert_deci)
+		pipe->chroma_sample_v = 0;
+}
+
+/*
+ * mdss_mdp_overlay_pipe_setup() - allocate/reuse and fully configure a
+ * pipe for an overlay request.
+ * @mfd:             framebuffer device
+ * @req:             overlay request; req->id selects reuse vs new
+ *                   (MSMFB_NEW_REQUEST), and is updated with the pipe ndx
+ * @ppipe:           out: configured pipe on success
+ * @left_blend_pipe: left half of a split blend pair, or NULL
+ * @is_single_layer: true when this is the only layer (perf hint)
+ *
+ * Validates the request, picks mixer and pipe type, allocates or looks
+ * up the pipe, then programs geometry, blend, scaling, post-processing
+ * and SMP reservation. On failure every overlay on this fb is marked
+ * dirty so the next commit re-validates. Returns 0 or a negative errno.
+ */
+int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
+	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer)
+{
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_mixer *mixer = NULL;
+	u32 pipe_type, mixer_mux;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	int ret;
+	u32 bwc_enabled;
+	u32 rot90;
+	bool is_vig_needed = false;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 flags = 0;
+
+	if (mdp5_data->ctl == NULL)
+		return -ENODEV;
+
+	if (req->flags & MDP_ROT_90) {
+		pr_err("unsupported inline rotation\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((req->dst_rect.w > mdata->max_mixer_width) ||
+		(req->dst_rect.h > MAX_DST_H)) {
+		pr_err("exceeded max mixer supported resolution %dx%d\n",
+				req->dst_rect.w, req->dst_rect.h);
+		return -EOVERFLOW;
+	}
+
+	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w))
+		mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
+	else
+		mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
+
+	pr_debug("ctl=%u req id=%x mux=%d z_order=%d flags=0x%x dst_x:%d\n",
+		mdp5_data->ctl->num, req->id, mixer_mux, req->z_order,
+		req->flags, req->dst_rect.x);
+
+	fmt = mdss_mdp_get_format_params(req->src.format);
+	if (!fmt) {
+		pr_err("invalid pipe format %d\n", req->src.format);
+		return -EINVAL;
+	}
+
+	bwc_enabled = req->flags & MDP_BWC_EN;
+	rot90 = req->flags & MDP_SOURCE_ROTATED_90;
+
+	/*
+	 * Always set yuv rotator output to pseudo planar.
+	 */
+	if (bwc_enabled || rot90) {
+		req->src.format =
+			mdss_mdp_get_rotator_dst_format(req->src.format, rot90,
+				bwc_enabled);
+		fmt = mdss_mdp_get_format_params(req->src.format);
+		if (!fmt) {
+			pr_err("invalid pipe format %d\n", req->src.format);
+			return -EINVAL;
+		}
+	}
+
+	ret = mdss_mdp_ov_xres_check(mfd, req);
+	if (ret)
+		return ret;
+
+	ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
+	if (ret)
+		return ret;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	if (!mixer) {
+		pr_err("unable to get mixer\n");
+		return -ENODEV;
+	}
+
+	/* non-scalar RGB HW forces scaled layers onto VIG pipes */
+	if ((mdata->has_non_scalar_rgb) &&
+		((req->src_rect.w != req->dst_rect.w) ||
+			(req->src_rect.h != req->dst_rect.h)))
+		is_vig_needed = true;
+
+	if (req->id == MSMFB_NEW_REQUEST) {
+		switch (req->pipe_type) {
+		case PIPE_TYPE_VIG:
+			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			break;
+		case PIPE_TYPE_RGB:
+			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			break;
+		case PIPE_TYPE_DMA:
+			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+			break;
+		case PIPE_TYPE_CURSOR:
+			pipe_type = MDSS_MDP_PIPE_TYPE_CURSOR;
+			break;
+		case PIPE_TYPE_AUTO:
+		default:
+			if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+				pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+			else if (fmt->is_yuv ||
+				(req->flags & MDP_OV_PIPE_SHARE) ||
+				is_vig_needed)
+				pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			else
+				pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			break;
+		}
+
+		pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);
+
+		/* RGB pipes can be used instead of DMA */
+		if (IS_ERR_OR_NULL(pipe) &&
+			(req->pipe_type == PIPE_TYPE_AUTO) &&
+			(pipe_type == MDSS_MDP_PIPE_TYPE_DMA)) {
+			pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
+				mfd->index, req->flags);
+			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+				left_blend_pipe);
+		}
+
+		/* VIG pipes can also support RGB format */
+		if (IS_ERR_OR_NULL(pipe) &&
+			(req->pipe_type == PIPE_TYPE_AUTO) &&
+			(pipe_type == MDSS_MDP_PIPE_TYPE_RGB)) {
+			pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
+				mfd->index, req->flags);
+			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+				left_blend_pipe);
+		}
+
+		if (IS_ERR(pipe)) {
+			return PTR_ERR(pipe);
+		} else if (!pipe) {
+			pr_err("error allocating pipe. flags=0x%x req->pipe_type=%d pipe_type=%d\n",
+				req->flags, req->pipe_type, pipe_type);
+			return -ENODEV;
+		}
+
+		ret = mdss_mdp_pipe_map(pipe);
+		if (ret) {
+			pr_err("unable to map pipe=%d\n", pipe->num);
+			return ret;
+		}
+
+		mutex_lock(&mdp5_data->list_lock);
+		list_add(&pipe->list, &mdp5_data->pipes_used);
+		mutex_unlock(&mdp5_data->list_lock);
+		pipe->mixer_left = mixer;
+		pipe->mfd = mfd;
+		pipe->play_cnt = 0;
+	} else {
+		pipe = __overlay_find_pipe(mfd, req->id);
+		if (!pipe) {
+			pr_err("invalid pipe ndx=%x\n", req->id);
+			return -ENODEV;
+		}
+
+		ret = mdss_mdp_pipe_map(pipe);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_err("Unable to map used pipe%d ndx=%x\n",
+					pipe->num, pipe->ndx);
+			return ret;
+		}
+
+		if (is_vig_needed && (pipe->type != MDSS_MDP_PIPE_TYPE_VIG)) {
+			pr_err("pipe is non-scalar ndx=%x\n", req->id);
+			ret = -EINVAL;
+			goto exit_fail;
+		}
+
+		if ((pipe->mixer_left != mixer) &&
+				(pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR)) {
+			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
+				pr_err("Can't switch mixer %d->%d pnum %d!\n",
+					pipe->mixer_left->num, mixer->num,
+						pipe->num);
+				ret = -EINVAL;
+				goto exit_fail;
+			}
+			pr_debug("switching pipe%d mixer %d->%d stage%d\n",
+				pipe->num,
+				pipe->mixer_left ? pipe->mixer_left->num : -1,
+				mixer->num, req->z_order);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			pipe->mixer_left = mixer;
+		}
+	}
+
+	if (left_blend_pipe) {
+		if (pipe->priority <= left_blend_pipe->priority) {
+			pr_err("priority limitation. left:%d right%d\n",
+				left_blend_pipe->priority, pipe->priority);
+			ret = -EBADSLT;
+			goto exit_fail;
+		} else {
+			pr_debug("pipe%d is a right_pipe\n", pipe->num);
+			pipe->is_right_blend = true;
+		}
+	} else if (pipe->is_right_blend) {
+		/*
+		 * pipe used to be right blend need to update mixer
+		 * configuration to remove it as a right blend
+		 */
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+		pipe->is_right_blend = false;
+	}
+
+	if (mfd->panel_orientation)
+		req->flags ^= mfd->panel_orientation;
+
+	req->priority = pipe->priority;
+	/* identical, clean request: nothing to reprogram */
+	if (!pipe->dirty && !memcmp(req, &pipe->req_data, sizeof(*req))) {
+		pr_debug("skipping pipe_reconfiguration\n");
+		goto skip_reconfigure;
+	}
+
+	pipe->flags = req->flags;
+	if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
+		pr_err("BWC is not supported in MDP version %x\n",
+			mdp5_data->mdata->mdp_rev);
+		pipe->bwc_mode = 0;
+	} else {
+		pipe->bwc_mode = pipe->mixer_left->rotator_mode ?
+			0 : (bwc_enabled ? 1 : 0);
+	}
+	pipe->img_width = req->src.width & 0x3fff;
+	pipe->img_height = req->src.height & 0x3fff;
+	pipe->src.x = req->src_rect.x;
+	pipe->src.y = req->src_rect.y;
+	pipe->src.w = req->src_rect.w;
+	pipe->src.h = req->src_rect.h;
+	pipe->dst.x = req->dst_rect.x;
+	pipe->dst.y = req->dst_rect.y;
+	pipe->dst.w = req->dst_rect.w;
+	pipe->dst.h = req->dst_rect.h;
+
+	if (mixer->ctl) {
+		pipe->dst.x += mixer->ctl->border_x_off;
+		pipe->dst.y += mixer->ctl->border_y_off;
+	}
+
+	/* mirror destination when the panel is mounted flipped */
+	if (mfd->panel_orientation & MDP_FLIP_LR)
+		pipe->dst.x = pipe->mixer_left->width
+			- pipe->dst.x - pipe->dst.w;
+	if (mfd->panel_orientation & MDP_FLIP_UD)
+		pipe->dst.y = pipe->mixer_left->height
+			- pipe->dst.y - pipe->dst.h;
+
+	pipe->horz_deci = req->horz_deci;
+	pipe->vert_deci = req->vert_deci;
+
+	/*
+	 * check if overlay span across two mixers and if source split is
+	 * available. If yes, enable src_split_req flag so that during mixer
+	 * staging, same pipe will be stagged on both layer mixers.
+	 */
+	if (mdata->has_src_split) {
+		if ((pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
+				is_split_lm(mfd)) {
+			pipe->src_split_req = true;
+		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
+		    ((req->dst_rect.x + req->dst_rect.w) > mixer->width)) {
+			if (req->dst_rect.x >= mixer->width) {
+				pr_err("%pS: err dst_x can't lie in right half",
+					__builtin_return_address(0));
+				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
+					req->flags, req->dst_rect.x,
+					req->dst_rect.w, mixer->width);
+				ret = -EINVAL;
+				goto exit_fail;
+			} else {
+				pipe->src_split_req = true;
+			}
+		} else {
+			if (pipe->src_split_req) {
+				mdss_mdp_mixer_pipe_unstage(pipe,
+					pipe->mixer_right);
+				pipe->mixer_right = NULL;
+			}
+			pipe->src_split_req = false;
+		}
+	}
+
+	memcpy(&pipe->scaler, &req->scale, sizeof(struct mdp_scale_data));
+	pipe->src_fmt = fmt;
+	mdss_mdp_overlay_set_chroma_sample(pipe);
+
+	pipe->mixer_stage = req->z_order;
+	pipe->is_fg = req->is_fg;
+	pipe->alpha = req->alpha;
+	pipe->transp = req->transp_mask;
+	pipe->blend_op = req->blend_op;
+	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
+		pipe->blend_op = fmt->alpha_enable ?
+					BLEND_OP_PREMULTIPLIED :
+					BLEND_OP_OPAQUE;
+
+	if (!fmt->alpha_enable && (pipe->blend_op != BLEND_OP_OPAQUE))
+		pr_debug("Unintended blend_op %d on layer with no alpha plane\n",
+			pipe->blend_op);
+
+	if (fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
+			!pipe->scaler.enable) {
+		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;
+
+		if (!(pipe->flags & MDSS_MDP_DUAL_PIPE) ||
+		    IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w))
+			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
+		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
+	} else {
+		pipe->overfetch_disable = 0;
+	}
+	pipe->bg_color = req->bg_color;
+
+	/* cursor pipes skip pp/scaling/perf/SMP programming below */
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
+		goto cursor_done;
+
+	mdss_mdp_pipe_pp_clear(pipe);
+	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+		memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
+					sizeof(struct mdp_overlay_pp_params));
+		ret = mdss_mdp_pp_sspp_config(pipe);
+		if (ret) {
+			pr_err("failed to configure pp params ret %d\n", ret);
+			goto exit_fail;
+		}
+	}
+
+	/*
+	 * Populate Color Space.
+	 */
+	if (pipe->src_fmt->is_yuv && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG))
+		pipe->csc_coeff_set = req->color_space;
+	/*
+	 * When scaling is enabled src crop and image
+	 * width and height is modified by user
+	 */
+	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
+		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
+			pipe->src.x &= ~1;
+			pipe->src.w /= 2;
+			pipe->img_width /= 2;
+		} else {
+			pipe->src.h /= 2;
+			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
+			pipe->src.y &= ~1;
+		}
+	}
+
+	if (is_single_layer)
+		flags |= PERF_CALC_PIPE_SINGLE_LAYER;
+
+	ret = mdp_pipe_tune_perf(pipe, flags);
+	if (ret) {
+		pr_debug("unable to satisfy performance. ret=%d\n", ret);
+		goto exit_fail;
+	}
+
+	ret = mdss_mdp_overlay_setup_scaling(pipe);
+	if (ret)
+		goto exit_fail;
+
+	if ((mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+		(mdp5_data->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
+		mdss_mdp_smp_release(pipe);
+
+	ret = mdss_mdp_smp_reserve(pipe);
+	if (ret) {
+		pr_debug("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
+			pipe->num, ret);
+		goto exit_fail;
+	}
+
+
+	req->id = pipe->ndx;
+
+cursor_done:
+	req->vert_deci = pipe->vert_deci;
+
+	pipe->req_data = *req;
+	pipe->dirty = false;
+
+	pipe->params_changed++;
+skip_reconfigure:
+	*ppipe = pipe;
+
+	mdss_mdp_pipe_unmap(pipe);
+
+	return ret;
+exit_fail:
+	mdss_mdp_pipe_unmap(pipe);
+
+	mutex_lock(&mdp5_data->list_lock);
+	/* a never-played pipe can be destroyed outright */
+	if (pipe->play_cnt == 0) {
+		pr_debug("failed for pipe %d\n", pipe->num);
+		if (!list_empty(&pipe->list))
+			list_del_init(&pipe->list);
+		mdss_mdp_pipe_destroy(pipe);
+	}
+
+	/* invalidate any overlays in this framebuffer after failure */
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		pr_debug("freeing allocations for pipe %d\n", pipe->num);
+		mdss_mdp_smp_unreserve(pipe);
+		pipe->params_changed = 0;
+		pipe->dirty = true;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_overlay_set() - ioctl-level entry for MSMFB_OVERLAY_SET.
+ * @mfd: framebuffer device
+ * @req: overlay request from user-space
+ *
+ * Serializes on ov_lock, rejects requests while the panel is off, maps
+ * border-fill requests to the fixed BORDERFILL_NDX id and defers real
+ * pipe setup to mdss_mdp_overlay_pipe_setup(). Returns 0 or a negative
+ * errno (including -ERESTARTSYS from the interruptible lock).
+ */
+static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int ret;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
+	}
+
+	if (req->src.format == MDP_RGB_BORDERFILL) {
+		req->id = BORDERFILL_NDX;
+	} else {
+		struct mdss_mdp_pipe *pipe;
+
+		/* userspace zorder start with stage 0 */
+		req->z_order += MDSS_MDP_STAGE_0;
+
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
+
+		req->z_order -= MDSS_MDP_STAGE_0;
+	}
+
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/*
+ * it's caller responsibility to acquire mdp5_data->list_lock while calling
+ * this function
+ */
+/*
+ * mdss_mdp_overlay_buf_alloc() - take a buffer descriptor from the fb's
+ * pool, growing the pool by a BUF_POOL_SIZE chunk when empty.
+ * @mfd:  framebuffer device owning the pool
+ * @pipe: pipe the buffer will be queued on
+ *
+ * The returned buffer is moved to bufs_used, linked on the pipe's
+ * buf_queue, and marked MDP_BUF_STATE_READY. Returns NULL on allocation
+ * failure.
+ */
+struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf;
+	int i;
+
+	if (list_empty(&mdp5_data->bufs_pool)) {
+		pr_debug("allocating %u bufs for fb%d\n",
+			BUF_POOL_SIZE, mfd->index);
+
+		buf = kcalloc(BUF_POOL_SIZE, sizeof(*buf), GFP_KERNEL);
+		if (!buf)
+			return NULL;
+
+		/* one chunk_list node tracks the whole kcalloc'd array */
+		list_add(&buf->chunk_list, &mdp5_data->bufs_chunks);
+		kmemleak_not_leak(buf);
+
+		for (i = 0; i < BUF_POOL_SIZE; i++) {
+			/*
+			 * Fix: mark every element unused, not just buf[0].
+			 * The old "buf->state = ..." wrote element 0 on
+			 * every iteration, leaving buf[1..N-1].state at the
+			 * kcalloc zero value.
+			 */
+			buf[i].state = MDP_BUF_STATE_UNUSED;
+			list_add(&buf[i].buf_list, &mdp5_data->bufs_pool);
+		}
+	}
+
+	buf = list_first_entry(&mdp5_data->bufs_pool,
+			struct mdss_mdp_data, buf_list);
+	WARN_ON(buf->state != MDP_BUF_STATE_UNUSED);
+	buf->state = MDP_BUF_STATE_READY;
+	buf->last_alloc = local_clock();
+	buf->last_pipe = pipe;
+
+	list_move_tail(&buf->buf_list, &mdp5_data->bufs_used);
+	list_add_tail(&buf->pipe_list, &pipe->buf_queue);
+
+	pr_debug("buffer alloc: %pK\n", buf);
+
+	return buf;
+}
+
+/*
+ * __mdp_overlay_buf_alloc() - locked wrapper around
+ * mdss_mdp_overlay_buf_alloc() for callers not holding list_lock.
+ */
+static
+struct mdss_mdp_data *__mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf;
+
+	mutex_lock(&mdp5_data->list_lock);
+	buf = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	mutex_unlock(&mdp5_data->list_lock);
+
+	return buf;
+}
+
+/*
+ * mdss_mdp_overlay_buf_deinit() - tear down the fb's buffer pool.
+ *
+ * Unlinks every pooled descriptor and frees the underlying kcalloc'd
+ * chunks. Warns if any buffer is still on the in-use list, which would
+ * indicate a leak at teardown time.
+ */
+static void mdss_mdp_overlay_buf_deinit(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf, *t;
+
+	pr_debug("performing cleanup of buffers pool on fb%d\n", mfd->index);
+
+	WARN_ON(!list_empty(&mdp5_data->bufs_used));
+
+	list_for_each_entry_safe(buf, t, &mdp5_data->bufs_pool, buf_list)
+		list_del(&buf->buf_list);
+
+	/* chunks are the kcalloc'd arrays backing the pool entries */
+	list_for_each_entry_safe(buf, t, &mdp5_data->bufs_chunks, chunk_list) {
+		list_del(&buf->chunk_list);
+		kfree(buf);
+	}
+}
+
+/*
+ * it's caller responsibility to acquire mdp5_data->list_lock while calling
+ * this function
+ */
+/*
+ * mdss_mdp_overlay_buf_free() - release a buffer's backing memory and
+ * return its descriptor to the pool.
+ * @mfd: framebuffer device owning the pool
+ * @buf: buffer to free; unlinked from any pipe queue first
+ */
+void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_data *buf)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!list_empty(&buf->pipe_list))
+		list_del_init(&buf->pipe_list);
+
+	mdss_mdp_data_free(buf, false, DMA_TO_DEVICE);
+
+	buf->last_freed = local_clock();
+	buf->state = MDP_BUF_STATE_UNUSED;
+
+	pr_debug("buffer freed: %pK\n", buf);
+
+	list_move_tail(&buf->buf_list, &mdp5_data->bufs_pool);
+}
+
+/* Locked wrapper around mdss_mdp_overlay_buf_free(). */
+static void __mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_data *buf)
+{
+	struct mdss_overlay_private *ovl = mfd_to_mdp5_data(mfd);
+
+	mutex_lock(&ovl->list_lock);
+	mdss_mdp_overlay_buf_free(mfd, buf);
+	mutex_unlock(&ovl->list_lock);
+}
+
+static inline void __pipe_buf_mark_cleanup(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_data *buf)
+{
+	/*
+	 * Detach the buffer from its pipe queue; it remains on bufs_used and
+	 * the CLEANUP state causes it to be reclaimed on the next cleanup.
+	 */
+	list_del_init(&buf->pipe_list);
+	buf->state = MDP_BUF_STATE_CLEANUP;
+}
+
+/**
+ * __mdss_mdp_overlay_free_list_purge() - clear free list of buffers
+ * @mfd: Msm frame buffer data structure for the associated fb
+ *
+ * Returns every buffer pending on the free list back to the buffer pool.
+ * Caller is expected to hold mdp5_data->list_lock (required by
+ * mdss_mdp_overlay_buf_free()).
+ */
+static void __mdss_mdp_overlay_free_list_purge(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *cur, *next;
+
+	pr_debug("purging fb%d free list\n", mfd->index);
+
+	list_for_each_entry_safe(cur, next, &mdp5_data->bufs_freelist, buf_list)
+		mdss_mdp_overlay_buf_free(mfd, cur);
+}
+
+/*
+ * Move all buffers queued on @pipe to the fb's free list (freeing them
+ * immediately for secure-display sessions) and destroy the pipe.
+ * Runs under mdp5_data->list_lock (taken by the caller).
+ */
+static void __overlay_pipe_cleanup(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf, *tmpbuf;
+
+	list_for_each_entry_safe(buf, tmpbuf, &pipe->buf_queue, pipe_list) {
+		__pipe_buf_mark_cleanup(mfd, buf);
+		list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
+
+		/*
+		 * in case of secure UI, the buffer needs to be released as
+		 * soon as session is closed.
+		 */
+		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+			mdss_mdp_overlay_buf_free(mfd, buf);
+	}
+
+	mdss_mdp_pipe_destroy(pipe);
+}
+
+/**
+ * mdss_mdp_overlay_cleanup() - handles cleanup after frame commit
+ * @mfd: Msm frame buffer data structure for the associated fb
+ * @destroy_pipes: list of pipes that should be destroyed as part of cleanup
+ *
+ * Goes through destroy_pipes list and ensures they are ready to be destroyed
+ * and cleaned up. Also cleanup of any pipe buffers after flip.
+ */
+static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
+	struct list_head *destroy_pipes)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	bool recovery_mode = false;
+	bool skip_fetch_halt, pair_found;
+	struct mdss_mdp_data *buf, *tmpbuf;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(pipe, destroy_pipes, list) {
+		pair_found = false;
+		skip_fetch_halt = false;
+		tmp = pipe;
+
+		/*
+		 * Find if second rect is in the destroy list from the current
+		 * position. So if both rects are part of the destroy list then
+		 * fetch halt will be skipped for the 1st rect.
+		 */
+		list_for_each_entry_from(tmp, destroy_pipes, list) {
+			if (tmp->num == pipe->num) {
+				pair_found = true;
+				break;
+			}
+		}
+
+		/* skip fetch halt if pipe's other rect is still in use */
+		if (!pair_found) {
+			tmp = (struct mdss_mdp_pipe *)pipe->multirect.next;
+			if (tmp)
+				skip_fetch_halt =
+					atomic_read(&tmp->kref.refcount);
+		}
+
+		/* make sure pipe fetch has been halted before freeing buffer */
+		if (!skip_fetch_halt && mdss_mdp_pipe_fetch_halt(pipe, false)) {
+			/*
+			 * if pipe is not able to halt. Enter recovery mode,
+			 * by un-staging any pipes that are attached to mixer
+			 * so that any freed pipes that are not able to halt
+			 * can be staged in solid fill mode and be reset
+			 * with next vsync
+			 */
+			if (!recovery_mode) {
+				recovery_mode = true;
+				mdss_mdp_mixer_unstage_all(ctl->mixer_left);
+				mdss_mdp_mixer_unstage_all(ctl->mixer_right);
+			}
+			pipe->params_changed++;
+			pipe->unhalted = true;
+			mdss_mdp_pipe_queue_data(pipe, NULL);
+		}
+	}
+
+	if (recovery_mode) {
+		pr_warn("performing recovery sequence for fb%d\n", mfd->index);
+		__overlay_kickoff_requeue(mfd);
+	}
+
+	__mdss_mdp_overlay_free_list_purge(mfd);
+
+	/* buffers flagged CLEANUP during commit get freed on the next purge */
+	list_for_each_entry_safe(buf, tmpbuf, &mdp5_data->bufs_used, buf_list) {
+		if (buf->state == MDP_BUF_STATE_CLEANUP)
+			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
+	}
+
+	list_for_each_entry_safe(pipe, tmp, destroy_pipes, list) {
+		list_del_init(&pipe->list);
+		if (recovery_mode) {
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+			pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+		}
+		__overlay_pipe_cleanup(mfd, pipe);
+
+		/*
+		 * NOTE(review): pipe fields are read below after
+		 * __overlay_pipe_cleanup() has called mdss_mdp_pipe_destroy();
+		 * presumably destroy only drops a reference and the object
+		 * outlives this access - confirm against pipe refcounting.
+		 */
+		if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+			/*
+			 * track only RECT0, since at any given point there
+			 * can only be RECT0 only or RECT0 + RECT1
+			 */
+			ctl->mixer_left->next_pipe_map &= ~pipe->ndx;
+			if (ctl->mixer_right)
+				ctl->mixer_right->next_pipe_map &= ~pipe->ndx;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+/*
+ * Release pipes of the given @type that were handed off from the
+ * bootloader splash: queue them for cleanup and unstage them from
+ * their left mixer.
+ */
+void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
+	u32 type)
+{
+	u32 i, npipes;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	/* select the pipe array matching the requested type */
+	switch (type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		pipe = mdata->vig_pipes;
+		npipes = mdata->nvig_pipes;
+		break;
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		pipe = mdata->rgb_pipes;
+		npipes = mdata->nrgb_pipes;
+		break;
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		pipe = mdata->dma_pipes;
+		npipes = mdata->ndma_pipes;
+		break;
+	default:
+		return;
+	}
+
+	for (i = 0; i < npipes; i++) {
+		/* only check for first rect and ignore additional */
+		if (pipe->is_handed_off) {
+			pr_debug("Unmapping handed off pipe %d\n", pipe->num);
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			pipe->is_handed_off = false;
+		}
+		/* step over this pipe's additional rect entries */
+		pipe += pipe->multirect.max_rects;
+	}
+}
+
+/**
+ * mdss_mdp_overlay_start() - Programs the MDP control data path to hardware
+ * @mfd: Msm frame buffer structure associated with fb device.
+ *
+ * Program the MDP hardware with the control settings for the framebuffer
+ * device. In addition to this, this function also handles the transition
+ * from the splash screen to the android boot animation when the
+ * continuous splash screen feature is enabled.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	if (mdss_mdp_ctl_is_power_on(ctl)) {
+		/* already started - just release any leftover splash pipe */
+		if (!mdp5_data->mdata->batfet)
+			mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
+		mdss_mdp_release_splash_pipe(mfd);
+		return 0;
+	} else if (mfd->panel_info->cont_splash_enabled) {
+		if (mdp5_data->allow_kickoff) {
+			mdp5_data->allow_kickoff = false;
+		} else {
+			/* reject empty commits while splash is still up */
+			mutex_lock(&mdp5_data->list_lock);
+			rc = list_empty(&mdp5_data->pipes_used);
+			mutex_unlock(&mdp5_data->list_lock);
+			if (rc) {
+				pr_debug("empty kickoff on fb%d during cont splash\n",
+					mfd->index);
+				return -EPERM;
+			}
+		}
+	} else if (mdata->handoff_pending) {
+		pr_warn("fb%d: commit while splash handoff pending\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	pr_debug("starting fb%d overlay\n", mfd->index);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/*
+	 * If idle pc feature is not enabled, then get a reference to the
+	 * runtime device which will be released when overlay is turned off
+	 */
+	if (!mdp5_data->mdata->idle_pc_enabled ||
+		(mfd->panel_info->type != MIPI_CMD_PANEL)) {
+		rc = pm_runtime_get_sync(&mfd->pdev->dev);
+		if (IS_ERR_VALUE((unsigned long)rc)) {
+			pr_err("unable to resume with pm_runtime_get_sync rc=%d\n",
+				rc);
+			goto end;
+		}
+	}
+
+	/*
+	 * We need to do hw init before any hw programming.
+	 * Also, hw init involves programming the VBIF registers which
+	 * should be done only after attaching IOMMU which in turn would call
+	 * in to TZ to restore security configs on the VBIF registers.
+	 * This is not needed when continuous splash screen is enabled since
+	 * we would have called in to TZ to restore security configs from LK.
+	 */
+	if (!mfd->panel_info->cont_splash_enabled) {
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE((unsigned long)rc)) {
+			pr_err("iommu attach failed rc=%d\n", rc);
+			goto end;
+		}
+		mdss_hw_init(mdss_res);
+		mdss_iommu_ctrl(0);
+	}
+
+	/*
+	 * Increment the overlay active count prior to calling ctl_start.
+	 * This is needed to ensure that if idle power collapse kicks in
+	 * right away, it would be handled correctly.
+	 */
+	atomic_inc(&mdp5_data->mdata->active_intf_cnt);
+	rc = mdss_mdp_ctl_start(ctl, false);
+	if (rc == 0) {
+		mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
+			&mfd->mdp_sync_pt_data.notifier);
+	} else {
+		pr_err("mdp ctl start failed.\n");
+		goto ctl_error;
+	}
+
+	/* Restore any previously configured PP features by resetting the dirty
+	 * bits for enabled features. The dirty bits will be consumed during the
+	 * first display commit when the PP hardware blocks are updated
+	 */
+	rc = mdss_mdp_pp_resume(mfd);
+	if (rc && (rc != -EPERM) && (rc != -ENODEV))
+		pr_err("PP resume err %d\n", rc);
+
+	/* on splash cleanup failure, fall through and tear the ctl down */
+	rc = mdss_mdp_splash_cleanup(mfd, true);
+	if (!rc)
+		goto end;
+
+ctl_error:
+	mdss_mdp_ctl_destroy(ctl);
+	atomic_dec(&mdp5_data->mdata->active_intf_cnt);
+	mdp5_data->ctl = NULL;
+end:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
+/*
+ * Arm the CPU PM event timer with the display's next expected wakeup
+ * time; no-op when no PM handle exists or no wakeup time is available.
+ */
+static void mdss_mdp_overlay_update_pm(struct mdss_overlay_private *mdp5_data)
+{
+	ktime_t next_wakeup;
+
+	if (!mdp5_data->cpu_pm_hdl)
+		return;
+
+	if (mdss_mdp_display_wakeup_time(mdp5_data->ctl, &next_wakeup))
+		return;
+
+	activate_event_timer(mdp5_data->cpu_pm_hdl, next_wakeup);
+}
+
+/*
+ * Unstage @pipe from both mixers, mark it dirty and, if a buffer is
+ * attached, flag that buffer for cleanup.
+ */
+static void __unstage_pipe_and_clean_buf(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *buf)
+{
+	pr_debug("unstaging pipe:%d rect:%d buf:%d\n",
+		pipe->num, pipe->multirect.num, !buf);
+	MDSS_XLOG(pipe->num, pipe->multirect.num, !buf);
+
+	mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+	mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+	pipe->dirty = true;
+
+	if (buf)
+		__pipe_buf_mark_cleanup(mfd, buf);
+}
+
+/*
+ * Walk the fb's pipes_used list and queue each pipe's next buffer to
+ * hardware. Handles DMA pipe re-setup for shared WFD mode, buffer flips
+ * (marking the displaced buffer for cleanup), solid fill (NULL buffer),
+ * and unstages any pipe whose queue attempt fails.
+ */
+static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_ctl *tmp;
+	int ret = 0;
+
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		struct mdss_mdp_data *buf;
+
+		if (pipe->dirty) {
+			pr_err("fb%d: pipe %d dirty! skipping configuration\n",
+				mfd->index, pipe->num);
+			continue;
+		}
+
+		/*
+		 * When secure display is enabled, if there is a non secure
+		 * display pipe, skip that
+		 */
+		if (mdss_get_sd_client_cnt() &&
+			!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
+			pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
+				pipe->num, pipe->flags);
+			continue;
+		}
+		/*
+		 * When external is connected and no dedicated wfd is present,
+		 * reprogram DMA pipe before kickoff to clear out any previous
+		 * block mode configuration.
+		 */
+		if ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
+			(ctl->shared_lock &&
+			(ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))) {
+			if (ctl->mdata->mixer_switched) {
+				ret = mdss_mdp_overlay_pipe_setup(mfd,
+					&pipe->req_data, &pipe, NULL, false);
+				pr_debug("resetting DMA pipe for ctl=%d",
+					ctl->num);
+			}
+			if (ret) {
+				pr_err("can't reset DMA pipe ret=%d ctl=%d\n",
+					ret, ctl->num);
+				return ret;
+			}
+
+			tmp = mdss_mdp_ctl_mixer_switch(ctl,
+				MDSS_MDP_WB_CTL_TYPE_LINE);
+			if (!tmp)
+				return -EINVAL;
+			pipe->mixer_left = mdss_mdp_mixer_get(tmp,
+				MDSS_MDP_MIXER_MUX_DEFAULT);
+		}
+
+		/* pick the oldest queued buffer for this pipe, if any */
+		buf = list_first_entry_or_null(&pipe->buf_queue,
+			struct mdss_mdp_data, pipe_list);
+		if (buf) {
+			switch (buf->state) {
+			case MDP_BUF_STATE_READY:
+				pr_debug("pnum=%d buf=%pK first buffer ready\n",
+					pipe->num, buf);
+				break;
+			case MDP_BUF_STATE_ACTIVE:
+				if (list_is_last(&buf->pipe_list,
+					&pipe->buf_queue)) {
+					pr_debug("pnum=%d no buf update\n",
+						pipe->num);
+				} else {
+					struct mdss_mdp_data *tmp = buf;
+					/*
+					 * buffer flip, new buffer will
+					 * replace currently active one,
+					 * mark currently active for cleanup
+					 */
+					buf = list_next_entry(tmp, pipe_list);
+					__pipe_buf_mark_cleanup(mfd, tmp);
+				}
+				break;
+			default:
+				pr_err("invalid state of buf %pK=%d\n",
+					buf, buf->state);
+				WARN_ON(1);
+				break;
+			}
+		}
+
+		/* ensure pipes are reconfigured after power off/on */
+		if (ctl->play_cnt == 0)
+			pipe->params_changed++;
+
+		if (buf && (buf->state == MDP_BUF_STATE_READY)) {
+			/* fresh buffer: activate and map it for the display */
+			buf->state = MDP_BUF_STATE_ACTIVE;
+			ret = mdss_mdp_data_map(buf, false, DMA_TO_DEVICE);
+		} else if (!pipe->params_changed &&
+			!mdss_mdp_is_roi_changed(pipe->mfd)) {
+
+			/*
+			 * no update for the given pipe nor any change in the
+			 * ROI so skip pipe programming and continue with next.
+			 */
+			continue;
+		} else if (buf) {
+			WARN_ON(buf->state != MDP_BUF_STATE_ACTIVE);
+			pr_debug("requeueing active buffer on pnum=%d\n",
+				pipe->num);
+		} else if ((pipe->flags & MDP_SOLID_FILL) == 0) {
+			pr_warn("commit without buffer on pipe %d\n",
+				pipe->num);
+			ret = -EINVAL;
+		}
+		/*
+		 * if we reach here without errors and buf == NULL
+		 * then solid fill will be set
+		 */
+		if (!IS_ERR_VALUE((unsigned long)ret))
+			ret = mdss_mdp_pipe_queue_data(pipe, buf);
+
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_warn("Unable to queue data for pnum=%d rect=%d\n",
+				pipe->num, pipe->multirect.num);
+
+			/*
+			 * If we fail for a multi-rect pipe, unstage both rects
+			 * so we don't leave the pipe configured in multi-rect
+			 * mode with only one rectangle staged.
+			 */
+			if (pipe->multirect.mode !=
+				MDSS_MDP_PIPE_MULTIRECT_NONE) {
+				struct mdss_mdp_pipe *next_pipe =
+					(struct mdss_mdp_pipe *)
+					pipe->multirect.next;
+
+				if (next_pipe) {
+					struct mdss_mdp_data *next_buf =
+						list_first_entry_or_null(
+							&next_pipe->buf_queue,
+							struct mdss_mdp_data,
+							pipe_list);
+
+					__unstage_pipe_and_clean_buf(mfd,
+						next_pipe, next_buf);
+				} else {
+					pr_warn("cannot find rect pnum=%d\n",
+						pipe->num);
+				}
+			}
+
+			__unstage_pipe_and_clean_buf(mfd, pipe, buf);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Recovery path: run a commit/wait cycle to flush the current state,
+ * unstage all pipes, then re-queue the in-use pipes and commit again.
+ */
+static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	mdss_mdp_display_commit(ctl, NULL, NULL);
+	mdss_mdp_display_wait4comp(ctl);
+
+	/* unstage any recovery pipes and re-queue used pipes */
+	mdss_mdp_mixer_unstage_all(ctl->mixer_left);
+	mdss_mdp_mixer_unstage_all(ctl->mixer_right);
+
+	__overlay_queue_pipes(mfd);
+
+	mdss_mdp_display_commit(ctl, NULL, NULL);
+	mdss_mdp_display_wait4comp(ctl);
+}
+
+/*
+ * Commit-stage callback: takes ov_lock when the frame is ready for
+ * kickoff, and releases it (after signalling CTX_DONE) once the commit
+ * setup stage has completed.
+ */
+static int mdss_mdp_commit_cb(enum mdp_commit_stage_type commit_stage,
+	void *data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (commit_stage == MDP_COMMIT_STAGE_SETUP_DONE) {
+		struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
+		mdp5_data->kickoff_released = true;
+		mutex_unlock(&mdp5_data->ov_lock);
+	} else if (commit_stage == MDP_COMMIT_STAGE_READY_FOR_KICKOFF) {
+		mutex_lock(&mdp5_data->ov_lock);
+	} else {
+		pr_err("Invalid commit stage %x", commit_stage);
+	}
+
+	return 0;
+}
+
+/**
+ * __is_roi_valid() - Check if ctl roi is valid for a given pipe.
+ * @pipe: pipe to check against.
+ * @l_roi: roi of the left ctl path.
+ * @r_roi: roi of the right ctl path.
+ *
+ * Validate roi against pipe's destination rectangle by checking following
+ * conditions. If any of these conditions are met then return failure,
+ * success otherwise.
+ *
+ * 1. Pipe has scaling and pipe's destination is intersecting with roi.
+ * 2. Pipe's destination and roi do not overlap, In such cases, pipe should
+ *    not be part of used list and should have been omitted by user program.
+ */
+static bool __is_roi_valid(struct mdss_mdp_pipe *pipe,
+	struct mdss_rect *l_roi, struct mdss_rect *r_roi)
+{
+	bool ret = true;
+	bool is_right_mixer = pipe->mixer_left->is_right_mixer;
+	/* validate against the ROI of the mixer this pipe is staged on */
+	struct mdss_rect roi = is_right_mixer ? *r_roi : *l_roi;
+	struct mdss_rect dst = pipe->dst;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 left_lm_w = left_lm_w_from_mfd(pipe->mfd);
+
+	if (pipe->src_split_req) {
+		if (roi.w) {
+			/* left_roi is valid */
+			roi.w += r_roi->w;
+		} else {
+			/*
+			 * if we come here then left_roi is zero but pipe's
+			 * output is crossing LM boundary if it was Full Screen
+			 * update. In such case, if right ROI's (x+w) is less
+			 * than pipe's dst_x then #2 check will fail even
+			 * though in full coordinate system it is valid.
+			 * ex:
+			 *    left_lm_w = 800;
+			 *    pipe->dst.x = 400;
+			 *    pipe->dst.w = 800;
+			 *    r_roi.x + r_roi.w = 300;
+			 * To avoid such pitfall, extend ROI for comparison.
+			 */
+			roi.w += left_lm_w + r_roi->w;
+		}
+	}
+
+	/* bring dst into the right mixer's local coordinate space */
+	if (mdata->has_src_split && is_right_mixer)
+		dst.x -= left_lm_w;
+
+	/* condition #1 above */
+	if ((pipe->scaler.enable) ||
+		(pipe->src.w != dst.w) || (pipe->src.h != dst.h)) {
+		struct mdss_rect res;
+
+		mdss_mdp_intersect_rect(&res, &dst, &roi);
+
+		if (!mdss_rect_cmp(&res, &dst)) {
+			pr_err("error. pipe%d has scaling and its output is interesecting with roi.\n",
+				pipe->num);
+			pr_err("pipe_dst:-> %d %d %d %d roi:-> %d %d %d %d\n",
+				dst.x, dst.y, dst.w, dst.h,
+				roi.x, roi.y, roi.w, roi.h);
+			ret = false;
+			goto end;
+		}
+	}
+
+	/* condition #2 above */
+	if (!mdss_rect_overlap_check(&dst, &roi)) {
+		pr_err("error. pipe%d's output is outside of ROI.\n",
+			pipe->num);
+		ret = false;
+	}
+end:
+	return ret;
+}
+
+/*
+ * mdss_mode_switch() - switch panel operating mode
+ * @mfd: framebuffer device data
+ * @mode: MIPI_CMD_PANEL, MIPI_VIDEO_PANEL or SWITCH_RESOLUTION
+ *
+ * Stops or reconfigures the ctl path as required by the requested mode
+ * and restarts the ctl. Mode validation is done by the ioctl caller.
+ */
+int mdss_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_rect l_roi, r_roi;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *sctl;
+	int rc = 0;
+
+	pr_debug("fb%d switch to mode=%x\n", mfd->index, mode);
+	ATRACE_FUNC();
+
+	ctl->pending_mode_switch = mode;
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctl->pending_mode_switch = mode;
+
+	/* No need for mode validation. It has been done in ioctl call */
+	if (mode == SWITCH_RESOLUTION) {
+		if (ctl->ops.reconfigure) {
+			/* wait for previous frame to complete before switch */
+			if (ctl->ops.wait_pingpong)
+				rc = ctl->ops.wait_pingpong(ctl, NULL);
+			if (!rc && sctl && sctl->ops.wait_pingpong)
+				rc = sctl->ops.wait_pingpong(sctl, NULL);
+			if (rc) {
+				pr_err("wait for pp failed before resolution switch\n");
+				return rc;
+			}
+
+			/*
+			 * Configure the mixer parameters before the switch as
+			 * the DSC parameter calculation is based on the mixer
+			 * ROI. And set it to full ROI as driver expects the
+			 * first frame after the resolution switch to be a
+			 * full frame update.
+			 */
+			if (ctl->mixer_left) {
+				l_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_left->width,
+					ctl->mixer_left->height};
+				ctl->mixer_left->roi_changed = true;
+				ctl->mixer_left->valid_roi = true;
+			}
+			if (ctl->mixer_right) {
+				r_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_right->width,
+					ctl->mixer_right->height};
+				ctl->mixer_right->roi_changed = true;
+				ctl->mixer_right->valid_roi = true;
+			}
+			mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
+
+			mutex_lock(&mdp5_data->ov_lock);
+			ctl->ops.reconfigure(ctl, mode, 1);
+			mutex_unlock(&mdp5_data->ov_lock);
+		/*
+		 * For Video mode panels, reconfigure is not defined.
+		 * So doing an explicit ctrl stop during resolution switch
+		 * to balance the ctrl start at the end of this function.
+		 */
+		} else {
+			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+		}
+	} else if (mode == MIPI_CMD_PANEL) {
+		/*
+		 * Need to reset roi if there was partial update in previous
+		 * Command frame
+		 */
+		l_roi = (struct mdss_rect){0, 0,
+				ctl->mixer_left->width,
+				ctl->mixer_left->height};
+		if (ctl->mixer_right) {
+			r_roi = (struct mdss_rect) {0, 0,
+				ctl->mixer_right->width,
+				ctl->mixer_right->height};
+		}
+		mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
+		mdss_mdp_switch_roi_reset(ctl);
+
+		mdss_mdp_switch_to_cmd_mode(ctl, 1);
+		mdss_mdp_update_panel_info(mfd, 1, 0);
+		mdss_mdp_switch_to_cmd_mode(ctl, 0);
+		mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+	} else if (mode == MIPI_VIDEO_PANEL) {
+		if (ctl->ops.wait_pingpong)
+			rc = ctl->ops.wait_pingpong(ctl, NULL);
+		mdss_mdp_update_panel_info(mfd, 0, 0);
+		mdss_mdp_switch_to_vid_mode(ctl, 1);
+		mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+		mdss_mdp_switch_to_vid_mode(ctl, 0);
+	} else {
+		pr_err("Invalid mode switch arg %d\n", mode);
+		return -EINVAL;
+	}
+
+	mdss_mdp_ctl_start(ctl, true);
+	ATRACE_END(__func__);
+
+	return 0;
+}
+
+/*
+ * mdss_mode_switch_post() - post-switch housekeeping after mdss_mode_switch()
+ * @mfd: framebuffer device data
+ * @mode: the mode that was switched to
+ *
+ * Sends the dynamic-switch DCS after one video frame, balances DSI clock
+ * refcounts when entering command mode, finishes a resolution reconfigure,
+ * and clears the pending_mode_switch flags.
+ */
+int mdss_mode_switch_post(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	int rc = 0;
+	u32 frame_rate = 0;
+
+	if (mode == MIPI_VIDEO_PANEL) {
+		/*
+		 * Need to make sure one frame has been sent in
+		 * video mode prior to issuing the mode switch
+		 * DCS to panel.
+		 */
+		frame_rate = mdss_panel_get_framerate
+			(&(ctl->panel_data->panel_info),
+			FPS_RESOLUTION_HZ);
+		/* clamp to a sane fps, then sleep ~one frame period in ms */
+		if (!(frame_rate >= 24 && frame_rate <= 240))
+			frame_rate = 24;
+		frame_rate = ((1000/frame_rate) + 1);
+		msleep(frame_rate);
+
+		pr_debug("%s, start\n", __func__);
+		rc = mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+			(void *) MIPI_VIDEO_PANEL, CTL_INTF_EVENT_FLAG_DEFAULT);
+		pr_debug("%s, end\n", __func__);
+	} else if (mode == MIPI_CMD_PANEL) {
+		/*
+		 * Needed to balance out clk refcount when going
+		 * from video to command. This allows for idle
+		 * power collapse to work as intended.
+		 */
+		clk_ctrl.state = MDSS_DSI_CLK_OFF;
+		clk_ctrl.client = DSI_CLK_REQ_DSI_CLIENT;
+		if (sctl)
+			mdss_mdp_ctl_intf_event(sctl,
+				MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl,
+				CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+	} else if (mode == SWITCH_RESOLUTION) {
+		if (ctl->ops.reconfigure)
+			rc = ctl->ops.reconfigure(ctl, mode, 0);
+	}
+	ctl->pending_mode_switch = 0;
+	if (sctl)
+		sctl->pending_mode_switch = 0;
+
+	return rc;
+}
+
+/*
+ * Validate the commit's left/right partial-update ROIs against the staged
+ * pipes and program them into the ctl. Falls back to full-mixer ROI when
+ * partial update is disabled or invalid, when a resolution switch is
+ * pending, or when no ROI was supplied with the commit.
+ */
+static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
+	struct mdp_display_commit *commit)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_rect l_roi = {0}, r_roi = {0};
+	struct mdp_rect tmp_roi = {0};
+	bool skip_partial_update = true;
+
+	if (!commit)
+		goto set_roi;
+
+	/* both ROIs all-zero means the commit carries no partial update */
+	if (!memcmp(&commit->l_roi, &tmp_roi, sizeof(tmp_roi)) &&
+		!memcmp(&commit->r_roi, &tmp_roi, sizeof(tmp_roi)))
+		goto set_roi;
+
+	rect_copy_mdp_to_mdss(&commit->l_roi, &l_roi);
+	rect_copy_mdp_to_mdss(&commit->r_roi, &r_roi);
+
+	pr_debug("input: l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
+		r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+
+	/*
+	 * Configure full ROI
+	 * - If partial update is disabled
+	 * - If it is the first frame update after dynamic resolution switch
+	 */
+	if (!ctl->panel_data->panel_info.partial_update_enabled
+			|| (ctl->pending_mode_switch == SWITCH_RESOLUTION))
+		goto set_roi;
+
+	skip_partial_update = false;
+
+	if (is_split_lm(mfd) && mdp5_data->mdata->has_src_split) {
+		u32 left_lm_w = left_lm_w_from_mfd(mfd);
+		struct mdss_rect merged_roi = l_roi;
+
+		/*
+		 * When source split is enabled on split LM displays,
+		 * user program merges left and right ROI and sends
+		 * it through l_roi. Split this merged ROI into
+		 * left/right ROI for validation.
+		 */
+		mdss_rect_split(&merged_roi, &l_roi, &r_roi, left_lm_w);
+
+		/*
+		 * When source split is enabled on split LM displays,
+		 * it is a HW requirement that both LM have same width
+		 * if update is on both sides. Since ROIs are
+		 * generated by user-land program, validate against
+		 * this requirement.
+		 */
+		if (l_roi.w && r_roi.w && (l_roi.w != r_roi.w)) {
+			pr_err("error. ROI's do not match. violating src_split requirement\n");
+			pr_err("l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+				l_roi.x, l_roi.y, l_roi.w, l_roi.h,
+				r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+			skip_partial_update = true;
+			goto set_roi;
+		}
+	}
+
+	/* reject partial update if any staged pipe conflicts with the ROI */
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		if (!__is_roi_valid(pipe, &l_roi, &r_roi)) {
+			skip_partial_update = true;
+			pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d\n",
+				pipe->num,
+				pipe->dst.x, pipe->dst.y,
+				pipe->dst.w, pipe->dst.h);
+			break;
+		}
+	}
+
+set_roi:
+	if (skip_partial_update) {
+		l_roi = (struct mdss_rect){0, 0,
+				ctl->mixer_left->width,
+				ctl->mixer_left->height};
+		if (ctl->mixer_right) {
+			r_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_right->width,
+					ctl->mixer_right->height};
+		}
+	}
+
+	pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+		(l_roi.w && l_roi.h && r_roi.w && r_roi.h) ? "left+right" :
+		((l_roi.w && l_roi.h) ? "left-only" : "right-only"),
+		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
+		r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+
+	mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
+}
+
+/* Only 2:2, 2:3 and 2:3:2:2:3 pull-down cadences are handled by FRC. */
+static bool __is_supported_candence(int cadence)
+{
+	switch (cadence) {
+	case FRC_CADENCE_22:
+	case FRC_CADENCE_23:
+	case FRC_CADENCE_23223:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * compute how many vsyncs between these 2 timestamp
+ *
+ * @cur_ts/@base_ts are in microseconds; @display_fp1000s is the display
+ * frame rate in frames-per-1000-seconds, so the division by 10^6 yields
+ * vsync count scaled by 1000 (milli-vsyncs) before rounding.
+ */
+static int __compute_vsync_diff(s64 cur_ts,
+	s64 base_ts, int display_fp1000s)
+{
+	int vsync_diff;
+	int round_up = 0;
+	s64 ts_diff = (cur_ts - base_ts) * display_fp1000s;
+
+	do_div(ts_diff, 1000000);
+	vsync_diff = (int)ts_diff;
+	/*
+	 * In most case DIV_ROUND_UP_ULL is enough, but calculation might be
+	 * impacted by possible jitter when vsync_diff is close to boundaries.
+	 * E.g., we have 30fps like 12.0->13.998->15.999->18.0->19.998->21.999
+	 * and 7460.001->7462.002->7464.0->7466.001->7468.002. DIV_ROUND_UP_ULL
+	 * fails in the later case.
+	 */
+	round_up = ((vsync_diff % 1000) >= 900) ? 1 : 0;
+	/* round up vsync count to accommodate fractions: base & diff */
+	vsync_diff = (vsync_diff / 1000) + round_up + 1;
+	return vsync_diff;
+}
+
+/*
+ * Sanity-check the incoming FRC sample: returns false for a repeated
+ * frame (identical frame count and timestamp), true otherwise.
+ */
+static bool __validate_frc_info(struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+	struct mdss_mdp_frc_data *last_frc = &frc_info->last_frc;
+	struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
+
+	pr_debug("frc: cur_fcnt=%d, cur_ts=%lld, last_fcnt=%d, last_ts=%lld, base_fcnt=%d, base_ts=%lld last_v_cnt=%d, last_repeat=%d base_v_cnt=%d\n",
+		cur_frc->frame_cnt, cur_frc->timestamp,
+		last_frc->frame_cnt, last_frc->timestamp,
+		base_frc->frame_cnt, base_frc->timestamp,
+		frc_info->last_vsync_cnt, frc_info->last_repeat,
+		frc_info->base_vsync_cnt);
+
+	if ((cur_frc->frame_cnt == last_frc->frame_cnt) &&
+		(cur_frc->timestamp == last_frc->timestamp)) {
+		/* ignore repeated frame: video w/ UI layers */
+		pr_debug("repeated frame input\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* Reset the cadence-detection sample window before collecting samples. */
+static void __init_cadence_calc(struct mdss_mdp_frc_cadence_calc *calc)
+{
+	memset(calc, 0, sizeof(*calc));
+}
+
+/*
+ * Determine the pull-down cadence from @cnt collected samples: the average
+ * display-vsyncs-per-video-frame ratio (x1000) is matched against the
+ * configured FRC_CADENCE_*_RATIO_LOW/HIGH windows. Returns the matching
+ * FRC_CADENCE_* id or FRC_CADENCE_NONE.
+ */
+static int __calculate_cadence_id(struct mdss_mdp_frc_info *frc_info, int cnt)
+{
+	struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;
+	struct mdss_mdp_frc_data *first = &calc->samples[0];
+	struct mdss_mdp_frc_data *last = &calc->samples[cnt-1];
+	s64 ts_diff =
+		(last->timestamp - first->timestamp)
+			* frc_info->display_fp1000s;
+	u32 fcnt_diff =
+		last->frame_cnt - first->frame_cnt;
+	u32 fps_ratio;
+	u32 cadence_id = FRC_CADENCE_NONE;
+
+	do_div(ts_diff, fcnt_diff);
+	fps_ratio = (u32)ts_diff;
+
+	if ((fps_ratio > FRC_CADENCE_23_RATIO_LOW) &&
+			(fps_ratio < FRC_CADENCE_23_RATIO_HIGH))
+		cadence_id = FRC_CADENCE_23;
+	else if ((fps_ratio > FRC_CADENCE_22_RATIO_LOW) &&
+			(fps_ratio < FRC_CADENCE_22_RATIO_HIGH))
+		cadence_id = FRC_CADENCE_22;
+	else if ((fps_ratio > FRC_CADENCE_23223_RATIO_LOW) &&
+			(fps_ratio < FRC_CADENCE_23223_RATIO_HIGH))
+		cadence_id = FRC_CADENCE_23223;
+
+	pr_debug("frc: first=%lld, last=%lld, cnt=%d, fps_ratio=%u, cadence_id=%d\n",
+		first->timestamp, last->timestamp, fcnt_diff,
+		fps_ratio, cadence_id);
+
+	return cadence_id;
+}
+
+/*
+ * Initialize the repeat-sequence generator for @cadence_id: loads the
+ * cadence pattern (e.g. {2,3} for 3:2 pull-down) and resets matching
+ * state. pos = -EBADSLT marks "phase not yet matched"; base = -1 marks
+ * "no sample cached yet".
+ */
+static void __init_seq_gen(struct mdss_mdp_frc_seq_gen *gen, int cadence_id)
+{
+	int cadence22[2] = {2, 2};
+	int cadence23[2] = {2, 3};
+	int cadence23223[5] = {2, 3, 2, 2, 3};
+	int *cadence = NULL;
+	int len = 0;
+
+	memset(gen, 0, sizeof(struct mdss_mdp_frc_seq_gen));
+	gen->pos = -EBADSLT;
+	gen->base = -1;
+
+	switch (cadence_id) {
+	case FRC_CADENCE_22:
+		cadence = cadence22;
+		len = 2;
+		break;
+	case FRC_CADENCE_23:
+		cadence = cadence23;
+		len = 2;
+		break;
+	case FRC_CADENCE_23223:
+		cadence = cadence23223;
+		len = 5;
+		break;
+	default:
+		break;
+	}
+
+	if (len > 0) {
+		memcpy(gen->seq, cadence, len * sizeof(int));
+		gen->len = len;
+		gen->retry = 0;
+	}
+
+	pr_debug("init sequence, cadence=%d len=%d\n", cadence_id, len);
+}
+
+/*
+ * Find the phase of the cadence sequence that matches the cached vsync
+ * intervals (circular comparison of cache[] against every rotation of
+ * seq[]). Returns the matching start position, 0 after too many failed
+ * attempts (fall back to default phase), or -EBADSLT when no rotation
+ * matches.
+ */
+static int __match_sequence(struct mdss_mdp_frc_seq_gen *gen)
+{
+	int pos, i;
+	int len = gen->len;
+
+	/* use default position if many attempts have failed */
+	if (gen->retry++ >= FRC_CADENCE_SEQUENCE_MAX_RETRY)
+		return 0;
+
+	for (pos = 0; pos < len; pos++) {
+		for (i = 0; i < len; i++) {
+			if (gen->cache[(i+len-1) % len]
+					!= gen->seq[(pos+i) % len])
+				break;
+		}
+		if (i == len)
+			return pos;
+	}
+
+	return -EBADSLT;
+}
+
+/* Drop collected interval samples and the base so matching restarts. */
+static void __reset_cache(struct mdss_mdp_frc_seq_gen *gen)
+{
+	gen->base = -1;
+	memset(gen->cache, 0, gen->len * sizeof(gen->cache[0]));
+}
+
+/*
+ * Record the vsync interval between consecutive expected-vsync values in
+ * gen->cache[] while the cadence phase is still unknown; once the cache
+ * fills, try to match it against the cadence sequence.
+ */
+static void __cache_last(struct mdss_mdp_frc_seq_gen *gen, int expected_vsync)
+{
+	int i = 0;
+
+	/* only cache last in case of pre-defined cadence */
+	if ((gen->pos < 0) && (gen->len > 0)) {
+		/* set first sample's expected vsync as base */
+		if (gen->base < 0) {
+			gen->base = expected_vsync;
+			return;
+		}
+
+		/*
+		 * Find the first unfilled slot (entries are 0 until written).
+		 * The bounds check must come before the cache[i] read to
+		 * avoid indexing one past the end when every slot is filled.
+		 */
+		while ((i < gen->len) && gen->cache[i])
+			i++;
+
+		/* should not happen: cache is reset or matching stops first */
+		if (WARN_ON(i >= gen->len))
+			return;
+
+		gen->cache[i] = expected_vsync - gen->base;
+		gen->base = expected_vsync;
+
+		if (i == (gen->len - 1)) {
+			/* find init pos in sequence when cache is full */
+			gen->pos = __match_sequence(gen);
+			/* reset cache and re-collect samples for matching */
+			if (gen->pos < 0)
+				__reset_cache(gen);
+		}
+	}
+}
+
+/* Generator is usable once a cadence is loaded and its phase is known. */
+static inline bool __is_seq_gen_matched(struct mdss_mdp_frc_seq_gen *gen)
+{
+	if (gen->len <= 0)
+		return false;
+
+	return gen->pos >= 0;
+}
+
+/*
+ * Return the repeat count for the current cadence position and advance
+ * the position; -1 when no cadence phase has been matched yet.
+ */
+static int __expected_repeat(struct mdss_mdp_frc_seq_gen *gen)
+{
+	int repeat = -1;
+
+	if (!__is_seq_gen_matched(gen))
+		return repeat;
+
+	repeat = gen->seq[gen->pos];
+	gen->pos = (gen->pos + 1) % gen->len;
+
+	return repeat;
+}
+
+/*
+ * Compare the panel's current frame rate (frames per 1000 s) against the
+ * rate recorded in @frc_info; true when the display fps has changed.
+ */
+static bool __is_display_fps_changed(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	bool display_fps_changed = false;
+	u32 display_fp1000s = mdss_panel_get_framerate(mfd->panel_info,
+		FPS_RESOLUTION_KHZ);
+
+	if (frc_info->display_fp1000s != display_fp1000s) {
+		pr_debug("fps changes from %d to %d\n",
+			frc_info->display_fp1000s, display_fp1000s);
+		display_fps_changed = true;
+	}
+
+	return display_fps_changed;
+}
+
+/*
+ * Detect a change in the video's frame rate: every
+ * FRC_VIDEO_FPS_DETECT_WINDOW frames, compare the elapsed timestamp delta
+ * with the previous window's delta; a difference beyond the per-frame
+ * threshold counts as an fps change.
+ */
+static bool __is_video_fps_changed(struct mdss_mdp_frc_info *frc_info)
+{
+	bool video_fps_changed = false;
+
+	if ((frc_info->cur_frc.frame_cnt - frc_info->video_stat.frame_cnt)
+			== FRC_VIDEO_FPS_DETECT_WINDOW) {
+		s64 delta_t = frc_info->cur_frc.timestamp -
+			frc_info->video_stat.timestamp;
+
+		if (frc_info->video_stat.last_delta) {
+			video_fps_changed =
+				abs(delta_t - frc_info->video_stat.last_delta)
+					> (FRC_VIDEO_FPS_CHANGE_THRESHOLD_US *
+						FRC_VIDEO_FPS_DETECT_WINDOW);
+
+			if (video_fps_changed)
+				pr_info("video fps changed from [%d]%lld to [%d]%lld\n",
+					frc_info->video_stat.frame_cnt,
+					frc_info->video_stat.last_delta,
+					frc_info->cur_frc.frame_cnt,
+					delta_t);
+		}
+
+		/* start the next detection window from the current frame */
+		frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
+		frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
+		frc_info->video_stat.last_delta = delta_t;
+	}
+
+	return video_fps_changed;
+}
+
+/*
+ * Detect a seek: timestamps jumping backwards, or leaping forward past
+ * FRC_VIDEO_TS_DELTA_THRESHOLD_US between consecutive frames.
+ */
+static bool __is_video_seeking(struct mdss_mdp_frc_info *frc_info)
+{
+	s64 delta =
+		frc_info->cur_frc.timestamp - frc_info->last_frc.timestamp;
+	bool seeking = (delta < 0) ||
+		(delta > FRC_VIDEO_TS_DELTA_THRESHOLD_US);
+
+	if (seeking)
+		pr_debug("video seeking: %lld -> %lld\n",
+			frc_info->last_frc.timestamp,
+			frc_info->cur_frc.timestamp);
+
+	return seeking;
+}
+
+/* Detect dropped buffers: frame_cnt advanced by more than one frame. */
+static bool __is_buffer_dropped(struct mdss_mdp_frc_info *frc_info)
+{
+	int advanced = frc_info->cur_frc.frame_cnt -
+			frc_info->last_frc.frame_cnt;
+	bool dropped = (advanced > 1);
+
+	if (dropped) {
+		struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
+
+		/* remember where the current dropping burst started */
+		if (!drop_stat->drop_cnt)
+			drop_stat->frame_cnt = frc_info->last_frc.frame_cnt;
+
+		drop_stat->drop_cnt++;
+
+		pr_info("video buffer drop from %d to %d\n",
+			frc_info->last_frc.frame_cnt,
+			frc_info->cur_frc.frame_cnt);
+	}
+	return dropped;
+}
+
+/*
+ * Check whether buffer drops are happening too frequently: more than
+ * FRC_MAX_VIDEO_DROPPING_CNT drop events within fewer than
+ * FRC_VIDEO_DROP_TOLERANCE_WINDOW frames since the burst began.
+ * The drop counter is reset once the threshold is crossed, so a new
+ * detection cycle starts afterwards.
+ */
+static bool __is_too_many_drops(struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
+	bool too_many = false;
+
+	if (drop_stat->drop_cnt > FRC_MAX_VIDEO_DROPPING_CNT) {
+		too_many = (frc_info->cur_frc.frame_cnt - drop_stat->frame_cnt
+			< FRC_VIDEO_DROP_TOLERANCE_WINDOW);
+		/* restart drop statistics collection */
+		frc_info->drop_stat.drop_cnt = 0;
+	}
+
+	return too_many;
+}
+
+/*
+ * The video frame counter is assumed to increase monotonically; flag any
+ * regression relative to the previous frame or the FRC base frame.
+ */
+static bool __is_video_cnt_rollback(struct mdss_mdp_frc_info *frc_info)
+{
+	bool rollback = false;
+
+	if (frc_info->cur_frc.frame_cnt < frc_info->last_frc.frame_cnt)
+		rollback = true;
+	else if (frc_info->cur_frc.frame_cnt < frc_info->base_frc.frame_cnt)
+		rollback = true;
+
+	if (rollback)
+		pr_info("video frame_cnt rolls back from %d to %d\n",
+			frc_info->last_frc.frame_cnt,
+			frc_info->cur_frc.frame_cnt);
+
+	return rollback;
+}
+
+/*
+ * Treat playback as paused when exactly one new buffer arrived but more
+ * vsyncs than FRC_VIDEO_PAUSE_THRESHOLD elapsed since the previous
+ * kickoff, i.e. the video stalled while the display kept running.
+ */
+static bool __is_video_pause(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	bool video_pause =
+		(frc_info->cur_frc.frame_cnt - frc_info->last_frc.frame_cnt
+			== 1)
+		&& (ctl->vsync_cnt - frc_info->last_vsync_cnt >
+			FRC_VIDEO_PAUSE_THRESHOLD);
+
+	if (video_pause)
+		pr_debug("video paused: vsync elapsed %d\n",
+			ctl->vsync_cnt - frc_info->last_vsync_cnt);
+
+	return video_pause;
+}
+
+/*
+ * Workaround for some cases that video has the same timestamp for
+ * different frame. E.g., video player might provide the same frame
+ * twice to codec when seeking/flushing. Duplicated timestamps break
+ * the cadence computation, so force a reset when they are seen.
+ */
+static bool __is_timestamp_duplicated(struct mdss_mdp_frc_info *frc_info)
+{
+	bool ts_dup =
+		(frc_info->cur_frc.frame_cnt != frc_info->last_frc.frame_cnt)
+		&& (frc_info->cur_frc.timestamp
+			== frc_info->last_frc.timestamp);
+
+	if (ts_dup)
+		pr_info("timestamp of frame %d and %d are duplicated\n",
+			frc_info->last_frc.frame_cnt,
+			frc_info->cur_frc.frame_cnt);
+
+	return ts_dup;
+}
+
+/*
+ * Capture the current frame/vsync pair as the FRC reference point and
+ * reset all derived state (cadence id, repeat history, fps statistics).
+ * Called whenever FRC (re)enters the INIT state.
+ */
+static void __set_frc_base(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	frc_info->base_vsync_cnt = ctl->vsync_cnt;
+	frc_info->base_frc = frc_info->cur_frc;
+	frc_info->last_frc = frc_info->cur_frc;
+	frc_info->last_repeat = 0;
+	frc_info->last_vsync_cnt = 0;
+	frc_info->cadence_id = FRC_CADENCE_NONE;
+	frc_info->video_stat.last_delta = 0;
+	frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
+	frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
+	frc_info->display_fp1000s =
+		mdss_panel_get_framerate(mfd->panel_info, FPS_RESOLUTION_KHZ);
+
+
+	pr_debug("frc_base: vsync_cnt=%d frame_cnt=%d timestamp=%lld\n",
+		frc_info->base_vsync_cnt, frc_info->cur_frc.frame_cnt,
+		frc_info->cur_frc.timestamp);
+}
+
+/* calculate when we'd like to kickoff current frame based on its timestamp */
+static int __calculate_remaining_vsync(struct msm_fb_data_type *mfd,
+ struct mdss_mdp_frc_info *frc_info)
+{
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+ struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+ struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
+ int vsync_diff, expected_vsync_cnt, remaining_vsync;
+
+ /* how many vsync intervals between current & base */
+ vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
+ base_frc->timestamp, frc_info->display_fp1000s);
+
+ /* expected vsync where we'd like to kickoff current frame */
+ expected_vsync_cnt = frc_info->base_vsync_cnt + vsync_diff;
+ /* how many remaining vsync we need display till kickoff */
+ remaining_vsync = expected_vsync_cnt - ctl->vsync_cnt;
+
+ pr_debug("frc: expected_vsync_cnt=%d, cur_vsync_cnt=%d, remaining=%d\n",
+ expected_vsync_cnt, ctl->vsync_cnt, remaining_vsync);
+
+ return remaining_vsync;
+}
+
+/*
+ * Tune the latency computed previously if possible jitter exists: adjust
+ * the remaining-vsync estimate so the current frame's repeat count tracks
+ * the repeat expected by the matched cadence sequence. A no-op when no
+ * cadence is active or the generator has no expectation yet.
+ */
+static int __tune_possible_jitter(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info, int remaining_vsync)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int cadence_id = frc_info->cadence_id;
+	int remaining = remaining_vsync;
+	int expected_repeat = __expected_repeat(&frc_info->gen);
+
+	if (cadence_id && (expected_repeat > 0)) {
+		int expected_vsync_cnt = remaining + ctl->vsync_cnt;
+		/* how many times current frame will be repeated */
+		int cur_repeat = expected_vsync_cnt - frc_info->last_vsync_cnt;
+
+		/* absorb the deviation from the cadence's expectation */
+		remaining -= cur_repeat - expected_repeat;
+		pr_debug("frc: tune vsync, input=%d, output=%d, last_repeat=%d, cur_repeat=%d, expected_repeat=%d\n",
+			remaining_vsync, remaining, frc_info->last_repeat,
+			cur_repeat, expected_repeat);
+	}
+
+	return remaining;
+}
+
+/*
+ * Compute how many vsyncs the previous frame must keep repeating before
+ * the current frame is kicked off, with jitter compensation applied.
+ */
+static int __calculate_remaining_repeat(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	int remaining = __calculate_remaining_vsync(mfd, frc_info);
+
+	return __tune_possible_jitter(mfd, frc_info, remaining);
+}
+
+/*
+ * Block until @repeat additional vsyncs have elapsed so the frame on
+ * screen is shown for the required number of display intervals.
+ *
+ * Returns 0 on success or the negative error from the vsync wait.
+ */
+static int __repeat_current_frame(struct mdss_mdp_ctl *ctl, int repeat)
+{
+	int expected_vsync = ctl->vsync_cnt + repeat;
+	int cnt = 0;
+	int ret = 0;
+
+	/*
+	 * Without a wait_vsync_fnc there is no way to sleep until the next
+	 * vsync, and the loop below would busy-spin polling vsync_cnt.
+	 * Bail out instead of burning the CPU.
+	 */
+	if (!ctl->ops.wait_vsync_fnc)
+		return 0;
+
+	while (ctl->vsync_cnt < expected_vsync) {
+		cnt++;
+		ret = ctl->ops.wait_vsync_fnc(ctl);
+		if (ret < 0)
+			break;
+	}
+
+	if (ret)
+		pr_err("wrong waiting: repeat %d, actual: %d\n", repeat, cnt);
+
+	return ret;
+}
+
+/*
+ * Snapshot per-frame bookkeeping for the next round: the frame just
+ * committed becomes "last", and the actual repeat count of the previous
+ * frame is derived from the vsync counter delta.
+ */
+static void __save_last_frc_info(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/* save last data */
+	frc_info->last_frc = frc_info->cur_frc;
+	frc_info->last_repeat = ctl->vsync_cnt - frc_info->last_vsync_cnt;
+	frc_info->last_vsync_cnt = ctl->vsync_cnt;
+}
+
+/* State-change hook: reset cadence statistics on entering CADENCE_DETECT. */
+static void cadence_detect_callback(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	__init_cadence_calc(&frc_fsm->frc_info.calc);
+}
+
+/* State-change hook: seed the sequence generator on entering SEQ_MATCH. */
+static void seq_match_callback(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	__init_seq_gen(&frc_fsm->frc_info.gen, frc_fsm->frc_info.cadence_id);
+}
+
+/* State-change hook: mark the cadence as disabled on entering DISABLE. */
+static void frc_disable_callback(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	frc_fsm->frc_info.cadence_id = FRC_CADENCE_DISABLE;
+}
+
+/* default behavior of FRC FSM */
+static bool __is_frc_state_changed_in_default(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/*
+	 * Need change to INIT state when any of these changes happens:
+	 *
+	 * 1) video frame_cnt has been rolled back by codec.
+	 * 2) video fast-foward or rewind. Sometimes video seeking might cause
+	 *    buffer drop as well, so check seek ahead of buffer drop in order
+	 *    to avoid duplicated check.
+	 * 3) buffer drop.
+	 * 4) display fps has changed.
+	 * 5) video frame rate has changed.
+	 * 6) video pauses. it could be considered as lag case.
+	 * 7) duplicated timestamp of different frames which breaks FRC.
+	 */
+	return (__is_video_cnt_rollback(frc_info) ||
+		__is_video_seeking(frc_info) ||
+		__is_buffer_dropped(frc_info) ||
+		__is_display_fps_changed(mfd, frc_info) ||
+		__is_video_fps_changed(frc_info) ||
+		__is_video_pause(mfd, frc_info) ||
+		__is_timestamp_duplicated(frc_info));
+}
+
+/*
+ * Default pre-frame hook: decide whether FRC must be disabled (too many
+ * drops) or reset to INIT (any state-invalidating change). Both
+ * transitions are applied immediately via update_state.
+ */
+static void __pre_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	if (__is_too_many_drops(frc_info)) {
+		/*
+		 * disable frc when dropping too many buffers, this might happen
+		 * in some extreme cases like video is heavily loaded so any
+		 * extra latency could make things worse.
+		 */
+		pr_info("disable frc because there're too many drops\n");
+		mdss_mdp_frc_fsm_change_state(frc_fsm,
+			FRC_STATE_DISABLE, frc_disable_callback);
+		mdss_mdp_frc_fsm_update_state(frc_fsm);
+	} else if (__is_frc_state_changed_in_default(mfd, frc_info)) {
+		/* FRC status changed so reset to INIT state */
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
+		mdss_mdp_frc_fsm_update_state(frc_fsm);
+	}
+}
+
+/* Default per-frame FRC work: intentionally a no-op. */
+static void __do_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	/* do nothing */
+}
+
+/*
+ * Default post-frame hook: record the committed frame's bookkeeping and
+ * make any pending state transition effective for the next round.
+ */
+static void __post_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	__save_last_frc_info(ctl, frc_info);
+
+	/* update frc_fsm state to new state for the next round */
+	mdss_mdp_frc_fsm_update_state(frc_fsm);
+}
+
+/* behavior of FRC FSM in INIT state */
+static void __do_frc_in_init_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	/* capture the current frame as the new reference point */
+	__set_frc_base(mfd, frc_info);
+
+	/* transition takes effect at the default post_frc hook */
+	mdss_mdp_frc_fsm_change_state(frc_fsm,
+		FRC_STATE_CADENCE_DETECT, cadence_detect_callback);
+}
+
+/* behavior of FRC FSM in CADENCE_DETECT state */
+static void __do_frc_in_cadence_detect_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+	struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;
+
+	if (calc->sample_cnt < FRC_CADENCE_DETECT_WINDOW) {
+		/* still collecting frame samples for cadence detection */
+		calc->samples[calc->sample_cnt++] = frc_info->cur_frc;
+	} else {
+		/*
+		 * Get enough samples and check cadence. FRC_CADENCE_23
+		 * and FRC_CADENCE_22 need >= 2 deltas, and >= 5 deltas
+		 * are necessary for computing FRC_CADENCE_23223.
+		 */
+		u32 cadence_id = FRC_CADENCE_23;
+		/* per-cadence sample counts, indexed by cadence id */
+		u32 sample_cnt[FRC_MAX_SUPPORT_CADENCE] = {0, 5, 5, 6};
+
+		/* try each supported cadence until one matches */
+		while (cadence_id < FRC_CADENCE_FREE_RUN) {
+			if (cadence_id ==
+				__calculate_cadence_id(frc_info,
+					sample_cnt[cadence_id]))
+				break;
+			cadence_id++;
+		}
+
+		frc_info->cadence_id = cadence_id;
+		pr_info("frc: cadence_id=%d\n", cadence_id);
+
+		/* detected supported cadence, start sequence match */
+		if (__is_supported_candence(frc_info->cadence_id))
+			mdss_mdp_frc_fsm_change_state(frc_fsm,
+				FRC_STATE_SEQ_MATCH, seq_match_callback);
+		else
+			mdss_mdp_frc_fsm_change_state(frc_fsm,
+				FRC_STATE_FREERUN, NULL);
+	}
+}
+
+/* behavior of FRC FSM in SEQ_MATCH state */
+static void __do_frc_in_seq_match_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+	struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
+	int vsync_diff;
+
+	/* how many vsync intervals between current & base */
+	vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
+			base_frc->timestamp, frc_info->display_fp1000s);
+
+	/* cache vsync diff to compute start pos in cadence */
+	__cache_last(&frc_info->gen, vsync_diff);
+
+	/* once the generator locks onto the cadence, FRC is ready to run */
+	if (__is_seq_gen_matched(&frc_info->gen))
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_READY, NULL);
+}
+
+/* behavior of FRC FSM in FREE_RUN state */
+static bool __is_frc_state_changed_in_freerun_state(
+	struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/*
+	 * Only need change to INIT state in case of 2 changes:
+	 *
+	 * 1) display fps has changed.
+	 * 2) video frame rate has changed.
+	 */
+	return (__is_display_fps_changed(mfd, frc_info) ||
+		__is_video_fps_changed(frc_info));
+}
+
+/*
+ * Free-run pre-frame hook: only an fps change (display or video) can pull
+ * the FSM out of free-run, back to INIT for re-detection.
+ */
+static void __pre_frc_in_freerun_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	/* FRC status changed so reset to INIT state */
+	if (__is_frc_state_changed_in_freerun_state(mfd, frc_info)) {
+		/* update state to INIT immediately */
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
+		mdss_mdp_frc_fsm_update_state(frc_fsm);
+	}
+}
+
+/* behavior of FRC FSM in READY state */
+static void __do_frc_in_ready_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+
+	int remaining_repeat =
+		__calculate_remaining_repeat(mfd, frc_info);
+
+	mdss_debug_frc_add_kickoff_sample_pre(ctl, frc_info, remaining_repeat);
+
+	/* video arrives later than expected */
+	if (remaining_repeat < 0) {
+		pr_info("Frame %d lags behind %d vsync\n",
+			cur_frc->frame_cnt, -remaining_repeat);
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
+		remaining_repeat = 0;
+	}
+
+	/* debugfs knob can force repeats off for experiments */
+	if (mdss_debug_frc_frame_repeat_disabled())
+		remaining_repeat = 0;
+
+	/* hold the previous frame on screen until the cadence slot arrives */
+	__repeat_current_frame(ctl, remaining_repeat);
+
+	mdss_debug_frc_add_kickoff_sample_post(ctl, frc_info, remaining_repeat);
+}
+
+/* behavior of FRC FSM in DISABLE state */
+/* Disabled state: no pre-frame checks; FRC stays off. */
+static void __pre_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	/* do nothing */
+}
+
+/* Disabled state: no post-frame bookkeeping is needed. */
+static void __post_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	/* do nothing */
+}
+
+/*
+ * Program the secure-display session according to the pending transition
+ * recorded in sd_transition_state. For command-mode panels, the previous
+ * frame transfer is waited out first so the switch happens between frames.
+ *
+ * Returns 0 on success or the error from mdss_mdp_secure_display_ctrl().
+ */
+static int __config_secure_display(struct mdss_overlay_private *mdp5_data)
+{
+	int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
+	int sd_enable = -1; /* Since 0 is a valid state, initialize with -1 */
+	int ret = 0;
+
+	if (panel_type == MIPI_CMD_PANEL)
+		mdss_mdp_display_wait4pingpong(mdp5_data->ctl, true);
+
+	/*
+	 * Start secure display session if we are transitioning from non secure
+	 * to secure display.
+	 */
+	if (mdp5_data->sd_transition_state ==
+			SD_TRANSITION_NON_SECURE_TO_SECURE)
+		sd_enable = 1;
+
+	/*
+	 * For command mode panels, if we are transitioning from secure to
+	 * non secure session, disable the secure display, as we've already
+	 * waited for the previous frame transfer.
+	 */
+	if ((panel_type == MIPI_CMD_PANEL) &&
+			(mdp5_data->sd_transition_state ==
+			SD_TRANSITION_SECURE_TO_NON_SECURE))
+		sd_enable = 0;
+
+	/* only touch the hardware when a transition was actually decided */
+	if (sd_enable != -1) {
+		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, sd_enable);
+		if (!ret)
+			mdp5_data->sd_enabled = sd_enable;
+	}
+
+	return ret;
+}
+
+/*
+ * predefined state table of FRC FSM
+ *
+ * Each state provides three hooks invoked per kickoff:
+ *   pre_frc  - validate/reset state before processing the frame
+ *   do_frc   - state-specific work (detect, match, repeat, ...)
+ *   post_frc - bookkeeping and deferred state transition
+ */
+static struct mdss_mdp_frc_fsm_state frc_fsm_states[FRC_STATE_MAX] = {
+	{
+		.name = "FRC_FSM_INIT",
+		.state = FRC_STATE_INIT,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_init_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_CADENCE_DETECT",
+		.state = FRC_STATE_CADENCE_DETECT,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_cadence_detect_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_SEQ_MATCH",
+		.state = FRC_STATE_SEQ_MATCH,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_seq_match_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_FREERUN",
+		.state = FRC_STATE_FREERUN,
+		.ops = {
+			.pre_frc = __pre_frc_in_freerun_state,
+			.do_frc = __do_frc_in_default,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_READY",
+		.state = FRC_STATE_READY,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_ready_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_DISABLE",
+		.state = FRC_STATE_DISABLE,
+		.ops = {
+			.pre_frc = __pre_frc_in_disable_state,
+			.do_frc = __do_frc_in_default,
+			.post_frc = __post_frc_in_disable_state,
+		},
+	},
+};
+
+/*
+ * FRC FSM operations:
+ * mdss_mdp_frc_fsm_init_state: Init FSM state.
+ * mdss_mdp_frc_fsm_change_state: Change FSM state. The desired state will not
+ *			be effective till update_state is called.
+ * mdss_mdp_frc_fsm_update_state: Update FSM state. Changed state is effective
+ *			immediately once this function is called.
+ */
+void mdss_mdp_frc_fsm_init_state(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	pr_debug("frc_fsm: init frc fsm state\n");
+	/* both current and pending state start at INIT, info zeroed */
+	frc_fsm->state = frc_fsm->to_state = frc_fsm_states[FRC_STATE_INIT];
+	memset(&frc_fsm->frc_info, 0, sizeof(struct mdss_mdp_frc_info));
+}
+
+/*
+ * Request a transition to @state; the change is deferred until
+ * mdss_mdp_frc_fsm_update_state() runs. @cb, if non-NULL, is invoked at
+ * update time to (re)initialize state owned by the destination state.
+ * Requesting the current state is a no-op.
+ */
+void mdss_mdp_frc_fsm_change_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	enum mdss_mdp_frc_state_type state,
+	void (*cb)(struct mdss_mdp_frc_fsm *frc_fsm))
+{
+	if (state != frc_fsm->state.state) {
+		pr_debug("frc_fsm: state changes from %s to %s\n",
+			frc_fsm->state.name,
+			frc_fsm_states[state].name);
+		frc_fsm->to_state = frc_fsm_states[state];
+		frc_fsm->cbs.update_state_cb = cb;
+	}
+}
+
+/*
+ * Make a previously requested state transition effective, running the
+ * registered per-transition callback first. No-op if no transition is
+ * pending.
+ */
+void mdss_mdp_frc_fsm_update_state(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	if (frc_fsm->to_state.state != frc_fsm->state.state) {
+		pr_debug("frc_fsm: state updates from %s to %s\n",
+			frc_fsm->state.name,
+			frc_fsm->to_state.name);
+
+		if (frc_fsm->cbs.update_state_cb)
+			frc_fsm->cbs.update_state_cb(frc_fsm);
+
+		frc_fsm->state = frc_fsm->to_state;
+	}
+}
+
+/*
+ * Run one FRC round for the frame being kicked off: invoke the current
+ * state's pre/do/post hooks, provided the frame info passes validation.
+ */
+static void mdss_mdp_overlay_update_frc(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	if (__validate_frc_info(frc_info)) {
+		struct mdss_mdp_frc_fsm_state *state = &frc_fsm->state;
+
+		state->ops.pre_frc(frc_fsm, mfd);
+		state->ops.do_frc(frc_fsm, mfd);
+		state->ops.post_frc(frc_fsm, mfd);
+	}
+}
+
+/*
+ * mdss_mdp_overlay_kickoff() - program and commit one display frame.
+ * @mfd:  framebuffer device being committed
+ * @data: commit request (ROI etc.); may be NULL on pan-display paths
+ *
+ * Serializes on ov_lock (and ctl->shared_lock when present), starts the
+ * overlay and iommu, handles a pending secure-display transition,
+ * unstages cleanup pipes, queues staged pipes, optionally runs FRC, then
+ * commits and waits for completion.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
+	struct mdp_display_commit *data)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int ret = 0;
+	struct mdss_mdp_commit_cb commit_cb;
+	u8 sd_transition_state = 0;
+
+	if (!ctl || !ctl->mixer_left)
+		return -ENODEV;
+
+	ATRACE_BEGIN(__func__);
+	if (ctl->shared_lock) {
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+		mutex_lock(ctl->shared_lock);
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+	ctl->bw_pending = 0;
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		return ret;
+	}
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("iommu attach failed rc=%d\n", ret);
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		return ret;
+	}
+	mutex_lock(&mdp5_data->list_lock);
+
+	if (!ctl->shared_lock)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_check_ctl_reset_status(ctl);
+	__validate_and_set_roi(mfd, data);
+
+	if (ctl->ops.wait_pingpong && mdp5_data->mdata->serialize_wait4pp)
+		mdss_mdp_display_wait4pingpong(ctl, true);
+
+	sd_transition_state = mdp5_data->sd_transition_state;
+	if (sd_transition_state != SD_TRANSITION_NONE) {
+		ret = __config_secure_display(mdp5_data);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_err("Secure session config failed\n");
+			/*
+			 * list_lock is still held at this point and the
+			 * commit_fail path does not release it (cleanup
+			 * re-acquires it), so drop it before bailing out.
+			 */
+			mutex_unlock(&mdp5_data->list_lock);
+			goto commit_fail;
+		}
+	}
+
+	/*
+	 * Setup pipe in solid fill before unstaging,
+	 * to ensure no fetches are happening after detach or reattach.
+	 */
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+		list_move(&pipe->list, &mdp5_data->pipes_destroy);
+	}
+
+	/* call this function before any registers programming */
+	if (ctl->ops.pre_programming)
+		ctl->ops.pre_programming(ctl);
+
+	ATRACE_BEGIN("sspp_programming");
+	ret = __overlay_queue_pipes(mfd);
+	ATRACE_END("sspp_programming");
+	mutex_unlock(&mdp5_data->list_lock);
+
+	mdp5_data->kickoff_released = false;
+
+	/* frame-rate conversion: may delay the kickoff to keep cadence */
+	if (mdp5_data->frc_fsm->enable)
+		mdss_mdp_overlay_update_frc(mfd);
+
+	if (mfd->panel.type == WRITEBACK_PANEL) {
+		ATRACE_BEGIN("wb_kickoff");
+		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
+		commit_cb.data = mfd;
+		ret = mdss_mdp_wfd_kickoff(mdp5_data->wfd, &commit_cb);
+		ATRACE_END("wb_kickoff");
+	} else {
+		ATRACE_BEGIN("display_commit");
+		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
+		commit_cb.data = mfd;
+		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL,
+			&commit_cb);
+		ATRACE_END("display_commit");
+	}
+	__vsync_set_vsync_handler(mfd);
+
+	/*
+	 * release the commit pending flag; we are releasing this flag
+	 * after the commit, since now the transaction status
+	 * in the cmd mode controllers is busy.
+	 */
+	mfd->atomic_commit_pending = false;
+
+	if (!mdp5_data->kickoff_released)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
+
+	if (IS_ERR_VALUE((unsigned long)ret))
+		goto commit_fail;
+
+	mutex_unlock(&mdp5_data->ov_lock);
+	mdss_mdp_overlay_update_pm(mdp5_data);
+
+	ATRACE_BEGIN("display_wait4comp");
+	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
+	ATRACE_END("display_wait4comp");
+	mdss_mdp_splash_cleanup(mfd, true);
+
+	/*
+	 * Configure Timing Engine, if new fps was set.
+	 * We need to do this after the wait for vsync
+	 * to guarantee that mdp flush bit and dsi flush
+	 * bit are set within the same vsync period
+	 * regardless of mdp revision.
+	 */
+	ATRACE_BEGIN("fps_update");
+	ret = mdss_mdp_ctl_update_fps(ctl);
+	ATRACE_END("fps_update");
+
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("failed to update fps!\n");
+		goto commit_fail;
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+	/*
+	 * If we are transitioning from secure to non-secure display,
+	 * disable the secure display.
+	 */
+	if (mdp5_data->sd_enabled && (sd_transition_state ==
+			SD_TRANSITION_SECURE_TO_NON_SECURE)) {
+		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, 0);
+		if (!ret)
+			mdp5_data->sd_enabled = 0;
+	}
+
+	mdss_fb_update_notify_update(mfd);
+commit_fail:
+	ATRACE_BEGIN("overlay_cleanup");
+	mdss_mdp_overlay_cleanup(mfd, &mdp5_data->pipes_destroy);
+	ATRACE_END("overlay_cleanup");
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
+	if (!mdp5_data->kickoff_released)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
+
+	mutex_unlock(&mdp5_data->ov_lock);
+	if (ctl->shared_lock)
+		mutex_unlock(ctl->shared_lock);
+	mdss_iommu_ctrl(0);
+	ATRACE_END(__func__);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_overlay_release() - move the pipes in bitmask @ndx from the
+ * used list to the cleanup list so they are destroyed on the next commit.
+ *
+ * Returns 0 when every requested pipe was found, -ENOENT if any bit in
+ * @ndx had no matching (or mappable) pipe.
+ */
+int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 unset_ndx = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (pipe->ndx & ndx) {
+			/* hold a reference while the pipe is re-listed */
+			if (mdss_mdp_pipe_map(pipe)) {
+				pr_err("Unable to map used pipe%d ndx=%x\n",
+					pipe->num, pipe->ndx);
+				continue;
+			}
+
+			unset_ndx |= pipe->ndx;
+
+			pipe->file = NULL;
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+
+			mdss_mdp_pipe_unmap(pipe);
+
+			/* stop early once every requested pipe is handled */
+			if (unset_ndx == ndx)
+				break;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	if (unset_ndx != ndx) {
+		pr_warn("Unable to unset pipe(s) ndx=0x%x unset=0x%x\n",
+			ndx, unset_ndx);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/*
+ * Ioctl-level unset: validate the device state, handle the special
+ * BORDERFILL_NDX pseudo-pipe, then release the requested pipes.
+ *
+ * Returns 0 on success, -ENODEV/-EPERM on invalid state, or the error
+ * from mdss_mdp_overlay_release(); may also return -EINTR from the
+ * interruptible lock.
+ */
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd)
+		return -ENODEV;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	/* borderfill is a flag, not a real pipe; no release needed */
+	if (ndx == BORDERFILL_NDX) {
+		pr_debug("borderfill disable\n");
+		mdp5_data->borderfill_enable = false;
+		ret = 0;
+		goto done;
+	}
+
+	if (mdss_fb_is_power_off(mfd)) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	pr_debug("unset ndx=%x\n", ndx);
+
+	ret = mdss_mdp_overlay_release(mfd, ndx);
+
+done:
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/**
+ * __mdss_mdp_overlay_release_all() - release overlays associated with fb dev
+ * @mfd: Msm frame buffer structure associated with fb device
+ * @file: file owning the pipes to release; NULL releases all pipes
+ *
+ * Release any resources allocated by calling process, this can be called
+ * on fb_release to release any overlays/rotator sessions left open.
+ *
+ * Return number of resources released
+ */
+static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
+	struct file *file)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 unset_ndx = 0;
+	int cnt = 0;
+
+	pr_debug("releasing all resources for fb%d file:%pK\n",
+		mfd->index, file);
+
+	mutex_lock(&mdp5_data->ov_lock);
+	mutex_lock(&mdp5_data->list_lock);
+	/* count pending cleanup work so the caller triggers a commit */
+	if (!mfd->ref_cnt && !list_empty(&mdp5_data->pipes_cleanup)) {
+		pr_debug("fb%d:: free pipes present in cleanup list",
+			mfd->index);
+		cnt++;
+	}
+
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (!file || pipe->file == file) {
+			unset_ndx |= pipe->ndx;
+			pipe->file = NULL;
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+			cnt++;
+		}
+	}
+
+	pr_debug("mfd->ref_cnt=%d unset_ndx=0x%x cnt=%d\n",
+		mfd->ref_cnt, unset_ndx, cnt);
+
+	mutex_unlock(&mdp5_data->list_lock);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return cnt;
+}
+
+/*
+ * Queue a source buffer onto a previously-set overlay pipe: look up the
+ * pipe by id, allocate a buffer slot, and validate/attach the dma-buf
+ * described by @req->data.
+ *
+ * Returns 0 on success or a negative errno (-ENODEV for a missing/dirty
+ * pipe, -ENOMEM or the validation error for buffer failures).
+ */
+static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
+	struct msmfb_overlay_data *req)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_data *src_data;
+	/*
+	 * Zero-initialize: only width/height/format are filled below, and
+	 * the remaining fields must not be stack garbage when the struct
+	 * is handed to the buffer validation code.
+	 */
+	struct mdp_layer_buffer buffer = {0};
+	int ret;
+	u32 flags;
+
+	pipe = __overlay_find_pipe(mfd, req->id);
+	if (!pipe) {
+		pr_err("pipe ndx=%x doesn't exist\n", req->id);
+		return -ENODEV;
+	}
+
+	if (pipe->dirty) {
+		pr_warn("dirty pipe, will not queue pipe pnum=%d\n", pipe->num);
+		return -ENODEV;
+	}
+
+	ret = mdss_mdp_pipe_map(pipe);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("Unable to map used pipe%d ndx=%x\n",
+			pipe->num, pipe->ndx);
+		return ret;
+	}
+
+	pr_debug("ov queue pnum=%d\n", pipe->num);
+
+	if (pipe->flags & MDP_SOLID_FILL)
+		pr_warn("Unexpected buffer queue to a solid fill pipe\n");
+
+	/* carry the pipe's security attributes over to the buffer import */
+	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+		MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+
+	mutex_lock(&mdp5_data->list_lock);
+	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	if (!src_data) {
+		pr_err("unable to allocate source buffer\n");
+		ret = -ENOMEM;
+	} else {
+		buffer.width = pipe->img_width;
+		buffer.height = pipe->img_height;
+		buffer.format = pipe->src_fmt->format;
+		ret = mdss_mdp_data_get_and_validate_size(src_data, &req->data,
+			1, flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
+			&buffer);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			mdss_mdp_overlay_buf_free(mfd, src_data);
+			pr_err("src_data pmem error\n");
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	mdss_mdp_pipe_unmap(pipe);
+
+	return ret;
+}
+
+/*
+ * Ioctl-level play: queue a buffer to the pipe identified by @req->id,
+ * or enable borderfill (freeing the base framebuffer pipes) for the
+ * BORDERFILL_NDX pseudo-id.
+ *
+ * Returns 0 on success, -EPERM when the panel is off, -EINTR from the
+ * interruptible lock, or the queue/free error.
+ */
+static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
+	struct msmfb_overlay_data *req)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int ret = 0;
+
+	pr_debug("play req id=%x\n", req->id);
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	if (req->id == BORDERFILL_NDX) {
+		pr_debug("borderfill enable\n");
+		mdp5_data->borderfill_enable = true;
+		ret = mdss_mdp_overlay_free_fb_pipe(mfd);
+	} else {
+		ret = mdss_mdp_overlay_queue(mfd, req);
+	}
+
+done:
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/*
+ * Release the base-stage framebuffer pipes (left and right mixers) so
+ * they no longer scan out the legacy fb memory, e.g. when borderfill or
+ * overlay-only composition takes over. Always returns 0.
+ */
+static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_pipe *pipe;
+	u32 fb_ndx = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
+		MDSS_MDP_MIXER_MUX_LEFT, MDSS_MDP_STAGE_BASE, false);
+	if (pipe)
+		fb_ndx |= pipe->ndx;
+
+	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
+		MDSS_MDP_MIXER_MUX_RIGHT, MDSS_MDP_STAGE_BASE, false);
+	if (pipe)
+		fb_ndx |= pipe->ndx;
+
+	if (fb_ndx) {
+		pr_debug("unstaging framebuffer pipes %x\n", fb_ndx);
+		mdss_mdp_overlay_release(mfd, fb_ndx);
+	}
+	return 0;
+}
+
+/*
+ * Get (or lazily allocate) the base-stage framebuffer pipe for the given
+ * mixer mux. When no pipe is staged, a new base pipe is set up whose
+ * source/destination rects are derived from the fb geometry, honoring
+ * split-LM layouts and 180-degree rotation.
+ *
+ * @ppipe:          out: the staged or newly created pipe
+ * @pipe_allocated: out: true when a new pipe was created by this call
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_pipe **ppipe,
+	int mixer_mux, bool *pipe_allocated)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	int ret = 0;
+	struct mdp_overlay *req = NULL;
+
+	*pipe_allocated = false;
+	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl, mixer_mux,
+		MDSS_MDP_STAGE_BASE, false);
+
+	if (pipe == NULL) {
+		struct fb_info *fbi = mfd->fbi;
+		struct mdss_mdp_mixer *mixer;
+		int bpp;
+		bool rotate_180 = (fbi->var.rotate == FB_ROTATE_UD);
+		struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+		bool split_lm = (fbi->var.xres > mdata->max_mixer_width ||
+			is_split_lm(mfd));
+		struct mdp_rect left_rect, right_rect;
+
+		mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
+					MDSS_MDP_MIXER_MUX_LEFT);
+		if (!mixer) {
+			pr_err("unable to retrieve mixer\n");
+			return -ENODEV;
+		}
+
+		/* kzalloc is the idiomatic zeroed single-object allocation */
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+
+		bpp = fbi->var.bits_per_pixel / 8;
+		req->id = MSMFB_NEW_REQUEST;
+		req->src.format = mfd->fb_imgType;
+		req->src.height = fbi->var.yres;
+		req->src.width = fbi->fix.line_length / bpp;
+
+		left_rect.x = 0;
+		left_rect.w = MIN(fbi->var.xres, mixer->width);
+		left_rect.y = 0;
+		left_rect.h = req->src.height;
+
+		right_rect.x = mixer->width;
+		right_rect.w = fbi->var.xres - mixer->width;
+		right_rect.y = 0;
+		right_rect.h = req->src.height;
+
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
+			if (req->src.width <= mixer->width) {
+				pr_warn("right fb pipe not needed\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			req->src_rect = req->dst_rect = right_rect;
+			/* flipped 180: right output fetches the left half */
+			if (split_lm && rotate_180)
+				req->src_rect = left_rect;
+		} else {
+			req->src_rect = req->dst_rect = left_rect;
+			if (split_lm && rotate_180)
+				req->src_rect = right_rect;
+		}
+
+		req->z_order = MDSS_MDP_STAGE_BASE;
+		if (rotate_180)
+			req->flags |= (MDP_FLIP_LR | MDP_FLIP_UD);
+
+		pr_debug("allocating base pipe mux=%d\n", mixer_mux);
+
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL,
+			false);
+		if (ret)
+			goto done;
+
+		*pipe_allocated = true;
+	}
+	pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);
+
+	*ppipe = pipe;
+
+done:
+	kfree(req);
+	return ret;
+}
+
+/*
+ * mdss_mdp_overlay_pan_display() - push the current framebuffer to the panel
+ * @mfd: framebuffer device to update
+ *
+ * Legacy fb pan_display path: allocates (or reuses) the base pipe(s) for the
+ * left — and, for split-LM or wide panels, right — mixer, wires the fb memory
+ * (mfd->fbmem_buf at the pan x/y offset) into the pipe buffers and kicks off
+ * a frame when FB_ACTIVATE_VBL/FORCE is set. Writeback panels are rejected.
+ */
+static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_data *buf_l = NULL, *buf_r = NULL;
+	struct mdss_mdp_pipe *l_pipe, *r_pipe, *pipe, *tmp;
+	struct fb_info *fbi;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	u32 offset;
+	int bpp, ret;
+	bool l_pipe_allocated = false, r_pipe_allocated = false;
+
+	if (!mfd || !mfd->mdp.private1)
+		return;
+
+	mdata = mfd_to_mdata(mfd);
+	fbi = mfd->fbi;
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return;
+
+	/*
+	 * Ignore writeback updates through pan_display as output
+	 * buffer is not available.
+	 */
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		pr_err_once("writeback update not supported through pan display\n");
+		return;
+	}
+
+	/* No fb memory (or borderfill): just kick off / clean up and bail. */
+	if (IS_ERR_OR_NULL(mfd->fbmem_buf) || fbi->fix.smem_len == 0 ||
+		mdp5_data->borderfill_enable) {
+		if (mdata->handoff_pending) {
+			/*
+			 * Move pipes to cleanup queue and avoid kickoff if
+			 * pan display is called before handoff is completed.
+			 */
+			mutex_lock(&mdp5_data->list_lock);
+			list_for_each_entry_safe(pipe, tmp,
+					&mdp5_data->pipes_used, list) {
+				list_move(&pipe->list,
+						&mdp5_data->pipes_cleanup);
+			}
+			mutex_unlock(&mdp5_data->list_lock);
+		}
+		mfd->mdp.kickoff_fnc(mfd, NULL);
+		return;
+	}
+
+	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
+		return;
+
+	/* Allow updates while powered off only for DCM-enter on cmd panels. */
+	if ((mdss_fb_is_power_off(mfd)) &&
+		!((mfd->dcm_state == DCM_ENTER) &&
+		(mfd->panel.type == MIPI_CMD_PANEL))) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/* Byte offset of the pan origin inside the fb memory. */
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+		       offset, fbi->fix.smem_len);
+		goto clk_disable;
+	}
+
+	/*
+	 * NOTE(review): a failure here jumps to iommu_disable, which calls
+	 * mdss_iommu_ctrl(0) even though mdss_iommu_ctrl(1) has not been
+	 * called yet on this path — confirm the attach refcount tolerates
+	 * the unbalanced decrement.
+	 */
+	ret = mdss_mdp_overlay_get_fb_pipe(mfd, &l_pipe,
+		MDSS_MDP_MIXER_MUX_LEFT, &l_pipe_allocated);
+	if (ret) {
+		pr_err("unable to allocate base pipe\n");
+		goto iommu_disable;
+	}
+
+	if (mdss_mdp_pipe_map(l_pipe)) {
+		pr_err("unable to map base pipe\n");
+		goto pipe_release;
+	}
+
+	/*
+	 * NOTE(review): failures below leave l_pipe mapped (unmap happens
+	 * only after buf_l setup) — verify the cleanup paths drop the map
+	 * reference elsewhere.
+	 */
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto clk_disable;
+	}
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_err("IOMMU attach failed\n");
+		goto clk_disable;
+	}
+
+	buf_l = __mdp_overlay_buf_alloc(mfd, l_pipe);
+	if (!buf_l) {
+		pr_err("unable to allocate memory for fb buffer\n");
+		mdss_mdp_pipe_unmap(l_pipe);
+		goto pipe_release;
+	}
+
+	/* Point plane 0 at the fb dma-buf; address resolved at map time. */
+	buf_l->p[0].srcp_table = mfd->fb_table;
+	buf_l->p[0].srcp_dma_buf = mfd->fbmem_buf;
+	buf_l->p[0].len = 0;
+	buf_l->p[0].addr = 0;
+	buf_l->p[0].offset = offset;
+	buf_l->p[0].skip_detach = true;
+	buf_l->p[0].mapped = false;
+	buf_l->num_planes = 1;
+
+	mdss_mdp_pipe_unmap(l_pipe);
+
+	if (fbi->var.xres > mdata->max_pipe_width || is_split_lm(mfd)) {
+		/*
+		 * TODO: Need to revisit the function for panels with width more
+		 * than max_pipe_width and less than max_mixer_width.
+		 */
+		ret = mdss_mdp_overlay_get_fb_pipe(mfd, &r_pipe,
+			MDSS_MDP_MIXER_MUX_RIGHT, &r_pipe_allocated);
+		if (ret) {
+			pr_err("unable to allocate right base pipe\n");
+			goto pipe_release;
+		}
+
+		if (mdss_mdp_pipe_map(r_pipe)) {
+			pr_err("unable to map right base pipe\n");
+			goto pipe_release;
+		}
+
+		buf_r = __mdp_overlay_buf_alloc(mfd, r_pipe);
+		if (!buf_r) {
+			pr_err("unable to allocate memory for fb buffer\n");
+			mdss_mdp_pipe_unmap(r_pipe);
+			goto pipe_release;
+		}
+
+		/* Right pipe shares the same fb plane as the left. */
+		buf_r->p[0] = buf_l->p[0];
+		buf_r->num_planes = 1;
+
+		mdss_mdp_pipe_unmap(r_pipe);
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	if ((fbi->var.activate & FB_ACTIVATE_VBL) ||
+	    (fbi->var.activate & FB_ACTIVATE_FORCE))
+		mfd->mdp.kickoff_fnc(mfd, NULL);
+
+	mdss_iommu_ctrl(0);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return;
+
+pipe_release:
+	if (r_pipe_allocated)
+		mdss_mdp_overlay_release(mfd, r_pipe->ndx);
+	if (buf_l)
+		__mdp_overlay_buf_free(mfd, buf_l);
+	if (l_pipe_allocated)
+		mdss_mdp_overlay_release(mfd, l_pipe->ndx);
+iommu_disable:
+	mdss_iommu_ctrl(0);
+clk_disable:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	mutex_unlock(&mdp5_data->ov_lock);
+}
+
+/*
+ * Deferred work: detach the underrun-recovery vsync handler that
+ * mdss_mdp_recover_underrun_handler() scheduled. Runs in process
+ * context so MDP clocks can be gated around the removal.
+ */
+static void remove_underrun_vsync_handler(struct work_struct *work)
+{
+	struct mdss_mdp_ctl *ctl = container_of(work, typeof(*ctl),
+			remove_underrun_handler);
+	int ret;
+
+	if (!ctl || !ctl->ops.remove_vsync_handler) {
+		pr_err("ctl or vsync handler is NULL\n");
+		return;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ret = ctl->ops.remove_vsync_handler(ctl,
+			&ctl->recover_underrun_handler);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * Underrun-recovery vsync callback: reset the ctl hardware and defer
+ * removal of this handler to process context via workqueue.
+ */
+static void mdss_mdp_recover_underrun_handler(struct mdss_mdp_ctl *ctl,
+		ktime_t t)
+{
+	if (!ctl) {
+		pr_err("ctl is NULL\n");
+		return;
+	}
+
+	mdss_mdp_ctl_reset(ctl, true);
+	schedule_work(&ctl->remove_underrun_handler);
+}
+
+/*
+ * Vsync callback used while deterministic frame-rate control owns frame
+ * pacing: intentionally does nothing beyond tracing, so vsync stays on.
+ */
+static void mdss_mdp_overlay_frc_handler(struct mdss_mdp_ctl *ctl,
+		ktime_t t)
+{
+	pr_debug("vsync on ctl%d vsync_cnt=%d\n", ctl->num, ctl->vsync_cnt);
+}
+
+/*
+ * Vsync ISR callback — keep this minimal: record the timestamp and
+ * wake sysfs pollers on the vsync_event node.
+ */
+static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
+		ktime_t t)
+{
+	struct msm_fb_data_type *mfd;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!ctl) {
+		pr_err("ctl is NULL\n");
+		return;
+	}
+
+	mfd = ctl->mfd;
+	if (!mfd || !mfd->mdp.private1) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data is NULL\n");
+		return;
+	}
+
+	pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
+
+	mdp5_data->vsync_time = t;
+	sysfs_notify_dirent(mdp5_data->vsync_event_sd);
+}
+
+/*
+ * Lineptr ISR callback — keep this minimal: record the timestamp and
+ * wake sysfs pollers on the lineptr_event node.
+ */
+static void mdss_mdp_overlay_handle_lineptr(struct mdss_mdp_ctl *ctl,
+		ktime_t t)
+{
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!ctl || !ctl->mfd) {
+		pr_warn("Invalid handle for lineptr\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data is NULL\n");
+		return;
+	}
+
+	pr_debug("lineptr irq on fb%d play_cnt=%d\n",
+			ctl->mfd->index, ctl->play_cnt);
+
+	mdp5_data->lineptr_time = t;
+	sysfs_notify_dirent(mdp5_data->lineptr_event_sd);
+}
+
+/*
+ * mdss_mdp_overlay_vsync_ctrl() - register or unregister the vsync handler
+ * @mfd: framebuffer device
+ * @en:  non-zero to add the handler, zero to remove it
+ *
+ * Requires the ctl to be powered (or cont-splash active) and not in ULP.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int rc;
+
+	if (!ctl)
+		return -ENODEV;
+
+	mutex_lock(&mdp5_data->ov_lock);
+	if (!ctl->ops.add_vsync_handler || !ctl->ops.remove_vsync_handler) {
+		pr_err_once("fb%d vsync handlers are not registered\n",
+				mfd->index);
+		rc = -EOPNOTSUPP;
+		goto end;
+	}
+
+	/* Without cont-splash, the ctl must be on and not in ULP. */
+	if (!ctl->panel_data->panel_info.cont_splash_enabled
+			&& (!mdss_mdp_ctl_is_power_on(ctl) ||
+			mdss_panel_is_power_on_ulp(ctl->power_state))) {
+		pr_debug("fb%d vsync pending first update en=%d, ctl power state:%d\n",
+				mfd->index, en, ctl->power_state);
+		rc = -EPERM;
+		goto end;
+	}
+
+	pr_debug("fb%d vsync en=%d\n", mfd->index, en);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	rc = en ? ctl->ops.add_vsync_handler(ctl, &ctl->vsync_handler) :
+		  ctl->ops.remove_vsync_handler(ctl, &ctl->vsync_handler);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+end:
+	mutex_unlock(&mdp5_data->ov_lock);
+	return rc;
+}
+
+/*
+ * sysfs read for "dynamic_fps": report the current MIPI frame rate.
+ * Returns 0 (empty read) while the panel is off.
+ */
+static ssize_t dynamic_fps_sysfs_rda_dfps(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_panel_data *pdata;
+	ssize_t len;
+
+	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl))
+		return 0;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected for fb%d\n", mfd->index);
+		return -ENODEV;
+	}
+
+	/* Read frame_rate under dfps_lock to stay consistent with writers. */
+	mutex_lock(&mdp5_data->dfps_lock);
+	len = snprintf(buf, PAGE_SIZE, "%d\n",
+			pdata->panel_info.mipi.frame_rate);
+	pr_debug("%s: '%d'\n", __func__,
+			pdata->panel_info.mipi.frame_rate);
+	mutex_unlock(&mdp5_data->dfps_lock);
+
+	return len;
+} /* dynamic_fps_sysfs_rda_dfps */
+
+/*
+ * Extra blanking needed to reach @new_fps: lines for the VFP method,
+ * pixels for the HFP method — scaled from the saved total timing.
+ */
+static int calc_extra_blanking(struct mdss_panel_data *pdata, u32 new_fps)
+{
+	int delta = abs(pdata->panel_info.default_fps - new_fps);
+
+	return mult_frac(pdata->panel_info.saved_total, delta, new_fps);
+}
+
+/*
+ * Snapshot the panel's original fps and porch values before the first
+ * dfps update. default_fps doubles as the "already cached" flag, so
+ * subsequent calls are no-ops and later math keeps full precision.
+ */
+static void cache_initial_timings(struct mdss_panel_data *pdata)
+{
+	struct mdss_panel_info *pinfo = &pdata->panel_info;
+
+	if (pinfo->default_fps)
+		return;
+
+	/* current_fps tracks the hw rate and changes on every update. */
+	pinfo->current_fps =
+		mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_DEFAULT);
+
+	/* default_fps stays fixed at the pre-dfps rate. */
+	pinfo->default_fps =
+		mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_DEFAULT);
+
+	if (pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
+		pinfo->saved_total = mdss_panel_get_vtotal(pinfo);
+		pinfo->saved_fporch = pinfo->lcdc.v_front_porch;
+	} else if (pinfo->dfps_update ==
+			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+		   pinfo->dfps_update ==
+			DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
+		   pinfo->dfps_update ==
+			DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+		pinfo->saved_total = mdss_panel_get_htotal(pinfo, true);
+		pinfo->saved_fporch = pinfo->lcdc.h_front_porch;
+	}
+}
+
+/* Store the new frame rate in the field appropriate for the panel type. */
+static inline void dfps_update_fps(struct mdss_panel_info *pinfo, u32 fps)
+{
+	if (pinfo->type != DTV_PANEL)
+		pinfo->mipi.frame_rate = fps;
+	else
+		pinfo->lcdc.frame_rate = fps;
+}
+
+/*
+ * Apply a dynamic-fps request to the panel timing, dispatching on the
+ * panel's configured dfps update mode:
+ *  - VFP/HFP porch modes stretch or shrink the front porch;
+ *  - CLK_HFP modes take porches (and, where applicable, clock) verbatim
+ *    from userspace;
+ *  - anything else just reprograms fps and pixel clock.
+ */
+static void dfps_update_panel_params(struct mdss_panel_data *pdata,
+	struct dynamic_fps_data *data)
+{
+	struct mdss_panel_info *pinfo = &pdata->panel_info;
+	u32 new_fps = data->fps;
+
+	/* Snapshot original timing before the first update. */
+	cache_initial_timings(pdata);
+
+	switch (pinfo->dfps_update) {
+	case DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP: {
+		/* Stretch the vertical front porch to hit the new rate. */
+		int add_v_lines = calc_extra_blanking(pdata, new_fps);
+
+		pinfo->lcdc.v_front_porch =
+			pinfo->saved_fporch + add_v_lines;
+
+		dfps_update_fps(pinfo, new_fps);
+
+		/* Porch change moves the prefetch window. */
+		pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo);
+		break;
+	}
+	case DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP: {
+		/* Grow the hfp to slow down, shrink it to speed up. */
+		int add_h_pixels = calc_extra_blanking(pdata, new_fps);
+
+		if (pinfo->default_fps > new_fps)
+			pinfo->lcdc.h_front_porch =
+				pinfo->saved_fporch + add_h_pixels;
+		else
+			pinfo->lcdc.h_front_porch =
+				pinfo->saved_fporch - add_h_pixels;
+
+		dfps_update_fps(pinfo, new_fps);
+		break;
+	}
+	case DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP:
+		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
+			data->hfp, data->hbp, data->hpw,
+			data->clk_rate, data->fps);
+
+		/* Userspace supplies the full horizontal timing. */
+		pinfo->lcdc.h_front_porch = data->hfp;
+		pinfo->lcdc.h_back_porch = data->hbp;
+		pinfo->lcdc.h_pulse_width = data->hpw;
+
+		pinfo->clk_rate = data->clk_rate;
+		if (pinfo->type == DTV_PANEL)
+			pinfo->clk_rate *= 1000;
+
+		dfps_update_fps(pinfo, new_fps);
+		break;
+	case DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK:
+		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
+			data->hfp, data->hbp, data->hpw,
+			data->clk_rate, data->fps);
+
+		pinfo->lcdc.h_front_porch = data->hfp;
+		pinfo->lcdc.h_back_porch = data->hbp;
+		pinfo->lcdc.h_pulse_width = data->hpw;
+
+		pinfo->clk_rate = data->clk_rate;
+
+		dfps_update_fps(pinfo, new_fps);
+		mdss_panel_update_clk_rate(pinfo, new_fps);
+		break;
+	default:
+		dfps_update_fps(pinfo, new_fps);
+		mdss_panel_update_clk_rate(pinfo, new_fps);
+		break;
+	}
+}
+
+/*
+ * mdss_mdp_dfps_update_params() - validate and apply a dynamic-fps request
+ * @mfd:       framebuffer device
+ * @pdata:     primary panel data (pdata->next, if any, is updated too)
+ * @dfps_data: requested fps and optional porch/clock values
+ *
+ * Requests below min_fps are rejected with -EINVAL; requests above
+ * max_fps are clamped (and written back into @dfps_data). On success
+ * the fb var screeninfo is refreshed from the new panel timing.
+ */
+int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
+	struct mdss_panel_data *pdata, struct dynamic_fps_data *dfps_data)
+{
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 dfps = dfps_data->fps;
+
+	mutex_lock(&mdp5_data->dfps_lock);
+
+	pr_debug("new_fps:%d\n", dfps);
+
+	if (dfps < pdata->panel_info.min_fps) {
+		pr_err("Unsupported FPS. min_fps = %d\n",
+				pdata->panel_info.min_fps);
+		mutex_unlock(&mdp5_data->dfps_lock);
+		return -EINVAL;
+	}
+
+	if (dfps > pdata->panel_info.max_fps) {
+		pr_warn("Unsupported FPS. Configuring to max_fps = %d\n",
+				pdata->panel_info.max_fps);
+		dfps = pdata->panel_info.max_fps;
+		dfps_data->fps = dfps;
+	}
+
+	dfps_update_panel_params(pdata, dfps_data);
+	if (pdata->next)
+		dfps_update_panel_params(pdata->next, dfps_data);
+
+	/*
+	 * Update the panel info in the upstream
+	 * data, so any further call to get the screen
+	 * info has the updated timings.
+	 */
+	mdss_panelinfo_to_fb_var(&pdata->panel_info, var);
+
+	MDSS_XLOG(dfps);
+	mutex_unlock(&mdp5_data->dfps_lock);
+
+	return 0;
+}
+
+
+/*
+ * sysfs write for "dynamic_fps": parse either a bare fps value or, for
+ * the CLK_HFP multi-update modes, "hfp hbp hpw clk fps", validate the
+ * values and hand them to mdss_mdp_dfps_update_params(). Writes while
+ * the panel is off are silently accepted.
+ */
+static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int panel_fps, rc = 0;
+	struct mdss_panel_data *pdata;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct dynamic_fps_data data = {0};
+
+	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+		pr_debug("panel is off\n");
+		return count;
+	}
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected for fb%d\n", mfd->index);
+		return -ENODEV;
+	}
+
+	if (!pdata->panel_info.dynamic_fps) {
+		pr_err_once("%s: Dynamic fps not enabled for this panel\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (pdata->panel_info.dfps_update ==
+		DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
+		pdata->panel_info.dfps_update ==
+		DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+		if (sscanf(buf, "%u %u %u %u %u",
+		    &data.hfp, &data.hbp, &data.hpw,
+		    &data.clk_rate, &data.fps) != 5) {
+			pr_err("could not read input\n");
+			return -EINVAL;
+		}
+	} else {
+		/*
+		 * NOTE(review): the sscanf branch above parses fps with %u,
+		 * which suggests data.fps is unsigned, while kstrtoint takes
+		 * an int * — confirm the field type in dynamic_fps_data.
+		 */
+		rc = kstrtoint(buf, 10, &data.fps);
+		if (rc) {
+			pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+			return rc;
+		}
+	}
+
+	panel_fps = mdss_panel_get_framerate(&pdata->panel_info,
+			FPS_RESOLUTION_DEFAULT);
+
+	/* No-op write: fps already matches the panel. */
+	if (data.fps == panel_fps) {
+		pr_debug("%s: FPS is already %d\n",
+			__func__, data.fps);
+		return count;
+	}
+
+	/*
+	 * NOTE(review): bounds are checked only after the early-return
+	 * equality check above — harmless, but confirm that is intended.
+	 */
+	if (data.hfp > DFPS_DATA_MAX_HFP || data.hbp > DFPS_DATA_MAX_HBP ||
+		data.hpw > DFPS_DATA_MAX_HPW || data.fps > DFPS_DATA_MAX_FPS ||
+		data.clk_rate > DFPS_DATA_MAX_CLK_RATE){
+		pr_err("Data values out of bound.\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_mdp_dfps_update_params(mfd, pdata, &data);
+	if (rc) {
+		pr_err("failed to set dfps params\n");
+		return rc;
+	}
+
+	return count;
+} /* dynamic_fps_sysfs_wta_dfps */
+
+
+/* sysfs node "dynamic_fps" (rw): read/write the panel frame rate. */
+static DEVICE_ATTR(dynamic_fps, 0644, dynamic_fps_sysfs_rda_dfps,
+	dynamic_fps_sysfs_wta_dfps);
+
+/* Attribute group registered on the fb device when dfps is supported. */
+static struct attribute *dynamic_fps_fs_attrs[] = {
+	&dev_attr_dynamic_fps.attr,
+	NULL,
+};
+static struct attribute_group dynamic_fps_fs_attrs_group = {
+	.attrs = dynamic_fps_fs_attrs,
+};
+
+/*
+ * sysfs read for "vsync_event": report the last vsync timestamp in ns.
+ * Returns -EAGAIN while the ctl is off and cont-splash is not active.
+ */
+static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u64 ticks;
+
+	if (!mdp5_data->ctl ||
+		(!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
+		return -EAGAIN;
+
+	ticks = ktime_to_ns(mdp5_data->vsync_time);
+	pr_debug("fb%d vsync=%llu\n", mfd->index, ticks);
+
+	return scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", ticks);
+}
+
+/*
+ * sysfs read for "lineptr_event": report the last lineptr-irq timestamp
+ * in ns. Returns -EPERM while the ctl is off and cont-splash is not
+ * active.
+ */
+static ssize_t mdss_mdp_lineptr_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u64 ticks;
+
+	if (!mdp5_data->ctl ||
+		(!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
+		return -EPERM;
+
+	ticks = ktime_to_ns(mdp5_data->lineptr_time);
+	pr_debug("fb%d lineptr=%llu\n", mfd->index, ticks);
+
+	return scnprintf(buf, PAGE_SIZE, "LINEPTR=%llu\n", ticks);
+}
+
+/*
+ * sysfs read for "lineptr_value": report the configured write-pointer
+ * irq line. Returns -EPERM while the ctl is off and cont-splash is not
+ * active.
+ */
+static ssize_t mdss_mdp_lineptr_show_value(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data->ctl ||
+		(!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			mfd->panel_info->te.wr_ptr_irq);
+}
+
+/*
+ * sysfs write for "lineptr_value": program a new write-pointer irq line
+ * (0..yres) and push it to hardware via update_lineptr when supported.
+ */
+static ssize_t mdss_mdp_lineptr_set_value(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	int rc, value;
+
+	if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(ctl)))
+		return -EAGAIN;
+
+	rc = kstrtoint(buf, 10, &value);
+	if (rc || value < 0 || value > mfd->panel_info->yres) {
+		pr_err("Invalid input for lineptr\n");
+		return -EINVAL;
+	}
+
+	if (!mdss_mdp_is_lineptr_supported(ctl)) {
+		pr_err("lineptr not supported\n");
+		return -ENOTSUPP;
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+	mfd->panel_info->te.wr_ptr_irq = value;
+	if (ctl && ctl->ops.update_lineptr)
+		ctl->ops.update_lineptr(ctl, true);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return count;
+}
+
+/* sysfs read for "bl_event": backlight update event count. */
+static ssize_t mdss_mdp_bl_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->bl_events);
+}
+
+/* sysfs read for "hist_event": histogram interrupt event count. */
+static ssize_t mdss_mdp_hist_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->hist_events);
+}
+
+/* sysfs read for "ad_event": assertive-display event count. */
+static ssize_t mdss_mdp_ad_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_events);
+}
+
+/* sysfs read for "ad_bl_event": AD backlight event count. */
+static ssize_t mdss_mdp_ad_bl_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_bl_events);
+}
+
+/*
+ * mdss_mdp_ad_is_supported() - check whether assertive display can run
+ * on the mixers attached to this fb.
+ *
+ * Returns 1 when both attached mixers (if present) fall within the
+ * AD-capable range (num <= nad_cfgs), 0 otherwise.
+ *
+ * Fix: the original nested "if (!mixer)" inside "if (mixer && ...)" was
+ * dead code, so the missing-mixer warning could never fire; it is now a
+ * reachable branch (return values are unchanged in every case).
+ */
+static inline int mdss_mdp_ad_is_supported(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl) {
+		pr_debug("there is no ctl attached to fb\n");
+		return 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_warn("there is no mixer attached to fb\n");
+	} else if (mixer->num > ctl->mdata->nad_cfgs) {
+		pr_debug("mixer attached (%d) doesn't support ad\n",
+			 mixer->num);
+		return 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+	if (mixer && (mixer->num > ctl->mdata->nad_cfgs))
+		return 0;
+
+	return 1;
+}
+
+/* sysfs read for "ad": AD state, or -1 when AD is unsupported. */
+static ssize_t mdss_mdp_ad_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int state = -1;
+
+	if (mdss_mdp_ad_is_supported(mfd))
+		state = mdp5_data->ad_state;
+
+	return scnprintf(buf, PAGE_SIZE, "%d", state);
+}
+
+/* sysfs write for "ad": record the new AD state and notify pollers. */
+static ssize_t mdss_mdp_ad_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int ad;
+
+	if (kstrtoint(buf, 10, &ad)) {
+		pr_err("Invalid input for ad\n");
+		return -EINVAL;
+	}
+
+	mdp5_data->ad_state = ad;
+	sysfs_notify(&dev->kobj, NULL, "ad");
+
+	return count;
+}
+
+/* sysfs read for "dyn_pu": partial-update state, -1 when unset. */
+static ssize_t mdss_mdp_dyn_pu_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int state = -1;
+
+	if (mdp5_data->dyn_pu_state >= 0)
+		state = mdp5_data->dyn_pu_state;
+
+	return scnprintf(buf, PAGE_SIZE, "%d", state);
+}
+
+/* sysfs write for "dyn_pu": set partial-update state, notify pollers. */
+static ssize_t mdss_mdp_dyn_pu_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int dyn_pu;
+	int rc = kstrtoint(buf, 10, &dyn_pu);
+
+	if (rc) {
+		pr_err("Invalid input for partial update: ret = %d\n", rc);
+		return rc;
+	}
+
+	mdp5_data->dyn_pu_state = dyn_pu;
+	sysfs_notify(&dev->kobj, NULL, "dyn_pu");
+
+	return count;
+}
+/*
+ * sysfs read for "msm_cmd_autorefresh_en": report the current
+ * autorefresh frame count. Only command-mode panels support this.
+ *
+ * Fix: use scnprintf() like every other sysfs show handler in this
+ * file — snprintf() can report a length larger than PAGE_SIZE, which
+ * is not a valid sysfs return.
+ */
+static ssize_t mdss_mdp_cmd_autorefresh_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
+		pr_err("Panel doesn't support autorefresh\n");
+		ret = -EINVAL;
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n",
+			mdss_mdp_ctl_cmd_get_autorefresh(ctl));
+	}
+	return ret;
+}
+
+/*
+ * sysfs write for "msm_cmd_autorefresh_en": program the autorefresh
+ * frame count (0 disables) and retune the sync-pt thresholds so fence
+ * signalling matches hardware-driven vs. software-driven updates.
+ */
+static ssize_t mdss_mdp_cmd_autorefresh_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_mdp_ctl *ctl;
+	int frame_cnt;
+	int rc;
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
+		pr_err("Panel doesn't support autorefresh\n");
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 10, &frame_cnt);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_mdp_ctl_cmd_set_autorefresh(ctl, frame_cnt);
+	if (rc) {
+		pr_err("cmd_set_autorefresh failed, rc=%d, frame_cnt=%d\n",
+				rc, frame_cnt);
+		return rc;
+	}
+
+	if (frame_cnt) {
+		/* enable/reconfig autorefresh */
+		mfd->mdp_sync_pt_data.threshold = 2;
+		mfd->mdp_sync_pt_data.retire_threshold = 0;
+	} else {
+		/* disable autorefresh */
+		mfd->mdp_sync_pt_data.threshold = 1;
+		mfd->mdp_sync_pt_data.retire_threshold = 1;
+	}
+
+	pr_debug("setting cmd autorefresh to cnt=%d\n", frame_cnt);
+
+	return len;
+}
+
+
+/* sysfs read: dump the last MISR (CRC) values captured in batch mode. */
+static ssize_t mdss_mdp_misr_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	return mdss_dump_misr_data(&buf, PAGE_SIZE);
+}
+
+/*
+ * Enable crc batch mode. By enabling this mode through sysfs
+ * driver will keep collecting the misr in ftrace during interrupts,
+ * until disabled.
+ *
+ * Fixes: zero-initialize the mdp_misr requests so mdss_misr_set() /
+ * mdss_misr_disable() never see uninitialized stack fields beyond the
+ * three explicitly set here; correct the "fo this fb" typo in the
+ * unsupported-interface error message.
+ */
+static ssize_t mdss_mdp_misr_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	int enable_misr, rc;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl;
+	struct mdp_misr req = {0}, sreq = {0};
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 10, &enable_misr);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	req.block_id = DISPLAY_MISR_MAX;
+	sreq.block_id = DISPLAY_MISR_MAX;
+
+	pr_debug("intf_type:%d enable:%d\n", ctl->intf_type, enable_misr);
+	if (ctl->intf_type == MDSS_INTF_DSI) {
+		req.block_id = DISPLAY_MISR_DSI0;
+		req.crc_op_mode = MISR_OP_BM;
+		req.frame_count = 1;
+		/* Split panels capture on both DSI interfaces. */
+		if (is_panel_split(mfd)) {
+			sreq.block_id = DISPLAY_MISR_DSI1;
+			sreq.crc_op_mode = MISR_OP_BM;
+			sreq.frame_count = 1;
+		}
+	} else if (ctl->intf_type == MDSS_INTF_HDMI) {
+		req.block_id = DISPLAY_MISR_HDMI;
+		req.crc_op_mode = MISR_OP_BM;
+		req.frame_count = 1;
+	} else {
+		pr_err("misr not supported for this fb:%d\n", mfd->index);
+		return -ENODEV;
+	}
+
+	if (enable_misr) {
+		mdss_misr_set(mdata, &req, ctl);
+
+		if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
+			mdss_misr_set(mdata, &sreq, ctl);
+
+	} else {
+		mdss_misr_disable(mdata, &req, ctl);
+
+		if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
+			mdss_misr_disable(mdata, &sreq, ctl);
+	}
+
+	pr_debug("misr %s\n", enable_misr ? "enabled" : "disabled");
+
+	return len;
+}
+
+/* sysfs nodes exposed on the fb device for overlay debug/control. */
+static DEVICE_ATTR(msm_misr_en, 0644,
+	mdss_mdp_misr_show, mdss_mdp_misr_store);
+static DEVICE_ATTR(msm_cmd_autorefresh_en, 0644,
+	mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store);
+static DEVICE_ATTR(vsync_event, 0444, mdss_mdp_vsync_show_event, NULL);
+static DEVICE_ATTR(lineptr_event, 0444, mdss_mdp_lineptr_show_event, NULL);
+static DEVICE_ATTR(lineptr_value, 0664,
+	mdss_mdp_lineptr_show_value, mdss_mdp_lineptr_set_value);
+static DEVICE_ATTR(ad, 0664, mdss_mdp_ad_show,
+	mdss_mdp_ad_store);
+static DEVICE_ATTR(dyn_pu, 0664, mdss_mdp_dyn_pu_show,
+	mdss_mdp_dyn_pu_store);
+static DEVICE_ATTR(hist_event, 0444, mdss_mdp_hist_show_event, NULL);
+static DEVICE_ATTR(bl_event, 0444, mdss_mdp_bl_show_event, NULL);
+static DEVICE_ATTR(ad_event, 0444, mdss_mdp_ad_show_event, NULL);
+static DEVICE_ATTR(ad_bl_event, 0444, mdss_mdp_ad_bl_show_event, NULL);
+
+/* Attribute group registered for the MDP overlay sysfs interface. */
+static struct attribute *mdp_overlay_sysfs_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	&dev_attr_lineptr_event.attr,
+	&dev_attr_lineptr_value.attr,
+	&dev_attr_ad.attr,
+	&dev_attr_dyn_pu.attr,
+	&dev_attr_msm_misr_en.attr,
+	&dev_attr_msm_cmd_autorefresh_en.attr,
+	&dev_attr_hist_event.attr,
+	&dev_attr_bl_event.attr,
+	&dev_attr_ad_event.attr,
+	&dev_attr_ad_bl_event.attr,
+	NULL,
+};
+
+static struct attribute_group mdp_overlay_sysfs_group = {
+	.attrs = mdp_overlay_sysfs_attrs,
+};
+
+/* Program the HW cursor position and ROI registers on @mixer. */
+static void mdss_mdp_hw_cursor_setpos(struct mdss_mdp_mixer *mixer,
+		struct mdss_rect *roi, u32 start_x, u32 start_y)
+{
+	if (!mixer) {
+		pr_err("mixer not available\n");
+		return;
+	}
+
+	/* Registers pack y in the high halfword, x/w in the low one. */
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY,
+			(roi->y << 16) | roi->x);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY,
+			(start_y << 16) | start_x);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE,
+			(roi->h << 16) | roi->w);
+}
+
+/*
+ * Program the HW cursor image registers: size, stride, base address and
+ * the blend configuration (constant alpha and/or color-key transparency)
+ * derived from the fb_cursor image.
+ */
+static void mdss_mdp_hw_cursor_setimage(struct mdss_mdp_mixer *mixer,
+	struct fb_cursor *cursor, u32 cursor_addr, struct mdss_rect *roi)
+{
+	int calpha_en, transp_en, alpha, size;
+	struct fb_image *img = &cursor->image;
+	u32 blendcfg;
+	int roi_size = 0;
+
+	if (!mixer) {
+		pr_err("mixer not available\n");
+		return;
+	}
+
+	/* bg_color of all-ones means "no transparent color key". */
+	if (img->bg_color == 0xffffffff)
+		transp_en = 0;
+	else
+		transp_en = 1;
+
+	alpha = (img->fg_color & 0xff000000) >> 24;
+
+	/*
+	 * NOTE(review): with a non-zero constant alpha the mode is set to
+	 * 0x0 (xrgb) and per-pixel alpha 0x2 only when alpha is zero —
+	 * confirm against the LM_CURSOR_BLEND_CONFIG register spec.
+	 */
+	if (alpha)
+		calpha_en = 0x0; /* xrgb */
+	else
+		calpha_en = 0x2; /* argb */
+
+	/* Pack height/width pairs into the 16:16 register layout. */
+	roi_size = (roi->h << 16) | roi->w;
+	size = (img->height << 16) | img->width;
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
+				img->width * 4);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
+				cursor_addr);
+	/* Update blend mode bits while preserving the enable bit (bit 0). */
+	blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+	blendcfg &= ~0x1;
+	blendcfg |= (transp_en << 3) | (calpha_en << 1);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
+				blendcfg);
+	if (calpha_en)
+		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
+				alpha);
+
+	if (transp_en) {
+		/* Color-key range: low and high bounds both set to bg_color. */
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
+				((img->bg_color & 0xff00) << 8) |
+				(img->bg_color & 0xff));
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
+				((img->bg_color & 0xff0000) >> 16));
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
+				((img->bg_color & 0xff00) << 8) |
+				(img->bg_color & 0xff));
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
+				((img->bg_color & 0xff0000) >> 16));
+	}
+}
+
+/*
+ * Toggle the cursor enable bit in the mixer's blend config when the
+ * requested state differs from the hardware state, and bump
+ * params_changed so the next flush picks it up.
+ *
+ * Fix: corrected "availbale" typo in the error message; dropped the
+ * stray blank line before the closing brace.
+ */
+static void mdss_mdp_hw_cursor_blend_config(struct mdss_mdp_mixer *mixer,
+	struct fb_cursor *cursor)
+{
+	u32 blendcfg;
+
+	if (!mixer) {
+		pr_err("mixer not available\n");
+		return;
+	}
+
+	blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+	/* Only touch hardware when enable state actually changes. */
+	if (!cursor->enable != !(blendcfg & 0x1)) {
+		if (cursor->enable) {
+			pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
+			blendcfg |= 0x1;
+		} else {
+			pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
+			blendcfg &= ~0x1;
+		}
+
+		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
+				blendcfg);
+		mixer->cursor_enabled = cursor->enable;
+		mixer->params_changed++;
+	}
+}
+
+/* Fill @rect from the given origin and dimensions. */
+static void mdss_mdp_set_rect(struct mdp_rect *rect, u16 x, u16 y, u16 w,
+		u16 h)
+{
+	rect->x = x;
+	rect->w = w;
+	rect->y = y;
+	rect->h = h;
+}
+
+/*
+ * Release the overlay pipe backing a cursor slot and mark the slot as
+ * free (MSMFB_NEW_REQUEST). (Name keeps the historical "curor"
+ * misspelling since callers reference it.)
+ */
+static void mdss_mdp_curor_pipe_cleanup(struct msm_fb_data_type *mfd,
+		int cursor_pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 ndx = mdp5_data->cursor_ndx[cursor_pipe];
+
+	if (ndx == MSMFB_NEW_REQUEST)
+		return;
+
+	mdss_mdp_overlay_release(mfd, ndx);
+	mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
+}
+
+/*
+ * mdss_mdp_cursor_flush() - flush the cursor pipe on its ctl(s)
+ * @mfd:         framebuffer device
+ * @pipe:        cursor pipe to flush
+ * @cursor_pipe: cursor slot index (unused here, kept for callers)
+ *
+ * Writes the cursor flush bit on the main ctl and, when split-flush is
+ * not enabled and the pipe sits on the right mixer, on the split ctl.
+ *
+ * Fix: the early -ENODEV return when the split ctl is missing skipped
+ * mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF), leaking an MDP clock vote;
+ * all paths now fall through the power-off call.
+ */
+int mdss_mdp_cursor_flush(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_pipe *pipe, int cursor_pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret = 0;
+	u32 flush_bits = BIT(22 + pipe->num - MDSS_MDP_SSPP_CURSOR0);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+	MDSS_XLOG(ctl->intf_num, flush_bits);
+	if ((!ctl->split_flush_en) && pipe->mixer_right) {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (!sctl) {
+			pr_err("not able to get the other ctl\n");
+			ret = -ENODEV;
+			goto end;
+		}
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+		MDSS_XLOG(sctl->intf_num, flush_bits);
+	}
+
+end:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	return ret;
+}
+
+/*
+ * Set up (or reuse) the overlay pipe for a cursor slot, point it at the
+ * preallocated cursor buffer (iova when the IOMMU is attached, physical
+ * address otherwise), queue the buffer and flush the pipe.
+ */
+static int mdss_mdp_cursor_pipe_setup(struct msm_fb_data_type *mfd,
+		struct mdp_overlay *req, int cursor_pipe) {
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+	u32 cursor_addr;
+	struct mdss_mdp_data *buf = NULL;
+
+	/* Reuse the slot's existing pipe id, or MSMFB_NEW_REQUEST. */
+	req->id = mdp5_data->cursor_ndx[cursor_pipe];
+	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
+	if (ret) {
+		pr_err("cursor pipe setup failed, cursor_pipe:%d, ret:%d\n",
+				cursor_pipe, ret);
+		mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
+		return ret;
+	}
+
+	pr_debug("req id:%d cursor_pipe:%d pnum:%d\n",
+		req->id, cursor_pipe, pipe->ndx);
+
+	if (mdata->mdss_util->iommu_attached()) {
+		cursor_addr = mfd->cursor_buf_iova;
+	} else {
+		/* Without IOMMU a >32-bit physical address is unreachable. */
+		if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
+			pr_err("can't access phy mem >4GB w/o iommu\n");
+			ret = -ERANGE;
+			goto done;
+		}
+		cursor_addr = mfd->cursor_buf_phys;
+	}
+
+	buf = __mdp_overlay_buf_alloc(mfd, pipe);
+	if (!buf) {
+		pr_err("unable to allocate memory for cursor buffer\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+	mdp5_data->cursor_ndx[cursor_pipe] = pipe->ndx;
+	buf->p[0].addr = cursor_addr;
+	buf->p[0].len = mdss_mdp_get_cursor_frame_size(mdata);
+	buf->num_planes = 1;
+
+	/* Solid-fill cursors queue no buffer, only the pipe config. */
+	buf->state = MDP_BUF_STATE_ACTIVE;
+	if (!(req->flags & MDP_SOLID_FILL))
+		ret = mdss_mdp_pipe_queue_data(pipe, buf);
+	else
+		ret = mdss_mdp_pipe_queue_data(pipe, NULL);
+
+	/*
+	 * NOTE(review): this early return bypasses the done: cleanup, so a
+	 * newly-requested pipe is not released on queue failure — confirm
+	 * whether that is handled by the caller.
+	 */
+	if (ret) {
+		pr_err("cursor pipe queue data failed in async mode\n");
+		return ret;
+	}
+
+	ret = mdss_mdp_cursor_flush(mfd, pipe, cursor_pipe);
+done:
+	/* Release a pipe we created in this call if anything failed. */
+	if (ret && mdp5_data->cursor_ndx[cursor_pipe] == MSMFB_NEW_REQUEST)
+		mdss_mdp_overlay_release(mfd, pipe->ndx);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_hw_cursor_pipe_update() - fb_cursor handler using cursor pipes.
+ * @mfd: framebuffer device.
+ * @cursor: fb cursor request (enable/set flags, image, hotspot).
+ *
+ * Allocates the shared cursor buffer on first FB_CUR_SETIMAGE, clamps the
+ * cursor ROI against hotspot and screen bounds, builds an mdp_overlay
+ * request and stages it on one or two cursor pipes depending on where the
+ * cursor falls relative to the left layer-mixer boundary.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
+				     struct fb_cursor *cursor)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_mixer *mixer;
+	struct fb_image *img = &cursor->image;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdp_overlay *req = NULL;
+	struct mdss_rect roi;
+	int ret = 0;
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	u32 xres = var->xres;
+	u32 yres = var->yres;
+	u32 start_x = img->dx;
+	u32 start_y = img->dy;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct platform_device *pdev = mfd->pdev;
+	u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	/* disable request: tear down both cursor pipes and we are done */
+	if (!cursor->enable) {
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
+		goto done;
+	}
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+	if (!mixer) {
+		ret = -ENODEV;
+		goto done;
+	}
+
+	/* lazily allocate the cursor buffer on the first SETIMAGE */
+	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
+			cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
+			&mfd->cursor_buf_iova, &mfd->cursor_buf,
+			GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
+		if (ret) {
+			pr_err("can't allocate cursor buffer rc:%d\n", ret);
+			goto done;
+		}
+
+		mixer->cursor_hotx = 0;
+		mixer->cursor_hoty = 0;
+	}
+
+	pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
+			cursor->set);
+
+	if (cursor->set & FB_CUR_SETHOT) {
+		if ((cursor->hot.x < img->width) &&
+			(cursor->hot.y < img->height)) {
+			mixer->cursor_hotx = cursor->hot.x;
+			mixer->cursor_hoty = cursor->hot.y;
+			/* Update cursor position */
+			cursor->set |= FB_CUR_SETPOS;
+		} else {
+			pr_err("Invalid cursor hotspot coordinates\n");
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	/*
+	 * Shift the start position by the hotspot; when the hotspot pushes
+	 * the cursor past the screen edge, clip the ROI origin instead.
+	 */
+	memset(&roi, 0, sizeof(struct mdss_rect));
+	if (start_x > mixer->cursor_hotx) {
+		start_x -= mixer->cursor_hotx;
+	} else {
+		roi.x = mixer->cursor_hotx - start_x;
+		start_x = 0;
+	}
+	if (start_y > mixer->cursor_hoty) {
+		start_y -= mixer->cursor_hoty;
+	} else {
+		roi.y = mixer->cursor_hoty - start_y;
+		start_y = 0;
+	}
+
+	/* only 32bpp cursors within the hw max size and on screen are valid */
+	if ((img->width > mdata->max_cursor_size) ||
+		(img->height > mdata->max_cursor_size) ||
+		(img->depth != 32) || (start_x >= xres) ||
+		(start_y >= yres)) {
+		pr_err("Invalid cursor image coordinates\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	roi.w = min(xres - start_x, img->width - roi.x);
+	roi.h = min(yres - start_y, img->height - roi.y);
+
+	if ((roi.w > mdata->max_cursor_size) ||
+		(roi.h > mdata->max_cursor_size)) {
+		pr_err("Invalid cursor ROI size\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	req->pipe_type = PIPE_TYPE_CURSOR;
+	req->z_order = HW_CURSOR_STAGE(mdata);
+
+	req->src.width = img->width;
+	req->src.height = img->height;
+	req->src.format = mfd->fb_imgType;
+
+	mdss_mdp_set_rect(&req->src_rect, roi.x, roi.y, roi.w, roi.h);
+	mdss_mdp_set_rect(&req->dst_rect, start_x, start_y, roi.w, roi.h);
+
+	req->bg_color = img->bg_color;
+	/* per-pixel vs constant alpha is derived from fg_color's alpha byte */
+	req->alpha = (img->fg_color >> ((32 - var->transp.offset) - 8)) & 0xff;
+	if (req->alpha)
+		req->blend_op = BLEND_OP_PREMULTIPLIED;
+	else
+		req->blend_op = BLEND_OP_COVERAGE;
+	req->transp_mask = img->bg_color & ~(0xff << var->transp.offset);
+
+	if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		ret = copy_from_user(mfd->cursor_buf, img->data,
+				     img->width * img->height * 4);
+		if (ret) {
+			pr_err("copy_from_user error. rc=%d\n", ret);
+			goto done;
+		}
+
+		mixer->cursor_hotx = 0;
+		mixer->cursor_hoty = 0;
+	}
+
+	/*
+	 * When source split is enabled, only CURSOR_PIPE_LEFT is used,
+	 * with both mixers of the pipe staged all the time.
+	 * When source split is disabled, 2 pipes are staged, with one
+	 * pipe containing the actual data and another one a transparent
+	 * solid fill when the data falls only in left or right dsi.
+	 * Both are done to support async cursor functionality.
+	 */
+	if (mdata->has_src_split || (!is_split_lm(mfd))
+			|| (mdata->ncursor_pipes == 1)) {
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+	} else if ((start_x + roi.w) <= left_lm_w) {
+		/* cursor fully on the left LM; solid fill on the right */
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+		if (ret)
+			goto done;
+		req->bg_color = 0;
+		req->flags |= MDP_SOLID_FILL;
+		req->dst_rect.x = left_lm_w;
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
+	} else if (start_x >= left_lm_w) {
+		/* cursor fully on the right LM; solid fill on the left */
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
+		if (ret)
+			goto done;
+		req->bg_color = 0;
+		req->flags |= MDP_SOLID_FILL;
+		req->dst_rect.x = 0;
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+	} else if ((start_x <= left_lm_w) && ((start_x + roi.w) >= left_lm_w)) {
+		/* cursor straddles the LM boundary: split it across pipes */
+		mdss_mdp_set_rect(&req->dst_rect, start_x, start_y,
+				(left_lm_w - start_x), roi.h);
+		mdss_mdp_set_rect(&req->src_rect, 0, 0, (left_lm_w -
+				start_x), roi.h);
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+		if (ret)
+			goto done;
+
+		mdss_mdp_set_rect(&req->dst_rect, left_lm_w, start_y, ((start_x
+				+ roi.w) - left_lm_w), roi.h);
+		mdss_mdp_set_rect(&req->src_rect, (left_lm_w - start_x), 0,
+				(roi.w - (left_lm_w - start_x)), roi.h);
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
+	} else {
+		pr_err("Invalid case for cursor pipe setup\n");
+		ret = -EINVAL;
+	}
+
+done:
+	/* on any failure, make sure no half-staged cursor pipes remain */
+	if (ret) {
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
+	}
+
+	kfree(req);
+	mutex_unlock(&mdp5_data->ov_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_hw_cursor_update() - fb_cursor handler using mixer hw cursors.
+ * @mfd: framebuffer device.
+ * @cursor: fb cursor request (enable/set flags, image, hotspot).
+ *
+ * Legacy path that programs the layer-mixer's built-in cursor registers
+ * (image, position, blend) directly instead of staging cursor pipes.
+ * Handles split layer-mixer panels by programming one or both mixers
+ * depending on where the clamped cursor ROI falls.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
+				     struct fb_cursor *cursor)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_mixer *mixer_left = NULL;
+	struct mdss_mdp_mixer *mixer_right = NULL;
+	struct fb_image *img = &cursor->image;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct fbcurpos cursor_hot;
+	struct mdss_rect roi;
+	int ret = 0;
+	u32 xres = mfd->fbi->var.xres;
+	u32 yres = mfd->fbi->var.yres;
+	u32 start_x = img->dx;
+	u32 start_y = img->dy;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct platform_device *pdev = mfd->pdev;
+	u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);
+
+	mixer_left = mdss_mdp_mixer_get(mdp5_data->ctl,
+			MDSS_MDP_MIXER_MUX_DEFAULT);
+	if (!mixer_left)
+		return -ENODEV;
+	if (is_split_lm(mfd)) {
+		mixer_right = mdss_mdp_mixer_get(mdp5_data->ctl,
+				MDSS_MDP_MIXER_MUX_RIGHT);
+		if (!mixer_right)
+			return -ENODEV;
+	}
+
+	/* lazily allocate the cursor buffer on the first SETIMAGE */
+	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
+			cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
+			&mfd->cursor_buf_iova, &mfd->cursor_buf,
+			GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
+		if (ret) {
+			pr_err("can't allocate cursor buffer rc:%d\n", ret);
+			return ret;
+		}
+	}
+
+	/* only 32bpp cursors within the hw max size and on screen are valid */
+	if ((img->width > mdata->max_cursor_size) ||
+		(img->height > mdata->max_cursor_size) ||
+		(img->depth != 32) || (start_x >= xres) || (start_y >= yres))
+		return -EINVAL;
+
+	pr_debug("enable=%x set=%x\n", cursor->enable, cursor->set);
+
+	memset(&cursor_hot, 0, sizeof(struct fbcurpos));
+	memset(&roi, 0, sizeof(struct mdss_rect));
+	if (cursor->set & FB_CUR_SETHOT) {
+		if ((cursor->hot.x < img->width) &&
+			(cursor->hot.y < img->height)) {
+			cursor_hot.x = cursor->hot.x;
+			cursor_hot.y = cursor->hot.y;
+			/* Update cursor position */
+			cursor->set |= FB_CUR_SETPOS;
+		} else {
+			pr_err("Invalid cursor hotspot coordinates\n");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Shift the start position by the hotspot; when the hotspot pushes
+	 * the cursor past the screen edge, clip the ROI origin instead.
+	 */
+	if (start_x > cursor_hot.x) {
+		start_x -= cursor_hot.x;
+	} else {
+		roi.x = cursor_hot.x - start_x;
+		start_x = 0;
+	}
+	if (start_y > cursor_hot.y) {
+		start_y -= cursor_hot.y;
+	} else {
+		roi.y = cursor_hot.y - start_y;
+		start_y = 0;
+	}
+
+	roi.w = min(xres - start_x, img->width - roi.x);
+	roi.h = min(yres - start_y, img->height - roi.y);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		u32 cursor_addr;
+
+		ret = copy_from_user(mfd->cursor_buf, img->data,
+				     img->width * img->height * 4);
+		if (ret) {
+			pr_err("copy_from_user error. rc=%d\n", ret);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			return ret;
+		}
+
+		/* pick iova when the SMMU is attached, else physical addr */
+		if (mdata->mdss_util->iommu_attached()) {
+			cursor_addr = mfd->cursor_buf_iova;
+		} else {
+			if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
+				pr_err("can't access phy mem >4GB w/o iommu\n");
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+				return -ERANGE;
+			}
+			cursor_addr = mfd->cursor_buf_phys;
+		}
+		mdss_mdp_hw_cursor_setimage(mixer_left, cursor, cursor_addr,
+				&roi);
+		if (is_split_lm(mfd))
+			mdss_mdp_hw_cursor_setimage(mixer_right, cursor,
+					cursor_addr, &roi);
+	}
+
+	/*
+	 * Program position/blend on the mixer(s) the cursor touches and
+	 * disable blending on the other one.
+	 * NOTE(review): mixer_right is NULL when !is_split_lm(mfd) —
+	 * presumably blend_config tolerates a NULL mixer or these branches
+	 * only matter for split panels; confirm before relying on it.
+	 */
+	if ((start_x + roi.w) <= left_lm_w) {
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
+					start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+		cursor->enable = false;
+		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+	} else if (start_x >= left_lm_w) {
+		start_x -= left_lm_w;
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_right, &roi, start_x,
+					start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+		cursor->enable = false;
+		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+	} else {
+		/* cursor straddles the LM boundary: split the ROI */
+		struct mdss_rect roi_right = roi;
+
+		roi.w = left_lm_w - start_x;
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
+					start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+
+		roi_right.x = 0;
+		roi_right.w = (start_x + roi_right.w) - left_lm_w;
+		start_x = 0;
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_right, &roi_right,
+					start_x, start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+	}
+
+	/* flush the layer-mixer config on the next kickoff */
+	mixer_left->ctl->flush_bits |= BIT(6) << mixer_left->num;
+	if (is_split_lm(mfd))
+		mixer_right->ctl->flush_bits |= BIT(6) << mixer_right->num;
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return 0;
+}
+
+/*
+ * mdss_bl_scale_config() - apply new backlight scale/minimum settings.
+ * @mfd: framebuffer device whose backlight is scaled.
+ * @data: requested scale factor and minimum level.
+ *
+ * Updates the scaling parameters under bl_lock and, if the backlight is
+ * currently on, reprograms it so the new scaling takes effect immediately.
+ */
+static int mdss_bl_scale_config(struct msm_fb_data_type *mfd,
+		struct mdp_bl_scale_data *data)
+{
+	int active_bl;
+
+	mutex_lock(&mfd->bl_lock);
+
+	active_bl = mfd->bl_level;
+	mfd->bl_scale = data->scale;
+	mfd->bl_min_lvl = data->min_lvl;
+	pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
+							mfd->bl_min_lvl);
+
+	/* Update current backlight to use new scaling, if it is not zero */
+	if (active_bl)
+		mdss_fb_set_backlight(mfd, active_bl);
+
+	mutex_unlock(&mfd->bl_lock);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_pp_ioctl() - dispatch an MSMFB_MDP_PP picture-processing request.
+ * @mfd: framebuffer device the request applies to.
+ * @argp: userspace pointer to a struct msmfb_mdp_pp.
+ *
+ * Copies the request in, routes it by mdp_pp.op to the matching PP helper
+ * (PA/PCC/LUT/dither/gamut/backlight-scale/AD/calibration) and copies the
+ * possibly-updated request back out when the helper sets copyback.
+ *
+ * Fix: the "&copyback" arguments had been mangled into the mojibake
+ * "(c)back" (an HTML "&copy;" entity substitution); restored throughout.
+ *
+ * Return: 0 on success or a negative errno / nonzero copy residue.
+ */
+static int mdss_mdp_pp_ioctl(struct msm_fb_data_type *mfd,
+				void __user *argp)
+{
+	int ret;
+	struct msmfb_mdp_pp mdp_pp;
+	u32 copyback = 0;
+	u32 copy_from_kernel = 0;
+
+	ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
+	if (ret)
+		return ret;
+
+	/* Support only MDP register read/write and
+	 * exit_dcm in DCM state
+	 */
+	if (mfd->dcm_state == DCM_ENTER &&
+			(mdp_pp.op != mdp_op_calib_buffer &&
+			mdp_pp.op != mdp_op_calib_dcm_state))
+		return -EPERM;
+
+	switch (mdp_pp.op) {
+	case mdp_op_pa_cfg:
+		ret = mdss_mdp_pa_config(mfd, &mdp_pp.data.pa_cfg_data,
+				&copyback);
+		break;
+
+	case mdp_op_pa_v2_cfg:
+		ret = mdss_mdp_pa_v2_config(mfd, &mdp_pp.data.pa_v2_cfg_data,
+				&copyback);
+		break;
+
+	case mdp_op_pcc_cfg:
+		ret = mdss_mdp_pcc_config(mfd, &mdp_pp.data.pcc_cfg_data,
+				&copyback);
+		break;
+
+	case mdp_op_lut_cfg:
+		switch (mdp_pp.data.lut_cfg_data.lut_type) {
+		case mdp_lut_igc:
+			ret = mdss_mdp_igc_lut_config(mfd,
+					(struct mdp_igc_lut_data *)
+					&mdp_pp.data.lut_cfg_data.data,
+					&copyback, copy_from_kernel);
+			break;
+
+		case mdp_lut_pgc:
+			ret = mdss_mdp_argc_config(mfd,
+				&mdp_pp.data.lut_cfg_data.data.pgc_lut_data,
+				&copyback);
+			break;
+
+		case mdp_lut_hist:
+			ret = mdss_mdp_hist_lut_config(mfd,
+				(struct mdp_hist_lut_data *)
+				&mdp_pp.data.lut_cfg_data.data, &copyback);
+			break;
+
+		default:
+			ret = -ENOTSUPP;
+			break;
+		}
+		break;
+	case mdp_op_dither_cfg:
+		ret = mdss_mdp_dither_config(mfd,
+				&mdp_pp.data.dither_cfg_data,
+				&copyback,
+				false);
+		break;
+	case mdp_op_gamut_cfg:
+		ret = mdss_mdp_gamut_config(mfd,
+				&mdp_pp.data.gamut_cfg_data,
+				&copyback);
+		break;
+	case mdp_bl_scale_cfg:
+		ret = mdss_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
+						&mdp_pp.data.bl_scale_data);
+		break;
+	case mdp_op_ad_cfg:
+		ret = mdss_mdp_ad_config(mfd, &mdp_pp.data.ad_init_cfg);
+		break;
+	case mdp_op_ad_input:
+		ret = mdss_mdp_ad_input(mfd, &mdp_pp.data.ad_input, 1);
+		/* positive return carries data back to the caller */
+		if (ret > 0) {
+			ret = 0;
+			copyback = 1;
+		}
+		break;
+	case mdp_op_calib_cfg:
+		ret = mdss_mdp_calib_config((struct mdp_calib_config_data *)
+					 &mdp_pp.data.calib_cfg, &copyback);
+		break;
+	case mdp_op_calib_mode:
+		ret = mdss_mdp_calib_mode(mfd, &mdp_pp.data.mdss_calib_cfg);
+		break;
+	case mdp_op_calib_buffer:
+		ret = mdss_mdp_calib_config_buffer(
+				(struct mdp_calib_config_buffer *)
+				 &mdp_pp.data.calib_buffer, &copyback);
+		break;
+	case mdp_op_calib_dcm_state:
+		ret = mdss_fb_dcm(mfd, mdp_pp.data.calib_dcm.dcm_state);
+		break;
+	default:
+		pr_err("Unsupported request to MDP_PP IOCTL. %d = op\n",
+				mdp_pp.op);
+		ret = -EINVAL;
+		break;
+	}
+	if ((ret == 0) && copyback)
+		ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
+	return ret;
+}
+
+/*
+ * mdss_mdp_histo_ioctl() - handle the MSMFB_HISTOGRAM* ioctl family.
+ * @mfd: framebuffer device.
+ * @cmd: MSMFB_HISTOGRAM_START, MSMFB_HISTOGRAM_STOP or MSMFB_HISTOGRAM.
+ * @argp: userspace pointer whose type depends on @cmd.
+ *
+ * START/collect require the panel to be powered; STOP is allowed anytime.
+ * Return: 0 on success, a negative errno, or a nonzero copy_*_user residue.
+ */
+static int mdss_mdp_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
+				void __user *argp)
+{
+	int ret = -ENOTSUPP;
+	struct mdp_histogram_data hist;
+	struct mdp_histogram_start_req hist_req;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 block;
+
+	if (!mdata)
+		return -EPERM;
+
+	switch (cmd) {
+	case MSMFB_HISTOGRAM_START:
+		if (mdss_fb_is_power_off(mfd))
+			return -EPERM;
+
+		ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_hist_start(&hist_req);
+		break;
+
+	case MSMFB_HISTOGRAM_STOP:
+		/* argp is just the histogram block id for STOP */
+		ret = copy_from_user(&block, argp, sizeof(int));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_hist_stop(block);
+		if (ret)
+			return ret;
+		break;
+
+	case MSMFB_HISTOGRAM:
+		if (mdss_fb_is_power_off(mfd)) {
+			pr_err("mfd is turned off MSMFB_HISTOGRAM failed\n");
+			return -EPERM;
+		}
+
+		ret = copy_from_user(&hist, argp, sizeof(hist));
+		if (ret)
+			return ret;
+
+		/* collect into the kernel copy, then hand it back */
+		ret = mdss_mdp_hist_collect(&hist);
+		if (!ret)
+			ret = copy_to_user(argp, &hist, sizeof(hist));
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdss_fb_set_metadata() - apply an MSMFB_METADATA_SET request.
+ * @mfd: framebuffer device.
+ * @metadata: kernel copy of the request; op selects the action.
+ *
+ * Supports setting the panel VIC and programming MISR (CRC) capture.
+ * Return: 0 on success or a negative errno.
+ */
+static int mdss_fb_set_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	if (!ctl)
+		return -EPERM;
+
+	switch (metadata->op) {
+	case metadata_op_vic:
+		if (!mfd->panel_info)
+			return -EINVAL;
+		mfd->panel_info->vic = metadata->data.video_info_code;
+		return 0;
+	case metadata_op_crc:
+		/* MISR capture needs live hardware */
+		if (mdss_fb_is_power_off(mfd))
+			return -EPERM;
+		return mdss_misr_set(mdata, &metadata->data.misr_request, ctl);
+	default:
+		pr_warn("unsupported request to MDP META IOCTL\n");
+		return -EINVAL;
+	}
+}
+
+/*
+ * mdss_fb_get_hw_caps() - report MDP hardware capabilities to userspace.
+ * @mfd: framebuffer device (used to reach the mdss data).
+ * @caps: capability structure to fill; feature bits are OR-ed in.
+ */
+static int mdss_fb_get_hw_caps(struct msm_fb_data_type *mfd,
+		struct mdss_hw_caps *caps)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	u32 feature_bits = 0;
+
+	caps->mdp_rev = mdata->mdp_rev;
+	caps->vig_pipes = mdata->nvig_pipes;
+	caps->rgb_pipes = mdata->nrgb_pipes;
+	caps->dma_pipes = mdata->ndma_pipes;
+
+	if (mdata->has_bwc)
+		feature_bits |= MDP_BWC_EN;
+	if (mdata->has_decimation)
+		feature_bits |= MDP_DECIMATION_EN;
+	caps->features |= feature_bits;
+
+	/* SMP info is only meaningful on targets with shared memory pools */
+	if (mdata->smp_mb_cnt) {
+		caps->max_smp_cnt = mdata->smp_mb_cnt;
+		caps->smp_per_pipe = mdata->smp_mb_per_pipe;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_fb_get_metadata() - service an MSMFB_METADATA_GET request.
+ * @mfd: framebuffer device.
+ * @metadata: kernel copy of the request; op selects what to fill in.
+ *
+ * Supported ops: panel frame rate, hw capabilities, an ion fd for the
+ * framebuffer memory, and MISR (CRC) readback.
+ * Return: 0 on success or a negative errno.
+ */
+static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = NULL;
+	int ret = 0;
+
+	switch (metadata->op) {
+	case metadata_op_frame_rate:
+		metadata->data.panel_frame_rate =
+			mdss_panel_get_framerate(mfd->panel_info,
+				FPS_RESOLUTION_DEFAULT);
+		pr_debug("current fps:%d\n", metadata->data.panel_frame_rate);
+		break;
+	case metadata_op_get_caps:
+		ret = mdss_fb_get_hw_caps(mfd, &metadata->data.caps);
+		break;
+	case metadata_op_get_ion_fd:
+		if (mfd->fb_ion_handle && mfd->fb_ion_client) {
+			/* take a dma-buf ref that the new fd will own */
+			get_dma_buf(mfd->fbmem_buf);
+			metadata->data.fbmem_ionfd =
+				ion_share_dma_buf_fd(mfd->fb_ion_client,
+					mfd->fb_ion_handle);
+			/* on failure drop the ref taken above */
+			if (metadata->data.fbmem_ionfd < 0) {
+				dma_buf_put(mfd->fbmem_buf);
+				pr_err("fd allocation failed. fd = %d\n",
+						metadata->data.fbmem_ionfd);
+			}
+		}
+		break;
+	case metadata_op_crc:
+		ctl = mfd_to_ctl(mfd);
+		/* MISR readback needs a ctl and live hardware */
+		if (!ctl || mdss_fb_is_power_off(mfd))
+			return -EPERM;
+		ret = mdss_misr_get(mdata, &metadata->data.misr_request, ctl,
+			ctl->is_video_mode);
+		break;
+	default:
+		pr_warn("Unsupported request to MDP META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * __mdss_mdp_clean_dirty_pipes() - release every pipe marked dirty.
+ * @mfd: framebuffer device whose pipe list is scanned.
+ *
+ * Collects the ndx mask of dirty pipes under list_lock, then releases
+ * them in one call. Returns the mask that was released (0 if none).
+ */
+static int __mdss_mdp_clean_dirty_pipes(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *cur;
+	int dirty_ndx = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(cur, &mdp5_data->pipes_used, list) {
+		if (cur->dirty)
+			dirty_ndx |= cur->ndx;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	if (dirty_ndx)
+		mdss_mdp_overlay_release(mfd, dirty_ndx);
+
+	return dirty_ndx;
+}
+
+/*
+ * mdss_mdp_overlay_precommit() - sanity checks before an overlay commit.
+ * @mfd: framebuffer device about to commit.
+ *
+ * Cleans up pipes userspace left dirty and validates the mode-switch
+ * state machine before the commit proceeds.
+ * Return: 0 on success, -EPIPE if dirty pipes were found, -EINVAL for an
+ * invalid commit during mode switch, or a mutex/interruption errno.
+ */
+static int mdss_mdp_overlay_precommit(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data;
+	int ret;
+
+	if (!mfd)
+		return -ENODEV;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * we can assume that any pipes that are still dirty at this point are
+	 * not properly tracked by user land. This could be for any reason,
+	 * mark them for cleanup at this point.
+	 */
+	ret = __mdss_mdp_clean_dirty_pipes(mfd);
+	if (ret) {
+		pr_warn("fb%d: dirty pipes remaining %x\n",
+				mfd->index, ret);
+		ret = -EPIPE;
+	}
+
+	/*
+	 * If we are in process of mode switch we may have an invalid state.
+	 * We can allow commit to happen if there are no pipes attached as only
+	 * border color will be seen regardless of resolution or mode.
+	 */
+	if ((mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) &&
+			(mfd->switch_state != MDSS_MDP_WAIT_FOR_COMMIT)) {
+		if (list_empty(&mdp5_data->pipes_used)) {
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_COMMIT;
+		} else {
+			pr_warn("Invalid commit on fb%d with state=%d\n",
+					mfd->index, mfd->switch_state);
+			ret = -EINVAL;
+		}
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/*
+ * This routine serves two purposes.
+ * 1. Propagate overlay_id returned from sorted list to original list
+ *    to user-space.
+ * 2. In case of error processing sorted list, map the error overlay's
+ *    index to original list because user-space is not aware of the sorted
+ *    list. Entries are matched by (dst_rect.x, z_order), which is the key
+ *    the sort was performed on.
+ */
+static int __mdss_overlay_map(struct mdp_overlay *ovs,
+	struct mdp_overlay *op_ovs, int num_ovs, int num_ovs_processed)
+{
+	int mapped = num_ovs_processed;
+	int j, k;
+
+	for (j = 0; j < num_ovs; j++) {
+		for (k = 0; k < num_ovs; k++) {
+			if ((ovs[j].dst_rect.x == op_ovs[k].dst_rect.x) &&
+			    (ovs[j].z_order == op_ovs[k].z_order)) {
+				op_ovs[k].id = ovs[j].id;
+				op_ovs[k].priority = ovs[j].priority;
+				break;
+			}
+		}
+
+		/*
+		 * If processing stopped at sorted index j, translate it to
+		 * the original index k found above. NOTE(review): relies on
+		 * k holding the matched index after the inner loop's break
+		 * (or num_ovs when no match was found).
+		 */
+		if ((mapped != num_ovs) && (mapped == j)) {
+			pr_debug("mapped %d->%d\n", mapped, k);
+			mapped = k;
+		}
+	}
+
+	return mapped;
+}
+
+/* sort() swap callback: exchange two mdp_overlay entries (size is fixed). */
+static inline void __overlay_swap_func(void *a, void *b, int size)
+{
+	struct mdp_overlay tmp = *(struct mdp_overlay *)a;
+
+	*(struct mdp_overlay *)a = *(struct mdp_overlay *)b;
+	*(struct mdp_overlay *)b = tmp;
+}
+
+/*
+ * sort() compare callback: order overlays by z_order, then by dst_rect.x
+ * within the same z_order. Returns -1 when a sorts before b, else 0 (the
+ * companion swap keeps the sort stable enough for this use).
+ */
+static inline int __zorder_dstx_cmp_func(const void *a, const void *b)
+{
+	const struct mdp_overlay *lhs = a;
+	const struct mdp_overlay *rhs = b;
+
+	if (lhs->z_order != rhs->z_order)
+		return (lhs->z_order < rhs->z_order) ? -1 : 0;
+
+	return (lhs->dst_rect.x < rhs->dst_rect.x) ? -1 : 0;
+}
+
+/*
+ * first sort list of overlays based on z_order and then within
+ * same z_order sort them on dst_x. After sorting, verify that no layer
+ * mixer is asked to blend more than two overlays at the same stage.
+ */
+static int __mdss_overlay_src_split_sort(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *ovs, int num_ovs)
+{
+	int idx;
+	int left_cnt[MDSS_MDP_MAX_STAGE] = {0};
+	int right_cnt[MDSS_MDP_MAX_STAGE] = {0};
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+
+	sort(ovs, num_ovs, sizeof(struct mdp_overlay), __zorder_dstx_cmp_func,
+		__overlay_swap_func);
+
+	for (idx = 0; idx < num_ovs; idx++) {
+		u32 stage = ovs[idx].z_order;
+
+		if (stage >= MDSS_MDP_MAX_STAGE) {
+			pr_err("invalid stage:%u\n", stage);
+			return -EINVAL;
+		}
+
+		if (ovs[idx].dst_rect.x < left_lm_w) {
+			if (left_cnt[stage] == 2) {
+				pr_err("more than 2 ov @ stage%u on left lm\n",
+					stage);
+				return -EINVAL;
+			}
+			left_cnt[stage]++;
+		} else {
+			if (right_cnt[stage] == 2) {
+				pr_err("more than 2 ov @ stage%u on right lm\n",
+					stage);
+				return -EINVAL;
+			}
+			right_cnt[stage]++;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * __handle_overlay_prepare() - validate and stage a list of overlay requests.
+ * @mfd: framebuffer device.
+ * @ovlist: list descriptor; processed_overlays is updated before returning.
+ * @ip_ovs: kernel copy of the num_overlays overlay requests.
+ *
+ * With source split, overlays are first sorted by (z_order, dst_x) so that
+ * right-blend pairs become adjacent. Each request then goes through pipe
+ * setup, pipes are partitioned per layer mixer, and a bandwidth check runs
+ * on the final layout. On failure, newly-allocated pipes are released and
+ * the failing index (mapped back to the caller's original ordering) is
+ * reported via ovlist->processed_overlays.
+ *
+ * Fix: the kcalloc-failure and src_split_sort-failure paths returned while
+ * still holding ov_lock, deadlocking every later overlay operation; both
+ * now unlock before returning.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int __handle_overlay_prepare(struct msm_fb_data_type *mfd,
+	struct mdp_overlay_list *ovlist, struct mdp_overlay *ip_ovs)
+{
+	int ret, i;
+	int new_reqs = 0, left_cnt = 0, right_cnt = 0;
+	int num_ovs = ovlist->num_overlays;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 left_lm_ovs = 0, right_lm_ovs = 0;
+	bool is_single_layer = false;
+
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	struct mdp_overlay *sorted_ovs = NULL;
+	struct mdp_overlay *req, *prev_req;
+
+	struct mdss_mdp_pipe *pipe, *left_blend_pipe;
+	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = { 0 };
+	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = { 0 };
+
+	bool sort_needed = mdata->has_src_split && (num_ovs > 1);
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
+	}
+
+	if (sort_needed) {
+		sorted_ovs = kcalloc(num_ovs, sizeof(*ip_ovs), GFP_KERNEL);
+		if (!sorted_ovs) {
+			pr_err("error allocating ovlist mem\n");
+			/* fix: release ov_lock before bailing (was leaked) */
+			mutex_unlock(&mdp5_data->ov_lock);
+			return -ENOMEM;
+		}
+		memcpy(sorted_ovs, ip_ovs, num_ovs * sizeof(*ip_ovs));
+		ret = __mdss_overlay_src_split_sort(mfd, sorted_ovs, num_ovs);
+		if (ret) {
+			pr_err("src_split_sort failed. ret=%d\n", ret);
+			kfree(sorted_ovs);
+			/* fix: release ov_lock before bailing (was leaked) */
+			mutex_unlock(&mdp5_data->ov_lock);
+			return ret;
+		}
+	}
+
+	pr_debug("prepare fb%d num_ovs=%d\n", mfd->index, num_ovs);
+
+	/* count overlays per layer mixer to detect single-layer mixers */
+	for (i = 0; i < num_ovs; i++) {
+		if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
+			left_lm_w))
+			right_lm_ovs++;
+		else
+			left_lm_ovs++;
+
+		if ((left_lm_ovs > 1) && (right_lm_ovs > 1))
+			break;
+	}
+
+	for (i = 0; i < num_ovs; i++) {
+		left_blend_pipe = NULL;
+
+		if (sort_needed) {
+			req = &sorted_ovs[i];
+			prev_req = (i > 0) ? &sorted_ovs[i - 1] : NULL;
+
+			/*
+			 * check if current overlay is at same z_order as
+			 * previous one and qualifies as a right blend. If yes,
+			 * pass a pointer to the pipe representing previous
+			 * overlay or in other terms left blend overlay.
+			 */
+			if (prev_req && (prev_req->z_order == req->z_order) &&
+			    is_ov_right_blend(&prev_req->dst_rect,
+				    &req->dst_rect, left_lm_w)) {
+				/* pipe still holds the previous iteration's
+				 * result here (prev_req implies i > 0)
+				 */
+				left_blend_pipe = pipe;
+			}
+		} else {
+			req = &ip_ovs[i];
+		}
+
+		if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
+			left_lm_w))
+			is_single_layer = (right_lm_ovs == 1);
+		else
+			is_single_layer = (left_lm_ovs == 1);
+
+		req->z_order += MDSS_MDP_STAGE_0;
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe,
+			left_blend_pipe, is_single_layer);
+		req->z_order -= MDSS_MDP_STAGE_0;
+
+		if (IS_ERR_VALUE((unsigned long)ret))
+			goto validate_exit;
+
+		pr_debug("pnum:%d id:0x%x flags:0x%x dst_x:%d l_blend_pnum%d\n",
+			pipe->num, req->id, req->flags, req->dst_rect.x,
+			left_blend_pipe ? left_blend_pipe->num : -1);
+
+		/* keep track of the new overlays to unset in case of errors */
+		if (pipe->play_cnt == 0)
+			new_reqs |= pipe->ndx;
+
+		if (IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w)) {
+			if (right_cnt >= MAX_PIPES_PER_LM) {
+				pr_err("too many pipes on right mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			right_plist[right_cnt] = pipe;
+			right_cnt++;
+		} else {
+			if (left_cnt >= MAX_PIPES_PER_LM) {
+				pr_err("too many pipes on left mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			left_plist[left_cnt] = pipe;
+			left_cnt++;
+		}
+	}
+
+	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+			right_plist, right_cnt);
+
+validate_exit:
+	/* report how far we got, in the caller's original ordering */
+	if (sort_needed)
+		ovlist->processed_overlays =
+			__mdss_overlay_map(sorted_ovs, ip_ovs, num_ovs, i);
+	else
+		ovlist->processed_overlays = i;
+
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		pr_debug("err=%d total_ovs:%d processed:%d left:%d right:%d\n",
+			ret, num_ovs, ovlist->processed_overlays, left_lm_ovs,
+			right_lm_ovs);
+		mdss_mdp_overlay_release(mfd, new_reqs);
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	kfree(sorted_ovs);
+
+	return ret;
+}
+
+/*
+ * __handle_ioctl_overlay_prepare() - MSMFB_OVERLAY_PREPARE ioctl front-end.
+ * @mfd: framebuffer device.
+ * @argp: userspace pointer to a struct mdp_overlay_list.
+ *
+ * Copies the list descriptor, the array of user overlay pointers, and each
+ * overlay into kernel memory; runs __handle_overlay_prepare(); then copies
+ * the (possibly updated) overlays and list descriptor back out.
+ * Return: 0 on success or a negative errno.
+ */
+static int __handle_ioctl_overlay_prepare(struct msm_fb_data_type *mfd,
+		void __user *argp)
+{
+	struct mdp_overlay_list ovlist;
+	struct mdp_overlay *req_list[OVERLAY_MAX];
+	struct mdp_overlay *overlays;
+	int i, ret;
+
+	if (!mfd_to_ctl(mfd))
+		return -ENODEV;
+
+	if (copy_from_user(&ovlist, argp, sizeof(ovlist)))
+		return -EFAULT;
+
+	/* bound the untrusted count before sizing allocations */
+	if (ovlist.num_overlays > OVERLAY_MAX) {
+		pr_err("Number of overlays exceeds max\n");
+		return -EINVAL;
+	}
+
+	overlays = kmalloc_array(ovlist.num_overlays, sizeof(*overlays),
+				GFP_KERNEL);
+	if (!overlays)
+		return -ENOMEM;
+
+	/* first fetch the array of user pointers ... */
+	if (copy_from_user(req_list, ovlist.overlay_list,
+				sizeof(struct mdp_overlay *) *
+				ovlist.num_overlays)) {
+		ret = -EFAULT;
+		goto validate_exit;
+	}
+
+	/* ... then each overlay they point at */
+	for (i = 0; i < ovlist.num_overlays; i++) {
+		if (copy_from_user(overlays + i, req_list[i],
+				sizeof(struct mdp_overlay))) {
+			ret = -EFAULT;
+			goto validate_exit;
+		}
+	}
+
+	ret = __handle_overlay_prepare(mfd, &ovlist, overlays);
+	/* propagate assigned overlay ids back to each user overlay */
+	if (!IS_ERR_VALUE((unsigned long)ret)) {
+		for (i = 0; i < ovlist.num_overlays; i++) {
+			if (copy_to_user(req_list[i], overlays + i,
+					sizeof(struct mdp_overlay))) {
+				ret = -EFAULT;
+				goto validate_exit;
+			}
+		}
+	}
+
+	if (copy_to_user(argp, &ovlist, sizeof(ovlist)))
+		ret = -EFAULT;
+
+validate_exit:
+	kfree(overlays);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_overlay_ioctl_handler() - dispatch overlay-related fb ioctls.
+ * @mfd: framebuffer device.
+ * @cmd: ioctl command code.
+ * @argp: userspace argument pointer.
+ *
+ * Fixes:
+ * - MSMFB_OVERLAY_UNSET used IS_ERR_VALUE() on the copy_from_user()
+ *   residue, which is always false for a small positive count, so a
+ *   faulting copy still invoked overlay_unset with an uninitialized ndx.
+ *   The copy result is now checked directly and -EFAULT returned.
+ * - The METADATA_SET/GET paths returned the raw positive copy residue to
+ *   userspace; they now return -EFAULT per ioctl convention.
+ *
+ * Return: 0 on success, -ENOTSUPP for unknown commands, or a negative errno.
+ */
+static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
+					  u32 cmd, void __user *argp)
+{
+	struct mdp_overlay *req = NULL;
+	int val, ret = -ENOTSUPP;
+	struct msmfb_metadata metadata;
+	struct mdp_pp_feature_version pp_feature_version;
+	struct msmfb_overlay_data data;
+	struct mdp_set_cfg cfg;
+
+	switch (cmd) {
+	case MSMFB_MDP_PP:
+		ret = mdss_mdp_pp_ioctl(mfd, argp);
+		break;
+	case MSMFB_MDP_PP_GET_FEATURE_VERSION:
+		ret = copy_from_user(&pp_feature_version, argp,
+				     sizeof(pp_feature_version));
+		if (ret) {
+			pr_err("copy_from_user failed for pp_feature_version\n");
+			ret = -EFAULT;
+		} else {
+			ret = mdss_mdp_pp_get_version(&pp_feature_version);
+			if (!ret) {
+				ret = copy_to_user(argp, &pp_feature_version,
+						sizeof(pp_feature_version));
+				if (ret) {
+					pr_err("copy_to_user failed for pp_feature_version\n");
+					ret = -EFAULT;
+				}
+			} else {
+				pr_err("get pp version failed ret %d\n", ret);
+			}
+		}
+		break;
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+		ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
+		break;
+
+	case MSMFB_OVERLAY_GET:
+		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+		ret = copy_from_user(req, argp, sizeof(*req));
+		if (!ret) {
+			ret = mdss_mdp_overlay_get(mfd, req);
+
+			if (!IS_ERR_VALUE((unsigned long)ret))
+				ret = copy_to_user(argp, req, sizeof(*req));
+		}
+
+		if (ret)
+			pr_debug("OVERLAY_GET failed (%d)\n", ret);
+		break;
+
+	case MSMFB_OVERLAY_SET:
+		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+		ret = copy_from_user(req, argp, sizeof(*req));
+		if (!ret) {
+			ret = mdss_mdp_overlay_set(mfd, req);
+
+			if (!IS_ERR_VALUE((unsigned long)ret))
+				ret = copy_to_user(argp, req, sizeof(*req));
+		}
+		if (ret)
+			pr_debug("OVERLAY_SET failed (%d)\n", ret);
+		break;
+
+	case MSMFB_OVERLAY_UNSET:
+		/* fix: only unset when the whole ndx copied successfully */
+		if (copy_from_user(&val, argp, sizeof(val)))
+			ret = -EFAULT;
+		else
+			ret = mdss_mdp_overlay_unset(mfd, val);
+		break;
+
+	case MSMFB_OVERLAY_PLAY:
+		ret = copy_from_user(&data, argp, sizeof(data));
+		if (!ret)
+			ret = mdss_mdp_overlay_play(mfd, &data);
+
+		if (ret)
+			pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
+		break;
+
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		if (!copy_from_user(&val, argp, sizeof(val))) {
+			ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
+		} else {
+			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
+			ret = -EFAULT;
+		}
+		break;
+
+	case MSMFB_METADATA_SET:
+		/* fix: report copy faults as -EFAULT, not a positive count */
+		if (copy_from_user(&metadata, argp, sizeof(metadata)))
+			return -EFAULT;
+		ret = mdss_fb_set_metadata(mfd, &metadata);
+		break;
+
+	case MSMFB_METADATA_GET:
+		/* fix: report copy faults as -EFAULT, not a positive count */
+		if (copy_from_user(&metadata, argp, sizeof(metadata)))
+			return -EFAULT;
+		ret = mdss_fb_get_metadata(mfd, &metadata);
+		if (!ret)
+			ret = copy_to_user(argp, &metadata, sizeof(metadata));
+		break;
+
+	case MSMFB_OVERLAY_PREPARE:
+		ret = __handle_ioctl_overlay_prepare(mfd, argp);
+		break;
+	case MSMFB_MDP_SET_CFG:
+		ret = copy_from_user(&cfg, argp, sizeof(cfg));
+		if (ret) {
+			pr_err("copy failed MSMFB_MDP_SET_CFG ret %d\n", ret);
+			ret = -EFAULT;
+			break;
+		}
+		ret = mdss_mdp_set_cfg(mfd, &cfg);
+		break;
+
+	default:
+		break;
+	}
+
+	/* req is non-NULL only for the OVERLAY_GET/SET cases above */
+	kfree(req);
+	return ret;
+}
+
+/**
+ * __mdss_mdp_overlay_ctl_init - Helper function to initialize control structure
+ * @mfd: msm frame buffer data structure associated with the fb device.
+ *
+ * Helper function that allocates and initializes the mdp control structure
+ * for a frame buffer device. Whenever applicable, this function will also setup
+ * the control for the split display path as well.
+ *
+ * Return: pointer to the newly allocated control structure, or an ERR_PTR
+ * on failure (never NULL).
+ */
+static struct mdss_mdp_ctl *__mdss_mdp_overlay_ctl_init(
+	struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_panel_data *pdata;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd)
+		return ERR_PTR(-EINVAL);
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected for fb%d\n", mfd->index);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	ctl = mdss_mdp_ctl_init(pdata, mfd);
+	if (IS_ERR_OR_NULL(ctl)) {
+		pr_err("Unable to initialize ctl for fb%d\n",
+			mfd->index);
+		rc = PTR_ERR(ctl);
+		goto error;
+	}
+	ctl->is_master = true;
+	/* hook up the per-ctl event handlers used by the overlay path */
+	ctl->vsync_handler.vsync_handler =
+					mdss_mdp_overlay_handle_vsync;
+	ctl->vsync_handler.cmd_post_flush = false;
+
+	ctl->recover_underrun_handler.vsync_handler =
+			mdss_mdp_recover_underrun_handler;
+	ctl->recover_underrun_handler.cmd_post_flush = false;
+
+	ctl->frc_vsync_handler.vsync_handler =
+			mdss_mdp_overlay_frc_handler;
+	ctl->frc_vsync_handler.cmd_post_flush = false;
+
+	ctl->lineptr_handler.lineptr_handler =
+					mdss_mdp_overlay_handle_lineptr;
+
+	INIT_WORK(&ctl->remove_underrun_handler,
+			remove_underrun_vsync_handler);
+
+	if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		/* enable split display */
+		rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
+		if (rc) {
+			mdss_mdp_ctl_destroy(ctl);
+			goto error;
+		}
+	}
+
+	mdp5_data->ctl = ctl;
+error:
+	if (rc)
+		return ERR_PTR(rc);
+	else
+		return ctl;
+}
+
+/*
+ * Pick the layer-mixer split mode for the fb based on its current width:
+ * widths above max_mixer_width need two mixers on one display, narrower
+ * modes fall back to a single mixer.  Skipped when the DT already fixed
+ * the mixer widths (and therefore the split mode).
+ */
+static void mdss_mdp_set_lm_flag(struct msm_fb_data_type *mfd)
+{
+	u32 width;
+	struct mdss_data_type *mdata;
+
+	/* if lm_widths are set, the split_mode would have been set */
+	if (mfd->panel_info->lm_widths[0] && mfd->panel_info->lm_widths[1])
+		return;
+
+	mdata = mdss_mdp_get_mdata();
+	width = mfd->fbi->var.xres;
+
+	/* setting the appropriate split_mode for HDMI usecases */
+	if ((mfd->split_mode == MDP_SPLIT_MODE_NONE ||
+			mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+			(width > mdata->max_mixer_width)) {
+		/* too wide for one mixer: split the fb evenly across two */
+		width /= 2;
+		mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+		mfd->split_fb_left = width;
+		mfd->split_fb_right = width;
+	} else if (is_dual_lm_single_display(mfd) &&
+			(width <= mdata->max_mixer_width)) {
+		/* narrow again: collapse back to a single mixer */
+		mfd->split_mode = MDP_SPLIT_MODE_NONE;
+		mfd->split_fb_left = 0;
+		mfd->split_fb_right = 0;
+	}
+}
+
+/*
+ * Abort a pending mode-switch request.  For command-mode panels, wait for
+ * the pingpong interfaces to go idle and then move all in-use buffers to
+ * the freelist so they can be reclaimed; video-mode panels are left alone
+ * because their buffers cannot be freed at this point (see note below).
+ */
+static void mdss_mdp_handle_invalid_switch_state(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct mdss_mdp_data *buf, *tmpbuf;
+
+	/* clear the switch request before touching buffers */
+	mfd->switch_state = MDSS_MDP_NO_UPDATE_REQUESTED;
+
+	/*
+	 * Handle only for cmd mode panels as for video mode, buffers
+	 * cannot be freed at this point. Needs revisting to handle the
+	 * use case for video mode panels.
+	 */
+	if (mfd->panel_info->type == MIPI_CMD_PANEL) {
+		if (ctl->ops.wait_pingpong)
+			rc = ctl->ops.wait_pingpong(ctl, NULL);
+		if (!rc && sctl && sctl->ops.wait_pingpong)
+			rc = sctl->ops.wait_pingpong(sctl, NULL);
+		if (rc) {
+			/* HW still busy: bail out rather than free live buffers */
+			pr_err("wait for pp failed\n");
+			return;
+		}
+
+		mutex_lock(&mdp5_data->list_lock);
+		list_for_each_entry_safe(buf, tmpbuf,
+					&mdp5_data->bufs_used, buf_list)
+			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
+		mutex_unlock(&mdp5_data->list_lock);
+	}
+}
+
+/*
+ * Power-on path for the overlay: lazily create the ctl (and writeback
+ * context for WFD), then either restart an already-powered panel, or run
+ * the first overlay start/kickoff.  During continuous splash or a pending
+ * handoff the start/kickoff is skipped and only ctl_setup is done.
+ * Any failure after this point tears the overlay back down.
+ */
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_data_type *mdata;
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -EINVAL;
+
+	mdata = mfd_to_mdata(mfd);
+	if (!mdata)
+		return -EINVAL;
+
+	mdss_mdp_set_lm_flag(mfd);
+
+	/* first power-on allocates the ctl; later calls reuse it */
+	if (!mdp5_data->ctl) {
+		ctl = __mdss_mdp_overlay_ctl_init(mfd);
+		if (IS_ERR_OR_NULL(ctl))
+			return PTR_ERR(ctl);
+	} else {
+		ctl = mdp5_data->ctl;
+	}
+
+	if (mfd->panel_info->type == WRITEBACK_PANEL && !mdp5_data->wfd) {
+		mdp5_data->wfd = mdss_mdp_wfd_init(&mfd->pdev->dev, ctl);
+		if (IS_ERR_OR_NULL(mdp5_data->wfd)) {
+			rc = PTR_ERR(mdp5_data->wfd);
+			/* panel_on checks rc and runs the teardown path */
+			goto panel_on;
+		}
+	}
+
+	if (mdss_fb_is_power_on(mfd)) {
+		pr_debug("panel was never turned off\n");
+		rc = mdss_mdp_ctl_start(ctl, false);
+		goto panel_on;
+	}
+
+	rc = mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_RESET,
+		NULL, false);
+	if (rc)
+		goto panel_on;
+
+	/* Skip the overlay start and kickoff for all displays
+	 * if handoff is pending. Previously we skipped it for DTV
+	 * panel and pluggable panels (bridge chip hdmi case). But
+	 * it does not cover the case where there is a non pluggable
+	 * tertiary display. Using the flag handoff_pending to skip
+	 * overlay start and kickoff should cover all cases
+	 * TODO: In the long run, the overlay start and kickoff
+	 * should not be skipped, instead, the handoff can be done
+	 */
+	if (!mfd->panel_info->cont_splash_enabled &&
+		!mdata->handoff_pending) {
+		rc = mdss_mdp_overlay_start(mfd);
+		if (rc)
+			goto end;
+		if (mfd->panel_info->type != WRITEBACK_PANEL) {
+			atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
+			rc = mdss_mdp_overlay_kickoff(mfd, NULL);
+		}
+	} else {
+		rc = mdss_mdp_ctl_setup(ctl);
+		if (rc)
+			goto end;
+	}
+
+panel_on:
+	if (IS_ERR_VALUE((unsigned long)rc)) {
+		/* undo whatever was brought up before the failure */
+		pr_err("Failed to turn on fb%d\n", mfd->index);
+		mdss_mdp_overlay_off(mfd);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * Tear down a ctl that was handed off from the bootloader: free the fb
+ * pipes, flush any pipes still queued for use/cleanup with a kickoff,
+ * stop the ctl, purge the free list if the panel is fully off, and
+ * finally release the splash-screen resources.
+ */
+static int mdss_mdp_handoff_cleanup_ctl(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	int need_cleanup;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	mdss_mdp_overlay_free_fb_pipe(mfd);
+
+	mutex_lock(&mdp5_data->list_lock);
+	need_cleanup = !list_empty(&mdp5_data->pipes_cleanup) ||
+		!list_empty(&mdp5_data->pipes_used);
+	mutex_unlock(&mdp5_data->list_lock);
+
+	/* a kickoff drains the cleanup/used pipe lists before stopping */
+	if (need_cleanup)
+		mdss_mdp_overlay_kickoff(mfd, NULL);
+
+	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
+	if (!rc) {
+		if (mdss_fb_is_power_off(mfd)) {
+			mutex_lock(&mdp5_data->list_lock);
+			__mdss_mdp_overlay_free_list_purge(mfd);
+			mutex_unlock(&mdp5_data->list_lock);
+		}
+	}
+
+	/* note: rc from ctl_stop is overwritten; only splash cleanup
+	 * status is returned to the caller
+	 */
+	rc = mdss_mdp_splash_cleanup(mfd, false);
+	if (rc)
+		pr_err("%s: failed splash clean up %d\n", __func__, rc);
+
+	return rc;
+}
+
+/*
+ * Power-off path for the overlay: free fb pipes, disable HW cursors,
+ * flush pending pipe cleanups, wait out any outstanding retire fences,
+ * then stop (and, when the fb is released or the panel reconfigured,
+ * destroy) the ctl.  A runtime-pm reference is held across the whole
+ * sequence so idle power collapse can be told apart from suspend.
+ */
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *mixer;
+	int need_cleanup;
+	int retire_cnt;
+	bool destroy_ctl = false;
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl) {
+		pr_err("ctl not initialized\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Keep a reference to the runtime pm until the overlay is turned
+	 * off, and then release this last reference at the end. This will
+	 * help in distinguishing between idle power collapse versus suspend
+	 * power collapse
+	 */
+	pm_runtime_get_sync(&mfd->pdev->dev);
+
+	/* low-power (LP) state: skip the teardown, just stop the ctl */
+	if (mdss_fb_is_power_on_lp(mfd)) {
+		pr_debug("panel not turned off. keeping overlay on\n");
+		goto ctl_stop;
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+
+	mdss_mdp_overlay_free_fb_pipe(mfd);
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (mixer)
+		mixer->cursor_enabled = 0;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+	if (mixer)
+		mixer->cursor_enabled = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	need_cleanup = !list_empty(&mdp5_data->pipes_cleanup);
+	mutex_unlock(&mdp5_data->list_lock);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	/* destroy when the fb has no users left or the panel is changing */
+	destroy_ctl = !mfd->ref_cnt || mfd->panel_reconfig;
+
+	mutex_lock(&mfd->switch_lock);
+	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		/* blanking mid mode-switch: abort the switch first */
+		destroy_ctl = true;
+		need_cleanup = false;
+		pr_warn("fb%d blank while mode switch (%d) in progress\n",
+				mfd->index, mfd->switch_state);
+		mdss_mdp_handle_invalid_switch_state(mfd);
+	}
+	mutex_unlock(&mfd->switch_lock);
+
+	if (need_cleanup) {
+		pr_debug("cleaning up pipes on fb%d\n", mfd->index);
+		if (mdata->handoff_pending)
+			mdp5_data->allow_kickoff = true;
+
+		mdss_mdp_overlay_kickoff(mfd, NULL);
+	} else if (!mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+		/* ctl already off: nothing to stop, optionally destroy it */
+		if (mfd->panel_reconfig) {
+			if (mfd->panel_info->cont_splash_enabled)
+				mdss_mdp_handoff_cleanup_ctl(mfd);
+
+			mdp5_data->borderfill_enable = false;
+			mdss_mdp_ctl_destroy(mdp5_data->ctl);
+			mdp5_data->ctl = NULL;
+		}
+		goto end;
+	}
+
+	/*
+	 * If retire fences are still active wait for a vsync time
+	 * for retire fence to be updated.
+	 * As a last resort signal the timeline if vsync doesn't arrive.
+	 */
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	retire_cnt = mdp5_data->retire_cnt;
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (retire_cnt) {
+		u32 fps = mdss_panel_get_framerate(mfd->panel_info,
+				FPS_RESOLUTION_HZ);
+		u32 vsync_time = 1000 / (fps ? : DEFAULT_FRAME_RATE);
+
+		msleep(vsync_time);
+
+		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+		retire_cnt = mdp5_data->retire_cnt;
+		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+		__vsync_retire_signal(mfd, retire_cnt);
+
+		/*
+		 * the retire work can still schedule after above retire_signal
+		 * api call. Flush workqueue guarantees that current caller
+		 * context is blocked till retire_work finishes. Any work
+		 * schedule after flush call should not cause any issue because
+		 * retire_signal api checks for retire_cnt with sync_mutex lock.
+		 */
+
+		kthread_flush_work(&mdp5_data->vsync_work);
+	}
+
+ctl_stop:
+	mutex_lock(&mdp5_data->ov_lock);
+	/* set the correct pipe_mapped before ctl_stop */
+	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
+			MDSS_MDP_MIXER_MUX_LEFT);
+	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
+			MDSS_MDP_MIXER_MUX_RIGHT);
+	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
+	if (rc == 0) {
+		if (mdss_fb_is_power_off(mfd)) {
+			mutex_lock(&mdp5_data->list_lock);
+			__mdss_mdp_overlay_free_list_purge(mfd);
+			if (!mfd->ref_cnt)
+				mdss_mdp_overlay_buf_deinit(mfd);
+			mutex_unlock(&mdp5_data->list_lock);
+			mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
+					&mfd->mdp_sync_pt_data.notifier);
+
+			if (destroy_ctl) {
+				mdp5_data->borderfill_enable = false;
+				mdss_mdp_ctl_destroy(mdp5_data->ctl);
+				mdp5_data->ctl = NULL;
+			}
+
+			atomic_dec(&mdp5_data->mdata->active_intf_cnt);
+
+			/*
+			 * drop the reference taken in overlay_on unless idle
+			 * power collapse keeps a cmd-mode panel runtime-active
+			 */
+			if (!mdp5_data->mdata->idle_pc_enabled ||
+				(mfd->panel_info->type != MIPI_CMD_PANEL)) {
+				rc = pm_runtime_put(&mfd->pdev->dev);
+				if (rc)
+					pr_err("unable to suspend w/pm_runtime_put (%d)\n",
+						rc);
+			}
+		}
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	if (mdp5_data->wfd) {
+		mdss_mdp_wfd_deinit(mdp5_data->wfd);
+		mdp5_data->wfd = NULL;
+	}
+
+end:
+	/* Release the last reference to the runtime device */
+	rc = pm_runtime_put(&mfd->pdev->dev);
+	if (rc)
+		pr_err("unable to suspend w/pm_runtime_put (%d)\n", rc);
+
+	return rc;
+}
+
+/*
+ * Read back the LAYER registers of every interface mixer of @ctl and hand
+ * off each staged pipe (and then the mixer) to the kernel driver.  Used
+ * during continuous-splash handoff to adopt the bootloader's setup.
+ */
+static int __mdss_mdp_ctl_handoff(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_ctl *ctl, struct mdss_data_type *mdata)
+{
+	int rc = 0;
+	int i, j;
+	u32 mixercfg;
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!ctl || !mdata)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	for (i = 0; i < mdata->nmixers_intf; i++) {
+		mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
+		pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);
+
+		/* scan every SSPP; stop early once no staged bits remain */
+		j = MDSS_MDP_SSPP_VIG0;
+		for (; j < MDSS_MDP_SSPP_CURSOR0 && mixercfg; j++) {
+			/* each pipe owns a 3-bit field in the LAYER register */
+			u32 cfg = j * 3;
+
+			if ((j == MDSS_MDP_SSPP_VIG3) ||
+					(j == MDSS_MDP_SSPP_RGB3)) {
+				/* Add 2 to account for Cursor & Border bits */
+				cfg += 2;
+			}
+			if (mixercfg & (0x7 << cfg)) {
+				pr_debug("Pipe %d staged\n", j);
+				/* bootloader display always uses RECT0 */
+				pipe = mdss_mdp_pipe_search(mdata, BIT(j),
+					MDSS_MDP_PIPE_RECT0);
+				if (!pipe) {
+					pr_warn("Invalid pipe %d staged\n", j);
+					continue;
+				}
+
+				rc = mdss_mdp_pipe_handoff(pipe);
+				if (rc) {
+					pr_err("Failed to handoff pipe%d\n",
+						pipe->num);
+					goto exit;
+				}
+
+				pipe->mfd = mfd;
+				mutex_lock(&mdp5_data->list_lock);
+				list_add(&pipe->list, &mdp5_data->pipes_used);
+				mutex_unlock(&mdp5_data->list_lock);
+
+				rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
+				if (rc) {
+					pr_err("failed to handoff mix%d\n", i);
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return rc;
+}
+
+/**
+ * mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
+ * @mfd: Msm frame buffer structure associated with the fb device.
+ *
+ * This function populates the MDP software structures with the current state of
+ * the MDP hardware to handoff any active control path for the framebuffer
+ * device. This is needed to identify any ctl, mixers and pipes being set up by
+ * the bootloader to display the splash screen when the continuous splash screen
+ * feature is enabled in kernel.
+ */
+static int mdss_mdp_overlay_handoff(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_ctl *sctl = NULL;
+
+	if (!mdp5_data->ctl) {
+		ctl = __mdss_mdp_overlay_ctl_init(mfd);
+		if (IS_ERR_OR_NULL(ctl)) {
+			rc = PTR_ERR(ctl);
+			goto error;
+		}
+	} else {
+		ctl = mdp5_data->ctl;
+	}
+
+	/*
+	 * vsync interrupt needs on during continuous splash, this is
+	 * to initialize necessary ctl members here.
+	 */
+	rc = mdss_mdp_ctl_start(ctl, true);
+	if (rc) {
+		pr_err("Failed to initialize ctl\n");
+		goto error;
+	}
+
+	ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false);
+	pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);
+
+	/* adopt the primary path first, then the split/secondary one */
+	rc = __mdss_mdp_ctl_handoff(mfd, ctl, mdata);
+	if (rc) {
+		pr_err("primary ctl handoff failed. rc=%d\n", rc);
+		goto error;
+	}
+
+	if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (!sctl) {
+			pr_err("cannot get secondary ctl. fail the handoff\n");
+			rc = -EPERM;
+			goto error;
+		}
+		rc = __mdss_mdp_ctl_handoff(mfd, sctl, mdata);
+		if (rc) {
+			pr_err("secondary ctl handoff failed. rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = mdss_mdp_smp_handoff(mdata);
+	if (rc)
+		pr_err("Failed to handoff smps\n");
+
+	mdp5_data->handoff = true;
+
+error:
+	/* on failure, release every adopted pipe and destroy the ctl */
+	if (rc && ctl) {
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
+		mdss_mdp_ctl_destroy(ctl);
+		mdp5_data->ctl = NULL;
+		mdp5_data->handoff = false;
+	}
+
+	return rc;
+}
+
+/*
+ * Vsync callback for retire fences: runs in interrupt-ish context, so it
+ * only queues the retire work onto the dedicated kthread worker.
+ */
+static void __vsync_retire_handle_vsync(struct mdss_mdp_ctl *ctl, ktime_t t)
+{
+	struct msm_fb_data_type *mfd = ctl->mfd;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd || !mfd->mdp.private1) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	kthread_queue_work(&mdp5_data->worker, &mdp5_data->vsync_work);
+}
+
+/*
+ * Kthread-worker side of the retire path: signal one retire fence per
+ * vsync.  Bails out if the ctl went away or cannot remove its vsync
+ * handler (which __vsync_retire_signal relies on).
+ */
+static void __vsync_retire_work_handler(struct kthread_work *work)
+{
+	struct mdss_overlay_private *mdp5_data =
+		container_of(work, typeof(*mdp5_data), vsync_work);
+
+	if (!mdp5_data->ctl || !mdp5_data->ctl->mfd)
+		return;
+
+	if (!mdp5_data->ctl->ops.remove_vsync_handler)
+		return;
+
+	__vsync_retire_signal(mdp5_data->ctl->mfd, 1);
+}
+
+/*
+ * Advance the retire timeline by @val and decrement the pending retire
+ * count (clamped so it never goes negative).  When the last fence is
+ * signalled, the retire vsync handler is detached with MDP clocks held.
+ * Serialized by mdp_sync_pt_data.sync_mutex.
+ */
+static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (mdp5_data->retire_cnt > 0) {
+		mdss_inc_timeline(mdp5_data->vsync_timeline, val);
+		/* clamp: never retire more fences than are outstanding */
+		mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
+		pr_debug("Retire signaled! timeline val=%d remaining=%d\n",
+			mdss_get_timeline_retire_ts(mdp5_data->vsync_timeline),
+			mdp5_data->retire_cnt);
+
+		if (mdp5_data->retire_cnt == 0) {
+			/* clocks must be on while touching the vsync HW */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdp5_data->ctl->ops.remove_vsync_handler(mdp5_data->ctl,
+					&mdp5_data->vsync_retire_handler);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+}
+
+/*
+ * Create a retire fence for the next commit.  The fence value is one past
+ * the current pending count, so it signals once all previously queued
+ * frames plus this one have retired.  Fails if the ctl has no vsync
+ * support or is not yet powered on.
+ */
+static struct mdss_fence *
+__vsync_retire_get_fence(struct msm_sync_pt_data *sync_pt_data)
+{
+	struct msm_fb_data_type *mfd;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	int value;
+
+	mfd = container_of(sync_pt_data, typeof(*mfd), mdp_sync_pt_data);
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return ERR_PTR(-ENODEV);
+
+	ctl = mdp5_data->ctl;
+	if (!ctl->ops.add_vsync_handler)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_debug("fb%d vsync pending first update\n", mfd->index);
+		return ERR_PTR(-EPERM);
+	}
+
+	value = 1 + mdp5_data->retire_cnt;
+	mdp5_data->retire_cnt++;
+
+	return mdss_fb_sync_get_fence(mdp5_data->vsync_timeline,
+			"mdp-retire", value);
+}
+
+/*
+ * Attach the retire vsync handler when retire fences are outstanding and
+ * the handler is not already installed.  No-op (returns 0) when there is
+ * nothing pending; errors when the ctl lacks vsync support or is off.
+ */
+static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl;
+	int rc;
+	int retire_cnt;
+
+	ctl = mdp5_data->ctl;
+	/* snapshot retire_cnt under the sync mutex */
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	retire_cnt = mdp5_data->retire_cnt;
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (!retire_cnt || mdp5_data->vsync_retire_handler.enabled)
+		return 0;
+
+	if (!ctl->ops.add_vsync_handler)
+		return -EOPNOTSUPP;
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_debug("fb%d vsync pending first update\n", mfd->index);
+		return -EPERM;
+	}
+
+	rc = ctl->ops.add_vsync_handler(ctl,
+			&mdp5_data->vsync_retire_handler);
+	return rc;
+}
+
+/*
+ * One-time setup of the retire-fence machinery for an fb: create the
+ * per-fb sync timeline, spawn the kthread worker that signals fences, and
+ * register the retire vsync handler / get_retire_fence hook.
+ *
+ * Return: 0 on success, -ENOMEM if the timeline or thread cannot be made.
+ */
+static int __vsync_retire_setup(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	char name[24];
+	struct sched_param param = { .sched_priority = 5 };
+
+	snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
+	mdp5_data->vsync_timeline = mdss_create_timeline(name);
+	if (mdp5_data->vsync_timeline == NULL) {
+		pr_err("cannot vsync create time line");
+		return -ENOMEM;
+	}
+
+	kthread_init_worker(&mdp5_data->worker);
+	kthread_init_work(&mdp5_data->vsync_work, __vsync_retire_work_handler);
+
+	mdp5_data->thread = kthread_run(kthread_worker_fn,
+					&mdp5_data->worker,
+					"vsync_retire_work");
+
+	if (IS_ERR(mdp5_data->thread)) {
+		pr_err("unable to start vsync thread\n");
+		mdp5_data->thread = NULL;
+		return -ENOMEM;
+	}
+
+	/* best-effort RT priority; NOTE(review): return value is ignored,
+	 * the worker still runs (at default priority) if this fails
+	 */
+	sched_setscheduler(mdp5_data->thread, SCHED_FIFO, &param);
+
+	mfd->mdp_sync_pt_data.get_retire_fence = __vsync_retire_get_fence;
+
+	mdp5_data->vsync_retire_handler.vsync_handler =
+		__vsync_retire_handle_vsync;
+	mdp5_data->vsync_retire_handler.cmd_post_flush = false;
+
+	return 0;
+}
+
+/*
+ * Dynamic mode switch (video <-> command) for a panel.  Notifies the DSI
+ * driver of the new mode, then either destroys the ctl so it is rebuilt
+ * in the new mode (@dest_ctrl), or reconfigures the existing ctl in place,
+ * including the split/secondary path.
+ *
+ * @mode: target mode passed to MDSS_EVENT_DSI_UPDATE_PANEL_DATA.
+ * @dest_ctrl: non-zero to destroy and re-create the ctl instead of
+ *             reconfiguring it.
+ */
+static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
+		int mode, int dest_ctrl)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_panel_data *pdata;
+	struct mdss_mdp_ctl *sctl;
+
+	if (ctl == NULL) {
+		pr_debug("ctl not initialized\n");
+		return 0;
+	}
+
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
+		(void *)(unsigned long)mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+	if (ret)
+		pr_err("Dynamic switch to %s mode failed!\n",
+					mode ? "command" : "video");
+
+	if (dest_ctrl) {
+		/*
+		 * Destroy current ctrl structure as this is
+		 * going to be re-initialized with the requested mode.
+		 */
+		mdss_mdp_ctl_destroy(mdp5_data->ctl);
+		mdp5_data->ctl = NULL;
+	} else {
+		pdata = dev_get_platdata(&mfd->pdev->dev);
+
+		if (mdp5_data->mdata->has_pingpong_split &&
+			pdata->panel_info.use_pingpong_split)
+			mfd->split_mode = MDP_PINGPONG_SPLIT;
+		/*
+		 * Dynamic change so we need to reconfig instead of
+		 * destroying current ctrl structure.
+		 */
+		mdss_mdp_ctl_reconfig(ctl, pdata);
+
+		/*
+		 * Set flag when dynamic resolution switch happens before
+		 * handoff of cont-splash
+		 */
+		if (mdata->handoff_pending)
+			ctl->switch_with_handoff = true;
+
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl) {
+			if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+				mdss_mdp_ctl_reconfig(sctl, pdata->next);
+				sctl->border_x_off +=
+					pdata->panel_info.lcdc.border_left +
+					pdata->panel_info.lcdc.border_right;
+			} else {
+				/*
+				 * todo: need to revisit this and properly
+				 * cleanup slave resources
+				 */
+				mdss_mdp_ctl_destroy(sctl);
+				ctl->mixer_right = NULL;
+			}
+		} else if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+			/* enable split display for the first time */
+			ret = mdss_mdp_ctl_split_display_setup(ctl,
+					pdata->next);
+			if (ret) {
+				mdss_mdp_ctl_destroy(ctl);
+				mdp5_data->ctl = NULL;
+			}
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Input-event hook: give the ctl an early wake-up kick so the display can
+ * start powering up before the first frame arrives.  Does nothing when
+ * there is no ctl, the panel is off, or the ctl has no early-wake-up op.
+ */
+int mdss_mdp_input_event_handler(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	if (!ctl)
+		return 0;
+
+	if (!mdss_panel_is_power_on(ctl->power_state))
+		return 0;
+
+	if (!ctl->ops.early_wake_up_fnc)
+		return 0;
+
+	return ctl->ops.early_wake_up_fnc(ctl);
+}
+
+/*
+ * msm_mdp_interface->signal_retire_fence hook: force-signal @retire_cnt
+ * pending retire fences (e.g. when frames will never retire normally).
+ */
+static void mdss_mdp_signal_retire_fence(struct msm_fb_data_type *mfd,
+						int retire_cnt)
+{
+	__vsync_retire_signal(mfd, retire_cnt);
+	pr_debug("Signaled (%d) pending retire fence\n", retire_cnt);
+}
+
+/*
+ * Per-fb overlay initialization: allocate the mdp5 private data, wire up
+ * the msm_mdp_interface callbacks, create the sysfs nodes/links, set up
+ * retire fences for cmd/DMS panels, register the event timer for the
+ * primary panel and, when continuous splash is active, hand off the
+ * bootloader's ctl path (handoff failure is non-fatal).
+ *
+ * Return: 0 on success, negative errno on failure; on failure all local
+ * allocations are released and mfd->mdp.private1 is cleared.
+ */
+int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct device *dev = mfd->fbi->dev;
+	struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct irq_info *mdss_irq;
+	int rc;
+
+	mdp5_data = kcalloc(1, sizeof(struct mdss_overlay_private), GFP_KERNEL);
+	if (!mdp5_data)
+		return -ENOMEM;
+
+	mdp5_data->frc_fsm
+		= kcalloc(1, sizeof(struct mdss_mdp_frc_fsm), GFP_KERNEL);
+	if (!mdp5_data->frc_fsm) {
+		rc = -ENOMEM;
+		pr_err("fail to allocate mdp5 frc fsm structure\n");
+		goto init_fail1;
+	}
+
+	mdp5_data->mdata = dev_get_drvdata(mfd->pdev->dev.parent);
+	if (!mdp5_data->mdata) {
+		pr_err("unable to initialize overlay for fb%d\n", mfd->index);
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_interface->on_fnc = mdss_mdp_overlay_on;
+	mdp5_interface->off_fnc = mdss_mdp_overlay_off;
+	mdp5_interface->release_fnc = __mdss_mdp_overlay_release_all;
+	mdp5_interface->do_histogram = NULL;
+	if (mdp5_data->mdata->ncursor_pipes)
+		mdp5_interface->cursor_update = mdss_mdp_hw_cursor_pipe_update;
+	else
+		mdp5_interface->cursor_update = mdss_mdp_hw_cursor_update;
+	mdp5_interface->async_position_update =
+		mdss_mdp_async_position_update;
+	mdp5_interface->dma_fnc = mdss_mdp_overlay_pan_display;
+	mdp5_interface->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
+	mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
+	mdp5_interface->mode_switch = mdss_mode_switch;
+	mdp5_interface->mode_switch_post = mdss_mode_switch_post;
+	mdp5_interface->pre_commit_fnc = mdss_mdp_overlay_precommit;
+	mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
+	mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
+	mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
+	mdp5_interface->signal_retire_fence = mdss_mdp_signal_retire_fence;
+
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		mdp5_interface->atomic_validate =
+			mdss_mdp_layer_atomic_validate_wfd;
+		mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit_wfd;
+		mdp5_interface->is_config_same = mdss_mdp_wfd_is_config_same;
+	} else {
+		mdp5_interface->atomic_validate =
+			mdss_mdp_layer_atomic_validate;
+		mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit;
+	}
+
+	INIT_LIST_HEAD(&mdp5_data->pipes_used);
+	INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
+	INIT_LIST_HEAD(&mdp5_data->pipes_destroy);
+	INIT_LIST_HEAD(&mdp5_data->bufs_pool);
+	INIT_LIST_HEAD(&mdp5_data->bufs_chunks);
+	INIT_LIST_HEAD(&mdp5_data->bufs_used);
+	INIT_LIST_HEAD(&mdp5_data->bufs_freelist);
+	INIT_LIST_HEAD(&mdp5_data->rot_proc_list);
+	mutex_init(&mdp5_data->list_lock);
+	mutex_init(&mdp5_data->ov_lock);
+	mutex_init(&mdp5_data->dfps_lock);
+	mdp5_data->hw_refresh = true;
+	mdp5_data->cursor_ndx[CURSOR_PIPE_LEFT] = MSMFB_NEW_REQUEST;
+	mdp5_data->cursor_ndx[CURSOR_PIPE_RIGHT] = MSMFB_NEW_REQUEST;
+	mdp5_data->allow_kickoff = false;
+
+	mfd->mdp.private1 = mdp5_data;
+	mfd->wait_for_kickoff = true;
+
+	rc = mdss_mdp_overlay_fb_parse_dt(mfd);
+	if (rc)
+		goto init_fail;	/* was "return rc": leaked mdp5_data/frc_fsm */
+
+	/*
+	 * disable BWC if primary panel is video mode on specific
+	 * chipsets to workaround HW problem.
+	 */
+	if (mdss_has_quirk(mdp5_data->mdata, MDSS_QUIRK_BWCPANIC) &&
+	    mfd->panel_info->type == MIPI_VIDEO_PANEL && (mfd->index == 0))
+		mdp5_data->mdata->has_bwc = false;
+
+	mfd->panel_orientation = mfd->panel_info->panel_orientation;
+
+	if ((mfd->panel_info->panel_orientation & MDP_FLIP_LR) &&
+	    (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY))
+		mdp5_data->mixer_swap = true;
+
+	rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
+	if (rc) {
+		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+		goto init_fail;
+	}
+
+	mdp5_data->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						     "vsync_event");
+	if (!mdp5_data->vsync_event_sd) {
+		pr_err("vsync_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->lineptr_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						       "lineptr_event");
+	if (!mdp5_data->lineptr_event_sd) {
+		pr_err("lineptr_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						    "hist_event");
+	if (!mdp5_data->hist_event_sd) {
+		pr_err("hist_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						  "bl_event");
+	if (!mdp5_data->bl_event_sd) {
+		pr_err("bl_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->ad_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						  "ad_event");
+	if (!mdp5_data->ad_event_sd) {
+		pr_err("ad_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->ad_bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						     "ad_bl_event");
+	if (!mdp5_data->ad_bl_event_sd) {
+		pr_err("ad_bl_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	rc = sysfs_create_link_nowarn(&dev->kobj,
+			&mdp5_data->mdata->pdev->dev.kobj, "mdp");
+	if (rc)
+		pr_warn("problem creating link to mdp sysfs\n");
+
+	rc = sysfs_create_link_nowarn(&dev->kobj,
+			&mfd->pdev->dev.kobj, "mdss_fb");
+	if (rc)
+		pr_warn("problem creating link to mdss_fb sysfs\n");
+
+	if (mfd->panel_info->type == MIPI_VIDEO_PANEL ||
+	    mfd->panel_info->type == DTV_PANEL) {
+		rc = sysfs_create_group(&dev->kobj,
+			&dynamic_fps_fs_attrs_group);
+		if (rc) {
+			pr_err("Error dfps sysfs creation ret=%d\n", rc);
+			goto init_fail;
+		}
+	}
+
+	if (mfd->panel_info->mipi.dms_mode ||
+			mfd->panel_info->type == MIPI_CMD_PANEL) {
+		rc = __vsync_retire_setup(mfd);
+		if (IS_ERR_VALUE((unsigned long)rc)) {
+			pr_err("unable to create vsync timeline\n");
+			goto init_fail;
+		}
+	}
+	mfd->mdp_sync_pt_data.async_wait_fences = true;
+
+	pm_runtime_set_suspended(&mfd->pdev->dev);
+	pm_runtime_enable(&mfd->pdev->dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+	mdss_irq = mdss_intr_line();
+
+	/* Adding event timer only for primary panel */
+	if ((mfd->index == 0) && (mfd->panel_info->type != WRITEBACK_PANEL)) {
+		mdp5_data->cpu_pm_hdl = add_event_timer(mdss_irq->irq,
+				mdss_mdp_ctl_event_timer, (void *)mdp5_data);
+		if (!mdp5_data->cpu_pm_hdl)
+			pr_warn("%s: unable to add event timer\n", __func__);
+	}
+
+	if (mfd->panel_info->cont_splash_enabled) {
+		rc = mdss_mdp_overlay_handoff(mfd);
+		if (rc) {
+			/*
+			 * Even though handoff failed, it is not fatal.
+			 * MDP can continue, just that we would have a longer
+			 * delay in transitioning from splash screen to boot
+			 * animation
+			 */
+			pr_warn("Overlay handoff failed for fb%d. rc=%d\n",
+					mfd->index, rc);
+			rc = 0;
+		}
+	}
+	mdp5_data->dyn_pu_state = mfd->panel_info->partial_update_enabled;
+
+	if (mdss_mdp_pp_overlay_init(mfd))
+		pr_warn("Failed to initialize pp overlay data.\n");
+	return rc;
+init_fail:
+	kfree(mdp5_data->frc_fsm);
+init_fail1:
+	kfree(mdp5_data);
+	/* don't leave a dangling pointer behind for later callbacks */
+	mfd->mdp.private1 = NULL;
+	return rc;
+}
+
+/*
+ * Parse the fb device's DT node for overlay options.  Currently only the
+ * boolean "qcom,mdss-mixer-swap" property is read; always returns 0.
+ */
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
+{
+	struct platform_device *pdev = mfd->pdev;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	mdp5_data->mixer_swap = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-mixer-swap");
+	if (mdp5_data->mixer_swap) {
+		pr_info("mixer swap is enabled for fb device=%s\n",
+			pdev->name);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_scaler_lut_init() - copy the QSEED3 scaler LUTs from userspace
+ * @mdata: mdss global data; must have a valid scaler_off block.
+ * @lut_tbl: userspace descriptor with pointers/sizes of the three LUTs.
+ *
+ * The LUT buffers are devm-allocated on first use and reused afterwards.
+ * The table is marked invalid for the duration of the update so a
+ * half-written LUT is never consumed.
+ *
+ * Return: 0 on success, -EFAULT if there is no scaler block, -EINVAL on a
+ * size mismatch or copy failure, -ENOMEM on allocation failure.
+ */
+static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
+		struct mdp_scale_luts_info *lut_tbl)
+{
+	struct mdss_mdp_qseed3_lut_tbl *qseed3_lut_tbl;
+	int ret = 0;	/* fix: was uninitialized and returned on success */
+
+	if (!mdata->scaler_off)
+		return -EFAULT;
+
+	qseed3_lut_tbl = &mdata->scaler_off->lut_tbl;
+	/* reject tables whose sizes do not match the HW coefficient layout */
+	if ((lut_tbl->dir_lut_size !=
+		DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) ||
+		(lut_tbl->cir_lut_size !=
+		 CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) ||
+		(lut_tbl->sep_lut_size !=
+		 SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t)))
+		return -EINVAL;
+
+	if (!qseed3_lut_tbl->dir_lut) {
+		qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev,
+				lut_tbl->dir_lut_size,
+				GFP_KERNEL);
+		if (!qseed3_lut_tbl->dir_lut) {
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (!qseed3_lut_tbl->cir_lut) {
+		qseed3_lut_tbl->cir_lut = devm_kzalloc(&mdata->pdev->dev,
+				lut_tbl->cir_lut_size,
+				GFP_KERNEL);
+		if (!qseed3_lut_tbl->cir_lut) {
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (!qseed3_lut_tbl->sep_lut) {
+		qseed3_lut_tbl->sep_lut = devm_kzalloc(&mdata->pdev->dev,
+				lut_tbl->sep_lut_size,
+				GFP_KERNEL);
+		if (!qseed3_lut_tbl->sep_lut) {
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+
+	/* Invalidate before updating */
+	qseed3_lut_tbl->valid = false;
+
+	if (copy_from_user(qseed3_lut_tbl->dir_lut,
+				(void *)(unsigned long)lut_tbl->dir_lut,
+				lut_tbl->dir_lut_size)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (copy_from_user(qseed3_lut_tbl->cir_lut,
+				(void *)(unsigned long)lut_tbl->cir_lut,
+				lut_tbl->cir_lut_size)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (copy_from_user(qseed3_lut_tbl->sep_lut,
+				(void *)(unsigned long)lut_tbl->sep_lut,
+				lut_tbl->sep_lut_size)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	qseed3_lut_tbl->valid = true;
+	return ret;
+
+err:
+	/*
+	 * Fix: do NOT kfree() the devm-allocated buffers here.  The old
+	 * code kfree()'d them on alloc failure, which corrupts the devres
+	 * bookkeeping and leaves stale non-NULL pointers that a retry
+	 * would write through (use-after-free).  The buffers stay owned by
+	 * devm and are reused on the next attempt.
+	 */
+	qseed3_lut_tbl->valid = false;
+	return ret;
+}
+
+/*
+ * Backend for the MSMFB_MDP_SET_CFG ioctl: dispatch on cfg->flags.  Only
+ * MDP_QSEED3_LUT_CFG is handled today — validate the payload length, copy
+ * the LUT descriptor from userspace and program the scaler LUTs.  Unknown
+ * flags fall through and return -EINVAL.
+ */
+static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
+		struct mdp_set_cfg *cfg)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	int ret = -EINVAL;
+	struct mdp_scale_luts_info luts_info;
+
+	switch (cfg->flags) {
+	case MDP_QSEED3_LUT_CFG:
+		/* payload must be exactly the LUT descriptor */
+		if (cfg->len != sizeof(luts_info)) {
+			pr_err("invalid length %d expected %zd\n", cfg->len,
+				sizeof(luts_info));
+			ret = -EINVAL;
+			break;
+		}
+		ret = copy_from_user(&luts_info,
+				(void *)(unsigned long)cfg->payload, cfg->len);
+		if (ret) {
+			pr_err("qseed3 lut copy failed ret %d\n", ret);
+			ret = -EFAULT;
+			break;
+		}
+		ret = mdss_mdp_scaler_lut_init(mdata, &luts_info);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
new file mode 100644
index 0000000..3a8df20
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -0,0 +1,3137 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+#define SMP_MB_SIZE (mdss_res->smp_mb_size)
+#define SMP_MB_CNT (mdss_res->smp_mb_cnt)
+#define SMP_MB_ENTRY_SIZE 16
+#define MAX_BPP 4
+
+#define PIPE_CLEANUP_TIMEOUT_US 100000
+
+/* following offsets are relative to ctrl register bit offset */
+#define CLK_FORCE_ON_OFFSET 0x0
+#define CLK_FORCE_OFF_OFFSET 0x1
+/* following offsets are relative to status register bit offset */
+#define CLK_STATUS_OFFSET 0x0
+
+#define QOS_LUT_NRT_READ 0x0
+#define PANIC_LUT_NRT_READ 0x0
+#define ROBUST_LUT_NRT_READ 0xFFFF
+
+#define VBLANK_PANIC_DEFAULT_CONFIG 0x200000 /* Priority 2, no panic */
+#define VBLANK_PANIC_CREQ_MASK 0x300030
+
+#define QSEED3_DEFAULT_PRELAOD_H 0x4
+#define QSEED3_DEFAULT_PRELAOD_V 0x3
+
+static DEFINE_MUTEX(mdss_mdp_sspp_lock);
+static DEFINE_MUTEX(mdss_mdp_smp_lock);
+
+static void mdss_mdp_pipe_free(struct kref *kref);
+static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp);
+static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write);
+static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
+ struct mdss_data_type *mdata, int client_id,
+ enum mdss_mdp_pipe_rect rect_num);
+static int mdss_mdp_calc_stride(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_plane_sizes *ps);
+static u32 mdss_mdp_calc_per_plane_num_blks(u32 ystride,
+ struct mdss_mdp_pipe *pipe);
+static int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe);
+
+/**
+ * enum mdss_mdp_pipe_qos - Different qos configurations for each pipe
+ *
+ * @MDSS_MDP_PIPE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ *	this configuration is mutually exclusive from VBLANK_CTRL.
+ * @MDSS_MDP_PIPE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum mdss_mdp_pipe_qos {
+	MDSS_MDP_PIPE_QOS_VBLANK_CTRL = BIT(0),
+	MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE = BIT(1),
+	MDSS_MDP_PIPE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/* Write @val to the SSPP register at offset @reg from the pipe's base. */
+static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
+				u32 reg, u32 val)
+{
+	writel_relaxed(val, pipe->base + reg);
+}
+
+/**
+ * This function is used to decide if certain register programming can be
+ * delayed or not. This is useful when multirect is used where two pipe
+ * structures access same set of registers.
+ */
+static inline bool is_pipe_programming_delay_needed(
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_pipe *next_pipe = pipe->multirect.next;
+
+	/* delay only while the sibling rect still has unflushed params */
+	return (pipe->multirect.mode != MDSS_MDP_PIPE_MULTIRECT_NONE) &&
+			next_pipe->params_changed;
+}
+
+/* Read the SSPP register at offset @reg from the pipe's base. */
+static inline u32 mdss_mdp_pipe_read(struct mdss_mdp_pipe *pipe, u32 reg)
+{
+	return readl_relaxed(pipe->base + reg);
+}
+
+/*
+ * Estimate the fill level used to pick a QoS LUT: the fixed pixel RAM
+ * size divided by the per-line fetch footprint ((src_width + 32) * bpp).
+ * NV12 counts only half the buffer (luma/chroma fetched on separate
+ * planes); other pseudo-planar formats use the full buffer; all other
+ * fetch modes get double credit, per the divisors below.
+ */
+static inline int mdss_calc_fill_level(struct mdss_mdp_format_params *fmt,
+	u32 src_width)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 fixed_buff_size = mdata->pixel_ram_size;
+	u32 total_fl;
+
+	if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == MDSS_MDP_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size) /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		total_fl = (fixed_buff_size * 2) /
+			((src_width + 32) * fmt->bpp);
+	}
+
+	return total_fl;
+}
+
+static inline u32 get_qos_lut_linear(u32 total_fl)
+{
+	/*
+	 * CREQ LUT values for linear formats. Entry i covers fill level
+	 * (4 + i); fill levels at or below 4 use the first entry and
+	 * levels above 12 saturate at the last entry.
+	 */
+	static const u32 lut[] = {
+		0x1B,		/* fl <= 4 */
+		0x5B,		/* fl <= 5 */
+		0x15B,		/* fl <= 6 */
+		0x55B,		/* fl <= 7 */
+		0x155B,		/* fl <= 8 */
+		0x555B,		/* fl <= 9 */
+		0x1555B,	/* fl <= 10 */
+		0x5555B,	/* fl <= 11 */
+		0x15555B,	/* fl <= 12 */
+		0x55555B,	/* fl > 12 */
+	};
+	u32 idx = (total_fl <= 4) ? 0 : (total_fl - 4);
+	u32 last = (sizeof(lut) / sizeof(lut[0])) - 1;
+
+	return lut[(idx > last) ? last : idx];
+}
+
+static inline u32 get_qos_lut_macrotile(u32 total_fl)
+{
+	/*
+	 * CREQ LUT values for macrotile formats. Entry i covers fill
+	 * level (10 + i); levels above 12 saturate at the last entry.
+	 */
+	static const u32 lut[] = {
+		0x1AAff,	/* fl <= 10 */
+		0x5AAFF,	/* fl <= 11 */
+		0x15AAFF,	/* fl <= 12 */
+		0x55AAFF,	/* fl > 12 */
+	};
+	u32 idx = (total_fl <= 10) ? 0 : (total_fl - 10);
+	u32 last = (sizeof(lut) / sizeof(lut[0])) - 1;
+
+	return lut[(idx > last) ? last : idx];
+}
+
+/*
+ * Program the SSPP CREQ QoS LUT for @pipe. Non-real-time clients
+ * (no-interface ctl or rotator mode) get the low-priority NRT LUT;
+ * real-time clients get a LUT chosen by fill level and by linear vs
+ * macrotile source format. Enables MDP clocks around the write.
+ */
+static void mdss_mdp_pipe_qos_lut(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+	u32 qos_lut;
+	u32 total_fl = 0;
+
+	if ((ctl->intf_num == MDSS_MDP_NO_INTF) ||
+			pipe->mixer_left->rotator_mode) {
+		qos_lut = QOS_LUT_NRT_READ; /* low priority for nrt */
+	} else {
+		total_fl = mdss_calc_fill_level(pipe->src_fmt,
+			pipe->src.w);
+
+		if (mdss_mdp_is_linear_format(pipe->src_fmt))
+			qos_lut = get_qos_lut_linear(total_fl);
+		else
+			qos_lut = get_qos_lut_macrotile(total_fl);
+	}
+
+	trace_mdp_perf_set_qos_luts(pipe->num, pipe->src_fmt->format,
+		ctl->intf_num, pipe->mixer_left->rotator_mode, total_fl,
+		qos_lut, mdss_mdp_is_linear_format(pipe->src_fmt));
+
+	pr_debug("pnum:%d fmt:%d intf:%d rot:%d fl:%d lut:0x%x\n",
+		pipe->num, pipe->src_fmt->format, ctl->intf_num,
+		pipe->mixer_left->rotator_mode, total_fl, qos_lut);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_CREQ_LUT,
+		qos_lut);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/* A pipe is real-time when it is attached to an interface (display) mixer. */
+bool is_rt_pipe(struct mdss_mdp_pipe *pipe)
+{
+	return pipe && pipe->mixer_left &&
+		pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_INTF;
+}
+
+/*
+ * Program the per-pipe DANGER (panic) and SAFE (robust) LUT registers,
+ * selected by whether the pipe is real-time and whether its source
+ * format is linear or tiled. Caller is expected to have clocks on.
+ */
+static void mdss_mdp_config_pipe_panic_lut(struct mdss_mdp_pipe *pipe)
+{
+	u32 panic_lut, robust_lut;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!is_rt_pipe(pipe)) {
+		panic_lut = PANIC_LUT_NRT_READ;
+		robust_lut = ROBUST_LUT_NRT_READ;
+	} else if (mdss_mdp_is_linear_format(pipe->src_fmt)) {
+		panic_lut = mdata->default_panic_lut_per_pipe_linear;
+		robust_lut = mdata->default_robust_lut_per_pipe_linear;
+	} else {
+		panic_lut = mdata->default_panic_lut_per_pipe_tile;
+		robust_lut = mdata->default_robust_lut_per_pipe_tile;
+	}
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DANGER_LUT,
+		panic_lut);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SAFE_LUT,
+		robust_lut);
+
+	trace_mdp_perf_set_panic_luts(pipe->num, pipe->src_fmt->format,
+		pipe->src_fmt->fetch_mode, panic_lut, robust_lut);
+
+	pr_debug("pnum:%d fmt:%d mode:%d luts[0x%x, 0x%x]\n",
+		pipe->num, pipe->src_fmt->format, pipe->src_fmt->fetch_mode,
+		panic_lut, robust_lut);
+}
+
+/*
+ * Read-modify-write the SSPP QOS_CTRL register. @flags selects which
+ * features to touch (see enum mdss_mdp_pipe_qos) and @enable sets or
+ * clears the corresponding enable bit. Callers enable clocks.
+ */
+static void mdss_mdp_pipe_qos_ctrl(struct mdss_mdp_pipe *pipe,
+	bool enable, u32 flags)
+{
+	u32 per_pipe_qos;
+
+	per_pipe_qos = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_QOS_CTRL);
+
+	if (flags & MDSS_MDP_PIPE_QOS_VBLANK_CTRL) {
+		per_pipe_qos |= VBLANK_PANIC_DEFAULT_CONFIG;
+
+		/* BIT(16) is the vblank qos enable bit */
+		if (enable)
+			per_pipe_qos |= BIT(16);
+		else
+			per_pipe_qos &= ~BIT(16);
+	}
+
+	if (flags & MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		per_pipe_qos &= ~BIT(16);
+		per_pipe_qos &= ~VBLANK_PANIC_CREQ_MASK; /* clear vblank bits */
+	}
+
+	if (flags & MDSS_MDP_PIPE_QOS_PANIC_CTRL) {
+		/* BIT(0) is the per-pipe panic enable bit */
+		if (enable)
+			per_pipe_qos |= BIT(0);
+		else
+			per_pipe_qos &= ~BIT(0);
+	}
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_QOS_CTRL,
+		per_pipe_qos);
+}
+
+/**
+ * @mdss_mdp_pipe_panic_vblank_signal_ctrl -
+ * @pipe: pointer to a pipe
+ * @enable: TRUE - enables feature FALSE - disables feature
+ *
+ * This function assumes that clocks are enabled, so it is callers
+ * responsibility to enable clocks before calling this function.
+ *
+ * No-op (still returns 0) unless the hardware has panic control, the
+ * pipe is real-time, and the qos map advertises vblank panic control.
+ */
+static int mdss_mdp_pipe_panic_vblank_signal_ctrl(struct mdss_mdp_pipe *pipe,
+	bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->has_panic_ctrl)
+		goto end;
+
+	if (!is_rt_pipe(pipe))
+		goto end;
+
+	if (!test_bit(MDSS_QOS_VBLANK_PANIC_CTRL, mdata->mdss_qos_map))
+		goto end;
+
+	mutex_lock(&mdata->reg_lock);
+
+	mdss_mdp_pipe_qos_ctrl(pipe, enable, MDSS_MDP_PIPE_QOS_VBLANK_CTRL);
+
+	mutex_unlock(&mdata->reg_lock);
+
+end:
+	return 0;
+}
+
+/*
+ * Enable or disable panic signalling for a real-time @pipe, using either
+ * the shared MMSS_MDP_PANIC_ROBUST_CTRL register or the per-pipe
+ * QOS_CTRL scheme depending on the hardware's support mode. Returns 0.
+ */
+int mdss_mdp_pipe_panic_signal_ctrl(struct mdss_mdp_pipe *pipe, bool enable)
+{
+	uint32_t panic_robust_ctrl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->has_panic_ctrl)
+		goto end;
+
+	if (!is_rt_pipe(pipe))
+		goto end;
+
+	mutex_lock(&mdata->reg_lock);
+	switch (mdss_mdp_panic_signal_support_mode(mdata)) {
+	case MDSS_MDP_PANIC_COMMON_REG_CFG:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		panic_robust_ctrl = readl_relaxed(mdata->mdp_base +
+				MMSS_MDP_PANIC_ROBUST_CTRL);
+		if (enable)
+			panic_robust_ctrl |= BIT(pipe->panic_ctrl_ndx);
+		else
+			panic_robust_ctrl &= ~BIT(pipe->panic_ctrl_ndx);
+		writel_relaxed(panic_robust_ctrl,
+				mdata->mdp_base + MMSS_MDP_PANIC_ROBUST_CTRL);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		break;
+	case MDSS_MDP_PANIC_PER_PIPE_CFG:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+		mdss_mdp_pipe_qos_ctrl(pipe, enable,
+			MDSS_MDP_PIPE_QOS_PANIC_CTRL);
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		break;
+	}
+	mutex_unlock(&mdata->reg_lock);
+
+end:
+	return 0;
+}
+
+/*
+ * mdss_mdp_bwcpanic_ctrl() - switch the global panic/robust LUTs
+ * @enable: true writes all-zero LUTs; false restores the default LUT
+ * values cached in @mdata. Used around BWC operation.
+ */
+void mdss_mdp_bwcpanic_ctrl(struct mdss_data_type *mdata, bool enable)
+{
+	if (!mdata)
+		return;
+
+	mutex_lock(&mdata->reg_lock);
+	if (enable) {
+		writel_relaxed(0x0, mdata->mdp_base + MMSS_MDP_PANIC_LUT0);
+		writel_relaxed(0x0, mdata->mdp_base + MMSS_MDP_PANIC_LUT1);
+		writel_relaxed(0x0, mdata->mdp_base + MMSS_MDP_ROBUST_LUT);
+	} else {
+		writel_relaxed(mdata->default_panic_lut0,
+			mdata->mdp_base + MMSS_MDP_PANIC_LUT0);
+		writel_relaxed(mdata->default_panic_lut1,
+			mdata->mdp_base + MMSS_MDP_PANIC_LUT1);
+		writel_relaxed(mdata->default_robust_lut,
+			mdata->mdp_base + MMSS_MDP_ROBUST_LUT);
+	}
+	mutex_unlock(&mdata->reg_lock);
+}
+
+/**
+ * @mdss_mdp_pipe_nrt_vbif_setup -
+ * @mdata: pointer to global driver data.
+ * @pipe: pointer to a pipe
+ *
+ * This function assumes that clocks are enabled, so it is callers
+ * responsibility to enable clocks before calling this function.
+ *
+ * Only DMA pipes can be retargeted between the RT and NRT VBIF; the
+ * per-pipe select bit is indexed relative to DMA0.
+ */
+static void mdss_mdp_pipe_nrt_vbif_setup(struct mdss_data_type *mdata,
+					struct mdss_mdp_pipe *pipe)
+{
+	uint32_t nrt_vbif_client_sel;
+
+	if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA)
+		return;
+
+	mutex_lock(&mdata->reg_lock);
+	nrt_vbif_client_sel = readl_relaxed(mdata->mdp_base +
+				MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
+	if (mdss_mdp_is_nrt_vbif_client(mdata, pipe))
+		nrt_vbif_client_sel |= BIT(pipe->num - MDSS_MDP_SSPP_DMA0);
+	else
+		nrt_vbif_client_sel &= ~BIT(pipe->num - MDSS_MDP_SSPP_DMA0);
+	writel_relaxed(nrt_vbif_client_sel,
+			mdata->mdp_base + MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
+	mutex_unlock(&mdata->reg_lock);
+}
+
+/*
+ * Returns true on MDP HW revisions 103/105/109/110, where previously
+ * allocated SMP blocks may remain unused after a config change; all
+ * other revisions require the allocation to match exactly.
+ */
+static inline bool is_unused_smp_allowed(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	switch (MDSS_GET_MAJOR_MINOR(mdata->mdp_rev)) {
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_103):
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_105):
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_109):
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_110):
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Reserve @n SMP memory blocks for one plane of a pipe. Fixed
+ * (statically mapped) blocks count toward @n first; the remainder is
+ * drawn from the global mmb_alloc_map. Returns the number of blocks
+ * now backing the plane, which may be less than @n if the pool ran dry
+ * or a non-double-buffered config change was rejected.
+ */
+static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
+	size_t n, bool force_alloc)
+{
+	u32 i, mmb;
+	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (n <= fixed_cnt)
+		return fixed_cnt;
+
+	n -= fixed_cnt;
+
+	/* i = number of blocks already allocated to this plane */
+	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);
+
+	/*
+	 * SMP programming is not double buffered. Fail the request,
+	 * that calls for change in smp configuration (addition/removal
+	 * of smp blocks), so that fallback solution happens.
+	 */
+	if (i != 0 && !force_alloc &&
+	    (((n < i) && !is_unused_smp_allowed()) || (n > i))) {
+		pr_debug("Can't change mmb config, num_blks: %zu alloc: %d\n",
+			n, i);
+		return 0;
+	}
+
+	/*
+	 * Clear previous SMP reservations and reserve according to the
+	 * latest configuration
+	 */
+	mdss_mdp_smp_mmb_free(smp_map->reserved, false);
+
+	/* Reserve mmb blocks*/
+	for (; i < n; i++) {
+		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
+			break;
+
+		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
+		set_bit(mmb, smp_map->reserved);
+		set_bit(mmb, mdata->mmb_alloc_map);
+	}
+
+	return i + fixed_cnt;
+}
+
+/*
+ * Assign every SMP block set in @smp to HW client @client_id by
+ * programming the read and write allocation registers; each 32-bit
+ * word holds three 8-bit client fields. Returns the count programmed.
+ */
+static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp)
+{
+	u32 mmb, off, data, s;
+	int cnt = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	for_each_set_bit(mmb, smp, SMP_MB_CNT) {
+		off = (mmb / 3) * 4;
+		s = (mmb % 3) * 8;
+		data = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
+		data &= ~(0xFF << s);
+		data |= client_id << s;
+		writel_relaxed(data, mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
+		writel_relaxed(data, mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_R0 + off);
+		cnt++;
+	}
+	return cnt;
+}
+
+/* Fold the @extra block bitmap into @smp and clear @extra. */
+static void mdss_mdp_smp_mmb_amend(unsigned long *smp, unsigned long *extra)
+{
+	bitmap_or(smp, smp, extra, SMP_MB_CNT);
+	bitmap_zero(extra, SMP_MB_CNT);
+}
+
+/*
+ * Return the blocks in @smp to the global pool; when @write is true,
+ * also clear their HW client assignment (client id 0).
+ */
+static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!bitmap_empty(smp, SMP_MB_CNT)) {
+		if (write)
+			mdss_mdp_smp_mmb_set(0, smp);
+		bitmap_andnot(mdata->mmb_alloc_map, mdata->mmb_alloc_map,
+			smp, SMP_MB_CNT);
+		bitmap_zero(smp, SMP_MB_CNT);
+	}
+}
+
+/*
+ * Compute how many SMP blocks @pipe would need, summed over its fetch
+ * planes. Returns 0 when the target has dedicated pixel RAM (no SMP)
+ * or when the stride calculation fails.
+ */
+u32 mdss_mdp_smp_calc_num_blocks(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_plane_sizes ps;
+	int rc = 0;
+	int i, num_blks = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	rc = mdss_mdp_calc_stride(pipe, &ps);
+	if (rc) {
+		pr_err("wrong stride calc\n");
+		return 0;
+	}
+
+	for (i = 0; i < ps.num_planes; i++) {
+		num_blks += mdss_mdp_calc_per_plane_num_blks(ps.ystride[i],
+			pipe);
+		pr_debug("SMP for BW %d mmb for pnum=%d plane=%d\n",
+			num_blks, pipe->num, i);
+	}
+
+	pr_debug("SMP blks %d mb_cnt for pnum=%d\n",
+		num_blks, pipe->num);
+	return num_blks;
+}
+
+/**
+ * @mdss_mdp_smp_get_size - get allocated smp size for a pipe
+ * @pipe: pointer to a pipe
+ *
+ * Function counts number of blocks that are currently allocated for a
+ * pipe, then smp buffer size is number of blocks multiplied by block
+ * size. On targets with dedicated pixel RAM the fixed RAM size is
+ * returned instead.
+ */
+u32 mdss_mdp_smp_get_size(struct mdss_mdp_pipe *pipe)
+{
+	int i, mb_cnt = 0, smp_size;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram) {
+		smp_size = mdata->pixel_ram_size;
+	} else {
+		/* allocated + statically fixed blocks across all planes */
+		for (i = 0; i < MAX_PLANES; i++) {
+			mb_cnt += bitmap_weight(pipe->smp_map[i].allocated,
+				SMP_MB_CNT);
+			mb_cnt += bitmap_weight(pipe->smp_map[i].fixed,
+				SMP_MB_CNT);
+		}
+
+		smp_size = mb_cnt * SMP_MB_SIZE;
+	}
+
+	pr_debug("SMP size %d for pnum=%d\n",
+			smp_size, pipe->num);
+
+	return smp_size;
+}
+
+/*
+ * Program the three SSPP REQPRIO_FIFO watermark registers for @pipe
+ * from the latency-buffer share of its @mb_cnt SMP blocks. Rotator and
+ * writeback (non-real-time) pipes get 0xffff watermarks.
+ */
+static void mdss_mdp_smp_set_wm_levels(struct mdss_mdp_pipe *pipe, int mb_cnt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 useable_space, latency_bytes, val, wm[3];
+	struct mdss_mdp_mixer *mixer = pipe->mixer_left;
+
+	useable_space = mb_cnt * SMP_MB_SIZE;
+
+	/*
+	 * For 1.3.x version, when source format is macrotile then useable
+	 * space within total allocated SMP space is limited to src_w *
+	 * bpp * nlines. Unlike linear format, any extra space left over is
+	 * not filled.
+	 *
+	 * All other versions, in case of linear we calculate the latency
+	 * bytes as the bytes to be used for the latency buffer lines, so the
+	 * transactions when filling the full SMPs have the lowest priority.
+	 */
+
+	latency_bytes = mdss_mdp_calc_latency_buf_bytes(pipe->src_fmt->is_yuv,
+		pipe->bwc_mode, mdss_mdp_is_tile_format(pipe->src_fmt),
+		pipe->src.w, pipe->src_fmt->bpp, false, useable_space,
+		mdss_mdp_is_ubwc_format(pipe->src_fmt),
+		mdss_mdp_is_nv12_format(pipe->src_fmt),
+		(pipe->flags & MDP_FLIP_LR));
+
+	if ((pipe->flags & MDP_FLIP_LR) &&
+		!mdss_mdp_is_tile_format(pipe->src_fmt)) {
+		/*
+		 * when doing hflip, one line is reserved to be consumed down
+		 * the pipeline. This line will always be marked as full even
+		 * if it doesn't have any data. In order to generate proper
+		 * priority levels ignore this region while setting up
+		 * watermark levels
+		 */
+		u8 bpp = pipe->src_fmt->is_yuv ? 1 :
+			pipe->src_fmt->bpp;
+		latency_bytes -= (pipe->src.w * bpp);
+	}
+
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103) &&
+		mdss_mdp_is_tile_format(pipe->src_fmt)) {
+		/* watermarks at 5/8, 6/8 and 7/8 of the fill entries */
+		val = latency_bytes / SMP_MB_ENTRY_SIZE;
+
+		wm[0] = (val * 5) / 8;
+		wm[1] = (val * 6) / 8;
+		wm[2] = (val * 7) / 8;
+	} else if (mixer->rotator_mode ||
+		(mixer->ctl->intf_num == MDSS_MDP_NO_INTF)) {
+		/* any non real time pipe */
+		wm[0] = 0xffff;
+		wm[1] = 0xffff;
+		wm[2] = 0xffff;
+	} else {
+		/*
+		 * 1/3 of the latency buffer bytes from the
+		 * SMP pool that is being fetched
+		 */
+		val = (latency_bytes / SMP_MB_ENTRY_SIZE) / 3;
+
+		wm[0] = val;
+		wm[1] = wm[0] + val;
+		wm[2] = wm[1] + val;
+	}
+
+	trace_mdp_perf_set_wm_levels(pipe->num, useable_space, latency_bytes,
+		wm[0], wm[1], wm[2], mb_cnt, SMP_MB_SIZE);
+
+	pr_debug("pnum=%d useable_space=%u watermarks %u,%u,%u\n", pipe->num,
+			useable_space, wm[0], wm[1], wm[2]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0, wm[0]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1, wm[1]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2, wm[2]);
+}
+
+/*
+ * Free all reserved and allocated SMP blocks of @pipe; allocated
+ * blocks also get their HW client assignment cleared.
+ */
+static void mdss_mdp_smp_free(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++) {
+		mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false);
+		mdss_mdp_smp_mmb_free(pipe->smp_map[i].allocated, true);
+	}
+	mutex_unlock(&mdss_mdp_smp_lock);
+}
+
+/* Drop only the reserved (not yet allocated) SMP blocks of @pipe. */
+void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++)
+		mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false);
+	mutex_unlock(&mdss_mdp_smp_lock);
+}
+
+/*
+ * Fill @ps with per-plane fetch strides used for SMP sizing. BWC mode
+ * overrides the strides with 16-segment, RAU-aligned line-buffer sizes;
+ * otherwise the normal plane sizes are computed (with decimation and
+ * chroma-subsampling adjustments). Returns 0 or a negative errno.
+ */
+static int mdss_mdp_calc_stride(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_plane_sizes *ps)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u16 width;
+	int rc = 0;
+	u32 format, seg_w = 0;
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	width = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+
+	if (pipe->bwc_mode) {
+		rc = mdss_mdp_get_rau_strides(pipe->src.w, pipe->src.h,
+			pipe->src_fmt, ps);
+		if (rc)
+			return rc;
+		/*
+		 * Override fetch strides with SMP buffer size for both the
+		 * planes. BWC line buffer needs to be divided into 16
+		 * segments and every segment is aligned to format
+		 * specific RAU size
+		 */
+		seg_w = DIV_ROUND_UP(pipe->src.w, 16);
+		if (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+			ps->ystride[0] = ALIGN(seg_w, 32) * 16 * ps->rau_h[0] *
+				pipe->src_fmt->bpp;
+			ps->ystride[1] = 0;
+		} else {
+			u32 bwc_width = ALIGN(seg_w, 64) * 16;
+
+			ps->ystride[0] = bwc_width * ps->rau_h[0];
+			ps->ystride[1] = bwc_width * ps->rau_h[1];
+			/*
+			 * Since chroma for H1V2 is not subsampled it needs
+			 * to be accounted for with bpp factor
+			 */
+			if (pipe->src_fmt->chroma_sample ==
+				MDSS_MDP_CHROMA_H1V2)
+				ps->ystride[1] *= 2;
+		}
+		pr_debug("BWC SMP strides ystride0=%x ystride1=%x\n",
+			ps->ystride[0], ps->ystride[1]);
+	} else {
+		format = pipe->src_fmt->format;
+		/*
+		 * when decimation block is present, all chroma planes
+		 * are fetched on a single SMP plane for chroma pixels
+		 */
+		if (mdata->has_decimation) {
+			switch (pipe->src_fmt->chroma_sample) {
+			case MDSS_MDP_CHROMA_H2V1:
+				format = MDP_Y_CRCB_H2V1;
+				break;
+			case MDSS_MDP_CHROMA_420:
+				format = MDP_Y_CBCR_H2V2;
+				break;
+			default:
+				break;
+			}
+		}
+		rc = mdss_mdp_get_plane_sizes(pipe->src_fmt, width, pipe->src.h,
+			ps, 0, 0);
+		if (rc)
+			return rc;
+
+		if (pipe->mixer_left && (ps->num_planes == 1)) {
+			ps->ystride[0] = MAX_BPP *
+				max(pipe->mixer_left->width, width);
+		} else if (mdata->has_decimation) {
+			/*
+			 * To avoid quality loss, MDP does one less decimation
+			 * on chroma components if they are subsampled.
+			 * Account for this to have enough SMPs for latency
+			 */
+			switch (pipe->src_fmt->chroma_sample) {
+			case MDSS_MDP_CHROMA_H2V1:
+			case MDSS_MDP_CHROMA_420:
+				ps->ystride[1] <<= 1;
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Compute the SMP block count one plane needs for the given @ystride.
+ * Rotator/writeback destinations use a fixed count (4 tiled, 1 linear);
+ * display paths size by fetch lines, round up to a power of two on HW
+ * rev 1.0.0, and cap at smp_mb_per_pipe except for H-flip.
+ */
+static u32 mdss_mdp_calc_per_plane_num_blks(u32 ystride,
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 num_blks = 0;
+	u32 nlines = 0;
+
+	if (pipe->mixer_left && (pipe->mixer_left->rotator_mode ||
+		(pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK))) {
+		if (mdss_mdp_is_tile_format(pipe->src_fmt))
+			num_blks = 4;
+		else
+			num_blks = 1;
+	} else {
+		/* tiled fetches 8 lines; BWC 1 line; linear 2 lines */
+		if (mdss_mdp_is_tile_format(pipe->src_fmt))
+			nlines = 8;
+		else
+			nlines = pipe->bwc_mode ? 1 : 2;
+
+		num_blks = DIV_ROUND_UP(ystride * nlines,
+				SMP_MB_SIZE);
+
+		if (mdata->mdp_rev == MDSS_MDP_HW_REV_100)
+			num_blks = roundup_pow_of_two(num_blks);
+
+		if (mdata->smp_mb_per_pipe &&
+			(num_blks > mdata->smp_mb_per_pipe) &&
+			!(pipe->flags & MDP_FLIP_LR))
+			num_blks = mdata->smp_mb_per_pipe;
+	}
+
+	pr_debug("pipenum:%d tile:%d bwc:%d ystride%d pipeblks:%d blks:%d\n",
+		pipe->num, mdss_mdp_is_tile_format(pipe->src_fmt),
+		pipe->bwc_mode, ystride, mdata->smp_mb_per_pipe, num_blks);
+
+	return num_blks;
+}
+
+/*
+ * mdss_mdp_smp_reserve() - reserve SMP blocks for every plane of @pipe
+ *
+ * Calculates the stride-derived block requirement per plane and reserves
+ * from the shared pool. On HW revisions that disallow leftover blocks,
+ * returns -EAGAIN if a higher plane still holds an allocation. Returns
+ * -ENOBUFS (after rolling back reservations) when the pool is exhausted,
+ * 0 on success. Targets with dedicated pixel RAM need no SMP: returns 0.
+ */
+int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 num_blks = 0, reserved = 0;
+	struct mdss_mdp_plane_sizes ps;
+	int i, rc = 0;
+	bool force_alloc = false;
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	rc = mdss_mdp_calc_stride(pipe, &ps);
+	if (rc)
+		return rc;
+
+	force_alloc = pipe->flags & MDP_SMP_FORCE_ALLOC;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	if (!is_unused_smp_allowed()) {
+		/* planes above num_planes must not hold stale allocations */
+		for (i = (MAX_PLANES - 1); i >= ps.num_planes; i--) {
+			if (bitmap_weight(pipe->smp_map[i].allocated,
+					  SMP_MB_CNT)) {
+				pr_debug("unused mmb for pipe%d plane%d not allowed\n",
+					pipe->num, i);
+				mutex_unlock(&mdss_mdp_smp_lock);
+				return -EAGAIN;
+			}
+		}
+	}
+
+	for (i = 0; i < ps.num_planes; i++) {
+		num_blks = mdss_mdp_calc_per_plane_num_blks(ps.ystride[i],
+			pipe);
+		pr_debug("reserving %d mmb for pnum=%d plane=%d\n",
+				num_blks, pipe->num, i);
+		reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp_map[i],
+			num_blks, force_alloc);
+		if (reserved < num_blks)
+			break;
+	}
+
+	if (reserved < num_blks) {
+		pr_debug("insufficient MMB blocks. pnum:%d\n", pipe->num);
+		/* roll back whatever was reserved so far */
+		for (; i >= 0; i--)
+			mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved,
+				false);
+		rc = -ENOBUFS;
+	}
+	mutex_unlock(&mdss_mdp_smp_lock);
+
+	return rc;
+}
+/*
+ * mdss_mdp_smp_alloc() -- set smp mmb and wm levels for a staged pipe
+ * @pipe: pointer to a pipe
+ *
+ * Function amends reserved smp mmbs to allocated bitmap and ties respective
+ * mmbs to their pipe fetch_ids. Based on the number of total allocated mmbs
+ * for a staged pipe, it also sets the watermark levels (wm).
+ *
+ * This function will be called on every commit where pipe params might not
+ * have changed. In such cases, we need to ensure that wm levels are not
+ * wiped out. Also in some rare situations hw might have reset and wiped out
+ * smp mmb programming but new smp reservation is not done. In such cases we
+ * need to ensure that for a staged pipes, mmbs are set properly based on
+ * allocated bitmap.
+ */
+static int mdss_mdp_smp_alloc(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	int cnt = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++) {
+		cnt += bitmap_weight(pipe->smp_map[i].fixed, SMP_MB_CNT);
+
+		/* nothing reserved: just re-program the allocated blocks */
+		if (bitmap_empty(pipe->smp_map[i].reserved, SMP_MB_CNT)) {
+			cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i,
+				pipe->smp_map[i].allocated);
+			continue;
+		}
+
+		mdss_mdp_smp_mmb_amend(pipe->smp_map[i].allocated,
+			pipe->smp_map[i].reserved);
+		cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i,
+			pipe->smp_map[i].allocated);
+	}
+	mdss_mdp_smp_set_wm_levels(pipe, cnt);
+	mutex_unlock(&mdss_mdp_smp_lock);
+	return 0;
+}
+
+/* Free all SMP blocks of @pipe, holding MDP clocks across the writes. */
+void mdss_mdp_smp_release(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mdss_mdp_smp_free(pipe);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/* Record the SMP geometry (block count and size) in @mdata at probe. */
+int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size)
+{
+	if (!mdata)
+		return -EINVAL;
+
+	mdata->smp_mb_cnt = cnt;
+	mdata->smp_mb_size = size;
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_smp_handoff() - Handoff SMP MMBs in use by staged pipes
+ * @mdata: pointer to the global mdss data structure.
+ *
+ * Iterate through the list of all SMP MMBs and check to see if any
+ * of them are assigned to a pipe being marked as being handed-off.
+ * If so, update the corresponding software allocation map to reflect
+ * this.
+ *
+ * This function would typically be called during MDP probe for the case
+ * when certain pipes might be programmed in the bootloader to display
+ * the splash screen.
+ */
+int mdss_mdp_smp_handoff(struct mdss_data_type *mdata)
+{
+	int rc = 0;
+	int i, client_id, prev_id = 0;
+	u32 off, s, data;
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	/*
+	 * figure out what SMP MMBs are allocated for each of the pipes
+	 * that need to be handed off.
+	 */
+	for (i = 0; i < SMP_MB_CNT; i++) {
+		/* 3 clients per allocation word, 8 bits per client */
+		off = (i / 3) * 4;
+		s = (i % 3) * 8;
+		data = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
+		client_id = (data >> s) & 0xFF;
+		if (test_bit(i, mdata->mmb_alloc_map)) {
+			/*
+			 * Certain pipes may have a dedicated set of
+			 * SMP MMBs statically allocated to them. In
+			 * such cases, we do not need to do anything
+			 * here.
+			 */
+			pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
+				, i, pipe ? pipe->num : -1, client_id);
+			continue;
+		}
+
+		if (client_id) {
+			/* cache the pipe lookup across consecutive blocks */
+			if (client_id != prev_id) {
+				pipe = mdss_mdp_pipe_search_by_client_id(mdata,
+					client_id, MDSS_MDP_PIPE_RECT0);
+				prev_id = client_id;
+			}
+
+			if (!pipe) {
+				pr_warn("Invalid client id %d for SMP MMB %d\n",
+					client_id, i);
+				continue;
+			}
+
+			if (!pipe->is_handed_off) {
+				pr_warn("SMP MMB %d assigned to a pipe not marked for handoff (client id %d)\n"
+					, i, client_id);
+				continue;
+			}
+
+			/*
+			 * Assume that the source format only has
+			 * one plane
+			 */
+			pr_debug("Assigning smp mmb %d to pipe %d (client_id %d)\n"
+				, i, pipe->num, client_id);
+			set_bit(i, pipe->smp_map[0].allocated);
+			set_bit(i, mdata->mmb_alloc_map);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Drop a reference taken by mdss_mdp_pipe_map(). The refcount is not
+ * expected to reach zero here; if it does, the pipe is freed and a
+ * WARN flags the refcounting bug.
+ */
+void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe)
+{
+	if (kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		WARN(1, "Unexpected free pipe during unmap\n");
+		mutex_unlock(&mdss_mdp_sspp_lock);
+	}
+}
+
+/*
+ * Take a reference on @pipe; fails with -EINVAL if the pipe is already
+ * on its way to being freed (refcount was zero).
+ */
+int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe)
+{
+	if (!kref_get_unless_zero(&pipe->kref))
+		return -EINVAL;
+	return 0;
+}
+
+/**
+ * mdss_mdp_qos_vbif_remapper_setup - Program the VBIF QoS remapper
+ * registers based on real or non real time clients
+ * @mdata: Pointer to the global mdss data structure.
+ * @pipe: Pointer to source pipe struct to get xin id's.
+ * @is_realtime: To determine if pipe's client is real or
+ * non real time.
+ * This function assumes that clocks are on, so it is caller responsibility to
+ * call this function with clocks enabled.
+ */
+static void mdss_mdp_qos_vbif_remapper_setup(struct mdss_data_type *mdata,
+			struct mdss_mdp_pipe *pipe, bool is_realtime)
+{
+	u32 mask, reg_val, reg_val_lvl, i, vbif_qos;
+	u32 reg_high;
+	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+
+	if (mdata->npriority_lvl == 0)
+		return;
+
+	if (test_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map)) {
+		/* newer remapper: split RP and LVL registers, 4 bits/xin */
+		mutex_lock(&mdata->reg_lock);
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_high = ((pipe->xin_id & 0x8) >> 3) * 4 + (i * 8);
+
+			reg_val = MDSS_VBIF_READ(mdata,
+				MDSS_VBIF_QOS_RP_REMAP_BASE +
+				reg_high, is_nrt_vbif);
+			reg_val_lvl = MDSS_VBIF_READ(mdata,
+				MDSS_VBIF_QOS_LVL_REMAP_BASE + reg_high,
+				is_nrt_vbif);
+
+			mask = 0x3 << (pipe->xin_id * 4);
+			vbif_qos = is_realtime ?
+				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+
+			reg_val &= ~(mask);
+			reg_val |= vbif_qos << (pipe->xin_id * 4);
+
+			reg_val_lvl &= ~(mask);
+			reg_val_lvl |= vbif_qos << (pipe->xin_id * 4);
+
+			pr_debug("idx:%d xin:%d reg:0x%x val:0x%x lvl:0x%x\n",
+			   i, pipe->xin_id, reg_high, reg_val, reg_val_lvl);
+			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_RP_REMAP_BASE +
+				reg_high, reg_val, is_nrt_vbif);
+			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_LVL_REMAP_BASE +
+				reg_high, reg_val_lvl, is_nrt_vbif);
+		}
+		mutex_unlock(&mdata->reg_lock);
+	} else {
+		/* legacy remapper: single register per level, 2 bits/xin */
+		mutex_lock(&mdata->reg_lock);
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_val = MDSS_VBIF_READ(mdata,
+				MDSS_VBIF_QOS_REMAP_BASE + i*4, is_nrt_vbif);
+
+			mask = 0x3 << (pipe->xin_id * 2);
+			reg_val &= ~(mask);
+			vbif_qos = is_realtime ?
+				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+			reg_val |= vbif_qos << (pipe->xin_id * 2);
+			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_REMAP_BASE + i*4,
+				reg_val, is_nrt_vbif);
+		}
+		mutex_unlock(&mdata->reg_lock);
+	}
+}
+
+/**
+ * mdss_mdp_fixed_qos_arbiter_setup - Program the RT/NRT registers based on
+ * real or non real time clients
+ * @mdata: Pointer to the global mdss data structure.
+ * @pipe: Pointer to source pipe struct to get xin id's.
+ * @is_realtime: To determine if pipe's client is real or
+ * non real time.
+ * This function assumes that clocks are on, so it is caller responsibility to
+ * call this function with clocks enabled.
+ */
+static void mdss_mdp_fixed_qos_arbiter_setup(struct mdss_data_type *mdata,
+		struct mdss_mdp_pipe *pipe, bool is_realtime)
+{
+	u32 mask, reg_val;
+	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+
+	if (!mdata->has_fixed_qos_arbiter_enabled)
+		return;
+
+	mutex_lock(&mdata->reg_lock);
+	reg_val = MDSS_VBIF_READ(mdata, MDSS_VBIF_FIXED_SORT_EN, is_nrt_vbif);
+	mask = 0x1 << pipe->xin_id;
+	reg_val |= mask;
+
+	/* Enable the fixed sort for the client */
+	MDSS_VBIF_WRITE(mdata, MDSS_VBIF_FIXED_SORT_EN, reg_val, is_nrt_vbif);
+	reg_val = MDSS_VBIF_READ(mdata, MDSS_VBIF_FIXED_SORT_SEL0, is_nrt_vbif);
+	mask = 0x1 << (pipe->xin_id * 2);
+	/* bit clear selects RT, bit set selects NRT for this xin */
+	if (is_realtime) {
+		reg_val &= ~mask;
+		pr_debug("Real time traffic on pipe type=%x pnum=%d\n",
+				pipe->type, pipe->num);
+	} else {
+		reg_val |= mask;
+		pr_debug("Non real time traffic on pipe type=%x pnum=%d\n",
+				pipe->type, pipe->num);
+	}
+	/* Set the fixed_sort regs as per RT/NRT client */
+	MDSS_VBIF_WRITE(mdata, MDSS_VBIF_FIXED_SORT_SEL0, reg_val, is_nrt_vbif);
+	mutex_unlock(&mdata->reg_lock);
+}
+
+/*
+ * Reset the software state of a freshly claimed pipe: refcount, wait
+ * queue, buffer queue, and all per-commit parameters back to defaults.
+ */
+static void mdss_mdp_init_pipe_params(struct mdss_mdp_pipe *pipe)
+{
+	kref_init(&pipe->kref);
+	init_waitqueue_head(&pipe->free_waitq);
+	INIT_LIST_HEAD(&pipe->buf_queue);
+
+	pipe->flags = 0;
+	pipe->is_right_blend = false;
+	pipe->src_split_req = false;
+	pipe->bwc_mode = 0;
+
+	pipe->mfd = NULL;
+	pipe->mixer_left = pipe->mixer_right = NULL;
+	pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+	memset(&pipe->scaler, 0, sizeof(struct mdp_scale_data));
+	memset(&pipe->layer, 0, sizeof(struct mdp_input_layer));
+
+	pipe->multirect.mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
+}
+
+/*
+ * Prepare @pipe for use: halt its fetch if it was left unhalted, then
+ * reset its software params. When @pipe is NULL and @pipe_share is set
+ * (no dedicated WFD block), share the mixer's DMA pipe instead — that
+ * pipe must already be attached to a writeback mixer. Returns 0 or a
+ * negative errno.
+ */
+static int mdss_mdp_pipe_init_config(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, bool pipe_share)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata;
+
+	if (pipe && pipe->unhalted) {
+		rc = mdss_mdp_pipe_fetch_halt(pipe, false);
+		if (rc) {
+			pr_err("%d failed because pipe is in bad state\n",
+				pipe->num);
+			goto end;
+		}
+	}
+
+	mdata = mixer->ctl->mdata;
+
+	if (pipe) {
+		pr_debug("type=%x   pnum=%d  rect=%d\n",
+				pipe->type, pipe->num, pipe->multirect.num);
+		mdss_mdp_init_pipe_params(pipe);
+	} else if (pipe_share) {
+		/*
+		 * when there is no dedicated wfd blk, DMA pipe can be
+		 * shared as long as its attached to a writeback mixer
+		 */
+		pipe = mdata->dma_pipes + mixer->num;
+		if (pipe->mixer_left->type != MDSS_MDP_MIXER_TYPE_WRITEBACK) {
+			rc = -EINVAL;
+			goto end;
+		}
+		kref_get(&pipe->kref);
+		pr_debug("pipe sharing for pipe=%d\n", pipe->num);
+	}
+
+end:
+	return rc;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_pipe_init(struct mdss_mdp_mixer *mixer,
+ u32 type, u32 off, struct mdss_mdp_pipe *left_blend_pipe)
+{
+ struct mdss_mdp_pipe *pipe = NULL;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_pipe *pipe_pool = NULL;
+ u32 npipes;
+ bool pipe_share = false;
+ u32 i;
+ int rc;
+
+ if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
+ return NULL;
+
+ mdata = mixer->ctl->mdata;
+
+ switch (type) {
+ case MDSS_MDP_PIPE_TYPE_VIG:
+ pipe_pool = mdata->vig_pipes;
+ npipes = mdata->nvig_pipes;
+ break;
+
+ case MDSS_MDP_PIPE_TYPE_RGB:
+ pipe_pool = mdata->rgb_pipes;
+ npipes = mdata->nrgb_pipes;
+ break;
+
+ case MDSS_MDP_PIPE_TYPE_DMA:
+ pipe_pool = mdata->dma_pipes;
+ npipes = mdata->ndma_pipes;
+ if ((mdata->wfd_mode == MDSS_MDP_WFD_SHARED) &&
+ (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
+ pipe_share = true;
+ break;
+
+ case MDSS_MDP_PIPE_TYPE_CURSOR:
+ pipe_pool = mdata->cursor_pipes;
+ npipes = mdata->ncursor_pipes;
+ break;
+
+ default:
+ npipes = 0;
+ pr_err("invalid pipe type %d\n", type);
+ break;
+ }
+
+ /* allocate lower priority right blend pipe */
+ if (left_blend_pipe && (left_blend_pipe->type == type) && pipe_pool) {
+ struct mdss_mdp_pipe *pool_head = pipe_pool + off;
+
+ off += left_blend_pipe->priority - pool_head->priority + 1;
+ if (off >= npipes) {
+ pr_warn("priority limitation. l_pipe:%d. no low priority %d pipe type available.\n",
+ left_blend_pipe->num, type);
+ pipe = ERR_PTR(-EBADSLT);
+ return pipe;
+ }
+ }
+
+ for (i = off; i < npipes; i++) {
+ pipe = pipe_pool + i;
+ if (pipe && atomic_read(&pipe->kref.refcount) == 0) {
+ pipe->mixer_left = mixer;
+ break;
+ }
+ pipe = NULL;
+ }
+
+ if (pipe && type == MDSS_MDP_PIPE_TYPE_CURSOR) {
+ mdss_mdp_init_pipe_params(pipe);
+ pr_debug("cursor: type=%x pnum=%d\n",
+ pipe->type, pipe->num);
+ goto cursor_done;
+ }
+
+ rc = mdss_mdp_pipe_init_config(pipe, mixer, pipe_share);
+ if (rc)
+ return ERR_PTR(-EINVAL);
+cursor_done:
+ if (!pipe)
+ pr_err("no %d type pipes available\n", type);
+
+ return pipe;
+}
+
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
+ u32 type, struct mdss_mdp_pipe *left_blend_pipe)
+{
+ struct mdss_mdp_pipe *pipe;
+
+ mutex_lock(&mdss_mdp_sspp_lock);
+ pipe = mdss_mdp_pipe_init(mixer, type, 0, left_blend_pipe);
+ mutex_unlock(&mdss_mdp_sspp_lock);
+ return pipe;
+}
+
+struct mdss_mdp_pipe *mdss_mdp_pipe_get(u32 ndx,
+ enum mdss_mdp_pipe_rect rect_num)
+{
+ struct mdss_mdp_pipe *pipe = NULL;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!ndx)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&mdss_mdp_sspp_lock);
+
+ pipe = mdss_mdp_pipe_search(mdata, ndx, rect_num);
+ if (!pipe) {
+ pipe = ERR_PTR(-EINVAL);
+ goto error;
+ }
+
+ if (mdss_mdp_pipe_map(pipe))
+ pipe = ERR_PTR(-EACCES);
+
+error:
+ mutex_unlock(&mdss_mdp_sspp_lock);
+ return pipe;
+}
+
/*
 * Claim a specific pipe (@ndx/@rect_num) for @mixer, waiting for the
 * current owner to release it if necessary.
 *
 * Returns the initialized pipe on success, or an ERR_PTR:
 *  -EINVAL if the pipe does not exist or init fails,
 *  -EBUSY  if the pipe is not released within the retry budget.
 */
struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
	struct mdss_mdp_mixer *mixer, u32 ndx, enum mdss_mdp_pipe_rect rect_num)
{
	struct mdss_mdp_pipe *pipe = NULL;
	int rc;
	int retry_count = 0;

	if (!ndx)
		return ERR_PTR(-EINVAL);

	mutex_lock(&mdss_mdp_sspp_lock);
	pipe = mdss_mdp_pipe_search(mdata, ndx, rect_num);
	if (!pipe) {
		pr_err("pipe search failed\n");
		pipe = ERR_PTR(-EINVAL);
		goto error;
	}

	if (atomic_read(&pipe->kref.refcount) != 0) {
		/*
		 * Drop the lock while waiting so the owner can make
		 * progress and release the pipe.
		 */
		mutex_unlock(&mdss_mdp_sspp_lock);
		do {
			rc = wait_event_interruptible_timeout(pipe->free_waitq,
				!atomic_read(&pipe->kref.refcount),
				usecs_to_jiffies(PIPE_CLEANUP_TIMEOUT_US));
			if (rc == 0 || retry_count == 5) {
				/* timed out, or too many signal retries */
				pr_err("pipe ndx:%d free wait failed, mfd ndx:%d rc=%d\n",
					pipe->ndx,
					pipe->mfd ? pipe->mfd->index : -1, rc);
				pipe = ERR_PTR(-EBUSY);
				/* lock is NOT held here: bypass error label */
				goto end;
			} else if (rc == -ERESTARTSYS) {
				pr_debug("interrupt signal received\n");
				retry_count++;
				continue;
			} else {
				/* pipe became free */
				break;
			}
		} while (true);

		mutex_lock(&mdss_mdp_sspp_lock);
	}
	pipe->mixer_left = mixer;

	rc = mdss_mdp_pipe_init_config(pipe, mixer, false);
	if (rc)
		pipe = ERR_PTR(rc);

error:
	mutex_unlock(&mdss_mdp_sspp_lock);
end:
	return pipe;
}
+
+static struct mdss_mdp_pipe *__pipe_lookup(struct mdss_mdp_pipe *pipe_list,
+ int count, enum mdss_mdp_pipe_rect rect_num,
+ bool (*cmp)(struct mdss_mdp_pipe *, void *), void *data)
+{
+ struct mdss_mdp_pipe *pipe;
+ int i, j, max_rects;
+
+ for (i = 0, pipe = pipe_list; i < count; i++) {
+ max_rects = pipe->multirect.max_rects;
+ for (j = 0; j < max_rects; j++, pipe++)
+ if ((rect_num == pipe->multirect.num) &&
+ cmp(pipe, data))
+ return pipe;
+ }
+
+ return NULL;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_pipe_lookup(
+ struct mdss_data_type *mdata, enum mdss_mdp_pipe_rect rect_num,
+ bool (*cmp)(struct mdss_mdp_pipe *, void *), void *data)
+{
+ struct mdss_mdp_pipe *pipe;
+
+ pipe = __pipe_lookup(mdata->vig_pipes, mdata->nvig_pipes,
+ rect_num, cmp, data);
+ if (pipe)
+ return pipe;
+
+ pipe = __pipe_lookup(mdata->rgb_pipes, mdata->nrgb_pipes,
+ rect_num, cmp, data);
+ if (pipe)
+ return pipe;
+
+ pipe = __pipe_lookup(mdata->dma_pipes, mdata->ndma_pipes,
+ rect_num, cmp, data);
+ if (pipe)
+ return pipe;
+
+ pipe = __pipe_lookup(mdata->cursor_pipes, mdata->ncursor_pipes,
+ rect_num, cmp, data);
+ if (pipe)
+ return pipe;
+
+ return NULL;
+}
+
+static bool __pipe_cmp_fetch_id(struct mdss_mdp_pipe *pipe, void *data)
+{
+ u32 *fetch_id = data;
+
+ return pipe->ftch_id == *fetch_id;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
+ struct mdss_data_type *mdata, int client_id,
+ enum mdss_mdp_pipe_rect rect_num)
+{
+ return mdss_mdp_pipe_lookup(mdata, rect_num,
+ __pipe_cmp_fetch_id, &client_id);
+}
+
+static bool __pipe_cmp_ndx(struct mdss_mdp_pipe *pipe, void *data)
+{
+ u32 *ndx = data;
+
+ return pipe->ndx == *ndx;
+}
+
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+ u32 ndx, enum mdss_mdp_pipe_rect rect_num)
+{
+ return mdss_mdp_pipe_lookup(mdata, rect_num, __pipe_cmp_ndx, &ndx);
+}
+
/*
 * This API checks if the pipe is still staged on a mixer. If any pipe
 * is found staged on the mixer at free time, it raises the panic/xlog
 * handler, since a freed pipe must never remain attached to a mixer.
 *
 * Only the pipe_free path may call this API.
 */
static void mdss_mdp_pipe_check_stage(struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_mixer *mixer)
{
	int index;

	if (pipe->mixer_stage == MDSS_MDP_STAGE_UNUSED || !mixer)
		return;

	/* each stage holds MAX_PIPES_PER_STAGE slots; right blend is +1 */
	index = (pipe->mixer_stage * MAX_PIPES_PER_STAGE);
	if (pipe->is_right_blend)
		index++;
	if (index < MAX_PIPES_PER_LM && pipe == mixer->stage_pipe[index]) {
		pr_err("pipe%d mixer:%d pipe->mixer_stage=%d src_split:%d right blend:%d\n",
			pipe->num, mixer->num, pipe->mixer_stage,
			pipe->src_split_req, pipe->is_right_blend);
		MDSS_XLOG_TOUT_HANDLER("mdp", "dbg_bus", "panic");
	}
}
+
/*
 * Release the hardware resources of a pipe that is being freed: disable
 * panic signalling, halt fetches and release SMP blocks, undo BWC panic
 * state if this was the last BWC pipe, and clear the multirect opmode.
 * Runs with MDP clocks forced on for the duration.
 */
static void mdss_mdp_pipe_hw_cleanup(struct mdss_mdp_pipe *pipe)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	mdss_mdp_pipe_panic_vblank_signal_ctrl(pipe, false);
	mdss_mdp_pipe_panic_signal_ctrl(pipe, false);

	if (pipe->play_cnt) {
		/* pipe has fetched data: halt it before freeing SMPs */
		mdss_mdp_pipe_fetch_halt(pipe, false);
		mdss_mdp_pipe_pp_clear(pipe);
		mdss_mdp_smp_free(pipe);
	} else {
		/* never played: SMPs were only reserved, not in use */
		mdss_mdp_smp_unreserve(pipe);
	}

	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC) && pipe->bwc_mode) {
		unsigned long pnum_bitmap = BIT(pipe->num);

		/* remove this pipe from the BWC-enabled set */
		bitmap_andnot(mdata->bwc_enable_map, mdata->bwc_enable_map,
			&pnum_bitmap, MAX_DRV_SUP_PIPES);

		/* last BWC pipe gone: drop the BWC panic workaround */
		if (bitmap_empty(mdata->bwc_enable_map, MAX_DRV_SUP_PIPES))
			mdss_mdp_bwcpanic_ctrl(mdata, false);
	}

	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_MULTI_REC_OP_MODE, 0);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
}
+
/*
 * kref release callback: final reference to a pipe dropped.
 *
 * HW cleanup is skipped when the sibling rect of a multirect SSPP is
 * still in use, since both rects share the same hardware block; the
 * last rect to be freed performs the cleanup.
 */
static void mdss_mdp_pipe_free(struct kref *kref)
{
	struct mdss_mdp_pipe *pipe, *next_pipe;

	pipe = container_of(kref, struct mdss_mdp_pipe, kref);

	pr_debug("ndx=%x pnum=%d rect=%d\n",
			pipe->ndx, pipe->num, pipe->multirect.num);

	next_pipe = (struct mdss_mdp_pipe *) pipe->multirect.next;
	if (!next_pipe || (atomic_read(&next_pipe->kref.refcount) == 0)) {
		mdss_mdp_pipe_hw_cleanup(pipe);
	} else {
		pr_debug("skip hw cleanup on pnum=%d rect=%d, rect%d still in use\n",
				pipe->num, pipe->multirect.num,
				next_pipe->multirect.num);
	}

	/* a freed pipe must not still be staged on either mixer */
	mdss_mdp_pipe_check_stage(pipe, pipe->mixer_left);
	mdss_mdp_pipe_check_stage(pipe, pipe->mixer_right);
}
+
/*
 * Check whether @pipe is still programmed into any active mixer of any
 * active ctl.  Returns true (and dumps debug state) if so — a pipe
 * being halted/freed must not be referenced by mixer hardware.
 */
static bool mdss_mdp_check_pipe_in_use(struct mdss_mdp_pipe *pipe)
{
	int i;
	bool in_use = false;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_ctl *ctl;
	struct mdss_mdp_mixer *mixer;

	for (i = 0; i < mdata->nctl; i++) {
		ctl = mdata->ctl_off + i;
		/* skip unused ctls */
		if (!ctl || !ctl->ref_cnt)
			continue;

		/* rotator sessions are excluded from this check */
		mixer = ctl->mixer_left;
		if (!mixer || mixer->rotator_mode)
			continue;

		if (mdss_mdp_mixer_reg_has_pipe(mixer, pipe)) {
			in_use = true;
			pr_err("IN USE: pipe=%d mixer=%d\n",
					pipe->num, mixer->num);
			MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
				"dbg_bus", "vbif_dbg_bus", "panic");
		}

		mixer = ctl->mixer_right;
		if (mixer && mdss_mdp_mixer_reg_has_pipe(mixer, pipe)) {
			in_use = true;
			pr_err("IN USE: pipe=%d mixer=%d\n",
					pipe->num, mixer->num);
			MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
				"dbg_bus", "vbif_dbg_bus", "panic");
		}
	}

	return in_use;
}
+
/*
 * Determine whether the pipe's fetch client is idle.
 *
 * Idle is established from, in order: the clock force-on bit (a forced-on
 * client is treated as busy unless @ignore_force_on), the client clock
 * status bit, and finally the VBIF xin halt status.  Cursor pipes skip
 * the VBIF check because their xin-id is shared with other clients.
 *
 * Returns non-zero when idle.  Caller must hold MDP clocks on.
 */
static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe,
	bool ignore_force_on, bool is_nrt_vbif)
{
	u32 reg_val;
	u32 vbif_idle_mask, forced_on_mask, clk_status_idle_mask;
	bool is_idle = false, is_forced_on;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	forced_on_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_ON_OFFSET);
	reg_val = readl_relaxed(mdata->mdp_base + pipe->clk_ctrl.reg_off);
	is_forced_on = (reg_val & forced_on_mask) ? true : false;

	pr_debug("pipe#:%d clk_ctrl: 0x%x forced_on_mask: 0x%x\n", pipe->num,
		reg_val, forced_on_mask);
	/* if forced on then no need to check status */
	if (!is_forced_on) {
		clk_status_idle_mask =
			BIT(pipe->clk_status.bit_off + CLK_STATUS_OFFSET);
		reg_val = readl_relaxed(mdata->mdp_base +
			pipe->clk_status.reg_off);

		/* status bit clear means the client clock is gated (idle) */
		if ((reg_val & clk_status_idle_mask) == 0)
			is_idle = true;

		pr_debug("pipe#:%d clk_status:0x%x clk_status_idle_mask:0x%x\n",
			pipe->num, reg_val, clk_status_idle_mask);
	}

	if (!ignore_force_on && (is_forced_on || !is_idle))
		goto exit;

	/*
	 * skip vbif check for cursor pipes as the same xin-id is shared
	 * between cursor0, cursor1 and dsi
	 */
	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) {
		if (ignore_force_on && is_forced_on)
			is_idle = true;
		goto exit;
	}

	/* xin halt-status bits live in the upper half of HALT_CTRL1 */
	vbif_idle_mask = BIT(pipe->xin_id + 16);
	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1, is_nrt_vbif);

	if (reg_val & vbif_idle_mask)
		is_idle = true;

	pr_debug("pipe#:%d XIN_HALT_CTRL1: 0x%x, vbif_idle_mask: 0x%x\n",
		pipe->num, reg_val, vbif_idle_mask);

exit:
	return is_idle;
}
+
/*
 * mdss_mdp_pipe_clk_force_off() - check force off mask and reset for the pipe.
 * @pipe: pointer to the pipe data structure which needs to be checked for clk.
 *
 * This function would be called where software reset is available for pipe
 * clocks: if the clock force-off bit is set, it is cleared so the client
 * clock can run normally again.  Register access is serialized with
 * reg_lock and MDP clocks are held on for the duration.
 */

void mdss_mdp_pipe_clk_force_off(struct mdss_mdp_pipe *pipe)
{
	u32 reg_val, force_off_mask;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	force_off_mask =
		BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_OFF_OFFSET);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
	mutex_lock(&mdata->reg_lock);
	reg_val = readl_relaxed(mdata->mdp_base +
			pipe->clk_ctrl.reg_off);
	if (reg_val & force_off_mask) {
		/* clear the stale force-off so the clock may run */
		reg_val &= ~force_off_mask;
		writel_relaxed(reg_val,
				mdata->mdp_base + pipe->clk_ctrl.reg_off);
	}
	mutex_unlock(&mdata->reg_lock);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
}
+
/**
 * mdss_mdp_pipe_fetch_halt() - Halt VBIF client corresponding to specified pipe
 * @pipe: pointer to the pipe data structure which needs to be halted.
 * @is_recovery: true when called from a recovery path, in which case the
 *		 pipe-in-use check is skipped (pipes are not yet unstaged).
 *
 * Check if VBIF client corresponding to specified pipe is idle or not. If not
 * send a halt request for the client in question and wait for it be idle.
 *
 * This function would typically be called after pipe is unstaged or before it
 * is initialized. On success it should be assumed that pipe is in idle state
 * and would not fetch any more data. This function cannot be called from
 * interrupt context.
 */
int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe, bool is_recovery)
{
	bool is_idle, forced_on = false, in_use = false;
	int rc = 0;
	u32 reg_val, idle_mask, clk_val, clk_mask;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	bool sw_reset_avail = mdss_mdp_pipe_is_sw_reset_available(mdata);
	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
	u32 sw_reset_off = pipe->sw_reset.reg_off;
	u32 clk_ctrl_off = pipe->clk_ctrl.reg_off;

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	is_idle = mdss_mdp_is_pipe_idle(pipe, true, is_nrt_vbif);
	/*
	 * avoid pipe_in_use check in recovery path as the pipes would not
	 * have been unstaged at this point.
	 */
	if (!is_idle && !is_recovery)
		in_use = mdss_mdp_check_pipe_in_use(pipe);

	/* only halt a busy pipe that is no longer referenced by a mixer */
	if (!is_idle && !in_use) {

		pr_err("%pS: pipe%d is not idle. xin_id=%d\n",
			__builtin_return_address(0), pipe->num, pipe->xin_id);

		mutex_lock(&mdata->reg_lock);
		idle_mask = BIT(pipe->xin_id + 16);

		/*
		 * make sure client clock is not gated while halting by forcing
		 * it ON only if it was not previously forced on
		 */
		clk_val = readl_relaxed(mdata->mdp_base + clk_ctrl_off);
		clk_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_ON_OFFSET);
		if (!(clk_val & clk_mask)) {
			clk_val |= clk_mask;
			writel_relaxed(clk_val, mdata->mdp_base + clk_ctrl_off);
			wmb(); /* ensure write is finished before progressing */
			forced_on = true;
		}

		/* request VBIF to halt this xin client */
		reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
								is_nrt_vbif);
		MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
				reg_val | BIT(pipe->xin_id), is_nrt_vbif);

		/* assert the pipe software reset while halted, if available */
		if (sw_reset_avail) {
			reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off);
			writel_relaxed(reg_val | BIT(pipe->sw_reset.bit_off),
					mdata->mdp_base + sw_reset_off);
			wmb(); /* ensure write is finished before progressing */
		}
		mutex_unlock(&mdata->reg_lock);

		/* wait (outside reg_lock) for the halt to take effect */
		rc = mdss_mdp_wait_for_xin_halt(pipe->xin_id, is_nrt_vbif);

		mutex_lock(&mdata->reg_lock);
		/* withdraw the halt request */
		reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
								is_nrt_vbif);
		MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
				reg_val & ~BIT(pipe->xin_id), is_nrt_vbif);

		clk_val = readl_relaxed(mdata->mdp_base + clk_ctrl_off);
		/* undo the force-on only if we set it above */
		if (forced_on)
			clk_val &= ~clk_mask;

		if (sw_reset_avail) {
			/* de-assert reset, then force the client clock off */
			reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off);
			writel_relaxed(reg_val & ~BIT(pipe->sw_reset.bit_off),
					mdata->mdp_base + sw_reset_off);
			wmb(); /* ensure write is finished before progressing */

			clk_val |= BIT(pipe->clk_ctrl.bit_off +
				CLK_FORCE_OFF_OFFSET);
		}
		writel_relaxed(clk_val, mdata->mdp_base + clk_ctrl_off);
		mutex_unlock(&mdata->reg_lock);
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	return rc;
}
+
/*
 * Drop the caller's reference on @pipe and, if it was the last one, run
 * mdss_mdp_pipe_free() under the SSPP lock and wake anyone waiting for
 * the pipe to become free (see mdss_mdp_pipe_assign()).
 *
 * Returns 0 when the pipe was released, -EBUSY while references remain.
 */
int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe)
{
	/* kref_put_mutex takes the lock only when dropping the last ref */
	if (!kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
			&mdss_mdp_sspp_lock)) {
		pr_err("unable to free pipe %d while still in use\n",
				pipe->num);
		return -EBUSY;
	}

	wake_up_all(&pipe->free_waitq);
	mutex_unlock(&mdss_mdp_sspp_lock);

	return 0;
}
+
/**
 * mdss_mdp_pipe_handoff() - Handoff staged pipes during bootup
 * @pipe: pointer to the pipe to be handed-off
 *
 * Populate the software structures for the pipe based on the current
 * configuration of the hardware pipe by the reading the appropriate MDP
 * registers.
 *
 * This function would typically be called during MDP probe for the case
 * when certain pipes might be programmed in the bootloader to display
 * the splash screen.
 *
 * Returns 0 on success, -EINVAL if the programmed format cannot be
 * mapped back to a known RGB format.
 */
int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe)
{
	int rc = 0;
	u32 src_fmt, reg = 0, bpp = 0;

	/*
	 * todo: for now, only reading pipe src and dest size details
	 * from the registers. This is needed for appropriately
	 * calculating perf metrics for the handed off pipes.
	 * We may need to parse some more details at a later date.
	 */
	reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE);
	pipe->src.h = reg >> 16;
	pipe->src.w = reg & 0xFFFF;
	reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE);
	pipe->dst.h = reg >> 16;
	pipe->dst.w = reg & 0xFFFF;

	/* Assume that the source format is RGB */
	reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT);
	/* bits [10:9] of SRC_FORMAT encode bytes-per-pixel minus one */
	bpp = ((reg >> 9) & 0x3) + 1;
	switch (bpp) {
	case 4:
		src_fmt = MDP_RGBA_8888;
		break;
	case 3:
		src_fmt = MDP_RGB_888;
		break;
	case 2:
		src_fmt = MDP_RGB_565;
		break;
	default:
		pr_err("Invalid bpp=%d found\n", bpp);
		rc = -EINVAL;
		goto error;
	}
	pipe->src_fmt = mdss_mdp_get_format_params(src_fmt);
	if (!pipe->src_fmt) {
		pr_err("%s: failed to retrieve format parameters\n",
			__func__);
		rc = -EINVAL;
		goto error;
	}

	pr_debug("Pipe settings: src.h=%d src.w=%d dst.h=%d dst.w=%d bpp=%d\n"
		, pipe->src.h, pipe->src.w, pipe->dst.h, pipe->dst.w,
		pipe->src_fmt->bpp);

	/* play_cnt = 1 marks the pipe as already fetching (bootloader) */
	pipe->is_handed_off = true;
	pipe->play_cnt = 1;
	mdss_mdp_init_pipe_params(pipe);

error:
	return rc;
}
+
/*
 * Program the pipe's source/destination size and position registers.
 *
 * NOTE: @dst may be modified in place — for a base-stage layer on the
 * right LM with source split enabled, dst->x is rebased to the right
 * LM's coordinate system before being written to hardware.
 */
void mdss_mdp_pipe_position_update(struct mdss_mdp_pipe *pipe,
		struct mdss_rect *src, struct mdss_rect *dst)
{
	u32 src_size, src_xy, dst_size, dst_xy;
	u32 tmp_src_size, tmp_src_xy, reg_data;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	/* registers pack height in the upper 16 bits, width/x in the lower */
	src_size = (src->h << 16) | src->w;
	src_xy = (src->y << 16) | src->x;
	dst_size = (dst->h << 16) | dst->w;

	/*
	 * base layer requirements are different compared to other layers
	 * located at different stages. If source split is enabled and base
	 * layer is used, base layer on the right LM's x offset is relative
	 * to right LM's co-ordinate system unlike other layers which are
	 * relative to left LM's top-left.
	 */
	if (pipe->mixer_stage == MDSS_MDP_STAGE_BASE && mdata->has_src_split
			&& dst->x >= left_lm_w_from_mfd(pipe->mfd))
		dst->x -= left_lm_w_from_mfd(pipe->mfd);
	dst_xy = (dst->y << 16) | dst->x;

	/*
	 * Software overfetch is used when scalar pixel extension is
	 * not enabled
	 */
	if (pipe->overfetch_disable && !pipe->scaler.enable) {
		if (pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT)
			src_xy &= ~0xFFFF;
		if (pipe->overfetch_disable & OVERFETCH_DISABLE_TOP)
			src_xy &= ~(0xFFFF << 16);
	}

	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103) &&
		pipe->bwc_mode) {
		/* check source dimensions change */
		tmp_src_size = mdss_mdp_pipe_read(pipe,
						 MDSS_MDP_REG_SSPP_SRC_SIZE);
		tmp_src_xy = mdss_mdp_pipe_read(pipe,
						 MDSS_MDP_REG_SSPP_SRC_XY);
		if (src_xy != tmp_src_xy || tmp_src_size != src_size) {
			/* NOTE(review): BIT(28) workaround for BWC on
			 * HW rev 1.3.x when source dims change — confirm
			 * against HW programming guide. */
			reg_data = readl_relaxed(mdata->mdp_base +
							 AHB_CLK_OFFSET);
			reg_data |= BIT(28);
			writel_relaxed(reg_data,
					 mdata->mdp_base + AHB_CLK_OFFSET);
		}
	}

	/* rect0 and rect1 of a multirect SSPP use separate register sets */
	if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE, src_size);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY, src_xy);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE, dst_size);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY, dst_xy);
	} else {
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE_REC1,
				    src_size);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY_REC1,
				    src_xy);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE_REC1,
				    dst_size);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY_REC1,
				    dst_xy);
	}

	MDSS_XLOG(pipe->num, pipe->multirect.num, src_size, src_xy,
		  dst_size, dst_xy, pipe->multirect.mode);
}
+
/*
 * Program the pipe's plane stride registers and the secure-session
 * status bits.  In multirect mode the stride registers are shared by
 * both rects (rect0 uses ystride slots 0/2, rect1 uses 1/3), so both
 * pipes are folded into a single programming pass.
 */
static void mdss_mdp_pipe_stride_update(struct mdss_mdp_pipe *pipe)
{
	u32 reg0, reg1;
	u32 ystride[MAX_PLANES] = {0};
	struct mdss_mdp_pipe *rec0_pipe, *rec1_pipe;
	u32 secure = 0;

	/*
	 * since stride registers are shared between both rectangles in
	 * multirect mode, delayed programming allows programming of both
	 * together
	 */
	if (is_pipe_programming_delay_needed(pipe)) {
		pr_debug("skip stride programming for pipe%d rec%d\n",
			pipe->num, pipe->multirect.num);
		return;
	}

	if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
		memcpy(&ystride, &pipe->src_planes.ystride,
			sizeof(u32) * MAX_PLANES);
		/* all four status nibble bits for a single-rect pipe */
		if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
			secure = 0xF;
	} else {
		/* identify which of the pair is rect0 and which is rect1 */
		if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
			rec0_pipe = pipe;
			rec1_pipe = pipe->multirect.next;
		} else {
			rec1_pipe = pipe;
			rec0_pipe = pipe->multirect.next;
		}

		ystride[0] = rec0_pipe->src_planes.ystride[0];
		ystride[2] = rec0_pipe->src_planes.ystride[2];
		if (rec0_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
			secure |= 0x5;

		ystride[1] = rec1_pipe->src_planes.ystride[0];
		ystride[3] = rec1_pipe->src_planes.ystride[2];
		if (rec1_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
			secure |= 0xA;
	}

	/* two 16-bit strides per register */
	reg0 =  (ystride[0]) | (ystride[1] << 16);
	reg1 =  (ystride[2]) | (ystride[3] << 16);

	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE0, reg0);
	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE1, reg1);

	pr_debug("pipe%d multirect:num%d mode=%d, ystride0=0x%x ystride1=0x%x\n",
		pipe->num, pipe->multirect.num, pipe->multirect.mode,
		reg0, reg1);
	MDSS_XLOG(pipe->num, pipe->multirect.num,
		pipe->multirect.mode, reg0, reg1);
}
+
/*
 * Program the pipe's image geometry: compute plane sizes, apply
 * deinterlace/decimation adjustments, crop src/dst against the mixer
 * ROI (command-mode panels), compensate flips and software overfetch,
 * then write size/position/stride/decimation registers.
 *
 * @data may be NULL for solid fill, in which case src mirrors dst and
 * decimation is disabled.  Returns 0 on success or a negative errno.
 */
static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe,
					struct mdss_mdp_data *data)
{
	u32 img_size;
	u32 width, height, decimation;
	int ret = 0;
	struct mdss_rect dst, src;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	bool rotation = false;

	pr_debug("ctl: %d pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
			pipe->mixer_left->ctl->num, pipe->num,
			pipe->img_width, pipe->img_height,
			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);

	width = pipe->img_width;
	height = pipe->img_height;

	if (pipe->flags & MDP_SOURCE_ROTATED_90)
		rotation = true;

	mdss_mdp_get_plane_sizes(pipe->src_fmt, width, height,
			&pipe->src_planes, pipe->bwc_mode, rotation);

	if (data != NULL) {
		ret = mdss_mdp_data_check(data, &pipe->src_planes,
			pipe->src_fmt);
		if (ret)
			return ret;
	}

	/* deinterlace reads every other line: double stride, halve height */
	if ((pipe->flags & MDP_DEINTERLACE) &&
			!(pipe->flags & MDP_SOURCE_ROTATED_90)) {
		int i;

		for (i = 0; i < pipe->src_planes.num_planes; i++)
			pipe->src_planes.ystride[i] *= 2;
		width *= 2;
		height /= 2;
	}

	/* decimation factors are encoded as (2^n - 1); horz in bits [11:8] */
	decimation = ((1 << pipe->horz_deci) - 1) << 8;
	decimation |= ((1 << pipe->vert_deci) - 1);
	if (decimation)
		pr_debug("Image decimation h=%d v=%d\n",
				pipe->horz_deci, pipe->vert_deci);

	dst = pipe->dst;
	src = pipe->src;

	/* command-mode (partial update) path: crop to the mixer ROI */
	if (!pipe->mixer_left->ctl->is_video_mode &&
	    (pipe->mixer_left->type != MDSS_MDP_MIXER_TYPE_WRITEBACK)) {

		struct mdss_rect roi = pipe->mixer_left->roi;
		bool is_right_mixer = pipe->mixer_left->is_right_mixer;
		struct mdss_mdp_ctl *main_ctl;

		if (pipe->mixer_left->ctl->is_master)
			main_ctl = pipe->mixer_left->ctl;
		else
			main_ctl = mdss_mdp_get_main_ctl(pipe->mixer_left->ctl);

		if (!main_ctl) {
			pr_err("Error: couldn't find main_ctl for pipe%d\n",
				pipe->num);
			return -EINVAL;
		}

		if (pipe->src_split_req && main_ctl->mixer_right->valid_roi) {
			/*
			 * pipe is staged on both mixers, expand roi to span
			 * both mixers before cropping pipe's dimensions.
			 */
			roi.w += main_ctl->mixer_right->roi.w;
		} else if (mdata->has_src_split && is_right_mixer) {
			/*
			 * pipe is only on right mixer but since source-split
			 * is enabled, its dst_x is full panel coordinate
			 * aligned where as ROI is mixer coordinate aligned.
			 * Modify dst_x before applying ROI crop.
			 */
			dst.x -= left_lm_w_from_mfd(pipe->mfd);
		}

		mdss_mdp_crop_rect(&src, &dst, &roi);

		if (mdata->has_src_split && is_right_mixer) {
			/*
			 * re-adjust dst_x only if both mixers are active,
			 * meaning right mixer will be working in source
			 * split mode.
			 */
			if (mdss_mdp_is_both_lm_valid(main_ctl))
				dst.x += main_ctl->mixer_left->roi.w;
		}

		/* mirror the cropped src window for flipped pipes */
		if (pipe->flags & MDP_FLIP_LR) {
			src.x = pipe->src.x + (pipe->src.x + pipe->src.w)
				- (src.x + src.w);
		}
		if (pipe->flags & MDP_FLIP_UD) {
			src.y = pipe->src.y + (pipe->src.y + pipe->src.h)
				- (src.y + src.h);
		}
	}

	/*
	 * Software overfetch is used when scalar pixel extension is
	 * not enabled
	 */
	if (pipe->overfetch_disable && !pipe->scaler.enable) {
		if (pipe->overfetch_disable & OVERFETCH_DISABLE_BOTTOM) {
			height = pipe->src.h;
			if (!(pipe->overfetch_disable & OVERFETCH_DISABLE_TOP))
				height += pipe->src.y;
		}
		if (pipe->overfetch_disable & OVERFETCH_DISABLE_RIGHT) {
			width = pipe->src.w;
			if (!(pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT))
				width += pipe->src.x;
		}

		pr_debug("overfetch w=%d/%d h=%d/%d\n", width,
			pipe->img_width, height, pipe->img_height);
	}
	img_size = (height << 16) | width;

	/*
	 * in solid fill, there is no src rectangle, but hardware needs to
	 * be programmed same as dst to avoid issues in scaling blocks
	 */
	if (data == NULL) {
		src = (struct mdss_rect) {0, 0, dst.w, dst.h};
		decimation = 0;
	}

	mdss_mdp_pipe_position_update(pipe, &src, &dst);
	mdss_mdp_pipe_stride_update(pipe);

	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DECIMATION_CONFIG,
			decimation);

	return 0;
}
+
+static void mdss_mdp_set_pipe_cdp(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 cdp_settings = 0x0;
+ bool is_rotator = (pipe->mixer_left && pipe->mixer_left->rotator_mode);
+
+ /* Disable CDP for rotator pipe in v1 */
+ if (is_rotator && mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP))
+ goto exit;
+
+ cdp_settings = MDSS_MDP_CDP_ENABLE;
+
+ if (!mdss_mdp_is_linear_format(pipe->src_fmt)) {
+ /* Enable Amortized for non-linear formats */
+ cdp_settings |= MDSS_MDP_CDP_ENABLE_UBWCMETA;
+ cdp_settings |= MDSS_MDP_CDP_AMORTIZED;
+ } else {
+ /* 64-transactions for line mode otherwise we keep 32 */
+ if (!is_rotator)
+ cdp_settings |= MDSS_MDP_CDP_AHEAD_64;
+ }
+
+exit:
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_CDP_CTRL, cdp_settings);
+}
+
/*
 * Program the pipe's source format, unpack pattern and operation mode
 * registers from pipe->src_fmt and pipe->flags, including rotation/flip
 * bits, tile/UBWC enables, fetch configuration and CDP.  Rect0 and
 * rect1 of a multirect SSPP use separate register sets.
 *
 * Always returns 0.
 */
static int mdss_mdp_format_setup(struct mdss_mdp_pipe *pipe)
{
	struct mdss_mdp_format_params *fmt;
	u32 chroma_samp, unpack, src_format;
	u32 opmode;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	fmt = pipe->src_fmt;

	opmode = pipe->bwc_mode;
	if (pipe->flags & MDP_FLIP_LR)
		opmode |= MDSS_MDP_OP_FLIP_LR;
	if (pipe->flags & MDP_FLIP_UD)
		opmode |= MDSS_MDP_OP_FLIP_UD;

	pr_debug("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
			opmode);

	chroma_samp = fmt->chroma_sample;
	/* 90-degree rotated sources swap H/V chroma subsampling */
	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
		if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
			chroma_samp = MDSS_MDP_CHROMA_H1V2;
		else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
			chroma_samp = MDSS_MDP_CHROMA_H2V1;
	}

	/* pack the SRC_FORMAT bit fields from the format descriptor */
	src_format = (chroma_samp << 23) |
		     (fmt->fetch_planes << 19) |
		     (fmt->bits[C3_ALPHA] << 6) |
		     (fmt->bits[C2_R_Cr] << 4) |
		     (fmt->bits[C1_B_Cb] << 2) |
		     (fmt->bits[C0_G_Y] << 0);

	if (mdss_mdp_is_tile_format(fmt))
		src_format |= BIT(30);

	if (pipe->flags & MDP_ROT_90)
		src_format |= BIT(11); /* ROT90 */

	if (fmt->alpha_enable &&
			fmt->fetch_planes != MDSS_MDP_PLANE_INTERLEAVED)
		src_format |= BIT(8); /* SRCC3_EN */

	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
			(fmt->element[1] << 8) | (fmt->element[0] << 0);
	src_format |= ((fmt->unpack_count - 1) << 12) |
			(fmt->unpack_tight << 17) |
			(fmt->unpack_align_msb << 18) |
			((fmt->bpp - 1) << 9);

	if (mdss_mdp_is_ubwc_format(fmt)) {
		opmode |= BIT(0);
		src_format |= BIT(31);
	}

	if (fmt->is_yuv && test_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map))
		src_format |= BIT(15);

	src_format |= (fmt->unpack_dx_format << 14);

	mdss_mdp_pipe_sspp_setup(pipe, &opmode);
	/* non-linear fetch needs the DDR highest-bank-bit programmed */
	if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR
		&& mdata->highest_bank_bit) {
		u32 fetch_config = MDSS_MDP_FETCH_CONFIG_RESET_VALUE;

		fetch_config |= (mdata->highest_bank_bit << 18);
		if (fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC)
			fetch_config |= (2 << 16);

		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_FETCH_CONFIG,
			fetch_config);
	}
	if (pipe->scaler.enable)
		opmode |= (1 << 31);

	if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
		mdss_mdp_pipe_write(pipe,
			MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format);
		mdss_mdp_pipe_write(pipe,
			MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
		mdss_mdp_pipe_write(pipe,
			MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
	} else {
		mdss_mdp_pipe_write(pipe,
			MDSS_MDP_REG_SSPP_SRC_FORMAT_REC1, src_format);
		mdss_mdp_pipe_write(pipe,
			MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN_REC1, unpack);
		mdss_mdp_pipe_write(pipe,
			MDSS_MDP_REG_SSPP_SRC_OP_MODE_REC1, opmode);
	}

	/* clear UBWC error */
	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_UBWC_ERROR_STATUS, BIT(31));

	/* configure CDP */
	if (test_bit(MDSS_QOS_CDP, mdata->mdss_qos_map))
		mdss_mdp_set_pipe_cdp(pipe);

	return 0;
}
+
/*
 * Initialize the static description of @len SSPPs in the @head array
 * from devicetree-derived tables (@offsets, @ftch_id, @xin_id, @pnums).
 * For SSPPs supporting multiple rectangles, the additional rect entries
 * are cloned from rect0 and linked into a circular multirect.next list.
 *
 * Returns 0 on success, -EINVAL on invalid arguments.
 */
int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
	struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id,
	u32 type, const int *pnums, u32 len, u32 rects_per_sspp,
	u8 priority_base)
{
	u32 i, j;

	if (!head || !mdata) {
		pr_err("unable to setup pipe type=%d: invalid input\n", type);
		return -EINVAL;
	}

	for (i = 0; i < len; i++) {
		/* each SSPP occupies rects_per_sspp consecutive entries */
		struct mdss_mdp_pipe *pipe = head + (i * rects_per_sspp);

		pipe->type = type;
		pipe->ftch_id  = ftch_id[i];
		pipe->xin_id = xin_id[i];
		pipe->num = pnums[i];
		pipe->ndx = BIT(pnums[i]);
		pipe->priority = i + priority_base;
		pipe->base = mdata->mdss_io.base + offsets[i];
		pipe->multirect.num = MDSS_MDP_PIPE_RECT0;
		pipe->multirect.mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
		pipe->multirect.max_rects = rects_per_sspp;
		pipe->multirect.next = NULL;

		pr_info("type:%d ftchid:%d xinid:%d num:%d rect:%d ndx:0x%x prio:%d\n",
			pipe->type, pipe->ftch_id, pipe->xin_id, pipe->num,
			pipe->multirect.num, pipe->ndx, pipe->priority);

		for (j = 1; j < rects_per_sspp; j++) {
			struct mdss_mdp_pipe *next = pipe + j;

			/* link previous rect forward, clone it, bump the
			 * rect number and close the ring back to rect0 */
			pipe[j-1].multirect.next = next;
			*next = pipe[j-1];
			next->multirect.num++;
			next->multirect.next = pipe;

			pr_info("type:%d ftchid:%d xinid:%d num:%d rect:%d ndx:0x%x prio:%d\n",
				next->type, next->ftch_id, next->xin_id,
				next->num, next->multirect.num, next->ndx,
				next->priority);
		}

	}

	return 0;
}
+
/*
 * Validate the source buffer against the pipe's plane layout and write
 * the per-plane fetch addresses.  In multirect mode rect0 uses the
 * SRC0/SRC2 address registers and rect1 uses SRC1/SRC3.
 *
 * Returns 0 on success or the error from mdss_mdp_data_check().
 */
static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe,
				   struct mdss_mdp_data *src_data)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int i, ret = 0;
	u32 addr[MAX_PLANES] = { 0 };

	pr_debug("pnum=%d\n", pipe->num);

	ret = mdss_mdp_data_check(src_data, &pipe->src_planes, pipe->src_fmt);
	if (ret)
		return ret;

	/* software overfetch: skip the cropped top/left region in memory */
	if (pipe->overfetch_disable && !pipe->scaler.enable) {
		u32 x = 0, y = 0;

		if (pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT)
			x = pipe->src.x;
		if (pipe->overfetch_disable & OVERFETCH_DISABLE_TOP)
			y = pipe->src.y;

		mdss_mdp_data_calc_offset(src_data, x, y,
			&pipe->src_planes, pipe->src_fmt);
	}

	for (i = 0; i < MAX_PLANES; i++)
		addr[i] = src_data->p[i].addr;

	/* planar format expects YCbCr, swap chroma planes if YCrCb */
	if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 &&
			(pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR)
				&& (pipe->src_fmt->element[0] == C1_B_Cb))
		swap(addr[1], addr[2]);

	if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, addr[0]);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC1_ADDR, addr[1]);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC2_ADDR, addr[2]);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[3]);
	} else if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, addr[0]);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC2_ADDR, addr[2]);
	} else { /* RECT1 */
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC1_ADDR, addr[0]);
		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[2]);
	}

	return 0;
}
+
+static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
+{
+ int ret;
+ u32 secure, format, unpack, opmode = 0;
+
+ pr_debug("solid fill setup on pnum=%d\n", pipe->num);
+
+ ret = mdss_mdp_image_setup(pipe, NULL);
+ if (ret) {
+ pr_err("image setup error for pnum=%d\n", pipe->num);
+ return ret;
+ }
+
+ format = MDSS_MDP_FMT_SOLID_FILL;
+ secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
+
+ /* support ARGB color format only */
+ unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
+ (C1_B_Cb << 8) | (C0_G_Y << 0);
+ if (pipe->scaler.enable)
+ opmode |= (1 << 31);
+
+ if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+ /*
+ * rect0 will drive whether to secure the pipeline, even though
+ * no secure content is being fetched
+ */
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_FORMAT, format);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR, pipe->bg_color);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+ } else {
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_FORMAT_REC1, format);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR_REC1,
+ pipe->bg_color);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN_REC1, unpack);
+ mdss_mdp_pipe_write(pipe,
+ MDSS_MDP_REG_SSPP_SRC_OP_MODE_REC1, opmode);
+ }
+
+ if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA) {
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_CONFIG, 0);
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, 0);
+ }
+
+ return 0;
+}
+
+static void mdss_mdp_set_ot_limit_pipe(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_mdp_set_ot_params ot_params;
+ struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+
+ ot_params.xin_id = pipe->xin_id;
+ ot_params.num = pipe->num;
+ ot_params.width = pipe->src.w;
+ ot_params.height = pipe->src.h;
+ ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
+ ot_params.reg_off_mdp_clk_ctrl = pipe->clk_ctrl.reg_off;
+ ot_params.bit_off_mdp_clk_ctrl = pipe->clk_ctrl.bit_off +
+ CLK_FORCE_ON_OFFSET;
+ ot_params.is_rot = pipe->mixer_left->rotator_mode;
+ ot_params.is_wb = ctl->intf_num == MDSS_MDP_NO_INTF;
+ ot_params.is_yuv = pipe->src_fmt->is_yuv;
+ ot_params.frame_rate = pipe->frame_rate;
+
+ /* rotator read uses nrt vbif */
+ if (mdss_mdp_is_nrt_vbif_base_defined(ctl->mdata) &&
+ pipe->mixer_left->rotator_mode)
+ ot_params.is_vbif_nrt = true;
+ else
+ ot_params.is_vbif_nrt = false;
+
+ mdss_mdp_set_ot_limit(&ot_params);
+}
+
+bool mdss_mdp_is_amortizable_pipe(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer, struct mdss_data_type *mdata)
+{
+ /* do not apply for rotator or WB */
+ return ((pipe->src.y > mdata->prefill_data.ts_threshold) &&
+ (mixer->type == MDSS_MDP_MIXER_TYPE_INTF));
+}
+
+static inline void __get_ordered_rects(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_pipe **low_pipe,
+ struct mdss_mdp_pipe **high_pipe)
+{
+ *low_pipe = pipe;
+
+ if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
+ *high_pipe = NULL;
+ return;
+ }
+
+ *high_pipe = pipe->multirect.next;
+
+ /* if pipes are not in order, order them according to position */
+ if ((*low_pipe)->src.y > (*high_pipe)->src.y) {
+ *low_pipe = pipe->multirect.next;
+ *high_pipe = pipe;
+ }
+}
+
+static u32 __get_ts_count(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer, bool is_low_pipe)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 ts_diff, ts_ypos;
+ struct mdss_mdp_pipe *low_pipe, *high_pipe;
+ u32 ts_count = 0;
+ u32 v_total, fps, h_total, xres;
+
+ if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+ &h_total, &xres)) {
+ pr_err(" error retreiving the panel params!\n");
+ return -EINVAL;
+ }
+
+ if (is_low_pipe) {
+ /* only calculate count if lower pipe is amortizable */
+ if (mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
+ ts_diff = mdata->prefill_data.ts_threshold -
+ mdata->prefill_data.ts_end;
+ ts_ypos = pipe->src.y - ts_diff;
+ ts_count = mult_frac(ts_ypos, 19200000, fps * v_total);
+ }
+ } else { /* high pipe */
+
+ /* only calculate count for high pipe in serial mode */
+ if (pipe &&
+ pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
+ __get_ordered_rects(pipe, &low_pipe, &high_pipe);
+ ts_count = high_pipe->src.y - low_pipe->src.y - 1;
+ ts_count = mult_frac(ts_count, 19200000, fps * v_total);
+ }
+ }
+
+ return ts_count;
+}
+
+static u32 __calc_ts_bytes(struct mdss_rect *src, u32 fps, u32 bpp)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 ts_bytes;
+
+ ts_bytes = src->h * src->w *
+ bpp * fps;
+ ts_bytes = mult_frac(ts_bytes,
+ mdata->prefill_data.ts_rate.numer,
+ mdata->prefill_data.ts_rate.denom);
+ ts_bytes /= 19200000;
+
+ return ts_bytes;
+}
+
+static u32 __get_ts_bytes(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_mixer *mixer)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_pipe *low_pipe, *high_pipe;
+ u32 v_total, fps, h_total, xres;
+ u64 low_pipe_bw, high_pipe_bw, temp;
+ u32 ts_bytes_low, ts_bytes_high;
+ u64 ts_bytes = 0;
+
+ if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+ &h_total, &xres)) {
+ pr_err(" error retreiving the panel params!\n");
+ return -EINVAL;
+ }
+
+ switch (pipe->multirect.mode) {
+ case MDSS_MDP_PIPE_MULTIRECT_NONE:
+
+ /* do not amortize if pipe is not amortizable */
+ if (!mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
+ ts_bytes = 0;
+ goto exit;
+ }
+
+ ts_bytes = __calc_ts_bytes(&pipe->src, fps,
+ pipe->src_fmt->bpp);
+
+ break;
+ case MDSS_MDP_PIPE_MULTIRECT_PARALLEL:
+
+ __get_ordered_rects(pipe, &low_pipe, &high_pipe);
+
+ /* do not amortize if low_pipe is not amortizable */
+ if (!mdss_mdp_is_amortizable_pipe(low_pipe, mixer, mdata)) {
+ ts_bytes = 0;
+ goto exit;
+ }
+
+ /* calculate ts bytes as the sum of both rects */
+ ts_bytes_low = __calc_ts_bytes(&low_pipe->src, fps,
+ low_pipe->src_fmt->bpp);
+		ts_bytes_high = __calc_ts_bytes(&high_pipe->src, fps,
+ high_pipe->src_fmt->bpp);
+
+ ts_bytes = ts_bytes_low + ts_bytes_high;
+ break;
+ case MDSS_MDP_PIPE_MULTIRECT_SERIAL:
+
+ __get_ordered_rects(pipe, &low_pipe, &high_pipe);
+
+ /* calculate amortization using per-pipe bw */
+ mdss_mdp_get_pipe_overlap_bw(low_pipe,
+ &low_pipe->mixer_left->roi,
+ &low_pipe_bw, &temp, 0);
+ mdss_mdp_get_pipe_overlap_bw(high_pipe,
+ &high_pipe->mixer_left->roi,
+ &high_pipe_bw, &temp, 0);
+
+ /* amortize depending on the lower pipe amortization */
+ if (mdss_mdp_is_amortizable_pipe(low_pipe, mixer, mdata))
+ ts_bytes = DIV_ROUND_UP_ULL(max(low_pipe_bw,
+ high_pipe_bw), 19200000);
+ else
+ ts_bytes = DIV_ROUND_UP_ULL(high_pipe_bw, 19200000);
+ break;
+ default:
+ pr_err("unknown multirect mode!\n");
+ goto exit;
+ break;
+ };
+
+ ts_bytes &= 0xFF;
+ ts_bytes |= BIT(27) | BIT(31);
+exit:
+ return (u32) ts_bytes;
+}
+
+static int mdss_mdp_set_ts_pipe(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_mixer *mixer;
+ u32 ts_count_low = 0, ts_count_high = 0;
+ u32 ts_rec0, ts_rec1;
+ u32 ts_bytes = 0;
+ struct mdss_mdp_pipe *low_pipe = NULL;
+ struct mdss_mdp_pipe *high_pipe = NULL;
+
+ if (!test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map))
+ return 0;
+
+ mixer = pipe->mixer_left;
+ if (!mixer)
+ return -EINVAL;
+
+ if (!mixer->ctl)
+ return -EINVAL;
+
+ if (!mdata->prefill_data.ts_threshold ||
+ (mdata->prefill_data.ts_threshold < mdata->prefill_data.ts_end)) {
+ pr_err("invalid ts data!\n");
+ return -EINVAL;
+ }
+
+ /* high pipe will be null for non-multi rect cases */
+ __get_ordered_rects(pipe, &low_pipe, &high_pipe);
+
+ ts_count_low = __get_ts_count(low_pipe, mixer, true);
+ if (high_pipe != NULL)
+ ts_count_high = __get_ts_count(high_pipe, mixer, false);
+ ts_bytes = __get_ts_bytes(pipe, mixer);
+
+ if (low_pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+ ts_rec0 = ts_count_low;
+ ts_rec1 = ts_count_high;
+ } else {
+ ts_rec0 = ts_count_high;
+ ts_rec1 = ts_count_low;
+ }
+
+ mdss_mdp_pipe_qos_ctrl(pipe, false, MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER, ts_bytes);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_PREFILL,
+ ts_rec0);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_REC1_PREFILL,
+ ts_rec1);
+ MDSS_XLOG(pipe->num, ts_bytes, ts_rec0, ts_rec1);
+ pr_debug("ts: pipe:%d bytes=0x%x count0=0x%x count1=0x%x\n",
+ pipe->num, ts_bytes, ts_rec0, ts_rec1);
+ return 0;
+}
+
+int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
+ struct mdss_mdp_data *src_data)
+{
+ int ret = 0;
+ struct mdss_mdp_ctl *ctl;
+ u32 params_changed;
+ u32 opmode = 0, multirect_opmode = 0;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool roi_changed = false;
+ bool delayed_programming;
+
+ if (!pipe) {
+ pr_err("pipe not setup properly for queue\n");
+ return -ENODEV;
+ }
+
+ if (!pipe->mixer_left || !pipe->mixer_left->ctl) {
+ if (src_data)
+ pr_err("pipe%d mixer not setup properly\n", pipe->num);
+ return -ENODEV;
+ }
+
+ if (pipe->src_split_req && !mdata->has_src_split) {
+ pr_err("src split can't be requested on mdp:0x%x\n",
+ mdata->mdp_rev);
+ return -EINVAL;
+ }
+
+ pr_debug("pnum=%x mixer=%d play_cnt=%u\n", pipe->num,
+ pipe->mixer_left->num, pipe->play_cnt);
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ ctl = pipe->mixer_left->ctl;
+ roi_changed = pipe->mixer_left->roi_changed;
+
+ /*
+ * if pipe is staged on 2 mixers then it is possible that only
+ * right mixer roi has changed.
+ */
+ if (pipe->mixer_right)
+ roi_changed |= pipe->mixer_right->roi_changed;
+
+ delayed_programming = is_pipe_programming_delay_needed(pipe);
+
+ /*
+ * Reprogram the pipe when there is no dedicated wfd blk and
+ * virtual mixer is allocated for the DMA pipe during concurrent
+ * line and block mode operations
+ */
+ params_changed = (pipe->params_changed) ||
+ ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
+ (pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+ (ctl->mdata->mixer_switched)) || roi_changed;
+
+ /* apply changes that are common in case of multi rects only once */
+ if (params_changed && !delayed_programming) {
+ bool is_realtime = !((ctl->intf_num == MDSS_MDP_NO_INTF)
+ || pipe->mixer_left->rotator_mode);
+
+ mdss_mdp_pipe_panic_vblank_signal_ctrl(pipe, false);
+ mdss_mdp_pipe_panic_signal_ctrl(pipe, false);
+
+ mdss_mdp_qos_vbif_remapper_setup(mdata, pipe, is_realtime);
+ mdss_mdp_fixed_qos_arbiter_setup(mdata, pipe, is_realtime);
+
+ if (mdata->vbif_nrt_io.base)
+ mdss_mdp_pipe_nrt_vbif_setup(mdata, pipe);
+
+ if (pipe && mdss_mdp_pipe_is_sw_reset_available(mdata))
+ mdss_mdp_pipe_clk_force_off(pipe);
+
+ if (pipe->scaler.enable)
+ mdss_mdp_pipe_program_pixel_extn(pipe);
+ }
+
+ if ((!(pipe->flags & MDP_VPU_PIPE) && (src_data == NULL)) ||
+ (pipe->flags & MDP_SOLID_FILL)) {
+ pipe->params_changed = 0;
+ mdss_mdp_pipe_solidfill_setup(pipe);
+
+ MDSS_XLOG(pipe->num, pipe->multirect.num,
+ pipe->mixer_left->num, pipe->play_cnt, 0x111);
+
+ goto update_nobuf;
+ }
+
+ MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num,
+ pipe->play_cnt, 0x222);
+
+ if (params_changed) {
+ pipe->params_changed = 0;
+
+ ret = mdss_mdp_pipe_pp_setup(pipe, &opmode);
+ if (ret) {
+ pr_err("pipe pp setup error for pnum=%d rect=%d\n",
+ pipe->num, pipe->multirect.num);
+ goto done;
+ }
+
+ ret = mdss_mdp_image_setup(pipe, src_data);
+ if (ret) {
+ pr_err("image setup error for pnum=%d\n", pipe->num);
+ goto done;
+ }
+
+ ret = mdss_mdp_format_setup(pipe);
+ if (ret) {
+ pr_err("format %d setup error pnum=%d\n",
+ pipe->src_fmt->format, pipe->num);
+ goto done;
+ }
+
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE,
+ opmode);
+
+ if (!delayed_programming) {
+ if (test_bit(MDSS_QOS_PER_PIPE_LUT,
+ mdata->mdss_qos_map))
+ mdss_mdp_pipe_qos_lut(pipe);
+
+ if (mdss_mdp_panic_signal_support_mode(mdata) ==
+ MDSS_MDP_PANIC_PER_PIPE_CFG)
+ mdss_mdp_config_pipe_panic_lut(pipe);
+
+ if (pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR) {
+ mdss_mdp_pipe_panic_vblank_signal_ctrl(pipe, 1);
+ mdss_mdp_pipe_panic_signal_ctrl(pipe, true);
+ mdss_mdp_set_ot_limit_pipe(pipe);
+ mdss_mdp_set_ts_pipe(pipe);
+ }
+ }
+ }
+
+ /*
+ * enable multirect only when both RECT0 and RECT1 are enabled,
+	 * otherwise expect to work in non-multirect only in RECT0
+ */
+ if (pipe->multirect.mode != MDSS_MDP_PIPE_MULTIRECT_NONE) {
+ multirect_opmode = BIT(0) | BIT(1);
+
+ if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL)
+ multirect_opmode |= BIT(2);
+ }
+
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_MULTI_REC_OP_MODE,
+ multirect_opmode);
+ if (src_data == NULL) {
+ pr_debug("src_data=%pK pipe num=%dx\n",
+ src_data, pipe->num);
+ goto update_nobuf;
+ }
+
+ if (pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR)
+ mdss_mdp_smp_alloc(pipe);
+
+ ret = mdss_mdp_src_addr_setup(pipe, src_data);
+ if (ret) {
+ pr_err("addr setup error for pnum=%d\n", pipe->num);
+ goto done;
+ }
+
+update_nobuf:
+ if (pipe->src_split_req) {
+ pr_debug("src_split_enabled. pnum:%d\n", pipe->num);
+ mdss_mdp_mixer_pipe_update(pipe, ctl->mixer_left,
+ params_changed);
+ mdss_mdp_mixer_pipe_update(pipe, ctl->mixer_right,
+ params_changed);
+ pipe->mixer_right = ctl->mixer_right;
+ } else {
+ mdss_mdp_mixer_pipe_update(pipe, pipe->mixer_left,
+ params_changed);
+ }
+
+ pipe->play_cnt++;
+
+ if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
+ unsigned long pnum_bitmap = BIT(pipe->num);
+
+ if (pipe->bwc_mode)
+ bitmap_or(mdata->bwc_enable_map, mdata->bwc_enable_map,
+ &pnum_bitmap, MAX_DRV_SUP_PIPES);
+ else
+ bitmap_andnot(mdata->bwc_enable_map,
+ mdata->bwc_enable_map, &pnum_bitmap,
+ MAX_DRV_SUP_PIPES);
+ }
+
+done:
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+ return ret;
+}
+
+int mdss_mdp_pipe_is_staged(struct mdss_mdp_pipe *pipe)
+{
+ return (pipe == pipe->mixer_left->stage_pipe[pipe->mixer_stage]);
+}
+
+static inline void __mdss_mdp_pipe_program_pixel_extn_helper(
+ struct mdss_mdp_pipe *pipe, u32 plane, u32 off)
+{
+ u32 src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+ u32 mask = 0xFF;
+ u32 lr_pe, tb_pe, tot_req_pixels;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ /*
+ * CB CR plane required pxls need to be accounted
+ * for chroma decimation.
+ */
+ if (plane == 1)
+ src_h >>= pipe->chroma_sample_v;
+
+ lr_pe = ((pipe->scaler.right_ftch[plane] & mask) << 24)|
+ ((pipe->scaler.right_rpt[plane] & mask) << 16)|
+ ((pipe->scaler.left_ftch[plane] & mask) << 8)|
+ (pipe->scaler.left_rpt[plane] & mask);
+
+ tb_pe = ((pipe->scaler.btm_ftch[plane] & mask) << 24)|
+ ((pipe->scaler.btm_rpt[plane] & mask) << 16)|
+ ((pipe->scaler.top_ftch[plane] & mask) << 8)|
+ (pipe->scaler.top_rpt[plane] & mask);
+
+ writel_relaxed(lr_pe, pipe->base +
+ MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_LR + off);
+ writel_relaxed(tb_pe, pipe->base +
+ MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_TB + off);
+
+ mask = 0xFFFF;
+ if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
+ tot_req_pixels = ((pipe->scaler.num_ext_pxls_top[plane] &
+ mask) << 16 |
+ (pipe->scaler.num_ext_pxls_left[plane] & mask));
+ else
+ tot_req_pixels =
+ (((src_h + pipe->scaler.num_ext_pxls_top[plane] +
+ pipe->scaler.num_ext_pxls_btm[plane]) & mask) << 16) |
+ ((pipe->scaler.roi_w[plane] +
+ pipe->scaler.num_ext_pxls_left[plane] +
+ pipe->scaler.num_ext_pxls_right[plane]) & mask);
+
+ writel_relaxed(tot_req_pixels, pipe->base +
+ MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_REQ_PIXELS + off);
+
+ MDSS_XLOG(pipe->num, plane, lr_pe, tb_pe, tot_req_pixels);
+ pr_debug("pipe num=%d, plane=%d, LR PE=0x%x, TB PE=0x%x, req_pixels=0x0%x\n",
+ pipe->num, plane, lr_pe, tb_pe, tot_req_pixels);
+}
+
+/**
+ * mdss_mdp_pipe_program_pixel_extn - Program the source pipe's
+ * sw pixel extension
+ * @pipe: Source pipe struct containing pixel extn values
+ *
+ * Function programs the pixel extn values calculated during
+ * scale setup.
+ */
+static int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe)
+{
+ /* Y plane pixel extn */
+ __mdss_mdp_pipe_program_pixel_extn_helper(pipe, 0, 0);
+ /* CB CR plane pixel extn */
+ __mdss_mdp_pipe_program_pixel_extn_helper(pipe, 1, 16);
+ /* Alpha plane pixel extn */
+ __mdss_mdp_pipe_program_pixel_extn_helper(pipe, 3, 32);
+ return 0;
+}
+
+
+static int __pxl_extn_helper(int residue)
+{
+ int tmp = 0;
+
+ if (residue == 0) {
+ return tmp;
+ } else if (residue > 0) {
+ tmp = (uint32_t) residue;
+ tmp >>= PHASE_STEP_SHIFT;
+ return -tmp;
+ }
+ tmp = (uint32_t)(-residue);
+ tmp >>= PHASE_STEP_SHIFT;
+ if ((tmp << PHASE_STEP_SHIFT) != (-residue))
+ tmp++;
+ return tmp;
+}
+
+/**
+ * mdss_mdp_pipe_calc_pixel_extn - Calculate source pipe's sw pixel extension
+ *
+ * @pipe: Source pipe struct containing pixel extn values
+ *
+ * Function calculates the pixel extn values during scale setup.
+ */
+void mdss_mdp_pipe_calc_pixel_extn(struct mdss_mdp_pipe *pipe)
+{
+ int caf, i;
+ uint32_t src_h;
+ bool unity_scale_x = false, upscale_x = false;
+ bool unity_scale_y, upscale_y;
+
+ if (!(pipe->src_fmt->is_yuv))
+ unity_scale_x = (pipe->src.w == pipe->dst.w);
+
+ if (!unity_scale_x)
+ upscale_x = (pipe->src.w <= pipe->dst.w);
+
+ pr_debug("pipe=%d, src(%d, %d, %d, %d), dest(%d, %d, %d, %d)\n",
+ pipe->num,
+ pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+ pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+ for (i = 0; i < MAX_PLANES; i++) {
+ int64_t left = 0, right = 0, top = 0, bottom = 0;
+
+ caf = 0;
+
+ /*
+ * phase step x,y for 0 plane should be calculated before
+ * this
+ */
+ if (pipe->src_fmt->is_yuv && (i == 1 || i == 2)) {
+ pipe->scaler.phase_step_x[i] =
+ pipe->scaler.phase_step_x[0]
+ >> pipe->chroma_sample_h;
+ pipe->scaler.phase_step_y[i] =
+ pipe->scaler.phase_step_y[0]
+ >> pipe->chroma_sample_v;
+ } else if (i > 0) {
+ pipe->scaler.phase_step_x[i] =
+ pipe->scaler.phase_step_x[0];
+ pipe->scaler.phase_step_y[i] =
+ pipe->scaler.phase_step_y[0];
+ }
+ /* Pixel extension calculations for X direction */
+ pipe->scaler.roi_w[i] = DECIMATED_DIMENSION(pipe->src.w,
+ pipe->horz_deci);
+
+ if (pipe->src_fmt->is_yuv)
+ pipe->scaler.roi_w[i] &= ~0x1;
+
+ /* CAF filtering on only luma plane */
+ if (i == 0 && pipe->src_fmt->is_yuv)
+ caf = 1;
+ if (i == 1 || i == 2)
+ pipe->scaler.roi_w[i] >>= pipe->chroma_sample_h;
+
+ pr_debug("roi_w[%d]=%d, caf=%d\n", i, pipe->scaler.roi_w[i],
+ caf);
+ if (unity_scale_x) {
+ left = 0;
+ right = 0;
+ } else if (!upscale_x) {
+ left = 0;
+ right = (pipe->dst.w - 1) *
+ pipe->scaler.phase_step_x[i];
+ right -= (pipe->scaler.roi_w[i] - 1) *
+ PHASE_STEP_UNIT_SCALE;
+ right += pipe->scaler.phase_step_x[i];
+ right = -(right);
+ } else {
+ left = (1 << PHASE_RESIDUAL);
+ left -= (caf * PHASE_STEP_UNIT_SCALE);
+
+ right = (1 << PHASE_RESIDUAL);
+ right += (pipe->dst.w - 1) *
+ pipe->scaler.phase_step_x[i];
+ right -= ((pipe->scaler.roi_w[i] - 1) *
+ PHASE_STEP_UNIT_SCALE);
+ right += (caf * PHASE_STEP_UNIT_SCALE);
+ right = -(right);
+ }
+ pr_debug("left=%lld, right=%lld\n", left, right);
+ pipe->scaler.num_ext_pxls_left[i] = __pxl_extn_helper(left);
+ pipe->scaler.num_ext_pxls_right[i] = __pxl_extn_helper(right);
+
+ /* Pixel extension calculations for Y direction */
+ unity_scale_y = false;
+ upscale_y = false;
+
+ src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+ /* Subsampling of chroma components is factored */
+ if (i == 1 || i == 2)
+ src_h >>= pipe->chroma_sample_v;
+
+ if (!(pipe->src_fmt->is_yuv))
+ unity_scale_y = (src_h == pipe->dst.h);
+
+ if (!unity_scale_y)
+ upscale_y = (src_h <= pipe->dst.h);
+
+ if (unity_scale_y) {
+ top = 0;
+ bottom = 0;
+ } else if (!upscale_y) {
+ top = 0;
+ bottom = (pipe->dst.h - 1) *
+ pipe->scaler.phase_step_y[i];
+ bottom -= (src_h - 1) * PHASE_STEP_UNIT_SCALE;
+ bottom += pipe->scaler.phase_step_y[i];
+ bottom = -(bottom);
+ } else {
+ top = (1 << PHASE_RESIDUAL);
+ top -= (caf * PHASE_STEP_UNIT_SCALE);
+
+ bottom = (1 << PHASE_RESIDUAL);
+ bottom += (pipe->dst.h - 1) *
+ pipe->scaler.phase_step_y[i];
+ bottom -= (src_h - 1) * PHASE_STEP_UNIT_SCALE;
+ bottom += (caf * PHASE_STEP_UNIT_SCALE);
+ bottom = -(bottom);
+ }
+
+ pipe->scaler.num_ext_pxls_top[i] = __pxl_extn_helper(top);
+ pipe->scaler.num_ext_pxls_btm[i] = __pxl_extn_helper(bottom);
+
+ /* Single pixel rgb scale adjustment */
+ if ((!(pipe->src_fmt->is_yuv)) &&
+ ((pipe->src.h - pipe->dst.h) == 1)) {
+
+ uint32_t residue = pipe->scaler.phase_step_y[i] -
+ PHASE_STEP_UNIT_SCALE;
+ uint32_t result = (pipe->dst.h * residue) + residue;
+
+ if (result < PHASE_STEP_UNIT_SCALE)
+ pipe->scaler.num_ext_pxls_btm[i] -= 1;
+ }
+
+ if (pipe->scaler.num_ext_pxls_left[i] >= 0)
+ pipe->scaler.left_rpt[i] =
+ pipe->scaler.num_ext_pxls_left[i];
+ else
+ pipe->scaler.left_ftch[i] =
+ pipe->scaler.num_ext_pxls_left[i];
+
+ if (pipe->scaler.num_ext_pxls_right[i] >= 0)
+ pipe->scaler.right_rpt[i] =
+ pipe->scaler.num_ext_pxls_right[i];
+ else
+ pipe->scaler.right_ftch[i] =
+ pipe->scaler.num_ext_pxls_right[i];
+
+ if (pipe->scaler.num_ext_pxls_top[i] >= 0)
+ pipe->scaler.top_rpt[i] =
+ pipe->scaler.num_ext_pxls_top[i];
+ else
+ pipe->scaler.top_ftch[i] =
+ pipe->scaler.num_ext_pxls_top[i];
+
+ if (pipe->scaler.num_ext_pxls_btm[i] >= 0)
+ pipe->scaler.btm_rpt[i] =
+ pipe->scaler.num_ext_pxls_btm[i];
+ else
+ pipe->scaler.btm_ftch[i] =
+ pipe->scaler.num_ext_pxls_btm[i];
+
+ pr_debug("plane repeat=%d, left=%d, right=%d, top=%d, btm=%d\n",
+ i, pipe->scaler.left_rpt[i],
+ pipe->scaler.right_rpt[i],
+ pipe->scaler.top_rpt[i],
+ pipe->scaler.btm_rpt[i]);
+ pr_debug("plane overfetch=%d, left=%d, right=%d, top=%d, btm=%d\n",
+ i, pipe->scaler.left_ftch[i],
+ pipe->scaler.right_ftch[i],
+ pipe->scaler.top_ftch[i],
+ pipe->scaler.btm_ftch[i]);
+ }
+
+ pipe->scaler.enable = 1;
+}
+
+/**
+ * mdss_mdp_pipe_calc_qseed3_cfg - Calculate source pipe's sw qseed3 filter
+ * configuration
+ *
+ * @pipe: Source pipe struct
+ *
+ * Function sets the qseed3 filter configuration to bilinear configuration
+ * and also calculates the pixel extension for qseed3
+ */
+void mdss_mdp_pipe_calc_qseed3_cfg(struct mdss_mdp_pipe *pipe)
+{
+ int i;
+ int roi_h;
+
+ /* calculate qseed3 pixel extension values */
+ for (i = 0; i < MAX_PLANES; i++) {
+
+ /* Pixel extension calculations for X direction */
+ pipe->scaler.roi_w[i] = DECIMATED_DIMENSION(pipe->src.w,
+ pipe->horz_deci);
+
+ if (pipe->src_fmt->is_yuv)
+ pipe->scaler.roi_w[i] &= ~0x1;
+ /*
+ * phase step x,y for 0 plane should be calculated before
+ * this
+ */
+ if (pipe->src_fmt->is_yuv && (i == 1 || i == 2)) {
+ pipe->scaler.phase_step_x[i] =
+ pipe->scaler.phase_step_x[0]
+ >> pipe->chroma_sample_h;
+
+ pipe->scaler.phase_step_y[i] =
+ pipe->scaler.phase_step_y[0]
+ >> pipe->chroma_sample_v;
+
+ pipe->scaler.roi_w[i] >>= pipe->chroma_sample_h;
+ }
+
+ pipe->scaler.preload_x[i] = QSEED3_DEFAULT_PRELAOD_H;
+ pipe->scaler.src_width[i] = pipe->scaler.roi_w[i];
+ pipe->scaler.num_ext_pxls_left[i] = pipe->scaler.roi_w[i];
+
+ /* Pixel extension calculations for Y direction */
+ roi_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+ /* Subsampling of chroma components is factored */
+ if (i == 1 || i == 2)
+ roi_h >>= pipe->chroma_sample_v;
+
+ pipe->scaler.preload_y[i] = QSEED3_DEFAULT_PRELAOD_V;
+ pipe->scaler.src_height[i] = roi_h;
+ pipe->scaler.num_ext_pxls_top[i] = roi_h;
+
+ pr_debug("QSEED3 params=%d, preload_x=%d, preload_y=%d,src_w=%d,src_h=%d\n",
+ i, pipe->scaler.preload_x[i],
+ pipe->scaler.preload_y[i],
+ pipe->scaler.src_width[i],
+ pipe->scaler.src_height[i]);
+ }
+
+ pipe->scaler.dst_width = pipe->dst.w;
+ pipe->scaler.dst_height = pipe->dst.h;
+ /* assign filters */
+ pipe->scaler.y_rgb_filter_cfg = FILTER_BILINEAR;
+ pipe->scaler.uv_filter_cfg = FILTER_BILINEAR;
+ pipe->scaler.alpha_filter_cfg = FILTER_ALPHA_BILINEAR;
+ pipe->scaler.lut_flag = 0;
+ pipe->scaler.enable = ENABLE_SCALE;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
new file mode 100644
index 0000000..6ac2c4b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -0,0 +1,7592 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "mdss_mdp_pp_cache_config.h"
+
+struct mdp_csc_cfg mdp_csc_8bit_convert[MDSS_MDP_MAX_CSC] = {
+ [MDSS_MDP_CSC_YUV2RGB_601L] = {
+ 0,
+ {
+ 0x0254, 0x0000, 0x0331,
+ 0x0254, 0xff37, 0xfe60,
+ 0x0254, 0x0409, 0x0000,
+ },
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_601FR] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x02ce,
+ 0x0200, 0xff50, 0xfe92,
+ 0x0200, 0x038b, 0x0000,
+ },
+ { 0x0000, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_709L] = {
+ 0,
+ {
+ 0x0254, 0x0000, 0x0396,
+ 0x0254, 0xff93, 0xfeef,
+ 0x0254, 0x043e, 0x0000,
+ },
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_2020L] = {
+ 0,
+ {
+ 0x0256, 0x0000, 0x035e,
+ 0x0256, 0xffa0, 0xfeb2,
+ 0x0256, 0x044c, 0x0000,
+ },
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_2020FR] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x02f3,
+ 0x0200, 0xffac, 0xfedb,
+ 0x0200, 0x03c3, 0x0000,
+ },
+ { 0x0000, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_601L] = {
+ 0,
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0xffb4, 0xff6b, 0x00e1,
+ 0x00e1, 0xff44, 0xffdb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0010, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_601FR] = {
+ 0,
+ {
+ 0x0099, 0x012d, 0x003a,
+ 0xffaa, 0xff56, 0x0100,
+ 0x0100, 0xff2a, 0xffd6
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0000, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_709L] = {
+ 0,
+ {
+ 0x005d, 0x013a, 0x0020,
+ 0xffcc, 0xff53, 0x00e1,
+ 0x00e1, 0xff34, 0xffeb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0010, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_2020L] = {
+ 0,
+ {
+ 0x0073, 0x0129, 0x001a,
+ 0xffc1, 0xff5e, 0x00e0,
+ 0x00e0, 0xff32, 0xffee
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0010, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_2020FR] = {
+ 0,
+ {
+ 0x0086, 0x015b, 0x001e,
+ 0xffb9, 0xff47, 0x0100,
+ 0x0100, 0xff15, 0xffeb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0080, 0x0080,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_YUV2YUV] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200,
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+ [MDSS_MDP_CSC_RGB2RGB] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200,
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+ },
+};
+
+struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = {
+ [MDSS_MDP_CSC_YUV2RGB_601L] = {
+ 0,
+ {
+ 0x0254, 0x0000, 0x0331,
+ 0x0254, 0xff37, 0xfe60,
+ 0x0254, 0x0409, 0x0000,
+ },
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_601FR] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x02ce,
+ 0x0200, 0xff50, 0xfe92,
+ 0x0200, 0x038b, 0x0000,
+ },
+ { 0x0000, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_709L] = {
+ 0,
+ {
+ 0x0254, 0x0000, 0x0396,
+ 0x0254, 0xff93, 0xfeef,
+ 0x0254, 0x043a, 0x0000,
+ },
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_2020L] = {
+ 0,
+ {
+ 0x0256, 0x0000, 0x035e,
+ 0x0256, 0xffa0, 0xfeb2,
+ 0x0256, 0x044c, 0x0000,
+ },
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_YUV2RGB_2020FR] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x02f3,
+ 0x0200, 0xffac, 0xfedb,
+ 0x0200, 0x03c3, 0x0000,
+ },
+ { 0x0000, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_601L] = {
+ 0,
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0xffb4, 0xff6b, 0x00e1,
+ 0x00e1, 0xff44, 0xffdb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_601FR] = {
+ 0,
+ {
+ 0x0099, 0x012d, 0x003a,
+ 0xffaa, 0xff56, 0x0100,
+ 0x0100, 0xff2a, 0xffd6
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0000, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_709L] = {
+ 0,
+ {
+ 0x005d, 0x013a, 0x0020,
+ 0xffcc, 0xff53, 0x00e1,
+ 0x00e1, 0xff34, 0xffeb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_2020L] = {
+ 0,
+ {
+ 0x0073, 0x0129, 0x001a,
+ 0xffc1, 0xff5e, 0x00e0,
+ 0x00e0, 0xff32, 0xffee
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+ [MDSS_MDP_CSC_RGB2YUV_2020FR] = {
+ 0,
+ {
+ 0x0086, 0x015b, 0x001e,
+ 0xffb9, 0xff47, 0x0100,
+ 0x0100, 0xff15, 0xffeb
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_YUV2YUV] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200,
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+ [MDSS_MDP_CSC_RGB2RGB] = {
+ 0,
+ {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200,
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+};
+
+#define CSC_MV_OFF 0x0
+#define CSC_BV_OFF 0x2C
+#define CSC_LV_OFF 0x14
+#define CSC_POST_OFF 0xC
+#define CSC_10BIT_LV_SHIFT 16
+#define CSC_8BIT_LV_SHIFT 8
+
+
+#define HIST_INTR_DSPP_MASK 0xFFF000
+#define HIST_V2_INTR_BIT_MASK 0xF33000
+#define HIST_V1_INTR_BIT_MASK 0X333333
+#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
+#define HIST_KICKOFF_WAIT_FRACTION 4
+
/* hist collect state */
enum {
	HIST_UNKNOWN,	/* histogram block state not yet determined */
	HIST_IDLE,	/* no collection in progress */
	HIST_READY,	/* collected data is ready to be read out */
};

/* 4x4 ordered-dither (Bayer-style) matrix, row-major */
static u32 dither_matrix[16] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
/* maps component bit depth (index 0-8) to the dither depth field value */
static u32 dither_depth_map[9] = {
	0, 0, 0, 0, 0, 1, 2, 3, 3};
+
/*
 * Precomputed IGC LUT implementing the limited (TV / 16-235) range
 * mapping, used when limited-range output is requested for a panel.
 * Entries are packed register words consumed by the IGC block; see
 * mdss_mdp_limited_lut_igc_config() for usage.
 */
static u32 igc_limited[IGC_LUT_ENTRIES] = {
	16777472, 17826064, 18874656, 19923248,
	19923248, 20971840, 22020432, 23069024,
	24117616, 25166208, 26214800, 26214800,
	27263392, 28311984, 29360576, 30409168,
	31457760, 32506352, 32506352, 33554944,
	34603536, 35652128, 36700720, 37749312,
	38797904, 38797904, 39846496, 40895088,
	41943680, 42992272, 44040864, 45089456,
	45089456, 46138048, 47186640, 48235232,
	49283824, 50332416, 51381008, 51381008,
	52429600, 53478192, 54526784, 55575376,
	56623968, 57672560, 58721152, 58721152,
	59769744, 60818336, 61866928, 62915520,
	63964112, 65012704, 65012704, 66061296,
	67109888, 68158480, 69207072, 70255664,
	71304256, 71304256, 72352848, 73401440,
	74450032, 75498624, 76547216, 77595808,
	77595808, 78644400, 79692992, 80741584,
	81790176, 82838768, 83887360, 83887360,
	84935952, 85984544, 87033136, 88081728,
	89130320, 90178912, 90178912, 91227504,
	92276096, 93324688, 94373280, 95421872,
	96470464, 96470464, 97519056, 98567648,
	99616240, 100664832, 101713424, 102762016,
	102762016, 103810608, 104859200, 105907792,
	106956384, 108004976, 109053568, 109053568,
	110102160, 111150752, 112199344, 113247936,
	114296528, 115345120, 115345120, 116393712,
	117442304, 118490896, 119539488, 120588080,
	121636672, 121636672, 122685264, 123733856,
	124782448, 125831040, 126879632, 127928224,
	127928224, 128976816, 130025408, 131074000,
	132122592, 133171184, 134219776, 135268368,
	135268368, 136316960, 137365552, 138414144,
	139462736, 140511328, 141559920, 141559920,
	142608512, 143657104, 144705696, 145754288,
	146802880, 147851472, 147851472, 148900064,
	149948656, 150997248, 152045840, 153094432,
	154143024, 154143024, 155191616, 156240208,
	157288800, 158337392, 159385984, 160434576,
	160434576, 161483168, 162531760, 163580352,
	164628944, 165677536, 166726128, 166726128,
	167774720, 168823312, 169871904, 170920496,
	171969088, 173017680, 173017680, 174066272,
	175114864, 176163456, 177212048, 178260640,
	179309232, 179309232, 180357824, 181406416,
	182455008, 183503600, 184552192, 185600784,
	185600784, 186649376, 187697968, 188746560,
	189795152, 190843744, 191892336, 191892336,
	192940928, 193989520, 195038112, 196086704,
	197135296, 198183888, 198183888, 199232480,
	200281072, 201329664, 202378256, 203426848,
	204475440, 204475440, 205524032, 206572624,
	207621216, 208669808, 209718400, 210766992,
	211815584, 211815584, 212864176, 213912768,
	214961360, 216009952, 217058544, 218107136,
	218107136, 219155728, 220204320, 221252912,
	222301504, 223350096, 224398688, 224398688,
	225447280, 226495872, 227544464, 228593056,
	229641648, 230690240, 230690240, 231738832,
	232787424, 233836016, 234884608, 235933200,
	236981792, 236981792, 238030384, 239078976,
	240127568, 241176160, 242224752, 243273344,
	243273344, 244321936, 245370528, 246419120};
+
+
+#define MDSS_MDP_PA_SIZE 0xC
+#define MDSS_MDP_SIX_ZONE_SIZE 0xC
+#define MDSS_MDP_MEM_COL_SIZE 0x3C
+#define MDSS_MDP_GC_SIZE 0x28
+#define MDSS_MDP_PCC_SIZE 0xB8
+#define MDSS_MDP_GAMUT_SIZE 0x5C
+#define MDSS_MDP_IGC_DSPP_SIZE 0x28
+#define MDSS_MDP_IGC_SSPP_SIZE 0x88
+#define MDSS_MDP_VIG_QSEED2_SHARP_SIZE 0x0C
+#define TOTAL_BLEND_STAGES 0x4
+
+#define PP_FLAGS_DIRTY_PA 0x1
+#define PP_FLAGS_DIRTY_PCC 0x2
+#define PP_FLAGS_DIRTY_IGC 0x4
+#define PP_FLAGS_DIRTY_ARGC 0x8
+#define PP_FLAGS_DIRTY_ENHIST 0x10
+#define PP_FLAGS_DIRTY_DITHER 0x20
+#define PP_FLAGS_DIRTY_GAMUT 0x40
+#define PP_FLAGS_DIRTY_HIST_COL 0x80
+#define PP_FLAGS_DIRTY_PGC 0x100
+#define PP_FLAGS_DIRTY_SHARP 0x200
+/* Leave space for future features */
+#define PP_FLAGS_RESUME_COMMIT 0x10000000
+
+#define IS_PP_RESUME_COMMIT(x) ((x) & PP_FLAGS_RESUME_COMMIT)
+#define PP_FLAGS_LUT_BASED (PP_FLAGS_DIRTY_IGC | PP_FLAGS_DIRTY_GAMUT | \
+ PP_FLAGS_DIRTY_PGC | PP_FLAGS_DIRTY_ARGC)
+#define IS_PP_LUT_DIRTY(x) ((x) & PP_FLAGS_LUT_BASED)
+#define IS_SIX_ZONE_DIRTY(d, pa) (((d) & PP_FLAGS_DIRTY_PA) && \
+ ((pa) & MDP_PP_PA_SIX_ZONE_ENABLE))
+
+#define PP_SSPP 0
+#define PP_DSPP 1
+
+#define PP_AD_BAD_HW_NUM 255
+
+#define PP_AD_STATE_INIT 0x2
+#define PP_AD_STATE_CFG 0x4
+#define PP_AD_STATE_DATA 0x8
+#define PP_AD_STATE_RUN 0x10
+#define PP_AD_STATE_VSYNC 0x20
+#define PP_AD_STATE_BL_LIN 0x40
+#define PP_AD_STATE_IPC_RESUME 0x80
+#define PP_AD_STATE_IPC_RESET 0x100
+
+#define PP_AD_STATE_IS_INITCFG(st) (((st) & PP_AD_STATE_INIT) &&\
+ ((st) & PP_AD_STATE_CFG))
+
+#define PP_AD_STATE_IS_READY(st) (((st) & PP_AD_STATE_INIT) &&\
+ ((st) & PP_AD_STATE_CFG) &&\
+ ((st) & PP_AD_STATE_DATA))
+
+#define PP_AD_STS_DIRTY_INIT 0x2
+#define PP_AD_STS_DIRTY_CFG 0x4
+#define PP_AD_STS_DIRTY_DATA 0x8
+#define PP_AD_STS_DIRTY_VSYNC 0x10
+#define PP_AD_STS_DIRTY_ENABLE 0x20
+
+#define PP_AD_STS_IS_DIRTY(sts) (((sts) & PP_AD_STS_DIRTY_INIT) ||\
+ ((sts) & PP_AD_STS_DIRTY_CFG))
+
+/* Bits 0 and 1 and 5 */
+#define MDSS_AD_INPUT_AMBIENT (0x23)
+/* Bits 3 and 7 */
+#define MDSS_AD_INPUT_STRENGTH (0x88)
+/*
+ * Check data by shifting by mode to see if it matches to the
+ * MDSS_AD_INPUT_* bitfields
+ */
+#define MDSS_AD_MODE_DATA_MATCH(mode, data) ((1 << (mode)) & (data))
+#define MDSS_AD_RUNNING_AUTO_BL(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
+ ((ad)->cfg.mode == MDSS_AD_MODE_AUTO_BL))
+#define MDSS_AD_RUNNING_AUTO_STR(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
+ ((ad)->cfg.mode == MDSS_AD_MODE_AUTO_STR))
+#define MDSS_AD_AUTO_TRIGGER 0x80
+#define MDSS_AD_T_FILTER_CTRL_0 0
+#define MDSS_AD_IPC_FRAME_COUNT 2
+#define MDSS_AD_MODE_IPC_BIT 0x4
+#define MDSS_AD_MODE_MAN_IPC 0x5
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+static struct mdp_pp_driver_ops pp_driver_ops;
+static struct mdp_pp_feature_ops *pp_ops;
+
+static DEFINE_MUTEX(mdss_pp_mutex);
+static struct mdss_pp_res_type *mdss_pp_res;
+
+static u32 pp_hist_read(char __iomem *v_addr,
+ struct pp_hist_col_info *hist_info);
+static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
+ struct pp_sts_type *pp_sts);
+static int pp_hist_disable(struct pp_hist_col_info *hist_info);
+static void pp_update_pcc_regs(char __iomem *addr,
+ struct mdp_pcc_cfg_data *cfg_ptr);
+static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
+ char __iomem *addr, u32 blk_idx,
+ u32 total_idx);
+static void pp_update_gc_one_lut(char __iomem *addr,
+ struct mdp_ar_gc_lut_data *lut_data,
+ uint8_t num_stages);
+static void pp_update_argc_lut(char __iomem *addr,
+ struct mdp_pgc_lut_data *config);
+static void pp_update_hist_lut(char __iomem *base,
+ struct mdp_hist_lut_data *cfg);
+static int pp_gm_has_invalid_lut_size(struct mdp_gamut_cfg_data *config);
+static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
+ char __iomem *base,
+ struct pp_sts_type *pp_sts);
+static void pp_pa_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_pa_cfg *pa_config);
+static void pp_pa_v2_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_pa_v2_data *pa_v2_config,
+ int mdp_location);
+static void pp_pcc_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_pcc_cfg_data *pcc_config);
+static void pp_igc_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_igc_lut_data *igc_config,
+ u32 pipe_num, u32 pipe_cnt);
+static void pp_enhist_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_hist_lut_data *enhist_cfg);
+static void pp_dither_config(char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_dither_cfg_data *dither_cfg);
+static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
+ struct pp_sts_type *pp_sts, int mdp_rev,
+ u32 *opmode);
+static void pp_sharp_config(char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_sharp_cfg *sharp_config);
+static void pp_update_pa_v2_vig_opmode(struct pp_sts_type *pp_sts,
+ u32 *opmode);
+static int pp_copy_pa_six_zone_lut(struct mdp_pa_v2_cfg_data *pa_v2_config,
+ u32 disp_num);
+static void pp_update_pa_v2_global_adj_regs(char __iomem *addr,
+ struct mdp_pa_v2_data *pa_config);
+static void pp_update_pa_v2_mem_col(char __iomem *addr,
+ struct mdp_pa_v2_data *pa_v2_config);
+static void pp_update_pa_v2_mem_col_regs(char __iomem *addr,
+ struct mdp_pa_mem_col_cfg *cfg);
+static void pp_update_pa_v2_six_zone_regs(char __iomem *addr,
+ struct mdp_pa_v2_data *pa_v2_config);
+static void pp_update_pa_v2_sts(struct pp_sts_type *pp_sts,
+ struct mdp_pa_v2_data *pa_v2_config);
+static int pp_read_pa_v2_regs(char __iomem *addr,
+ struct mdp_pa_v2_data *pa_v2_config,
+ u32 disp_num);
+static void pp_read_pa_mem_col_regs(char __iomem *addr,
+ struct mdp_pa_mem_col_cfg *mem_col_cfg);
+static struct msm_fb_data_type *mdss_get_mfd_from_index(int index);
+static int mdss_mdp_mfd_valid_ad(struct msm_fb_data_type *mfd);
+static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
+ struct mdss_ad_info **ad);
+static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd);
+static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t);
+static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw,
+ struct mdss_ad_info *ad);
+static void pp_ad_init_write(struct mdss_mdp_ad *ad_hw,
+ struct mdss_ad_info *ad, struct mdss_mdp_ctl *ctl);
+static void pp_ad_input_write(struct mdss_mdp_ad *ad_hw,
+ struct mdss_ad_info *ad);
+static int pp_ad_setup_hw_nums(struct msm_fb_data_type *mfd,
+ struct mdss_ad_info *ad);
+static void pp_ad_bypass_config(struct mdss_ad_info *ad,
+ struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode);
+static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd);
+static void pp_ad_cfg_lut(char __iomem *addr, u32 *data);
+static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out);
+static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
+ int inv);
+static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
+ bool *bl_out_notify);
+static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num);
+static int pp_update_pcc_pipe_setup(struct mdss_mdp_pipe *pipe, u32 location);
+static void mdss_mdp_hist_irq_set_mask(u32 irq);
+static void mdss_mdp_hist_irq_clear_mask(u32 irq);
+static void mdss_mdp_hist_intr_notify(u32 disp);
+static int mdss_mdp_panel_default_dither_config(struct msm_fb_data_type *mfd,
+ u32 panel_bpp, bool enable);
+static int mdss_mdp_limited_lut_igc_config(struct msm_fb_data_type *mfd,
+ bool enable);
+static inline int pp_validate_dspp_mfd_block(struct msm_fb_data_type *mfd,
+ int block);
+static int pp_mfd_release_all(struct msm_fb_data_type *mfd);
+static int pp_mfd_ad_release_all(struct msm_fb_data_type *mfd);
+static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd);
+static int pp_get_driver_ops(struct mdp_pp_driver_ops *ops);
+static int pp_ppb_setup(struct mdss_mdp_mixer *mixer);
+
+static u32 last_sts, last_state;
+
+static inline void mdss_mdp_pp_get_dcm_state(struct mdss_mdp_pipe *pipe,
+ u32 *dcm_state)
+{
+ if (pipe && pipe->mixer_left && pipe->mixer_left->ctl &&
+ pipe->mixer_left->ctl->mfd)
+ *dcm_state = pipe->mixer_left->ctl->mfd->dcm_state;
+}
+
+inline int linear_map(int in, int *out, int in_max, int out_max)
+{
+ if (in < 0 || !out || in_max <= 0 || out_max <= 0)
+ return -EINVAL;
+ *out = ((2 * (in * out_max) + in_max) / (2 * in_max));
+ pr_debug("in = %d, out = %d, in_max = %d, out_max = %d\n",
+ in, *out, in_max, out_max);
+ if ((in > 0) && (*out == 0))
+ *out = 1;
+ return 0;
+
+}
+
+/**
+ * __get_hist_pipe() - get a pipe only if histogram is supported on it
+ * @pnum: pipe number desired
+ *
+ * returns the pipe with id only if the pipe supports sspp histogram
+ */
+static inline struct mdss_mdp_pipe *__get_hist_pipe(int pnum)
+{
+ enum mdss_mdp_pipe_type ptype;
+
+ ptype = get_pipe_type_from_num(pnum);
+
+ /* only VIG pipes support histogram */
+ if (ptype != MDSS_MDP_PIPE_TYPE_VIG)
+ return NULL;
+
+ return mdss_mdp_pipe_get(BIT(pnum), MDSS_MDP_PIPE_RECT0);
+}
+
+int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, struct mdp_csc_cfg *data)
+{
+ int i, ret = 0;
+ char __iomem *base, *addr;
+ u32 val = 0, lv_shift = 0;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_mdp_cdm *cdm;
+ struct mdss_mdp_writeback *wb;
+
+ if (data == NULL) {
+ pr_err("no csc matrix specified\n");
+ return -EINVAL;
+ }
+
+ mdata = mdss_mdp_get_mdata();
+ switch (block) {
+ case MDSS_MDP_BLOCK_SSPP:
+ lv_shift = CSC_8BIT_LV_SHIFT;
+ /*
+ * CSC is used on VIG pipes and currently VIG pipes do not
+ * support multirect so always use RECT0.
+ */
+ pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx),
+ MDSS_MDP_PIPE_RECT0);
+ if (!pipe) {
+ pr_err("invalid blk index=%d\n", blk_idx);
+ ret = -EINVAL;
+ break;
+ }
+ if (mdss_mdp_pipe_is_yuv(pipe)) {
+ base = pipe->base + MDSS_MDP_REG_VIG_CSC_1_BASE;
+ } else {
+ pr_err("non ViG pipe %d for CSC is not allowed\n",
+ blk_idx);
+ ret = -EINVAL;
+ }
+ break;
+ case MDSS_MDP_BLOCK_WB:
+ lv_shift = CSC_8BIT_LV_SHIFT;
+ if (blk_idx < mdata->nwb) {
+ wb = mdata->wb + blk_idx;
+ if (wb->base)
+ base = wb->base + MDSS_MDP_REG_WB_CSC_BASE;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case MDSS_MDP_BLOCK_CDM:
+ lv_shift = CSC_10BIT_LV_SHIFT;
+ if (blk_idx < mdata->ncdm) {
+ cdm = mdata->cdm_off + blk_idx;
+ if (cdm->base)
+ base = cdm->base +
+ MDSS_MDP_REG_CDM_CSC_10_BASE;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case MDSS_MDP_BLOCK_SSPP_10:
+ lv_shift = CSC_10BIT_LV_SHIFT;
+
+ /* CSC can be applied only on VIG which RECT0 only */
+ pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx),
+ MDSS_MDP_PIPE_RECT0);
+ if (!pipe) {
+ pr_err("invalid blk index=%d\n", blk_idx);
+ ret = -EINVAL;
+ break;
+ }
+ if (mdss_mdp_pipe_is_yuv(pipe)) {
+ base = pipe->base + MDSS_MDP_REG_VIG_CSC_10_BASE;
+ } else {
+ pr_err("non ViG pipe %d for CSC is not allowed\n",
+ blk_idx);
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret != 0) {
+ pr_err("unsupported block id %d for csc\n", blk_idx);
+ return ret;
+ }
+
+ addr = base + CSC_MV_OFF;
+ for (i = 0; i < 9; i++) {
+ if (i & 0x1) {
+ val |= data->csc_mv[i] << 16;
+ writel_relaxed(val, addr);
+ addr += sizeof(u32);
+ } else {
+ val = data->csc_mv[i];
+ }
+ }
+ writel_relaxed(val, addr); /* COEFF_33 */
+
+ addr = base + CSC_BV_OFF;
+ for (i = 0; i < 3; i++) {
+ writel_relaxed(data->csc_pre_bv[i], addr);
+ writel_relaxed(data->csc_post_bv[i], addr + CSC_POST_OFF);
+ addr += sizeof(u32);
+ }
+
+ addr = base + CSC_LV_OFF;
+ for (i = 0; i < 6; i += 2) {
+ val = (data->csc_pre_lv[i] << lv_shift) | data->csc_pre_lv[i+1];
+ writel_relaxed(val, addr);
+
+ val = (data->csc_post_lv[i] << lv_shift) |
+ data->csc_post_lv[i+1];
+ writel_relaxed(val, addr + CSC_POST_OFF);
+ addr += sizeof(u32);
+ }
+
+ return ret;
+}
+
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 csc_type)
+{
+ struct mdp_csc_cfg *data;
+
+ if (csc_type >= MDSS_MDP_MAX_CSC) {
+ pr_err("invalid csc matrix index %d\n", csc_type);
+ return -ERANGE;
+ }
+
+ pr_debug("csc type=%d blk=%d idx=%d\n", csc_type,
+ block, blk_idx);
+
+ if (block == MDSS_MDP_BLOCK_CDM || block == MDSS_MDP_BLOCK_SSPP_10)
+ data = &mdp_csc_10bit_convert[csc_type];
+ else
+ data = &mdp_csc_8bit_convert[csc_type];
+ return mdss_mdp_csc_setup_data(block, blk_idx, data);
+}
+
/*
 * pp_gamut_config() - program the DSPP 3D gamut mapping tables
 * @gamut_cfg:	per-table sizes and r/g/b LUT entries plus op flags
 * @base:	DSPP register base for this display
 * @pp_sts:	status word updated with gamut enable/order bits
 *
 * Writes all red tables, then green, then blue (entries masked to
 * 13 bits), and updates the gamut status bits from the op flags.
 */
static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
				char __iomem *base, struct pp_sts_type *pp_sts)
{
	char __iomem *addr;
	int i, j;

	if (gamut_cfg->flags & MDP_PP_OPS_WRITE) {
		addr = base + MDSS_MDP_REG_DSPP_GAMUT_BASE;
		/*
		 * NOTE(review): addr advances once per table, not per entry -
		 * presumably the LUT port auto-increments on each write;
		 * confirm against the HW programming guide.
		 */
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				writel_relaxed((u32)gamut_cfg->r_tbl[i][j]
					& 0x1FFF, addr);
			addr += 4;
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				writel_relaxed((u32)gamut_cfg->g_tbl[i][j]
					& 0x1FFF, addr);
			addr += 4;
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
				writel_relaxed((u32)gamut_cfg->b_tbl[i][j]
					& 0x1FFF, addr);
			addr += 4;
		}
		/* gamut_first: apply gamut mapping before other PP stages */
		if (gamut_cfg->gamut_first)
			pp_sts->gamut_sts |= PP_STS_GAMUT_FIRST;
	}

	if (gamut_cfg->flags & MDP_PP_OPS_DISABLE)
		pp_sts->gamut_sts &= ~PP_STS_ENABLE;
	else if (gamut_cfg->flags & MDP_PP_OPS_ENABLE)
		pp_sts->gamut_sts |= PP_STS_ENABLE;
	pp_sts_set_split_bits(&pp_sts->gamut_sts, gamut_cfg->flags);
}
+
/*
 * pp_pa_config() - program legacy (v1) picture adjustment registers
 * @flags:	dirty flags; a no-op unless PP_FLAGS_DIRTY_PA is set
 * @addr:	base of the PA register group (hue/sat/val/cont)
 * @pp_sts:	status word updated with the PA enable state
 * @pa_config:	hue/saturation/value/contrast adjustments and op flags
 */
static void pp_pa_config(unsigned long flags, char __iomem *addr,
				struct pp_sts_type *pp_sts,
				struct mdp_pa_cfg *pa_config)
{
	if (flags & PP_FLAGS_DIRTY_PA) {
		if (pa_config->flags & MDP_PP_OPS_WRITE) {
			/* four consecutive 32-bit registers */
			writel_relaxed(pa_config->hue_adj, addr);
			addr += 4;
			writel_relaxed(pa_config->sat_adj, addr);
			addr += 4;
			writel_relaxed(pa_config->val_adj, addr);
			addr += 4;
			writel_relaxed(pa_config->cont_adj, addr);
		}
		if (pa_config->flags & MDP_PP_OPS_DISABLE)
			pp_sts->pa_sts &= ~PP_STS_ENABLE;
		else if (pa_config->flags & MDP_PP_OPS_ENABLE)
			pp_sts->pa_sts |= PP_STS_ENABLE;
	}
}
+
/*
 * pp_pa_v2_config() - program PA v2 registers for a DSPP or SSPP block
 * @flags:	dirty flags; a no-op unless PP_FLAGS_DIRTY_PA is set
 * @addr:	base of the PA register group for the target block
 * @pp_sts:	status word rebuilt from the config flags on success
 * @pa_v2_config:	global adjust, six-zone and memory-color config
 * @mdp_location:	PP_DSPP or PP_SSPP; selects the register layout
 *
 * The global HSV/contrast adjust registers are common to both paths;
 * DSPP additionally has six-zone and memory-color regs at fixed offsets,
 * while SSPP keeps memory-color regs at a separate block-relative base.
 */
static void pp_pa_v2_config(unsigned long flags, char __iomem *addr,
				struct pp_sts_type *pp_sts,
				struct mdp_pa_v2_data *pa_v2_config,
				int mdp_location)
{
	if ((flags & PP_FLAGS_DIRTY_PA) &&
			(pa_v2_config->flags & MDP_PP_OPS_WRITE)) {
		pp_update_pa_v2_global_adj_regs(addr,
				pa_v2_config);
		/* Update PA DSPP Regs */
		if (mdp_location == PP_DSPP) {
			/* six-zone regs sit 0x10 past the global adj regs */
			addr += 0x10;
			pp_update_pa_v2_six_zone_regs(addr, pa_v2_config);
			addr += 0xC;
			pp_update_pa_v2_mem_col(addr, pa_v2_config);
		} else if (mdp_location == PP_SSPP) { /* Update PA SSPP Regs */
			/* rebase from PA offset to the mem-color offset */
			addr -= MDSS_MDP_REG_VIG_PA_BASE;
			addr += MDSS_MDP_REG_VIG_MEM_COL_BASE;
			pp_update_pa_v2_mem_col(addr, pa_v2_config);
		}
		pp_update_pa_v2_sts(pp_sts, pa_v2_config);
	}
}
+
/*
 * pp_update_pa_v2_global_adj_regs() - write PA v2 global HSV/contrast regs
 * @addr:	base of the global adjust register group
 * @pa_v2_config:	config whose flags gate which registers are written
 *
 * Registers are 4 bytes apart; addr advances unconditionally so each
 * value lands in its fixed slot even when earlier writes are skipped.
 */
static void pp_update_pa_v2_global_adj_regs(char __iomem *addr,
				struct mdp_pa_v2_data *pa_v2_config)
{
	if (pa_v2_config->flags & MDP_PP_PA_HUE_ENABLE)
		writel_relaxed(pa_v2_config->global_hue_adj, addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_SAT_ENABLE)
		/* Sat Global Adjust reg includes Sat Threshold */
		writel_relaxed(pa_v2_config->global_sat_adj, addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_VAL_ENABLE)
		writel_relaxed(pa_v2_config->global_val_adj, addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_CONT_ENABLE)
		writel_relaxed(pa_v2_config->global_cont_adj, addr);
}
+
/*
 * pp_update_pa_v2_mem_col() - write the three PA v2 memory-color zones
 * @addr:	base of the memory-color register area
 * @pa_v2_config:	per-zone configs; flags gate which zones are written
 *
 * Each zone (skin, sky, foliage) occupies a 0x14-byte register group;
 * addr advances unconditionally so skipped zones keep their slots.
 */
static void pp_update_pa_v2_mem_col(char __iomem *addr,
				struct mdp_pa_v2_data *pa_v2_config)
{
	/* Update skin zone memory color registers */
	if (pa_v2_config->flags & MDP_PP_PA_SKIN_ENABLE)
		pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->skin_cfg);
	addr += 0x14;
	/* Update sky zone memory color registers */
	if (pa_v2_config->flags & MDP_PP_PA_SKY_ENABLE)
		pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->sky_cfg);
	addr += 0x14;
	/* Update foliage zone memory color registers */
	if (pa_v2_config->flags & MDP_PP_PA_FOL_ENABLE)
		pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->fol_cfg);
}
+
+static void pp_update_pa_v2_mem_col_regs(char __iomem *addr,
+ struct mdp_pa_mem_col_cfg *cfg)
+{
+ writel_relaxed(cfg->color_adjust_p0, addr);
+ addr += 4;
+ writel_relaxed(cfg->color_adjust_p1, addr);
+ addr += 4;
+ writel_relaxed(cfg->hue_region, addr);
+ addr += 4;
+ writel_relaxed(cfg->sat_region, addr);
+ addr += 4;
+ writel_relaxed(cfg->val_region, addr);
+}
+
/*
 * pp_update_pa_v2_six_zone_regs() - load the PA v2 six-zone curve LUT
 * @addr:	base of the six-zone LUT port registers (p0 at +0, p1 at +4)
 * @pa_v2_config:	six-zone curve data and threshold
 *
 * NOTE(review): p1 is written before p0 for every entry - presumably the
 * p0 write latches the LUT entry; confirm against the HW documentation.
 * The first p0 write carries bit 26 to enable auto-incrementing LUT
 * accesses for the remaining entries.
 */
static void pp_update_pa_v2_six_zone_regs(char __iomem *addr,
				struct mdp_pa_v2_data *pa_v2_config)
{
	int i;
	u32 data;
	/* Update six zone memory color registers */
	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
		addr += 4;
		writel_relaxed(pa_v2_config->six_zone_curve_p1[0], addr);
		addr -= 4;
		/* Index Update to trigger auto-incrementing LUT accesses */
		data = (1 << 26);
		writel_relaxed((pa_v2_config->six_zone_curve_p0[0] & 0xFFF) |
				data, addr);

		/* Remove Index Update */
		for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
			addr += 4;
			writel_relaxed(pa_v2_config->six_zone_curve_p1[i],
					addr);
			addr -= 4;
			writel_relaxed(pa_v2_config->six_zone_curve_p0[i] &
					0xFFF, addr);
		}
		/* threshold register sits past the two LUT ports */
		addr += 8;
		writel_relaxed(pa_v2_config->six_zone_thresh, addr);
	}
}
+
+static void pp_update_pa_v2_sts(struct pp_sts_type *pp_sts,
+ struct mdp_pa_v2_data *pa_v2_config)
+{
+ pp_sts->pa_sts = 0;
+ /* PA STS update */
+ if (pa_v2_config->flags & MDP_PP_OPS_ENABLE)
+ pp_sts->pa_sts |= PP_STS_ENABLE;
+ else
+ pp_sts->pa_sts &= ~PP_STS_ENABLE;
+
+ /* Global HSV STS update */
+ if (pa_v2_config->flags & MDP_PP_PA_HUE_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_SAT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_VAL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_CONT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_MEM_PROTECT_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROTECT_EN;
+ if (pa_v2_config->flags & MDP_PP_PA_SAT_ZERO_EXP_EN)
+ pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
+
+ /* Memory Color STS update */
+ if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_SKIN_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_SKY_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_FOL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
+
+ /* Six Zone STS update */
+ if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_HUE_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_SAT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
+ if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_VAL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
+
+ pp_sts_set_split_bits(&pp_sts->pa_sts, pa_v2_config->flags);
+}
+
+static void pp_pcc_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_pcc_cfg_data *pcc_config)
+{
+ if (flags & PP_FLAGS_DIRTY_PCC) {
+ if (pcc_config->ops & MDP_PP_OPS_WRITE)
+ pp_update_pcc_regs(addr, pcc_config);
+
+ if (pcc_config->ops & MDP_PP_OPS_DISABLE)
+ pp_sts->pcc_sts &= ~PP_STS_ENABLE;
+ else if (pcc_config->ops & MDP_PP_OPS_ENABLE)
+ pp_sts->pcc_sts |= PP_STS_ENABLE;
+ pp_sts_set_split_bits(&pp_sts->pcc_sts, pcc_config->ops);
+ }
+}
+
+static void pp_igc_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_igc_lut_data *igc_config,
+ u32 pipe_num, u32 pipe_cnt)
+{
+ u32 tbl_idx;
+
+ if (igc_config->ops & MDP_PP_OPS_WRITE)
+ pp_update_igc_lut(igc_config, addr, pipe_num,
+ pipe_cnt);
+
+ if (igc_config->ops & MDP_PP_IGC_FLAG_ROM0) {
+ pp_sts->igc_sts |= PP_STS_ENABLE;
+ tbl_idx = 1;
+ } else if (igc_config->ops & MDP_PP_IGC_FLAG_ROM1) {
+ pp_sts->igc_sts |= PP_STS_ENABLE;
+ tbl_idx = 2;
+ } else {
+ tbl_idx = 0;
+ }
+ pp_sts->igc_tbl_idx = tbl_idx;
+ if (igc_config->ops & MDP_PP_OPS_DISABLE)
+ pp_sts->igc_sts &= ~PP_STS_ENABLE;
+ else if (igc_config->ops & MDP_PP_OPS_ENABLE)
+ pp_sts->igc_sts |= PP_STS_ENABLE;
+ pp_sts_set_split_bits(&pp_sts->igc_sts, igc_config->ops);
+}
+
+static void pp_enhist_config(unsigned long flags, char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_hist_lut_data *enhist_cfg)
+{
+ if (flags & PP_FLAGS_DIRTY_ENHIST) {
+ if (enhist_cfg->ops & MDP_PP_OPS_WRITE)
+ pp_update_hist_lut(addr, enhist_cfg);
+
+ if (enhist_cfg->ops & MDP_PP_OPS_DISABLE)
+ pp_sts->enhist_sts &= ~PP_STS_ENABLE;
+ else if (enhist_cfg->ops & MDP_PP_OPS_ENABLE)
+ pp_sts->enhist_sts |= PP_STS_ENABLE;
+ }
+}
+
+/*the below function doesn't do error checking on the input params*/
+static void pp_sharp_config(char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_sharp_cfg *sharp_config)
+{
+ if (sharp_config->flags & MDP_PP_OPS_WRITE) {
+ writel_relaxed(sharp_config->strength, addr);
+ addr += 4;
+ writel_relaxed(sharp_config->edge_thr, addr);
+ addr += 4;
+ writel_relaxed(sharp_config->smooth_thr, addr);
+ addr += 4;
+ writel_relaxed(sharp_config->noise_thr, addr);
+ }
+ if (sharp_config->flags & MDP_PP_OPS_DISABLE)
+ pp_sts->sharp_sts &= ~PP_STS_ENABLE;
+ else if (sharp_config->flags & MDP_PP_OPS_ENABLE)
+ pp_sts->sharp_sts |= PP_STS_ENABLE;
+
+}
+
+static void pp_vig_pipe_opmode_config(struct pp_sts_type *pp_sts, u32 *opmode)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if ((mdata->mdp_rev < MDSS_MDP_HW_REV_103) &&
+ (pp_sts->pa_sts & PP_STS_ENABLE))
+ *opmode |= MDSS_MDP_VIG_OP_PA_EN;
+ else if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
+ pp_update_pa_v2_vig_opmode(pp_sts,
+ opmode);
+
+ if (pp_sts->enhist_sts & PP_STS_ENABLE)
+ /* Enable HistLUT and PA */
+ *opmode |= MDSS_MDP_VIG_OP_HIST_LUTV_EN |
+ MDSS_MDP_VIG_OP_PA_EN;
+}
+
/*
 * pp_vig_pipe_setup() - apply per-pipe post-processing for a VIG pipe
 * @pipe:	the VIG pipe being configured for the coming kickoff
 * @op:		opmode word accumulated for the pipe's OP_MODE register
 *
 * Programs CSC (10-bit path on rev 3.0x, 8-bit otherwise), SSPP
 * histogram, and - when MDP_OVERLAY_PP_CFG_EN is set - PA/PA v2,
 * histogram LUT and PCC, then folds the feature enables into @op.
 * Returns 0; PCC programming errors are logged but not propagated.
 */
static int pp_vig_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	unsigned long flags = 0;
	char __iomem *offset;
	struct mdss_data_type *mdata;
	u32 dcm_state = DCM_UNINIT, current_opmode, csc_reset;
	int ret = 0;
	u32 csc_op;

	pr_debug("pnum=%x\n", pipe->num);

	mdss_mdp_pp_get_dcm_state(pipe, &dcm_state);

	mdata = mdss_mdp_get_mdata();
	/* rev 3.0x uses the dedicated 10-bit CSC block */
	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_301) ||
	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_300)) {
		if (pipe->src_fmt->is_yuv) {
			/* TODO: check csc cfg from PP block */
			mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP_10, pipe->num,
					   pp_vig_csc_pipe_val(pipe));
			csc_op = ((0 << 2) |	/* DST_DATA=RGB */
				  (1 << 1) |	/* SRC_DATA=YCBCR*/
				  (1 << 0));	/* CSC_10_EN */
		} else {
			csc_op = 0; /* CSC_10_DISABLE */
		}
		writel_relaxed(csc_op, pipe->base +
			       MDSS_MDP_REG_VIG_CSC_10_OP_MODE);
	} else if ((pipe->flags & MDP_OVERLAY_PP_CFG_EN) &&
		   (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG)) {
		/* user-supplied CSC matrix: mirror its flags into opmode */
		*op |= !!(pipe->pp_cfg.csc_cfg.flags &
			  MDP_CSC_FLAG_ENABLE) << 17;
		*op |= !!(pipe->pp_cfg.csc_cfg.flags &
			  MDP_CSC_FLAG_YUV_IN) << 18;
		*op |= !!(pipe->pp_cfg.csc_cfg.flags &
			  MDP_CSC_FLAG_YUV_OUT) << 19;
		/*
		 * TODO: Allow pipe to be programmed whenever new CSC is
		 * applied (i.e. dirty bit)
		 */
		mdss_mdp_csc_setup_data(MDSS_MDP_BLOCK_SSPP, pipe->num,
					&pipe->pp_cfg.csc_cfg);
	} else if (pipe->src_fmt->is_yuv) {
		*op |= (0 << 19) |	/* DST_DATA=RGB */
		       (1 << 18) |	/* SRC_DATA=YCBCR */
		       (1 << 17);	/* CSC_1_EN */
		/*
		 * TODO: Needs to be part of dirty bit logic: if there
		 * is a previously configured pipe need to re-configure
		 * CSC matrix
		 */
		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num,
				   pp_vig_csc_pipe_val(pipe));
	}

	/* Update CSC state only if tuning mode is enable */
	if (dcm_state == DTM_ENTER) {
		/* Reset bit 16 to 19 for CSC_STATE in VIG_OP_MODE */
		csc_reset = 0xFFF0FFFF;
		current_opmode = readl_relaxed(pipe->base +
						MDSS_MDP_REG_VIG_OP_MODE);
		*op |= (current_opmode & csc_reset);
		return 0;
	}

	/* Histogram collection enabled checked inside pp_hist_setup */
	pp_hist_setup(op, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer_left,
			&pipe->pp_res.pp_sts);

	if (!(pipe->flags & MDP_OVERLAY_PP_CFG_EN)) {
		pr_debug("Overlay PP CFG enable not set\n");
		return 0;
	}

	/* legacy PA only on pre-103 hardware */
	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) &&
	    (mdata->mdp_rev < MDSS_MDP_HW_REV_103)) {
		flags = PP_FLAGS_DIRTY_PA;
		pp_pa_config(flags,
				pipe->base + MDSS_MDP_REG_VIG_PA_BASE,
				&pipe->pp_res.pp_sts,
				&pipe->pp_cfg.pa_cfg);
	}
	/* PA v2 on rev 103+, via driver-supplied op when available */
	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_V2_CFG) &&
	    (mdata->mdp_rev >= MDSS_MDP_HW_REV_103)) {
		flags = PP_FLAGS_DIRTY_PA;
		if (!pp_ops[PA].pp_set_config)
			pp_pa_v2_config(flags,
				pipe->base + MDSS_MDP_REG_VIG_PA_BASE,
				&pipe->pp_res.pp_sts,
				&pipe->pp_cfg.pa_v2_cfg,
				PP_SSPP);
		else
			pp_ops[PA].pp_set_config(pipe->base,
				&pipe->pp_res.pp_sts,
				&pipe->pp_cfg.pa_v2_cfg_data,
				SSPP_VIG);
	}

	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
		flags = PP_FLAGS_DIRTY_ENHIST;
		if (!pp_ops[HIST_LUT].pp_set_config) {
			pp_enhist_config(flags,
				pipe->base + MDSS_MDP_REG_VIG_HIST_LUT_BASE,
				&pipe->pp_res.pp_sts,
				&pipe->pp_cfg.hist_lut_cfg);
			/*
			 * hist LUT runs through the PA block; if PA is off,
			 * neutralize the PA adjust registers.
			 */
			if ((pipe->pp_res.pp_sts.enhist_sts & PP_STS_ENABLE) &&
			    !(pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)) {
				/* Program default value */
				offset = pipe->base + MDSS_MDP_REG_VIG_PA_BASE;
				writel_relaxed(0, offset);
				writel_relaxed(0, offset + 4);
				writel_relaxed(0, offset + 8);
				writel_relaxed(0, offset + 12);
			}
		} else {
			pp_ops[HIST_LUT].pp_set_config(pipe->base,
				 &pipe->pp_res.pp_sts,
				 &pipe->pp_cfg.hist_lut_cfg,
				 SSPP_VIG);
		}
	}

	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG) {
		ret = pp_update_pcc_pipe_setup(pipe, SSPP_VIG);
		if (ret)
			pr_err("error in enabling the pcc ret %d pipe type %d pipe num %d\n",
				ret, pipe->type, pipe->num);
	}
	if (pp_driver_ops.pp_opmode_config)
		pp_driver_ops.pp_opmode_config(SSPP_VIG, &pipe->pp_res.pp_sts,
					       op, 0);
	else
		pp_vig_pipe_opmode_config(&pipe->pp_res.pp_sts, op);

	return 0;
}
+
+static void pp_update_pa_v2_vig_opmode(struct pp_sts_type *pp_sts,
+ u32 *opmode)
+{
+ if (pp_sts->pa_sts & PP_STS_ENABLE)
+ *opmode |= MDSS_MDP_VIG_OP_PA_EN;
+ if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_HUE_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_SAT_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_VAL_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_CONT_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROTECT_EN)
+ *opmode |= MDSS_MDP_VIG_OP_PA_MEM_PROTECT_EN;
+ if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+ *opmode |= MDSS_MDP_VIG_OP_PA_SAT_ZERO_EXP_EN;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_MEM_COL_SKIN_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_MEM_COL_SKY_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+ *opmode |= MDSS_MDP_VIG_OP_PA_MEM_COL_FOL_MASK;
+}
+
/*
 * pp_rgb_pipe_setup() - apply per-pipe post-processing for an RGB pipe
 * @pipe:	the RGB pipe being configured
 * @op:		opmode word (unused for RGB pipes; PCC has its own regs)
 *
 * Only PCC is supported on RGB pipes.
 * NOTE(review): a PCC setup failure is logged but the function still
 * returns 0 - treated as best-effort; confirm this is intentional.
 */
static int pp_rgb_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	int ret = 0;

	if (!pipe) {
		pr_err("invalid param pipe %pK\n", pipe);
		return -EINVAL;
	}
	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN &&
	    pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG) {
		ret = pp_update_pcc_pipe_setup(pipe, SSPP_RGB);
		if (ret)
			pr_err("error in enabling the pcc ret %d pipe type %d pipe num %d\n",
				ret, pipe->type, pipe->num);
	}
	return 0;
}
+
/*
 * pp_dma_pipe_setup() - apply per-pipe post-processing for a DMA pipe
 * @pipe:	the DMA pipe being configured
 * @op:		opmode word (unused for DMA pipes; PCC has its own regs)
 *
 * Only PCC is supported on DMA pipes; mirrors pp_rgb_pipe_setup().
 * NOTE(review): a PCC setup failure is logged but the function still
 * returns 0 - treated as best-effort; confirm this is intentional.
 */
static int pp_dma_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
{
	int ret = 0;

	if (!pipe) {
		pr_err("invalid param pipe %pK\n", pipe);
		return -EINVAL;
	}
	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN &&
	    pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG) {
		ret = pp_update_pcc_pipe_setup(pipe, SSPP_DMA);
		if (ret)
			pr_err("error in enabling the pcc ret %d pipe type %d pipe num %d\n",
				ret, pipe->type, pipe->num);
	}
	return 0;
}
+
+/*
+ * mdss_mdp_qseed2_setup() - program the legacy QSEED2 scaler for a pipe.
+ * @pipe: source pipe whose src/dst rectangles, pixel format, decimation
+ *        and scaler state drive the register programming.
+ *
+ * Rejects any scaling request on DMA/cursor pipes, applies default
+ * sharpening parameters when userspace supplied none, then computes
+ * the X/Y filter selection, initial phase and phase step and writes
+ * them to the pipe's scaler registers, finishing with the combined
+ * MDSS_MDP_REG_SCALE_CONFIG word.
+ *
+ * Return: 0 on success; -EINVAL when scaling is requested on an
+ * unsupported pipe type or downscaling exceeds MAX_DOWNSCALE_RATIO.
+ */
+static int mdss_mdp_qseed2_setup(struct mdss_mdp_pipe *pipe)
+{
+ u32 scale_config = 0;
+ int init_phasex = 0, init_phasey = 0;
+ int phasex_step = 0, phasey_step = 0;
+ u32 chroma_sample;
+ u32 filter_mode;
+ struct mdss_data_type *mdata;
+ u32 src_w, src_h;
+ u32 dcm_state = DCM_UNINIT;
+ u32 chroma_shift_x = 0, chroma_shift_y = 0;
+
+ pr_debug("pipe=%d, change pxl ext=%d\n", pipe->num,
+ pipe->scaler.enable);
+ mdata = mdss_mdp_get_mdata();
+
+ /* DMA and cursor pipes have no scaler: only 1:1 passes through */
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA ||
+ pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) {
+ if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
+ pr_err("no scaling supported on dma/cursor pipe, num:%d\n",
+ pipe->num);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ mdss_mdp_pp_get_dcm_state(pipe, &dcm_state);
+
+ /* content-adaptive filtering for YUV on rev >= 102, else bilinear */
+ if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
+ filter_mode = MDSS_MDP_SCALE_FILTER_CA;
+ else
+ filter_mode = MDSS_MDP_SCALE_FILTER_BIL;
+
+ /* effective source size after horizontal/vertical decimation */
+ src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+ src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+ /* a 90-degree rotated source swaps the H1V2/H2V1 chroma siting */
+ chroma_sample = pipe->src_fmt->chroma_sample;
+ if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+ if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
+ chroma_sample = MDSS_MDP_CHROMA_H2V1;
+ else if (chroma_sample == MDSS_MDP_CHROMA_H2V1)
+ chroma_sample = MDSS_MDP_CHROMA_H1V2;
+ }
+
+ /* default sharpening parameters when userspace provided none */
+ if (!(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_SHARP_CFG)) {
+ pipe->pp_cfg.sharp_cfg.flags = MDP_PP_OPS_ENABLE |
+ MDP_PP_OPS_WRITE;
+ pipe->pp_cfg.sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+ pipe->pp_cfg.sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+ pipe->pp_cfg.sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+ pipe->pp_cfg.sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+ }
+
+ /* sharpening only for YUV when not downscaling and not in DTM */
+ if (dcm_state != DTM_ENTER &&
+ ((pipe->src_fmt->is_yuv) &&
+ !((pipe->dst.w < src_w) || (pipe->dst.h < src_h)))) {
+ pp_sharp_config(pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_SHARP,
+ &pipe->pp_res.pp_sts,
+ &pipe->pp_cfg.sharp_cfg);
+ }
+
+ /* vertical scaling path */
+ if ((src_h != pipe->dst.h) ||
+ (pipe->src_fmt->is_yuv &&
+ (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE)) ||
+ (chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H1V2) ||
+ (pipe->scaler.enable && (src_h != pipe->dst.h))) {
+ pr_debug("scale y - src_h=%d dst_h=%d\n", src_h, pipe->dst.h);
+
+ if ((src_h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
+ pr_err("too much downscaling height=%d->%d\n",
+ src_h, pipe->dst.h);
+ return -EINVAL;
+ }
+
+ scale_config |= MDSS_MDP_SCALEY_EN;
+ phasey_step = pipe->scaler.phase_step_y[0];
+ init_phasey = pipe->scaler.init_phase_y[0];
+
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+ if (!pipe->vert_deci &&
+ ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
+ chroma_shift_y = 1; /* 2x upsample chroma */
+
+ /* PCMN filter when downscaling, else CA/bilinear */
+ if (src_h <= pipe->dst.h)
+ scale_config |= /* G/Y, A */
+ (filter_mode << 10) |
+ (MDSS_MDP_SCALE_FILTER_BIL << 18);
+ else
+ scale_config |= /* G/Y, A */
+ (MDSS_MDP_SCALE_FILTER_PCMN << 10) |
+ (MDSS_MDP_SCALE_FILTER_PCMN << 18);
+
+ if ((src_h >> chroma_shift_y) <= pipe->dst.h)
+ scale_config |= /* CrCb */
+ (MDSS_MDP_SCALE_FILTER_BIL << 14);
+ else
+ scale_config |= /* CrCb */
+ (MDSS_MDP_SCALE_FILTER_PCMN << 14);
+
+ writel_relaxed(init_phasey, pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+ writel_relaxed(phasey_step >> chroma_shift_y,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+ } else {
+ if (src_h <= pipe->dst.h)
+ scale_config |= /* RGB, A */
+ (MDSS_MDP_SCALE_FILTER_BIL << 10) |
+ (MDSS_MDP_SCALE_FILTER_BIL << 18);
+ else
+ scale_config |= /* RGB, A */
+ (MDSS_MDP_SCALE_FILTER_PCMN << 10) |
+ (MDSS_MDP_SCALE_FILTER_PCMN << 18);
+ }
+ }
+
+ /* horizontal scaling path (mirror of the vertical logic above) */
+ if ((src_w != pipe->dst.w) ||
+ (pipe->src_fmt->is_yuv &&
+ (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE)) ||
+ (chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H2V1) ||
+ (pipe->scaler.enable && (src_w != pipe->dst.w))) {
+ pr_debug("scale x - src_w=%d dst_w=%d\n", src_w, pipe->dst.w);
+
+ if ((src_w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
+ pr_err("too much downscaling width=%d->%d\n",
+ src_w, pipe->dst.w);
+ return -EINVAL;
+ }
+
+ scale_config |= MDSS_MDP_SCALEX_EN;
+ init_phasex = pipe->scaler.init_phase_x[0];
+ phasex_step = pipe->scaler.phase_step_x[0];
+
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+ if (!pipe->horz_deci &&
+ ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (chroma_sample == MDSS_MDP_CHROMA_H2V1)))
+ chroma_shift_x = 1; /* 2x upsample chroma */
+
+ if (src_w <= pipe->dst.w)
+ scale_config |= /* G/Y, A */
+ (filter_mode << 8) |
+ (MDSS_MDP_SCALE_FILTER_BIL << 16);
+ else
+ scale_config |= /* G/Y, A */
+ (MDSS_MDP_SCALE_FILTER_PCMN << 8) |
+ (MDSS_MDP_SCALE_FILTER_PCMN << 16);
+
+ if ((src_w >> chroma_shift_x) <= pipe->dst.w)
+ scale_config |= /* CrCb */
+ (MDSS_MDP_SCALE_FILTER_BIL << 12);
+ else
+ scale_config |= /* CrCb */
+ (MDSS_MDP_SCALE_FILTER_PCMN << 12);
+
+ writel_relaxed(init_phasex, pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+ writel_relaxed(phasex_step >> chroma_shift_x,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+ } else {
+ if (src_w <= pipe->dst.w)
+ scale_config |= /* RGB, A */
+ (MDSS_MDP_SCALE_FILTER_BIL << 8) |
+ (MDSS_MDP_SCALE_FILTER_BIL << 16);
+ else
+ scale_config |= /* RGB, A */
+ (MDSS_MDP_SCALE_FILTER_PCMN << 8) |
+ (MDSS_MDP_SCALE_FILTER_PCMN << 16);
+ }
+ }
+
+ /*
+ * With explicit scaler state, program per-component phase values
+ * from the scaler struct; otherwise fall back to the values
+ * computed above (zero initial phase for the C03 components).
+ */
+ if (pipe->scaler.enable) {
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+ /*program x,y initial phase and phase step*/
+ writel_relaxed(pipe->scaler.init_phase_x[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
+ writel_relaxed(pipe->scaler.phase_step_x[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
+ writel_relaxed(pipe->scaler.init_phase_x[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+ writel_relaxed(pipe->scaler.phase_step_x[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+
+ writel_relaxed(pipe->scaler.init_phase_y[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
+ writel_relaxed(pipe->scaler.phase_step_y[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
+ writel_relaxed(pipe->scaler.init_phase_y[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+ writel_relaxed(pipe->scaler.phase_step_y[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+ } else {
+
+ writel_relaxed(pipe->scaler.phase_step_x[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+ writel_relaxed(pipe->scaler.phase_step_y[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+ writel_relaxed(pipe->scaler.init_phase_x[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_INIT_PHASE_X);
+ writel_relaxed(pipe->scaler.init_phase_y[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
+ }
+ } else {
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+ /*program x,y initial phase and phase step*/
+ writel_relaxed(0,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
+ writel_relaxed(init_phasex,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+ writel_relaxed(phasex_step,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
+ writel_relaxed(phasex_step >> chroma_shift_x,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+
+ writel_relaxed(0,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
+ writel_relaxed(init_phasey,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+ writel_relaxed(phasey_step,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
+ writel_relaxed(phasey_step >> chroma_shift_y,
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+ } else {
+
+ writel_relaxed(phasex_step,
+ pipe->base +
+ MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+ writel_relaxed(phasey_step,
+ pipe->base +
+ MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+ writel_relaxed(0,
+ pipe->base +
+ MDSS_MDP_REG_SCALE_INIT_PHASE_X);
+ writel_relaxed(0,
+ pipe->base +
+ MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
+ }
+ }
+
+ writel_relaxed(scale_config, pipe->base +
+ MDSS_MDP_REG_SCALE_CONFIG);
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_scaler_lut_cfg() - download QSEED3 coefficient LUTs.
+ * @scaler: userspace scaler config; lut_flag selects which LUT sets
+ *          (directional, Y/UV circular, Y/UV separable) to write and
+ *          the *_lut_idx fields pick the table within each set.
+ * @offset: ioremapped base of the scaler's LUT register region.
+ *
+ * Copies the selected coefficient tables from the globally cached
+ * mdata->scaler_off->lut_tbl into hardware, region by region, using
+ * the static offset_tbl ({word-count, register-offset} pairs per
+ * filter/region).  Finally requests a LUT bank swap when
+ * SCALER_LUT_SWAP is set.
+ *
+ * Return: 0 on success, -EINVAL when the cached LUT table is invalid.
+ */
+int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler,
+ char __iomem *offset)
+{
+ int i, j, filter;
+ struct mdss_data_type *mdata;
+ char __iomem *lut_addr;
+ uint32_t *lut_type[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ uint32_t lut_offset, lut_len;
+ struct mdss_mdp_qseed3_lut_tbl *lut_tbl;
+ /* for each filter, 4 lut regions offset and length table */
+ static uint32_t offset_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ mdata = mdss_mdp_get_mdata();
+ lut_tbl = &mdata->scaler_off->lut_tbl;
+ if ((!lut_tbl) || (!lut_tbl->valid)) {
+ pr_err("%s:Invalid QSEED3 LUT TABLE\n", __func__);
+ return -EINVAL;
+ }
+ if ((scaler->lut_flag & SCALER_LUT_DIR_WR) ||
+ (scaler->lut_flag & SCALER_LUT_Y_CIR_WR) ||
+ (scaler->lut_flag & SCALER_LUT_UV_CIR_WR) ||
+ (scaler->lut_flag & SCALER_LUT_Y_SEP_WR) ||
+ (scaler->lut_flag & SCALER_LUT_UV_SEP_WR)) {
+
+ /* resolve each requested LUT pointer from the cached tables */
+ if (scaler->lut_flag & SCALER_LUT_DIR_WR)
+ lut_type[0] = lut_tbl->dir_lut;
+ if (scaler->lut_flag & SCALER_LUT_Y_CIR_WR)
+ lut_type[1] =
+ lut_tbl->cir_lut + scaler->y_rgb_cir_lut_idx *
+ CIR_LUT_COEFFS;
+ if (scaler->lut_flag & SCALER_LUT_UV_CIR_WR)
+ lut_type[2] = lut_tbl->cir_lut +
+ scaler->uv_cir_lut_idx * CIR_LUT_COEFFS;
+ if (scaler->lut_flag & SCALER_LUT_Y_SEP_WR)
+ lut_type[3] =
+ lut_tbl->sep_lut + scaler->y_rgb_sep_lut_idx *
+ SEP_LUT_COEFFS;
+ if (scaler->lut_flag & SCALER_LUT_UV_SEP_WR)
+ lut_type[4] =
+ lut_tbl->sep_lut + scaler->uv_sep_lut_idx *
+ SEP_LUT_COEFFS;
+
+ /* for each filter per plane */
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut_type[filter])
+ continue;
+ lut_offset = 0;
+ /* for each lut region */
+ for (i = 0; i < 4; i++) {
+ lut_addr = offset +
+ offset_tbl[filter][i][1];
+ lut_len =
+ offset_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ writel_relaxed(
+ (lut_type[filter])
+ [lut_offset++],
+ lut_addr);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (scaler->lut_flag & SCALER_LUT_SWAP)
+ writel_relaxed(BIT(0), MDSS_MDP_REG_SCALER_COEF_LUT_CTRL +
+ offset);
+
+ return 0;
+}
+
+/*
+ * mdss_mdp_scaler_detail_enhance_cfg() - program QSEED3 detail
+ * enhancement (sharpening) registers.
+ * @detail_en: userspace DE parameters; nothing is written unless
+ *             detail_en->enable is set.
+ * @offset:    ioremapped base of the scaler register block.
+ *
+ * Packs the sharpen levels, limit/precision/clip controls, quiet and
+ * die-out thresholds, low/high thresholds and the three curve-adjust
+ * coefficient triplets into their register layouts and writes them.
+ * Field masks/shifts below mirror the register bit layout.
+ */
+static void mdss_mdp_scaler_detail_enhance_cfg(
+ struct mdp_det_enhance_data *detail_en,
+ char __iomem *offset)
+{
+
+ uint32_t sharp_lvl, sharp_ctl, shape_ctl;
+ uint32_t de_thr;
+ uint32_t adjust_a, adjust_b, adjust_c;
+
+ if (detail_en->enable) {
+ sharp_lvl = (detail_en->sharpen_level1 & 0x1FF) |
+ ((detail_en->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((detail_en->limit & 0xF) << 9) |
+ ((detail_en->prec_shift & 0x7) << 13) |
+ ((detail_en->clip & 0x7) << 16);
+
+ shape_ctl = (detail_en->thr_quiet & 0xFF) |
+ ((detail_en->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (detail_en->thr_low & 0x3FF) |
+ ((detail_en->thr_high & 0x3FF) << 16);
+
+ adjust_a = (detail_en->adjust_a[0] & 0x3FF) |
+ ((detail_en->adjust_a[1] & 0x3FF) << 10) |
+ ((detail_en->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (detail_en->adjust_b[0] & 0x3FF) |
+ ((detail_en->adjust_b[1] & 0x3FF) << 10) |
+ ((detail_en->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (detail_en->adjust_c[0] & 0x3FF) |
+ ((detail_en->adjust_c[1] & 0x3FF) << 10) |
+ ((detail_en->adjust_c[2] & 0x3FF) << 20);
+
+ writel_relaxed(sharp_lvl, MDSS_MDP_REG_SCALER_DE_SHARPEN +
+ offset);
+ writel_relaxed(sharp_ctl, MDSS_MDP_REG_SCALER_DE_SHARPEN_CTL +
+ offset);
+ writel_relaxed(shape_ctl, MDSS_MDP_REG_SCALER_DE_SHAPE_CTL +
+ offset);
+ writel_relaxed(de_thr, MDSS_MDP_REG_SCALER_DE_THRESHOLD +
+ offset);
+ writel_relaxed(adjust_a, MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_0
+ + offset);
+ writel_relaxed(adjust_b, MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_1
+ + offset);
+ writel_relaxed(adjust_c, MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_2
+ + offset);
+ }
+}
+
+/*
+ * mdss_mdp_qseed3_setup() - program the QSEED3 scaler for a source pipe
+ * (or, eventually, a destination scaler).
+ * @pipe:     source pipe carrying the scaler state and register base.
+ * @location: SSPP_VIG for source-pipe scaling; DSPP selects the
+ *            destination-scaler path, which is currently unfinished
+ *            and returns -ENOTSUPP.
+ * @id:       destination-scaler index (only used for location == DSPP).
+ *
+ * When scaler.enable is set: builds the OP_MODE word (filter configs,
+ * YUV color space, alpha, bit width, blend and direction-detection
+ * bits), packs initial phase / preload / src / dst size registers,
+ * optionally programs detail enhancement and downloads coefficient
+ * LUTs, then writes all phase-step registers.  When disabled, only
+ * clears SCALER_EN.  OP_MODE is read-modify-write in all cases.
+ *
+ * Return: 0 on success, -EINVAL for bad location/LUT failure,
+ * -ENOTSUPP for the not-yet-implemented destination scaler.
+ */
+int mdss_mdp_qseed3_setup(struct mdss_mdp_pipe *pipe,
+ int location, int id)
+{
+ int rc = 0;
+ struct mdp_scale_data_v2 *scaler;
+ struct mdss_data_type *mdata;
+ char __iomem *offset, *lut_offset;
+ struct mdss_mdp_format_params *fmt;
+ uint32_t op_mode;
+ uint32_t phase_init, preload, src_y_rgb, src_uv, dst;
+
+ mdata = mdss_mdp_get_mdata();
+ /* SRC pipe QSEED3 Configuration */
+ if (location == SSPP_VIG) {
+ scaler = &pipe->scaler;
+ offset = pipe->base + mdata->scaler_off->vig_scaler_off;
+ lut_offset = pipe->base + mdata->scaler_off->vig_scaler_lut_off;
+ fmt = pipe->src_fmt;
+ } else if (location == DSPP) {
+ /* Destination scaler QSEED3 Configuration */
+ if ((mdata->scaler_off->has_dest_scaler) &&
+ (id < mdata->scaler_off->ndest_scalers)) {
+ /* TODO :point to the destination params */
+ scaler = NULL;
+ offset = mdata->scaler_off->dest_base +
+ mdata->scaler_off->dest_scaler_off[id];
+ lut_offset = mdata->scaler_off->dest_base +
+ mdata->scaler_off->dest_scaler_lut_off[id];
+ /*TODO : set pixel fmt to RGB101010 */
+ return -ENOTSUPP;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ pr_debug("scaler->enable=%d", scaler->enable);
+ op_mode = readl_relaxed(MDSS_MDP_REG_SCALER_OP_MODE +
+ offset);
+
+ if (scaler->enable) {
+ op_mode |= SCALER_EN;
+ op_mode |= (scaler->y_rgb_filter_cfg & 0x3) <<
+ Y_FILTER_CFG;
+
+ if (fmt->is_yuv) {
+ op_mode |= (1 << SCALER_COLOR_SPACE);
+ op_mode |= (scaler->uv_filter_cfg & 0x3) <<
+ UV_FILTER_CFG;
+ }
+
+ if (fmt->alpha_enable) {
+ op_mode |= SCALER_ALPHA_EN;
+ op_mode |= (scaler->alpha_filter_cfg & 1) <<
+ ALPHA_FILTER_CFG;
+ }
+
+ /* TODO:if src_fmt is 10 bits program the bitwidth
+ * accordingly
+ */
+ if (!fmt->unpack_dx_format)
+ op_mode |= 0x1 << SCALER_BIT_WIDTH;
+
+ op_mode |= (scaler->blend_cfg & 1) <<
+ SCALER_BLEND_CFG;
+
+ op_mode |= (scaler->enable & ENABLE_DIRECTION_DETECTION) ?
+ (1 << SCALER_DIR_EN) : 0;
+ /* pack Y/UV horizontal+vertical initial phases into one word */
+ phase_init =
+ ((scaler->init_phase_x[0] & PHASE_BITS)
+ << Y_PHASE_INIT_H) |
+ ((scaler->init_phase_y[0] & PHASE_BITS) <<
+ Y_PHASE_INIT_V) |
+ ((scaler->init_phase_x[1] & PHASE_BITS) <<
+ UV_PHASE_INIT_H) |
+ ((scaler->init_phase_y[1] & PHASE_BITS) <<
+ UV_PHASE_INIT_V);
+
+ preload =
+ ((scaler->preload_x[0] & PRELOAD_BITS)
+ << Y_PRELOAD_H) |
+ ((scaler->preload_y[0] & PRELOAD_BITS) <<
+ Y_PRELOAD_V) |
+ ((scaler->preload_x[1] & PRELOAD_BITS) <<
+ UV_PRELOAD_H) |
+ ((scaler->preload_y[1] & PRELOAD_BITS) <<
+ UV_PRELOAD_V);
+
+ src_y_rgb = (scaler->src_width[0] & 0x1FFFF) |
+ ((scaler->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler->src_width[1] & 0x1FFFF) |
+ ((scaler->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler->dst_width & 0x1FFFF) |
+ ((scaler->dst_height & 0x1FFFF) << 16);
+
+ if (scaler->detail_enhance.enable) {
+ mdss_mdp_scaler_detail_enhance_cfg(
+ &scaler->detail_enhance,
+ offset);
+ op_mode |= SCALER_DE_EN;
+ }
+
+ /* LUT Config */
+ if (scaler->lut_flag) {
+ rc = mdss_mdp_scaler_lut_cfg(scaler, lut_offset);
+ if (rc) {
+ pr_err("%s:Failed QSEED3 LUT cfg\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ writel_relaxed(phase_init,
+ MDSS_MDP_REG_SCALER_PHASE_INIT +
+ offset);
+ writel_relaxed(scaler->phase_step_x[0] &
+ PHASE_STEP_BITS,
+ MDSS_MDP_REG_SCALER_PHASE_STEP_Y_H +
+ offset);
+
+ writel_relaxed(scaler->phase_step_y[0] &
+ PHASE_STEP_BITS,
+ MDSS_MDP_REG_SCALER_PHASE_STEP_Y_V + offset);
+
+ writel_relaxed(scaler->phase_step_x[1] &
+ PHASE_STEP_BITS,
+ MDSS_MDP_REG_SCALER_PHASE_STEP_UV_H + offset);
+
+ writel_relaxed(scaler->phase_step_y[1] &
+ PHASE_STEP_BITS,
+ MDSS_MDP_REG_SCALER_PHASE_STEP_UV_V + offset);
+
+ writel_relaxed(preload, MDSS_MDP_REG_SCALER_PRELOAD +
+ offset);
+ writel_relaxed(src_y_rgb,
+ MDSS_MDP_REG_SCALER_SRC_SIZE_Y_RGB_A +
+ offset);
+ writel_relaxed(src_uv, MDSS_MDP_REG_SCALER_SRC_SIZE_UV
+ + offset);
+
+ writel_relaxed(dst, MDSS_MDP_REG_SCALER_DST_SIZE +
+ offset);
+ } else {
+ op_mode &= ~SCALER_EN;
+ }
+
+ writel_relaxed(op_mode, MDSS_MDP_REG_SCALER_OP_MODE +
+ offset);
+ return rc;
+}
+
+/*
+ * mdss_mdp_scale_setup() - dispatch pipe scaling to the scaler
+ * generation present on this hardware: QSEED3 when the capability bit
+ * is set in mdata->mdss_caps_map, otherwise the legacy QSEED2 path.
+ * Returns whatever the selected setup routine returns.
+ */
+static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
+{
+ struct mdss_data_type *mdata;
+ int rc = 0;
+
+ mdata = mdss_mdp_get_mdata();
+ if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
+ rc = mdss_mdp_qseed3_setup(pipe, SSPP_VIG, 0);
+ else
+ rc = mdss_mdp_qseed2_setup(pipe);
+
+ return rc;
+}
+
+/*
+ * mdss_mdp_pipe_pp_setup() - full per-pipe post-processing setup.
+ * @pipe: pipe to configure; -ENODEV when NULL.
+ * @op:   opmode accumulator handed down to the per-type helper.
+ *
+ * Runs scaler setup first (failure aborts with -EINVAL), then
+ * dispatches to the VIG/RGB/DMA pp helper by pipe type; unknown
+ * types are not an error (debug log only, returns 0).
+ */
+int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+ int ret = 0;
+
+ if (!pipe)
+ return -ENODEV;
+
+ ret = mdss_mdp_scale_setup(pipe);
+ if (ret) {
+ pr_err("scale setup on pipe %d type %d failed ret %d\n",
+ pipe->num, pipe->type, ret);
+ return -EINVAL;
+ }
+
+ switch (pipe->type) {
+ case MDSS_MDP_PIPE_TYPE_VIG:
+ ret = pp_vig_pipe_setup(pipe, op);
+ break;
+ case MDSS_MDP_PIPE_TYPE_RGB:
+ ret = pp_rgb_pipe_setup(pipe, op);
+ break;
+ case MDSS_MDP_PIPE_TYPE_DMA:
+ ret = pp_dma_pipe_setup(pipe, op);
+ break;
+ default:
+ pr_debug("no PP setup for pipe type %d\n",
+ pipe->type);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * mdss_mdp_pipe_pp_clear() - release all per-pipe pp state.
+ * @pipe: pipe whose pp resources are torn down; NULL is logged and
+ *        ignored.
+ *
+ * Disables histogram collection on YUV-capable pipes, frees the four
+ * cached config payloads (PA, IGC, PCC, hist LUT) and NULLs both the
+ * pp_res copy and the pp_cfg alias of each pointer so no dangling
+ * reference survives, then resets the status struct and config_ops.
+ */
+void mdss_mdp_pipe_pp_clear(struct mdss_mdp_pipe *pipe)
+{
+ struct pp_hist_col_info *hist_info;
+
+ if (!pipe) {
+ pr_err("Invalid pipe context passed, %pK\n",
+ pipe);
+ return;
+ }
+
+ if (mdss_mdp_pipe_is_yuv(pipe)) {
+ hist_info = &pipe->pp_res.hist;
+ pp_hist_disable(hist_info);
+ }
+
+ kfree(pipe->pp_res.pa_cfg_payload);
+ pipe->pp_res.pa_cfg_payload = NULL;
+ pipe->pp_cfg.pa_v2_cfg_data.cfg_payload = NULL;
+ kfree(pipe->pp_res.igc_cfg_payload);
+ pipe->pp_res.igc_cfg_payload = NULL;
+ pipe->pp_cfg.igc_cfg.cfg_payload = NULL;
+ kfree(pipe->pp_res.pcc_cfg_payload);
+ pipe->pp_res.pcc_cfg_payload = NULL;
+ pipe->pp_cfg.pcc_cfg_data.cfg_payload = NULL;
+ kfree(pipe->pp_res.hist_lut_cfg_payload);
+ pipe->pp_res.hist_lut_cfg_payload = NULL;
+ pipe->pp_cfg.hist_lut_cfg.cfg_payload = NULL;
+
+ memset(&pipe->pp_res.pp_sts, 0, sizeof(struct pp_sts_type));
+ pipe->pp_cfg.config_ops = 0;
+}
+
+/*
+ * mdss_mdp_pipe_sspp_setup() - program per-pipe (SSPP) IGC LUT state.
+ * @pipe: pipe to configure; -EINVAL when NULL or of unknown type.
+ * @op:   SRC_OP_MODE accumulator; bit 16 (IGC_LUT_EN) is set here.
+ *
+ * In DTM_ENTER tuning mode the current IGC enable bit is simply read
+ * back from hardware and preserved in *op.  Otherwise the pipe-type
+ * specific IGC base, pipe list and count are selected, the pipe's
+ * index within its list resolved, and the IGC config applied through
+ * either the legacy pp_igc_config() or the versioned pp_ops[IGC]
+ * callback.
+ */
+int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+ int i, ret = 0;
+ unsigned long flags = 0;
+ char __iomem *pipe_base;
+ u32 pipe_num, pipe_cnt;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 current_opmode, location;
+ u32 dcm_state = DCM_UNINIT;
+ struct mdss_mdp_pipe *pipe_list;
+
+ if (pipe == NULL)
+ return -EINVAL;
+
+ mdss_mdp_pp_get_dcm_state(pipe, &dcm_state);
+
+ /* Read IGC state and update the same if tuning mode is enable */
+ if (dcm_state == DTM_ENTER) {
+ current_opmode = readl_relaxed(pipe->base +
+ MDSS_MDP_REG_SSPP_SRC_OP_MODE);
+ *op |= (current_opmode & BIT(16));
+ return ret;
+ }
+
+ /*
+ * TODO: should this function be responsible for masking multiple
+ * pipes to be written in dual pipe case?
+ * if so, requires rework of update_igc_lut
+ */
+ switch (pipe->type) {
+ case MDSS_MDP_PIPE_TYPE_VIG:
+ pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE;
+ pipe_cnt = mdata->nvig_pipes;
+ pipe_list = mdata->vig_pipes;
+ location = SSPP_VIG;
+ break;
+ case MDSS_MDP_PIPE_TYPE_RGB:
+ pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_RGB_BASE;
+ pipe_cnt = mdata->nrgb_pipes;
+ pipe_list = mdata->rgb_pipes;
+ location = SSPP_RGB;
+ break;
+ case MDSS_MDP_PIPE_TYPE_DMA:
+ pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_DMA_BASE;
+ pipe_cnt = mdata->ndma_pipes;
+ pipe_list = mdata->dma_pipes;
+ location = SSPP_DMA;
+ break;
+ case MDSS_MDP_PIPE_TYPE_CURSOR:
+ /* cursor does not support the feature */
+ return 0;
+ default:
+ pr_err("Invalid pipe type %d\n", pipe->type);
+ return -EINVAL;
+ }
+
+ /*
+ * Find this pipe's logical index: the pipe array is strided by
+ * multirect.max_rects entries per physical pipe.
+ */
+ for (i = 0, pipe_num = 0; pipe_num < pipe_cnt; pipe_num++) {
+ if (pipe->num == pipe_list[i].num)
+ break;
+ i += pipe->multirect.max_rects;
+ }
+
+ if (pipe_num == pipe_cnt) {
+ pr_err("Invalid pipe num %d pipe type %d\n",
+ pipe->num, pipe->type);
+ return -EINVAL;
+ }
+
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) {
+ flags |= PP_FLAGS_DIRTY_IGC;
+ if (!pp_ops[IGC].pp_set_config) {
+ pp_igc_config(flags, pipe_base, &pipe->pp_res.pp_sts,
+ &pipe->pp_cfg.igc_cfg, pipe_num, pipe_cnt);
+ } else {
+ /* versioned path: pipe index travels via .block */
+ pipe->pp_cfg.igc_cfg.block = pipe_num;
+ pipe_base = mdata->mdp_base +
+ mdata->pp_block_off.sspp_igc_lut_off;
+ pp_ops[IGC].pp_set_config(pipe_base,
+ &pipe->pp_res.pp_sts, &pipe->pp_cfg.igc_cfg,
+ location);
+ }
+ }
+
+ if (pipe->pp_res.pp_sts.igc_sts & PP_STS_ENABLE)
+ *op |= (1 << 16); /* IGC_LUT_EN */
+
+ return ret;
+}
+
+/*
+ * pp_mixer_setup() - apply layer-mixer post-processing (ARGC LUT).
+ * @mixer: mixer to program; mixer, its ctl and mfd must be valid.
+ *
+ * Picks the mixer's flush bit (LM3 is special-cased to BIT(20);
+ * writeback mixers use BIT(9)<<num, interface mixers BIT(6)<<num),
+ * then, when PP_FLAGS_DIRTY_ARGC is set for the display, programs
+ * the GC LUT either through the versioned pp_ops[GC] callback or the
+ * legacy pp_update_argc_lut() path and marks the LM for flush.
+ * Finally refreshes the LM opmode GC-enable bit whenever the LM is
+ * already queued for flushing with ARGC enabled.
+ */
+static int pp_mixer_setup(struct mdss_mdp_mixer *mixer)
+{
+ u32 flags, disp_num, opmode = 0, lm_bitmask = 0;
+ struct mdp_pgc_lut_data *pgc_config;
+ struct pp_sts_type *pp_sts;
+ struct mdss_mdp_ctl *ctl;
+ char __iomem *addr;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mixer || !mixer->ctl || !mixer->ctl->mfd || !mdata) {
+ pr_err("invalid parameters, mixer %pK ctl %pK mfd %pK mdata %pK\n",
+ mixer, (mixer ? mixer->ctl : NULL),
+ (mixer ? (mixer->ctl ? mixer->ctl->mfd : NULL) : NULL),
+ mdata);
+ return -EINVAL;
+ }
+ ctl = mixer->ctl;
+ disp_num = ctl->mfd->index;
+
+ if (disp_num < MDSS_BLOCK_DISP_NUM)
+ flags = mdss_pp_res->pp_disp_flags[disp_num];
+ else
+ flags = 0;
+
+ if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
+ lm_bitmask = BIT(20);
+ else if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK)
+ lm_bitmask = BIT(9) << mixer->num;
+ else
+ lm_bitmask = BIT(6) << mixer->num;
+
+ pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+ /* GC_LUT is in layer mixer */
+ if (flags & PP_FLAGS_DIRTY_ARGC) {
+ if (pp_ops[GC].pp_set_config) {
+ if (mdata->pp_block_off.lm_pgc_off == U32_MAX) {
+ pr_err("invalid pgc offset %d\n", U32_MAX);
+ } else {
+ addr = mixer->base +
+ mdata->pp_block_off.lm_pgc_off;
+ pp_ops[GC].pp_set_config(addr, pp_sts,
+ &mdss_pp_res->argc_disp_cfg[disp_num], LM);
+ }
+ } else {
+ pgc_config = &mdss_pp_res->argc_disp_cfg[disp_num];
+ if (pgc_config->flags & MDP_PP_OPS_WRITE) {
+ addr = mixer->base +
+ MDSS_MDP_REG_LM_GC_LUT_BASE;
+ pp_update_argc_lut(addr, pgc_config);
+ }
+ if (pgc_config->flags & MDP_PP_OPS_DISABLE)
+ pp_sts->argc_sts &= ~PP_STS_ENABLE;
+ else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
+ pp_sts->argc_sts |= PP_STS_ENABLE;
+ }
+ ctl->flush_bits |= lm_bitmask;
+ }
+
+ /* update LM opmode if LM needs flush */
+ if ((pp_sts->argc_sts & PP_STS_ENABLE) &&
+ (ctl->flush_bits & lm_bitmask)) {
+ if (pp_driver_ops.pp_opmode_config) {
+ pp_driver_ops.pp_opmode_config(LM, pp_sts,
+ &opmode, 0);
+ } else {
+ addr = mixer->base + MDSS_MDP_REG_LM_OP_MODE;
+ opmode = readl_relaxed(addr);
+ opmode |= (1 << 0); /* GC_LUT_EN */
+ writel_relaxed(opmode, addr);
+ }
+ }
+ return 0;
+}
+
+/*
+ * mdss_mdp_get_mixer_addr_off() - base register address of an
+ * interface mixer, or ERR_PTR(-EINVAL) when mixer_num is out of
+ * range (callers must check with IS_ERR()).
+ */
+static char __iomem *mdss_mdp_get_mixer_addr_off(u32 mixer_num)
+{
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_mixer *mixer;
+
+ mdata = mdss_mdp_get_mdata();
+ if (mdata->nmixers_intf <= mixer_num) {
+ pr_err("Invalid mixer_num=%d\n", mixer_num);
+ return ERR_PTR(-EINVAL);
+ }
+ mixer = mdata->mixer_intf + mixer_num;
+ return mixer->base;
+}
+
+/*
+ * mdss_mdp_get_dspp_addr_off() - DSPP register base for the given
+ * dspp_num, resolved through the matching interface mixer; returns
+ * ERR_PTR(-EINVAL) when the index exceeds the DSPP count (callers
+ * must check with IS_ERR()).
+ */
+static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
+{
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_mixer *mixer;
+
+ mdata = mdss_mdp_get_mdata();
+ if (mdata->ndspp <= dspp_num) {
+ pr_debug("destination not supported dspp_num=%d\n",
+ dspp_num);
+ return ERR_PTR(-EINVAL);
+ }
+ mixer = mdata->mixer_intf + dspp_num;
+ return mixer->dspp_base;
+}
+
+/*
+ * pp_hist_setup() - enable/refresh histogram collection state.
+ * Assumes the caller runs with MDP clocks already enabled.
+ * @op:     opmode accumulator; histogram-enable bit is OR'd in on the
+ *          legacy (no pp_ops[HIST].pp_set_config) path.
+ * @block:  packed location+block id, decoded via PP_LOCAT()/PP_BLOCK().
+ * @mix:    mixer context, required for the DSPP histogram path.
+ * @pp_sts: per-display pp status handed to the versioned callback.
+ *
+ * Resolves either the DSPP histogram (op bit 16) or, where supported,
+ * the SSPP/VIG histogram (op bit 8), then under the hist mutex and
+ * spinlock re-arms the histogram interrupt when collection is active
+ * and applies the enable state via pp_ops[HIST] or *op (see comments
+ * inline).  Returns 0 or a negative errno.
+ */
+static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
+ struct pp_sts_type *pp_sts)
+{
+ int ret = 0;
+ char __iomem *base;
+ u32 op_flags = 0, block_type = 0;
+ struct mdss_mdp_pipe *pipe;
+ struct pp_hist_col_info *hist_info;
+ unsigned long flag;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 intr_mask;
+
+ if (!mdata)
+ return -EPERM;
+
+ intr_mask = 1;
+ if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
+ /* HIST_EN */
+ block_type = DSPP;
+ op_flags = BIT(16);
+ hist_info = &mdss_pp_res->dspp_hist[mix->num];
+ base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
+ if (IS_ERR(base)) {
+ ret = -EPERM;
+ goto error;
+ }
+ } else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG &&
+ (pp_driver_ops.is_sspp_hist_supp) &&
+ (pp_driver_ops.is_sspp_hist_supp())) {
+ block_type = SSPP_VIG;
+ pipe = __get_hist_pipe(PP_BLOCK(block));
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_debug("pipe DNE (%d)\n",
+ (u32) PP_BLOCK(block));
+ ret = -ENODEV;
+ goto error;
+ }
+ op_flags = BIT(8);
+ hist_info = &pipe->pp_res.hist;
+ base = pipe->base;
+ mdss_mdp_pipe_unmap(pipe);
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ mutex_lock(&hist_info->hist_mutex);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ /*
+ * Set histogram interrupt if histogram collection is enabled. The
+ * interrupt register offsets are the same across different mdss
+ * versions so far, hence mdss_mdp_hist_irq_set_mask is used for
+ * all the mdss versions.
+ */
+ if (hist_info->col_en)
+ mdss_mdp_hist_irq_set_mask(intr_mask << hist_info->intr_shift);
+ /*
+ * Starting from msmcobalt, the histogram enable bit has been moved
+ * from DSPP opmode register to PA_HIST opmode register, hence we need
+ * to update the histogram enable bit differently based on mdss version.
+ * If HIST pp_set_config is defined, we will enable or disable the
+ * hist_en bit in PA_HIST opmode register inside HIST pp_set_config
+ * function; else, we only need to add the hist_en bit to the *op when
+ * histogram collection is enable, and *op will be passed to
+ * pp_dspp_setup to update the DSPP opmode register.
+ */
+ if (pp_ops[HIST].pp_set_config)
+ ret = pp_ops[HIST].pp_set_config(base, pp_sts, hist_info,
+ block_type);
+ else if (hist_info->col_en)
+ *op |= op_flags;
+
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ mutex_unlock(&hist_info->hist_mutex);
+error:
+ return ret;
+}
+
+/*
+ * pp_dither_config() - program DSPP dither depth and matrix.
+ * @addr:       dither register base (depth word first, 4x4 matrix
+ *              starting at +0x14, one packed word per matrix row).
+ * @pp_sts:     status struct; dither_sts enable/split bits updated
+ *              from the config flags.
+ * @dither_cfg: userspace dither config (depth indices + flags).
+ *
+ * Registers are written only under MDP_PP_OPS_WRITE; the enable /
+ * disable status flags are always refreshed.
+ */
+static void pp_dither_config(char __iomem *addr,
+ struct pp_sts_type *pp_sts,
+ struct mdp_dither_cfg_data *dither_cfg)
+{
+ u32 data;
+ int i;
+
+ if (dither_cfg->flags & MDP_PP_OPS_WRITE) {
+ data = dither_depth_map[dither_cfg->g_y_depth];
+ data |= dither_depth_map[dither_cfg->b_cb_depth] << 2;
+ data |= dither_depth_map[dither_cfg->r_cr_depth] << 4;
+ writel_relaxed(data, addr);
+ addr += 0x14;
+ for (i = 0; i < 16; i += 4) {
+ data = dither_matrix[i] |
+ (dither_matrix[i + 1] << 4) |
+ (dither_matrix[i + 2] << 8) |
+ (dither_matrix[i + 3] << 12);
+ writel_relaxed(data, addr);
+ addr += 4;
+ }
+ }
+ if (dither_cfg->flags & MDP_PP_OPS_DISABLE)
+ pp_sts->dither_sts &= ~PP_STS_ENABLE;
+ else if (dither_cfg->flags & MDP_PP_OPS_ENABLE)
+ pp_sts->dither_sts |= PP_STS_ENABLE;
+ pp_sts_set_split_bits(&pp_sts->dither_sts, dither_cfg->flags);
+}
+
+/*
+ * pp_dspp_opmode_config() - translate pp status bits into the DSPP
+ * opmode word.
+ * @ctl:     control path, used only to derive the display side.
+ * @num:     dspp number, mapped to a split-display side.
+ * @pp_sts:  per-display pp status whose *_sts flags select the bits.
+ * @mdp_rev: hardware revision; PA sub-feature bits need rev >= 103.
+ * @opmode:  output accumulator; bits are only ever OR'd in.
+ *
+ * Delegates entirely to pp_driver_ops.pp_opmode_config when a
+ * versioned implementation is registered.  Otherwise sets PA (and
+ * its per-component mask bits when PA is on for this side), PCC,
+ * IGC (with table index), hist LUT (which also forces PA_EN),
+ * dither, gamut (plus gamut-before-PCC ordering) and ARGC enables.
+ * Silently returns when the dspp is not on a valid side.
+ */
+static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
+ struct pp_sts_type *pp_sts, int mdp_rev,
+ u32 *opmode)
+{
+ int side;
+ bool pa_side_enabled = false;
+
+ side = pp_num_to_side(ctl, num);
+
+ if (side < 0)
+ return;
+
+ if (pp_driver_ops.pp_opmode_config) {
+ pp_driver_ops.pp_opmode_config(DSPP,
+ pp_sts, opmode, side);
+ return;
+ }
+
+ if (pp_sts_is_enabled(pp_sts->pa_sts, side)) {
+ *opmode |= MDSS_MDP_DSPP_OP_PA_EN; /* PA_EN */
+ pa_side_enabled = true;
+ }
+ if (mdp_rev >= MDSS_MDP_HW_REV_103 && pa_side_enabled) {
+ if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_HUE_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_SAT_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_VAL_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_CONT_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROTECT_EN)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_PROTECT_EN;
+ if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_SAT_ZERO_EXP_EN;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_COL_SKIN_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_COL_FOL_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_MEM_COL_SKY_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_SIX_ZONE_HUE_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_SIX_ZONE_SAT_MASK;
+ if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
+ *opmode |= MDSS_MDP_DSPP_OP_PA_SIX_ZONE_VAL_MASK;
+ }
+ if (pp_sts_is_enabled(pp_sts->pcc_sts, side))
+ *opmode |= MDSS_MDP_DSPP_OP_PCC_EN; /* PCC_EN */
+
+ if (pp_sts_is_enabled(pp_sts->igc_sts, side)) {
+ *opmode |= MDSS_MDP_DSPP_OP_IGC_LUT_EN | /* IGC_LUT_EN */
+ (pp_sts->igc_tbl_idx << 1);
+ }
+ if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+ *opmode |= MDSS_MDP_DSPP_OP_HIST_LUTV_EN | /* HIST_LUT_EN */
+ MDSS_MDP_DSPP_OP_PA_EN; /* PA_EN */
+ }
+ if (pp_sts_is_enabled(pp_sts->dither_sts, side))
+ *opmode |= MDSS_MDP_DSPP_OP_DST_DITHER_EN; /* DITHER_EN */
+ if (pp_sts_is_enabled(pp_sts->gamut_sts, side)) {
+ *opmode |= MDSS_MDP_DSPP_OP_GAMUT_EN; /* GAMUT_EN */
+ if (pp_sts->gamut_sts & PP_STS_GAMUT_FIRST)
+ *opmode |= MDSS_MDP_DSPP_OP_GAMUT_PCC_ORDER;
+ }
+ if (pp_sts_is_enabled(pp_sts->pgc_sts, side))
+ *opmode |= MDSS_MDP_DSPP_OP_ARGC_LUT_EN;
+}
+
+static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
+{
+ u32 ad_flags, flags, dspp_num, opmode = 0, ad_bypass;
+ struct mdp_pgc_lut_data *pgc_config;
+ struct pp_sts_type *pp_sts = NULL;
+ char __iomem *base, *addr = NULL;
+ int ret = 0;
+ struct mdss_data_type *mdata;
+ struct mdss_ad_info *ad = NULL;
+ struct mdss_mdp_ad *ad_hw = NULL;
+ struct mdp_pa_v2_cfg_data *pa_v2_cfg_data = NULL;
+ struct mdss_mdp_ctl *ctl;
+ u32 mixer_cnt;
+ u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+ int side;
+
+ if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
+ return -EINVAL;
+ ctl = mixer->ctl;
+ mdata = ctl->mdata;
+ dspp_num = mixer->num;
+ /* no corresponding dspp */
+ if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
+ (dspp_num >= mdata->ndspp))
+ return -EINVAL;
+ base = mdss_mdp_get_dspp_addr_off(dspp_num);
+ if (IS_ERR(base))
+ return -EINVAL;
+
+ side = pp_num_to_side(ctl, dspp_num);
+ if (side < 0) {
+ pr_err("invalid side information for dspp_num %d", dspp_num);
+ return -EINVAL;
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ if ((mdata->pp_block_off.dspp_gamut_off != U32_MAX) &&
+ (pp_driver_ops.gamut_clk_gate_en))
+ pp_driver_ops.gamut_clk_gate_en(base +
+ mdata->pp_block_off.dspp_gamut_off);
+
+ if (disp_num < MDSS_BLOCK_DISP_NUM) {
+ pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+ pp_sts->side_sts = side;
+
+ ret = pp_hist_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer,
+ pp_sts);
+ if (ret)
+ goto dspp_exit;
+
+ flags = mdss_pp_res->pp_disp_flags[disp_num];
+ } else {
+ flags = 0;
+ }
+
+ mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+ if (dspp_num < mdata->nad_cfgs && disp_num < mdata->nad_cfgs &&
+ (mixer_cnt <= mdata->nmax_concurrent_ad_hw)) {
+ ad = &mdata->ad_cfgs[disp_num];
+ ad_flags = ad->reg_sts;
+ ad_hw = &mdata->ad_off[dspp_num];
+ } else {
+ ad_flags = 0;
+ }
+
+ /* nothing to update */
+ if ((!flags) && (!(opmode)) && (!ad_flags))
+ goto dspp_exit;
+
+ if (flags & PP_FLAGS_DIRTY_PA) {
+ if (!pp_ops[PA].pp_set_config) {
+ if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
+ pa_v2_cfg_data =
+ &mdss_pp_res->pa_v2_disp_cfg[disp_num];
+ pp_pa_v2_config(flags,
+ base + MDSS_MDP_REG_DSPP_PA_BASE,
+ pp_sts,
+ &pa_v2_cfg_data->pa_v2_data,
+ PP_DSPP);
+ } else
+ pp_pa_config(flags,
+ base + MDSS_MDP_REG_DSPP_PA_BASE,
+ pp_sts,
+ &mdss_pp_res->pa_disp_cfg[disp_num]);
+ } else {
+ pp_ops[PA].pp_set_config(base, pp_sts,
+ &mdss_pp_res->pa_v2_disp_cfg[disp_num],
+ DSPP);
+ }
+ }
+ if (flags & PP_FLAGS_DIRTY_PCC) {
+ if (!pp_ops[PCC].pp_set_config)
+ pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE,
+ pp_sts,
+ &mdss_pp_res->pcc_disp_cfg[disp_num]);
+ else {
+ if (mdata->pp_block_off.dspp_pcc_off == U32_MAX) {
+ pr_err("invalid pcc off %d\n", U32_MAX);
+ } else {
+ addr = base + mdata->pp_block_off.dspp_pcc_off;
+ pp_ops[PCC].pp_set_config(addr, pp_sts,
+ &mdss_pp_res->pcc_disp_cfg[disp_num],
+ DSPP);
+ }
+ }
+ }
+
+ if (flags & PP_FLAGS_DIRTY_IGC) {
+ if (!pp_ops[IGC].pp_set_config) {
+ pp_igc_config(flags,
+ mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE,
+ pp_sts, &mdss_pp_res->igc_disp_cfg[disp_num],
+ dspp_num, mdata->ndspp);
+ } else {
+ addr = mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE;
+ /* Pass dspp num using block */
+ mdss_pp_res->igc_disp_cfg[disp_num].block = dspp_num;
+ pp_ops[IGC].pp_set_config(addr, pp_sts,
+ &mdss_pp_res->igc_disp_cfg[disp_num],
+ DSPP);
+ }
+ }
+
+ if (flags & PP_FLAGS_DIRTY_ENHIST) {
+ if (!pp_ops[HIST_LUT].pp_set_config) {
+ pp_enhist_config(flags,
+ base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
+ pp_sts,
+ &mdss_pp_res->enhist_disp_cfg[disp_num]);
+
+ if ((pp_sts->enhist_sts & PP_STS_ENABLE) &&
+ !(pp_sts->pa_sts & PP_STS_ENABLE)) {
+ /* Program default value */
+ addr = base + MDSS_MDP_REG_DSPP_PA_BASE;
+ writel_relaxed(0, addr);
+ writel_relaxed(0, addr + 4);
+ writel_relaxed(0, addr + 8);
+ writel_relaxed(0, addr + 12);
+ }
+ } else {
+ /* Pass dspp num using block */
+ mdss_pp_res->enhist_disp_cfg[disp_num].block = dspp_num;
+ pp_ops[HIST_LUT].pp_set_config(base, pp_sts,
+ &mdss_pp_res->enhist_disp_cfg[disp_num], DSPP);
+ }
+ }
+
+ if (flags & PP_FLAGS_DIRTY_DITHER) {
+ if (!pp_ops[DITHER].pp_set_config) {
+ pp_dither_config(addr, pp_sts,
+ &mdss_pp_res->dither_disp_cfg[disp_num]);
+ } else {
+ addr = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
+ pp_ops[DITHER].pp_set_config(addr, pp_sts,
+ &mdss_pp_res->dither_disp_cfg[disp_num], DSPP);
+ }
+ }
+ if (flags & PP_FLAGS_DIRTY_GAMUT) {
+ if (!pp_ops[GAMUT].pp_set_config) {
+ pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num],
+ base, pp_sts);
+ } else {
+ if (mdata->pp_block_off.dspp_gamut_off == U32_MAX) {
+ pr_err("invalid gamut off %d\n", U32_MAX);
+ } else {
+ addr = base +
+ mdata->pp_block_off.dspp_gamut_off;
+ pp_ops[GAMUT].pp_set_config(addr, pp_sts,
+ &mdss_pp_res->gamut_disp_cfg[disp_num],
+ DSPP);
+ }
+ }
+ }
+
+ if (flags & PP_FLAGS_DIRTY_PGC) {
+ pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
+ if (pp_ops[GC].pp_set_config) {
+ if (mdata->pp_block_off.dspp_pgc_off == U32_MAX) {
+ pr_err("invalid pgc offset %d\n", U32_MAX);
+ } else {
+ addr = base +
+ mdata->pp_block_off.dspp_pgc_off;
+ pp_ops[GC].pp_set_config(addr, pp_sts,
+ &mdss_pp_res->pgc_disp_cfg[disp_num],
+ DSPP);
+ }
+ } else {
+ if (pgc_config->flags & MDP_PP_OPS_WRITE) {
+ addr = base + MDSS_MDP_REG_DSPP_GC_BASE;
+ pp_update_argc_lut(addr, pgc_config);
+ }
+ if (pgc_config->flags & MDP_PP_OPS_DISABLE)
+ pp_sts->pgc_sts &= ~PP_STS_ENABLE;
+ else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
+ pp_sts->pgc_sts |= PP_STS_ENABLE;
+ pp_sts_set_split_bits(&pp_sts->pgc_sts,
+ pgc_config->flags);
+ }
+ }
+
+ if (pp_sts != NULL)
+ pp_dspp_opmode_config(ctl, dspp_num, pp_sts, mdata->mdp_rev,
+ &opmode);
+
+ if (ad_hw) {
+ mutex_lock(&ad->lock);
+ ad_flags = ad->reg_sts;
+ if (ad_flags & PP_AD_STS_DIRTY_DATA)
+ pp_ad_input_write(ad_hw, ad);
+ if (ad_flags & PP_AD_STS_DIRTY_INIT)
+ pp_ad_init_write(ad_hw, ad, ctl);
+ if (ad_flags & PP_AD_STS_DIRTY_CFG)
+ pp_ad_cfg_write(ad_hw, ad);
+
+ if (ad->state & PP_AD_STATE_IPC_RESET) {
+ writel_relaxed(ad->cfg.t_filter_recursion,
+ ad_hw->base + MDSS_MDP_REG_AD_TFILT_CTRL);
+ writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
+ ad_hw->base + MDSS_MDP_REG_AD_MODE_SEL);
+ }
+
+ pp_ad_bypass_config(ad, ctl, ad_hw->num, &ad_bypass);
+ writel_relaxed(ad_bypass, ad_hw->base);
+ mutex_unlock(&ad->lock);
+ }
+
+ writel_relaxed(opmode, base + MDSS_MDP_REG_DSPP_OP_MODE);
+
+ if (dspp_num == MDSS_MDP_DSPP3)
+ ctl->flush_bits |= BIT(21);
+ else
+ ctl->flush_bits |= BIT(13 + dspp_num);
+
+ wmb(); /* ensure write is finished before progressing */
+dspp_exit:
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
+/*
+ * mdss_mdp_pp_setup() - entry point for post-processing programming.
+ * @ctl: control path to program
+ *
+ * Validates state, then runs the locked programming path under ctl->lock
+ * while the display is powered.  Returns -EINVAL for missing state,
+ * -EPERM when the ctl is powered off, else the locked path's result.
+ */
+int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
+{
+	int ret = 0;
+
+	/* check ctl itself before dereferencing (matches the locked path) */
+	if (!ctl || !ctl->mfd || !mdss_pp_res)
+		return -EINVAL;
+
+	/* TODO: have some sort of reader/writer lock to prevent unclocked
+	 * access while display power is toggled
+	 */
+	mutex_lock(&ctl->lock);
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		ret = -EPERM;
+		goto error;
+	}
+	ret = mdss_mdp_pp_setup_locked(ctl);
+error:
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_setup_locked() - program all dirty PP features for a ctl.
+ * @ctl: control path; caller must hold ctl->lock and have the path powered.
+ *
+ * Runs AD setup when the mixer topology supports it, bumps the register
+ * bus vote for heavy LUT reprogramming during resume, programs both
+ * mixers' PP blocks, then clears the per-display dirty flags.
+ */
+int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_data_type *mdata;
+	int ret = 0, i;
+	u32 flags, pa_v2_flags;
+	u32 max_bw_needed;
+	u32 mixer_cnt;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	u32 disp_num;
+	bool valid_mixers = true;
+	bool valid_ad_panel = true;
+
+	if ((!ctl) || (!ctl->mfd) || (!mdss_pp_res) || (!ctl->mdata))
+		return -EINVAL;
+
+	mdata = ctl->mdata;
+	/* treat fb_num the same as block logical id*/
+	disp_num = ctl->mfd->index;
+
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+	if (!mixer_cnt) {
+		valid_mixers = false;
+		ret = -EINVAL;
+		pr_warn("Configuring post processing without mixers, err = %d\n",
+									ret);
+		goto exit;
+	}
+	/* AD is only valid when every mixer of this display maps to an AD hw */
+	if (mdata->nad_cfgs == 0)
+		valid_mixers = false;
+	for (i = 0; i < mixer_cnt && valid_mixers; i++) {
+		if (mixer_id[i] >= mdata->nad_cfgs)
+			valid_mixers = false;
+	}
+	/* AD not run on DTV, nor on writeback for rev >= 103 panels */
+	valid_ad_panel = (ctl->mfd->panel_info->type != DTV_PANEL) &&
+		(((mdata->mdp_rev < MDSS_MDP_HW_REV_103) &&
+			(ctl->mfd->panel_info->type == WRITEBACK_PANEL)) ||
+		(ctl->mfd->panel_info->type != WRITEBACK_PANEL));
+
+	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw) &&
+		valid_ad_panel) {
+		ret = mdss_mdp_ad_setup(ctl->mfd);
+		if (ret < 0)
+			pr_warn("ad_setup(disp%d) returns %d\n", disp_num, ret);
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+
+	flags = mdss_pp_res->pp_disp_flags[disp_num];
+	if (pp_ops[PA].pp_set_config)
+		pa_v2_flags = mdss_pp_res->pa_v2_disp_cfg[disp_num].flags;
+	else
+		pa_v2_flags =
+			mdss_pp_res->pa_v2_disp_cfg[disp_num].pa_v2_data.flags;
+	/*
+	 * If a LUT based PP feature needs to be reprogrammed during resume,
+	 * increase the register bus bandwidth to maximum frequency
+	 * in order to speed up the register reprogramming.
+	 */
+	max_bw_needed = (IS_PP_RESUME_COMMIT(flags) &&
+				(IS_PP_LUT_DIRTY(flags) ||
+				IS_SIX_ZONE_DIRTY(flags, pa_v2_flags)));
+	if (mdata->pp_reg_bus_clt && max_bw_needed) {
+		ret = mdss_update_reg_bus_vote(mdata->pp_reg_bus_clt,
+				VOTE_INDEX_HIGH);
+		if (ret)
+			pr_err("Updated reg_bus_scale failed, ret = %d", ret);
+	}
+
+	if (ctl->mixer_left) {
+		pp_mixer_setup(ctl->mixer_left);
+		pp_dspp_setup(disp_num, ctl->mixer_left);
+		pp_ppb_setup(ctl->mixer_left);
+	}
+	if (ctl->mixer_right) {
+		pp_mixer_setup(ctl->mixer_right);
+		pp_dspp_setup(disp_num, ctl->mixer_right);
+		pp_ppb_setup(ctl->mixer_right);
+	}
+
+	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw) &&
+		valid_ad_panel) {
+		ret = mdss_mdp_ad_ipc_reset(ctl->mfd);
+		if (ret < 0)
+			pr_warn("ad_setup(disp%d) returns %d\n", disp_num, ret);
+	}
+
+	/* clear dirty flag */
+	if (disp_num < MDSS_BLOCK_DISP_NUM) {
+		mdss_pp_res->pp_disp_flags[disp_num] = 0;
+		if (disp_num < mdata->nad_cfgs)
+			mdata->ad_cfgs[disp_num].reg_sts = 0;
+	}
+
+	/* drop the temporary high bus vote taken above */
+	if (mdata->pp_reg_bus_clt && max_bw_needed) {
+		ret = mdss_update_reg_bus_vote(mdata->pp_reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+		if (ret)
+			pr_err("Updated reg_bus_scale failed, ret = %d", ret);
+	}
+	if (IS_PP_RESUME_COMMIT(flags))
+		mdss_pp_res->pp_disp_flags[disp_num] &=
+			~PP_FLAGS_RESUME_COMMIT;
+	mutex_unlock(&mdss_pp_mutex);
+exit:
+	return ret;
+}
+
+/*
+ * Set dirty and write bits on features that were enabled so they will be
+ * reconfigured
+ */
+int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd)
+{
+	u32 flags = 0, disp_num, ret = 0;
+	struct pp_sts_type pp_sts;
+	struct mdss_ad_info *ad;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdp_pa_v2_cfg_data *pa_v2_cache_cfg = NULL;
+
+	if (!mfd) {
+		pr_err("invalid input: mfd = 0x%pK\n", mfd);
+		return -EINVAL;
+	}
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_debug("PP not supported on display num %d hw config\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	disp_num = mfd->index;
+	pp_sts = mdss_pp_res->pp_disp_sts[disp_num];
+
+	/*
+	 * For each feature that was enabled before suspend: mark it dirty
+	 * and re-arm its WRITE op (unless the cached cfg asks for DISABLE),
+	 * so the next pp setup reprograms the hardware.
+	 */
+	if (pp_sts.pa_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PA;
+		pa_v2_cache_cfg = &mdss_pp_res->pa_v2_disp_cfg[disp_num];
+		/* PA flags live in different fields depending on PA version */
+		if (pp_ops[PA].pp_set_config) {
+			if (!(pa_v2_cache_cfg->flags & MDP_PP_OPS_DISABLE))
+				pa_v2_cache_cfg->flags |= MDP_PP_OPS_WRITE;
+		} else if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
+			if (!(pa_v2_cache_cfg->pa_v2_data.flags
+						& MDP_PP_OPS_DISABLE))
+				pa_v2_cache_cfg->pa_v2_data.flags |=
+					MDP_PP_OPS_WRITE;
+		} else {
+			if (!(mdss_pp_res->pa_disp_cfg[disp_num].flags
+						& MDP_PP_OPS_DISABLE))
+				mdss_pp_res->pa_disp_cfg[disp_num].flags |=
+					MDP_PP_OPS_WRITE;
+		}
+	}
+	if (pp_sts.pcc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PCC;
+		if (!(mdss_pp_res->pcc_disp_cfg[disp_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pcc_disp_cfg[disp_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.igc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_IGC;
+		if (!(mdss_pp_res->igc_disp_cfg[disp_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->igc_disp_cfg[disp_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.argc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_ARGC;
+		if (!(mdss_pp_res->argc_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->argc_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.enhist_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_ENHIST;
+		if (!(mdss_pp_res->enhist_disp_cfg[disp_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->enhist_disp_cfg[disp_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.dither_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_DITHER;
+		if (!(mdss_pp_res->dither_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->dither_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.gamut_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_GAMUT;
+		if (!(mdss_pp_res->gamut_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->gamut_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.pgc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PGC;
+		if (!(mdss_pp_res->pgc_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pgc_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+
+	mdss_pp_res->pp_disp_flags[disp_num] |= flags;
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_RESUME_COMMIT;
+
+	/* AD restaging: absent/forbidden AD is not an error for resume */
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+
+	mutex_lock(&ad->lock);
+	if (mfd->ipc_resume) {
+		mfd->ipc_resume = false;
+		if (PP_AD_STATE_RUN & ad->state) {
+			/* resume in IPC mode: restart frame counting */
+			ad->ipc_frame_count = 0;
+			ad->state |= PP_AD_STATE_IPC_RESUME;
+			ad->cfg.mode |= MDSS_AD_MODE_IPC_BIT;
+			pr_debug("switch mode to %d, last_ad_data = %d\n",
+				 ad->cfg.mode, ad->last_ad_data);
+		}
+	}
+
+	/* mark every previously-programmed AD stage dirty for rewrite */
+	if (PP_AD_STATE_CFG & ad->state)
+		ad->sts |= PP_AD_STS_DIRTY_CFG;
+	if (PP_AD_STATE_INIT & ad->state)
+		ad->sts |= PP_AD_STS_DIRTY_INIT;
+	if ((PP_AD_STATE_DATA & ad->state) &&
+			(ad->sts & PP_STS_ENABLE))
+		ad->sts |= PP_AD_STS_DIRTY_DATA;
+
+	if (PP_AD_STATE_RUN & ad->state)
+		ad->state &= ~PP_AD_STATE_VSYNC;
+	mutex_unlock(&ad->lock);
+
+	return 0;
+}
+
+static int mdss_mdp_pp_dt_parse(struct device *dev)
+{
+ int ret = -EINVAL;
+ struct device_node *node;
+ struct mdss_data_type *mdata;
+ u32 prop_val;
+
+ mdata = mdss_mdp_get_mdata();
+ if (dev && mdata) {
+ /* initialize offsets to U32_MAX */
+ memset(&mdata->pp_block_off, U8_MAX,
+ sizeof(mdata->pp_block_off));
+ node = of_get_child_by_name(dev->of_node,
+ "qcom,mdss-pp-offsets");
+ if (node) {
+ ret = of_property_read_u32(node,
+ "qcom,mdss-sspp-mdss-igc-lut-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-sspp-mdss-igc-lut-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.sspp_igc_lut_off =
+ prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-sspp-vig-pcc-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-sspp-vig-pcc-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.vig_pcc_off = prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-sspp-rgb-pcc-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-sspp-rgb-pcc-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.rgb_pcc_off = prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-sspp-dma-pcc-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-sspp-dma-pcc-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.dma_pcc_off = prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-lm-pgc-off",
+ &prop_val);
+
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-lm-pgc-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.lm_pgc_off = prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-dspp-gamut-off",
+ &prop_val);
+ if (ret) {
+ pr_debug("Could not read/find %s prop ret %d\n",
+ "qcom,mdss-dspp-gamut-off", ret);
+ mdata->pp_block_off.dspp_gamut_off = U32_MAX;
+ } else {
+ mdata->pp_block_off.dspp_gamut_off = prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-dspp-pcc-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-dspp-pcc-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.dspp_pcc_off = prop_val;
+ }
+
+ ret = of_property_read_u32(node,
+ "qcom,mdss-dspp-pgc-off",
+ &prop_val);
+ if (ret) {
+ pr_err("read property %s failed ret %d\n",
+ "qcom,mdss-dspp-pgc-off", ret);
+ goto bail_out;
+ } else {
+ mdata->pp_block_off.dspp_pgc_off = prop_val;
+ }
+ } else {
+ pr_debug("offsets are not supported\n");
+ ret = 0;
+ }
+ } else {
+ pr_err("invalid dev %pK mdata %pK\n", dev, mdata);
+ ret = -EINVAL;
+ }
+bail_out:
+ return ret;
+}
+
+/*
+ * mdss_mdp_pp_init() - one-time allocation of PP state and histogram info.
+ * @dev: owning device for devm allocations
+ *
+ * Allocates mdss_pp_res (once), resolves the driver ops table, and sets up
+ * histogram bookkeeping for every dspp and VIG pipe.  On failure, only the
+ * state allocated by THIS call is released and mdss_pp_res is reset to
+ * NULL so a later retry starts clean and no caller sees a dangling global.
+ */
+int mdss_mdp_pp_init(struct device *dev)
+{
+	int i, ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_pipe *vig;
+	struct pp_hist_col_info *hist = NULL;
+	u32 ctl_off = 0;
+	bool alloc_pp_res = false;
+
+	if (!mdata)
+		return -EPERM;
+
+	mdata->pp_reg_bus_clt = mdss_reg_bus_vote_client_create("pp\0");
+	if (IS_ERR(mdata->pp_reg_bus_clt))
+		pr_err("bus client register failed\n");
+
+	mutex_lock(&mdss_pp_mutex);
+	if (!mdss_pp_res) {
+		mdss_pp_res = devm_kzalloc(dev, sizeof(*mdss_pp_res),
+				GFP_KERNEL);
+		if (mdss_pp_res == NULL) {
+			ret = -ENOMEM;
+		} else {
+			alloc_pp_res = true;
+			if (mdss_mdp_pp_dt_parse(dev))
+				pr_info("No PP info in device tree\n");
+
+			ret = pp_get_driver_ops(&pp_driver_ops);
+			if (ret) {
+				pr_err("pp_get_driver_ops failed, ret=%d\n",
+						ret);
+				goto pp_exit;
+			}
+			pp_ops = pp_driver_ops.pp_ops;
+			hist = devm_kzalloc(dev,
+					sizeof(struct pp_hist_col_info) *
+					mdata->ndspp,
+					GFP_KERNEL);
+			if (hist == NULL) {
+				pr_err("dspp histogram allocation failed!\n");
+				ret = -ENOMEM;
+				goto pp_exit;
+			}
+			for (i = 0; i < mdata->ndspp; i++) {
+				mutex_init(&hist[i].hist_mutex);
+				spin_lock_init(&hist[i].hist_lock);
+				/* dspp hist irq bits start at 12, 4 apart */
+				hist[i].intr_shift = (i * 4) + 12;
+				if (pp_driver_ops.get_hist_offset) {
+					ret = pp_driver_ops.get_hist_offset(
+							DSPP, &ctl_off);
+					if (ret) {
+						pr_err("get_hist_offset ret %d\n",
+							ret);
+						goto hist_exit;
+					}
+					hist[i].base =
+						mdss_mdp_get_dspp_addr_off(i) +
+						ctl_off;
+				} else {
+					hist[i].base =
+						mdss_mdp_get_dspp_addr_off(i) +
+						MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+				}
+			}
+			/* the 4th dspp uses a non-contiguous interrupt bit */
+			if (mdata->ndspp == 4)
+				hist[3].intr_shift = 22;
+
+			mdss_pp_res->dspp_hist = hist;
+		}
+	}
+	if (mdata->vig_pipes) {
+		vig = mdata->vig_pipes;
+		for (i = 0; i < mdata->nvig_pipes; i++) {
+			mutex_init(&vig[i].pp_res.hist.hist_mutex);
+			spin_lock_init(&vig[i].pp_res.hist.hist_lock);
+			vig[i].pp_res.hist.intr_shift = (vig[i].num * 4);
+			/* the 4th VIG pipe's irq bit is non-contiguous */
+			if (i == 3)
+				vig[i].pp_res.hist.intr_shift = 10;
+			if (pp_driver_ops.get_hist_offset) {
+				ret = pp_driver_ops.get_hist_offset(
+						SSPP_VIG, &ctl_off);
+				if (ret) {
+					pr_err("get_hist_offset ret %d\n",
+						ret);
+					goto hist_exit;
+				}
+				vig[i].pp_res.hist.base = vig[i].base +
+					ctl_off;
+			} else {
+				vig[i].pp_res.hist.base = vig[i].base +
+					MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+			}
+		}
+	}
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+hist_exit:
+	/* hist is NULL when we got here from the vig loop with a
+	 * pre-existing mdss_pp_res; devm_kfree(dev, NULL) would WARN.
+	 */
+	if (hist)
+		devm_kfree(dev, hist);
+pp_exit:
+	/* Free only what this call allocated, and reset the global so a
+	 * live mdss_pp_res is never freed out from under its users and
+	 * stale pointers cannot be dereferenced after failure.
+	 */
+	if (alloc_pp_res) {
+		devm_kfree(dev, mdss_pp_res);
+		mdss_pp_res = NULL;
+	}
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_term() - release PP state allocated by mdss_mdp_pp_init().
+ * @dev: device the devm allocations belong to
+ */
+void mdss_mdp_pp_term(struct device *dev)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdss_pp_res) {
+		mutex_lock(&mdss_pp_mutex);
+		devm_kfree(dev, mdss_pp_res->dspp_hist);
+		devm_kfree(dev, mdss_pp_res);
+		mdss_pp_res = NULL;
+		mutex_unlock(&mdss_pp_mutex);
+	}
+
+	/* mdata may be NULL if probe failed before pp_init could run */
+	if (!mdata)
+		return;
+
+	mdss_reg_bus_vote_client_destroy(mdata->pp_reg_bus_clt);
+	mdata->pp_reg_bus_clt = NULL;
+}
+
+/*
+ * Install the PP callbacks (AD backlight calc, release-all) on a
+ * framebuffer device whose index falls inside the logical display range.
+ */
+int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mfd == NULL || mdata == NULL) {
+		pr_err("Invalid mfd %pK mdata %pK\n", mfd, mdata);
+		return -EPERM;
+	}
+
+	/* indices past the logical display block range get no PP hooks */
+	if (mfd->index >= (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0))
+		return 0;
+
+	mfd->mdp.pp_release_fnc = pp_mfd_release_all;
+	if (mdata->nad_cfgs)
+		mfd->mdp.ad_calc_bl = pp_ad_calc_bl;
+
+	return 0;
+}
+
+/*
+ * Apply the default PP configuration for a panel: dither sized for the
+ * panel's bpp, plus a limited-range IGC LUT for DTV panels.
+ */
+int mdss_mdp_pp_default_overlay_config(struct msm_fb_data_type *mfd,
+					struct mdss_panel_data *pdata,
+					bool enable)
+{
+	int rc;
+
+	if (mfd == NULL || pdata == NULL) {
+		pr_err("Invalid parameters mfd %pK pdata %pK\n", mfd, pdata);
+		return -EINVAL;
+	}
+
+	rc = mdss_mdp_panel_default_dither_config(mfd, pdata->panel_info.bpp,
+						  enable);
+	if (rc)
+		pr_err("Unable to configure default dither on fb%d ret %d\n",
+			mfd->index, rc);
+
+	if (pdata->panel_info.type != DTV_PANEL)
+		return rc;
+
+	rc = mdss_mdp_limited_lut_igc_config(mfd, enable);
+	if (rc)
+		pr_err("Unable to configure DTV panel default IGC ret %d\n",
+			rc);
+
+	return rc;
+}
+
+/*
+ * Return true when the backlight change exceeds the AD update threshold.
+ * The threshold scales with the new level: (curr_bl * al_thresh)/(base*4).
+ */
+static bool pp_ad_bl_threshold_check(int al_thresh, int base, int prev_bl,
+					int curr_bl)
+{
+	int threshold, delta;
+	bool exceeded;
+
+	pr_debug("al_thresh = %d, base = %d\n", al_thresh, base);
+	if (base <= 0) {
+		pr_debug("Invalid base for threshold calculation %d\n", base);
+		return false;
+	}
+
+	threshold = (curr_bl * al_thresh) / (base * 4);
+	delta = (curr_bl > prev_bl) ? (curr_bl - prev_bl)
+				    : (prev_bl - curr_bl);
+	exceeded = delta > threshold;
+	pr_debug("prev_bl =%d, curr_bl = %d, bl_thresh = %d, diff = %d, ret = %d\n",
+		prev_bl, curr_bl, threshold, delta, exceeded);
+
+	return exceeded;
+}
+
+/*
+ * pp_ad_calc_bl() - run the AD backlight linearize/attenuate chain.
+ * @mfd:           framebuffer device
+ * @bl_in:         requested backlight level
+ * @bl_out:        adjusted level to program (written only when attenuating)
+ * @bl_out_notify: set true when the AD block must be notified of the change
+ *
+ * Returns 0 on success (including "AD absent / BL 0 - nothing to do") or a
+ * negative error code.  Every locked exit path funnels through one unlock
+ * site so ad->lock cannot leak (the original duplicated unlock+return
+ * five times).
+ */
+static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
+	bool *bl_out_notify)
+{
+	int ret = -1;
+	int temp = bl_in;
+	u32 ad_bl_out = 0;
+	struct mdss_ad_info *ad;
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK.\n",
+			ret, ad);
+		return ret;
+	}
+
+	/* Don't update BL = 0 to AD */
+	if (bl_in == 0)
+		return 0;
+	mutex_lock(&ad->lock);
+	if (!mfd->ad_bl_level)
+		mfd->ad_bl_level = bl_in;
+	if (!(ad->sts & PP_STS_ENABLE)) {
+		pr_debug("AD is not enabled.\n");
+		ret = -EPERM;
+		goto bl_calc_exit;
+	}
+
+	if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
+		!ad->bl_att_lut) {
+		pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n",
+			ad->bl_mfd,
+			(!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
+			ad->bl_att_lut);
+		ret = -EINVAL;
+		goto bl_calc_exit;
+	}
+
+	ret = pp_ad_linearize_bl(ad, bl_in, &temp,
+		MDP_PP_AD_BL_LINEAR);
+	if (ret) {
+		pr_err("Failed to linearize BL: %d\n", ret);
+		goto bl_calc_exit;
+	}
+
+	if (ad->init.alpha > 0) {
+		ret = pp_ad_attenuate_bl(ad, temp, &temp);
+		if (ret) {
+			pr_err("Failed to attenuate BL: %d\n", ret);
+			goto bl_calc_exit;
+		}
+		ad_bl_out = temp;
+
+		/* map the attenuated level back to panel space */
+		ret = pp_ad_linearize_bl(ad, temp, &temp,
+			MDP_PP_AD_BL_LINEAR_INV);
+		if (ret) {
+			pr_err("Failed to inverse linearize BL: %d\n", ret);
+			goto bl_calc_exit;
+		}
+		*bl_out = temp;
+	} else {
+		ad_bl_out = temp;
+	}
+
+	/* only poke the AD block when the change is large enough */
+	if (pp_ad_bl_threshold_check(ad->init.al_thresh, ad->init.alpha_base,
+					ad->last_bl, ad_bl_out)) {
+		mfd->ad_bl_level = ad_bl_out;
+		pr_debug("backlight send to AD block: %d\n", mfd->ad_bl_level);
+		*bl_out_notify = true;
+		pp_ad_invalidate_input(mfd);
+	}
+
+	ret = 0;
+bl_calc_exit:
+	mutex_unlock(&ad->lock);
+	return ret;
+}
+
+/*
+ * Resolve the dspp index backing a logical display: the first interface
+ * mixer attached to it.  Returns -EPERM when the display has no mixers
+ * or no dspp-backed interface mixer.
+ */
+static int pp_get_dspp_num(u32 disp_num, u32 *dspp_num)
+{
+	int i;
+	u32 mixer_cnt;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+	if (!mdata || !mixer_cnt)
+		return -EPERM;
+
+	/* scan for the first mixer that belongs to an interface */
+	i = 0;
+	while (i < mixer_cnt && mixer_id[i] >= mdata->nmixers_intf)
+		i++;
+
+	if (i >= mixer_cnt || mixer_id[i] >= mdata->ndspp)
+		return -EPERM;
+
+	*dspp_num = mixer_id[i];
+	return 0;
+}
+
+/*
+ * mdss_mdp_pa_config() - legacy (pre-rev-103) picture adjustment config.
+ * @mfd:      framebuffer device the request arrived on
+ * @config:   user PA payload; populated on MDP_PP_OPS_READ
+ * @copyback: set to 1 when @config must be copied back to userspace
+ *
+ * Reads pull hue/sat/val/cont straight from the dspp registers; writes
+ * are cached and applied on the next pp setup via PP_FLAGS_DIRTY_PA.
+ */
+int mdss_mdp_pa_config(struct msm_fb_data_type *mfd,
+			struct mdp_pa_cfg_data *config,
+			u32 *copyback)
+{
+	int ret = 0;
+	u32 disp_num, dspp_num = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *pa_addr;
+
+	/* rev >= 103 hardware uses the PA v2 path instead */
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
+		return -EINVAL;
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			config->block,
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->pa_data.flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("no dspp connects to disp %d\n",
+				disp_num);
+			goto pa_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		/* hue/sat/val/cont live at consecutive 4-byte offsets */
+		pa_addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
+			MDSS_MDP_REG_DSPP_PA_BASE;
+		config->pa_data.hue_adj = readl_relaxed(pa_addr);
+		pa_addr += 4;
+		config->pa_data.sat_adj = readl_relaxed(pa_addr);
+		pa_addr += 4;
+		config->pa_data.val_adj = readl_relaxed(pa_addr);
+		pa_addr += 4;
+		config->pa_data.cont_adj = readl_relaxed(pa_addr);
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		mdss_pp_res->pa_disp_cfg[disp_num] = config->pa_data;
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
+	}
+
+pa_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * mdss_mdp_pa_v2_config() - PA v2 configuration for rev >= 103 hardware.
+ * @mfd:      framebuffer device the request arrived on
+ * @config:   user PA v2 payload; populated on MDP_PP_OPS_READ
+ * @copyback: set to 1 when @config must be copied back to userspace
+ *
+ * The flags field location depends on whether a versioned PA set_config
+ * op exists.  Reads go through the driver op when present, otherwise the
+ * legacy register reader; writes are cached (versioned cache or the
+ * per-display v2 cache, with the six-zone LUT copied from userspace)
+ * and flushed on the next pp setup via PP_FLAGS_DIRTY_PA.
+ */
+int mdss_mdp_pa_v2_config(struct msm_fb_data_type *mfd,
+			struct mdp_pa_v2_cfg_data *config,
+			u32 *copyback)
+{
+	int ret = 0;
+	u32 disp_num, dspp_num = 0;
+	char __iomem *pa_addr;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdp_pa_v2_cfg_data *pa_v2_cache = NULL;
+	struct mdp_pp_cache_res res_cache;
+	uint32_t flags = 0;
+
+	/* older hardware uses the legacy mdss_mdp_pa_config() path */
+	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
+		return -EINVAL;
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			config->block,
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	if (pp_ops[PA].pp_set_config)
+		flags = config->flags;
+	else
+		flags = config->pa_v2_data.flags;
+
+	/* left-only and right-only split bits are mutually exclusive */
+	if ((flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("no dspp connects to disp %d\n",
+				disp_num);
+			goto pa_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		pa_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
+		if (IS_ERR(pa_addr)) {
+			ret = PTR_ERR(pa_addr);
+			goto pa_clk_off;
+		}
+		if (pp_ops[PA].pp_get_config) {
+			ret = pp_ops[PA].pp_get_config(pa_addr, config,
+					DSPP, disp_num);
+			if (ret)
+				pr_err("PA get config failed %d\n", ret);
+		} else {
+			pa_addr += MDSS_MDP_REG_DSPP_PA_BASE;
+			ret = pp_read_pa_v2_regs(pa_addr,
+					&config->pa_v2_data,
+					disp_num);
+			if (ret)
+				goto pa_config_exit;
+			*copyback = 1;
+		}
+pa_clk_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		if (pp_ops[PA].pp_set_config) {
+			pr_debug("version of PA is %d\n", config->version);
+			res_cache.block = DSPP;
+			res_cache.mdss_pp_res = mdss_pp_res;
+			res_cache.pipe_res = NULL;
+			ret = pp_pa_cache_params(config, &res_cache);
+			if (ret) {
+				pr_err("PA config failed version %d ret %d\n",
+					config->version, ret);
+				ret = -EFAULT;
+				goto pa_config_exit;
+			}
+		} else {
+			if (flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
+				ret = pp_copy_pa_six_zone_lut(config, disp_num);
+				if (ret) {
+					pr_err("PA copy six zone lut failed ret %d\n",
+						ret);
+					goto pa_config_exit;
+				}
+			}
+			pa_v2_cache = &mdss_pp_res->pa_v2_disp_cfg[disp_num];
+			*pa_v2_cache = *config;
+			/* re-point the LUT pointers at our kernel copies,
+			 * not the userspace buffers in *config
+			 */
+			pa_v2_cache->pa_v2_data.six_zone_curve_p0 =
+				mdss_pp_res->six_zone_lut_curve_p0[disp_num];
+			pa_v2_cache->pa_v2_data.six_zone_curve_p1 =
+				mdss_pp_res->six_zone_lut_curve_p1[disp_num];
+		}
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
+	}
+
+pa_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+
+/*
+ * pp_read_pa_v2_regs() - read back the PA v2 register block.
+ * @addr:         dspp PA base (already offset by MDSS_MDP_REG_DSPP_PA_BASE)
+ * @pa_v2_config: destination; only fields whose enable flag is set are read
+ * @disp_num:     display index for the six-zone LUT staging buffers
+ *
+ * Walks the register block in hardware layout order, advancing @addr past
+ * each field whether or not it was requested, so later fields stay aligned.
+ * Six-zone LUT data is staged in mdss_pp_res then copied to userspace.
+ */
+static int pp_read_pa_v2_regs(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config,
+				u32 disp_num)
+{
+	int i;
+	u32 data;
+
+	if (pa_v2_config->flags & MDP_PP_PA_HUE_ENABLE)
+		pa_v2_config->global_hue_adj = readl_relaxed(addr);
+	addr += 4;
+	if (pa_v2_config->flags & MDP_PP_PA_SAT_ENABLE)
+		pa_v2_config->global_sat_adj = readl_relaxed(addr);
+	addr += 4;
+	if (pa_v2_config->flags & MDP_PP_PA_VAL_ENABLE)
+		pa_v2_config->global_val_adj = readl_relaxed(addr);
+	addr += 4;
+	if (pa_v2_config->flags & MDP_PP_PA_CONT_ENABLE)
+		pa_v2_config->global_cont_adj = readl_relaxed(addr);
+	addr += 4;
+
+	/* Six zone LUT and thresh data */
+	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
+		if (pa_v2_config->six_zone_len != MDP_SIX_ZONE_LUT_SIZE)
+			return -EINVAL;
+
+		/* arm the LUT index auto-update before streaming reads */
+		data = (3 << 25);
+		writel_relaxed(data, addr);
+
+		/* each entry: p1 at addr+4, then p0 (12 bits) back at addr;
+		 * the hardware advances its internal index per read pair
+		 */
+		for (i = 0; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+			addr += 4;
+			mdss_pp_res->six_zone_lut_curve_p1[disp_num][i] =
+				readl_relaxed(addr);
+			addr -= 4;
+			mdss_pp_res->six_zone_lut_curve_p0[disp_num][i] =
+				readl_relaxed(addr) & 0xFFF;
+		}
+
+		if (copy_to_user(pa_v2_config->six_zone_curve_p0,
+			&mdss_pp_res->six_zone_lut_curve_p0[disp_num][0],
+			pa_v2_config->six_zone_len * sizeof(u32))) {
+			return -EFAULT;
+		}
+
+		if (copy_to_user(pa_v2_config->six_zone_curve_p1,
+			&mdss_pp_res->six_zone_lut_curve_p1[disp_num][0],
+			pa_v2_config->six_zone_len * sizeof(u32))) {
+			return -EFAULT;
+		}
+
+		addr += 8;
+		pa_v2_config->six_zone_thresh = readl_relaxed(addr);
+		addr += 4;
+	} else {
+		/* skip the six-zone register window entirely */
+		addr += 12;
+	}
+
+	/* Skin memory color config registers */
+	if (pa_v2_config->flags & MDP_PP_PA_SKIN_ENABLE)
+		pp_read_pa_mem_col_regs(addr, &pa_v2_config->skin_cfg);
+
+	addr += 0x14;
+	/* Sky memory color config registers */
+	if (pa_v2_config->flags & MDP_PP_PA_SKY_ENABLE)
+		pp_read_pa_mem_col_regs(addr, &pa_v2_config->sky_cfg);
+
+	addr += 0x14;
+	/* Foliage memory color config registers */
+	if (pa_v2_config->flags & MDP_PP_PA_FOL_ENABLE)
+		pp_read_pa_mem_col_regs(addr, &pa_v2_config->fol_cfg);
+
+	return 0;
+}
+
+/* Read one 5-register PA memory-color block into @mem_col_cfg. */
+static void pp_read_pa_mem_col_regs(char __iomem *addr,
+				struct mdp_pa_mem_col_cfg *mem_col_cfg)
+{
+	mem_col_cfg->color_adjust_p0 = readl_relaxed(addr);
+	mem_col_cfg->color_adjust_p1 = readl_relaxed(addr + 0x4);
+	mem_col_cfg->hue_region = readl_relaxed(addr + 0x8);
+	mem_col_cfg->sat_region = readl_relaxed(addr + 0xC);
+	mem_col_cfg->val_region = readl_relaxed(addr + 0x10);
+}
+
+/*
+ * Copy both six-zone LUT curves from userspace into the per-display
+ * staging buffers.  Returns -EINVAL on bad length, -EFAULT on copy error.
+ */
+static int pp_copy_pa_six_zone_lut(struct mdp_pa_v2_cfg_data *pa_v2_config,
+				u32 disp_num)
+{
+	struct mdp_pa_v2_data *v2 = &pa_v2_config->pa_v2_data;
+	size_t lut_bytes;
+
+	if (v2->six_zone_len != MDP_SIX_ZONE_LUT_SIZE)
+		return -EINVAL;
+
+	lut_bytes = v2->six_zone_len * sizeof(u32);
+	if (copy_from_user(&mdss_pp_res->six_zone_lut_curve_p0[disp_num][0],
+			v2->six_zone_curve_p0, lut_bytes))
+		return -EFAULT;
+	if (copy_from_user(&mdss_pp_res->six_zone_lut_curve_p1[disp_num][0],
+			v2->six_zone_curve_p1, lut_bytes))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * pp_read_pcc_regs() - read back the PCC coefficient matrix.
+ * @addr:    PCC register base for the dspp
+ * @cfg_ptr: destination coefficient set
+ *
+ * Register layout: one group of three registers (r, g, b channel) per
+ * polynomial term, groups spaced 0x10 apart, in the order
+ * c, r, g, b, rr, rg, rb, gg, gb, bb, rgb_0, rgb_1.
+ */
+static void pp_read_pcc_regs(char __iomem *addr,
+				struct mdp_pcc_cfg_data *cfg_ptr)
+{
+	cfg_ptr->r.c = readl_relaxed(addr);
+	cfg_ptr->g.c = readl_relaxed(addr + 4);
+	cfg_ptr->b.c = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.r = readl_relaxed(addr);
+	cfg_ptr->g.r = readl_relaxed(addr + 4);
+	cfg_ptr->b.r = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.g = readl_relaxed(addr);
+	cfg_ptr->g.g = readl_relaxed(addr + 4);
+	cfg_ptr->b.g = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.b = readl_relaxed(addr);
+	cfg_ptr->g.b = readl_relaxed(addr + 4);
+	cfg_ptr->b.b = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rr = readl_relaxed(addr);
+	cfg_ptr->g.rr = readl_relaxed(addr + 4);
+	cfg_ptr->b.rr = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rg = readl_relaxed(addr);
+	cfg_ptr->g.rg = readl_relaxed(addr + 4);
+	cfg_ptr->b.rg = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rb = readl_relaxed(addr);
+	cfg_ptr->g.rb = readl_relaxed(addr + 4);
+	cfg_ptr->b.rb = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.gg = readl_relaxed(addr);
+	cfg_ptr->g.gg = readl_relaxed(addr + 4);
+	cfg_ptr->b.gg = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.gb = readl_relaxed(addr);
+	cfg_ptr->g.gb = readl_relaxed(addr + 4);
+	cfg_ptr->b.gb = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.bb = readl_relaxed(addr);
+	cfg_ptr->g.bb = readl_relaxed(addr + 4);
+	cfg_ptr->b.bb = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rgb_0 = readl_relaxed(addr);
+	cfg_ptr->g.rgb_0 = readl_relaxed(addr + 4);
+	cfg_ptr->b.rgb_0 = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rgb_1 = readl_relaxed(addr);
+	cfg_ptr->g.rgb_1 = readl_relaxed(addr + 4);
+	cfg_ptr->b.rgb_1 = readl_relaxed(addr + 8);
+}
+
+/*
+ * pp_update_pcc_regs() - program the PCC coefficient matrix.
+ * @addr:    PCC register base for the dspp
+ * @cfg_ptr: coefficient set to write
+ *
+ * Mirror of pp_read_pcc_regs(): three registers (r, g, b channel) per
+ * polynomial term, groups spaced 0x10 apart, in the order
+ * c, r, g, b, rr, rg, rb, gg, gb, bb, rgb_0, rgb_1.
+ */
+static void pp_update_pcc_regs(char __iomem *addr,
+				struct mdp_pcc_cfg_data *cfg_ptr)
+{
+	writel_relaxed(cfg_ptr->r.c, addr);
+	writel_relaxed(cfg_ptr->g.c, addr + 4);
+	writel_relaxed(cfg_ptr->b.c, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.r, addr);
+	writel_relaxed(cfg_ptr->g.r, addr + 4);
+	writel_relaxed(cfg_ptr->b.r, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.g, addr);
+	writel_relaxed(cfg_ptr->g.g, addr + 4);
+	writel_relaxed(cfg_ptr->b.g, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.b, addr);
+	writel_relaxed(cfg_ptr->g.b, addr + 4);
+	writel_relaxed(cfg_ptr->b.b, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rr, addr);
+	writel_relaxed(cfg_ptr->g.rr, addr + 4);
+	writel_relaxed(cfg_ptr->b.rr, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rg, addr);
+	writel_relaxed(cfg_ptr->g.rg, addr + 4);
+	writel_relaxed(cfg_ptr->b.rg, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rb, addr);
+	writel_relaxed(cfg_ptr->g.rb, addr + 4);
+	writel_relaxed(cfg_ptr->b.rb, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.gg, addr);
+	writel_relaxed(cfg_ptr->g.gg, addr + 4);
+	writel_relaxed(cfg_ptr->b.gg, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.gb, addr);
+	writel_relaxed(cfg_ptr->g.gb, addr + 4);
+	writel_relaxed(cfg_ptr->b.gb, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.bb, addr);
+	writel_relaxed(cfg_ptr->g.bb, addr + 4);
+	writel_relaxed(cfg_ptr->b.bb, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rgb_0, addr);
+	writel_relaxed(cfg_ptr->g.rgb_0, addr + 4);
+	writel_relaxed(cfg_ptr->b.rgb_0, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rgb_1, addr);
+	writel_relaxed(cfg_ptr->g.rgb_1, addr + 4);
+	writel_relaxed(cfg_ptr->b.rgb_1, addr + 8);
+}
+
+/*
+ * mdss_mdp_pcc_config() - read back or stage polynomial color correction.
+ * @mfd:      framebuffer device the request arrived on
+ * @config:   user PCC payload; populated on MDP_PP_OPS_READ
+ * @copyback: set to 1 when @config must be copied back to userspace
+ *
+ * Reads address the dspp resolved via pp_get_dspp_num(); writes are
+ * cached and flushed on the next pp setup via PP_FLAGS_DIRTY_PCC.
+ */
+int mdss_mdp_pcc_config(struct msm_fb_data_type *mfd,
+				struct mdp_pcc_cfg_data *config,
+				u32 *copyback)
+{
+	int ret = 0;
+	u32 disp_num, dspp_num = 0;
+	char __iomem *addr;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdp_pp_cache_res res_cache;
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			config->block,
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	if ((config->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto pcc_config_exit;
+		}
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		if (pp_ops[PCC].pp_get_config) {
+			/*
+			 * Use the dspp index resolved above, not the logical
+			 * display index: they differ when displays and dspp
+			 * blocks are not mapped 1:1 (matches the PA v2 and
+			 * legacy read paths below).
+			 */
+			addr = mdss_mdp_get_dspp_addr_off(dspp_num);
+			if (IS_ERR_OR_NULL(addr)) {
+				pr_err("invalid dspp base_addr %pK\n",
+					addr);
+				ret = -EINVAL;
+				goto pcc_clk_off;
+			}
+			if (mdata->pp_block_off.dspp_pcc_off == U32_MAX) {
+				pr_err("invalid pcc params off %d\n",
+					mdata->pp_block_off.dspp_pcc_off);
+				ret = -EINVAL;
+				goto pcc_clk_off;
+			}
+			addr += mdata->pp_block_off.dspp_pcc_off;
+			ret = pp_ops[PCC].pp_get_config(addr, config,
+					DSPP, disp_num);
+			if (ret)
+				pr_err("pcc get config failed %d\n", ret);
+			goto pcc_clk_off;
+		}
+
+		addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
+			MDSS_MDP_REG_DSPP_PCC_BASE;
+		pp_read_pcc_regs(addr, config);
+		*copyback = 1;
+pcc_clk_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		if (pp_ops[PCC].pp_set_config) {
+			pr_debug("version of pcc is %d\n", config->version);
+			res_cache.block = DSPP;
+			res_cache.mdss_pp_res = mdss_pp_res;
+			res_cache.pipe_res = NULL;
+			ret = pp_pcc_cache_params(config, &res_cache);
+			if (ret) {
+				pr_err("pcc config failed version %d ret %d\n",
+					config->version, ret);
+				ret = -EFAULT;
+				goto pcc_config_exit;
+			} else
+				goto pcc_set_dirty;
+		}
+		mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
+pcc_set_dirty:
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PCC;
+	}
+
+pcc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * Fill @cfg's c0/c1 and c2 tables from the software-cached IGC config
+ * for the display encoded in cfg->block (no register access).
+ */
+static void pp_read_igc_lut_cached(struct mdp_igc_lut_data *cfg)
+{
+	u32 disp = cfg->block - MDP_LOGICAL_BLOCK_DISP_0;
+	struct mdp_igc_lut_data *cached = &mdss_pp_res->igc_disp_cfg[disp];
+	int idx;
+
+	for (idx = 0; idx < IGC_LUT_ENTRIES; idx++) {
+		cfg->c0_c1_data[idx] = cached->c0_c1_data[idx];
+		cfg->c2_data[idx] = cached->c2_data[idx];
+	}
+}
+
+/*
+ * pp_read_igc_lut() - read one display's IGC LUT from hardware.
+ * @addr:      base of the IGC c0/c1 register pair (c2 follows at +8)
+ * @blk_idx:   block whose table should remain selected (its bit is
+ *             cleared from the disable mask)
+ * @total_idx: total number of blocks, used to build the mask
+ *
+ * Each register auto-increments its internal index on read, so the
+ * three cfg->len read loops below must not be reordered or merged.
+ */
+static void pp_read_igc_lut(struct mdp_igc_lut_data *cfg,
+				char __iomem *addr, u32 blk_idx, int32_t total_idx)
+{
+	int i;
+	u32 data;
+	int32_t mask = 0, idx = total_idx;
+
+	/* Build a mask of total_idx low bits. */
+	while (idx > 0) {
+		mask = (mask << 1) + 1;
+		idx--;
+	}
+	/* INDEX_UPDATE & VALUE_UPDATEN */
+	data = (3 << 24) | (((~(1 << blk_idx)) & mask) << 28);
+	writel_relaxed(data, addr);
+
+	/* c0 plane: low 12 bits of each auto-incremented read. */
+	for (i = 0; i < cfg->len; i++)
+		cfg->c0_c1_data[i] = readl_relaxed(addr) & 0xFFF;
+
+	/* c1 plane: packed into the high half of c0_c1_data. */
+	addr += 0x4;
+	writel_relaxed(data, addr);
+	for (i = 0; i < cfg->len; i++)
+		cfg->c0_c1_data[i] |= (readl_relaxed(addr) & 0xFFF) << 16;
+
+	/* c2 plane. */
+	addr += 0x4;
+	writel_relaxed(data, addr);
+	for (i = 0; i < cfg->len; i++)
+		cfg->c2_data[i] = readl_relaxed(addr) & 0xFFF;
+}
+
+/*
+ * pp_update_igc_lut() - program one display's IGC LUT into hardware.
+ * @addr:      base of the IGC c0/c1 register pair (c2 follows at +8)
+ * @blk_idx:   block whose table is being written (bit cleared from mask)
+ * @total_idx: total number of blocks, used to build the mask
+ *
+ * The first write of each plane carries the INDEX_UPDATE bit (resets
+ * the hardware index to 0); subsequent writes rely on auto-increment,
+ * so the write order within each loop is significant.
+ */
+static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
+				char __iomem *addr, u32 blk_idx,
+				u32 total_idx)
+{
+	int i;
+	u32 data;
+	int32_t mask = 0, idx = total_idx;
+
+	/* Build a mask of total_idx low bits. */
+	while (idx > 0) {
+		mask = (mask << 1) + 1;
+		idx--;
+	}
+
+	/* INDEX_UPDATE */
+	data = (1 << 25) | (((~(1 << blk_idx)) & mask) << 28);
+	writel_relaxed((cfg->c0_c1_data[0] & 0xFFF) | data, addr);
+
+	/* disable index update */
+	data &= ~(1 << 25);
+	for (i = 1; i < cfg->len; i++)
+		writel_relaxed((cfg->c0_c1_data[i] & 0xFFF) | data, addr);
+
+	/* c1 plane: upper 16 bits of c0_c1_data. */
+	addr += 0x4;
+	data |= (1 << 25);
+	writel_relaxed(((cfg->c0_c1_data[0] >> 16) & 0xFFF) | data, addr);
+	data &= ~(1 << 25);
+	for (i = 1; i < cfg->len; i++)
+		writel_relaxed(((cfg->c0_c1_data[i] >> 16) & 0xFFF) | data,
+				addr);
+
+	/* c2 plane. */
+	addr += 0x4;
+	data |= (1 << 25);
+	writel_relaxed((cfg->c2_data[0] & 0xFFF) | data, addr);
+	data &= ~(1 << 25);
+	for (i = 1; i < cfg->len; i++)
+		writel_relaxed((cfg->c2_data[i] & 0xFFF) | data, addr);
+}
+
+/*
+ * mdss_mdp_limited_lut_igc_config() - apply (or disable) the built-in
+ * limited-range IGC LUT on @mfd's display.
+ * @mfd:    target framebuffer device
+ * @enable: true to program the limited LUT, false to disable IGC
+ *
+ * Builds a kernel-sourced IGC request (copy_from_kernel = 1) using the
+ * igc_limited table for whichever IGC version the hardware reports and
+ * forwards it to mdss_mdp_igc_lut_config(). Returns 0 or negative errno.
+ *
+ * Fix: the call at the bottom passed the corrupted token "©back"
+ * (an HTML-entity mangling of "&copy...") instead of "&copyback",
+ * which does not compile; restored the correct argument.
+ */
+static int mdss_mdp_limited_lut_igc_config(struct msm_fb_data_type *mfd,
+							bool enable)
+{
+	int ret = 0;
+	u32 copyback = 0;
+	u32 copy_from_kernel = 1;
+	struct mdp_igc_lut_data config;
+	struct mdp_pp_feature_version igc_version = {
+		.pp_feature = IGC,
+	};
+	struct mdp_igc_lut_data_v1_7 igc_data;
+
+	if (!mfd)
+		return -EINVAL;
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_debug("IGC not supported on display num %d hw configuration\n",
+			mfd->index);
+		return 0;
+	}
+
+	ret = mdss_mdp_pp_get_version(&igc_version);
+	if (ret)
+		pr_err("failed to get default IGC version, ret %d\n", ret);
+
+	config.version = igc_version.version_info;
+	if (enable)
+		config.ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE;
+	else
+		config.ops = MDP_PP_OPS_DISABLE;
+	config.block = (mfd->index) + MDP_LOGICAL_BLOCK_DISP_0;
+	switch (config.version) {
+	case mdp_igc_v1_7:
+		/* v1.7 carries the tables in a separate payload struct. */
+		config.cfg_payload = &igc_data;
+		igc_data.table_fmt = mdp_igc_custom;
+		igc_data.len = IGC_LUT_ENTRIES;
+		igc_data.c0_c1_data = igc_limited;
+		igc_data.c2_data = igc_limited;
+		break;
+	case mdp_pp_legacy:
+	default:
+		/* Legacy format embeds the tables in the request itself. */
+		config.cfg_payload = NULL;
+		config.len = IGC_LUT_ENTRIES;
+		config.c0_c1_data = igc_limited;
+		config.c2_data = igc_limited;
+		break;
+	}
+
+	ret = mdss_mdp_igc_lut_config(mfd, &config, &copyback,
+					copy_from_kernel);
+	return ret;
+}
+
+/*
+ * mdss_mdp_igc_lut_config() - read back or stage the IGC LUT for the
+ * display tied to @mfd.
+ * @mfd:              target framebuffer device
+ * @config:           IGC request; config->ops selects READ vs WRITE
+ * @copyback:         set to 1 when LUT data was copied into @config
+ * @copy_from_kernel: non-zero when config's tables are kernel pointers
+ *                    (skip copy_from_user)
+ *
+ * Writes are cached and flagged dirty; hardware programming happens in
+ * the commit path. Returns 0 or a negative errno.
+ *
+ * Fix: the two config->len validation failures previously returned
+ * -EINVAL directly while mdss_pp_mutex was held, leaking the lock;
+ * both now exit through igc_config_exit so the mutex is released.
+ */
+int mdss_mdp_igc_lut_config(struct msm_fb_data_type *mfd,
+					struct mdp_igc_lut_data *config,
+					u32 *copyback, u32 copy_from_kernel)
+{
+	int ret = 0;
+	u32 tbl_idx, disp_num, dspp_num = 0;
+	struct mdp_igc_lut_data local_cfg;
+	char __iomem *igc_addr;
+	struct mdp_pp_cache_res res_cache;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			config->block,
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	/* Selecting both split-display halves at once is ambiguous. */
+	if ((config->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		if (config->len != IGC_LUT_ENTRIES) {
+			pr_err("invalid len for IGC table for read %d\n",
+			       config->len);
+			/* was: return -EINVAL (leaked mdss_pp_mutex) */
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto igc_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		/* Select which of the three IGC tables to read. */
+		if (config->ops & MDP_PP_IGC_FLAG_ROM0)
+			tbl_idx = 1;
+		else if (config->ops & MDP_PP_IGC_FLAG_ROM1)
+			tbl_idx = 2;
+		else
+			tbl_idx = 0;
+		igc_addr = mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE +
+			(0x10 * tbl_idx);
+		local_cfg = *config;
+		local_cfg.c0_c1_data =
+			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
+		local_cfg.c2_data =
+			&mdss_pp_res->igc_lut_c2[disp_num][0];
+		if (mdata->has_no_lut_read)
+			pp_read_igc_lut_cached(&local_cfg);
+		else {
+			if (pp_ops[IGC].pp_get_config) {
+				/*
+				 * NOTE(review): this overwrites the caller's
+				 * config->block with the dspp number; the
+				 * versioned op appears to expect it that way.
+				 */
+				config->block = dspp_num;
+				pp_ops[IGC].pp_get_config(igc_addr, config,
+							  DSPP, disp_num);
+				goto clock_off;
+			} else {
+				pp_read_igc_lut(&local_cfg, igc_addr,
+						dspp_num, mdata->ndspp);
+			}
+		}
+		if (copy_to_user(config->c0_c1_data, local_cfg.c0_c1_data,
+			config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			goto igc_config_exit;
+		}
+		if (copy_to_user(config->c2_data, local_cfg.c2_data,
+			config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			goto igc_config_exit;
+		}
+		*copyback = 1;
+clock_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		/* Versioned write path: validate + cache via helper. */
+		if (pp_ops[IGC].pp_set_config) {
+			res_cache.block = DSPP;
+			res_cache.mdss_pp_res = mdss_pp_res;
+			res_cache.pipe_res = NULL;
+			ret = pp_igc_lut_cache_params(config,
+					&res_cache, copy_from_kernel);
+			if (ret) {
+				pr_err("igc caching failed ret %d", ret);
+				goto igc_config_exit;
+			} else
+				goto igc_set_dirty;
+		}
+		if (config->len != IGC_LUT_ENTRIES) {
+			pr_err("invalid len for IGC table for write %d\n",
+			       config->len);
+			/* was: return -EINVAL (leaked mdss_pp_mutex) */
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		if (copy_from_kernel) {
+			memcpy(&mdss_pp_res->igc_lut_c0c1[disp_num][0],
+			       config->c0_c1_data, config->len * sizeof(u32));
+			memcpy(&mdss_pp_res->igc_lut_c2[disp_num][0],
+			       config->c2_data, config->len * sizeof(u32));
+		} else {
+			if (copy_from_user(
+				&mdss_pp_res->igc_lut_c0c1[disp_num][0],
+				config->c0_c1_data,
+				config->len * sizeof(u32))) {
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+			if (copy_from_user(
+				&mdss_pp_res->igc_lut_c2[disp_num][0],
+				config->c2_data, config->len * sizeof(u32))) {
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+		}
+		/* Cache the request, repointing tables at our storage. */
+		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
+		mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data =
+			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
+		mdss_pp_res->igc_disp_cfg[disp_num].c2_data =
+			&mdss_pp_res->igc_lut_c2[disp_num][0];
+igc_set_dirty:
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_IGC;
+	}
+
+igc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+/*
+ * pp_update_gc_one_lut() - program one color channel's GC LUT.
+ * @addr:       channel base; x_start, slope and offset registers are
+ *              4 bytes apart
+ * @lut_data:   stage table for this channel
+ * @num_stages: number of valid stages; entries past num_stages-1 are
+ *              written with the last stage (clamped via min())
+ *
+ * Each register auto-increments its internal segment index on write
+ * and reports the current index in bits 16-19 on read. Writing starts
+ * from the hardware's current index and wraps, so the two loops per
+ * register together cover all GC_LUT_SEGMENTS entries in order.
+ */
+static void pp_update_gc_one_lut(char __iomem *addr,
+		struct mdp_ar_gc_lut_data *lut_data,
+		uint8_t num_stages)
+{
+	int i, start_idx, idx;
+
+	/* x_start register */
+	start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
+	for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
+		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
+		writel_relaxed(lut_data[idx].x_start, addr);
+	}
+	for (i = 0; i < start_idx; i++) {
+		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
+		writel_relaxed(lut_data[idx].x_start, addr);
+	}
+	/* slope register */
+	addr += 4;
+	start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
+	for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
+		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
+		writel_relaxed(lut_data[idx].slope, addr);
+	}
+	for (i = 0; i < start_idx; i++) {
+		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
+		writel_relaxed(lut_data[idx].slope, addr);
+	}
+	/* offset register */
+	addr += 4;
+	start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
+	for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
+		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
+		writel_relaxed(lut_data[idx].offset, addr);
+	}
+	for (i = 0; i < start_idx; i++) {
+		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
+		writel_relaxed(lut_data[idx].offset, addr);
+	}
+}
+/* Program the R, G and B gamma-correction LUTs; channel banks sit 0x10 apart. */
+static void pp_update_argc_lut(char __iomem *addr,
+		struct mdp_pgc_lut_data *config)
+{
+	pp_update_gc_one_lut(addr + 0x00, config->r_data, config->num_r_stages);
+	pp_update_gc_one_lut(addr + 0x10, config->g_data, config->num_g_stages);
+	pp_update_gc_one_lut(addr + 0x20, config->b_data, config->num_b_stages);
+}
+/*
+ * pp_read_gc_one_lut() - read back one color channel's GC LUT.
+ * @addr:    channel base; x_start, slope and offset registers are
+ *           4 bytes apart
+ * @gc_data: output stage table (GC_LUT_SEGMENTS entries)
+ *
+ * Each register read returns the current segment's value (low bits)
+ * plus the segment index in bits 16-19, then auto-increments. Reading
+ * starts wherever the hardware index happens to be and wraps, so the
+ * paired loops per register fill all entries; do not reorder them.
+ */
+static void pp_read_gc_one_lut(char __iomem *addr,
+		struct mdp_ar_gc_lut_data *gc_data)
+{
+	int i, start_idx, data;
+
+	/* x_start: 12-bit values */
+	data = readl_relaxed(addr);
+	start_idx = (data >> 16) & 0xF;
+	gc_data[start_idx].x_start = data & 0xFFF;
+
+	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
+		data = readl_relaxed(addr);
+		gc_data[i].x_start = data & 0xFFF;
+	}
+	for (i = 0; i < start_idx; i++) {
+		data = readl_relaxed(addr);
+		gc_data[i].x_start = data & 0xFFF;
+	}
+
+	/* slope: 15-bit values */
+	addr += 4;
+	data = readl_relaxed(addr);
+	start_idx = (data >> 16) & 0xF;
+	gc_data[start_idx].slope = data & 0x7FFF;
+	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
+		data = readl_relaxed(addr);
+		gc_data[i].slope = data & 0x7FFF;
+	}
+	for (i = 0; i < start_idx; i++) {
+		data = readl_relaxed(addr);
+		gc_data[i].slope = data & 0x7FFF;
+	}
+	/* offset: 15-bit values */
+	addr += 4;
+	data = readl_relaxed(addr);
+	start_idx = (data >> 16) & 0xF;
+	gc_data[start_idx].offset = data & 0x7FFF;
+	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
+		data = readl_relaxed(addr);
+		gc_data[i].offset = data & 0x7FFF;
+	}
+	for (i = 0; i < start_idx; i++) {
+		data = readl_relaxed(addr);
+		gc_data[i].offset = data & 0x7FFF;
+	}
+}
+
+/*
+ * Read back the R, G and B GC LUTs from hardware; channel banks sit
+ * 0x10 apart. Always returns 0 (kept int for call-site symmetry).
+ */
+static int pp_read_argc_lut(struct mdp_pgc_lut_data *config, char __iomem *addr)
+{
+	pp_read_gc_one_lut(addr + 0x00, config->r_data);
+	pp_read_gc_one_lut(addr + 0x10, config->g_data);
+	pp_read_gc_one_lut(addr + 0x20, config->b_data);
+	return 0;
+}
+
+/*
+ * Fill @config's R/G/B stage tables from the software-cached GC config
+ * (LM or DSPP copy, depending on the location bits in config->block).
+ * Returns 0, or -EINVAL for an unknown location.
+ */
+static int pp_read_argc_lut_cached(struct mdp_pgc_lut_data *config)
+{
+	struct mdp_pgc_lut_data *src;
+	u32 disp = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+	int seg;
+
+	switch (PP_LOCAT(config->block)) {
+	case MDSS_PP_LM_CFG:
+		src = &mdss_pp_res->argc_disp_cfg[disp];
+		break;
+	case MDSS_PP_DSPP_CFG:
+		src = &mdss_pp_res->pgc_disp_cfg[disp];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (seg = 0; seg < GC_LUT_SEGMENTS; seg++) {
+		config->r_data[seg].x_start = src->r_data[seg].x_start;
+		config->r_data[seg].slope   = src->r_data[seg].slope;
+		config->r_data[seg].offset  = src->r_data[seg].offset;
+
+		config->g_data[seg].x_start = src->g_data[seg].x_start;
+		config->g_data[seg].slope   = src->g_data[seg].slope;
+		config->g_data[seg].offset  = src->g_data[seg].offset;
+
+		config->b_data[seg].x_start = src->b_data[seg].x_start;
+		config->b_data[seg].slope   = src->b_data[seg].slope;
+		config->b_data[seg].offset  = src->b_data[seg].offset;
+	}
+
+	return 0;
+}
+
+/* Note: Assumes that its inputs have been checked by calling function */
+static void pp_update_hist_lut(char __iomem *addr,
+				struct mdp_hist_lut_data *cfg)
+{
+	int idx;
+	u32 swap_off;
+
+	for (idx = 0; idx < ENHIST_LUT_ENTRIES; idx++)
+		writel_relaxed(cfg->data[idx], addr);
+
+	/* Latch the new table: the swap register offset differs per block. */
+	swap_off = (PP_LOCAT(cfg->block) == MDSS_PP_DSPP_CFG) ? 4 : 16;
+	writel_relaxed(1, addr + swap_off);
+}
+
+/*
+ * mdss_mdp_argc_config() - read back or stage the area-reduced gamma
+ * correction (GC) LUT for a layer mixer or DSPP on @mfd's display.
+ * @mfd:      target framebuffer device
+ * @config:   GC request; location bits in config->block select LM vs
+ *            DSPP, config->flags selects READ vs WRITE
+ * @copyback: set to 1 when LUT data was copied back into @config
+ *
+ * Writes are cached and flagged dirty; hardware programming happens in
+ * the commit path. Returns 0 or a negative errno.
+ *
+ * Fixes:
+ *  - 'num' was declared u32 but assigned -1 and tested with 'num < 0',
+ *    which is always false for an unsigned type; it is now a signed int
+ *    so the invalid-mixer check actually fires.
+ *  - the switch default used to 'goto argc_config_exit', unlocking
+ *    mdss_pp_mutex that had not yet been taken (the lock is acquired
+ *    after the switch); it now returns -EINVAL directly.
+ */
+int mdss_mdp_argc_config(struct msm_fb_data_type *mfd,
+				struct mdp_pgc_lut_data *config,
+				u32 *copyback)
+{
+	int ret = 0;
+	u32 disp_num, is_lm = 0;
+	int num = 0;
+	struct mdp_pgc_lut_data local_cfg;
+	struct mdp_pgc_lut_data *pgc_ptr;
+	u32 tbl_size, r_size, g_size, b_size;
+	char __iomem *argc_addr = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl = NULL;
+	u32 dirty_flag = 0;
+
+	if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(PP_BLOCK(config->block) >= MDP_BLOCK_MAX)) {
+		pr_err("invalid block value %d\n", PP_BLOCK(config->block));
+		return -EINVAL;
+	}
+
+	/* Selecting both split-display halves at once is ambiguous. */
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	if ((PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0) !=
+			mfd->index) {
+		pr_err("PP block %d does not match corresponding mfd index %d\n",
+				config->block, mfd->index);
+		return -EINVAL;
+	}
+
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+	ctl = mfd_to_ctl(mfd);
+	num = (ctl && ctl->mixer_left) ? ctl->mixer_left->num : -1;
+	if (num < 0) {
+		pr_err("invalid mfd index %d config\n",
+			mfd->index);
+		return -EPERM;
+	}
+	switch (PP_LOCAT(config->block)) {
+	case MDSS_PP_LM_CFG:
+		/*
+		 * LM GC LUT should be disabled before being rewritten. Skip
+		 * GC LUT config if it is already enabled.
+		 * NOTE(review): this status read happens before the mutex is
+		 * taken below — confirm racing writers are not a concern.
+		 */
+		if ((mdss_pp_res->pp_disp_sts[disp_num].argc_sts &
+				PP_STS_ENABLE) &&
+				!(config->flags & MDP_PP_OPS_DISABLE)) {
+			pr_err("LM GC already enabled disp %d, skipping config\n",
+				mfd->index);
+			return -EPERM;
+		}
+		argc_addr = mdss_mdp_get_mixer_addr_off(num) +
+			MDSS_MDP_REG_LM_GC_LUT_BASE;
+		pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
+		dirty_flag = PP_FLAGS_DIRTY_ARGC;
+		break;
+	case MDSS_PP_DSPP_CFG:
+		if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+			pr_err("invalid mfd index %d for dspp config\n",
+				mfd->index);
+			return -EPERM;
+		}
+		argc_addr = mdss_mdp_get_dspp_addr_off(num) +
+					MDSS_MDP_REG_DSPP_GC_BASE;
+		pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
+		dirty_flag = PP_FLAGS_DIRTY_PGC;
+		break;
+	default:
+		/*
+		 * was: goto argc_config_exit, which unlocked mdss_pp_mutex
+		 * without it being held. The mutex is only taken below, so
+		 * bail out directly for an unknown location.
+		 */
+		pr_err("invalid location %d\n", PP_LOCAT(config->block));
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+
+	tbl_size = GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
+	if (config->flags & MDP_PP_OPS_READ) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		/* Versioned (v1.7+) read path delegates to the op table. */
+		if (pp_ops[GC].pp_get_config) {
+			char __iomem *temp_addr = NULL;
+			u32 off = 0;
+
+			is_lm = (PP_LOCAT(config->block) == MDSS_PP_LM_CFG);
+			off = (is_lm) ? mdata->pp_block_off.lm_pgc_off :
+				mdata->pp_block_off.dspp_pgc_off;
+			if (off == U32_MAX) {
+				pr_err("invalid offset for loc %d off %d\n",
+					PP_LOCAT(config->block), U32_MAX);
+				ret = -EINVAL;
+				goto clock_off;
+			}
+			temp_addr = (is_lm) ?
+				     mdss_mdp_get_mixer_addr_off(num) :
+				     mdss_mdp_get_dspp_addr_off(num);
+			if (IS_ERR_OR_NULL(temp_addr)) {
+				pr_err("invalid addr is_lm %d\n", is_lm);
+				ret = -EINVAL;
+				goto clock_off;
+			}
+			temp_addr += off;
+			ret = pp_ops[GC].pp_get_config(temp_addr, config,
+				((is_lm) ? LM : DSPP), disp_num);
+			if (ret)
+				pr_err("gc get config failed %d\n", ret);
+			goto clock_off;
+		}
+		/* Legacy read path: read into local storage, then copy out. */
+		local_cfg = *config;
+		local_cfg.r_data =
+			&mdss_pp_res->gc_lut_r[disp_num][0];
+		local_cfg.g_data =
+			&mdss_pp_res->gc_lut_g[disp_num][0];
+		local_cfg.b_data =
+			&mdss_pp_res->gc_lut_b[disp_num][0];
+		if (mdata->has_no_lut_read)
+			pp_read_argc_lut_cached(&local_cfg);
+		else
+			pp_read_argc_lut(&local_cfg, argc_addr);
+		if (copy_to_user(config->r_data,
+			&mdss_pp_res->gc_lut_r[disp_num][0], tbl_size)) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_to_user(config->g_data,
+			&mdss_pp_res->gc_lut_g[disp_num][0], tbl_size)) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_to_user(config->b_data,
+			&mdss_pp_res->gc_lut_b[disp_num][0], tbl_size)) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		*copyback = 1;
+clock_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		/* Versioned write path: validate + cache via helper. */
+		if (pp_ops[GC].pp_set_config) {
+			pr_debug("version of gc is %d\n", config->version);
+			is_lm = (PP_LOCAT(config->block) == MDSS_PP_LM_CFG);
+			ret = pp_pgc_lut_cache_params(config, mdss_pp_res,
+				((is_lm) ? LM : DSPP));
+			if (ret) {
+				pr_err("pgc cache params failed, ret %d\n",
+					ret);
+				goto argc_config_exit;
+			}
+		} else {
+			/* Legacy write path: bounds-check stage counts. */
+			r_size = config->num_r_stages *
+				sizeof(struct mdp_ar_gc_lut_data);
+			g_size = config->num_g_stages *
+				sizeof(struct mdp_ar_gc_lut_data);
+			b_size = config->num_b_stages *
+				sizeof(struct mdp_ar_gc_lut_data);
+			if (r_size > tbl_size ||
+			    g_size > tbl_size ||
+			    b_size > tbl_size ||
+			    r_size == 0 ||
+			    g_size == 0 ||
+			    b_size == 0) {
+				ret = -EINVAL;
+				pr_warn("%s, number of rgb stages invalid\n",
+						__func__);
+				goto argc_config_exit;
+			}
+			if (copy_from_user(&mdss_pp_res->gc_lut_r[disp_num][0],
+				config->r_data, r_size)) {
+				ret = -EFAULT;
+				goto argc_config_exit;
+			}
+			if (copy_from_user(&mdss_pp_res->gc_lut_g[disp_num][0],
+				config->g_data, g_size)) {
+				ret = -EFAULT;
+				goto argc_config_exit;
+			}
+			if (copy_from_user(&mdss_pp_res->gc_lut_b[disp_num][0],
+				config->b_data, b_size)) {
+				ret = -EFAULT;
+				goto argc_config_exit;
+			}
+
+			/* Cache the request, repointing at our storage. */
+			*pgc_ptr = *config;
+			pgc_ptr->r_data =
+				&mdss_pp_res->gc_lut_r[disp_num][0];
+			pgc_ptr->g_data =
+				&mdss_pp_res->gc_lut_g[disp_num][0];
+			pgc_ptr->b_data =
+				&mdss_pp_res->gc_lut_b[disp_num][0];
+		}
+		mdss_pp_res->pp_disp_flags[disp_num] |= dirty_flag;
+	}
+argc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+/*
+ * mdss_mdp_hist_lut_config() - read back or stage the histogram
+ * enhancement LUT for the DSPP driving @mfd.
+ * @mfd:      target framebuffer device
+ * @config:   hist LUT request; config->ops selects READ vs WRITE
+ * @copyback: set to 1 when LUT data was copied back into @config
+ *
+ * Writes are cached and flagged dirty; hardware programming happens in
+ * the commit path. Returns 0 or a negative errno.
+ */
+int mdss_mdp_hist_lut_config(struct msm_fb_data_type *mfd,
+					struct mdp_hist_lut_data *config,
+					u32 *copyback)
+{
+	int i, ret = 0;
+	u32 disp_num, dspp_num = 0;
+	char __iomem *hist_addr = NULL, *base_addr = NULL;
+	struct mdp_pp_cache_res res_cache;
+
+	ret = pp_validate_dspp_mfd_block(mfd, PP_BLOCK(config->block));
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			PP_BLOCK(config->block),
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto enhist_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		base_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
+		if (IS_ERR_OR_NULL(base_addr)) {
+			pr_err("invalid base addr %pK\n",
+				base_addr);
+			ret = -EINVAL;
+			goto hist_lut_clk_off;
+		}
+		hist_addr = base_addr + MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
+		/* Versioned (v1.7+) read path delegates to the op table. */
+		if (pp_ops[HIST_LUT].pp_get_config) {
+			ret = pp_ops[HIST_LUT].pp_get_config(base_addr, config,
+				   DSPP, disp_num);
+			if (ret)
+				pr_err("hist_lut get config failed %d\n", ret);
+			goto hist_lut_clk_off;
+		}
+
+		/* Legacy path: LUT register auto-increments per read. */
+		for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
+			mdss_pp_res->enhist_lut[disp_num][i] =
+				readl_relaxed(hist_addr);
+		if (copy_to_user(config->data,
+			&mdss_pp_res->enhist_lut[disp_num][0],
+			ENHIST_LUT_ENTRIES * sizeof(u32))) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto enhist_config_exit;
+		}
+		*copyback = 1;
+hist_lut_clk_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		/* Versioned write path: validate + cache via helper. */
+		if (pp_ops[HIST_LUT].pp_set_config) {
+			res_cache.block = DSPP;
+			res_cache.mdss_pp_res = mdss_pp_res;
+			res_cache.pipe_res = NULL;
+			ret = pp_hist_lut_cache_params(config, &res_cache);
+			if (ret) {
+				pr_err("hist_lut config failed version %d ret %d\n",
+					config->version, ret);
+				ret = -EFAULT;
+				goto enhist_config_exit;
+			} else {
+				goto enhist_set_dirty;
+			}
+		}
+		/* Legacy write path: cache, repointing at our storage. */
+		if (copy_from_user(&mdss_pp_res->enhist_lut[disp_num][0],
+			config->data, ENHIST_LUT_ENTRIES * sizeof(u32))) {
+			ret = -EFAULT;
+			goto enhist_config_exit;
+		}
+		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
+		mdss_pp_res->enhist_disp_cfg[disp_num].data =
+			&mdss_pp_res->enhist_lut[disp_num][0];
+enhist_set_dirty:
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_ENHIST;
+	}
+enhist_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * mdss_mdp_panel_default_dither_config() - apply the default dither
+ * setting appropriate for the panel's color depth, or disable dither.
+ * @mfd:       target framebuffer device
+ * @panel_bpp: panel depth in bits per pixel (24 and 18 are handled;
+ *             other depths leave dither disabled)
+ * @enable:    false forces MDP_PP_OPS_DISABLE regardless of depth
+ *
+ * Picks per-channel depths (8 for 24bpp, 6 for 18bpp) and packages
+ * them in either the v1.7 payload or the legacy in-struct fields,
+ * depending on the dither version the driver reports. Returns 0 or a
+ * negative errno from the version query / config call.
+ */
+static int mdss_mdp_panel_default_dither_config(struct msm_fb_data_type *mfd,
+					u32 panel_bpp, bool enable)
+{
+	int ret = 0;
+	struct mdp_dither_cfg_data dither;
+	struct mdp_pp_feature_version dither_version = {
+		.pp_feature = DITHER,
+	};
+	struct mdp_dither_data_v1_7 dither_data;
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_debug("dither config not supported on display num %d\n",
+			mfd->index);
+		return 0;
+	}
+
+	dither.block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0;
+	/* Default to disabled; the switch below may override. */
+	dither.flags = MDP_PP_OPS_DISABLE;
+
+	ret = mdss_mdp_pp_get_version(&dither_version);
+	if (ret) {
+		pr_err("failed to get default dither version, ret %d\n",
+			ret);
+		return ret;
+	}
+	dither.version = dither_version.version_info;
+	dither.cfg_payload = NULL;
+
+	if (enable) {
+		switch (panel_bpp) {
+		case 24:
+			dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
+			switch (dither.version) {
+			case mdp_dither_v1_7:
+				dither_data.g_y_depth = 8;
+				dither_data.r_cr_depth = 8;
+				dither_data.b_cb_depth = 8;
+				/*
+				 * Use default dither table by setting len to 0
+				 */
+				dither_data.len = 0;
+				dither.cfg_payload = &dither_data;
+				break;
+			case mdp_pp_legacy:
+			default:
+				dither.g_y_depth = 8;
+				dither.r_cr_depth = 8;
+				dither.b_cb_depth = 8;
+				dither.cfg_payload = NULL;
+				break;
+			}
+			break;
+		case 18:
+			dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
+			switch (dither.version) {
+			case mdp_dither_v1_7:
+				dither_data.g_y_depth = 6;
+				dither_data.r_cr_depth = 6;
+				dither_data.b_cb_depth = 6;
+				/*
+				 * Use default dither table by setting len to 0
+				 */
+				dither_data.len = 0;
+				dither.cfg_payload = &dither_data;
+				break;
+			case mdp_pp_legacy:
+			default:
+				dither.g_y_depth = 6;
+				dither.r_cr_depth = 6;
+				dither.b_cb_depth = 6;
+				dither.cfg_payload = NULL;
+				break;
+			}
+			break;
+		default:
+			/* Unsupported depth: leave dither disabled. */
+			dither.cfg_payload = NULL;
+			break;
+		}
+	}
+	ret = mdss_mdp_dither_config(mfd, &dither, NULL, true);
+	if (ret)
+		pr_err("dither config failed, ret %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_dither_config() - stage a dither configuration for the DSPP
+ * driving @mfd (read-back is not supported).
+ * @mfd:              target framebuffer device
+ * @config:           dither request; config->flags must not contain
+ *                    MDP_PP_OPS_READ
+ * @copyback:         unused here (reads are rejected); may be NULL
+ * @copy_from_kernel: non-zero when config's payload is a kernel pointer
+ *
+ * The request is cached and flagged dirty; hardware programming
+ * happens in the commit path. Returns 0 or a negative errno.
+ */
+int mdss_mdp_dither_config(struct msm_fb_data_type *mfd,
+					struct mdp_dither_cfg_data *config,
+					u32 *copyback,
+					int copy_from_kernel)
+{
+	u32 disp_num;
+	int ret = 0;
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			config->block,
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("Dither read is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* Selecting both split-display halves at once is ambiguous. */
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	/* Versioned write path: validate + cache via helper. */
+	if (pp_ops[DITHER].pp_set_config) {
+		pr_debug("version of dither is %d\n", config->version);
+		ret = pp_dither_cache_params(config, mdss_pp_res,
+				copy_from_kernel);
+		if (ret) {
+			pr_err("dither config failed version %d ret %d\n",
+				config->version, ret);
+			goto dither_config_exit;
+		} else {
+			goto dither_set_dirty;
+		}
+	}
+
+	/* Legacy write path: shallow-copy the whole request. */
+	mdss_pp_res->dither_disp_cfg[disp_num] = *config;
+dither_set_dirty:
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_DITHER;
+dither_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * Validate the eight gamut table sizes in @config against the sizes
+ * the hardware expects. Returns 0 when all match, -EINVAL otherwise.
+ */
+static int pp_gm_has_invalid_lut_size(struct mdp_gamut_cfg_data *config)
+{
+	static const u32 expected_sz[] = {
+		GAMUT_T0_SIZE, GAMUT_T1_SIZE, GAMUT_T2_SIZE, GAMUT_T3_SIZE,
+		GAMUT_T4_SIZE, GAMUT_T5_SIZE, GAMUT_T6_SIZE, GAMUT_T7_SIZE,
+	};
+	int i;
+
+	for (i = 0; i < (int)(sizeof(expected_sz) / sizeof(expected_sz[0]));
+			i++) {
+		if (config->tbl_size[i] != expected_sz[i])
+			return -EINVAL;
+	}
+	return 0;
+}
+
+
+/*
+ * mdss_mdp_gamut_config() - read back or stage the 3D gamut mapping
+ * tables for the DSPP driving @mfd.
+ * @mfd:      target framebuffer device
+ * @config:   gamut request; config->flags selects READ vs WRITE
+ * @copyback: set to 1 when table data was copied back into @config
+ *
+ * Writes are cached and flagged dirty; hardware programming happens in
+ * the commit path. Returns 0 or a negative errno.
+ */
+int mdss_mdp_gamut_config(struct msm_fb_data_type *mfd,
+					struct mdp_gamut_cfg_data *config,
+					u32 *copyback)
+{
+	int i, j, ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 disp_num, dspp_num = 0;
+	uint16_t *tbl_off;
+	struct mdp_gamut_cfg_data local_cfg;
+	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
+	char __iomem *addr;
+	/* Index-reset command written before each table readout. */
+	u32 data = (3 << 20);
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+			config->block,
+			(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	/* Selecting both split-display halves at once is ambiguous. */
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto gamut_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		/* Versioned (v1.7+) read path delegates to the op table. */
+		if (pp_ops[GAMUT].pp_get_config) {
+			/*
+			 * NOTE(review): this path indexes the DSPP base by
+			 * disp_num while the legacy path below uses dspp_num;
+			 * confirm that is intentional.
+			 */
+			addr = mdss_mdp_get_dspp_addr_off(disp_num);
+			if (IS_ERR_OR_NULL(addr)) {
+				pr_err("invalid dspp base addr %pK\n",
+					addr);
+				ret = -EINVAL;
+				goto gamut_clk_off;
+			}
+			if (mdata->pp_block_off.dspp_gamut_off == U32_MAX) {
+				pr_err("invalid gamut parmas off %d\n",
+					mdata->pp_block_off.dspp_gamut_off);
+				ret = -EINVAL;
+				goto gamut_clk_off;
+			}
+			addr += mdata->pp_block_off.dspp_gamut_off;
+			ret = pp_ops[GAMUT].pp_get_config(addr, config, DSPP,
+						  disp_num);
+			if (ret)
+				pr_err("gamut get config failed %d\n", ret);
+			goto gamut_clk_off;
+		}
+		if (pp_gm_has_invalid_lut_size(config)) {
+			pr_err("invalid lut size for gamut\n");
+			ret = -EINVAL;
+			goto gamut_clk_off;
+		}
+		addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
+			  MDSS_MDP_REG_DSPP_GAMUT_BASE;
+		/*
+		 * Legacy path: R, G, B banks are consecutive registers
+		 * (addr advances 4 bytes after each bank's 8 tables);
+		 * each register auto-increments its table index on read.
+		 */
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			r_tbl[i] = kzalloc(
+				sizeof(uint16_t) * config->tbl_size[i],
+				GFP_KERNEL);
+			if (!r_tbl[i]) {
+				pr_err("%s: alloc failed\n", __func__);
+				ret = -ENOMEM;
+				goto gamut_clk_off;
+			}
+			/* Reset gamut LUT index to 0 */
+			writel_relaxed(data, addr);
+			for (j = 0; j < config->tbl_size[i]; j++)
+				r_tbl[i][j] = readl_relaxed(addr) & 0x1FFF;
+			addr += 4;
+			ret = copy_to_user(config->r_tbl[i], r_tbl[i],
+				     sizeof(uint16_t) * config->tbl_size[i]);
+			kfree(r_tbl[i]);
+			if (ret) {
+				pr_err("%s: copy tbl to usr failed\n",
+					__func__);
+				ret = -EFAULT;
+				goto gamut_clk_off;
+			}
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			g_tbl[i] = kzalloc(
+				sizeof(uint16_t) * config->tbl_size[i],
+				GFP_KERNEL);
+			if (!g_tbl[i]) {
+				pr_err("%s: alloc failed\n", __func__);
+				ret = -ENOMEM;
+				goto gamut_clk_off;
+			}
+			/* Reset gamut LUT index to 0 */
+			writel_relaxed(data, addr);
+			for (j = 0; j < config->tbl_size[i]; j++)
+				g_tbl[i][j] = readl_relaxed(addr) & 0x1FFF;
+			addr += 4;
+			ret = copy_to_user(config->g_tbl[i], g_tbl[i],
+				     sizeof(uint16_t) * config->tbl_size[i]);
+			kfree(g_tbl[i]);
+			if (ret) {
+				pr_err("%s: copy tbl to usr failed\n",
+					__func__);
+				ret = -EFAULT;
+				goto gamut_clk_off;
+			}
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			b_tbl[i] = kzalloc(
+				sizeof(uint16_t) * config->tbl_size[i],
+				GFP_KERNEL);
+			if (!b_tbl[i]) {
+				pr_err("%s: alloc failed\n", __func__);
+				ret = -ENOMEM;
+				goto gamut_clk_off;
+			}
+			/* Reset gamut LUT index to 0 */
+			writel_relaxed(data, addr);
+			for (j = 0; j < config->tbl_size[i]; j++)
+				b_tbl[i][j] = readl_relaxed(addr) & 0x1FFF;
+			addr += 4;
+			ret = copy_to_user(config->b_tbl[i], b_tbl[i],
+				     sizeof(uint16_t) * config->tbl_size[i]);
+			kfree(b_tbl[i]);
+			if (ret) {
+				pr_err("%s: copy tbl to usr failed\n",
+					__func__);
+				ret = -EFAULT;
+				goto gamut_clk_off;
+			}
+		}
+		*copyback = 1;
+gamut_clk_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		/* Versioned write path: validate + cache via helper. */
+		if (pp_ops[GAMUT].pp_set_config) {
+			pr_debug("version of gamut is %d\n", config->version);
+			ret = pp_gamut_cache_params(config, mdss_pp_res);
+			if (ret) {
+				pr_err("gamut config failed version %d ret %d\n",
+					config->version, ret);
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			} else {
+				goto gamut_set_dirty;
+			}
+		}
+		if (pp_gm_has_invalid_lut_size(config)) {
+			pr_err("invalid lut size for gamut\n");
+			ret = -EINVAL;
+			goto gamut_config_exit;
+		}
+		/*
+		 * Legacy write path: pack all 24 user tables back-to-back
+		 * into the display's gamut_tbl storage, repointing the
+		 * cached config's table pointers as we go.
+		 */
+		local_cfg = *config;
+		tbl_off = mdss_pp_res->gamut_tbl[disp_num];
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			local_cfg.r_tbl[i] = tbl_off;
+			if (copy_from_user(tbl_off, config->r_tbl[i],
+				config->tbl_size[i] * sizeof(uint16_t))) {
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			}
+			tbl_off += local_cfg.tbl_size[i];
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			local_cfg.g_tbl[i] = tbl_off;
+			if (copy_from_user(tbl_off, config->g_tbl[i],
+				config->tbl_size[i] * sizeof(uint16_t))) {
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			}
+			tbl_off += local_cfg.tbl_size[i];
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			local_cfg.b_tbl[i] = tbl_off;
+			if (copy_from_user(tbl_off, config->b_tbl[i],
+				config->tbl_size[i] * sizeof(uint16_t))) {
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			}
+			tbl_off += local_cfg.tbl_size[i];
+		}
+		mdss_pp_res->gamut_disp_cfg[disp_num] = local_cfg;
+gamut_set_dirty:
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_GAMUT;
+	}
+gamut_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * pp_hist_read() - drain one full histogram from hardware.
+ * @v_addr:    histogram value register (auto-increments per read; the
+ *             current bin index is reported in bits 24+)
+ * @hist_info: destination; data[] is filled and hist_cnt_read bumped
+ *
+ * Reading starts at whatever bin the hardware index is on and wraps,
+ * so the two loops together cover all HIST_V_SIZE bins; do not reorder
+ * or merge them. Returns the sum of all bin counts.
+ */
+static u32 pp_hist_read(char __iomem *v_addr,
+				struct pp_hist_col_info *hist_info)
+{
+	int i, i_start;
+	u32 sum = 0;
+	u32 data;
+
+	data = readl_relaxed(v_addr);
+	i_start = data >> 24;
+	hist_info->data[i_start] = data & 0xFFFFFF;
+	sum += hist_info->data[i_start];
+	for (i = i_start + 1; i < HIST_V_SIZE; i++) {
+		hist_info->data[i] = readl_relaxed(v_addr) & 0xFFFFFF;
+		sum += hist_info->data[i];
+	}
+	for (i = 0; i < i_start; i++) {
+		hist_info->data[i] = readl_relaxed(v_addr) & 0xFFFFFF;
+		sum += hist_info->data[i];
+	}
+	hist_info->hist_cnt_read++;
+	return sum;
+}
+
+/* Assumes that relevant clocks are enabled */
+static int pp_hist_enable(struct pp_hist_col_info *hist_info,
+ struct mdp_histogram_start_req *req,
+ struct mdss_mdp_ctl *ctl)
+{
+ unsigned long flag;
+ int ret = 0;
+
+ mutex_lock(&hist_info->hist_mutex);
+ /* check if it is idle */
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ if (hist_info->col_en) {
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ pr_err("%s Hist collection has already been enabled %pK\n",
+ __func__, hist_info->base);
+ ret = -EBUSY;
+ goto exit;
+ }
+ hist_info->col_state = HIST_IDLE;
+ hist_info->col_en = true;
+ hist_info->frame_cnt = req->frame_cnt;
+ hist_info->hist_cnt_read = 0;
+ hist_info->hist_cnt_sent = 0;
+ hist_info->hist_cnt_time = 0;
+ if (ctl && ctl->mfd) {
+ hist_info->ctl = ctl;
+ hist_info->disp_num =
+ ctl->mfd->index + MDP_LOGICAL_BLOCK_DISP_0;
+ }
+ /* if hist v2, make sure HW is unlocked */
+ writel_relaxed(0, hist_info->base);
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
/* Largest value a 24-bit histogram bin can hold; bounds frame_cnt below */
#define MDSS_MAX_HIST_BIN_SIZE 16777215
/*
 * mdss_mdp_hist_start() - start histogram collection for the target(s)
 * encoded in req->block: either a set of SSPP (VIG) pipes or the DSPPs
 * attached to one logical display.  Also requests the matching histogram
 * interrupt bits.
 *
 * Return: 0 on success; -EPROBE_DEFER if MDSS is not up yet, -EOPNOTSUPP
 * on pre-v1.3 hardware, or a negative errno on bad/busy configurations.
 */
int mdss_mdp_hist_start(struct mdp_histogram_start_req *req)
{
	struct pp_hist_col_info *hist_info;
	int i, ret = 0;
	u32 disp_num, dspp_num = 0;
	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 frame_size, intr_mask = 0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	bool sspp_hist_supp = false;

	if (!mdss_is_ready())
		return -EPROBE_DEFER;

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
		pr_err("Unsupported mdp rev %d\n", mdata->mdp_rev);
		return -EOPNOTSUPP;
	}

	/* SSPP histogram support is a per-target capability */
	if (pp_driver_ops.is_sspp_hist_supp)
		sspp_hist_supp = pp_driver_ops.is_sspp_hist_supp();

	if (!sspp_hist_supp &&
		(PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG)) {
		pr_warn("No histogram on SSPP\n");
		ret = -EINVAL;
		goto hist_exit;
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	if (PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG) {
		/* the low bits of req->block select individual pipes */
		i = MDSS_PP_ARG_MASK & req->block;
		if (!i) {
			ret = -EINVAL;
			pr_warn("Must pass pipe arguments, %d\n", i);
			goto hist_stop_clk;
		}

		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, req->block))
				continue;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe))
				continue;
			hist_info = &pipe->pp_res.hist;
			/*
			 * NOTE(review): pp_hist_enable()'s return value is
			 * overwritten on the next iteration and the interrupt
			 * is requested regardless — confirm this best-effort
			 * behavior is intended (the DSPP branch below bails
			 * out on the first failure).
			 */
			ret = pp_hist_enable(hist_info, req, NULL);
			intr_mask = 1 << hist_info->intr_shift;
			mdss_mdp_hist_intr_req(&mdata->hist_intr, intr_mask,
						true);
			mdss_mdp_pipe_unmap(pipe);
		}
	} else if (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG) {
		/*
		 * NOTE(review): this bounds failure exits with ret still 0,
		 * so an out-of-range display block reports success — confirm
		 * whether -EINVAL was intended here.
		 */
		if ((PP_BLOCK(req->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
			(PP_BLOCK(req->block) >= MDP_BLOCK_MAX))
			goto hist_stop_clk;

		disp_num = PP_BLOCK(req->block) - MDP_LOGICAL_BLOCK_DISP_0;
		mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);

		if (!mixer_cnt) {
			pr_err("%s, no dspp connects to disp %d\n",
				__func__, disp_num);
			ret = -EPERM;
			goto hist_stop_clk;
		}
		if (mixer_cnt > mdata->nmixers_intf) {
			pr_err("%s, Too many dspp connects to disp %d\n",
				__func__, mixer_cnt);
			ret = -EPERM;
			goto hist_stop_clk;
		}

		ctl = mdata->mixer_intf[mixer_id[0]].ctl;
		frame_size = (ctl->width * ctl->height);

		/* frame_cnt * frame_size must fit in a 24-bit bin */
		if (!frame_size ||
			((MDSS_MAX_HIST_BIN_SIZE / frame_size) <
			req->frame_cnt)) {
			pr_err("%s, too many frames for given display size, %d\n",
				__func__, req->frame_cnt);
			ret = -EINVAL;
			goto hist_stop_clk;
		}

		for (i = 0; i < mixer_cnt; i++) {
			dspp_num = mixer_id[i];
			if (dspp_num >= mdata->ndspp) {
				ret = -EINVAL;
				pr_warn("Invalid dspp num %d\n", dspp_num);
				goto hist_stop_clk;
			}
			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
			ret = pp_hist_enable(hist_info, req, ctl);
			if (ret) {
				pr_err("failed to enable histogram dspp_num %d ret %d\n",
					dspp_num, ret);
				goto hist_stop_clk;
			}
			/* accumulate bits; one combined irq request below */
			intr_mask |= 1 << hist_info->intr_shift;
			/* force the next display commit to re-program hist */
			mdss_pp_res->pp_disp_flags[disp_num] |=
				PP_FLAGS_DIRTY_HIST_COL;
		}
		mdss_mdp_hist_intr_req(&mdata->hist_intr, intr_mask,
					true);
	}
hist_stop_clk:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
hist_exit:
	return ret;
}
+
+static int pp_hist_disable(struct pp_hist_col_info *hist_info)
+{
+ int ret = 0;
+ unsigned long flag;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 intr_mask = 1;
+
+ mutex_lock(&hist_info->hist_mutex);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ if (hist_info->col_en == false) {
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ pr_debug("Histogram already disabled (%pK)\n", hist_info->base);
+ ret = -EINVAL;
+ goto exit;
+ }
+ hist_info->col_en = false;
+ hist_info->col_state = HIST_UNKNOWN;
+ hist_info->disp_num = 0;
+ hist_info->ctl = NULL;
+ /* make sure HW is unlocked */
+ writel_relaxed(0, hist_info->base);
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ mdss_mdp_hist_intr_req(&mdata->hist_intr,
+ intr_mask << hist_info->intr_shift, false);
+ ret = 0;
+exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
+int mdss_mdp_hist_stop(u32 block)
+{
+ int i, ret = 0;
+ u32 disp_num;
+ struct pp_hist_col_info *hist_info;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata)
+ return -EPERM;
+
+ if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
+ pr_err("Unsupported mdp rev %d\n", mdata->mdp_rev);
+ return -EOPNOTSUPP;
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
+ i = MDSS_PP_ARG_MASK & block;
+ if (!i) {
+ pr_warn("Must pass pipe arguments, %d\n", i);
+ goto hist_stop_clk;
+ }
+
+ for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
+ if (!PP_ARG(i, block))
+ continue;
+ pipe = __get_hist_pipe(i);
+ if (IS_ERR_OR_NULL(pipe)) {
+ pr_warn("Invalid Hist pipe (%d)\n", i);
+ continue;
+ }
+ hist_info = &pipe->pp_res.hist;
+ ret = pp_hist_disable(hist_info);
+ mdss_mdp_pipe_unmap(pipe);
+ if (ret)
+ goto hist_stop_clk;
+ }
+ } else if (PP_LOCAT(block) == MDSS_PP_DSPP_CFG) {
+ if ((PP_BLOCK(block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+ (PP_BLOCK(block) >= MDP_BLOCK_MAX))
+ goto hist_stop_clk;
+
+ disp_num = PP_BLOCK(block);
+ for (i = 0; i < mdata->ndspp; i++) {
+ hist_info = &mdss_pp_res->dspp_hist[i];
+ if (disp_num != hist_info->disp_num)
+ continue;
+ ret = pp_hist_disable(hist_info);
+ if (ret)
+ goto hist_stop_clk;
+ mdss_pp_res->pp_disp_flags[i] |=
+ PP_FLAGS_DIRTY_HIST_COL;
+ }
+ }
+hist_stop_clk:
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
+/**
+ * mdss_mdp_hist_intr_req() - Request changes the histogram interrupts
+ * @intr: structure containting state of interrupt register
+ * @bits: the bits on interrupt register that should be changed
+ * @en: true if bits should be set, false if bits should be cleared
+ *
+ * Adds or removes the bits from the interrupt request.
+ *
+ * Does not store reference count for each bit. I.e. a bit with multiple
+ * enable requests can be disabled with a single disable request.
+ *
+ * Return: 0 if uneventful, errno on invalid input
+ */
+int mdss_mdp_hist_intr_req(struct mdss_intr *intr, u32 bits, bool en)
+{
+ unsigned long flag;
+ int ret = 0;
+
+ if (!intr) {
+ pr_err("NULL addr passed, %pK\n", intr);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&intr->lock, flag);
+ if (en)
+ intr->req |= bits;
+ else
+ intr->req &= ~bits;
+ spin_unlock_irqrestore(&intr->lock, flag);
+
+ mdss_mdp_hist_intr_setup(intr, MDSS_IRQ_REQ);
+
+ return ret;
+}
+
+
+#define MDSS_INTR_STATE_ACTIVE 1
+#define MDSS_INTR_STATE_NULL 0
+#define MDSS_INTR_STATE_SUSPEND -1
+
+/**
+ * mdss_mdp_hist_intr_setup() - Manage intr and clk depending on requests.
+ * @intr: structure containting state of intr reg
+ * @state: MDSS_IRQ_SUSPEND if suspend is needed,
+ * MDSS_IRQ_RESUME if resume is needed,
+ * MDSS_IRQ_REQ if neither (i.e. requesting an interrupt)
+ *
+ * This function acts as a gatekeeper for the interrupt, making sure that the
+ * MDP clocks are enabled while the interrupts are enabled to prevent
+ * unclocked accesses.
+ *
+ * To reduce code repetition, 4 state transitions have been encoded here. Each
+ * transition updates the interrupt's state structure (mdss_intr) to reflect
+ * the which bits have been requested (intr->req), are currently enabled
+ * (intr->curr), as well as defines which interrupt bits need to be enabled or
+ * disabled ('en' and 'dis' respectively). The 4th state is not explicity
+ * coded in the if/else chain, but is for MDSS_IRQ_REQ's when the interrupt
+ * is in suspend, in which case, the only change required (intr->req being
+ * updated) has already occurred in the calling function.
+ *
+ * To control the clock, which can't be requested while holding the spinlock,
+ * the initial state is compared with the exit state to detect when the
+ * interrupt needs a clock.
+ *
+ * The clock requests surrounding the majority of this function serve to
+ * enable the register writes to change the interrupt register, as well as to
+ * prevent a race condition that could keep the clocks on (due to mdp_clk_cnt
+ * never being decremented below 0) when a enable/disable occurs but the
+ * disable requests the clocks disabled before the enable is able to request
+ * the clocks enabled.
+ *
+ * Return: 0 if uneventful, errno on repeated action or invalid input
+ */
+int mdss_mdp_hist_intr_setup(struct mdss_intr *intr, int type)
+{
+ unsigned long flag;
+ int ret = 0, req_clk = 0;
+ u32 en = 0, dis = 0;
+ u32 diff, init_curr;
+ int init_state;
+
+ if (!intr) {
+ WARN(1, "NULL intr pointer\n");
+ return -EINVAL;
+ }
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ spin_lock_irqsave(&intr->lock, flag);
+
+ init_state = intr->state;
+ init_curr = intr->curr;
+
+ if (type == MDSS_IRQ_RESUME) {
+ /* resume intrs */
+ if (intr->state == MDSS_INTR_STATE_ACTIVE) {
+ ret = -EPERM;
+ goto exit;
+ }
+ en = intr->req;
+ dis = 0;
+ intr->curr = intr->req;
+ intr->state = intr->curr ?
+ MDSS_INTR_STATE_ACTIVE : MDSS_INTR_STATE_NULL;
+ } else if (type == MDSS_IRQ_SUSPEND) {
+ /* suspend intrs */
+ if (intr->state == MDSS_INTR_STATE_SUSPEND) {
+ ret = -EPERM;
+ goto exit;
+ }
+ en = 0;
+ dis = intr->curr;
+ intr->curr = 0;
+ intr->state = MDSS_INTR_STATE_SUSPEND;
+ } else if (intr->state != MDSS_IRQ_SUSPEND &&
+ type == MDSS_IRQ_REQ) {
+ /* Not resuming/suspending or in suspend state */
+ diff = intr->req ^ intr->curr;
+ en = diff & ~intr->curr;
+ dis = diff & ~intr->req;
+ intr->curr = intr->req;
+ intr->state = intr->curr ?
+ MDSS_INTR_STATE_ACTIVE : MDSS_INTR_STATE_NULL;
+ }
+
+ if (en)
+ mdss_mdp_hist_irq_enable(en);
+ if (dis)
+ mdss_mdp_hist_irq_disable(dis);
+
+ if ((init_state != MDSS_INTR_STATE_ACTIVE) &&
+ (intr->state == MDSS_INTR_STATE_ACTIVE))
+ req_clk = 1;
+ else if ((init_state == MDSS_INTR_STATE_ACTIVE) &&
+ (intr->state != MDSS_INTR_STATE_ACTIVE))
+ req_clk = -1;
+
+exit:
+ spin_unlock_irqrestore(&intr->lock, flag);
+ if (req_clk < 0)
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ else if (req_clk > 0)
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
+static int pp_hist_collect(struct mdp_histogram_data *hist,
+ struct pp_hist_col_info *hist_info,
+ char __iomem *ctl_base, u32 expect_sum,
+ u32 block)
+{
+ int ret = 0;
+ u32 sum;
+ char __iomem *v_base = NULL;
+ unsigned long flag;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata)
+ return -EPERM;
+
+ mutex_lock(&hist_info->hist_mutex);
+ spin_lock_irqsave(&hist_info->hist_lock, flag);
+ if ((hist_info->col_en == 0) ||
+ (hist_info->col_state != HIST_READY)) {
+ pr_err("invalid params for histogram hist_info->col_en %d hist_info->col_state %d",
+ hist_info->col_en, hist_info->col_state);
+ ret = -ENODATA;
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ goto hist_collect_exit;
+ }
+ spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ if (pp_ops[HIST].pp_get_config) {
+ sum = pp_ops[HIST].pp_get_config(ctl_base, hist_info,
+ block, 0);
+ } else {
+ if (block == DSPP)
+ v_base = ctl_base +
+ MDSS_MDP_REG_DSPP_HIST_DATA_BASE;
+ else if (block == SSPP_VIG)
+ v_base = ctl_base +
+ MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+ sum = pp_hist_read(v_base, hist_info);
+ }
+ writel_relaxed(0, hist_info->base);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (sum < 0) {
+ pr_err("failed to get the hist data, sum = %d\n", sum);
+ ret = sum;
+ } else if (expect_sum && sum != expect_sum) {
+ pr_err("hist error: bin sum incorrect! (%d/%d)\n",
+ sum, expect_sum);
+ ret = -EINVAL;
+ }
+hist_collect_exit:
+ mutex_unlock(&hist_info->hist_mutex);
+ return ret;
+}
+
/*
 * mdss_mdp_hist_collect() - gather completed histogram data for the
 * pipes/display selected in hist->block and copy it to userspace
 * (hist->c0, hist->bin_cnt u32 bins).
 *
 * For a multi-DSPP display the per-DSPP bins are summed into a single
 * HIST_V_SIZE result; for multiple SSPP pipes the per-pipe results are
 * concatenated.
 *
 * Return: 0 on success, a negative errno on failure, or the nonzero
 * remainder from copy_to_user() if the final copy is short.
 * NOTE(review): callers receive that positive remainder rather than
 * -EFAULT — confirm the ioctl layer handles this convention.
 */
int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
{
	int i, j, off, ret = 0, temp_ret = 0;
	struct pp_hist_col_info *hist_info;
	struct pp_hist_col_info *hists[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 dspp_num, disp_num;
	char __iomem *ctl_base;
	u32 hist_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 *hist_concat = NULL;
	u32 *hist_data_addr;
	u32 pipe_cnt = 0;
	u32 pipe_num = MDSS_MDP_SSPP_VIG0;
	u32 exp_sum = 0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	unsigned long flag;

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
		pr_err("Unsupported mdp rev %d\n", mdata->mdp_rev);
		return -EOPNOTSUPP;
	}

	if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG) {
		if ((PP_BLOCK(hist->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
			(PP_BLOCK(hist->block) >= MDP_BLOCK_MAX))
			return -EINVAL;

		disp_num = PP_BLOCK(hist->block) - MDP_LOGICAL_BLOCK_DISP_0;
		hist_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);

		if (!hist_cnt) {
			pr_err("%s, no dspp connects to disp %d\n",
				__func__, disp_num);
			ret = -EPERM;
			goto hist_collect_exit;
		}
		if (hist_cnt > mdata->nmixers_intf) {
			pr_err("%s, Too many dspp connects to disp %d\n",
				__func__, hist_cnt);
			ret = -EPERM;
			goto hist_collect_exit;
		}

		/* resolve and validate every DSPP before touching any */
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			if (dspp_num >= mdata->ndspp) {
				ret = -EINVAL;
				pr_warn("Invalid dspp num %d\n", dspp_num);
				goto hist_collect_exit;
			}
			hists[i] = &mdss_pp_res->dspp_hist[dspp_num];
		}
		/* collect each DSPP; remember the first error in temp_ret */
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num);
			/* expected bin sum is the mixer's pixel count */
			exp_sum = (mdata->mixer_intf[dspp_num].width *
					mdata->mixer_intf[dspp_num].height);
			if (ret)
				temp_ret = ret;
			ret = pp_hist_collect(hist, hists[i], ctl_base,
				exp_sum, DSPP);
			if (ret)
				pr_err("hist error: dspp[%d] collect %d\n",
					dspp_num, ret);
		}
		/* state of dspp histogram blocks attached to logical display
		 * should be changed atomically to idle. This will ensure that
		 * histogram interrupt will see consistent states for all dspp's
		 * attached to logical display.
		 */
		for (i = 0; i < hist_cnt; i++) {
			/* irqs disabled once, via the first lock only */
			if (!i)
				spin_lock_irqsave(&hists[i]->hist_lock, flag);
			else
				spin_lock(&hists[i]->hist_lock);
		}
		for (i = 0; i < hist_cnt; i++)
			hists[i]->col_state = HIST_IDLE;
		/* unlock in reverse order of acquisition */
		for (i = hist_cnt - 1; i >= 0; i--) {
			if (!i)
				spin_unlock_irqrestore(&hists[i]->hist_lock,
					flag);
			else
				spin_unlock(&hists[i]->hist_lock);
		}
		if (ret || temp_ret) {
			ret = ret ? ret : temp_ret;
			goto hist_collect_exit;
		}

		if (hist->bin_cnt != HIST_V_SIZE) {
			pr_err("User not expecting size %d output\n",
							HIST_V_SIZE);
			ret = -EINVAL;
			goto hist_collect_exit;
		}
		/* merge split-display halves by summing into hists[0] */
		if (hist_cnt > 1) {
			for (i = 1; i < hist_cnt; i++) {
				mutex_lock(&hists[i]->hist_mutex);
				for (j = 0; j < HIST_V_SIZE; j++)
					hists[0]->data[j] += hists[i]->data[j];
				mutex_unlock(&hists[i]->hist_mutex);
			}
		}
		hist_data_addr = hists[0]->data;

		for (i = 0; i < hist_cnt; i++)
			hists[i]->hist_cnt_sent++;

	} else if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {

		hist_cnt = MDSS_PP_ARG_MASK & hist->block;
		if (!hist_cnt) {
			pr_warn("Must pass pipe arguments, %d\n", hist_cnt);
			goto hist_collect_exit;
		}

		/* Find the first pipe requested */
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (PP_ARG(i, hist_cnt)) {
				pipe_num = i;
				break;
			}
		}

		pipe = __get_hist_pipe(pipe_num);
		if (IS_ERR_OR_NULL(pipe)) {
			pr_warn("Invalid starting hist pipe, %d\n", pipe_num);
			ret = -ENODEV;
			goto hist_collect_exit;
		}
		hist_info  = &pipe->pp_res.hist;
		mdss_mdp_pipe_unmap(pipe);
		/*
		 * NOTE(review): pipe_cnt is incremented in this loop AND in
		 * the two near-identical loops that follow (collect and
		 * map/unmap), so the bin_cnt validation below compares
		 * against a multiple of the real pipe count.  This looks
		 * like loop duplication from a merge — confirm against the
		 * branch history before relying on this path.
		 */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			mdss_mdp_pipe_unmap(pipe);
		}
		/* second pass: actually collect bins from each pipe */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			ctl_base = pipe->base;
			if (ret)
				temp_ret = ret;
			/* exp_sum is 0 here, so the sum check is skipped */
			ret = pp_hist_collect(hist, hist_info, ctl_base,
				exp_sum, SSPP_VIG);
			if (ret)
				pr_debug("hist error: pipe[%d] collect: %d\n",
					pipe->num, ret);

			mdss_mdp_pipe_unmap(pipe);
		}
		/* third pass: see NOTE(review) above re duplication */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			mdss_mdp_pipe_unmap(pipe);
		}
		if (ret || temp_ret) {
			ret = ret ? ret : temp_ret;
			goto hist_collect_exit;
		}

		if (pipe_cnt != 0 &&
			(hist->bin_cnt != (HIST_V_SIZE * pipe_cnt))) {
			pr_err("User not expecting size %d output\n",
						pipe_cnt * HIST_V_SIZE);
			ret = -EINVAL;
			goto hist_collect_exit;
		}
		/* multiple pipes: concatenate per-pipe bins into one buffer */
		if (pipe_cnt > 1) {
			hist_concat = kzalloc(HIST_V_SIZE * pipe_cnt *
						sizeof(u32), GFP_KERNEL);
			if (!hist_concat) {
				ret = -ENOMEM;
				goto hist_collect_exit;
			}

			for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
				if (!PP_ARG(i, hist->block))
					continue;
				pipe = __get_hist_pipe(i);
				if (IS_ERR_OR_NULL(pipe)) {
					pr_warn("Invalid Hist pipe (%d)\n", i);
					continue;
				}
				hist_info = &pipe->pp_res.hist;
				off = HIST_V_SIZE * i;
				mutex_lock(&hist_info->hist_mutex);
				for (j = off; j < off + HIST_V_SIZE; j++)
					hist_concat[j] =
						hist_info->data[j - off];
				hist_info->hist_cnt_sent++;
				mutex_unlock(&hist_info->hist_mutex);
				mdss_mdp_pipe_unmap(pipe);
			}

			hist_data_addr = hist_concat;
		} else {
			hist_data_addr = hist_info->data;
		}
	} else {
		pr_info("No Histogram at location %d\n", PP_LOCAT(hist->block));
		goto hist_collect_exit;
	}
	ret = copy_to_user(hist->c0, hist_data_addr, sizeof(u32) *
								hist->bin_cnt);
hist_collect_exit:
	kfree(hist_concat);

	return ret;
}
+
/*
 * get_hist_info_from_isr() - decode one histogram interrupt source out of
 * *isr and return its collection context.
 *
 * The matching DONE/RESET_DONE bit pair is cleared from *isr, so repeated
 * calls consume the pending mask one block at a time.  Returns NULL only
 * when the bits decode to a VIG pipe that cannot be looked up.
 */
static inline struct pp_hist_col_info *get_hist_info_from_isr(u32 *isr)
{
	u32 blk_idx;
	struct pp_hist_col_info *hist_info = NULL;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (*isr & HIST_INTR_DSPP_MASK) {
		/* DSPP sources, checked in fixed priority order 0..3 */
		if (*isr & (MDSS_MDP_HIST_INTR_DSPP_0_DONE |
				MDSS_MDP_HIST_INTR_DSPP_0_RESET_DONE)) {
			blk_idx = 0;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_0_DONE |
					MDSS_MDP_HIST_INTR_DSPP_0_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_DSPP_1_DONE |
				MDSS_MDP_HIST_INTR_DSPP_1_RESET_DONE)) {
			blk_idx = 1;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_1_DONE |
					MDSS_MDP_HIST_INTR_DSPP_1_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_DSPP_2_DONE |
				MDSS_MDP_HIST_INTR_DSPP_2_RESET_DONE)) {
			blk_idx = 2;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_2_DONE |
					MDSS_MDP_HIST_INTR_DSPP_2_RESET_DONE);
		} else {
			/* only DSPP_3 bits can remain under the DSPP mask */
			blk_idx = 3;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_3_DONE |
					MDSS_MDP_HIST_INTR_DSPP_3_RESET_DONE);
		}
		hist_info = &mdss_pp_res->dspp_hist[blk_idx];
	} else {
		/* VIG pipe sources, same priority scheme */
		if (*isr & (MDSS_MDP_HIST_INTR_VIG_0_DONE |
				MDSS_MDP_HIST_INTR_VIG_0_RESET_DONE)) {
			blk_idx = MDSS_MDP_SSPP_VIG0;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_0_DONE |
					MDSS_MDP_HIST_INTR_VIG_0_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_VIG_1_DONE |
				MDSS_MDP_HIST_INTR_VIG_1_RESET_DONE)) {
			blk_idx = MDSS_MDP_SSPP_VIG1;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_1_DONE |
					MDSS_MDP_HIST_INTR_VIG_1_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_VIG_2_DONE |
				MDSS_MDP_HIST_INTR_VIG_2_RESET_DONE)) {
			blk_idx = MDSS_MDP_SSPP_VIG2;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_2_DONE |
					MDSS_MDP_HIST_INTR_VIG_2_RESET_DONE);
		} else {
			blk_idx = MDSS_MDP_SSPP_VIG3;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_3_DONE |
					MDSS_MDP_HIST_INTR_VIG_3_RESET_DONE);
		}
		/* histograms live on the base rect of the pipe */
		pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx),
				MDSS_MDP_PIPE_RECT0);
		if (IS_ERR_OR_NULL(pipe)) {
			pr_debug("pipe DNE, %d\n", blk_idx);
			return NULL;
		}
		hist_info = &pipe->pp_res.hist;
	}

	return hist_info;
}
+
+/**
+ * mdss_mdp_hist_intr_done - Handle histogram interrupts.
+ * @isr: incoming histogram interrupts as bit mask
+ *
+ * This function takes the histogram interrupts received by the
+ * MDP interrupt handler, and handles each of the interrupts by
+ * progressing the histogram state if necessary and then clearing
+ * the interrupt.
+ */
+void mdss_mdp_hist_intr_done(u32 isr)
+{
+ u32 isr_blk, is_hist_done, isr_tmp;
+ struct pp_hist_col_info *hist_info = NULL;
+ u32 isr_mask = HIST_V2_INTR_BIT_MASK;
+ u32 intr_mask = 1, disp_num = 0;
+
+ if (pp_driver_ops.get_hist_isr_info)
+ pp_driver_ops.get_hist_isr_info(&isr_mask);
+
+ isr &= isr_mask;
+ while (isr != 0) {
+ isr_tmp = isr;
+ hist_info = get_hist_info_from_isr(&isr);
+ if (hist_info == NULL) {
+ pr_err("hist interrupt gave incorrect blk_idx\n");
+ continue;
+ }
+ isr_blk = (isr_tmp >> hist_info->intr_shift) & 0x3;
+ is_hist_done = isr_blk & 0x1;
+ spin_lock(&hist_info->hist_lock);
+ if (hist_info && is_hist_done && hist_info->col_en &&
+ hist_info->col_state == HIST_IDLE) {
+ hist_info->col_state = HIST_READY;
+ disp_num = hist_info->disp_num;
+ /* Clear the interrupt until next commit */
+ mdss_mdp_hist_irq_clear_mask(intr_mask <<
+ hist_info->intr_shift);
+ writel_relaxed(1, hist_info->base);
+ spin_unlock(&hist_info->hist_lock);
+ mdss_mdp_hist_intr_notify(disp_num);
+ } else {
+ spin_unlock(&hist_info->hist_lock);
+ }
+ };
+}
+
+static struct msm_fb_data_type *mdss_get_mfd_from_index(int index)
+{
+ struct msm_fb_data_type *out = NULL;
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int i;
+
+ for (i = 0; i < mdata->nctl; i++) {
+ ctl = mdata->ctl_off + i;
+ if ((mdss_mdp_ctl_is_power_on(ctl)) && (ctl->mfd)
+ && (ctl->mfd->index == index))
+ out = ctl->mfd;
+ }
+ return out;
+}
+
+static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num)
+{
+ u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+ u32 mixer_num;
+
+ if (!ctl || !ctl->mfd)
+ return -EINVAL;
+ mixer_num = mdss_mdp_get_ctl_mixers(ctl->mfd->index, mixer_id);
+ if (mixer_num < 2)
+ return MDSS_SIDE_NONE;
+ else if (mixer_id[1] == num)
+ return MDSS_SIDE_RIGHT;
+ else if (mixer_id[0] == num)
+ return MDSS_SIDE_LEFT;
+
+ pr_err("invalid, not on any side\n");
+ return -EINVAL;
+}
+
+static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
+ struct mdss_ad_info **ret_ad)
+{
+ int ret = 0;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_ctl *ctl = NULL;
+
+ *ret_ad = NULL;
+ if (!mfd) {
+ pr_err("invalid parameter mfd %pK\n", mfd);
+ return -EINVAL;
+ }
+ mdata = mfd_to_mdata(mfd);
+
+ if (mdata->nad_cfgs == 0) {
+ pr_debug("Assertive Display not supported by device\n");
+ return -ENODEV;
+ }
+
+ if (!mdss_mdp_mfd_valid_ad(mfd)) {
+ pr_debug("AD not supported on display num %d hw config\n",
+ mfd->index);
+ return -EPERM;
+ }
+
+ if (mfd->panel_info->type == DTV_PANEL) {
+ pr_debug("AD not supported on external display\n");
+ return -EPERM;
+ }
+
+ ctl = mfd_to_ctl(mfd);
+ if ((ctl) && (ctl->mixer_left))
+ *ret_ad = &mdata->ad_cfgs[ctl->mixer_left->num];
+ else
+ ret = -EPERM;
+
+ return ret;
+}
+
+/* must call this function from within ad->lock */
+static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd)
+{
+ int ret;
+ struct mdss_ad_info *ad;
+
+ if (!mfd) {
+ pr_err("Invalid mfd\n");
+ return -EINVAL;
+ }
+
+ ret = mdss_mdp_get_ad(mfd, &ad);
+ if (ret == -ENODEV || ret == -EPERM) {
+ pr_debug("AD not supported on device, disp num %d\n",
+ mfd->index);
+ return 0;
+ } else if (ret || !ad) {
+ pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+ ret, ad);
+ return ret;
+ }
+ pr_debug("AD backlight level changed (%d), trigger update to AD\n",
+ mfd->ad_bl_level);
+ if (ad->cfg.mode == MDSS_AD_MODE_AUTO_BL) {
+ pr_err("AD auto backlight no longer supported.\n");
+ return -EINVAL;
+ }
+
+ if (ad->state & PP_AD_STATE_RUN) {
+ ad->calc_itr = ad->cfg.stab_itr;
+ ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+ ad->sts |= PP_AD_STS_DIRTY_DATA;
+ }
+
+ return 0;
+}
+
/*
 * mdss_mdp_ad_config() - apply userspace Assertive Display parameters.
 *
 * MDP_PP_AD_INIT copies the init structure plus the backlight
 * linearization / inverse / attenuation LUTs from userspace;
 * MDP_PP_AD_CFG copies the runtime config.  MDP_PP_OPS_ENABLE /
 * MDP_PP_OPS_DISABLE latch or release the AD<->mfd binding.
 *
 * Return: 0 on success or a negative errno.
 */
int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
			struct mdss_ad_init_cfg *init_cfg)
{
	struct mdss_ad_info *ad;
	struct msm_fb_data_type *bl_mfd;
	int lin_ret = -1, inv_ret = -1, att_ret = -1, ret = 0;
	u32 last_ops;
	struct mdss_overlay_private *mdp5_data;

	ret = mdss_mdp_get_ad(mfd, &ad);
	if (ret == -ENODEV || ret == -EPERM) {
		pr_err("AD not supported on device, disp num %d\n",
			mfd->index);
		return ret;
	} else if (ret || !ad) {
		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
			ret, ad);
		return ret;
	}
	/* writeback displays borrow display 0 for backlight control */
	if (mfd->panel_info->type == WRITEBACK_PANEL) {
		bl_mfd = mdss_get_mfd_from_index(0);
		if (!bl_mfd)
			return ret;
	} else {
		bl_mfd = mfd;
	}

	if ((init_cfg->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
		pr_warn("Can't set both split bits\n");
		return -EINVAL;
	}

	mutex_lock(&ad->lock);
	if (init_cfg->ops & MDP_PP_AD_INIT) {
		memcpy(&ad->init, &init_cfg->params.init,
				sizeof(struct mdss_ad_init));
		/* backlight linearization LUTs must be exactly this size */
		if (init_cfg->params.init.bl_lin_len == AD_BL_LIN_LEN) {
			/*
			 * NOTE(review): copy_from_user() failure is mapped
			 * to -ENOMEM here and below; -EFAULT is the
			 * conventional code — confirm callers don't depend
			 * on the current value.
			 */
			lin_ret = copy_from_user(&ad->bl_lin,
				init_cfg->params.init.bl_lin,
				init_cfg->params.init.bl_lin_len *
				sizeof(uint32_t));
			inv_ret = copy_from_user(&ad->bl_lin_inv,
				init_cfg->params.init.bl_lin_inv,
				init_cfg->params.init.bl_lin_len *
				sizeof(uint32_t));
			if (lin_ret || inv_ret)
				ret = -ENOMEM;
		} else {
			ret = -EINVAL;
		}
		if (ret) {
			ad->state &= ~PP_AD_STATE_BL_LIN;
			goto ad_config_exit;
		} else
			ad->state |= PP_AD_STATE_BL_LIN;

		if ((init_cfg->params.init.bl_att_len == AD_BL_ATT_LUT_LEN) &&
			(init_cfg->params.init.bl_att_lut)) {
			att_ret = copy_from_user(&ad->bl_att_lut,
				init_cfg->params.init.bl_att_lut,
				init_cfg->params.init.bl_att_len *
				sizeof(uint32_t));
			if (att_ret)
				ret = -ENOMEM;
		} else {
			ret = -EINVAL;
		}
		/*
		 * NOTE(review): the attenuation-LUT result also sets/clears
		 * PP_AD_STATE_BL_LIN, the same bit as the linearization LUT
		 * above — confirm a dedicated state bit wasn't intended.
		 */
		if (ret) {
			ad->state &= ~PP_AD_STATE_BL_LIN;
			goto ad_config_exit;
		} else
			ad->state |= PP_AD_STATE_BL_LIN;

		ad->sts |= PP_AD_STS_DIRTY_INIT;
	} else if (init_cfg->ops & MDP_PP_AD_CFG) {
		memcpy(&ad->cfg, &init_cfg->params.cfg,
				sizeof(struct mdss_ad_cfg));
		/* preserve the IPC flag across an in-flight resume */
		if (ad->state & PP_AD_STATE_IPC_RESUME)
			ad->cfg.mode |= MDSS_AD_MODE_IPC_BIT;
		ad->cfg.backlight_scale = MDSS_MDP_AD_BL_SCALE;
		ad->sts |= PP_AD_STS_DIRTY_CFG;
		mdp5_data = mfd_to_mdp5_data(mfd);
		if (mdp5_data)
			mdp5_data->ad_events = 0;
	}

	last_ops = ad->ops & MDSS_PP_SPLIT_MASK;
	ad->ops = init_cfg->ops & MDSS_PP_SPLIT_MASK;
	/*
	 * if there is a change in the split mode config, the init values
	 * need to be re-written to hardware (if they have already been
	 * written or if there is data pending to be written). Check for
	 * pending data (DIRTY_INIT) is not checked here since it will not
	 * affect the outcome of this conditional (i.e. if init hasn't
	 * already been written (*_STATE_INIT is set), this conditional will
	 * only evaluate to true (and set the DIRTY bit) if the DIRTY bit has
	 * already been set).
	 */
	if ((last_ops ^ ad->ops) && (ad->state & PP_AD_STATE_INIT))
		ad->sts |= PP_AD_STS_DIRTY_INIT;


	if (!ret && (init_cfg->ops & MDP_PP_OPS_DISABLE)) {
		ad->sts &= ~PP_STS_ENABLE;
		/* drop the lock so a pending calc_work can finish first */
		mutex_unlock(&ad->lock);
		cancel_work_sync(&ad->calc_work);
		mutex_lock(&ad->lock);
		ad->mfd = NULL;
		ad->bl_mfd = NULL;
	} else if (!ret && (init_cfg->ops & MDP_PP_OPS_ENABLE)) {
		ad->sts |= PP_STS_ENABLE;
		ad->mfd = mfd;
		ad->bl_mfd = bl_mfd;
	}
ad_config_exit:
	mutex_unlock(&ad->lock);
	return ret;
}
+
/*
 * mdss_mdp_ad_input() - feed a new runtime input (ambient light,
 * strength, or calibration backlight) into the AD state machine.
 *
 * The input kind must match the configured AD mode.  @wait is accepted
 * for API compatibility; within this function it is only forced to 0 in
 * calibration mode (NOTE(review): confirm no other consumer of @wait).
 *
 * Return: 0 on success or a negative errno.
 */
int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
			struct mdss_ad_input *input, int wait) {
	int ret = 0;
	struct mdss_ad_info *ad;
	u32 bl;
	struct mdss_overlay_private *mdp5_data;

	ret = mdss_mdp_get_ad(mfd, &ad);
	if (ret == -ENODEV || ret == -EPERM) {
		pr_err("AD not supported on device, disp num %d\n",
			mfd->index);
		return ret;
	} else if (ret || !ad) {
		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
			ret, ad);
		return ret;
	}

	mutex_lock(&ad->lock);
	/* calibration bypasses the init/config prerequisite */
	if ((!PP_AD_STATE_IS_INITCFG(ad->state) &&
			!PP_AD_STS_IS_DIRTY(ad->sts)) &&
			(input->mode != MDSS_AD_MODE_CALIB)) {
		pr_warn("AD not initialized or configured.\n");
		ret = -EPERM;
		goto error;
	}
	switch (input->mode) {
	case MDSS_AD_MODE_AUTO_BL:
	case MDSS_AD_MODE_AUTO_STR:
		/* these modes consume an ambient-light sample */
		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
				MDSS_AD_INPUT_AMBIENT)) {
			pr_err("Invalid mode %x\n", ad->cfg.mode);
			ret = -EINVAL;
			goto error;
		}
		if (input->in.amb_light > MDSS_MDP_MAX_AD_AL) {
			pr_warn("invalid input ambient light\n");
			ret = -EINVAL;
			goto error;
		}
		ad->ad_data_mode = MDSS_AD_INPUT_AMBIENT;
		pr_debug("ambient = %d\n", input->in.amb_light);
		ad->ad_data = input->in.amb_light;
		/* restart stabilization and mark data dirty for next vsync */
		ad->calc_itr = ad->cfg.stab_itr;
		ad->sts |= PP_AD_STS_DIRTY_VSYNC;
		ad->sts |= PP_AD_STS_DIRTY_DATA;
		mdp5_data = mfd_to_mdp5_data(mfd);
		if (mdp5_data)
			mdp5_data->ad_events = 0;
		break;
	case MDSS_AD_MODE_TARG_STR:
	case MDSS_AD_MODE_MAN_STR:
		/* these modes consume a strength value instead */
		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
				MDSS_AD_INPUT_STRENGTH)) {
			pr_err("Invalid mode %x\n", ad->cfg.mode);
			ret = -EINVAL;
			goto error;
		}
		if (input->in.strength > MDSS_MDP_MAX_AD_STR) {
			pr_warn("invalid input strength\n");
			ret = -EINVAL;
			goto error;
		}
		ad->ad_data_mode = MDSS_AD_INPUT_STRENGTH;
		pr_debug("strength = %d\n", input->in.strength);
		ad->ad_data = input->in.strength;
		ad->calc_itr = ad->cfg.stab_itr;
		ad->sts |= PP_AD_STS_DIRTY_VSYNC;
		ad->sts |= PP_AD_STS_DIRTY_DATA;
		break;
	case MDSS_AD_MODE_CALIB:
		wait = 0;
		if (mfd->calib_mode) {
			bl = input->in.calib_bl;
			if (bl >= AD_BL_LIN_LEN) {
				pr_warn("calib_bl 255 max!\n");
				break;
			}
			/*
			 * ad->lock must be dropped before taking bl_lock to
			 * respect the established lock ordering; reacquired
			 * below before touching ad state again.
			 */
			mutex_unlock(&ad->lock);
			mutex_lock(&mfd->bl_lock);
			MDSS_BRIGHT_TO_BL(bl, bl, mfd->panel_info->bl_max,
					mfd->panel_info->brightness_max);
			mdss_fb_set_backlight(mfd, bl);
			mutex_unlock(&mfd->bl_lock);
			mutex_lock(&ad->lock);
			mfd->calib_mode_bl = bl;
		} else {
			pr_warn("should be in calib mode\n");
		}
		break;
	default:
		pr_warn("invalid default %d\n", input->mode);
		ret = -EINVAL;
		goto error;
	}
error:
	mutex_unlock(&ad->lock);
	return ret;
}
+
+static void pp_ad_input_write(struct mdss_mdp_ad *ad_hw,
+ struct mdss_ad_info *ad)
+{
+ char __iomem *base;
+
+ base = ad_hw->base;
+ switch (ad->cfg.mode) {
+ case MDSS_AD_MODE_AUTO_BL:
+ writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
+ break;
+ case MDSS_AD_MODE_AUTO_STR:
+ pr_debug("bl_data = %d, ad_data = %d\n", ad->bl_data,
+ ad->ad_data);
+ ad->last_ad_data = ad->ad_data;
+ ad->last_ad_data_valid = true;
+ writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+ writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
+ break;
+ case MDSS_AD_MODE_TARG_STR:
+ writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+ writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_TARG_STR);
+ break;
+ case MDSS_AD_MODE_MAN_STR:
+ writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+ writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_STR_MAN);
+ break;
+ case MDSS_AD_MODE_MAN_IPC:
+ if (!ad->last_ad_data_valid) {
+ ad->last_ad_data = ad->ad_data;
+ ad->last_ad_data_valid = true;
+ }
+ pr_debug("bl_data = %d, last_ad_data = %d, last_str = %d\n",
+ ad->bl_data, ad->last_ad_data, ad->last_str);
+ writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+ writel_relaxed(ad->last_ad_data, base + MDSS_MDP_REG_AD_AL);
+ writel_relaxed(ad->last_str, base + MDSS_MDP_REG_AD_STR_MAN);
+ break;
+ default:
+ pr_warn("Invalid mode! %d\n", ad->cfg.mode);
+ break;
+ }
+}
+
/* Overlap (in pixels) between the two AD halves of a merged display */
#define MDSS_AD_MERGED_WIDTH 4
/*
 * pp_ad_init_write() - program one AD hardware instance with the cached
 * userspace init parameters (contrast controls, levels, LUTs, frame
 * geometry), including the dual-pipe tiling setup on MDP >= v1.3.
 */
static void pp_ad_init_write(struct mdss_mdp_ad *ad_hw, struct mdss_ad_info *ad,
						struct mdss_mdp_ctl *ctl)
{
	struct mdss_data_type *mdata = ctl->mdata;
	u32 temp, cfg_buf_mode;
	u32 frame_start, frame_end, procs_start, procs_end, tile_ctrl;
	u32 num;
	int side;
	char __iomem *base;
	bool is_calc, is_dual_pipe, split_mode;
	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 mixer_num;

	mixer_num = mdss_mdp_get_ctl_mixers(ctl->mfd->index, mixer_id);
	if (mixer_num > 1)
		is_dual_pipe = true;
	else
		is_dual_pipe = false;

	base = ad_hw->base;
	/* true when this AD instance is the one doing the calculation */
	is_calc = ad->calc_hw_num == ad_hw->num;
	split_mode = !!(ad->ops & MDSS_PP_SPLIT_MASK);

	writel_relaxed(ad->init.i_control[0] & 0x1F,
				base + MDSS_MDP_REG_AD_CON_CTRL_0);
	writel_relaxed(ad->init.i_control[1] << 8,
				base + MDSS_MDP_REG_AD_CON_CTRL_1);

	/* packed registers: high halfword | low halfword */
	temp = ad->init.white_lvl << 16;
	temp |= ad->init.black_lvl & 0xFFFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_BW_LVL);

	writel_relaxed(ad->init.var, base + MDSS_MDP_REG_AD_VAR);

	writel_relaxed(ad->init.limit_ampl, base + MDSS_MDP_REG_AD_AMP_LIM);

	writel_relaxed(ad->init.i_dither, base + MDSS_MDP_REG_AD_DITH);

	temp = ad->init.slope_max << 8;
	temp |= ad->init.slope_min & 0xFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_SLOPE);

	writel_relaxed(ad->init.dither_ctl, base + MDSS_MDP_REG_AD_DITH_CTRL);

	writel_relaxed(ad->init.format, base + MDSS_MDP_REG_AD_CTRL_0);
	writel_relaxed(ad->init.auto_size, base + MDSS_MDP_REG_AD_CTRL_1);

	/* in split mode each instance only sees its own mixer's width */
	if (split_mode)
		temp = mdata->mixer_intf[ad_hw->num].width << 16;
	else
		temp = ad->init.frame_w << 16;
	temp |= ad->init.frame_h & 0xFFFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_FRAME_SIZE);

	temp = ad->init.logo_v << 8;
	temp |= ad->init.logo_h & 0xFF;
	writel_relaxed(temp, base + MDSS_MDP_REG_AD_LOGO_POS);

	pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_FI, ad->init.asym_lut);
	pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_CC, ad->init.color_corr_lut);

	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
		if (is_dual_pipe && !split_mode) {
			/*
			 * dual-pipe merged mode: each AD instance processes
			 * its half plus MDSS_AD_MERGED_WIDTH pixels of
			 * overlap from the other half.
			 */
			num = ad_hw->num;
			side = pp_num_to_side(ctl, num);
			tile_ctrl = 0x5;
			if ((ad->calc_hw_num + 1) == num)
				tile_ctrl |= 0x10;

			if (side <= MDSS_SIDE_NONE) {
				WARN(1, "error finding sides, %d\n", side);
				frame_start = 0;
				procs_start = frame_start;
				frame_end = 0;
				procs_end = frame_end;
			} else if (side == MDSS_SIDE_LEFT) {
				frame_start = 0;
				procs_start = 0;
				frame_end = mdata->mixer_intf[num].width +
							MDSS_AD_MERGED_WIDTH;
				procs_end = mdata->mixer_intf[num].width;
			} else {
				procs_start = ad->init.frame_w -
					(mdata->mixer_intf[num].width);
				procs_end = ad->init.frame_w;
				frame_start = procs_start -
							MDSS_AD_MERGED_WIDTH;
				frame_end = procs_end;
			}
			/* registers take inclusive end coordinates */
			procs_end -= 1;
			frame_end -= 1;
			cfg_buf_mode = 0x3;
		} else {
			/* single pipe or split: process the full range */
			frame_start = 0x0;
			frame_end = 0xFFFF;
			procs_start = 0x0;
			procs_end = 0xFFFF;
			tile_ctrl = 0x0;
			cfg_buf_mode = 0x2;
		}

		writel_relaxed(frame_start, base + MDSS_MDP_REG_AD_FRAME_START);
		writel_relaxed(frame_end, base + MDSS_MDP_REG_AD_FRAME_END);
		writel_relaxed(procs_start, base + MDSS_MDP_REG_AD_PROCS_START);
		writel_relaxed(procs_end, base + MDSS_MDP_REG_AD_PROCS_END);
		writel_relaxed(tile_ctrl, base + MDSS_MDP_REG_AD_TILE_CTRL);
		writel_relaxed(cfg_buf_mode, base + MDSS_MDP_REG_AD_CFG_BUF);
	}
}
+
+#define MDSS_PP_AD_DEF_CALIB 0x6E
+/*
+ * Program the per-mode AD configuration registers.  The switch cases are
+ * ordered from most- to least-automatic mode and intentionally fall
+ * through: AUTO_BL needs everything AUTO_STR needs, which needs everything
+ * TARG_STR needs, and so on down to MAN_STR.  MAN_IPC is self-contained
+ * and replays the last known calibration.
+ */
+static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw, struct mdss_ad_info *ad)
+{
+	char __iomem *base;
+	u32 temp, temp_calib = MDSS_PP_AD_DEF_CALIB;
+
+	base = ad_hw->base;
+	switch (ad->cfg.mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+		temp = ad->cfg.backlight_max << 16;
+		temp |= ad->cfg.backlight_min & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_BL_MINMAX);
+		writel_relaxed(ad->cfg.amb_light_min,
+				base + MDSS_MDP_REG_AD_AL_MIN);
+		temp = ad->cfg.filter[1] << 16;
+		temp |= ad->cfg.filter[0] & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_AL_FILT);
+		/* fall-through */
+	case MDSS_AD_MODE_AUTO_STR:
+		/* Snapshot calib so MAN_IPC can restore it later. */
+		memcpy(ad->last_calib, ad->cfg.calib, sizeof(ad->last_calib));
+		ad->last_calib_valid = true;
+		pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_AL,
+				ad->cfg.al_calib_lut);
+		writel_relaxed(ad->cfg.strength_limit,
+				base + MDSS_MDP_REG_AD_STR_LIM);
+		temp = ad->cfg.calib[3] << 16;
+		temp |= ad->cfg.calib[2] & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_CD);
+		writel_relaxed(ad->cfg.t_filter_recursion,
+				base + MDSS_MDP_REG_AD_TFILT_CTRL);
+		temp_calib = ad->cfg.calib[0] & 0xFFFF;
+		/* fall-through */
+	case MDSS_AD_MODE_TARG_STR:
+		/* temp_calib is calib[0] from AUTO_* or the default 0x6E. */
+		temp = ad->cfg.calib[1] << 16;
+		temp |= temp_calib;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_AB);
+		/* fall-through */
+	case MDSS_AD_MODE_MAN_STR:
+		writel_relaxed(ad->cfg.backlight_scale,
+				base + MDSS_MDP_REG_AD_BL_MAX);
+		writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
+				base + MDSS_MDP_REG_AD_MODE_SEL);
+		pr_debug("stab_itr = %d\n", ad->cfg.stab_itr);
+		break;
+	case MDSS_AD_MODE_MAN_IPC:
+		if (!ad->last_calib_valid) {
+			memcpy(ad->last_calib, ad->cfg.calib,
+				sizeof(ad->last_calib));
+			ad->last_calib_valid = true;
+		}
+		writel_relaxed(MDSS_AD_T_FILTER_CTRL_0,
+				base + MDSS_MDP_REG_AD_TFILT_CTRL);
+		pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_AL,
+				ad->cfg.al_calib_lut);
+		writel_relaxed(ad->cfg.strength_limit,
+				base + MDSS_MDP_REG_AD_STR_LIM);
+		temp = ad->last_calib[3] << 16;
+		temp |= ad->last_calib[2] & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_CD);
+		temp_calib = ad->last_calib[0] & 0xFFFF;
+		temp = ad->last_calib[1] << 16;
+		temp |= temp_calib;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_AB);
+		writel_relaxed(ad->cfg.backlight_scale,
+				base + MDSS_MDP_REG_AD_BL_MAX);
+		writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
+				base + MDSS_MDP_REG_AD_MODE_SEL);
+		pr_debug("stab_itr = %d\n", ad->cfg.stab_itr);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Vsync callback: schedule the AD strength-calculation worker for the
+ * AD block backing this ctl's left mixer, if one exists.
+ */
+static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_ad_info *ad_info;
+
+	/* Nothing to do when the left mixer has no AD block behind it. */
+	if (!ctl->mixer_left || ctl->mixer_left->num >= mdata->nad_cfgs)
+		return;
+
+	ad_info = &mdata->ad_cfgs[ctl->mixer_left->num];
+	queue_work(mdata->ad_calc_wq, &ad_info->calc_work);
+}
+
+#define MDSS_PP_AD_BYPASS_DEF 0x101
+/*
+ * Decide the AD opmode for mixer @num: 0 (AD active) when AD is enabled
+ * for this side, otherwise the bypass value.
+ */
+static void pp_ad_bypass_config(struct mdss_ad_info *ad,
+				struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode)
+{
+	int side = pp_num_to_side(ctl, num);
+	bool active = pp_sts_is_enabled(ad->reg_sts |
+			(ad->ops & MDSS_PP_SPLIT_MASK), side);
+
+	*opmode = active ? 0 : MDSS_PP_AD_BYPASS_DEF;
+}
+
+/*
+ * Pick which AD HW instance performs the strength calculation for @mfd.
+ * Defaults to the left mixer; uses the right one only when split ops
+ * request right-only processing.  Returns 0 on success, -EINVAL when the
+ * fb has no mixers.
+ */
+static int pp_ad_setup_hw_nums(struct msm_fb_data_type *mfd,
+						struct mdss_ad_info *ad)
+{
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	u32 mixer_num;
+
+	mixer_num = mdss_mdp_get_ctl_mixers(mfd->index, mixer_id);
+	if (!mixer_num)
+		return -EINVAL;
+
+	/* default to left mixer */
+	ad->calc_hw_num = mixer_id[0];
+	if ((mixer_num > 1) && (ad->ops & MDSS_PP_SPLIT_RIGHT_ONLY))
+		ad->calc_hw_num = mixer_id[1];
+	return 0;
+}
+
+/*
+ * Clear the IPC_RESET flag once AD is running.  Devices without AD
+ * support (-ENODEV/-EPERM from mdss_mdp_get_ad) are treated as success
+ * so callers need no special-casing.
+ */
+static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+	struct mdss_ad_info *ad;
+
+	if (!mfd) {
+		pr_err("mfd = 0x%pK\n", mfd);
+		return -EINVAL;
+	}
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+
+	mutex_lock(&ad->lock);
+	if (ad->state & PP_AD_STATE_RUN && ad->state & PP_AD_STATE_IPC_RESET)
+		ad->state &= ~PP_AD_STATE_IPC_RESET;
+	mutex_unlock(&ad->lock);
+
+	return 0;
+}
+
+/*
+ * Per-frame AD state machine.  Converts the "dirty" bits in ad->sts into
+ * state transitions (DATA/CFG/INIT), tracks screen-size changes, enables
+ * or tears down the AD run state, and attaches/detaches the vsync-driven
+ * calculation handler.  Must not be entered with ad->lock held; it takes
+ * the lock itself.  For writeback panels the backlight is owned by the
+ * primary fb (bl_mfd), not @mfd.
+ */
+static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+	struct mdss_ad_info *ad;
+	struct mdss_mdp_ctl *ctl, *sctl;
+	struct msm_fb_data_type *bl_mfd;
+	struct mdss_data_type *mdata;
+	u32 bypass = MDSS_PP_AD_BYPASS_DEF, bl;
+	u32 width;
+
+	if (!mfd) {
+		pr_err("mfd = 0x%pK\n", mfd);
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("ctl = 0x%pK\n", ctl);
+		return -EINVAL;
+	}
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+	/* Writeback panels borrow the primary display's backlight. */
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		bl_mfd = mdss_get_mfd_from_index(0);
+		if (!bl_mfd) {
+			ret = -EINVAL;
+			pr_warn("failed to get primary FB bl handle, err = %d\n",
+					ret);
+			goto exit;
+		}
+	} else {
+		bl_mfd = mfd;
+	}
+
+	mdata = mdss_mdp_get_mdata();
+
+	mutex_lock(&ad->lock);
+	/*
+	 * IPC resume: after MDSS_AD_IPC_FRAME_COUNT frames, drop the IPC
+	 * mode bit and re-flag data/cfg as dirty if they changed meanwhile.
+	 */
+	if (ad->state & PP_AD_STATE_RUN && ad->state & PP_AD_STATE_IPC_RESUME) {
+		if (ad->ipc_frame_count == MDSS_AD_IPC_FRAME_COUNT) {
+			ad->state &= ~PP_AD_STATE_IPC_RESUME;
+			ad->state |= PP_AD_STATE_IPC_RESET;
+			ad->cfg.mode &= ~MDSS_AD_MODE_IPC_BIT;
+			if (ad->last_ad_data != ad->ad_data)
+				ad->sts |= PP_AD_STS_DIRTY_DATA;
+			if (memcmp(ad->last_calib, ad->cfg.calib,
+				sizeof(ad->last_calib)))
+				ad->sts |= PP_AD_STS_DIRTY_CFG;
+			pr_debug("switch mode to %d, last_ad_data = %d\n",
+				 ad->cfg.mode, ad->last_ad_data);
+		} else {
+			ad->ipc_frame_count++;
+		}
+	}
+
+	/* Rate-limited debug: only log when sts/state actually changed. */
+	if (ad->sts != last_sts || ad->state != last_state) {
+		last_sts = ad->sts;
+		last_state = ad->state;
+		pr_debug("beginning: ad->sts = 0x%08x, state = 0x%08x\n",
+							ad->sts, ad->state);
+	}
+
+	if (ad->sts & PP_AD_STS_DIRTY_DATA) {
+		ad->sts &= ~PP_AD_STS_DIRTY_DATA;
+		ad->state |= PP_AD_STATE_DATA;
+		pr_debug("dirty data, last_bl = %d\n", ad->last_bl);
+		if (!bl_mfd->ad_bl_level)
+			bl_mfd->ad_bl_level = bl_mfd->bl_level;
+		bl = bl_mfd->ad_bl_level;
+
+		if (ad->last_bl != bl) {
+			ad->last_bl = bl;
+			linear_map(bl, &ad->bl_data,
+				bl_mfd->panel_info->bl_max,
+				MDSS_MDP_AD_BL_SCALE);
+		}
+		if (!(ad->state & PP_AD_STATE_IPC_RESUME)) {
+			ad->calc_itr = ad->cfg.stab_itr;
+			ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+		}
+		ad->reg_sts |= PP_AD_STS_DIRTY_DATA;
+	}
+
+	if (ad->sts & PP_AD_STS_DIRTY_CFG) {
+		ad->sts &= ~PP_AD_STS_DIRTY_CFG;
+		ad->state |= PP_AD_STATE_CFG;
+
+		ad->reg_sts |= PP_AD_STS_DIRTY_CFG;
+	}
+	if (ad->sts & PP_AD_STS_DIRTY_INIT) {
+		ad->sts &= ~PP_AD_STS_DIRTY_INIT;
+		if (pp_ad_setup_hw_nums(mfd, ad)) {
+			pr_warn("failed to setup ad master\n");
+			ad->calc_hw_num = PP_AD_BAD_HW_NUM;
+		} else {
+			ad->state |= PP_AD_STATE_INIT;
+			ad->reg_sts |= PP_AD_STS_DIRTY_INIT;
+		}
+	}
+
+	width = ctl->width;
+	if (sctl)
+		width += sctl->width;
+
+	/* update ad screen size if it has changed since last configuration */
+	if ((ad->init.frame_w != width) ||
+			(ad->init.frame_h != ctl->height)) {
+		pr_debug("changing from %dx%d to %dx%d\n", ad->init.frame_w,
+							ad->init.frame_h,
+							width,
+							ctl->height);
+		ad->init.frame_w = width;
+		ad->init.frame_h = ctl->height;
+		ad->reg_sts |= PP_AD_STS_DIRTY_INIT;
+	}
+
+	if ((ad->sts & PP_STS_ENABLE) && PP_AD_STATE_IS_READY(ad->state)) {
+		bypass = 0;
+		ad->reg_sts |= PP_AD_STS_DIRTY_ENABLE;
+		ad->state |= PP_AD_STATE_RUN;
+		if (bl_mfd != mfd)
+			bl_mfd->ext_ad_ctrl = mfd->index;
+		bl_mfd->ext_bl_ctrl = ad->cfg.bl_ctrl_mode;
+	} else {
+		if (ad->state & PP_AD_STATE_RUN) {
+			ad->reg_sts = PP_AD_STS_DIRTY_ENABLE;
+			/* Clear state and regs when going to off state*/
+			ad->sts = 0;
+			ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+			ad->state &= ~PP_AD_STATE_INIT;
+			ad->state &= ~PP_AD_STATE_CFG;
+			ad->state &= ~PP_AD_STATE_DATA;
+			ad->state &= ~PP_AD_STATE_BL_LIN;
+			ad->state &= ~PP_AD_STATE_IPC_RESUME;
+			ad->state &= ~PP_AD_STATE_IPC_RESET;
+			ad->ad_data = 0;
+			ad->ad_data_mode = 0;
+			ad->last_bl = 0;
+			ad->last_ad_data = 0;
+			ad->last_calib_valid = false;
+			ad->last_ad_data_valid = false;
+			ad->ipc_frame_count = 0;
+			ad->calc_itr = 0;
+			ad->calc_hw_num = PP_AD_BAD_HW_NUM;
+			memset(&ad->last_calib, 0, sizeof(ad->last_calib));
+			memset(&ad->bl_lin, 0, sizeof(uint32_t) *
+								AD_BL_LIN_LEN);
+			memset(&ad->bl_lin_inv, 0, sizeof(uint32_t) *
+								AD_BL_LIN_LEN);
+			memset(&ad->bl_att_lut, 0, sizeof(uint32_t) *
+				AD_BL_ATT_LUT_LEN);
+			memset(&ad->init, 0, sizeof(struct mdss_ad_init));
+			memset(&ad->cfg, 0, sizeof(struct mdss_ad_cfg));
+			bl_mfd->ext_bl_ctrl = 0;
+			bl_mfd->ext_ad_ctrl = -1;
+			bl_mfd->ad_bl_level = 0;
+		}
+		ad->state &= ~PP_AD_STATE_RUN;
+	}
+	if (!bypass)
+		ad->reg_sts |= PP_STS_ENABLE;
+	else
+		ad->reg_sts &= ~PP_STS_ENABLE;
+
+	/* Attach/detach the vsync handler that drives pp_ad_calc_worker. */
+	if (PP_AD_STS_DIRTY_VSYNC & ad->sts) {
+		pr_debug("dirty vsync, calc_itr = %d\n", ad->calc_itr);
+		ad->sts &= ~PP_AD_STS_DIRTY_VSYNC;
+		if (!(PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr &&
+					(ad->state & PP_AD_STATE_RUN)) {
+			ctl->ops.add_vsync_handler(ctl, &ad->handle);
+			ad->state |= PP_AD_STATE_VSYNC;
+		} else if ((PP_AD_STATE_VSYNC & ad->state) &&
+			(!ad->calc_itr || !(PP_AD_STATE_RUN & ad->state))) {
+			ctl->ops.remove_vsync_handler(ctl, &ad->handle);
+			ad->state &= ~PP_AD_STATE_VSYNC;
+		}
+	}
+
+	if (ad->sts != last_sts || ad->state != last_state) {
+		last_sts = ad->sts;
+		last_state = ad->state;
+		pr_debug("end: ad->sts = 0x%08x, state = 0x%08x\n", ad->sts,
+								ad->state);
+	}
+	mutex_unlock(&ad->lock);
+exit:
+	return ret;
+}
+
+#define MDSS_PP_AD_SLEEP 10
+/*
+ * Workqueue function scheduled from the vsync handler.  Decrements the
+ * iteration counter, notifies userspace via the ad_event sysfs node, and
+ * reads back the computed strength from the AD block (with MDP clocks on,
+ * and deliberately after dropping ad->lock).
+ */
+static void pp_ad_calc_worker(struct work_struct *work)
+{
+	struct mdss_ad_info *ad;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	char __iomem *base;
+
+	ad = container_of(work, struct mdss_ad_info, calc_work);
+
+	mutex_lock(&ad->lock);
+	if (!ad->mfd || !(ad->sts & PP_STS_ENABLE)) {
+		mutex_unlock(&ad->lock);
+		return;
+	}
+	mdp5_data = mfd_to_mdp5_data(ad->mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data = 0x%pK\n", mdp5_data);
+		mutex_unlock(&ad->lock);
+		return;
+	}
+
+	ctl = mfd_to_ctl(ad->mfd);
+	mdata = mfd_to_mdata(ad->mfd);
+	if (!ctl || !mdata || ad->calc_hw_num >= mdata->nad_cfgs) {
+		pr_err("ctl = 0x%pK, mdata = 0x%pK, ad->calc_hw_num = %d, mdata->nad_cfg = %d\n",
+			ctl, mdata, ad->calc_hw_num,
+			(!mdata ? 0 : mdata->nad_cfgs));
+		mutex_unlock(&ad->lock);
+		return;
+	}
+
+	base = mdata->ad_off[ad->calc_hw_num].base;
+
+	/* In auto-strength mode a zero backlight means nothing to compute. */
+	if ((ad->cfg.mode == MDSS_AD_MODE_AUTO_STR) && (ad->last_bl == 0)) {
+		mutex_unlock(&ad->lock);
+		return;
+	}
+	if ((PP_AD_STATE_RUN & ad->state) && ad->calc_itr > 0)
+		ad->calc_itr--;
+
+	mdp5_data->ad_events++;
+	sysfs_notify_dirent(mdp5_data->ad_event_sd);
+	/* Out of iterations: detach ourselves from vsync. */
+	if (!ad->calc_itr) {
+		ad->state &= ~PP_AD_STATE_VSYNC;
+		ctl->ops.remove_vsync_handler(ctl, &ad->handle);
+	}
+	mutex_unlock(&ad->lock);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ad->last_str = 0xFF & readl_relaxed(base + MDSS_MDP_REG_AD_STR_OUT);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	pr_debug("itr number %d str %d\n", ad->calc_itr, ad->last_str);
+}
+
+#define PP_AD_LUT_LEN 33
+/*
+ * Write a 33-entry AD LUT into HW, packing two 16-bit entries per 32-bit
+ * register (even entry low, odd entry high).  The final odd entry goes
+ * alone into the high half of the last register.
+ */
+static void pp_ad_cfg_lut(char __iomem *addr, u32 *data)
+{
+	int i;
+	u32 temp;
+
+	for (i = 0; i < PP_AD_LUT_LEN - 1; i += 2) {
+		temp = data[i+1] << 16;
+		temp |= (data[i] & 0xFFFF);
+		writel_relaxed(temp, addr + (i*2));
+	}
+	writel_relaxed(data[PP_AD_LUT_LEN - 1] << 16,
+			addr + ((PP_AD_LUT_LEN - 1) * 2));
+}
+
+/* must call this function from within ad->lock */
+/*
+ * Attenuate a panel backlight value through the AD attenuation LUT,
+ * blending LUT output with the raw value by alpha/alpha_base, then map
+ * back to the panel backlight range.  Returns 0 on success or -EINVAL
+ * on a bad LUT index.
+ */
+static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out)
+{
+	u32 shift = 0, ratio_temp = 0;
+	u32 n, lut_interval, bl_att;
+
+	/* NOTE(review): bl and alpha are u32, so these "< 0" tests can
+	 * never fire — dead code kept for patch fidelity. */
+	if (bl < 0 || ad->init.alpha < 0) {
+		pr_err("Invalid input: backlight = %d, alpha = %d\n", bl,
+			ad->init.alpha);
+		return -EINVAL;
+	}
+
+	if (ad->init.alpha == 0) {
+		pr_debug("alpha = %d, hence no attenuation needed\n",
+			ad->init.alpha);
+		return 0;
+	}
+	pr_debug("bl_in = %d\n", bl);
+	/* map panel backlight range to AD backlight range */
+	linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
+		MDSS_MDP_AD_BL_SCALE);
+
+	pr_debug("Before attenuation = %d\n", bl);
+	/* shift = ceil(log2(scale / LUT segments)): index = bl >> shift. */
+	ratio_temp = MDSS_MDP_AD_BL_SCALE / (AD_BL_ATT_LUT_LEN - 1);
+	while (ratio_temp > 0) {
+		ratio_temp = ratio_temp >> 1;
+		shift++;
+	}
+	n = bl >> shift;
+	if (n >= (AD_BL_ATT_LUT_LEN - 1)) {
+		pr_err("Invalid index for BL attenuation: %d.\n", n);
+		return -EINVAL;
+	}
+	/* Piece-wise linear interpolation inside LUT segment n. */
+	lut_interval = (MDSS_MDP_AD_BL_SCALE + 1) / (AD_BL_ATT_LUT_LEN - 1);
+	bl_att = ((ad->bl_att_lut[n + 1] - ad->bl_att_lut[n]) *
+		(bl - lut_interval * n) + (ad->bl_att_lut[n] * lut_interval)) /
+		lut_interval;
+	pr_debug("n = %u, bl_att_lut[%u] = %u, bl_att_lut[%u] = %u, bl_att = %u\n",
+		n, n, ad->bl_att_lut[n], n + 1, ad->bl_att_lut[n + 1], bl_att);
+	*bl_out = (ad->init.alpha * bl_att +
+		(ad->init.alpha_base - ad->init.alpha) * bl) /
+		ad->init.alpha_base;
+
+	pr_debug("After attenuation = %d\n", *bl_out);
+	/* map AD backlight range back to panel backlight range */
+	linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
+		ad->bl_mfd->panel_info->bl_max);
+
+	pr_debug("bl_out = %d\n", *bl_out);
+	return 0;
+}
+
+/* must call this function from within ad->lock */
+/*
+ * Map a backlight value through the linearization (inv == LINEAR) or
+ * inverse-linearization (inv == LINEAR_INV) LUT, with piece-wise linear
+ * interpolation between LUT samples, converting to/from the AD scale
+ * around the lookup.  Returns 0 on success, -EINVAL on bad input.
+ * NOTE(review): the "bl < 0" test is dead since bl is u32.
+ */
+static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
+		int inv)
+{
+
+	u32 n, bl_lut_max_index = AD_BL_LIN_LEN - 1;
+	uint32_t *bl_lut = NULL;
+	int ret = -EINVAL;
+
+	if (bl < 0 || bl > ad->bl_mfd->panel_info->bl_max) {
+		pr_err("Invalid backlight input: bl = %d, bl_max = %d\n", bl,
+			ad->bl_mfd->panel_info->bl_max);
+		return -EINVAL;
+	}
+
+	pr_debug("bl_in = %d, inv = %d\n", bl, inv);
+	if (inv == MDP_PP_AD_BL_LINEAR) {
+		bl_lut = ad->bl_lin;
+	} else if (inv == MDP_PP_AD_BL_LINEAR_INV) {
+		bl_lut = ad->bl_lin_inv;
+	} else {
+		pr_err("invalid inv param: inv = %d\n", inv);
+		return -EINVAL;
+	}
+
+	/* map panel backlight range to AD backlight range */
+	linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
+		MDSS_MDP_AD_BL_SCALE);
+
+	pr_debug("Before linearization = %d\n", bl);
+	n = bl * bl_lut_max_index / MDSS_MDP_AD_BL_SCALE;
+	pr_debug("n = %u\n", n);
+	if (n > bl_lut_max_index) {
+		pr_err("Invalid index for BL linearization: %d.\n", n);
+		return ret;
+	} else if (n == bl_lut_max_index) {
+		*bl_out = bl_lut[n];
+	} else if (bl == n * MDSS_MDP_AD_BL_SCALE / bl_lut_max_index) {
+		/* bl sits exactly on LUT sample n — no interpolation. */
+		*bl_out = bl_lut[n];
+	} else if (bl == (n + 1) * MDSS_MDP_AD_BL_SCALE / bl_lut_max_index) {
+		*bl_out = bl_lut[n + 1];
+	} else {
+		/* linear piece-wise interpolation */
+		*bl_out = ((bl_lut[n + 1] - bl_lut[n]) *
+			(bl - n * MDSS_MDP_AD_BL_SCALE /
+			bl_lut_max_index) + bl_lut[n] *
+			MDSS_MDP_AD_BL_SCALE / bl_lut_max_index) *
+			bl_lut_max_index / MDSS_MDP_AD_BL_SCALE;
+	}
+	pr_debug("After linearization = %d\n", *bl_out);
+
+	/* map AD backlight range back to panel backlight range */
+	linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
+		ad->bl_mfd->panel_info->bl_max);
+
+	pr_debug("bl_out = %d\n", *bl_out);
+	return 0;
+}
+
+/*
+ * Allocate and initialize the per-instance AD bookkeeping (register
+ * offsets, config structs, locks, vsync handler, calc work) and the
+ * single-threaded workqueue that runs pp_ad_calc_worker.
+ *
+ * Fix: the return value of create_singlethread_workqueue() was never
+ * checked; on failure the vsync handler would later pass a NULL wq to
+ * queue_work().  Now fails cleanly with -ENOMEM.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ */
+int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_offsets)
+{
+	u32 i;
+	int rc = 0;
+
+	mdata->ad_off = devm_kzalloc(&mdata->pdev->dev,
+				sizeof(struct mdss_mdp_ad) * mdata->nad_cfgs,
+				GFP_KERNEL);
+
+	if (!mdata->ad_off) {
+		pr_err("unable to setup assertive display hw:devm_kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	mdata->ad_cfgs = devm_kzalloc(&mdata->pdev->dev,
+			sizeof(struct mdss_ad_info) * mdata->nad_cfgs,
+			GFP_KERNEL);
+
+	if (!mdata->ad_cfgs) {
+		pr_err("unable to setup assertive display:devm_kzalloc fail\n");
+		devm_kfree(&mdata->pdev->dev, mdata->ad_off);
+		return -ENOMEM;
+	}
+
+	mdata->ad_calc_wq = create_singlethread_workqueue("ad_calc_wq");
+	if (!mdata->ad_calc_wq) {
+		pr_err("unable to setup assertive display:workqueue create fail\n");
+		devm_kfree(&mdata->pdev->dev, mdata->ad_cfgs);
+		devm_kfree(&mdata->pdev->dev, mdata->ad_off);
+		return -ENOMEM;
+	}
+	for (i = 0; i < mdata->nad_cfgs; i++) {
+		mdata->ad_off[i].base = mdata->mdss_io.base + ad_offsets[i];
+		mdata->ad_off[i].num = i;
+		mdata->ad_cfgs[i].num = i;
+		mdata->ad_cfgs[i].ops = 0;
+		mdata->ad_cfgs[i].reg_sts = 0;
+		mdata->ad_cfgs[i].calc_itr = 0;
+		/* Sentinel: no strength has been read back yet. */
+		mdata->ad_cfgs[i].last_str = 0xFFFFFFFF;
+		mdata->ad_cfgs[i].last_bl = 0;
+		mdata->ad_cfgs[i].last_ad_data = 0;
+		memset(mdata->ad_cfgs[i].last_calib, 0,
+			sizeof(mdata->ad_cfgs[i].last_calib));
+		mdata->ad_cfgs[i].last_calib_valid = false;
+		mdata->ad_cfgs[i].last_ad_data_valid = false;
+		mutex_init(&mdata->ad_cfgs[i].lock);
+		mdata->ad_cfgs[i].handle.vsync_handler = pp_ad_vsync_handler;
+		mdata->ad_cfgs[i].handle.cmd_post_flush = true;
+		INIT_WORK(&mdata->ad_cfgs[i].calc_work, pp_ad_calc_worker);
+	}
+	return rc;
+}
+
+/*
+ * Whitelist check for calibration access to controller registers.
+ * Returns the permitted ops mask (READ and/or WRITE) for @ptr, or 0 if
+ * @ptr does not match any allowed CTL register.
+ */
+static int is_valid_calib_ctrl_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	int stage = 0;
+	struct mdss_mdp_ctl *ctl;
+
+	/* Controller */
+	for (counter = 0; counter < mdss_res->nctl; counter++) {
+		ctl = mdss_res->ctl_off + counter;
+		base = ctl->base;
+
+		if (ptr == base + MDSS_MDP_REG_CTL_TOP) {
+			ret = MDP_PP_OPS_READ;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_CTL_FLUSH) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+
+		/* Per-stage layer registers are read/write accessible. */
+		for (stage = 0; stage < (mdss_res->nmixers_intf +
+					 mdss_res->nmixers_wb); stage++)
+			if (ptr == base + MDSS_MDP_REG_CTL_LAYER(stage)) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			}
+	}
+
+End:
+	return ret;
+}
+
+/*
+ * Whitelist check for calibration access to DSPP registers: the DSPP
+ * base plus the PA, PCC, Gamut, GC, dither and (HW rev >= 103) six-zone/
+ * mem-color ranges.  Returns the permitted ops mask or 0.
+ */
+static int is_valid_calib_dspp_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	struct mdss_mdp_mixer *mixer;
+
+	for (counter = 0; counter < mdss_res->nmixers_intf; counter++) {
+		mixer = mdss_res->mixer_intf + counter;
+		base = mixer->dspp_base;
+
+		if (ptr == base) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* PA range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_PA_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_PA_BASE +
+						MDSS_MDP_PA_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* PCC range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_PCC_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_PCC_BASE +
+						MDSS_MDP_PCC_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Gamut range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_GAMUT_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_GAMUT_BASE +
+						MDSS_MDP_GAMUT_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* GC range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_GC_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_GC_BASE +
+					MDSS_MDP_GC_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Dither enable/disable */
+		} else if ((ptr == base + MDSS_MDP_REG_DSPP_DITHER_DEPTH)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Six zone and mem color */
+		} else if (mdss_res->mdp_rev >= MDSS_MDP_HW_REV_103 &&
+			(ptr >= base + MDSS_MDP_REG_DSPP_SIX_ZONE_BASE) &&
+			(ptr <= base + MDSS_MDP_REG_DSPP_SIX_ZONE_BASE +
+					MDSS_MDP_SIX_ZONE_SIZE +
+					MDSS_MDP_MEM_COL_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Whitelist check for calibration access to VIG pipe registers: op-mode,
+ * the SSPP source registers, QSEED2 sharpening, PA range, and (HW rev >=
+ * 103) the mem-color range.  Returns the permitted ops mask or 0.
+ */
+static int is_valid_calib_vig_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	struct mdss_mdp_pipe *pipe;
+
+	for (counter = 0; counter < mdss_res->nvig_pipes; counter++) {
+		pipe = mdss_res->vig_pipes + counter;
+		base = pipe->base;
+
+		if (ptr == base + MDSS_MDP_REG_VIG_OP_MODE) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* QSEED2 range */
+		} else if ((ptr >= base + MDSS_MDP_REG_VIG_QSEED2_SHARP) &&
+				(ptr <= base + MDSS_MDP_REG_VIG_QSEED2_SHARP +
+					MDSS_MDP_VIG_QSEED2_SHARP_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* PA range */
+		} else if ((ptr >= base + MDSS_MDP_REG_VIG_PA_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_VIG_PA_BASE +
+						MDSS_MDP_PA_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Mem color range */
+		} else if (mdss_res->mdp_rev >= MDSS_MDP_HW_REV_103 &&
+			(ptr >= base + MDSS_MDP_REG_VIG_MEM_COL_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_VIG_MEM_COL_BASE +
+						MDSS_MDP_MEM_COL_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Whitelist check for calibration access to RGB pipe registers.  Only
+ * the four SSPP source registers are accessible; returns READ|WRITE when
+ * @ptr matches one of them on any RGB pipe, else 0.
+ */
+static int is_valid_calib_rgb_addr(char __iomem *ptr)
+{
+	int idx;
+	struct mdss_mdp_pipe *pipe;
+	char __iomem *base;
+
+	for (idx = 0; idx < mdss_res->nrgb_pipes; idx++) {
+		pipe = mdss_res->rgb_pipes + idx;
+		base = pipe->base;
+
+		if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT ||
+		    ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR ||
+		    ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN ||
+		    ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE)
+			return MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	}
+
+	return 0;
+}
+
+/*
+ * Whitelist check for calibration access to DMA pipe registers.  Only
+ * the four SSPP source registers are accessible; returns READ|WRITE when
+ * @ptr matches one of them on any DMA pipe, else 0.
+ */
+static int is_valid_calib_dma_addr(char __iomem *ptr)
+{
+	int idx;
+	struct mdss_mdp_pipe *pipe;
+	char __iomem *base;
+
+	for (idx = 0; idx < mdss_res->ndma_pipes; idx++) {
+		pipe = mdss_res->dma_pipes + idx;
+		base = pipe->base;
+
+		if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT ||
+		    ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR ||
+		    ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN ||
+		    ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE)
+			return MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	}
+
+	return 0;
+}
+
+/*
+ * Whitelist check for calibration access to layer-mixer registers:
+ * LM op-mode, the GC LUT range, and the per-stage blend op / fg-alpha /
+ * bg-alpha registers.  Returns the permitted ops mask or 0.
+ */
+static int is_valid_calib_mixer_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	int stage = 0;
+	struct mdss_mdp_mixer *mixer;
+
+	for (counter = 0; counter < (mdss_res->nmixers_intf +
+					mdss_res->nmixers_wb); counter++) {
+		mixer = mdss_res->mixer_intf + counter;
+		base = mixer->base;
+
+		if (ptr == base + MDSS_MDP_REG_LM_OP_MODE) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* GC range */
+		} else if ((ptr >= base + MDSS_MDP_REG_LM_GC_LUT_BASE) &&
+			(ptr <= base + MDSS_MDP_REG_LM_GC_LUT_BASE +
+						MDSS_MDP_GC_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+
+		for (stage = 0; stage < TOTAL_BLEND_STAGES; stage++)
+			if (ptr == base + MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
+					   MDSS_MDP_REG_LM_BLEND_OP) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			} else if (ptr == base +
+					MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
+					MDSS_MDP_REG_LM_BLEND_FG_ALPHA) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			} else if (ptr == base +
+					MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
+					MDSS_MDP_REG_LM_BLEND_BG_ALPHA) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			}
+	}
+
+End:
+	return ret;
+}
+
+/*
+ * Top-level validator for userspace calibration register access.
+ * Rejects unaligned addresses, allows a few read-only global registers
+ * and the IGC DSPP/SSPP ranges directly, then dispatches to the per-block
+ * validators.  Returns the requested @operation bits that are permitted
+ * for @addr (0 means denied).
+ */
+static int is_valid_calib_addr(void *addr, u32 operation)
+{
+	int ret = 0;
+	char __iomem *ptr = addr;
+	char __iomem *mixer_base = mdss_res->mixer_intf->base;
+	char __iomem *ctl_base = mdss_res->ctl_off->base;
+	char __iomem *dspp_base = mdss_res->mixer_intf->dspp_base;
+
+	/* Registers are 32-bit; reject unaligned accesses outright. */
+	if ((uintptr_t) addr % 4) {
+		ret = 0;
+	} else if (ptr == mdss_res->mdss_io.base + MDSS_REG_HW_VERSION) {
+		ret = MDP_PP_OPS_READ;
+	} else if (ptr == (mdss_res->mdp_base + MDSS_MDP_REG_HW_VERSION) ||
+	    ptr == (mdss_res->mdp_base + MDSS_MDP_REG_DISP_INTF_SEL)) {
+		ret = MDP_PP_OPS_READ;
+	/* IGC DSPP range */
+	} else if (ptr >= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE) &&
+	    ptr <= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE +
+						MDSS_MDP_IGC_DSPP_SIZE)) {
+		ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	/* IGC SSPP range */
+	} else if (ptr >= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE) &&
+	    ptr <= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE +
+						MDSS_MDP_IGC_SSPP_SIZE)) {
+		ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	} else {
+		/* Probe each block whose register space could contain ptr. */
+		if (ptr >= dspp_base) {
+			ret = is_valid_calib_dspp_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (ptr >= ctl_base) {
+			ret = is_valid_calib_ctrl_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (mdss_res->vig_pipes &&
+		    ptr >= mdss_res->vig_pipes->base) {
+			ret = is_valid_calib_vig_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (mdss_res->rgb_pipes &&
+		    ptr >= mdss_res->rgb_pipes->base) {
+			ret = is_valid_calib_rgb_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (mdss_res->dma_pipes &&
+		    ptr >= mdss_res->dma_pipes->base) {
+			ret = is_valid_calib_dma_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (ptr >= mixer_base)
+			ret = is_valid_calib_mixer_addr(ptr);
+	}
+
+valid_addr:
+	return ret & operation;
+}
+
+/*
+ * Perform one validated calibration register read or write.  cfg->addr
+ * is an offset from the MDSS base; the access only proceeds when
+ * is_valid_calib_addr() permits the requested op.  On read, the value is
+ * returned in cfg->data and *copyback is set.  Returns 0 on success,
+ * -1 if the address/ops are not permitted.
+ */
+int mdss_mdp_calib_config(struct mdp_calib_config_data *cfg, u32 *copyback)
+{
+	int ret = -1;
+	void *ptr;
+
+	/* Calib addrs are always offsets from the MDSS base */
+	ptr = (void *)((unsigned long) cfg->addr) +
+		((uintptr_t) mdss_res->mdss_io.base);
+	if (is_valid_calib_addr(ptr, cfg->ops))
+		ret = 0;
+	else
+		return ret;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	if (cfg->ops & MDP_PP_OPS_READ) {
+		cfg->data = readl_relaxed(ptr);
+		*copyback = 1;
+		ret = 0;
+	} else if (cfg->ops & MDP_PP_OPS_WRITE) {
+		writel_relaxed(cfg->data, ptr);
+		ret = 0;
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
+/*
+ * Enter/exit calibration mode for @mfd: record the requested calib mask
+ * and snapshot the current backlight level (under bl_lock) so it can be
+ * restored afterwards.  Returns 0, or -EINVAL on missing state.
+ */
+int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
+				struct mdss_calib_cfg *cfg)
+{
+	if (!mdss_pp_res || !mfd)
+		return -EINVAL;
+	mutex_lock(&mdss_pp_mutex);
+	mfd->calib_mode = cfg->calib_mask;
+	mutex_lock(&mfd->bl_lock);
+	mfd->calib_mode_bl = mfd->bl_level;
+	mutex_unlock(&mfd->bl_lock);
+	mutex_unlock(&mdss_pp_mutex);
+	return 0;
+}
+
+/*
+ * Batched calibration access: the user buffer holds (offset, value)
+ * uint32 pairs.  Each offset (relative to the MDSS base) is validated,
+ * then read into / written from the paired value slot.  On READ the
+ * updated buffer is copied back and *copyback is set.
+ *
+ * Fixes over the previous version:
+ *  - ret was never set to 0 after a fully successful loop, so WRITE-only
+ *    requests reported failure on success;
+ *  - on a validation failure ret was -1, and "ret & MDP_PP_OPS_READ" is
+ *    true for -1, so copy_to_user still ran and overwrote ret with 0,
+ *    silently masking the error — copyback now happens only on success;
+ *  - the offset was cast through (unsigned int), truncating pointer
+ *    arithmetic on 64-bit kernels — now unsigned long;
+ *  - cfg->size must be a whole number of 8-byte records.
+ *
+ * Returns 0 on success, negative/-1 on failure.
+ */
+int mdss_mdp_calib_config_buffer(struct mdp_calib_config_buffer *cfg,
+						u32 *copyback)
+{
+	int ret = -1, counter;
+	uint32_t *buff = NULL, *buff_org = NULL;
+	void *ptr;
+	int i = 0;
+
+	if (!cfg) {
+		pr_err("Invalid buffer pointer\n");
+		return ret;
+	}
+
+	if (cfg->size == 0 || cfg->size > PAGE_SIZE ||
+	    (cfg->size % (sizeof(uint32_t) * 2))) {
+		pr_err("Invalid buffer size %d\n", cfg->size);
+		return ret;
+	}
+
+	counter = cfg->size / (sizeof(uint32_t) * 2);
+	buff_org = buff = kzalloc(cfg->size, GFP_KERNEL);
+	if (buff == NULL) {
+		pr_err("Config buffer allocation failed\n");
+		return ret;
+	}
+
+	if (copy_from_user(buff, cfg->buffer, cfg->size)) {
+		kfree(buff);
+		pr_err("config buffer copy failed\n");
+		return ret;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/* Assume success; any validation failure below re-flags it. */
+	ret = 0;
+	for (i = 0; i < counter; i++) {
+		/* Offsets are relative to the MDSS base; keep the
+		 * arithmetic in unsigned long so 64-bit kernels do not
+		 * truncate the pointer. */
+		ptr = (void *) (((unsigned long) *buff) +
+				(uintptr_t) mdss_res->mdss_io.base);
+
+		if (!is_valid_calib_addr(ptr, cfg->ops)) {
+			ret = -1;
+			pr_err("Address validation failed or access not permitted\n");
+			break;
+		}
+
+		buff++;
+		if (cfg->ops & MDP_PP_OPS_READ)
+			*buff = readl_relaxed(ptr);
+		else if (cfg->ops & MDP_PP_OPS_WRITE)
+			writel_relaxed(*buff, ptr);
+		buff++;
+	}
+
+	/* Copy results back only when every record was valid. */
+	if (!ret && (cfg->ops & MDP_PP_OPS_READ)) {
+		ret = copy_to_user(cfg->buffer, buff_org, cfg->size);
+		*copyback = 1;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	kfree(buff_org);
+	return ret;
+}
+
+/*
+ * Translate an SSPP pipe type into its pp-cache config block.
+ * Returns 0 on success, -EINVAL on a NULL output pointer or an unknown
+ * pipe type.
+ */
+static int sspp_cache_location(u32 pipe_type, enum pp_config_block *block)
+{
+	if (!block) {
+		pr_err("invalid params %pK\n", block);
+		return -EINVAL;
+	}
+
+	if (pipe_type == MDSS_MDP_PIPE_TYPE_VIG) {
+		*block = SSPP_VIG;
+	} else if (pipe_type == MDSS_MDP_PIPE_TYPE_RGB) {
+		*block = SSPP_RGB;
+	} else if (pipe_type == MDSS_MDP_PIPE_TYPE_DMA) {
+		*block = SSPP_DMA;
+	} else {
+		pr_err("invalid pipe type %d\n", pipe_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Stage per-pipe post-processing configuration requested through
+ * pipe->pp_cfg.config_ops: IGC LUT, histogram start/stop, histogram LUT,
+ * PA v2 and PCC.  For each feature it either caches the parameters via
+ * the versioned pp_ops hook (newer HW) or copies the payload from
+ * userspace into pipe->pp_res (legacy path).  On any failure all
+ * config_ops are cleared.  Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_pp_sspp_config(struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_histogram_start_req hist;
+	struct mdp_pp_cache_res cache_res;
+	u32 len = 0;
+	int ret = 0;
+
+	if (!pipe) {
+		pr_err("invalid params, pipe %pK\n", pipe);
+		return -EINVAL;
+	}
+
+	cache_res.mdss_pp_res = NULL;
+	cache_res.pipe_res = pipe;
+	ret = sspp_cache_location(pipe->type, &cache_res.block);
+	if (ret) {
+		pr_err("invalid cache res block for igc ret %d\n",
+			ret);
+		goto exit_fail;
+	}
+	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG)) {
+		len = pipe->pp_cfg.igc_cfg.len;
+		if (pp_ops[IGC].pp_set_config) {
+			ret = pp_igc_lut_cache_params(&pipe->pp_cfg.igc_cfg,
+						      &cache_res, false);
+			if (ret) {
+				pr_err("failed to cache igc params ret %d\n",
+					ret);
+				goto exit_fail;
+			}
+		} else if (len == IGC_LUT_ENTRIES) {
+			/* Legacy path: copy both IGC channels from user. */
+			ret = copy_from_user(pipe->pp_res.igc_c0_c1,
+					pipe->pp_cfg.igc_cfg.c0_c1_data,
+					sizeof(uint32_t) * len);
+			if (ret) {
+				pr_err("failed to copy the igc c0_c1 data\n");
+				ret = -EFAULT;
+				goto exit_fail;
+			}
+			ret = copy_from_user(pipe->pp_res.igc_c2,
+					pipe->pp_cfg.igc_cfg.c2_data,
+					sizeof(uint32_t) * len);
+			if (ret) {
+				ret = -EFAULT;
+				pr_err("failed to copy the igc c2 data\n");
+				goto exit_fail;
+			}
+			/* Repoint cfg at the kernel-side copies. */
+			pipe->pp_cfg.igc_cfg.c0_c1_data =
+							pipe->pp_res.igc_c0_c1;
+			pipe->pp_cfg.igc_cfg.c2_data = pipe->pp_res.igc_c2;
+		} else
+			pr_warn("invalid length of IGC len %d\n", len);
+	}
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_CFG) {
+		if (pipe->pp_cfg.hist_cfg.ops & MDP_PP_OPS_ENABLE) {
+			hist.block = pipe->pp_cfg.hist_cfg.block;
+			hist.frame_cnt =
+				pipe->pp_cfg.hist_cfg.frame_cnt;
+			hist.bit_mask = pipe->pp_cfg.hist_cfg.bit_mask;
+			hist.num_bins = pipe->pp_cfg.hist_cfg.num_bins;
+			mdss_mdp_hist_start(&hist);
+		} else if (pipe->pp_cfg.hist_cfg.ops &
+				MDP_PP_OPS_DISABLE) {
+			mdss_mdp_hist_stop(pipe->pp_cfg.hist_cfg.block);
+		}
+	}
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+		if (!pp_ops[HIST_LUT].pp_set_config) {
+			len = pipe->pp_cfg.hist_lut_cfg.len;
+			if (len != ENHIST_LUT_ENTRIES) {
+				ret = -EINVAL;
+				pr_err("Invalid hist lut len: %d\n", len);
+				goto exit_fail;
+			}
+			ret = copy_from_user(pipe->pp_res.hist_lut,
+					pipe->pp_cfg.hist_lut_cfg.data,
+					sizeof(uint32_t) * len);
+			if (ret) {
+				ret = -EFAULT;
+				pr_err("failed to copy the hist lut\n");
+				goto exit_fail;
+			}
+			pipe->pp_cfg.hist_lut_cfg.data = pipe->pp_res.hist_lut;
+		} else {
+			ret = pp_hist_lut_cache_params(
+					&pipe->pp_cfg.hist_lut_cfg,
+					&cache_res);
+			if (ret) {
+				pr_err("Failed to cache Hist LUT params on pipe %d, ret %d\n",
+					pipe->num, ret);
+				goto exit_fail;
+			}
+		}
+	}
+	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_V2_CFG) &&
+	    (pp_ops[PA].pp_set_config)) {
+		ret = pp_pa_cache_params(&pipe->pp_cfg.pa_v2_cfg_data,
+					 &cache_res);
+		if (ret) {
+			pr_err("Failed to cache PA params on pipe %d, ret %d\n",
+				pipe->num, ret);
+			goto exit_fail;
+		}
+	}
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG
+	    && pp_ops[PCC].pp_set_config) {
+		ret = pp_pcc_cache_params(&pipe->pp_cfg.pcc_cfg_data,
+					  &cache_res);
+		if (ret) {
+			pr_err("failed to cache the pcc params ret %d\n", ret);
+			goto exit_fail;
+		}
+	}
+exit_fail:
+	if (ret) {
+		pr_err("VIG PP setup failed on pipe %d type %d ret %d\n",
+				pipe->num, pipe->type, ret);
+		pipe->pp_cfg.config_ops = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * pp_update_pcc_pipe_setup() - program the cached PCC config of a source pipe.
+ * @pipe: pipe whose pp_cfg.pcc_cfg_data is to be written to hardware.
+ * @location: SSPP block type (SSPP_VIG, SSPP_RGB or SSPP_DMA).
+ *
+ * Resolves the per-block PCC register offset from mdata->pp_block_off and
+ * delegates the register programming to pp_ops[PCC].pp_set_config().
+ * Returns 0 on success, -EINVAL for a NULL pipe, an unknown location, or an
+ * unconfigured offset (U32_MAX acts as the "not present" sentinel).
+ */
+static int pp_update_pcc_pipe_setup(struct mdss_mdp_pipe *pipe, u32 location)
+{
+	int ret = 0;
+	struct mdss_data_type *mdata = NULL;
+	char __iomem *pipe_base = NULL;
+
+	if (!pipe) {
+		pr_err("invalid param pipe %pK\n", pipe);
+		return -EINVAL;
+	}
+
+	mdata = mdss_mdp_get_mdata();
+	pipe_base = pipe->base;
+	switch (location) {
+	case SSPP_VIG:
+		/* U32_MAX offset means the target has no PCC block here */
+		if (mdata->pp_block_off.vig_pcc_off == U32_MAX) {
+			pr_err("invalid offset for vig pcc %d\n",
+				U32_MAX);
+			ret = -EINVAL;
+			goto exit_sspp_setup;
+		}
+		pipe_base += mdata->pp_block_off.vig_pcc_off;
+		break;
+	case SSPP_RGB:
+		if (mdata->pp_block_off.rgb_pcc_off == U32_MAX) {
+			pr_err("invalid offset for rgb pcc %d\n",
+				U32_MAX);
+			ret = -EINVAL;
+			goto exit_sspp_setup;
+		}
+		pipe_base += mdata->pp_block_off.rgb_pcc_off;
+		break;
+	case SSPP_DMA:
+		if (mdata->pp_block_off.dma_pcc_off == U32_MAX) {
+			pr_err("invalid offset for dma pcc %d\n",
+				U32_MAX);
+			ret = -EINVAL;
+			goto exit_sspp_setup;
+		}
+		pipe_base += mdata->pp_block_off.dma_pcc_off;
+		break;
+	default:
+		pr_err("invalid location for PCC %d\n",
+			location);
+		ret = -EINVAL;
+		goto exit_sspp_setup;
+	}
+	pp_ops[PCC].pp_set_config(pipe_base, &pipe->pp_res.pp_sts,
+			&pipe->pp_cfg.pcc_cfg_data, location);
+exit_sspp_setup:
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_get_version() - query the config-struct version of a pp feature.
+ * @version: in/out; caller sets pp_feature, driver fills version_info.
+ *
+ * Defaults to mdp_pp_legacy when the feature has no pp_get_version() op.
+ * Returns 0 on success, -EINVAL for a NULL pointer or out-of-range feature,
+ * or the error propagated from the feature's pp_get_version() callback.
+ */
+int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version)
+{
+	int ret = 0;
+	u32 ver_info = mdp_pp_legacy;
+
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		ret = -EINVAL;
+		goto exit_version;
+	}
+	if (version->pp_feature >= PP_FEATURE_MAX) {
+		pr_err("invalid feature passed %d\n", version->pp_feature);
+		ret = -EINVAL;
+		goto exit_version;
+	}
+	if (pp_ops[version->pp_feature].pp_get_version)
+		ret = pp_ops[version->pp_feature].pp_get_version(&ver_info);
+	if (ret)
+		pr_err("failed to query version for feature %d ret %d\n",
+			version->pp_feature, ret);
+	else
+		version->version_info = ver_info;
+exit_version:
+	return ret;
+}
+
+/*
+ * mdss_mdp_hist_irq_set_mask() - enable histogram interrupt bits.
+ * @irq: bitmask of interrupt bits to OR into the HIST interrupt-enable reg.
+ *
+ * Read-modify-write of MDSS_MDP_REG_HIST_INTR_EN under hist_intr.lock.
+ */
+static void mdss_mdp_hist_irq_set_mask(u32 irq)
+{
+	u32 mask;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	spin_lock(&mdata->hist_intr.lock);
+	mask = readl_relaxed(mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	mask |= irq;
+	pr_debug("interrupt mask being set %x irq updated %x\n", mask, irq);
+	writel_relaxed(mask, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	spin_unlock(&mdata->hist_intr.lock);
+}
+
+/*
+ * mdss_mdp_hist_irq_clear_mask() - disable histogram interrupt bits.
+ * @irq: bitmask of interrupt bits to clear from the HIST interrupt-enable reg.
+ *
+ * Counterpart of mdss_mdp_hist_irq_set_mask(); same lock and register.
+ */
+static void mdss_mdp_hist_irq_clear_mask(u32 irq)
+{
+	u32 mask;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	spin_lock(&mdata->hist_intr.lock);
+	mask = readl_relaxed(mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	mask = mask & ~irq;
+	pr_debug("interrupt mask being cleared %x irq cleared %x\n", mask, irq);
+	writel_relaxed(mask, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	spin_unlock(&mdata->hist_intr.lock);
+}
+
+/*
+ * mdss_mdp_hist_intr_notify() - notify userspace when a display's histogram
+ * data is fully collected.
+ * @disp: logical display number to match against each DSPP's disp_num.
+ *
+ * Counts the DSPPs attached to @disp and how many of them have reached
+ * HIST_READY; only when every attached DSPP is ready does it bump the
+ * hist_events counter and poke the sysfs dirent so a polling client wakes.
+ * NOTE(review): ctl is taken from the last matching DSPP — assumes all DSPPs
+ * of one display share the same ctl; verify against mixer setup.
+ */
+static void mdss_mdp_hist_intr_notify(u32 disp)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct pp_hist_col_info *hist_info = NULL;
+	int i = 0, disp_count = 0, hist_count = 0;
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	for (i = 0; i < mdata->ndspp; i++) {
+		hist_info = &mdss_pp_res->dspp_hist[i];
+		spin_lock(&hist_info->hist_lock);
+		if (hist_info->disp_num == disp) {
+			disp_count++;
+			ctl = hist_info->ctl;
+			if (hist_info->col_state == HIST_READY)
+				hist_count++;
+		}
+		spin_unlock(&hist_info->hist_lock);
+	}
+	/* bail if some DSPP of this display is not ready yet (or none found) */
+	if (disp_count != hist_count || !ctl)
+		return;
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data is NULL\n");
+		return;
+	}
+	mdp5_data->hist_events++;
+	sysfs_notify_dirent(mdp5_data->hist_event_sd);
+}
+
+/*
+ * mdss_mdp_copy_layer_pp_info() - deep-copy a layer's pp config from user.
+ * @layer: input layer whose pp_info currently points at user memory.
+ *
+ * Allocates a kernel mdp_overlay_pp_params, copies the top-level struct from
+ * user space, then copies each feature payload (IGC, Hist LUT, PA, PCC) that
+ * config_ops enables; payloads of disabled features are NULLed. On success
+ * layer->pp_info is repointed at the kernel copy — ownership passes to the
+ * caller, who must release it with mdss_mdp_free_layer_pp_info().
+ * On failure every payload allocated so far is unwound (see the reverse-order
+ * exit labels) and the original user pointer in layer->pp_info is untouched.
+ * Returns 0, -EFAULT, -ENOMEM, or an error from a payload copy helper.
+ */
+int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer)
+{
+	struct mdp_overlay_pp_params *pp_info = NULL;
+	int ret = 0;
+	uint32_t ops;
+
+	if (!layer) {
+		pr_err("invalid layer pointer passed %pK\n", layer);
+		return -EFAULT;
+	}
+
+	pp_info = kmalloc(sizeof(struct mdp_overlay_pp_params),
+			GFP_KERNEL);
+	if (!pp_info)
+		return -ENOMEM;
+
+	ret = copy_from_user(pp_info, layer->pp_info,
+			sizeof(struct mdp_overlay_pp_params));
+	if (ret) {
+		pr_err("layer list copy from user failed, pp_info = %pK\n",
+			layer->pp_info);
+		ret = -EFAULT;
+		goto exit_pp_info;
+	}
+
+	ops = pp_info->config_ops;
+	if (ops & MDP_OVERLAY_PP_IGC_CFG) {
+		ret = pp_copy_layer_igc_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy IGC payload, ret = %d\n", ret);
+			goto exit_pp_info;
+		}
+	} else {
+		pp_info->igc_cfg.cfg_payload = NULL;
+	}
+	if (ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+		ret = pp_copy_layer_hist_lut_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy Hist LUT payload, ret = %d\n",
+				ret);
+			goto exit_igc;
+		}
+	} else {
+		pp_info->hist_lut_cfg.cfg_payload = NULL;
+	}
+	if (ops & MDP_OVERLAY_PP_PA_V2_CFG) {
+		ret = pp_copy_layer_pa_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy PA payload, ret = %d\n", ret);
+			goto exit_hist_lut;
+		}
+	} else {
+		pp_info->pa_v2_cfg_data.cfg_payload = NULL;
+	}
+	if (ops & MDP_OVERLAY_PP_PCC_CFG) {
+		ret = pp_copy_layer_pcc_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy PCC payload, ret = %d\n", ret);
+			goto exit_pa;
+		}
+	} else {
+		pp_info->pcc_cfg_data.cfg_payload = NULL;
+	}
+
+	layer->pp_info = pp_info;
+
+	return ret;
+
+/* unwind in reverse order of allocation */
+exit_pa:
+	kfree(pp_info->pa_v2_cfg_data.cfg_payload);
+exit_hist_lut:
+	kfree(pp_info->hist_lut_cfg.cfg_payload);
+exit_igc:
+	kfree(pp_info->igc_cfg.cfg_payload);
+exit_pp_info:
+	kfree(pp_info);
+	return ret;
+}
+
+/*
+ * mdss_mdp_free_layer_pp_info() - release a kernel pp_info made by
+ * mdss_mdp_copy_layer_pp_info() (all feature payloads plus the struct itself)
+ * and NULL the layer's pointer. Safe to call with a NULL layer or pp_info.
+ */
+void mdss_mdp_free_layer_pp_info(struct mdp_input_layer *layer)
+{
+	struct mdp_overlay_pp_params *pp_info = (layer) ?
+		(struct mdp_overlay_pp_params *) layer->pp_info : NULL;
+
+	if (!pp_info)
+		return;
+
+	kfree(pp_info->igc_cfg.cfg_payload);
+	kfree(pp_info->hist_lut_cfg.cfg_payload);
+	kfree(pp_info->pa_v2_cfg_data.cfg_payload);
+	kfree(pp_info->pcc_cfg_data.cfg_payload);
+	kfree(pp_info);
+	layer->pp_info = NULL;
+}
+
+/*
+ * mdss_mdp_mfd_valid_dspp() - check that every mixer driving @mfd has a DSPP.
+ *
+ * True only when the ctl exists, the left mixer's index is below ndspp, and
+ * (if a right mixer is attached) its index is below ndspp too.
+ * Returns an int used as a boolean by callers.
+ */
+int mdss_mdp_mfd_valid_dspp(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	int valid_dspp = false;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ctl = mfd_to_ctl(mfd);
+	valid_dspp = (ctl) && (ctl->mixer_left) &&
+			(ctl->mixer_left->num < mdata->ndspp);
+	if ((ctl) && (ctl->mixer_right))
+		valid_dspp &= (ctl->mixer_right->num < mdata->ndspp);
+	return valid_dspp;
+}
+
+/*
+ * mdss_mdp_mfd_valid_ad() - check that every mixer driving @mfd supports
+ * assertive display (AD); structurally identical to
+ * mdss_mdp_mfd_valid_dspp() but tested against nad_cfgs.
+ */
+static int mdss_mdp_mfd_valid_ad(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	int valid_ad = false;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ctl = mfd_to_ctl(mfd);
+	valid_ad = (ctl) && (ctl->mixer_left) &&
+			(ctl->mixer_left->num < mdata->nad_cfgs);
+	if ((ctl) && (ctl->mixer_right))
+		valid_ad &= (ctl->mixer_right->num < mdata->nad_cfgs);
+	return valid_ad;
+}
+
+/*
+ * pp_mfd_release_all() - tear down all pp state bound to a framebuffer.
+ *
+ * Releases AD (if the target has AD blocks), stops the display histogram
+ * (if the display owns DSPPs), clears the cached pp status for the display
+ * and resets backlight scale to 1024 (unity). Displays whose index has no
+ * logical pp block are silently skipped. Returns the AD release result.
+ */
+static int pp_mfd_release_all(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+
+	if (!mfd || !mdata) {
+		pr_err("Invalid mfd %pK mdata %pK\n", mfd, mdata);
+		return -EPERM;
+	}
+
+	if (mfd->index >= (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0))
+		return ret;
+
+	if (mdata->nad_cfgs) {
+		ret = pp_mfd_ad_release_all(mfd);
+		if (ret)
+			pr_err("ad release all failed on disp %d, ret %d\n",
+				mfd->index, ret);
+	}
+
+	if (mdss_mdp_mfd_valid_dspp(mfd))
+		mdss_mdp_hist_stop(mfd->index + MDP_LOGICAL_BLOCK_DISP_0);
+	memset(&mdss_pp_res->pp_disp_sts[mfd->index], 0,
+			sizeof(mdss_pp_res->pp_disp_sts[mfd->index]));
+	mfd->bl_scale = 1024;
+
+	return ret;
+}
+
+/*
+ * pp_mfd_ad_release_all() - detach assertive display state from @mfd.
+ *
+ * No-ops (returns 0) when AD is absent on the device or never bound to this
+ * mfd. Otherwise it clears the AD enable/state under ad->lock, cancels any
+ * pending calc work, and unregisters the vsync handler from the ctl.
+ * Returns 0 or a negative error from mdss_mdp_get_ad().
+ */
+static int pp_mfd_ad_release_all(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_ad_info *ad = NULL;
+	int ret = 0;
+
+	if (!mdata || !mfd) {
+		pr_err("invalid params mdata %pK mfd %pK\n", mdata, mfd);
+		return -EINVAL;
+	}
+	if (!mdata->ad_calc_wq)
+		return 0;
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	/* -ENODEV/-EPERM mean "no AD here" — not an error for release */
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret) {
+		pr_err("failed to get ad_info ret %d\n", ret);
+		return ret;
+	}
+	if (!ad->mfd)
+		return 0;
+
+	mutex_lock(&ad->lock);
+	ad->sts &= ~PP_STS_ENABLE;
+	ad->mfd = NULL;
+	ad->bl_mfd = NULL;
+	ad->state = 0;
+	mutex_unlock(&ad->lock);
+	/* cancel outside the lock; calc_work may itself take ad->lock */
+	cancel_work_sync(&ad->calc_work);
+
+	ctl = mfd_to_ctl(mfd);
+	if (ctl && ctl->ops.remove_vsync_handler)
+		ctl->ops.remove_vsync_handler(ctl, &ad->handle);
+
+	return ret;
+}
+
+/*
+ * pp_validate_dspp_mfd_block() - sanity-check a (mfd, logical block) pair
+ * before a DSPP pp config is applied.
+ *
+ * Verifies the mfd exists, all its mixers have DSPPs, the block id lies in
+ * [MDP_LOGICAL_BLOCK_DISP_0, MDP_BLOCK_MAX), and the block maps to this
+ * mfd's own display index. Returns 0, -EINVAL or -EPERM.
+ */
+static inline int pp_validate_dspp_mfd_block(struct msm_fb_data_type *mfd,
+					int block)
+{
+	if (!mfd)
+		return -EINVAL;
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_err("invalid display num %d for PP config\n", mfd->index);
+		return -EPERM;
+	}
+
+	if ((block < MDP_LOGICAL_BLOCK_DISP_0) ||
+			(block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid block %d\n", block);
+		return -EINVAL;
+	}
+
+	if ((block - MDP_LOGICAL_BLOCK_DISP_0) != mfd->index) {
+		pr_err("PP block %d does not match corresponding mfd index %d\n",
+				block, mfd->index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * pp_get_driver_ops() - bind the revision-specific pp driver ops table.
+ * @ops: filled in by the version-specific pp_get_driver_ops_* helper.
+ *
+ * Selects the v1.7 or v3 implementation based on mdata->mdp_rev and stashes
+ * the returned private config in mdss_pp_res. Unknown revisions get a
+ * zeroed ops table (legacy path) and still return 0; only a failed helper
+ * returns -EINVAL.
+ */
+static int pp_get_driver_ops(struct mdp_pp_driver_ops *ops)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+	void *pp_cfg = NULL;
+
+	switch (mdata->mdp_rev) {
+	case MDSS_MDP_HW_REV_107:
+	case MDSS_MDP_HW_REV_107_1:
+	case MDSS_MDP_HW_REV_107_2:
+	case MDSS_MDP_HW_REV_114:
+	case MDSS_MDP_HW_REV_115:
+	case MDSS_MDP_HW_REV_116:
+		pp_cfg = pp_get_driver_ops_v1_7(ops);
+		if (IS_ERR_OR_NULL(pp_cfg))
+			ret = -EINVAL;
+		else
+			mdss_pp_res->pp_data_v1_7 = pp_cfg;
+		break;
+	case MDSS_MDP_HW_REV_300:
+	case MDSS_MDP_HW_REV_301:
+		pp_cfg = pp_get_driver_ops_v3(ops);
+		if (IS_ERR_OR_NULL(pp_cfg)) {
+			ret = -EINVAL;
+		} else {
+			mdss_pp_res->pp_data_v1_7 = pp_cfg;
+			/* Currently all caching data is used from v17 for V3
+			 * hence setting the pointer to NULL. Will be used if we
+			 * have to add any caching specific to V3.
+			 */
+			mdss_pp_res->pp_data_v3 = NULL;
+		}
+		break;
+	default:
+		memset(ops, 0, sizeof(struct mdp_pp_driver_ops));
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_ppb_setup() - program dither in the ping-pong block (PPB) of a mixer.
+ *
+ * When the display's dirty flags request a dither update and a DITHER
+ * pp_set_config op exists, writes the cached per-display dither config at
+ * the mixer's pingpong base and, on success, sets the matching CTL flush
+ * bit (BIT(13..15) for layermixers 0-2, BIT(21) for layermixer 3). A
+ * pp_set_config failure means the PPB has no dither block; it is swallowed
+ * (ret forced back to 0) so no flush bit is programmed. Returns 0 or
+ * -EINVAL for bad inputs.
+ */
+static int pp_ppb_setup(struct mdss_mdp_mixer *mixer)
+{
+	struct pp_sts_type *pp_sts;
+	struct mdss_mdp_ctl *ctl;
+	char __iomem *addr;
+	u32 flags, disp_num;
+	int ret = 0;
+
+	if (!mixer || !mixer->ctl || !mixer->ctl->mfd) {
+		pr_err("invalid parameters, mixer %pK ctl %pK mfd %pK\n",
+			mixer, (mixer ? mixer->ctl : NULL),
+			(mixer ? (mixer->ctl ? mixer->ctl->mfd : NULL) : NULL));
+		return -EINVAL;
+	}
+	ctl = mixer->ctl;
+	disp_num = ctl->mfd->index;
+
+	if (disp_num < MDSS_BLOCK_DISP_NUM)
+		flags = mdss_pp_res->pp_disp_flags[disp_num];
+	else
+		flags = 0;
+	if ((flags & PP_FLAGS_DIRTY_DITHER)) {
+		if (pp_ops[DITHER].pp_set_config) {
+			pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+			addr = mixer->pingpong_base;
+			/* if dither is supported in PPB function will
+			 * return 0. Failure will indicate that there
+			 * is no DITHER in PPB. In case of error skip the
+			 * programming of CTL flush bits for dither flush.
+			 */
+			ret = pp_ops[DITHER].pp_set_config(addr, pp_sts,
+			      &mdss_pp_res->dither_disp_cfg[disp_num], PPB);
+			if (!ret) {
+				switch (mixer->num) {
+				case MDSS_MDP_INTF_LAYERMIXER0:
+				case MDSS_MDP_INTF_LAYERMIXER1:
+				case MDSS_MDP_INTF_LAYERMIXER2:
+					ctl->flush_bits |= BIT(13) <<
+						mixer->num;
+					break;
+				case MDSS_MDP_INTF_LAYERMIXER3:
+					ctl->flush_bits |= BIT(21);
+					break;
+				}
+			}
+			ret = 0;
+		}
+	}
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.h b/drivers/video/fbdev/msm/mdss_mdp_pp.h
new file mode 100644
index 0000000..46ec80f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014-2015, 2017, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_PP_DEBUG_H
+#define MDSS_MDP_PP_DEBUG_H
+
+#include <linux/msm_mdp.h>
+
+#define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
+
+/* PP STS related flags */
+#define PP_STS_ENABLE 0x1
+#define PP_STS_GAMUT_FIRST 0x2
+#define PP_STS_PA_LUT_FIRST 0x4
+
+#define PP_STS_PA_HUE_MASK 0x2
+#define PP_STS_PA_SAT_MASK 0x4
+#define PP_STS_PA_VAL_MASK 0x8
+#define PP_STS_PA_CONT_MASK 0x10
+#define PP_STS_PA_MEM_PROTECT_EN 0x20
+#define PP_STS_PA_MEM_COL_SKIN_MASK 0x40
+#define PP_STS_PA_MEM_COL_FOL_MASK 0x80
+#define PP_STS_PA_MEM_COL_SKY_MASK 0x100
+#define PP_STS_PA_SIX_ZONE_HUE_MASK 0x200
+#define PP_STS_PA_SIX_ZONE_SAT_MASK 0x400
+#define PP_STS_PA_SIX_ZONE_VAL_MASK 0x800
+#define PP_STS_PA_SAT_ZERO_EXP_EN 0x1000
+#define PP_STS_PA_MEM_PROT_HUE_EN 0x2000
+#define PP_STS_PA_MEM_PROT_SAT_EN 0x4000
+#define PP_STS_PA_MEM_PROT_VAL_EN 0x8000
+#define PP_STS_PA_MEM_PROT_CONT_EN 0x10000
+#define PP_STS_PA_MEM_PROT_BLEND_EN 0x20000
+#define PP_STS_PA_MEM_PROT_SIX_EN 0x40000
+
+/* Demo mode macros */
+#define MDSS_SIDE_NONE 0
+#define MDSS_SIDE_LEFT 1
+#define MDSS_SIDE_RIGHT 2
+/* size calculated for c0,c1_c2 for 4 tables */
+#define GAMUT_COLOR_COEFF_SIZE_V1_7 (2 * MDP_GAMUT_TABLE_V1_7_SZ * 4)
+/* 16 entries for c0,c1,c2 */
+#define GAMUT_SCALE_OFFSET_SIZE_V1_7 (3 * MDP_GAMUT_SCALE_OFF_SZ)
+#define GAMUT_TOTAL_TABLE_SIZE_V1_7 (GAMUT_COLOR_COEFF_SIZE_V1_7 + \
+ GAMUT_SCALE_OFFSET_SIZE_V1_7)
+
+#define GAMUT_T0_SIZE 125
+#define GAMUT_T1_SIZE 100
+#define GAMUT_T2_SIZE 80
+#define GAMUT_T3_SIZE 100
+#define GAMUT_T4_SIZE 100
+#define GAMUT_T5_SIZE 80
+#define GAMUT_T6_SIZE 64
+#define GAMUT_T7_SIZE 80
+#define GAMUT_TOTAL_TABLE_SIZE (GAMUT_T0_SIZE + GAMUT_T1_SIZE + \
+ GAMUT_T2_SIZE + GAMUT_T3_SIZE + GAMUT_T4_SIZE + \
+ GAMUT_T5_SIZE + GAMUT_T6_SIZE + GAMUT_T7_SIZE)
+
+/* Total 5 QSEED3 filters: Direction filter + Y plane cir and sep + UV plane
+ * cir and sep filters
+ */
+#define QSEED3_FILTERS 5
+
+#define QSEED3_LUT_REGIONS 4
+
+/* Opmode register owners: a pp opmode is either in a VIG pipe or a DSPP. */
+enum pp_block_opmodes {
+	PP_OPMODE_VIG = 1,
+	PP_OPMODE_DSPP,
+	PP_OPMODE_MAX
+};
+
+/*
+ * Hardware block a pp feature is being configured for: source pipes
+ * (RGB/DMA/VIG), destination DSPP, layer mixer (LM) or ping-pong block (PPB).
+ */
+enum pp_config_block {
+	SSPP_RGB = 1,
+	SSPP_DMA,
+	SSPP_VIG,
+	DSPP,
+	LM,
+	PPB
+};
+
+/*
+ * Per-feature callback set: read back config, program config into registers
+ * and report the supported config-struct version. Any op may be NULL when
+ * the revision does not implement it.
+ */
+struct mdp_pp_feature_ops {
+	u32 feature;
+	int (*pp_get_config)(char __iomem *base_addr, void *cfg_data,
+			u32 block_type, u32 disp_num);
+	int (*pp_set_config)(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type);
+	int (*pp_get_version)(u32 *version);
+};
+
+/*
+ * Revision-specific driver vtable filled by pp_get_driver_ops_*():
+ * one mdp_pp_feature_ops per feature plus misc helpers (opmode encoding,
+ * histogram offsets/ISR info, SSPP-histogram capability, gamut clock gating).
+ */
+struct mdp_pp_driver_ops {
+	struct mdp_pp_feature_ops pp_ops[PP_FEATURE_MAX];
+	void (*pp_opmode_config)(int location, struct pp_sts_type *pp_sts,
+			u32 *opmode, int side);
+	int (*get_hist_offset)(u32 block, u32 *ctl_off);
+	int (*get_hist_isr_info)(u32 *isr_mask);
+	bool (*is_sspp_hist_supp)(void);
+	void (*gamut_clk_gate_en)(char __iomem *base_addr);
+};
+
+/*
+ * v1.7 per-display cache backing store: LUT tables plus one cached
+ * feature-payload struct of each kind per logical display. Referenced
+ * through mdss_pp_res_type.pp_data_v1_7.
+ */
+struct mdss_pp_res_type_v1_7 {
+	u32 pgc_lm_table_c0[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_lm_table_c1[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_lm_table_c2[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_table_c0[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_table_c1[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_table_c2[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 igc_table_c0_c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 igc_table_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 hist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
+	u32 six_zone_lut_p0[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	u32 six_zone_lut_p1[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	struct mdp_pgc_lut_data_v1_7 pgc_dspp_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data_v1_7 pgc_lm_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_igc_lut_data_v1_7 igc_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_hist_lut_data_v1_7 hist_lut_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_dither_data_v1_7 dither_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_gamut_data_v1_7 gamut_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pcc_data_v1_7 pcc_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pa_data_v1_7 pa_v17_data[MDSS_BLOCK_DISP_NUM];
+};
+
+/*
+ * Top-level pp resource cache: per-display dirty flags, the legacy cached
+ * configs and LUT storage for every feature, per-display status bits, the
+ * DSPP histogram state array, and the revision-specific data pointers.
+ */
+struct mdss_pp_res_type {
+	/* logical info */
+	u32 pp_disp_flags[MDSS_BLOCK_DISP_NUM];
+	u32 igc_lut_c0c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 igc_lut_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	struct mdp_ar_gc_lut_data
+		gc_lut_r[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	struct mdp_ar_gc_lut_data
+		gc_lut_g[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	struct mdp_ar_gc_lut_data
+		gc_lut_b[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	u32 enhist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
+	struct mdp_pa_cfg pa_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pa_v2_cfg_data pa_v2_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	u32 six_zone_lut_curve_p0[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	u32 six_zone_lut_curve_p1[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	struct mdp_pcc_cfg_data pcc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_igc_lut_data igc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data argc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data pgc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_hist_lut_data enhist_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_dither_cfg_data dither_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_gamut_cfg_data gamut_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	uint16_t gamut_tbl[MDSS_BLOCK_DISP_NUM][GAMUT_TOTAL_TABLE_SIZE * 3];
+	u32 hist_data[MDSS_BLOCK_DISP_NUM][HIST_V_SIZE];
+	struct pp_sts_type pp_disp_sts[MDSS_BLOCK_DISP_NUM];
+	/* physical info */
+	struct pp_hist_col_info *dspp_hist;
+	/*
+	 * The pp_data_v1_7 will be a pointer to newer MDP revisions of the
+	 * pp_res, which will hold the cfg_payloads of each feature in a single
+	 * struct.
+	 */
+	void *pp_data_v1_7;
+	void *pp_data_v3;
+};
+
+void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops);
+void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops);
+
+
+/*
+ * pp_sts_set_split_bits() - replace only the split-display bits of a pp
+ * status word, leaving all other flags intact.
+ */
+static inline void pp_sts_set_split_bits(u32 *sts, u32 bits)
+{
+	u32 tmp = *sts;
+
+	tmp &= ~MDSS_PP_SPLIT_MASK;
+	tmp |= bits & MDSS_PP_SPLIT_MASK;
+	*sts = tmp;
+}
+
+/*
+ * pp_sts_is_enabled() - is a pp feature enabled for the given display side?
+ * @sts: status word (PP_STS_ENABLE plus optional split-mode bits).
+ * @side: MDSS_SIDE_NONE/LEFT/RIGHT.
+ *
+ * Returns true only when PP_STS_ENABLE is set and the split-mode bits do not
+ * exclude @side.
+ */
+static inline bool pp_sts_is_enabled(u32 sts, int side)
+{
+	bool ret = false;
+	/*
+	 * If there are no sides, or if there are no split mode bits set, the
+	 * side can't be disabled via split mode.
+	 *
+	 * Otherwise, if the side being checked opposes the split mode
+	 * configuration, the side is disabled.
+	 */
+	if ((side == MDSS_SIDE_NONE) || !(sts & MDSS_PP_SPLIT_MASK))
+		ret = true;
+	else if ((sts & MDSS_PP_SPLIT_RIGHT_ONLY) && (side == MDSS_SIDE_RIGHT))
+		ret = true;
+	else if ((sts & MDSS_PP_SPLIT_LEFT_ONLY) && (side == MDSS_SIDE_LEFT))
+		ret = true;
+
+	return ret && (sts & PP_STS_ENABLE);
+}
+
+/* Debug related functions */
+void pp_print_lut(void *data, int size, char *tab, uint32_t type);
+void pp_print_uint16_lut(uint16_t *data, int size, char *tab);
+void pp_print_pcc_coeff(struct mdp_pcc_coeff *pcc_coeff, int tab_depth);
+void pp_print_pcc_cfg_data(struct mdp_pcc_cfg_data *pcc_data, int tab_depth);
+void pp_print_csc_cfg(struct mdp_csc_cfg *data, int tab_depth);
+void pp_print_csc_cfg_data(struct mdp_csc_cfg_data *data, int tab_depth);
+void pp_print_igc_lut_data(struct mdp_igc_lut_data *data, int tab_depth);
+void pp_print_ar_gc_lut_data(struct mdp_ar_gc_lut_data *data, int tab_depth);
+void pp_print_pgc_lut_data(struct mdp_pgc_lut_data *data, int tab_depth);
+void pp_print_hist_lut_data(struct mdp_hist_lut_data *data, int tab_depth);
+void pp_print_lut_cfg_data(struct mdp_lut_cfg_data *data, int tab_depth);
+void pp_print_qseed_cfg(struct mdp_qseed_cfg *data, int tab_depth);
+void pp_print_qseed_cfg_data(struct mdp_qseed_cfg_data *data, int tab_depth);
+void pp_print_pa_cfg(struct mdp_pa_cfg *data, int tab_depth);
+void pp_print_pa_cfg_data(struct mdp_pa_cfg_data *data, int tab_depth);
+void pp_print_mem_col_cfg(struct mdp_pa_mem_col_cfg *data, int tab_depth);
+void pp_print_pa_v2_data(struct mdp_pa_v2_data *data, int tab_depth);
+void pp_print_pa_v2_cfg_data(struct mdp_pa_v2_cfg_data *data, int tab_depth);
+void pp_print_dither_cfg_data(struct mdp_dither_cfg_data *data, int tab_depth);
+void pp_print_gamut_cfg_data(struct mdp_gamut_cfg_data *data, int tab_depth);
+void pp_print_ad_init(struct mdss_ad_init *data, int tab_depth);
+void pp_print_ad_cfg(struct mdss_ad_cfg *data, int tab_depth);
+void pp_print_ad_init_cfg(struct mdss_ad_init_cfg *data, int tab_depth);
+void pp_print_ad_input(struct mdss_ad_input *data, int tab_depth);
+void pp_print_histogram_cfg(struct mdp_histogram_cfg *data, int tab_depth);
+void pp_print_sharp_cfg(struct mdp_sharp_cfg *data, int tab_depth);
+void pp_print_calib_config_data(struct mdp_calib_config_data *data,
+ int tab_depth);
+void pp_print_calib_config_buffer(struct mdp_calib_config_buffer *data,
+ int tab_depth);
+void pp_print_calib_dcm_state(struct mdp_calib_dcm_state *data, int tab_depth);
+void pp_print_mdss_calib_cfg(struct mdss_calib_cfg *data, int tab_depth);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
new file mode 100644
index 0000000..ade3add
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -0,0 +1,1503 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_cache_config.h"
+
+/* bit position of the c1 component within a packed c0_c1 IGC entry */
+#define IGC_C1_SHIFT	16
+/* Default inverse-gamma (IGC) LUT for BT.601 content.
+ * NOTE(review): entries appear identical to pp_igc_srgb below — confirm
+ * whether a distinct 601 curve was intended.
+ */
+static u32 pp_igc_601[IGC_LUT_ENTRIES] = {
+	0, 1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 16, 18, 20, 21, 23,
+	25, 27, 29, 31, 33, 35, 37, 40, 42, 45, 48, 50, 53, 56, 59, 62,
+	66, 69, 72, 76, 79, 83, 87, 91, 95, 99, 103, 107, 112, 116, 121,
+	126, 131, 136, 141, 146, 151, 156, 162, 168, 173, 179, 185, 191,
+	197, 204, 210, 216, 223, 230, 237, 244, 251, 258, 265, 273, 280,
+	288, 296, 304, 312, 320, 329, 337, 346, 354, 363, 372, 381, 390,
+	400, 409, 419, 428, 438, 448, 458, 469, 479, 490, 500, 511, 522,
+	533, 544, 555, 567, 578, 590, 602, 614, 626, 639, 651, 664, 676,
+	689, 702, 715, 728, 742, 755, 769, 783, 797, 811, 825, 840, 854,
+	869, 884, 899, 914, 929, 945, 960, 976, 992, 1008, 1024, 1041,
+	1057, 1074, 1091, 1108, 1125, 1142, 1159, 1177, 1195, 1213, 1231,
+	1249, 1267, 1286, 1304, 1323, 1342, 1361, 1381, 1400, 1420, 1440,
+	1459, 1480, 1500, 1520, 1541, 1562, 1582, 1603, 1625, 1646, 1668,
+	1689, 1711, 1733, 1755, 1778, 1800, 1823, 1846, 1869, 1892, 1916,
+	1939, 1963, 1987, 2011, 2035, 2059, 2084, 2109, 2133, 2159, 2184,
+	2209, 2235, 2260, 2286, 2312, 2339, 2365, 2392, 2419, 2446, 2473,
+	2500, 2527, 2555, 2583, 2611, 2639, 2668, 2696, 2725, 2754, 2783,
+	2812, 2841, 2871, 2901, 2931, 2961, 2991, 3022, 3052, 3083, 3114,
+	3146, 3177, 3209, 3240, 3272, 3304, 3337, 3369, 3402, 3435, 3468,
+	3501, 3535, 3568, 3602, 3636, 3670, 3705, 3739, 3774, 3809, 3844,
+	3879, 3915, 3950, 3986, 4022, 4059, 4095,
+};
+
+/* Default inverse-gamma (IGC) LUT for BT.709 content. */
+static u32 pp_igc_709[IGC_LUT_ENTRIES] = {
+	0, 4, 7, 11, 14, 18, 21, 25, 29, 32, 36, 39, 43, 46, 50, 54, 57,
+	61, 64, 68, 71, 75, 78, 82, 86, 90, 94, 98, 102, 107, 111, 115,
+	120, 125, 130, 134, 139, 145, 150, 155, 161, 166, 172, 177, 183,
+	189, 195, 201, 208, 214, 220, 227, 234, 240, 247, 254, 261, 269,
+	276, 283, 291, 298, 306, 314, 322, 330, 338, 347, 355, 364, 372,
+	381, 390, 399, 408, 417, 426, 436, 445, 455, 465, 474, 484, 495,
+	505, 515, 525, 536, 547, 558, 568, 579, 591, 602, 613, 625, 636,
+	648, 660, 672, 684, 696, 708, 721, 733, 746, 759, 772, 785, 798,
+	811, 825, 838, 852, 865, 879, 893, 907, 922, 936, 950, 965, 980,
+	995, 1010, 1025, 1040, 1055, 1071, 1086, 1102, 1118, 1134, 1150,
+	1166, 1183, 1199, 1216, 1232, 1249, 1266, 1283, 1300, 1318, 1335,
+	1353, 1370, 1388, 1406, 1424, 1443, 1461, 1479, 1498, 1517, 1536,
+	1555, 1574, 1593, 1612, 1632, 1652, 1671, 1691, 1711, 1731, 1752,
+	1772, 1793, 1813, 1834, 1855, 1876, 1897, 1919, 1940, 1962, 1984,
+	2005, 2027, 2050, 2072, 2094, 2117, 2139, 2162, 2185, 2208, 2231,
+	2255, 2278, 2302, 2325, 2349, 2373, 2397, 2422, 2446, 2471, 2495,
+	2520, 2545, 2570, 2595, 2621, 2646, 2672, 2697, 2723, 2749, 2775,
+	2802, 2828, 2855, 2881, 2908, 2935, 2962, 2990, 3017, 3044, 3072,
+	3100, 3128, 3156, 3184, 3212, 3241, 3270, 3298, 3327, 3356, 3385,
+	3415, 3444, 3474, 3503, 3533, 3563, 3594, 3624, 3654, 3685, 3716,
+	3746, 3777, 3808, 3840, 3871, 3903, 3934, 3966, 3998, 4030, 4063,
+	4095,
+};
+
+/* Default inverse-gamma (IGC) LUT for sRGB content. */
+static u32 pp_igc_srgb[IGC_LUT_ENTRIES] = {
+	0, 1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 16, 18, 20, 21, 23,
+	25, 27, 29, 31, 33, 35, 37, 40, 42, 45, 48, 50, 53, 56, 59, 62,
+	66, 69, 72, 76, 79, 83, 87, 91, 95, 99, 103, 107, 112, 116, 121,
+	126, 131, 136, 141, 146, 151, 156, 162, 168, 173, 179, 185, 191,
+	197, 204, 210, 216, 223, 230, 237, 244, 251, 258, 265, 273, 280,
+	288, 296, 304, 312, 320, 329, 337, 346, 354, 363, 372, 381, 390,
+	400, 409, 419, 428, 438, 448, 458, 469, 479, 490, 500, 511, 522,
+	533, 544, 555, 567, 578, 590, 602, 614, 626, 639, 651, 664, 676,
+	689, 702, 715, 728, 742, 755, 769, 783, 797, 811, 825, 840, 854,
+	869, 884, 899, 914, 929, 945, 960, 976, 992, 1008, 1024, 1041,
+	1057, 1074, 1091, 1108, 1125, 1142, 1159, 1177, 1195, 1213, 1231, 1249,
+	1267, 1286, 1304, 1323, 1342, 1361, 1381, 1400, 1420, 1440, 1459, 1480,
+	1500, 1520, 1541, 1562, 1582, 1603, 1625, 1646, 1668, 1689, 1711, 1733,
+	1755, 1778, 1800, 1823, 1846, 1869, 1892, 1916, 1939, 1963, 1987, 2011,
+	2035, 2059, 2084, 2109, 2133, 2159, 2184, 2209, 2235, 2260, 2286, 2312,
+	2339, 2365, 2392, 2419, 2446, 2473, 2500, 2527, 2555, 2583, 2611, 2639,
+	2668, 2696, 2725, 2754, 2783, 2812, 2841, 2871, 2901, 2931, 2961, 2991,
+	3022, 3052, 3083, 3114, 3146, 3177, 3209, 3240, 3272, 3304, 3337, 3369,
+	3402, 3435, 3468, 3501, 3535, 3568, 3602, 3636, 3670, 3705, 3739, 3774,
+	3809, 3844, 3879, 3915, 3950, 3986, 4022, 4059, 4095
+};
+
+/*
+ * pp_hist_lut_cache_params_v1_7() - cache a v1.7 Hist LUT config for a DSPP.
+ * @config: user request; cfg_payload points at a user-space
+ *          mdp_hist_lut_data_v1_7.
+ * @mdss_pp_res: global pp cache holding the per-display v1.7 storage.
+ *
+ * Copies the user payload into the per-display cache slot and repoints the
+ * cached config's cfg_payload at kernel memory. READ ops are rejected;
+ * DISABLE ops return early after caching only the top-level config.
+ * NOTE(review): the payload is memcpy'd into the cache before the len check,
+ * so a bad-length request leaves a partially updated cache slot behind —
+ * confirm callers treat the error as fatal for this display.
+ * Returns 0, -EINVAL or -EFAULT.
+ */
+static int pp_hist_lut_cache_params_v1_7(struct mdp_hist_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res)
+{
+	u32 disp_num;
+	struct mdss_pp_res_type_v1_7 *res_cache = NULL;
+	struct mdp_hist_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+	{
+		disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->hist_lut_v17_data[disp_num];
+		mdss_pp_res->enhist_disp_cfg[disp_num].cfg_payload =
+			(void *) v17_cache_data;
+
+		if (copy_from_user(&v17_usr_config, config->cfg_payload,
+				   sizeof(v17_usr_config))) {
+			pr_err("failed to copy v17 hist_lut\n");
+			ret = -EFAULT;
+			return ret;
+		}
+		if ((config->ops & MDP_PP_OPS_DISABLE)) {
+			pr_debug("disable hist_lut\n");
+			ret = 0;
+			return ret;
+		}
+		memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+		if (v17_usr_config.len != ENHIST_LUT_ENTRIES) {
+			pr_err("Invalid table size %d exp %d\n",
+				v17_usr_config.len, ENHIST_LUT_ENTRIES);
+			ret = -EINVAL;
+			return ret;
+		}
+		v17_cache_data->data = &res_cache->hist_lut[disp_num][0];
+		if (copy_from_user(v17_cache_data->data, v17_usr_config.data,
+				   v17_usr_config.len * sizeof(u32))) {
+			pr_err("failed to copy v17 hist_lut->data\n");
+			ret = -EFAULT;
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/*
+ * pp_hist_lut_cache_params_pipe_v1_7() - cache a v1.7 Hist LUT config for an
+ * SSPP pipe.
+ * @config: user request; cfg_payload points at a user-space
+ *          mdp_hist_lut_data_v1_7.
+ * @pipe: pipe whose pp_res holds the kzalloc'd payload cache and LUT buffer.
+ *
+ * Lazily allocates pipe->pp_res.hist_lut_cfg_payload, validates the LUT
+ * length, and copies the LUT data from user space into pipe->pp_res.hist_lut.
+ * On error or an explicit DISABLE the cached payload is freed; in all cases
+ * pipe->pp_cfg.hist_lut_cfg.cfg_payload is left pointing at the (possibly
+ * NULL) cached payload. Returns 0, -EINVAL, -EFAULT or -ENOMEM.
+ */
+static int pp_hist_lut_cache_params_pipe_v1_7(struct mdp_hist_lut_data *config,
+			struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_hist_lut_data_v1_7 *hist_lut_cache_data;
+	struct mdp_hist_lut_data_v1_7 hist_lut_usr_config;
+	int ret = 0;
+
+	if (!config || !pipe) {
+		pr_err("Invalid param config %pK pipe %pK\n",
+			config, pipe);
+		return -EINVAL;
+	}
+
+	if (config->ops & MDP_PP_OPS_DISABLE) {
+		pr_debug("Disable Hist LUT on pipe %d\n", pipe->num);
+		goto hist_lut_cache_pipe_exit;
+	}
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("Read op is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("Hist LUT config payload invalid\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&hist_lut_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(hist_lut_usr_config))) {
+		pr_err("failed to copy hist lut config\n");
+		return -EFAULT;
+	}
+
+	hist_lut_cache_data = pipe->pp_res.hist_lut_cfg_payload;
+	if (!hist_lut_cache_data) {
+		/* first use on this pipe: allocate the payload cache */
+		hist_lut_cache_data = kzalloc(
+				sizeof(struct mdp_hist_lut_data_v1_7),
+				GFP_KERNEL);
+		if (!hist_lut_cache_data) {
+			pr_err("failed to allocate cache_data\n");
+			ret = -ENOMEM;
+			goto hist_lut_cache_pipe_exit;
+		} else
+			pipe->pp_res.hist_lut_cfg_payload = hist_lut_cache_data;
+	}
+
+	*hist_lut_cache_data = hist_lut_usr_config;
+
+	if (hist_lut_cache_data->len != ENHIST_LUT_ENTRIES) {
+		pr_err("Invalid Hist LUT length %d\n",
+			hist_lut_cache_data->len);
+		ret = -EINVAL;
+		goto hist_lut_cache_pipe_exit;
+	}
+
+	if (copy_from_user(pipe->pp_res.hist_lut,
+			   hist_lut_usr_config.data,
+			   sizeof(uint32_t) * hist_lut_cache_data->len)) {
+		pr_err("Failed to copy usr Hist LUT data\n");
+		ret = -EFAULT;
+		goto hist_lut_cache_pipe_exit;
+	}
+
+	hist_lut_cache_data->data = pipe->pp_res.hist_lut;
+
+hist_lut_cache_pipe_exit:
+	if (ret || (config->ops & MDP_PP_OPS_DISABLE)) {
+		kfree(pipe->pp_res.hist_lut_cfg_payload);
+		pipe->pp_res.hist_lut_cfg_payload = NULL;
+	}
+	pipe->pp_cfg.hist_lut_cfg.cfg_payload =
+			pipe->pp_res.hist_lut_cfg_payload;
+	return ret;
+}
+
+/*
+ * pp_hist_lut_cache_params() - version/block dispatcher for Hist LUT caching.
+ * @config: user Hist LUT request (version field selects the handler).
+ * @res_cache: destination descriptor; block must be DSPP (display cache)
+ *             or SSPP_VIG (per-pipe cache) with the matching res pointer set.
+ *
+ * Routes to pp_hist_lut_cache_params_v1_7() or
+ * pp_hist_lut_cache_params_pipe_v1_7(). Returns their result, or -EINVAL for
+ * bad params, unsupported block or unknown version.
+ */
+int pp_hist_lut_cache_params(struct mdp_hist_lut_data *config,
+			  struct mdp_pp_cache_res *res_cache)
+{
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK res_cache %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block != SSPP_VIG && res_cache->block != DSPP) {
+		pr_err("invalid block for Hist LUT %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		return -EINVAL;
+	}
+
+	switch (config->version) {
+	case mdp_hist_lut_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_hist_lut_cache_params_v1_7(config,
+					res_cache->mdss_pp_res);
+			if (ret)
+				pr_err("failed to cache Hist LUT params for DSPP ret %d\n",
+					ret);
+		} else {
+			ret = pp_hist_lut_cache_params_pipe_v1_7(config,
+					res_cache->pipe_res);
+			if (ret)
+				pr_err("failed to cache Hist LUT params for SSPP ret %d\n",
+					ret);
+		}
+		break;
+	default:
+		pr_err("unsupported hist_lut version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_dither_cache_params_v1_7() - cache a v1.7 dither config for a display.
+ * @config: incoming config; cfg_payload points to a mdp_dither_data_v1_7
+ *	in user space, or kernel space when @copy_from_kernel is non-zero.
+ * @mdss_pp_res: per-display PP resource store that receives the copy.
+ * @copy_from_kernel: non-zero when the payload is already in kernel memory.
+ *
+ * The top-level cfg struct is cached before the payload is validated, and
+ * DISABLE / non-WRITE ops return early without touching the payload.
+ * Returns 0 on success, negative errno on failure.
+ */
+int pp_dither_cache_params_v1_7(struct mdp_dither_cfg_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res,
+			    int copy_from_kernel)
+{
+	u32 disp_num;
+	int ret = 0;
+	struct mdss_pp_res_type_v1_7 *res_cache = NULL;
+	struct mdp_dither_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+
+	/* Left and right split bits are mutually exclusive. */
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -ENOTSUPP;
+	}
+
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mdss_pp_res->dither_disp_cfg[disp_num] = *config;
+
+	if (config->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("disable dither\n");
+		ret = 0;
+		goto dither_config_exit;
+	}
+
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for dither %d\n", config->flags);
+		goto dither_config_exit;
+	}
+
+	v17_cache_data = &res_cache->dither_v17_data[disp_num];
+	mdss_pp_res->dither_disp_cfg[disp_num].cfg_payload =
+		(void *)v17_cache_data;
+	if (copy_from_kernel) {
+		memcpy(v17_cache_data, config->cfg_payload,
+				sizeof(struct mdp_dither_data_v1_7));
+	} else {
+		if (copy_from_user(&v17_usr_config, config->cfg_payload,
+				sizeof(v17_usr_config))) {
+			pr_err("failed to copy v17 dither\n");
+			ret = -EFAULT;
+			goto dither_config_exit;
+		}
+		memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+	}
+	/* len == 0 is accepted (defaults); a non-zero len must be exact. */
+	if (v17_cache_data->len &&
+		v17_cache_data->len != MDP_DITHER_DATA_V1_7_SZ) {
+		pr_err("invalid dither len %d expected %d\n",
+			v17_cache_data->len, MDP_DITHER_DATA_V1_7_SZ);
+		ret = -EINVAL;
+	}
+
+dither_config_exit:
+	return ret;
+}
+
+/*
+ * pp_dither_cache_params() - version dispatcher for caching dither config.
+ * @config: userspace dither config to cache.
+ * @mdss_pp_res: per-display PP resource store.
+ * @copy_from_kernel: payload is already in kernel memory when non-zero.
+ *
+ * Returns 0 on success, negative errno on failure.  The unsupported-version
+ * branch now returns -EINVAL instead of silently succeeding, matching the
+ * gamut/pcc/pa/hist_lut dispatchers in this file.
+ */
+int pp_dither_cache_params(struct mdp_dither_cfg_data *config,
+			struct mdss_pp_res_type *mdss_pp_res,
+			int copy_from_kernel)
+{
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_dither_v1_7:
+		ret = pp_dither_cache_params_v1_7(config, mdss_pp_res,
+				copy_from_kernel);
+		break;
+	default:
+		pr_err("unsupported dither version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+
+/*
+ * pp_gamut_cache_params_v1_7() - cache a v1.7 3D gamut config for a display.
+ *
+ * Copies the top-level cfg, validates mode and per-table sizes, then copies
+ * all c0, c1c2, scale-offset tables from user space into one vmalloc'd
+ * buffer whose sub-ranges are pointed to by the cached payload.  The buffer
+ * is reused across calls (freed only on copy failure).  Returns 0 on
+ * success, negative errno otherwise.
+ */
+static int pp_gamut_cache_params_v1_7(struct mdp_gamut_cfg_data *config,
+				      struct mdss_pp_res_type *mdss_pp_res)
+{
+	u32 disp_num, tbl_sz;
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_gamut_data_v1_7 *v17_cache_data, v17_usr_config;
+	u32 gamut_size = 0, scal_coff_size = 0, sz = 0, index = 0;
+	u32 *tbl_gamut = NULL;
+	int ret = 0, i = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	/* Copy top level gamut cfg struct into PP res cache */
+	memcpy(&mdss_pp_res->gamut_disp_cfg[disp_num], config,
+			sizeof(struct mdp_gamut_cfg_data));
+
+	v17_cache_data = &res_cache->gamut_v17_data[disp_num];
+	mdss_pp_res->gamut_disp_cfg[disp_num].cfg_payload =
+		(void *) v17_cache_data;
+	/* c0_data[0] holds the base of the previously-allocated table blob. */
+	tbl_gamut = v17_cache_data->c0_data[0];
+
+	if ((config->flags & MDP_PP_OPS_DISABLE)) {
+		pr_debug("disable gamut\n");
+		ret = 0;
+		goto gamut_config_exit;
+	}
+
+	if (copy_from_user(&v17_usr_config, config->cfg_payload,
+			   sizeof(v17_usr_config))) {
+		pr_err("failed to copy v17 gamut\n");
+		ret = -EFAULT;
+		goto gamut_config_exit;
+	}
+	if (v17_usr_config.mode != mdp_gamut_coarse_mode &&
+	    v17_usr_config.mode != mdp_gamut_fine_mode) {
+		pr_err("invalid gamut mode %d\n", v17_usr_config.mode);
+		return -EINVAL;
+	}
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for gamut %d\n", config->flags);
+		goto gamut_config_exit;
+	}
+	tbl_sz = (v17_usr_config.mode == mdp_gamut_fine_mode) ?
+		MDP_GAMUT_TABLE_V1_7_SZ :
+		 MDP_GAMUT_TABLE_V1_7_COARSE_SZ;
+	v17_cache_data->mode = v17_usr_config.mode;
+	v17_cache_data->map_en = v17_usr_config.map_en;
+	/* sanity check for sizes */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		if (v17_usr_config.tbl_size[i] != tbl_sz) {
+			pr_err("invalid tbl size %d exp %d tbl index %d mode %d\n",
+			       v17_usr_config.tbl_size[i], tbl_sz, i,
+			       v17_usr_config.mode);
+			ret = -EINVAL;
+			goto gamut_config_exit;
+		}
+		gamut_size += v17_usr_config.tbl_size[i];
+		if (i >= MDP_GAMUT_SCALE_OFF_TABLE_NUM)
+			continue;
+		if (v17_usr_config.tbl_scale_off_sz[i] !=
+		    MDP_GAMUT_SCALE_OFF_SZ) {
+			pr_err("invalid scale size %d exp %d scale index %d mode %d\n",
+			       v17_usr_config.tbl_scale_off_sz[i],
+			       MDP_GAMUT_SCALE_OFF_SZ, i,
+			       v17_usr_config.mode);
+			ret = -EINVAL;
+			goto gamut_config_exit;
+		}
+		scal_coff_size += v17_usr_config.tbl_scale_off_sz[i];
+
+	}
+	/* gamut size should be accounted for c0, c1c2 table */
+	sz = gamut_size * 2 + scal_coff_size;
+	if (sz > GAMUT_TOTAL_TABLE_SIZE_V1_7) {
+		pr_err("Invalid table size act %d max %d\n",
+		       sz, GAMUT_TOTAL_TABLE_SIZE_V1_7);
+		ret = -EINVAL;
+		goto gamut_config_exit;
+	}
+	/* Allocate for fine mode other modes will fit */
+	if (!tbl_gamut)
+		tbl_gamut = vmalloc(GAMUT_TOTAL_TABLE_SIZE_V1_7 *
+				    sizeof(u32));
+	if (!tbl_gamut) {
+		ret = -ENOMEM;
+		goto gamut_config_exit;
+	}
+	index = 0;
+	/* Pack c0 then c1c2 tables back-to-back into the single blob. */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		ret = copy_from_user(&tbl_gamut[index],
+			v17_usr_config.c0_data[i],
+			(sizeof(u32) * v17_usr_config.tbl_size[i]));
+		if (ret) {
+			pr_err("copying c0 table %d from userspace failed size %zd ret %d\n",
+				i, (sizeof(u32) *
+				v17_usr_config.tbl_size[i]), ret);
+			ret = -EFAULT;
+			goto gamut_memory_free_exit;
+		}
+		v17_cache_data->c0_data[i] = &tbl_gamut[index];
+		v17_cache_data->tbl_size[i] =
+			v17_usr_config.tbl_size[i];
+		index += v17_usr_config.tbl_size[i];
+		ret = copy_from_user(&tbl_gamut[index],
+			v17_usr_config.c1_c2_data[i],
+			(sizeof(u32) * v17_usr_config.tbl_size[i]));
+		if (ret) {
+			pr_err("copying c1_c2 table %d from userspace failed size %zd ret %d\n",
+				i, (sizeof(u32) *
+				v17_usr_config.tbl_size[i]), ret);
+			ret = -EINVAL;
+			goto gamut_memory_free_exit;
+		}
+		v17_cache_data->c1_c2_data[i] = &tbl_gamut[index];
+		index += v17_usr_config.tbl_size[i];
+	}
+	for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) {
+		ret = copy_from_user(&tbl_gamut[index],
+			v17_usr_config.scale_off_data[i],
+			(sizeof(u32) *
+			v17_usr_config.tbl_scale_off_sz[i]));
+		if (ret) {
+			pr_err("copying scale offset table %d from userspace failed size %zd ret %d\n",
+				i, (sizeof(u32) *
+				v17_usr_config.tbl_scale_off_sz[i]),
+				ret);
+			ret = -EFAULT;
+			goto gamut_memory_free_exit;
+		}
+		v17_cache_data->tbl_scale_off_sz[i] =
+			v17_usr_config.tbl_scale_off_sz[i];
+		v17_cache_data->scale_off_data[i] = &tbl_gamut[index];
+		index += v17_usr_config.tbl_scale_off_sz[i];
+	}
+
+gamut_config_exit:
+	return ret;
+gamut_memory_free_exit:
+	/* On copy failure drop the blob and clear all cached sub-pointers. */
+	vfree(tbl_gamut);
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		v17_cache_data->c0_data[i] = NULL;
+		v17_cache_data->c1_c2_data[i] = NULL;
+		v17_cache_data->tbl_size[i] = 0;
+		if (i < MDP_GAMUT_SCALE_OFF_TABLE_NUM) {
+			v17_cache_data->scale_off_data[i] = NULL;
+			v17_cache_data->tbl_scale_off_sz[i] = 0;
+		}
+	}
+	return ret;
+}
+
+/*
+ * pp_gamut_cache_params() - version dispatcher for caching gamut config.
+ * Returns 0 on success, -EINVAL on bad arguments or unsupported version.
+ */
+int pp_gamut_cache_params(struct mdp_gamut_cfg_data *config,
+			  struct mdss_pp_res_type *mdss_pp_res)
+{
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_gamut_v1_7:
+		ret = pp_gamut_cache_params_v1_7(config, mdss_pp_res);
+		break;
+	default:
+		pr_err("unsupported gamut version %d\n",
+		       config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_pcc_cache_params_pipe_v1_7() - cache a v1.7 PCC config on an SSPP pipe.
+ *
+ * Lazily allocates pipe->pp_res.pcc_cfg_payload on first WRITE and copies
+ * the user payload into it; a DISABLE op frees the cached payload and
+ * clears both pipe pointers.  Always returns 0 except for validation /
+ * copy / allocation failures.
+ */
+static int pp_pcc_cache_params_pipe_v1_7(struct mdp_pcc_cfg_data *config,
+			struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_pcc_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+
+	if (!pipe || !config) {
+		pr_err("invalid params pipe %pK config %pK\n", pipe, config);
+		return -EINVAL;
+	}
+
+	if (config->ops & MDP_PP_OPS_DISABLE) {
+		pr_debug("disable ops set cleanup payload\n");
+		goto cleanup;
+	}
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read ops not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("PCC config payload invalid\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&v17_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(v17_usr_config))) {
+		pr_err("failed to copy pcc config\n");
+		return -EFAULT;
+	}
+
+	if (!(config->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("write ops not set value of flag is %d\n",
+				config->ops);
+		goto cleanup;
+	}
+
+	/* Reuse a previously cached payload, allocate on first use. */
+	v17_cache_data = pipe->pp_res.pcc_cfg_payload;
+	if (!v17_cache_data) {
+		v17_cache_data = kzalloc(sizeof(struct mdp_pcc_data_v1_7),
+				GFP_KERNEL);
+		pipe->pp_res.pcc_cfg_payload = v17_cache_data;
+	}
+	if (!v17_cache_data) {
+		pr_err("failed to allocate the pcc cache data\n");
+		return -ENOMEM;
+	}
+	memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+	pipe->pp_cfg.pcc_cfg_data.cfg_payload = v17_cache_data;
+cleanup:
+	if (config->ops & MDP_PP_OPS_DISABLE) {
+		kfree(pipe->pp_res.pcc_cfg_payload);
+		pipe->pp_res.pcc_cfg_payload = NULL;
+		pipe->pp_cfg.pcc_cfg_data.cfg_payload = NULL;
+	}
+	return 0;
+}
+
+/*
+ * pp_pcc_cache_params_v1_7() - cache a v1.7 PCC config for a DSPP display.
+ *
+ * The top-level cfg and its cache payload pointer are stored before the
+ * user payload is validated; DISABLE and non-WRITE ops return early
+ * without updating the payload contents.  Returns 0 on success, negative
+ * errno on failure.
+ */
+static int pp_pcc_cache_params_v1_7(struct mdp_pcc_cfg_data *config,
+				struct mdss_pp_res_type *mdss_pp_res)
+{
+	u32 disp_num;
+	int ret = 0;
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_pcc_data_v1_7 *v17_cache_data, v17_usr_config;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+	{
+		disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+		mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->pcc_v17_data[disp_num];
+		mdss_pp_res->pcc_disp_cfg[disp_num].cfg_payload =
+			(void *) v17_cache_data;
+		if (copy_from_user(&v17_usr_config, config->cfg_payload,
+				sizeof(v17_usr_config))) {
+			pr_err("failed to copy v17 pcc\n");
+			ret = -EFAULT;
+			goto pcc_config_exit;
+		}
+		if ((config->ops & MDP_PP_OPS_DISABLE)) {
+			pr_debug("disable pcc\n");
+			ret = 0;
+			goto pcc_config_exit;
+		}
+		if (!(config->ops & MDP_PP_OPS_WRITE)) {
+			pr_debug("op for pcc %d\n", config->ops);
+			goto pcc_config_exit;
+		}
+		memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+	}
+pcc_config_exit:
+	return ret;
+}
+
+/*
+ * pp_pcc_cache_params() - version dispatcher for caching PCC config.
+ * Accepts SSPP_RGB..DSPP blocks; DSPP goes to the display cache path,
+ * everything else to the per-pipe path.  Returns 0 on success, negative
+ * errno on failure.
+ */
+int pp_pcc_cache_params(struct mdp_pcc_cfg_data *config,
+			struct mdp_pp_cache_res *res_cache)
+{
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block < SSPP_RGB || res_cache->block > DSPP) {
+		pr_err("invalid block for PCC %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_pcc_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_pcc_cache_params_v1_7(config,
+					res_cache->mdss_pp_res);
+			if (ret)
+				pr_err("caching for DSPP failed for PCC ret %d\n",
+					ret);
+		} else {
+			ret = pp_pcc_cache_params_pipe_v1_7(config,
+					res_cache->pipe_res);
+			if (ret)
+				pr_err("caching for SSPP failed for PCC ret %d block %d\n",
+					ret, res_cache->block);
+		}
+		break;
+	default:
+		pr_err("unsupported pcc version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_igc_lut_cache_params_v1_7() - cache a v1.7 IGC LUT for a DSPP display.
+ * @copy_from_kernel: non-zero when the cfg payload and its c0_c1/c2 tables
+ *	are already in kernel memory (memcpy path); otherwise they are
+ *	copied from user space.
+ *
+ * The LUT length must be exactly IGC_LUT_ENTRIES.  The cached payload's
+ * table pointers are redirected to the per-display static tables in the
+ * v1.7 resource cache.  Returns 0 on success, negative errno on failure.
+ */
+static int pp_igc_lut_cache_params_v1_7(struct mdp_igc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res,
+			    u32 copy_from_kernel)
+{
+	int ret = 0;
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_igc_lut_data_v1_7 *v17_cache_data, v17_usr_config;
+	u32 disp_num;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+	{
+		disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->igc_v17_data[disp_num];
+		mdss_pp_res->igc_disp_cfg[disp_num].cfg_payload =
+		(void *) v17_cache_data;
+		if (!copy_from_kernel) {
+			if (copy_from_user(&v17_usr_config,
+					   config->cfg_payload,
+					   sizeof(v17_usr_config))) {
+				pr_err("failed to copy igc config\n");
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+		} else {
+			if (!config->cfg_payload) {
+				pr_err("can't copy config info NULL payload\n");
+				ret = -EINVAL;
+				goto igc_config_exit;
+			}
+			memcpy(&v17_usr_config, config->cfg_payload,
+			       sizeof(v17_usr_config));
+		}
+		if (!(config->ops & MDP_PP_OPS_WRITE)) {
+			pr_debug("op for gamut %d\n", config->ops);
+			goto igc_config_exit;
+		}
+		if (copy_from_kernel && (!v17_usr_config.c0_c1_data ||
+		    !v17_usr_config.c2_data)) {
+			pr_err("copy from kernel invalid params c0_c1_data %pK c2_data %pK\n",
+				v17_usr_config.c0_c1_data,
+				v17_usr_config.c2_data);
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		if (v17_usr_config.len != IGC_LUT_ENTRIES) {
+			pr_err("Invalid table size %d exp %d\n",
+				v17_usr_config.len, IGC_LUT_ENTRIES);
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		memcpy(v17_cache_data, &v17_usr_config,
+		       sizeof(v17_usr_config));
+		/* Point the cached payload at the per-display static tables. */
+		v17_cache_data->c0_c1_data =
+		&res_cache->igc_table_c0_c1[disp_num][0];
+		v17_cache_data->c2_data =
+		&res_cache->igc_table_c2[disp_num][0];
+		if (copy_from_kernel) {
+			memcpy(v17_cache_data->c0_c1_data,
+			       v17_usr_config.c0_c1_data,
+			       v17_usr_config.len * sizeof(u32));
+			memcpy(v17_cache_data->c2_data, v17_usr_config.c2_data,
+			       v17_usr_config.len * sizeof(u32));
+		} else {
+			ret = copy_from_user(v17_cache_data->c0_c1_data,
+					     v17_usr_config.c0_c1_data,
+					     v17_usr_config.len * sizeof(u32));
+			if (ret) {
+				pr_err("copy from user failed for c0_c1_data size %zd ret %d\n",
+				       v17_usr_config.len * sizeof(u32), ret);
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+			ret = copy_from_user(v17_cache_data->c2_data,
+					     v17_usr_config.c2_data,
+					     v17_usr_config.len * sizeof(u32));
+			if (ret) {
+				pr_err("copy from user failed for c2_data size %zd ret %d\n",
+				       v17_usr_config.len * sizeof(u32), ret);
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+		}
+	}
+igc_config_exit:
+	return ret;
+}
+
+/*
+ * pp_igc_lut_cache_params_pipe_v1_7() - cache a v1.7 IGC LUT on an SSPP pipe.
+ *
+ * Supports a custom user-supplied table or one of the built-in rec709 /
+ * sRGB / rec601 tables (which are copied from kernel memory and have the
+ * C1 channel synthesized from C0 via IGC_C1_SHIFT).  The payload struct is
+ * lazily allocated and freed again on error or DISABLE.
+ *
+ * NOTE(review): on early exits taken before v17_cache_data is loaded from
+ * pipe->pp_res.igc_cfg_payload (e.g. NULL cfg_payload, or DISABLE without
+ * WRITE), the exit path kfree()s the still-NULL local and then NULLs
+ * pipe->pp_res.igc_cfg_payload — a previously cached allocation would be
+ * orphaned.  Verify against callers whether that path is reachable with a
+ * live payload.
+ */
+static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config,
+			    struct mdss_mdp_pipe *pipe,
+			    u32 copy_from_kernel)
+{
+	struct mdp_igc_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+	int ret = 0, fix_up = 0, i = 0;
+
+	if (!config || !pipe) {
+		pr_err("invalid param config %pK pipe %pK\n",
+			config, pipe);
+		return -EINVAL;
+	}
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("can't copy config info NULL payload\n");
+		ret = -EINVAL;
+		goto igc_config_exit;
+	}
+
+	if (copy_from_user(&v17_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(v17_usr_config))) {
+		pr_err("failed to copy igc config\n");
+		return -EFAULT;
+	}
+
+	if (!(config->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for gamut %d\n", config->ops);
+		goto igc_config_exit;
+	}
+
+	switch (v17_usr_config.table_fmt) {
+	case mdp_igc_custom:
+		if (!v17_usr_config.c0_c1_data ||
+		    !v17_usr_config.c2_data ||
+		    v17_usr_config.len != IGC_LUT_ENTRIES) {
+			pr_err("invalid c0_c1data %pK c2_data %pK tbl len %d\n",
+				v17_usr_config.c0_c1_data,
+				v17_usr_config.c2_data,
+				v17_usr_config.len);
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		break;
+	case mdp_igc_rec709:
+		/* Built-in tables live in kernel memory; skip user copy. */
+		v17_usr_config.c0_c1_data = pp_igc_709;
+		v17_usr_config.c2_data = pp_igc_709;
+		v17_usr_config.len = IGC_LUT_ENTRIES;
+		copy_from_kernel = 1;
+		fix_up = 1;
+		break;
+	case mdp_igc_srgb:
+		v17_usr_config.c0_c1_data = pp_igc_srgb;
+		v17_usr_config.c2_data = pp_igc_srgb;
+		v17_usr_config.len = IGC_LUT_ENTRIES;
+		copy_from_kernel = 1;
+		fix_up = 1;
+		break;
+	case mdp_igc_rec601:
+		v17_usr_config.c0_c1_data = pp_igc_601;
+		v17_usr_config.c2_data = pp_igc_601;
+		v17_usr_config.len = IGC_LUT_ENTRIES;
+		copy_from_kernel = 1;
+		fix_up = 1;
+		break;
+	default:
+		pr_err("invalid format %d\n",
+			v17_usr_config.table_fmt);
+		ret = -EINVAL;
+		goto igc_config_exit;
+	}
+	v17_cache_data = pipe->pp_res.igc_cfg_payload;
+	if (!v17_cache_data)
+		v17_cache_data = kzalloc(sizeof(struct mdp_igc_lut_data_v1_7),
+					 GFP_KERNEL);
+	if (!v17_cache_data) {
+		ret = -ENOMEM;
+		goto igc_config_exit;
+	} else {
+		pipe->pp_res.igc_cfg_payload = v17_cache_data;
+		pipe->pp_cfg.igc_cfg.cfg_payload = v17_cache_data;
+	}
+	v17_cache_data->c0_c1_data = pipe->pp_res.igc_c0_c1;
+	v17_cache_data->c2_data = pipe->pp_res.igc_c2;
+	v17_cache_data->len = IGC_LUT_ENTRIES;
+	if (copy_from_kernel) {
+		memcpy(v17_cache_data->c0_c1_data,
+		       v17_usr_config.c0_c1_data,
+		       IGC_LUT_ENTRIES * sizeof(u32));
+		memcpy(v17_cache_data->c2_data,
+		       v17_usr_config.c2_data,
+		       IGC_LUT_ENTRIES * sizeof(u32));
+		if (fix_up) {
+			/* Duplicate C0 into the C1 bit-field of each entry. */
+			for (i = 0; i < IGC_LUT_ENTRIES; i++)
+				v17_cache_data->c0_c1_data[i]
+					|= (v17_cache_data->c0_c1_data[i]
+					    << IGC_C1_SHIFT);
+		}
+	} else {
+		if (copy_from_user(v17_cache_data->c0_c1_data,
+				   v17_usr_config.c0_c1_data,
+				   IGC_LUT_ENTRIES * sizeof(u32))) {
+			pr_err("error in copying the c0_c1_data of size %zd\n",
+			       IGC_LUT_ENTRIES * sizeof(u32));
+			ret = -EFAULT;
+			goto igc_config_exit;
+		}
+		if (copy_from_user(v17_cache_data->c2_data,
+				   v17_usr_config.c2_data,
+				   IGC_LUT_ENTRIES * sizeof(u32))) {
+			pr_err("error in copying the c2_data of size %zd\n",
+			       IGC_LUT_ENTRIES * sizeof(u32));
+			ret = -EFAULT;
+		}
+	}
+igc_config_exit:
+	if (ret || (config->ops & MDP_PP_OPS_DISABLE)) {
+		kfree(v17_cache_data);
+		pipe->pp_cfg.igc_cfg.cfg_payload = NULL;
+		pipe->pp_res.igc_cfg_payload = NULL;
+	}
+	return ret;
+}
+
+/*
+ * pp_igc_lut_cache_params() - version dispatcher for caching IGC LUT config.
+ * Accepts SSPP_RGB..DSPP blocks; DSPP uses the display path, pipes use the
+ * per-pipe path.  Returns 0 on success, negative errno on failure.
+ */
+int pp_igc_lut_cache_params(struct mdp_igc_lut_data *config,
+			    struct mdp_pp_cache_res *res_cache,
+			    u32 copy_from_kernel)
+{
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block < SSPP_RGB || res_cache->block > DSPP) {
+		pr_err("invalid block for IGC %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		ret = -EINVAL;
+		goto igc_exit;
+	}
+	switch (config->version) {
+	case mdp_igc_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_igc_lut_cache_params_v1_7(config,
+				res_cache->mdss_pp_res, copy_from_kernel);
+			if (ret)
+				pr_err("failed to cache IGC params for DSPP ret %d\n",
+					ret);
+
+		} else {
+			ret = pp_igc_lut_cache_params_pipe_v1_7(config,
+				res_cache->pipe_res, copy_from_kernel);
+			if (ret)
+				pr_err("failed to cache IGC params for SSPP ret %d\n",
+					ret);
+		}
+		break;
+	default:
+		pr_err("unsupported igc version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+igc_exit:
+	return ret;
+}
+
+/*
+ * pp_pgc_lut_cache_params_v1_7() - cache a v1.7 panel gamma (PGC) LUT.
+ * @location: DSPP (panel GC) or LM (argc/layer-mixer GC); selects which
+ *	per-display cfg slot and which static c0/c1/c2 tables are used.
+ *
+ * All three channel tables (PGC_LUT_ENTRIES entries each) are copied from
+ * user space; on any copy failure the cached flags are cleared so a
+ * partially written LUT is never applied.  Returns 0 on success, negative
+ * errno on failure.
+ */
+static int pp_pgc_lut_cache_params_v1_7(struct mdp_pgc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res,
+			    int location)
+{
+	int ret = 0;
+	u32 sz = 0;
+	u32 disp_num;
+	struct mdp_pgc_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+	struct mdss_pp_res_type_v1_7 *res_cache = NULL;
+
+	if (location != DSPP && location != LM) {
+		pr_err("Invalid location for pgc %d\n", location);
+		return -EINVAL;
+	}
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+	if (disp_num >= MDSS_BLOCK_DISP_NUM) {
+		pr_err("invalid disp_num %d\n", disp_num);
+		return -EINVAL;
+	}
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (!res_cache) {
+		pr_err("invalid resource payload\n");
+		return -EINVAL;
+	}
+	if (copy_from_user(&v17_usr_config, config->cfg_payload,
+			   sizeof(v17_usr_config))) {
+		pr_err("failed to copy from user config info\n");
+		return -EFAULT;
+	}
+	if (v17_usr_config.len != PGC_LUT_ENTRIES) {
+		pr_err("invalid entries for pgc act %d exp %d\n",
+		       v17_usr_config.len, PGC_LUT_ENTRIES);
+		return -EFAULT;
+	}
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("ops read not supported\n");
+		return -EINVAL;
+	}
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		/* Non-WRITE ops only update the cached flags. */
+		pr_debug("ops write not set flags %d\n", config->flags);
+		if (location == DSPP)
+			mdss_pp_res->pgc_disp_cfg[disp_num].flags =
+				config->flags;
+		else
+			mdss_pp_res->argc_disp_cfg[disp_num].flags =
+				config->flags;
+		return 0;
+	}
+	if (location == DSPP) {
+		mdss_pp_res->pgc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->pgc_dspp_v17_data[disp_num];
+		v17_cache_data->c0_data = &res_cache->pgc_table_c0[disp_num][0];
+		v17_cache_data->c1_data = &res_cache->pgc_table_c1[disp_num][0];
+		v17_cache_data->c2_data = &res_cache->pgc_table_c2[disp_num][0];
+		mdss_pp_res->pgc_disp_cfg[disp_num].cfg_payload =
+			v17_cache_data;
+	} else {
+		mdss_pp_res->argc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->pgc_lm_v17_data[disp_num];
+		v17_cache_data->c0_data =
+			&res_cache->pgc_lm_table_c0[disp_num][0];
+		v17_cache_data->c1_data =
+			&res_cache->pgc_lm_table_c1[disp_num][0];
+		v17_cache_data->c2_data =
+			&res_cache->pgc_lm_table_c2[disp_num][0];
+		mdss_pp_res->argc_disp_cfg[disp_num].cfg_payload =
+			v17_cache_data;
+	}
+	/* len stays 0 until all three tables have landed successfully. */
+	v17_cache_data->len = 0;
+	sz = PGC_LUT_ENTRIES * sizeof(u32);
+	if (copy_from_user(v17_cache_data->c0_data, v17_usr_config.c0_data,
+			   sz)) {
+		pr_err("failed to copy c0_data from user sz %d\n", sz);
+		ret = -EFAULT;
+		goto bail_out;
+	}
+	if (copy_from_user(v17_cache_data->c1_data, v17_usr_config.c1_data,
+			   sz)) {
+		pr_err("failed to copy c1_data from user sz %d\n", sz);
+		ret = -EFAULT;
+		goto bail_out;
+	}
+	if (copy_from_user(v17_cache_data->c2_data, v17_usr_config.c2_data,
+			   sz)) {
+		pr_err("failed to copy c2_data from user sz %d\n", sz);
+		ret = -EFAULT;
+		goto bail_out;
+	}
+	v17_cache_data->len = PGC_LUT_ENTRIES;
+	return 0;
+bail_out:
+	if (location == DSPP)
+		mdss_pp_res->pgc_disp_cfg[disp_num].flags = 0;
+	else
+		mdss_pp_res->argc_disp_cfg[disp_num].flags = 0;
+	return ret;
+}
+
+/*
+ * pp_pgc_lut_cache_params() - version dispatcher for caching PGC LUT config.
+ * @loc: DSPP or LM, forwarded to the v1.7 cache routine.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments or unsupported version.
+ * Fix: the unsupported-version log said "igc" (copy-paste from the IGC
+ * dispatcher); it now correctly says "pgc".
+ */
+int pp_pgc_lut_cache_params(struct mdp_pgc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res, int loc)
+{
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_pgc_v1_7:
+		ret = pp_pgc_lut_cache_params_v1_7(config, mdss_pp_res, loc);
+		break;
+	default:
+		pr_err("unsupported pgc version %d\n",
+		       config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_pa_cache_params_v1_7() - cache a v1.7 picture-adjustment (PA) config
+ * for a DSPP display, including the optional six-zone LUTs (p0/p1) when
+ * MDP_PP_PA_SIX_ZONE_ENABLE is set.  On error or DISABLE the six-zone
+ * pointers in the cached payload are cleared.  Returns 0 on success,
+ * negative errno on failure.
+ */
+static int pp_pa_cache_params_v1_7(struct mdp_pa_v2_cfg_data *config,
+				   struct mdss_pp_res_type *mdss_pp_res)
+{
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_pa_data_v1_7 *pa_cache_data, pa_usr_config;
+	int disp_num, ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("Invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+			(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("Invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("Invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("Read op is not supported\n");
+		return -EINVAL;
+	}
+
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mdss_pp_res->pa_v2_disp_cfg[disp_num] = *config;
+	pa_cache_data = &res_cache->pa_v17_data[disp_num];
+	mdss_pp_res->pa_v2_disp_cfg[disp_num].cfg_payload =
+		(void *) pa_cache_data;
+
+	if (copy_from_user(&pa_usr_config, config->cfg_payload,
+			   sizeof(pa_usr_config))) {
+		pr_err("Failed to copy v1_7 PA\n");
+		ret = -EFAULT;
+		goto pa_config_exit;
+	}
+
+	if ((config->flags & MDP_PP_OPS_DISABLE)) {
+		pr_debug("Disable PA\n");
+		ret = 0;
+		goto pa_config_exit;
+	}
+
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for PA %d\n", config->flags);
+		ret = 0;
+		goto pa_config_exit;
+	}
+
+	memcpy(pa_cache_data, &pa_usr_config, sizeof(pa_usr_config));
+	/* Copy six zone LUT if six zone is enabled to be written */
+	if (config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
+		if (pa_usr_config.six_zone_len != MDP_SIX_ZONE_LUT_SIZE) {
+			pr_err("Invalid six zone size, actual %d max %d\n",
+				pa_usr_config.six_zone_len,
+				MDP_SIX_ZONE_LUT_SIZE);
+			ret = -EINVAL;
+			goto pa_config_exit;
+		}
+
+		ret = copy_from_user(&res_cache->six_zone_lut_p0[disp_num][0],
+				pa_usr_config.six_zone_curve_p0,
+				pa_usr_config.six_zone_len * sizeof(u32));
+		if (ret) {
+			pr_err("copying six_zone_curve_p0 lut from userspace failed size %zd ret %d\n",
+				(sizeof(u32) * pa_usr_config.six_zone_len),
+				ret);
+			ret = -EFAULT;
+			goto pa_config_exit;
+		}
+		pa_cache_data->six_zone_curve_p0 =
+			&res_cache->six_zone_lut_p0[disp_num][0];
+		ret = copy_from_user(&res_cache->six_zone_lut_p1[disp_num][0],
+				pa_usr_config.six_zone_curve_p1,
+				pa_usr_config.six_zone_len * sizeof(u32));
+		if (ret) {
+			pr_err("copying six_zone_curve_p1 lut from userspace failed size %zd ret %d\n",
+				(sizeof(u32) * pa_usr_config.six_zone_len),
+				ret);
+			ret = -EFAULT;
+			goto pa_config_exit;
+		}
+		pa_cache_data->six_zone_curve_p1 =
+			&res_cache->six_zone_lut_p1[disp_num][0];
+	}
+
+pa_config_exit:
+	if (ret || config->flags & MDP_PP_OPS_DISABLE) {
+		/* Never leave stale six-zone pointers behind on error. */
+		pa_cache_data->six_zone_len = 0;
+		pa_cache_data->six_zone_curve_p0 = NULL;
+		pa_cache_data->six_zone_curve_p1 = NULL;
+	}
+	return ret;
+}
+
+/*
+ * pp_pa_cache_params_pipe_v1_7() - cache a v1.7 PA config on an SSPP pipe.
+ *
+ * Lazily allocates pipe->pp_res.pa_cfg_payload and copies the user config
+ * into it; six-zone fields are forced off since SSPP has no six-zone
+ * support.  DISABLE frees the cached payload.  The pipe's cfg_payload is
+ * re-pointed at the (possibly NULL) cache on every exit path.
+ */
+static int pp_pa_cache_params_pipe_v1_7(struct mdp_pa_v2_cfg_data *config,
+					struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_pa_data_v1_7 *pa_cache_data, pa_usr_config;
+	int ret = 0;
+
+	if (!config || !pipe) {
+		pr_err("Invalid param config %pK pipe %pK\n",
+			config, pipe);
+		return -EINVAL;
+	}
+
+	if (config->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("Disable PA on pipe %d\n", pipe->num);
+		goto pa_cache_pipe_exit;
+	}
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("Read op is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("invalid PA config payload\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&pa_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(pa_usr_config))) {
+		pr_err("failed to copy pa usr config\n");
+		return -EFAULT;
+	}
+
+	pa_cache_data = pipe->pp_res.pa_cfg_payload;
+	if (!pa_cache_data) {
+		pa_cache_data = kzalloc(sizeof(struct mdp_pa_data_v1_7),
+					GFP_KERNEL);
+		if (!pa_cache_data) {
+			ret = -ENOMEM;
+			goto pa_cache_pipe_exit;
+		} else
+			pipe->pp_res.pa_cfg_payload = pa_cache_data;
+	}
+
+	*pa_cache_data = pa_usr_config;
+
+	/* No six zone in SSPP */
+	pa_cache_data->six_zone_len = 0;
+	pa_cache_data->six_zone_curve_p0 = NULL;
+	pa_cache_data->six_zone_curve_p1 = NULL;
+
+pa_cache_pipe_exit:
+	if (ret || (config->flags & MDP_PP_OPS_DISABLE)) {
+		kfree(pipe->pp_res.pa_cfg_payload);
+		pipe->pp_res.pa_cfg_payload = NULL;
+	}
+	pipe->pp_cfg.pa_v2_cfg_data.cfg_payload = pipe->pp_res.pa_cfg_payload;
+	return ret;
+}
+
+/*
+ * pp_pa_cache_params() - version dispatcher for caching PA config.
+ * PA exists only on VIG pipes and DSPP blocks.  Returns 0 on success,
+ * negative errno on failure.
+ */
+int pp_pa_cache_params(struct mdp_pa_v2_cfg_data *config,
+			struct mdp_pp_cache_res *res_cache)
+{
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block != SSPP_VIG && res_cache->block != DSPP) {
+		pr_err("invalid block for PA %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		return -EINVAL;
+	}
+
+	switch (config->version) {
+	case mdp_pa_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_pa_cache_params_v1_7(config,
+					res_cache->mdss_pp_res);
+			if (ret)
+				pr_err("failed to cache PA params for DSPP ret %d\n",
+					ret);
+		} else {
+			ret = pp_pa_cache_params_pipe_v1_7(config,
+					res_cache->pipe_res);
+			if (ret)
+				pr_err("failed to cache PA params for SSPP ret %d\n",
+					ret);
+
+		}
+		break;
+	default:
+		pr_err("unsupported pa version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_copy_layer_igc_payload() - duplicate a layer's IGC payload into kernel
+ * memory.  On success pp_info->igc_cfg.cfg_payload is replaced by a
+ * kmalloc'd copy (caller owns the free); unknown versions fall back to
+ * the legacy path with a NULL payload.  Returns 0 or negative errno.
+ */
+int pp_copy_layer_igc_payload(struct mdp_overlay_pp_params *pp_info)
+{
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->igc_cfg.version) {
+	case mdp_igc_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_igc_lut_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->igc_cfg.cfg_payload,
+				sizeof(struct mdp_igc_lut_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, IGC cfg payload = %pK\n",
+				pp_info->igc_cfg.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy IGC version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	/* User-space pointer is always replaced, even on failure (NULL). */
+	pp_info->igc_cfg.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * pp_copy_layer_hist_lut_payload() - duplicate a layer's Hist LUT payload
+ * into kernel memory (same contract as pp_copy_layer_igc_payload()).
+ * Returns 0 or negative errno; caller frees the new payload.
+ */
+int pp_copy_layer_hist_lut_payload(struct mdp_overlay_pp_params *pp_info)
+{
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->hist_lut_cfg.version) {
+	case mdp_hist_lut_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_hist_lut_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->hist_lut_cfg.cfg_payload,
+				sizeof(struct mdp_hist_lut_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, Hist LUT cfg payload = %pK\n",
+				pp_info->hist_lut_cfg.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy Hist LUT version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->hist_lut_cfg.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * pp_copy_layer_pa_payload() - duplicate a layer's PA payload into kernel
+ * memory (same contract as pp_copy_layer_igc_payload()).
+ * Returns 0 or negative errno; caller frees the new payload.
+ */
+int pp_copy_layer_pa_payload(struct mdp_overlay_pp_params *pp_info)
+{
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->pa_v2_cfg_data.version) {
+	case mdp_pa_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_pa_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->pa_v2_cfg_data.cfg_payload,
+				sizeof(struct mdp_pa_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, PA cfg payload = %pK\n",
+				pp_info->pa_v2_cfg_data.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy PA version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->pa_v2_cfg_data.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * pp_copy_layer_pcc_payload() - duplicate a layer's PCC payload into kernel
+ * memory (same contract as pp_copy_layer_igc_payload()).
+ * Returns 0 or negative errno; caller frees the new payload.
+ */
+int pp_copy_layer_pcc_payload(struct mdp_overlay_pp_params *pp_info)
+{
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->pcc_cfg_data.version) {
+	case mdp_pcc_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_pcc_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->pcc_cfg_data.cfg_payload,
+				sizeof(struct mdp_pcc_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, PCC cfg payload = %pK\n",
+				pp_info->pcc_cfg_data.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy PCC version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->pcc_cfg_data.cfg_payload = cfg_payload;
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.h b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.h
new file mode 100644
index 0000000..ab9a3dd
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_CACHE_CONFIG_H
+#define MDSS_MDP_CACHE_CONFIG_H
+#include "mdss_mdp_pp.h"
+
+/* Bundle of destinations a cached PP config can be written to: the
+ * display-wide pp resource struct and/or a specific pipe. */
+struct mdp_pp_cache_res {
+ enum pp_config_block block;
+ struct mdss_pp_res_type *mdss_pp_res;
+ struct mdss_mdp_pipe *pipe_res;
+};
+
+/* Cache a histogram LUT config into the given resource. */
+int pp_hist_lut_cache_params(struct mdp_hist_lut_data *config,
+ struct mdp_pp_cache_res *res_cache);
+
+/* Cache a dither config; copy_from_kernel selects kernel- vs user-space
+ * source for the payload. */
+int pp_dither_cache_params(struct mdp_dither_cfg_data *config,
+ struct mdss_pp_res_type *mdss_pp_res,
+ int copy_from_kernel);
+
+/* Cache gamut / PCC / PA configs into their respective resources. */
+int pp_gamut_cache_params(struct mdp_gamut_cfg_data *config,
+ struct mdss_pp_res_type *mdss_pp_res);
+int pp_pcc_cache_params(struct mdp_pcc_cfg_data *config,
+ struct mdp_pp_cache_res *res_cache);
+int pp_pa_cache_params(struct mdp_pa_v2_cfg_data *config,
+ struct mdp_pp_cache_res *res_cache);
+
+/* Cache an IGC LUT config; copy_from_kernel as above. */
+int pp_igc_lut_cache_params(struct mdp_igc_lut_data *config,
+ struct mdp_pp_cache_res *res_cache,
+ u32 copy_from_kernel);
+
+/* Cache a panel gamma (PGC) LUT for the given pipeline location. */
+int pp_pgc_lut_cache_params(struct mdp_pgc_lut_data *config,
+ struct mdss_pp_res_type *mdss_pp_res,
+ int location);
+
+/* Replace a layer's userspace cfg_payload pointer with a kernel copy;
+ * see mdss_mdp_pp_cache_config.c for ownership semantics. */
+int pp_copy_layer_igc_payload(struct mdp_overlay_pp_params *pp_info);
+int pp_copy_layer_hist_lut_payload(struct mdp_overlay_pp_params *pp_info);
+int pp_copy_layer_pa_payload(struct mdp_overlay_pp_params *pp_info);
+int pp_copy_layer_pcc_payload(struct mdp_overlay_pp_params *pp_info);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.c b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
new file mode 100644
index 0000000..c4e7462
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#include "mdss_mdp_pp_common.h"
+
+/*
+ * pp_pa_set_sts() - translate a PA v1.7 config's mode/enable flags into
+ * the driver's pa_sts status bitmask.
+ *
+ * @pp_sts: status struct to (re)build; pa_sts is cleared first.
+ * @pa_data: PA payload whose mode bits are mirrored into pa_sts; may be
+ * NULL, in which case only the enable/disable state is recorded.
+ * @enable_flag: MDP_PP_OPS_* flags; DISABLE takes precedence over ENABLE.
+ * @block_type: six-zone and MEM_PROT_SIX bits only apply to DSPP blocks.
+ */
+void pp_pa_set_sts(struct pp_sts_type *pp_sts,
+ struct mdp_pa_data_v1_7 *pa_data,
+ int enable_flag, int block_type)
+{
+ if (!pp_sts) {
+ pr_err("invalid input pp_sts %pK\n", pp_sts);
+ return;
+ }
+
+ pp_sts->pa_sts = 0;
+
+ if (enable_flag & MDP_PP_OPS_DISABLE) {
+ /* pa_sts is already 0 here; the clear is kept for explicitness */
+ pp_sts->pa_sts &= ~PP_STS_ENABLE;
+ return;
+ } else if (enable_flag & MDP_PP_OPS_ENABLE) {
+ pp_sts->pa_sts |= PP_STS_ENABLE;
+ }
+
+ if (!pa_data) {
+ pr_err("invalid input pa_data %pK\n", pa_data);
+ return;
+ }
+
+ /* Global HSV STS update */
+ if (pa_data->mode & MDP_PP_PA_HUE_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
+ if (pa_data->mode & MDP_PP_PA_SAT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
+ if (pa_data->mode & MDP_PP_PA_VAL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
+ if (pa_data->mode & MDP_PP_PA_CONT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
+ if (pa_data->mode & MDP_PP_PA_SAT_ZERO_EXP_EN)
+ pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
+
+ /* Memory Protect STS update */
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_HUE_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_HUE_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_SAT_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SAT_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_VAL_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_VAL_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_CONT_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_CONT_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_BLEND_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_BLEND_EN;
+ if ((block_type == DSPP) &&
+ (pa_data->mode & MDP_PP_PA_MEM_PROT_SIX_EN))
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SIX_EN;
+
+ /* Memory Color STS update */
+ if (pa_data->mode & MDP_PP_PA_MEM_COL_SKIN_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
+ if (pa_data->mode & MDP_PP_PA_MEM_COL_SKY_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
+ if (pa_data->mode & MDP_PP_PA_MEM_COL_FOL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
+
+ /* Six Zone STS update (DSPP only) */
+ if (block_type == DSPP) {
+ if (pa_data->mode & MDP_PP_PA_SIX_ZONE_HUE_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
+ if (pa_data->mode & MDP_PP_PA_SIX_ZONE_SAT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
+ if (pa_data->mode & MDP_PP_PA_SIX_ZONE_VAL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
+
+ pp_sts_set_split_bits(&pp_sts->pa_sts, enable_flag);
+ }
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.h b/drivers/video/fbdev/msm/mdss_mdp_pp_common.h
new file mode 100644
index 0000000..53a191d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, 2018,The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_PP_COMMON_H
+#define MDSS_MDP_PP_COMMON_H
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+
+/* Byte offset of the n-th 32-bit register in a register block. */
+#define JUMP_REGISTERS_OFF(n) ((n) * (sizeof(uint32_t)))
+/* Mask of the low n bits, e.g. REG_MASK(3) == 0x7. */
+#define REG_MASK(n) ((BIT(n)) - 1)
+/* Mask of n bits starting at bit 'shift'. */
+#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
+
+/* Build the pa_sts status bitmask from a PA v1.7 payload; see
+ * mdss_mdp_pp_common.c. */
+void pp_pa_set_sts(struct pp_sts_type *pp_sts,
+ struct mdp_pa_data_v1_7 *pa_data,
+ int enable_flag, int block_type);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_pp_debug.c
new file mode 100644
index 0000000..d5c3abb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_debug.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+
+#define MAX_TAB_BUFFER_SIZE 12
+#define MAX_LINE_BUFFER_SIZE 256
+
+/* Append n tab characters to tab_str, bounded by MAX_TAB_BUFFER_SIZE;
+ * tab_str must already be a NUL-terminated string. */
+static inline void tab_prefix(char *tab_str, int n)
+{
+ while ((n)--)
+ strlcat((tab_str), "\t", MAX_TAB_BUFFER_SIZE);
+}
+
+enum {
+ UINT32,
+ UINT16,
+};
+
+/*
+ * pp_print_lut() - dump @size LUT entries at pr_debug level, 16 per line,
+ * each line prefixed with the @tab indentation string. @type selects the
+ * element width (UINT32 or UINT16).
+ *
+ * NOTE(review): "%04x"/"%02x" are minimum field widths, so wider values
+ * still print in full but columns misalign. The snprintf return values
+ * are accumulated without clamping; this relies on 16 entries plus the
+ * tab prefix always fitting in MAX_LINE_BUFFER_SIZE (256) — confirm if
+ * the per-line entry count or buffer size ever changes.
+ */
+void pp_print_lut(void *data, int size, char *tab, uint32_t type)
+{
+ char buf[MAX_LINE_BUFFER_SIZE];
+ int lines = size / 16;
+ int last_start = lines * 16;
+ int i, j;
+ uint32_t read = 0;
+
+ if (!data || !tab)
+ return;
+
+ buf[0] = '\0';
+ /* full lines of 16 entries */
+ for (i = 0; i < lines; i++) {
+ buf[0] = '\0';
+ read += snprintf(buf, MAX_LINE_BUFFER_SIZE - read,
+ "%s", tab);
+ for (j = 0; j < 16; j++) {
+ if (type == UINT32)
+ read += snprintf(buf + read,
+ MAX_LINE_BUFFER_SIZE - read, "%04x ",
+ ((uint32_t *)data)[i*16+j]);
+ else if (type == UINT16)
+ read += snprintf(buf + read,
+ MAX_LINE_BUFFER_SIZE - read, "%02x ",
+ ((uint16_t *)data)[i*16+j]);
+ }
+ snprintf(buf + read, MAX_LINE_BUFFER_SIZE - read, "\n");
+
+ pr_debug("%s", buf);
+ memset(buf, 0, sizeof(char) * MAX_LINE_BUFFER_SIZE);
+ read = 0;
+ }
+
+ /* trailing partial line (size % 16 entries) */
+ lines = size % 16;
+ read += snprintf(buf, MAX_LINE_BUFFER_SIZE - read, "%s", tab);
+ for (i = 0; i < lines; i++) {
+ if (type == UINT32)
+ read += snprintf(buf + read,
+ MAX_LINE_BUFFER_SIZE - read, "%04x ",
+ ((uint32_t *)data)[last_start+i]);
+ else if (type == UINT16)
+ read += snprintf(buf + read,
+ MAX_LINE_BUFFER_SIZE - read, "%02x ",
+ ((uint16_t *)data)[last_start+i]);
+ }
+ snprintf(buf + read, MAX_LINE_BUFFER_SIZE - read, "\n");
+ pr_debug("%s", buf);
+}
+
+/* Dump all mdp_pcc_coeff polynomial coefficients at pr_debug level,
+ * indented by tab_depth tabs (fields at tab_depth + 1). */
+void pp_print_pcc_coeff(struct mdp_pcc_coeff *pcc_coeff, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!pcc_coeff || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pcc_coeff:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sc: %x\n"
+ "%sr: %x\n%sg: %x\n%sb: %x\n"
+ "%srr: %x\n%sgg: %x\n%sbb: %x\n"
+ "%srg: %x\n%sgb: %x\n%srb: %x\n"
+ "%srgb_0: %x\n%srgb_1: %x\n",
+ tab, pcc_coeff->c,
+ tab, pcc_coeff->r,
+ tab, pcc_coeff->g,
+ tab, pcc_coeff->b,
+ tab, pcc_coeff->rr,
+ tab, pcc_coeff->gg,
+ tab, pcc_coeff->bb,
+ tab, pcc_coeff->rg,
+ tab, pcc_coeff->gb,
+ tab, pcc_coeff->rb,
+ tab, pcc_coeff->rgb_0,
+ tab, pcc_coeff->rgb_1);
+}
+
+/* Dump an mdp_pcc_cfg_data (block/ops plus the r/g/b coefficient sets)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_pcc_cfg_data(struct mdp_pcc_cfg_data *pcc_data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!pcc_data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pcc_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n%sops: %x\n",
+ tab, pcc_data->block,
+ tab, pcc_data->ops);
+
+ pp_print_pcc_coeff(&pcc_data->r, tab_depth + 1);
+ pp_print_pcc_coeff(&pcc_data->g, tab_depth + 1);
+ pp_print_pcc_coeff(&pcc_data->b, tab_depth + 1);
+}
+
+/* Dump an mdp_csc_cfg (flags plus the CSC matrix/bias/limit vectors)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_csc_cfg(struct mdp_csc_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_csc_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sflags: %x\n",
+ tab, data->flags);
+
+ pr_debug("%scsc_mv[]:\n", tab);
+ pp_print_lut(&data->csc_mv[0], 9, tab, UINT32);
+ pr_debug("%scsc_pre_bv[]:\n", tab);
+ pp_print_lut(&data->csc_pre_bv[0], 3, tab, UINT32);
+ pr_debug("%scsc_post_bv[]:\n", tab);
+ pp_print_lut(&data->csc_post_bv[0], 3, tab, UINT32);
+ pr_debug("%scsc_pre_lv[]:\n", tab);
+ pp_print_lut(&data->csc_pre_lv[0], 6, tab, UINT32);
+ pr_debug("%scsc_post_lv[]:\n", tab);
+ pp_print_lut(&data->csc_post_lv[0], 6, tab, UINT32);
+}
+
+/* Dump an mdp_csc_cfg_data (block id plus nested csc_data) at pr_debug
+ * level, indented by tab_depth tabs. */
+void pp_print_csc_cfg_data(struct mdp_csc_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* tab[] is uninitialized stack memory: terminate at index 0 so
+ * strlcat() in tab_prefix() sees a valid empty string. Writing the
+ * NUL at tab[tab_depth] left garbage before it and could write past
+ * the 12-byte buffer for large depths; this matches the other
+ * printers in this file.
+ */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_csc_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n",
+ tab, data->block);
+
+ pp_print_csc_cfg(&data->csc_data, tab_depth + 1);
+}
+
+/* Dump an mdp_igc_lut_data (block/len/ops plus both channel LUTs)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_igc_lut_data(struct mdp_igc_lut_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_igc_lut_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n"
+ "%slen: %x\n"
+ "%sops: %x\n",
+ tab, data->block,
+ tab, data->len,
+ tab, data->ops);
+
+ pr_debug("%sc0_c1_data[]:\n", tab);
+ pp_print_lut(&data->c0_c1_data[0], data->len, tab, UINT32);
+ pr_debug("%sc2_data[]:\n", tab);
+ pp_print_lut(&data->c2_data[0], data->len, tab, UINT32);
+}
+
+/* Dump one mdp_ar_gc_lut_data gamma segment (x_start/slope/offset)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_ar_gc_lut_data(struct mdp_ar_gc_lut_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_ar_gc_lut_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sx_start: %x\n"
+ "%sslope: %x\n"
+ "%soffset: %x\n",
+ tab, data->x_start,
+ tab, data->slope,
+ tab, data->offset);
+}
+
+/* Dump an mdp_pgc_lut_data header plus every r/g/b gamma stage at
+ * pr_debug level, indented by tab_depth tabs. */
+void pp_print_pgc_lut_data(struct mdp_pgc_lut_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+ int i;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pgc_lut_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n"
+ "%sflags: %x\n"
+ "%snum_r_stages: %x\n"
+ "%snum_g_stages: %x\n"
+ "%snum_b_stages: %x\n",
+ tab, data->block,
+ tab, data->flags,
+ tab, data->num_r_stages,
+ tab, data->num_g_stages,
+ tab, data->num_b_stages);
+
+ for (i = 0; i < data->num_r_stages; i++) {
+ pr_debug("%sr_data[%d]\n", tab, i);
+ pp_print_ar_gc_lut_data(&data->r_data[i], tab_depth + 1);
+ }
+ for (i = 0; i < data->num_g_stages; i++) {
+ pr_debug("%sg_data[%d]\n", tab, i);
+ pp_print_ar_gc_lut_data(&data->g_data[i], tab_depth + 1);
+ }
+ for (i = 0; i < data->num_b_stages; i++) {
+ pr_debug("%sb_data[%d]\n", tab, i);
+ pp_print_ar_gc_lut_data(&data->b_data[i], tab_depth + 1);
+ }
+}
+
+/* Dump an mdp_hist_lut_data (block/ops/len plus LUT contents)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_hist_lut_data(struct mdp_hist_lut_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_hist_lut_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n"
+ "%sops: %x\n"
+ "%slen: %x\n",
+ tab, data->block,
+ tab, data->ops,
+ tab, data->len);
+
+ pr_debug("%sdata[]:\n", tab);
+ pp_print_lut(&data->data[0], data->len, tab, UINT32);
+}
+
+/* Dump an mdp_lut_cfg_data, dispatching on lut_type to the matching
+ * IGC/PGC/hist printer; unknown types print only the type field. */
+void pp_print_lut_cfg_data(struct mdp_lut_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_lut_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%slut_type: %x\n",
+ tab, data->lut_type);
+
+ switch (data->lut_type) {
+ case mdp_lut_igc:
+ pp_print_igc_lut_data(&data->data.igc_lut_data, tab_depth + 1);
+ break;
+ case mdp_lut_pgc:
+ pp_print_pgc_lut_data(&data->data.pgc_lut_data, tab_depth + 1);
+ break;
+ case mdp_lut_hist:
+ pp_print_hist_lut_data(&data->data.hist_lut_data,
+ tab_depth + 1);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Dump an mdp_qseed_cfg (table_num/ops/len plus coefficient data)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_qseed_cfg(struct mdp_qseed_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_qseed_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%stable_num: %x\n"
+ "%sops: %x\n"
+ "%slen: %x\n",
+ tab, data->table_num,
+ tab, data->ops,
+ tab, data->len);
+
+ pr_debug("%sdata[]:\n", tab);
+ pp_print_lut(&data->data[0], data->len, tab, UINT32);
+}
+
+/* Dump an mdp_qseed_cfg_data (block id plus nested qseed_data)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_qseed_cfg_data(struct mdp_qseed_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ /* was tab[tab_depth] = '\0': left uninitialized bytes before the
+ * NUL for strlcat() to read; index 0 matches the other printers. */
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_qseed_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n",
+ tab, data->block);
+
+ pp_print_qseed_cfg(&data->qseed_data, tab_depth + 1);
+}
+
+/* Dump an mdp_pa_cfg (flags plus HSV/contrast adjustments) at pr_debug
+ * level, indented by tab_depth tabs. */
+void pp_print_pa_cfg(struct mdp_pa_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pa_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sflags: %x\n"
+ "%shue_adj: %x\n"
+ "%ssat_adj: %x\n"
+ "%sval_adj: %x\n"
+ "%scont_adj: %x\n",
+ tab, data->flags,
+ tab, data->hue_adj,
+ tab, data->sat_adj,
+ tab, data->val_adj,
+ tab, data->cont_adj);
+}
+
+/* Dump an mdp_pa_cfg_data (block id plus nested pa_data) at pr_debug
+ * level, indented by tab_depth tabs. */
+void pp_print_pa_cfg_data(struct mdp_pa_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pa_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n",
+ tab, data->block);
+
+ pp_print_pa_cfg(&data->pa_data, tab_depth + 1);
+}
+
+/* Dump one mdp_pa_mem_col_cfg memory-color region (skin/sky/foliage)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_mem_col_cfg(struct mdp_pa_mem_col_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pa_mem_col_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%scolor_adjust_p0: %x\n"
+ "%scolor_adjust_p1: %x\n"
+ "%shue_region: %x\n"
+ "%ssat_region: %x\n"
+ "%sval_region: %x\n",
+ tab, data->color_adjust_p0,
+ tab, data->color_adjust_p1,
+ tab, data->hue_region,
+ tab, data->sat_region,
+ tab, data->val_region);
+}
+
+/* Dump an mdp_pa_v2_data: global adjustments, the three memory-color
+ * regions, and the six-zone curves, at pr_debug level. */
+void pp_print_pa_v2_data(struct mdp_pa_v2_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pa_v2_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sflags: %x\n"
+ "%sglobal_hue_adj: %x\n"
+ "%sglobal_sat_adj: %x\n"
+ "%sglobal_val_adj: %x\n"
+ "%sglobal_cont_adj: %x\n",
+ tab, data->flags,
+ tab, data->global_hue_adj,
+ tab, data->global_sat_adj,
+ tab, data->global_val_adj,
+ tab, data->global_cont_adj);
+
+ pp_print_mem_col_cfg(&data->skin_cfg, tab_depth + 1);
+ pp_print_mem_col_cfg(&data->sky_cfg, tab_depth + 1);
+ pp_print_mem_col_cfg(&data->fol_cfg, tab_depth + 1);
+
+ pr_debug("%ssix_zone_len: %x\n"
+ "%ssix_zone_thresh: %x\n",
+ tab, data->six_zone_len,
+ tab, data->six_zone_thresh);
+
+ pr_debug("%ssix_zone_curve_p0[]:\n", tab);
+ pp_print_lut(&data->six_zone_curve_p0[0], data->six_zone_len, tab,
+ UINT32);
+ pr_debug("%ssix_zone_curve_p1[]:\n", tab);
+ pp_print_lut(&data->six_zone_curve_p1[0], data->six_zone_len, tab,
+ UINT32);
+}
+
+/* Dump an mdp_pa_v2_cfg_data (block id plus nested pa_v2_data)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_pa_v2_cfg_data(struct mdp_pa_v2_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_pa_v2_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n",
+ tab, data->block);
+
+ pp_print_pa_v2_data(&data->pa_v2_data, tab_depth + 1);
+}
+
+/* Dump an mdp_dither_cfg_data (block/flags and per-channel bit depths)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_dither_cfg_data(struct mdp_dither_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_dither_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n"
+ "%sflags: %x\n"
+ "%sg_y_depth: %x\n"
+ "%sr_cr_depth: %x\n"
+ "%sb_cb_depth: %x\n",
+ tab, data->block,
+ tab, data->flags,
+ tab, data->g_y_depth,
+ tab, data->r_cr_depth,
+ tab, data->b_cb_depth);
+}
+
+/* Dump an mdp_gamut_cfg_data: header fields, per-table sizes, and each
+ * of the MDP_GAMUT_TABLE_NUM r/g/b tables, at pr_debug level. */
+void pp_print_gamut_cfg_data(struct mdp_gamut_cfg_data *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+ int i;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_gamut_cfg_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sblock: %x\n"
+ "%sflags: %x\n"
+ "%sgamut_first: %x\n",
+ tab, data->block,
+ tab, data->flags,
+ tab, data->gamut_first);
+
+ pr_debug("%stbl_size[]:\n", tab);
+ pp_print_lut(&data->tbl_size[0], MDP_GAMUT_TABLE_NUM, tab, UINT32);
+
+ for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+ pr_debug("%sr_tbl[%d]:\n", tab, i);
+ pp_print_lut(&data->r_tbl[i][0], data->tbl_size[i], tab,
+ UINT16);
+ }
+
+ for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+ pr_debug("%sg_tbl[%d]:\n", tab, i);
+ pp_print_lut(&data->g_tbl[i][0], data->tbl_size[i], tab,
+ UINT16);
+ }
+
+ for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+ pr_debug("%sb_tbl[%d]:\n", tab, i);
+ pp_print_lut(&data->b_tbl[i][0], data->tbl_size[i], tab,
+ UINT16);
+ }
+}
+
+/* Dump an mdss_ad_init (assertive-display init parameters, LUTs and
+ * backlight linearization tables) at pr_debug level. */
+void pp_print_ad_init(struct mdss_ad_init *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdss_ad_init:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sasym_lut[]:\n", tab);
+ pp_print_lut(&data->asym_lut[0], 33, tab, UINT32);
+
+ pr_debug("%scolor_corr_lut[]:\n", tab);
+ pp_print_lut(&data->color_corr_lut[0], 33, tab, UINT32);
+
+ pr_debug("%si_control[]:\n%s%x %x\n"
+ "%sblack_lvl: %x\n"
+ "%swhite_lvl: %x\n"
+ "%svar: %x\n"
+ "%slimit_ampl: %x\n"
+ "%si_dither: %x\n"
+ "%sslope_max: %x\n"
+ "%sslope_min: %x\n"
+ "%sdither_ctl: %x\n"
+ "%sformat: %x\n"
+ "%sauto_size: %x\n"
+ "%sframe_w: %x\n"
+ "%sframe_h: %x\n"
+ "%slogo_v: %x\n"
+ "%slogo_h: %x\n"
+ "%sbl_lin_len: %x\n",
+ tab, tab, data->i_control[0], data->i_control[1],
+ tab, data->black_lvl,
+ tab, data->white_lvl,
+ tab, data->var,
+ tab, data->limit_ampl,
+ tab, data->i_dither,
+ tab, data->slope_max,
+ tab, data->slope_min,
+ tab, data->dither_ctl,
+ tab, data->format,
+ tab, data->auto_size,
+ tab, data->frame_w,
+ tab, data->frame_h,
+ tab, data->logo_v,
+ tab, data->logo_h,
+ tab, data->bl_lin_len);
+
+ pr_debug("%sbl_lin[]:\n", tab);
+ pp_print_lut(&data->bl_lin[0], data->bl_lin_len, tab, UINT32);
+
+ pr_debug("%sbl_lin_inv[]:\n", tab);
+ pp_print_lut(&data->bl_lin_inv[0], data->bl_lin_len, tab, UINT32);
+}
+
+/* Dump an mdss_ad_cfg (assertive-display runtime config: mode, calib
+ * LUT, backlight limits, filters) at pr_debug level. */
+void pp_print_ad_cfg(struct mdss_ad_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdss_ad_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%smode: %x\n",
+ tab, data->mode);
+
+ pr_debug("%sal_calib_lut[]:\n", tab);
+ pp_print_lut(&data->al_calib_lut[0], 33, tab, UINT32);
+
+ pr_debug("%sbacklight_min: %x\n"
+ "%sbacklight_max: %x\n"
+ "%sbacklight_scale: %x\n"
+ "%samb_light_min: %x\n",
+ tab, data->backlight_min,
+ tab, data->backlight_max,
+ tab, data->backlight_scale,
+ tab, data->amb_light_min);
+
+ pp_print_lut(&data->filter[0], 2, tab, UINT16);
+ pp_print_lut(&data->calib[0], 4, tab, UINT16);
+
+ pr_debug("%sstrength_limit: %x\n"
+ "%st_filter_recursion: %x\n"
+ "%sstab_itr: %x\n"
+ "%sbl_ctrl_mode: %x\n",
+ tab, data->strength_limit,
+ tab, data->t_filter_recursion,
+ tab, data->stab_itr,
+ tab, data->bl_ctrl_mode);
+}
+
+/* Dump an mdss_ad_init_cfg wrapper, dispatching on ops to the init or
+ * cfg printer (ops with neither bit set prints only ops). */
+void pp_print_ad_init_cfg(struct mdss_ad_init_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdss_ad_init_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sops: %x\n",
+ tab, data->ops);
+
+ if (data->ops & MDP_PP_AD_INIT)
+ pp_print_ad_init(&data->params.init, tab_depth + 1);
+ else if (data->ops & MDP_PP_AD_CFG)
+ pp_print_ad_cfg(&data->params.cfg, tab_depth + 1);
+}
+
+/* Dump an mdss_ad_input: mode, the mode-specific input union member,
+ * and the output value, at pr_debug level. */
+void pp_print_ad_input(struct mdss_ad_input *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdss_ad_input:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%smode: %x\n",
+ tab, data->mode);
+
+ /* print whichever union member the mode makes valid */
+ switch (data->mode) {
+ case MDSS_AD_MODE_AUTO_BL:
+ case MDSS_AD_MODE_AUTO_STR:
+ pr_debug("%samb_light: %x\n",
+ tab, data->in.amb_light);
+ break;
+ case MDSS_AD_MODE_TARG_STR:
+ case MDSS_AD_MODE_MAN_STR:
+ pr_debug("%sstrength: %x\n",
+ tab, data->in.strength);
+ break;
+ case MDSS_AD_MODE_CALIB:
+ pr_debug("%scalib_bl: %x\n",
+ tab, data->in.calib_bl);
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("%soutput: %x\n",
+ tab, data->output);
+}
+
+/* Dump an mdp_histogram_cfg (ops/block/frame_cnt/bit_mask/num_bins)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_histogram_cfg(struct mdp_histogram_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_histogram_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sops: %x\n"
+ "%sblock: %x\n"
+ "%sframe_cnt: %x\n"
+ "%sbit_mask: %x\n"
+ "%snum_bins: %x\n",
+ tab, data->ops,
+ tab, data->block,
+ tab, data->frame_cnt,
+ tab, data->bit_mask,
+ tab, data->num_bins);
+}
+
+/* Dump an mdp_sharp_cfg (sharpening strength and thresholds)
+ * at pr_debug level, indented by tab_depth tabs. */
+void pp_print_sharp_cfg(struct mdp_sharp_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_sharp_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sflags: %x\n"
+ "%sstrength: %x\n"
+ "%sedge_thr: %x\n"
+ "%ssmooth_thr: %x\n"
+ "%snoise_thr: %x\n",
+ tab, data->flags,
+ tab, data->strength,
+ tab, data->edge_thr,
+ tab, data->smooth_thr,
+ tab, data->noise_thr);
+}
+
+/* Dump an mdp_calib_config_data (single register calibration access:
+ * ops/addr/data) at pr_debug level, indented by tab_depth tabs. */
+void pp_print_calib_config_data(struct mdp_calib_config_data *data,
+ int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_calib_config_data:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sops: %x\n"
+ "%saddr: %x\n"
+ "%sdata: %x\n",
+ tab, data->ops,
+ tab, data->addr,
+ tab, data->data);
+}
+
+/* Dump an mdp_calib_config_buffer (ops/size header plus the buffer
+ * contents) at pr_debug level, indented by tab_depth tabs. */
+void pp_print_calib_config_buffer(struct mdp_calib_config_buffer *data,
+ int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_calib_config_buffer:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sops: %x\n"
+ "%ssize: %x\n",
+ tab, data->ops,
+ tab, data->size);
+
+ pr_debug("%sbuffer[]:\n", tab);
+ pp_print_lut(&data->buffer[0], data->size, tab, UINT32);
+}
+
+/* Dump an mdp_calib_dcm_state (ops and DCM state) at pr_debug level,
+ * indented by tab_depth tabs. */
+void pp_print_calib_dcm_state(struct mdp_calib_dcm_state *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdp_calib_dcm_state:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sops: %x\n"
+ "%sdcm_state: %x\n",
+ tab, data->ops,
+ tab, data->dcm_state);
+}
+
+/* Dump an mdss_calib_cfg (ops and calibration mask) at pr_debug level,
+ * indented by tab_depth tabs. */
+void pp_print_mdss_calib_cfg(struct mdss_calib_cfg *data, int tab_depth)
+{
+ char tab[MAX_TAB_BUFFER_SIZE];
+ int tmp = 1;
+
+ if (!data || tab_depth < 0)
+ return;
+
+ tab[0] = '\0';
+ tab_prefix(tab, tab_depth);
+ pr_debug("%smdss_calib_cfg:\n", tab);
+ tab_prefix(tab, tmp);
+
+ pr_debug("%sops: %x\n"
+ "%scalib_mask: %x\n",
+ tab, data->ops,
+ tab, data->calib_mask);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
new file mode 100644
index 0000000..9438aca
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -0,0 +1,2117 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_common.h"
+
+
+/* MDP v1.7 specific macros */
+
+/* PCC_EN for PCC opmode*/
+#define PCC_ENABLE BIT(0)
+#define PCC_OP_MODE_OFF 0
+#define PCC_CONST_COEFF_OFF 4
+#define PCC_R_COEFF_OFF 0x10
+#define PCC_G_COEFF_OFF 0x1C
+#define PCC_B_COEFF_OFF 0x28
+#define PCC_RG_COEFF_OFF 0x34
+#define PCC_RB_COEFF_OFF 0x40
+#define PCC_GB_COEFF_OFF 0x4C
+#define PCC_RGB_COEFF_OFF 0x58
+#define PCC_CONST_COEFF_MASK 0xFFFF
+#define PCC_COEFF_MASK 0x3FFFF
+
+
+#define GAMUT_OP_MODE_OFF 0
+#define GAMUT_TABLE_INDEX 4
+#define GAMUT_TABLE_UPPER_R 8
+#define GAMUT_TABLE_LOWER_GB 0xC
+#define GAMUT_C0_SCALE_OFF 0x10
+#define GAMUT_CLK_CTRL 0xD0
+#define GAMUT_CLK_STATUS 0xD4
+#define GAMUT_READ_TABLE_EN BIT(16)
+#define GAMUT_TABLE_SELECT(x) ((BIT(x)) << 12)
+#define GAMUT_COARSE_EN (BIT(2))
+#define GAMUT_COARSE_INDEX 1248
+#define GAMUT_FINE_INDEX 0
+#define GAMUT_MAP_EN BIT(1)
+#define GAMUT_ENABLE BIT(0)
+#define GAMUT_CLK_GATING_ACTIVE 0x0
+#define GAMUT_CLK_GATING_PARTIAL_ACTIVE 0x11
+#define GAMUT_CLK_GATING_INACTIVE 0x33
+
+#define IGC_MASK_MAX 3
+#define IGC_C0_LUT 0
+#define IGC_RGB_C0_LUT 0xC
+#define IGC_DMA_C0_LUT 0x18
+#define IGC_CONFIG_MASK(n) \
+ ((((1 << (IGC_MASK_MAX + 1)) - 1) & ~(1 << n)) << 28)
+#define IGC_INDEX_UPDATE BIT(25)
+#define IGC_INDEX_VALUE_UPDATE (BIT(24) | IGC_INDEX_UPDATE)
+#define IGC_DATA_MASK (BIT(12) - 1)
+#define IGC_DSPP_OP_MODE_EN BIT(0)
+
+#define HIST_LUT_VIG_OP_FIRST_EN BIT(21)
+#define HIST_LUT_DSPP_OP_FIRST_EN BIT(21)
+#define HIST_LUT_VIG_OP_ENABLE BIT(10)
+#define HIST_LUT_DSPP_OP_ENABLE BIT(19)
+#define REG_SSPP_VIG_HIST_LUT_BASE 0x1400
+#define REG_DSPP_HIST_LUT_BASE 0x1400
+#define REG_SSPP_VIG_HIST_SWAP_BASE 0x300
+#define REG_DSPP_HIST_SWAP_BASE 0x234
+#define ENHIST_LOWER_VALUE_MASK 0x3FF
+#define ENHIST_UPPER_VALUE_MASK 0x3FF0000
+#define ENHIST_BIT_SHIFT 16
+
+#define PGC_OPMODE_OFF 0
+#define PGC_C0_LUT_INDEX 4
+#define PGC_INDEX_OFF 4
+#define PGC_C1C2_LUT_OFF 8
+#define PGC_LUT_SWAP 0x1C
+#define PGC_LUT_SEL 0x20
+#define PGC_DATA_MASK (BIT(10) - 1)
+#define PGC_ODD_SHIFT 16
+#define PGC_SWAP 1
+#define PGC_8B_ROUND BIT(1)
+#define PGC_ENABLE BIT(0)
+
+#define HIST_V3_INTR_BIT_MASK 0xF33333
+#define HIST_CTL_OFF_DSPP_V1_7 0x210
+#define HIST_CTL_OFF_SSPP_V1_7 0x2C4
+#define HIST_DATA_OFF_DSPP_V1_7 0x1000
+#define HIST_DATA_OFF_SSPP_V1_7 0xA00
+#define HIST_DATA_MASK 0xFFFFFF
+#define DITHER_MATRIX_OFF 0x14
+#define DITHER_MATRIX_INDEX 16
+#define DITHER_DEPTH_MAP_INDEX 9
+static u32 dither_matrix[DITHER_MATRIX_INDEX] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+ 0, 0, 0, 0, 0, 1, 2, 3, 3};
+
+#define PA_DSPP_GLOBAL_OFF 0x238
+#define PA_DSPP_MEM_COL_SKIN_P0_OFF 0x254
+#define PA_DSPP_MEM_COL_SKIN_P2_OFF 0x318
+#define PA_DSPP_MEM_COL_SKY_P0_OFF 0x268
+#define PA_DSPP_MEM_COL_SKY_P2_OFF 0x320
+#define PA_DSPP_MEM_COL_FOL_P0_OFF 0x27C
+#define PA_DSPP_MEM_COL_FOL_P2_OFF 0x328
+#define PA_SIX_ZONE_LUT_OFF 0x248
+#define PA_SIX_ZONE_REGION_OFF 0x250
+#define PA_SIX_ZONE_ADJ_OFF 0x330
+#define PA_VIG_GLOBAL_OFF 0x310
+#define PA_VIG_MEM_COL_SKIN_P0_OFF 0x288
+#define PA_VIG_MEM_COL_SKIN_P2_OFF 0x418
+#define PA_VIG_MEM_COL_SKY_P0_OFF 0x29C
+#define PA_VIG_MEM_COL_SKY_P2_OFF 0x420
+#define PA_VIG_MEM_COL_FOL_P0_OFF 0x2B0
+#define PA_VIG_MEM_COL_FOL_P2_OFF 0x428
+#define PA_DSPP_HOLD_OFF 0x314
+#define PA_VIG_HOLD_OFF 0x414
+#define PA_GLOBAL_HUE_MASK 0xFFF
+#define PA_GLOBAL_SAT_MASK 0xFFFF
+#define PA_GLOBAL_VAL_MASK 0xFF
+#define PA_GLOBAL_CONT_MASK 0xFF
+#define PA_MEM_COL_ADJ_P0_MASK 0xFFFF07FF
+#define PA_MEM_COL_HUE_REGION_MASK 0x7FF07FF
+#define PA_MEM_COL_SAT_REGION_MASK 0xFFFFFF
+#define PA_MEM_COL_VAL_REGION_MASK 0xFFFFFF
+#define PA_SIX_ZONE_INDEX_UPDATE BIT(26)
+#define PA_SIX_ZONE_VALUE_UPDATE BIT(25)
+#define PA_SIX_ZONE_CURVE_P0_MASK 0xFFF
+#define PA_SIX_ZONE_CURVE_P1_MASK 0xFFF0FFF
+#define PA_SIX_ZONE_ADJ_P0_MASK 0xFFFF
+#define PA_HOLD_MASK 0x3
+#define PA_HOLD_SAT_SHIFT 0
+#define PA_HOLD_VAL_SHIFT 2
+#define PA_HOLD_SKIN_SHIFT 0
+#define PA_HOLD_SKY_SHIFT 4
+#define PA_HOLD_FOL_SHIFT 8
+#define PA_HOLD_SIX_ZONE_SHIFT 12
+#define PA_HOLD_SKIN_MASK 0xF
+#define PA_HOLD_SKY_MASK 0xF0
+#define PA_HOLD_FOL_MASK 0xF00
+#define PA_HOLD_SIX_ZONE_MASK 0xF000
+#define PA_DSPP_OP_ENABLE BIT(20)
+#define PA_DSPP_OP_HUE_MASK BIT(25)
+#define PA_DSPP_OP_SAT_MASK BIT(26)
+#define PA_DSPP_OP_VAL_MASK BIT(27)
+#define PA_DSPP_OP_CONT_MASK BIT(28)
+#define PA_DSPP_OP_SAT_ZERO_EXP_EN BIT(1)
+#define PA_DSPP_OP_SIX_ZONE_HUE_MASK BIT(29)
+#define PA_DSPP_OP_SIX_ZONE_SAT_MASK BIT(30)
+#define PA_DSPP_OP_SIX_ZONE_VAL_MASK BIT(31)
+#define PA_DSPP_OP_MEM_COL_SKIN_MASK BIT(5)
+#define PA_DSPP_OP_MEM_COL_FOL_MASK BIT(6)
+#define PA_DSPP_OP_MEM_COL_SKY_MASK BIT(7)
+#define PA_DSPP_OP_MEM_PROT_HUE_EN BIT(22)
+#define PA_DSPP_OP_MEM_PROT_SAT_EN BIT(23)
+#define PA_DSPP_OP_MEM_PROT_VAL_EN BIT(24)
+#define PA_DSPP_OP_MEM_PROT_CONT_EN BIT(18)
+#define PA_DSPP_OP_MEM_PROT_BLEND_EN BIT(3)
+#define PA_DSPP_OP_MEM_PROT_SIX_EN BIT(17)
+#define PA_VIG_OP_HUE_MASK BIT(25)
+#define PA_VIG_OP_SAT_MASK BIT(26)
+#define PA_VIG_OP_VAL_MASK BIT(27)
+#define PA_VIG_OP_CONT_MASK BIT(28)
+#define PA_VIG_OP_MEM_PROT_HUE_EN BIT(12)
+#define PA_VIG_OP_MEM_PROT_SAT_EN BIT(13)
+#define PA_VIG_OP_MEM_PROT_VAL_EN BIT(14)
+#define PA_VIG_OP_MEM_PROT_CONT_EN BIT(15)
+#define PA_VIG_OP_MEM_COL_SKIN_MASK BIT(5)
+#define PA_VIG_OP_MEM_COL_FOL_MASK BIT(6)
+#define PA_VIG_OP_MEM_COL_SKY_MASK BIT(7)
+#define PA_VIG_OP_MEM_PROT_BLEND_EN BIT(1)
+#define PA_VIG_OP_ENABLE BIT(4)
+#define PA_VIG_OP_SAT_ZERO_EXP_EN BIT(2)
+
+static struct mdss_pp_res_type_v1_7 config_data;
+
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_dither_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+/* histogram prototypes */
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_get_hist_offset(u32 block, u32 *ctl_off);
+static int pp_get_hist_isr(u32 *isr_mask);
+static bool pp_is_sspp_hist_supp(void);
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+ u32 *opmode, int side);
+
+/* Gamut prototypes */
+static int pp_gamut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_gamut_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+/* PCC prototypes */
+static int pp_pcc_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_pcc_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+/* PA prototypes */
+static int pp_pa_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static void pp_pa_update_dspp_opmode(int pa_sts, u32 *opmode);
+static void pp_pa_update_vig_opmode(int pa_sts, u32 *opmode);
+
+static int pp_igc_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_pgc_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_pcc_get_version(u32 *version);
+static int pp_igc_get_version(u32 *version);
+static int pp_pgc_get_version(u32 *version);
+static int pp_pa_get_version(u32 *version);
+static int pp_gamut_get_version(u32 *version);
+static int pp_dither_get_version(u32 *version);
+static int pp_hist_lut_get_version(u32 *version);
+static void pp_gamut_clock_gating_en(char __iomem *base_addr);
+
+void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops)
+{
+ if (!ops) {
+ pr_err("PP driver ops invalid %pK\n", ops);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* IGC ops */
+ ops->pp_ops[IGC].pp_set_config = pp_igc_set_config;
+ ops->pp_ops[IGC].pp_get_config = pp_igc_get_config;
+ ops->pp_ops[IGC].pp_get_version = pp_igc_get_version;
+
+ /* PCC ops */
+ ops->pp_ops[PCC].pp_set_config = pp_pcc_set_config;
+ ops->pp_ops[PCC].pp_get_config = pp_pcc_get_config;
+ ops->pp_ops[PCC].pp_get_version = pp_pcc_get_version;
+ /* GC ops */
+ ops->pp_ops[GC].pp_set_config = pp_pgc_set_config;
+ ops->pp_ops[GC].pp_get_config = pp_pgc_get_config;
+ ops->pp_ops[GC].pp_get_version = pp_pgc_get_version;
+
+ /* PA ops */
+ ops->pp_ops[PA].pp_set_config = pp_pa_set_config;
+ ops->pp_ops[PA].pp_get_config = pp_pa_get_config;
+ ops->pp_ops[PA].pp_get_version = pp_pa_get_version;
+
+ /* Gamut ops */
+ ops->pp_ops[GAMUT].pp_set_config = pp_gamut_set_config;
+ ops->pp_ops[GAMUT].pp_get_config = pp_gamut_get_config;
+ ops->pp_ops[GAMUT].pp_get_version = pp_gamut_get_version;
+
+ /* Dither ops */
+ ops->pp_ops[DITHER].pp_set_config = pp_dither_set_config;
+ ops->pp_ops[DITHER].pp_get_config = pp_dither_get_config;
+ ops->pp_ops[DITHER].pp_get_version = pp_dither_get_version;
+
+ /* QSEED ops */
+ ops->pp_ops[QSEED].pp_set_config = NULL;
+ ops->pp_ops[QSEED].pp_get_config = NULL;
+ ops->pp_ops[QSEED].pp_get_version = NULL;
+
+ /* HIST_LUT ops */
+ ops->pp_ops[HIST_LUT].pp_set_config = pp_hist_lut_set_config;
+ ops->pp_ops[HIST_LUT].pp_get_config = pp_hist_lut_get_config;
+ ops->pp_ops[HIST_LUT].pp_get_version = pp_hist_lut_get_version;
+
+ /* HIST ops */
+ ops->pp_ops[HIST].pp_set_config = NULL;
+ ops->pp_ops[HIST].pp_get_config = pp_hist_get_config;
+ ops->pp_ops[HIST].pp_get_version = NULL;
+
+ /* Set opmode pointers */
+ ops->pp_opmode_config = pp_opmode_config;
+
+ ops->get_hist_offset = pp_get_hist_offset;
+ ops->get_hist_isr_info = pp_get_hist_isr;
+ ops->is_sspp_hist_supp = pp_is_sspp_hist_supp;
+ ops->gamut_clk_gate_en = pp_gamut_clock_gating_en;
+ return &config_data;
+}
+
/*
 * pp_opmode_config() - fold the per-feature enable state in @pp_sts into
 * the OP_MODE word for one pipeline stage.
 * @location: stage being programmed (SSPP_RGB/SSPP_DMA/SSPP_VIG/DSPP/LM).
 * @pp_sts:   software enable/disable status of all PP features.
 * @opmode:   in/out OP_MODE value; enable bits are OR-ed in, nothing is
 *            cleared here.
 * @side:     split-display side, consulted via pp_sts_is_enabled().
 *
 * Pure computation: no registers are touched; the caller writes *opmode.
 */
static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
		u32 *opmode, int side)
{
	if (!pp_sts || !opmode) {
		pr_err("Invalid pp_sts %pK or opmode %pK\n", pp_sts, opmode);
		return;
	}
	switch (location) {
	case SSPP_RGB:
		/* no opmode bits to set for RGB pipes */
		break;
	case SSPP_DMA:
		/* no opmode bits to set for DMA pipes */
		break;
	case SSPP_VIG:
		if (pp_sts->pa_sts & PP_STS_ENABLE)
			pp_pa_update_vig_opmode(pp_sts->pa_sts, opmode);
		if (pp_sts->enhist_sts & PP_STS_ENABLE) {
			/* hist LUT enable also sets the PA enable bit */
			*opmode |= HIST_LUT_VIG_OP_ENABLE |
				  PA_VIG_OP_ENABLE;
			if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
				*opmode |= HIST_LUT_VIG_OP_FIRST_EN;
		}
		break;
	case DSPP:
		if (pp_sts_is_enabled(pp_sts->pa_sts, side))
			pp_pa_update_dspp_opmode(pp_sts->pa_sts, opmode);
		if (pp_sts_is_enabled(pp_sts->igc_sts, side))
			*opmode |= IGC_DSPP_OP_MODE_EN;
		if (pp_sts->enhist_sts & PP_STS_ENABLE) {
			/* hist LUT enable also sets the PA enable bit */
			*opmode |= HIST_LUT_DSPP_OP_ENABLE |
				  PA_DSPP_OP_ENABLE;
			if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
				*opmode |= HIST_LUT_DSPP_OP_FIRST_EN;
		}
		if (pp_sts_is_enabled(pp_sts->dither_sts, side))
			*opmode |= MDSS_MDP_DSPP_OP_DST_DITHER_EN;
		break;
	case LM:
		/* layer-mixer GC is only traced here; no opmode bit set */
		if (pp_sts->argc_sts & PP_STS_ENABLE)
			pr_debug("pgc in LM enabled\n");
		break;
	default:
		pr_err("Invalid block type %d\n", location);
		break;
	}
}
+
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+
+ int ret = 0, i = 0;
+ char __iomem *hist_addr;
+ u32 sz = 0, temp = 0, *data = NULL;
+ struct mdp_hist_lut_data_v1_7 lut_data_v1_7;
+ struct mdp_hist_lut_data_v1_7 *lut_data = &lut_data_v1_7;
+ struct mdp_hist_lut_data *lut_cfg_data = NULL;
+
+ if (!base_addr || !cfg_data) {
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
+ base_addr, cfg_data);
+ return -EINVAL;
+ }
+
+ lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+ if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+ pr_err("read ops not set for hist_lut %d\n", lut_cfg_data->ops);
+ return 0;
+ }
+ if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
+ !lut_cfg_data->cfg_payload) {
+ pr_err("invalid hist_lut version %d payload %pK\n",
+ lut_cfg_data->version, lut_cfg_data->cfg_payload);
+ return -EINVAL;
+ }
+ if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+ sizeof(*lut_data))) {
+ pr_err("copy from user failed for lut_data\n");
+ return -EFAULT;
+ }
+ if (lut_data->len != ENHIST_LUT_ENTRIES) {
+ pr_err("invalid hist_lut len %d", lut_data->len);
+ return -EINVAL;
+ }
+ sz = ENHIST_LUT_ENTRIES * sizeof(u32);
+ if (!access_ok(VERIFY_WRITE, lut_data->data, sz)) {
+ pr_err("invalid lut address for hist_lut sz %d\n", sz);
+ return -EFAULT;
+ }
+
+ switch (block_type) {
+ case SSPP_VIG:
+ hist_addr = base_addr + REG_SSPP_VIG_HIST_LUT_BASE;
+ break;
+ case DSPP:
+ hist_addr = base_addr + REG_DSPP_HIST_LUT_BASE;
+ break;
+ default:
+ pr_err("Invalid block type %d\n", block_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ pr_err("Failed to read hist_lut table ret %d", ret);
+ return ret;
+ }
+
+ data = kzalloc(sz, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+ temp = readl_relaxed(hist_addr);
+ data[i] = temp & ENHIST_LOWER_VALUE_MASK;
+ data[i + 1] =
+ (temp & ENHIST_UPPER_VALUE_MASK) >> ENHIST_BIT_SHIFT;
+ hist_addr += 4;
+ }
+ if (copy_to_user(lut_data->data, data, sz)) {
+ pr_err("failed to copy the hist_lut back to user\n");
+ ret = -EFAULT;
+ }
+ kfree(data);
+ return ret;
+}
+
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type)
+{
+ int ret = 0, i = 0;
+ u32 temp = 0;
+ struct mdp_hist_lut_data *lut_cfg_data = NULL;
+ struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+ char __iomem *hist_addr = NULL, *swap_addr = NULL;
+
+ if (!base_addr || !cfg_data || !pp_sts) {
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+ base_addr, cfg_data, pp_sts);
+ return -EINVAL;
+ }
+
+ lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+ if (lut_cfg_data->version != mdp_hist_lut_v1_7) {
+ pr_err("invalid hist_lut version %d\n", lut_cfg_data->version);
+ return -EINVAL;
+ }
+
+ if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+ pr_debug("Disable Hist LUT\n");
+ goto bail_out;
+ }
+
+ if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+ pr_err("only read ops set for lut\n");
+ return ret;
+ }
+ if (!(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+ pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+ goto bail_out;
+ }
+ lut_data = lut_cfg_data->cfg_payload;
+ if (!lut_data) {
+ pr_err("invalid hist_lut cfg_payload %pK\n", lut_data);
+ return -EINVAL;
+ }
+
+ if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
+ pr_err("invalid hist_lut len %d data %pK\n",
+ lut_data->len, lut_data->data);
+ return -EINVAL;
+ }
+ switch (block_type) {
+ case SSPP_VIG:
+ hist_addr = base_addr + REG_SSPP_VIG_HIST_LUT_BASE;
+ swap_addr = base_addr +
+ REG_SSPP_VIG_HIST_SWAP_BASE;
+ break;
+ case DSPP:
+ hist_addr = base_addr + REG_DSPP_HIST_LUT_BASE;
+ swap_addr = base_addr + REG_DSPP_HIST_SWAP_BASE;
+ break;
+ default:
+ pr_err("Invalid block type %d\n", block_type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ pr_err("hist_lut table not updated ret %d", ret);
+ return ret;
+ }
+ for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+ temp = (lut_data->data[i] & ENHIST_LOWER_VALUE_MASK) |
+ ((lut_data->data[i + 1] & ENHIST_LOWER_VALUE_MASK)
+ << ENHIST_BIT_SHIFT);
+
+ writel_relaxed(temp, hist_addr);
+ hist_addr += 4;
+ }
+ if (lut_cfg_data->hist_lut_first)
+ pp_sts->enhist_sts |= PP_STS_PA_LUT_FIRST;
+ else
+ pp_sts->enhist_sts &= ~PP_STS_PA_LUT_FIRST;
+
+ writel_relaxed(1, swap_addr);
+
+bail_out:
+ if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE)
+ pp_sts->enhist_sts &= ~PP_STS_ENABLE;
+ else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE)
+ pp_sts->enhist_sts |= PP_STS_ENABLE;
+
+ return ret;
+}
+
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+ pr_err("Operation not supported\n");
+ return -ENOTSUPP;
+}
+
+static int pp_dither_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type)
+{
+ int i = 0;
+ u32 data;
+ struct mdp_dither_cfg_data *dither_cfg_data = NULL;
+ struct mdp_dither_data_v1_7 *dither_data = NULL;
+ uint32_t *pdata = NULL;
+
+ if (!base_addr || !cfg_data || !pp_sts) {
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+ base_addr, cfg_data, pp_sts);
+ return -EINVAL;
+ }
+ if (block_type != DSPP)
+ return -ENOTSUPP;
+ dither_cfg_data = (struct mdp_dither_cfg_data *) cfg_data;
+
+ if (dither_cfg_data->version != mdp_dither_v1_7) {
+ pr_err("invalid dither version %d\n", dither_cfg_data->version);
+ return -EINVAL;
+ }
+ if (dither_cfg_data->flags & MDP_PP_OPS_READ) {
+ pr_err("Invalid context for read operation\n");
+ return -EINVAL;
+ }
+ if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+ pr_debug("set disable dither\n");
+ goto bail_out;
+ }
+
+ if (!(dither_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+ pr_debug("non write ops set %d\n", dither_cfg_data->flags);
+ goto bail_out;
+ }
+
+ dither_data = dither_cfg_data->cfg_payload;
+ if (!dither_data) {
+ pr_err("invalid payload for dither %pK\n", dither_data);
+ return -EINVAL;
+ }
+
+ if ((dither_data->g_y_depth >= DITHER_DEPTH_MAP_INDEX) ||
+ (dither_data->b_cb_depth >= DITHER_DEPTH_MAP_INDEX) ||
+ (dither_data->r_cr_depth >= DITHER_DEPTH_MAP_INDEX) ||
+ (dither_data->len > DITHER_MATRIX_INDEX)) {
+ pr_err("invalid data for dither, g_y_depth %d y_cb_depth %d r_cr_depth %d\n len %d",
+ dither_data->g_y_depth, dither_data->b_cb_depth,
+ dither_data->r_cr_depth, dither_data->len);
+ return -EINVAL;
+ }
+ if (!dither_data->len)
+ pdata = dither_matrix;
+ else
+ pdata = dither_data->data;
+
+ data = dither_depth_map[dither_data->g_y_depth];
+ data |= dither_depth_map[dither_data->b_cb_depth] << 2;
+ data |= dither_depth_map[dither_data->r_cr_depth] << 4;
+ data |= (dither_data->temporal_en) ? (1 << 8) : 0;
+ writel_relaxed(data, base_addr);
+ base_addr += DITHER_MATRIX_OFF;
+ for (i = 0; i < DITHER_MATRIX_INDEX; i += 4) {
+ data = pdata[i] | (pdata[i + 1] << 4) |
+ (pdata[i + 2] << 8) | (pdata[i + 3] << 12);
+ writel_relaxed(data, base_addr);
+ base_addr += 4;
+ }
+bail_out:
+ if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE)
+ pp_sts->dither_sts &= ~PP_STS_ENABLE;
+ else if (dither_cfg_data->flags & MDP_PP_OPS_ENABLE)
+ pp_sts->dither_sts |= PP_STS_ENABLE;
+ pp_sts_set_split_bits(&pp_sts->dither_sts, dither_cfg_data->flags);
+
+ return 0;
+}
+
/*
 * pp_hist_get_config() - read the histogram bins for one VIG SSPP or
 * DSPP block into the caller's pp_hist_col_info.
 * @base_addr:  ioremapped base of the block; advanced locally to the
 *              histogram data region.
 * @cfg_data:   actually a struct pp_hist_col_info * (not a user config).
 * @block_type: SSPP_VIG or DSPP.
 * @disp_num:   unused on this revision.
 *
 * Each bin register is masked to 24 bits (HIST_DATA_MASK).
 * Returns the sum of all bins on success, or a negative errno.
 * NOTE(review): the bin sum is returned through an int; verify it
 * cannot exceed INT_MAX for HIST_V_SIZE 24-bit bins, since a negative
 * sum would be indistinguishable from an error code to the caller.
 */
static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
			u32 block_type, u32 disp_num)
{
	int ret = 0, i = 0;
	u32 sum = 0;
	struct pp_hist_col_info *hist_info = NULL;

	if (!base_addr || !cfg_data) {
		pr_err("invalid params base_addr %pK cfg_data %pK\n",
			base_addr, cfg_data);
		return -EINVAL;
	}

	hist_info = (struct pp_hist_col_info *) cfg_data;

	switch (block_type) {
	case SSPP_VIG:
		base_addr += HIST_DATA_OFF_SSPP_V1_7;
		break;
	case DSPP:
		base_addr += HIST_DATA_OFF_DSPP_V1_7;
		break;
	default:
		pr_err("Invalid block type %d\n", block_type);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		pr_err("Failed to read hist data ret %d\n", ret);
		return ret;
	}

	/* one register per bin; accumulate the total while copying out */
	for (i = 0; i < HIST_V_SIZE; i++) {
		hist_info->data[i] = readl_relaxed(base_addr) & HIST_DATA_MASK;
		base_addr += 0x4;
		sum += hist_info->data[i];
	}
	hist_info->hist_cnt_read++;
	return sum;
}
+
+static int pp_get_hist_offset(u32 block, u32 *ctl_off)
+{
+ int ret = 0;
+
+ if (!ctl_off) {
+ pr_err("invalid params ctl_off %pK\n", ctl_off);
+ return -EINVAL;
+ }
+ switch (block) {
+ case SSPP_VIG:
+ *ctl_off = HIST_CTL_OFF_SSPP_V1_7;
+ break;
+ case DSPP:
+ *ctl_off = HIST_CTL_OFF_DSPP_V1_7;
+ break;
+ default:
+ pr_err("Invalid block type %d\n", block);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int pp_get_hist_isr(u32 *isr_mask)
+{
+ if (!isr_mask) {
+ pr_err("invalid params isr_mask %pK\n", isr_mask);
+ return -EINVAL;
+ }
+
+ *isr_mask = HIST_V3_INTR_BIT_MASK;
+ return 0;
+}
+
/* Source-pipe (SSPP) histogram collection is not supported on v1.7;
 * only the DSPP histogram path is usable.
 */
static bool pp_is_sspp_hist_supp(void)
{
	return false;
}
+
+static int pp_gamut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+ u32 val = 0, sz = 0, sz_scale = 0, mode = 0, tbl_sz = 0;
+ u32 index_start = 0;
+ int i = 0, j = 0, ret = 0;
+ u32 *gamut_tbl = NULL, *gamut_c0 = NULL, *gamut_c1c2 = NULL;
+ struct mdp_gamut_cfg_data *gamut_cfg = (struct mdp_gamut_cfg_data *)
+ cfg_data;
+ struct mdp_gamut_data_v1_7 gamut_data;
+ u32 clk_gate_disable = 0;
+
+ if (!base_addr || !cfg_data) {
+ pr_err("invalid params base_addr %pK cfg_data %pK\n",
+ base_addr, cfg_data);
+ return -EINVAL;
+ }
+ if (gamut_cfg->version != mdp_gamut_v1_7) {
+ pr_err("unsupported version of gamut %d\n",
+ gamut_cfg->version);
+ return -EINVAL;
+ }
+ if (copy_from_user(&gamut_data, gamut_cfg->cfg_payload,
+ sizeof(gamut_data))) {
+ pr_err("copy from user failed for gamut_data\n");
+ return -EFAULT;
+ }
+ mode = readl_relaxed(base_addr + GAMUT_OP_MODE_OFF);
+ if (mode & GAMUT_COARSE_EN) {
+ tbl_sz = MDP_GAMUT_TABLE_V1_7_COARSE_SZ;
+ sz = tbl_sz * sizeof(u32);
+ index_start = GAMUT_COARSE_INDEX;
+ gamut_data.mode = mdp_gamut_coarse_mode;
+ } else {
+ tbl_sz = MDP_GAMUT_TABLE_V1_7_SZ;
+ sz = tbl_sz * sizeof(u32);
+ index_start = GAMUT_FINE_INDEX;
+ gamut_data.mode = mdp_gamut_fine_mode;
+ }
+ gamut_data.map_en = mode & GAMUT_MAP_EN;
+ sz_scale = MDP_GAMUT_SCALE_OFF_SZ * sizeof(u32);
+ for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+ if (!access_ok(VERIFY_WRITE, gamut_data.c0_data[i], sz)) {
+ pr_err("invalid c0 address for sz %d table index %d\n",
+ sz, (i+1));
+ return -EFAULT;
+ }
+ if (!access_ok(VERIFY_WRITE, gamut_data.c1_c2_data[i], sz)) {
+ pr_err("invalid c1c2 address for sz %d table index %d\n",
+ sz, (i+1));
+ return -EFAULT;
+ }
+ gamut_data.tbl_size[i] = tbl_sz;
+ if (i < MDP_GAMUT_SCALE_OFF_TABLE_NUM) {
+ if (!access_ok(VERIFY_WRITE,
+ gamut_data.scale_off_data[i], sz_scale)) {
+ pr_err("invalid scale address for sz %d color c%d\n",
+ sz_scale, i);
+ return -EFAULT;
+ }
+ gamut_data.tbl_scale_off_sz[i] =
+ MDP_GAMUT_SCALE_OFF_SZ;
+ }
+ }
+ /* allocate for c0 and c1c2 tables */
+ gamut_tbl = kzalloc((sz * 2), GFP_KERNEL);
+ if (!gamut_tbl)
+ return -ENOMEM;
+
+ gamut_c0 = gamut_tbl;
+ gamut_c1c2 = gamut_c0 + tbl_sz;
+ writel_relaxed(GAMUT_CLK_GATING_INACTIVE, base_addr + GAMUT_CLK_CTRL);
+ clk_gate_disable = 1;
+ for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+ val = index_start;
+ val |= GAMUT_READ_TABLE_EN;
+ val |= GAMUT_TABLE_SELECT(i);
+ writel_relaxed(val, (base_addr + GAMUT_TABLE_INDEX));
+ for (j = 0; j < tbl_sz; j++) {
+ gamut_c1c2[j] = readl_relaxed(base_addr +
+ GAMUT_TABLE_LOWER_GB);
+ gamut_c0[j] = readl_relaxed(base_addr +
+ GAMUT_TABLE_UPPER_R);
+ }
+ if (copy_to_user(gamut_data.c0_data[i], gamut_c0, sz)) {
+ pr_err("copy to user failed for table %d c0 sz %d\n",
+ i, sz);
+ ret = -EFAULT;
+ goto bail_out;
+ }
+ if (copy_to_user(gamut_data.c1_c2_data[i], gamut_c1c2, sz)) {
+ pr_err("copy to user failed for table %d c1c2 sz %d\n",
+ i, sz);
+ ret = -EFAULT;
+ goto bail_out;
+ }
+ }
+ sz_scale = MDP_GAMUT_SCALE_OFF_TABLE_NUM * MDP_GAMUT_SCALE_OFF_SZ
+ * sizeof(u32);
+ if (sz < sz_scale) {
+ kfree(gamut_tbl);
+ gamut_tbl = kzalloc(sz_scale, GFP_KERNEL);
+ if (!gamut_tbl) {
+ pr_err("failed to alloc scale tbl size %d\n",
+ sz_scale);
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+ }
+ gamut_c0 = gamut_tbl;
+ base_addr += GAMUT_C0_SCALE_OFF;
+ for (i = 0;
+ i < (MDP_GAMUT_SCALE_OFF_TABLE_NUM * MDP_GAMUT_SCALE_OFF_SZ);
+ i++) {
+ gamut_c0[i] = readl_relaxed(base_addr);
+ base_addr += 4;
+ }
+ for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) {
+ if (copy_to_user(gamut_data.scale_off_data[i],
+ &gamut_c0[i * MDP_GAMUT_SCALE_OFF_SZ],
+ (MDP_GAMUT_SCALE_OFF_SZ * sizeof(u32)))) {
+ pr_err("copy to user failed for scale color c%d\n",
+ i);
+ ret = -EFAULT;
+ goto bail_out;
+ }
+ }
+ if (copy_to_user(gamut_cfg->cfg_payload, &gamut_data,
+ sizeof(gamut_data))) {
+ pr_err("failed to copy the gamut info into payload\n");
+ ret = -EFAULT;
+ }
+bail_out:
+ if (clk_gate_disable)
+ writel_relaxed(GAMUT_CLK_GATING_PARTIAL_ACTIVE,
+ base_addr + GAMUT_CLK_CTRL);
+ kfree(gamut_tbl);
+ return ret;
+}
+
/*
 * pp_gamut_set_config() - program the 3D gamut tables, scale/offset
 * region and opmode, and update the software enable status.
 * @base_addr:  ioremapped base of the gamut register block.
 * @pp_sts:     software status; gamut_sts bits are updated here.
 * @cfg_data:   struct mdp_gamut_cfg_data with kernel-space cfg_payload.
 * @block_type: unused here; the caller selects the block.
 *
 * Table entries are streamed as packed 64-bit words (c1c2 in the high
 * word, c0 in the low word) after priming the LOWER_GB register.
 * Returns 0 on success or a negative errno.
 */
static int pp_gamut_set_config(char __iomem *base_addr,
		struct pp_sts_type *pp_sts, void *cfg_data,
		u32 block_type)
{
	int val = 0, ret = 0, i = 0, j = 0;
	u32 index_start = 0, tbl_sz = 0;
	struct mdp_gamut_cfg_data *gamut_cfg_data = NULL;
	struct mdp_gamut_data_v1_7 *gamut_data = NULL;
	char __iomem *base_addr_scale = base_addr;
	uint64_t gamut_val;

	if (!base_addr || !cfg_data || !pp_sts) {
		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
			base_addr, cfg_data, pp_sts);
		return -EINVAL;
	}
	gamut_cfg_data = (struct mdp_gamut_cfg_data *) cfg_data;
	if (gamut_cfg_data->version != mdp_gamut_v1_7) {
		pr_err("invalid gamut version %d\n", gamut_cfg_data->version);
		return -EINVAL;
	}
	if (!(gamut_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
		pr_debug("only read ops is set %d", gamut_cfg_data->flags);
		return 0;
	}

	/* DISABLE skips payload handling; only the opmode is cleared below */
	if (gamut_cfg_data->flags & MDP_PP_OPS_DISABLE) {
		pr_debug("disabling gamut\n");
		goto bail_out;
	}

	gamut_data = (struct mdp_gamut_data_v1_7 *)
		gamut_cfg_data->cfg_payload;
	if (!gamut_data) {
		pr_err("invalid payload for gamut %pK\n", gamut_data);
		return -EINVAL;
	}

	if (gamut_data->mode != mdp_gamut_fine_mode &&
	    gamut_data->mode != mdp_gamut_coarse_mode) {
		pr_err("invalid gamut mode %d", gamut_data->mode);
		return -EINVAL;
	}
	index_start = (gamut_data->mode == mdp_gamut_fine_mode) ?
		GAMUT_FINE_INDEX : GAMUT_COARSE_INDEX;
	tbl_sz = (gamut_data->mode == mdp_gamut_fine_mode) ?
		MDP_GAMUT_TABLE_V1_7_SZ : MDP_GAMUT_TABLE_V1_7_COARSE_SZ;
	if (!(gamut_cfg_data->flags & MDP_PP_OPS_WRITE))
		goto bail_out;
	/* Sanity check for all tables */
	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
		if (!gamut_data->c0_data[i] || !gamut_data->c1_c2_data[i]
		    || (gamut_data->tbl_size[i] != tbl_sz)) {
			pr_err("invalid param for c0 %pK c1c2 %pK table %d size %d expected sz %d\n",
				gamut_data->c0_data[i],
				gamut_data->c1_c2_data[i], i,
				gamut_data->tbl_size[i], tbl_sz);
			ret = -EINVAL;
			goto bail_out;
		}
		if (i < MDP_GAMUT_SCALE_OFF_TABLE_NUM &&
		    (!gamut_data->scale_off_data[i] ||
		     (gamut_data->tbl_scale_off_sz[i] !=
		      MDP_GAMUT_SCALE_OFF_SZ))) {
			pr_err("invalid param for scale table %pK for c%d size %d expected size%d\n",
				gamut_data->scale_off_data[i], i,
				gamut_data->tbl_scale_off_sz[i],
				MDP_GAMUT_SCALE_OFF_SZ);
			ret = -EINVAL;
			goto bail_out;
		}
	}
	/* scale/offset region is contiguous across the color tables, so
	 * base_addr_scale accumulates across loop iterations below
	 */
	base_addr_scale += GAMUT_C0_SCALE_OFF;
	/* gate clocks fully off while streaming the tables */
	writel_relaxed(GAMUT_CLK_GATING_INACTIVE, base_addr + GAMUT_CLK_CTRL);
	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
		val = index_start;
		val |= GAMUT_TABLE_SELECT(i);
		writel_relaxed(val, (base_addr + GAMUT_TABLE_INDEX));

		/* prime LOWER_GB with the first c1c2 entry; each packed
		 * 64-bit write then carries c1c2[j+1] (high word) and
		 * c0[j] (low word)
		 */
		writel_relaxed(gamut_data->c1_c2_data[i][0],
			base_addr + GAMUT_TABLE_LOWER_GB);
		for (j = 0; j < gamut_data->tbl_size[i] - 1; j++) {
			gamut_val = gamut_data->c1_c2_data[i][j + 1];
			gamut_val = (gamut_val << 32) |
				gamut_data->c0_data[i][j];
			writeq_relaxed(gamut_val,
				base_addr + GAMUT_TABLE_UPPER_R);
		}
		/* last c0 entry has no paired c1c2: plain 32-bit write
		 * (j == tbl_size[i] - 1 after the loop)
		 */
		writel_relaxed(gamut_data->c0_data[i][j],
			base_addr + GAMUT_TABLE_UPPER_R);
		if ((i >= MDP_GAMUT_SCALE_OFF_TABLE_NUM) ||
		    (!gamut_data->map_en))
			continue;
		for (j = 0; j < MDP_GAMUT_SCALE_OFF_SZ; j++) {
			writel_relaxed((gamut_data->scale_off_data[i][j]),
				base_addr_scale);
			base_addr_scale += 4;
		}
	}
	/* restore the normal partial clock-gating setting */
	writel_relaxed(GAMUT_CLK_GATING_PARTIAL_ACTIVE,
		base_addr + GAMUT_CLK_CTRL);
bail_out:
	if (!ret) {
		val = 0;
		pp_sts_set_split_bits(&pp_sts->gamut_sts,
			gamut_cfg_data->flags);
		if (gamut_cfg_data->flags & MDP_PP_OPS_DISABLE) {
			pp_sts->gamut_sts &= ~PP_STS_ENABLE;
			writel_relaxed(val, base_addr + GAMUT_OP_MODE_OFF);
		} else if (gamut_cfg_data->flags & MDP_PP_OPS_ENABLE) {
			pp_sts->gamut_sts |= PP_STS_ENABLE;
			if (pp_sts_is_enabled(pp_sts->gamut_sts,
					pp_sts->side_sts)) {
				if (gamut_data->mode == mdp_gamut_coarse_mode)
					val |= GAMUT_COARSE_EN;
				if (gamut_data->map_en)
					val |= GAMUT_MAP_EN;
				val |= GAMUT_ENABLE;
			}
			writel_relaxed(val, base_addr + GAMUT_OP_MODE_OFF);
		}
	}
	return ret;
}
+
+static int pp_pcc_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type)
+{
+ struct mdp_pcc_cfg_data *pcc_cfg_data = NULL;
+ struct mdp_pcc_data_v1_7 *pcc_data = NULL;
+ char __iomem *addr = NULL;
+ u32 opmode = 0;
+
+ if (!base_addr || !cfg_data || !pp_sts) {
+ pr_err("invalid params base_addr %pK cfg_data %pK pp_sts %pK\n",
+ base_addr, cfg_data, pp_sts);
+ return -EINVAL;
+ }
+ pcc_cfg_data = (struct mdp_pcc_cfg_data *) cfg_data;
+ if (pcc_cfg_data->version != mdp_pcc_v1_7) {
+ pr_err("invalid pcc version %d\n", pcc_cfg_data->version);
+ return -EINVAL;
+ }
+ if (!(pcc_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+ pr_info("only read ops is set %d", pcc_cfg_data->ops);
+ return 0;
+ }
+ pcc_data = pcc_cfg_data->cfg_payload;
+ if (!pcc_data) {
+ pr_err("invalid payload for pcc %pK\n", pcc_data);
+ return -EINVAL;
+ }
+
+ if (!(pcc_cfg_data->ops & MDP_PP_OPS_WRITE))
+ goto bail_out;
+
+ addr = base_addr + PCC_CONST_COEFF_OFF;
+ writel_relaxed(pcc_data->r.c & PCC_CONST_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.c & PCC_CONST_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.c & PCC_CONST_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_R_COEFF_OFF;
+ writel_relaxed(pcc_data->r.r & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.r & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.r & PCC_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_G_COEFF_OFF;
+ writel_relaxed(pcc_data->r.g & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.g & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.g & PCC_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_B_COEFF_OFF;
+ writel_relaxed(pcc_data->r.b & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.b & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.b & PCC_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_RG_COEFF_OFF;
+ writel_relaxed(pcc_data->r.rg & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.rg & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.rg & PCC_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_RB_COEFF_OFF;
+ writel_relaxed(pcc_data->r.rb & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.rb & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.rb & PCC_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_GB_COEFF_OFF;
+ writel_relaxed(pcc_data->r.gb & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.gb & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.gb & PCC_COEFF_MASK, addr + 8);
+
+ addr = base_addr + PCC_RGB_COEFF_OFF;
+ writel_relaxed(pcc_data->r.rgb & PCC_COEFF_MASK, addr);
+ writel_relaxed(pcc_data->g.rgb & PCC_COEFF_MASK, addr + 4);
+ writel_relaxed(pcc_data->b.rgb & PCC_COEFF_MASK, addr + 8);
+
+bail_out:
+ pp_sts_set_split_bits(&pp_sts->pcc_sts, pcc_cfg_data->ops);
+ if (pcc_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+ writel_relaxed(opmode, base_addr + PCC_OP_MODE_OFF);
+ pp_sts->pcc_sts &= ~PP_STS_ENABLE;
+ } else if (pcc_cfg_data->ops & MDP_PP_OPS_ENABLE) {
+ pp_sts->pcc_sts |= PP_STS_ENABLE;
+ if (pp_sts_is_enabled(pp_sts->pcc_sts, pp_sts->side_sts))
+ opmode |= PCC_ENABLE;
+ writel_relaxed(opmode, base_addr + PCC_OP_MODE_OFF);
+ }
+
+ return 0;
+}
+
+/*
+ * pp_pcc_get_config() - read back the v1.7 PCC coefficient set from HW.
+ * Reads the const/R/G/B/RG/RB/GB/RGB coefficient registers for each of the
+ * r, g and b channels and copies the packed result to the user payload.
+ * Returns 0 on success, -EINVAL on bad args or version, -EFAULT on copy fail.
+ * NOTE(review): block_type and disp_num are unused here; presumably the
+ * signature matches a shared ops table - confirm against callers.
+ */
+static int pp_pcc_get_config(char __iomem *base_addr, void *cfg_data,
+			u32 block_type, u32 disp_num)
+{
+	char __iomem *addr;
+	struct mdp_pcc_cfg_data *pcc_cfg = NULL;
+	struct mdp_pcc_data_v1_7 pcc_data;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	pcc_cfg = (struct mdp_pcc_cfg_data *) cfg_data;
+	if (pcc_cfg->version != mdp_pcc_v1_7) {
+		pr_err("unsupported version of pcc %d\n",
+		       pcc_cfg->version);
+		return -EINVAL;
+	}
+
+	/* Registers are laid out r/g/b at consecutive 4-byte offsets. */
+	addr = base_addr + PCC_CONST_COEFF_OFF;
+	pcc_data.r.c = readl_relaxed(addr) & PCC_CONST_COEFF_MASK;
+	pcc_data.g.c = readl_relaxed(addr + 4) & PCC_CONST_COEFF_MASK;
+	pcc_data.b.c = readl_relaxed(addr + 8) & PCC_CONST_COEFF_MASK;
+
+	addr = base_addr + PCC_R_COEFF_OFF;
+	pcc_data.r.r = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.r = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.r = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_G_COEFF_OFF;
+	pcc_data.r.g = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.g = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.g = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_B_COEFF_OFF;
+	pcc_data.r.b = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.b = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.b = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_RG_COEFF_OFF;
+	pcc_data.r.rg = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.rg = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.rg = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_RB_COEFF_OFF;
+	pcc_data.r.rb = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.rb = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.rb = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_GB_COEFF_OFF;
+	pcc_data.r.gb = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.gb = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.gb = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_RGB_COEFF_OFF;
+	pcc_data.r.rgb = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.rgb = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.rgb = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	if (copy_to_user(pcc_cfg->cfg_payload, &pcc_data,
+			 sizeof(pcc_data))) {
+		pr_err("failed to copy the pcc info into payload\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * pp_pa_set_global_adj_regs() - program the PA global hue/sat/val/cont
+ * adjustment registers for a DSPP or VIG block, gated per-field by flags.
+ * The four registers sit at consecutive 4-byte offsets, so addr is advanced
+ * unconditionally even when a field's enable flag is clear.
+ */
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type)
+{
+	char __iomem *addr = NULL;
+
+	if (block_type == DSPP)
+		addr = base_addr + PA_DSPP_GLOBAL_OFF;
+	else
+		addr = base_addr + PA_VIG_GLOBAL_OFF;
+
+	if (flags & MDP_PP_PA_HUE_ENABLE)
+		writel_relaxed((pa_data->global_hue_adj &
+					PA_GLOBAL_HUE_MASK), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_SAT_ENABLE)
+		writel_relaxed((pa_data->global_sat_adj &
+					PA_GLOBAL_SAT_MASK), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_VAL_ENABLE)
+		writel_relaxed((pa_data->global_val_adj &
+					PA_GLOBAL_VAL_MASK), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_CONT_ENABLE)
+		writel_relaxed((pa_data->global_cont_adj &
+					PA_GLOBAL_CONT_MASK), addr);
+}
+
+/*
+ * pp_pa_set_mem_col_regs() - write one memory-color zone's register set.
+ * The P0 bank holds color_adjust_p0/p1 and hue/sat/val region registers at
+ * consecutive 4-byte offsets; the separate P2 bank holds color_adjust_p2
+ * followed by blend_gain.
+ */
+static void pp_pa_set_mem_col_regs(char __iomem *mem_col_p0_addr,
+				char __iomem *mem_col_p2_addr,
+				struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
+{
+	writel_relaxed((mem_col_data->color_adjust_p0 &
+				PA_MEM_COL_ADJ_P0_MASK), mem_col_p0_addr);
+	mem_col_p0_addr += 4;
+	writel_relaxed(mem_col_data->color_adjust_p1, mem_col_p0_addr);
+	mem_col_p0_addr += 4;
+	writel_relaxed((mem_col_data->hue_region &
+				PA_MEM_COL_HUE_REGION_MASK), mem_col_p0_addr);
+	mem_col_p0_addr += 4;
+	writel_relaxed((mem_col_data->sat_region &
+				PA_MEM_COL_SAT_REGION_MASK), mem_col_p0_addr);
+	mem_col_p0_addr += 4;
+	writel_relaxed((mem_col_data->val_region &
+				PA_MEM_COL_VAL_REGION_MASK), mem_col_p0_addr);
+
+	writel_relaxed(mem_col_data->color_adjust_p2, mem_col_p2_addr);
+	mem_col_p2_addr += 4;
+	writel_relaxed(mem_col_data->blend_gain, mem_col_p2_addr);
+}
+
+/*
+ * pp_pa_set_mem_col() - program the skin/sky/foliage memory-color zones
+ * selected by flags, and accumulate the corresponding sat/val hold bits
+ * into *pa_hold with the touched bit positions recorded in *pa_hold_mask
+ * so the caller can read-modify-write the shared HOLD register.
+ */
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type, uint32_t *pa_hold,
+				uint32_t *pa_hold_mask)
+{
+	uint32_t sat_hold = 0, val_hold = 0, mem_col_hold = 0;
+	u32 skin_p0_off = 0, skin_p2_off = 0;
+	u32 sky_p0_off = 0, sky_p2_off = 0;
+	u32 fol_p0_off = 0, fol_p2_off = 0;
+	char __iomem *mem_col_p0_addr = NULL;
+	char __iomem *mem_col_p2_addr = NULL;
+
+	/* DSPP and VIG place the zone banks at different offsets. */
+	if (block_type == DSPP) {
+		skin_p0_off = PA_DSPP_MEM_COL_SKIN_P0_OFF;
+		skin_p2_off = PA_DSPP_MEM_COL_SKIN_P2_OFF;
+		sky_p0_off = PA_DSPP_MEM_COL_SKY_P0_OFF;
+		sky_p2_off = PA_DSPP_MEM_COL_SKY_P2_OFF;
+		fol_p0_off = PA_DSPP_MEM_COL_FOL_P0_OFF;
+		fol_p2_off = PA_DSPP_MEM_COL_FOL_P2_OFF;
+	} else {
+		skin_p0_off = PA_VIG_MEM_COL_SKIN_P0_OFF;
+		skin_p2_off = PA_VIG_MEM_COL_SKIN_P2_OFF;
+		sky_p0_off = PA_VIG_MEM_COL_SKY_P0_OFF;
+		sky_p2_off = PA_VIG_MEM_COL_SKY_P2_OFF;
+		fol_p0_off = PA_VIG_MEM_COL_FOL_P0_OFF;
+		fol_p2_off = PA_VIG_MEM_COL_FOL_P2_OFF;
+	}
+	/* Update skin zone memory color registers */
+	if (flags & MDP_PP_PA_SKIN_ENABLE) {
+		mem_col_p0_addr = base_addr + skin_p0_off;
+		mem_col_p2_addr = base_addr + skin_p2_off;
+		pp_pa_set_mem_col_regs(mem_col_p0_addr, mem_col_p2_addr,
+				       &pa_data->skin_cfg);
+		sat_hold = (pa_data->skin_cfg.sat_hold & PA_HOLD_MASK) <<
+			    PA_HOLD_SAT_SHIFT;
+		val_hold = (pa_data->skin_cfg.val_hold & PA_HOLD_MASK) <<
+			    PA_HOLD_VAL_SHIFT;
+		mem_col_hold = (sat_hold | val_hold) << PA_HOLD_SKIN_SHIFT;
+		*pa_hold |= mem_col_hold;
+		*pa_hold_mask |= PA_HOLD_SKIN_MASK;
+	}
+	/* Update sky zone memory color registers */
+	if (flags & MDP_PP_PA_SKY_ENABLE) {
+		mem_col_p0_addr = base_addr + sky_p0_off;
+		mem_col_p2_addr = base_addr + sky_p2_off;
+		pp_pa_set_mem_col_regs(mem_col_p0_addr, mem_col_p2_addr,
+				       &pa_data->sky_cfg);
+		sat_hold = (pa_data->sky_cfg.sat_hold & PA_HOLD_MASK) <<
+			    PA_HOLD_SAT_SHIFT;
+		val_hold = (pa_data->sky_cfg.val_hold & PA_HOLD_MASK) <<
+			    PA_HOLD_VAL_SHIFT;
+		mem_col_hold = (sat_hold | val_hold) << PA_HOLD_SKY_SHIFT;
+		*pa_hold |= mem_col_hold;
+		*pa_hold_mask |= PA_HOLD_SKY_MASK;
+	}
+	/* Update foliage zone memory color registers */
+	if (flags & MDP_PP_PA_FOL_ENABLE) {
+		mem_col_p0_addr = base_addr + fol_p0_off;
+		mem_col_p2_addr = base_addr + fol_p2_off;
+		pp_pa_set_mem_col_regs(mem_col_p0_addr, mem_col_p2_addr,
+				       &pa_data->fol_cfg);
+		sat_hold = (pa_data->fol_cfg.sat_hold & PA_HOLD_MASK) <<
+			    PA_HOLD_SAT_SHIFT;
+		val_hold = (pa_data->fol_cfg.val_hold & PA_HOLD_MASK) <<
+			    PA_HOLD_VAL_SHIFT;
+		mem_col_hold = (sat_hold | val_hold) << PA_HOLD_FOL_SHIFT;
+		*pa_hold |= mem_col_hold;
+		*pa_hold_mask |= PA_HOLD_FOL_MASK;
+	}
+}
+
+/*
+ * pp_pa_set_six_zone() - program the six-zone LUT, threshold and adjust
+ * registers (DSPP only) and fold its sat/val hold bits into *pa_hold /
+ * *pa_hold_mask. No-op unless MDP_PP_PA_SIX_ZONE_ENABLE is set; invalid
+ * LUT pointers/length are logged and skipped rather than failing the call.
+ */
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data,
+				u32 flags,
+				uint32_t *pa_hold,
+				uint32_t *pa_hold_mask)
+{
+	u32 data, i;
+	char __iomem *addr = base_addr + PA_SIX_ZONE_LUT_OFF;
+	uint32_t sat_hold = 0, val_hold = 0, mem_col_hold = 0;
+	/* Update six zone memory color registers */
+	if (!(flags & MDP_PP_PA_SIX_ZONE_ENABLE))
+		return;
+
+	if (!pa_data->six_zone_len || !pa_data->six_zone_curve_p0 ||
+	    !pa_data->six_zone_curve_p1) {
+		pr_err("Invalid six zone data: len %d curve_p0 %pK curve_p1 %pK\n",
+			pa_data->six_zone_len,
+			pa_data->six_zone_curve_p0,
+			pa_data->six_zone_curve_p1);
+		return;
+	}
+
+	/* First entry: write p1, then p0 with the index-update bit set. */
+	writel_relaxed((pa_data->six_zone_curve_p1[0] &
+			PA_SIX_ZONE_CURVE_P1_MASK), addr + 4);
+	/* Index Update to trigger auto-incrementing LUT accesses */
+	data = PA_SIX_ZONE_INDEX_UPDATE;
+	writel_relaxed((pa_data->six_zone_curve_p0[0] &
+			PA_SIX_ZONE_CURVE_P0_MASK) | data, addr);
+
+	/* Remove Index Update */
+	for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+		writel_relaxed((pa_data->six_zone_curve_p1[i] &
+				PA_SIX_ZONE_CURVE_P1_MASK), addr + 4);
+		writel_relaxed((pa_data->six_zone_curve_p0[i] &
+				PA_SIX_ZONE_CURVE_P0_MASK), addr);
+	}
+	addr = base_addr + PA_SIX_ZONE_REGION_OFF;
+	writel_relaxed(pa_data->six_zone_thresh, addr);
+
+	addr = base_addr + PA_SIX_ZONE_ADJ_OFF;
+	writel_relaxed((pa_data->six_zone_adj_p0 &
+			PA_SIX_ZONE_ADJ_P0_MASK), addr);
+	addr += 4;
+	writel_relaxed(pa_data->six_zone_adj_p1, addr);
+
+	sat_hold = (pa_data->six_zone_sat_hold & PA_HOLD_MASK) <<
+		    PA_HOLD_SAT_SHIFT;
+	val_hold = (pa_data->six_zone_val_hold & PA_HOLD_MASK) <<
+		    PA_HOLD_VAL_SHIFT;
+	mem_col_hold = (sat_hold | val_hold) << PA_HOLD_SIX_ZONE_SHIFT;
+	*pa_hold |= mem_col_hold;
+	*pa_hold_mask |= PA_HOLD_SIX_ZONE_MASK;
+}
+
+/*
+ * pp_pa_set_config() - top-level v1.7 PA write path for a DSPP or VIG block.
+ * Validates args/version, programs global adjust, memory-color and (DSPP
+ * only) six-zone registers, then read-modify-writes the shared HOLD register
+ * touching only the bits collected in pa_hold_mask, and finally updates the
+ * PP status via pp_pa_set_sts().
+ * Returns 0 on success or a negative errno on validation failure.
+ */
+static int pp_pa_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type)
+{
+	struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+	struct mdp_pa_data_v1_7 *pa_data = NULL;
+	uint32_t pa_hold = 0, pa_hold_mask = 0, pa_hold_tmp;
+	char __iomem *pa_hold_addr = NULL;
+	int ret = 0;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+	if (pa_cfg_data->version != mdp_pa_v1_7) {
+		pr_err("invalid pa version %d\n", pa_cfg_data->version);
+		return -EINVAL;
+	}
+	if (!(pa_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_info("only read ops is set %d", pa_cfg_data->flags);
+		return 0;
+	}
+	/*
+	 * NOTE(review): on the DISABLE path pa_data stays NULL when reaching
+	 * pa_set_sts below; pp_pa_set_sts() is presumably NULL-tolerant -
+	 * confirm in mdss_mdp_pp_common.c.
+	 */
+	if (pa_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("Disable PA");
+		goto pa_set_sts;
+	}
+
+	pa_data = pa_cfg_data->cfg_payload;
+	if (!pa_data) {
+		pr_err("invalid payload for pa %pK\n", pa_data);
+		return -EINVAL;
+	}
+
+	if (!(pa_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_warn("No write flag enabled for PA flags %d\n",
+			pa_cfg_data->flags);
+		return 0;
+	}
+
+	pp_pa_set_global_adj_regs(base_addr, pa_data, pa_cfg_data->flags,
+			block_type);
+	pp_pa_set_mem_col(base_addr, pa_data, pa_cfg_data->flags,
+			block_type, &pa_hold, &pa_hold_mask);
+	if (block_type == DSPP)
+		pp_pa_set_six_zone(base_addr, pa_data, pa_cfg_data->flags,
+				   &pa_hold, &pa_hold_mask);
+
+	/*
+	 * Only modify the PA hold bits for PA features that have
+	 * been updated.
+	 */
+	if (block_type == DSPP)
+		pa_hold_addr = base_addr + PA_DSPP_HOLD_OFF;
+	else
+		pa_hold_addr = base_addr + PA_VIG_HOLD_OFF;
+	pa_hold_tmp = readl_relaxed(pa_hold_addr);
+	pa_hold_tmp &= ~pa_hold_mask;
+	pa_hold |= pa_hold_tmp;
+	writel_relaxed(pa_hold, pa_hold_addr);
+
+pa_set_sts:
+	pp_pa_set_sts(pp_sts, pa_data, pa_cfg_data->flags, block_type);
+
+	return ret;
+}
+
+/*
+ * pp_pa_get_global_adj_regs() - read back the global hue/sat/val/cont
+ * adjustment registers into pa_data, gated per-field by flags. Mirrors
+ * pp_pa_set_global_adj_regs(): addr advances unconditionally over the four
+ * consecutive 4-byte registers.
+ */
+static void pp_pa_get_global_adj_regs(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type)
+{
+	char __iomem *addr = NULL;
+
+	if (block_type == DSPP)
+		addr = base_addr + PA_DSPP_GLOBAL_OFF;
+	else
+		addr = base_addr + PA_VIG_GLOBAL_OFF;
+
+	if (flags & MDP_PP_PA_HUE_ENABLE)
+		pa_data->global_hue_adj = readl_relaxed(addr) &
+					  PA_GLOBAL_HUE_MASK;
+	addr += 4;
+	if (flags & MDP_PP_PA_SAT_ENABLE)
+		pa_data->global_sat_adj = readl_relaxed(addr) &
+					  PA_GLOBAL_SAT_MASK;
+	addr += 4;
+	if (flags & MDP_PP_PA_VAL_ENABLE)
+		pa_data->global_val_adj = readl_relaxed(addr) &
+					  PA_GLOBAL_VAL_MASK;
+	addr += 4;
+	if (flags & MDP_PP_PA_CONT_ENABLE)
+		pa_data->global_cont_adj = readl_relaxed(addr) &
+					   PA_GLOBAL_CONT_MASK;
+}
+
+/*
+ * pp_pa_get_mem_col_regs() - read one memory-color zone's register set
+ * back into mem_col_data. Inverse of pp_pa_set_mem_col_regs(): same P0/P2
+ * bank layout, with the documented masks applied to the masked fields.
+ */
+static void pp_pa_get_mem_col_regs(char __iomem *mem_col_p0_addr,
+				char __iomem *mem_col_p2_addr,
+				struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
+{
+	mem_col_data->color_adjust_p0 = readl_relaxed(mem_col_p0_addr) &
+					PA_MEM_COL_ADJ_P0_MASK;
+	mem_col_p0_addr += 4;
+	mem_col_data->color_adjust_p1 = readl_relaxed(mem_col_p0_addr);
+	mem_col_p0_addr += 4;
+	mem_col_data->hue_region = readl_relaxed(mem_col_p0_addr) &
+				   PA_MEM_COL_HUE_REGION_MASK;
+	mem_col_p0_addr += 4;
+	mem_col_data->sat_region = readl_relaxed(mem_col_p0_addr) &
+				   PA_MEM_COL_SAT_REGION_MASK;
+	mem_col_p0_addr += 4;
+	mem_col_data->val_region = readl_relaxed(mem_col_p0_addr) &
+				   PA_MEM_COL_VAL_REGION_MASK;
+
+	mem_col_data->color_adjust_p2 = readl_relaxed(mem_col_p2_addr);
+	mem_col_p2_addr += 4;
+	mem_col_data->blend_gain = readl_relaxed(mem_col_p2_addr);
+}
+
+/*
+ * pp_pa_get_mem_col() - read back the skin/sky/foliage memory-color zones
+ * selected by flags, and unpack each zone's sat/val hold bits from the
+ * caller-supplied pa_hold snapshot of the shared HOLD register.
+ */
+static void pp_pa_get_mem_col(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type,
+				uint32_t pa_hold)
+{
+	uint32_t mem_col_hold = 0;
+	u32 skin_p0_off = 0, skin_p2_off = 0;
+	u32 sky_p0_off = 0, sky_p2_off = 0;
+	u32 fol_p0_off = 0, fol_p2_off = 0;
+	char __iomem *mem_col_p0_addr = NULL;
+	char __iomem *mem_col_p2_addr = NULL;
+
+	/* DSPP and VIG place the zone banks at different offsets. */
+	if (block_type == DSPP) {
+		skin_p0_off = PA_DSPP_MEM_COL_SKIN_P0_OFF;
+		skin_p2_off = PA_DSPP_MEM_COL_SKIN_P2_OFF;
+		sky_p0_off = PA_DSPP_MEM_COL_SKY_P0_OFF;
+		sky_p2_off = PA_DSPP_MEM_COL_SKY_P2_OFF;
+		fol_p0_off = PA_DSPP_MEM_COL_FOL_P0_OFF;
+		fol_p2_off = PA_DSPP_MEM_COL_FOL_P2_OFF;
+	} else {
+		skin_p0_off = PA_VIG_MEM_COL_SKIN_P0_OFF;
+		skin_p2_off = PA_VIG_MEM_COL_SKIN_P2_OFF;
+		sky_p0_off = PA_VIG_MEM_COL_SKY_P0_OFF;
+		sky_p2_off = PA_VIG_MEM_COL_SKY_P2_OFF;
+		fol_p0_off = PA_VIG_MEM_COL_FOL_P0_OFF;
+		fol_p2_off = PA_VIG_MEM_COL_FOL_P2_OFF;
+	}
+	/* Update skin zone memory color registers */
+	if (flags & MDP_PP_PA_SKIN_ENABLE) {
+		mem_col_p0_addr = base_addr + skin_p0_off;
+		mem_col_p2_addr = base_addr + skin_p2_off;
+		pp_pa_get_mem_col_regs(mem_col_p0_addr, mem_col_p2_addr,
+				       &pa_data->skin_cfg);
+		mem_col_hold = pa_hold >> PA_HOLD_SKIN_SHIFT;
+		pa_data->skin_cfg.sat_hold = (mem_col_hold >>
+				PA_HOLD_SAT_SHIFT) & PA_HOLD_MASK;
+		pa_data->skin_cfg.val_hold = (mem_col_hold >>
+				PA_HOLD_VAL_SHIFT) & PA_HOLD_MASK;
+	}
+	/* Update sky zone memory color registers */
+	if (flags & MDP_PP_PA_SKY_ENABLE) {
+		mem_col_p0_addr = base_addr + sky_p0_off;
+		mem_col_p2_addr = base_addr + sky_p2_off;
+		pp_pa_get_mem_col_regs(mem_col_p0_addr, mem_col_p2_addr,
+				       &pa_data->sky_cfg);
+		mem_col_hold = pa_hold >> PA_HOLD_SKY_SHIFT;
+		pa_data->sky_cfg.sat_hold = (mem_col_hold >>
+				PA_HOLD_SAT_SHIFT) & PA_HOLD_MASK;
+		pa_data->sky_cfg.val_hold = (mem_col_hold >>
+				PA_HOLD_VAL_SHIFT) & PA_HOLD_MASK;
+	}
+	/* Update foliage zone memory color registers */
+	if (flags & MDP_PP_PA_FOL_ENABLE) {
+		mem_col_p0_addr = base_addr + fol_p0_off;
+		mem_col_p2_addr = base_addr + fol_p2_off;
+		pp_pa_get_mem_col_regs(mem_col_p0_addr, mem_col_p2_addr,
+				       &pa_data->fol_cfg);
+		mem_col_hold = pa_hold >> PA_HOLD_FOL_SHIFT;
+		pa_data->fol_cfg.sat_hold = (mem_col_hold >>
+				PA_HOLD_SAT_SHIFT) & PA_HOLD_MASK;
+		pa_data->fol_cfg.val_hold = (mem_col_hold >>
+				PA_HOLD_VAL_SHIFT) & PA_HOLD_MASK;
+	}
+}
+
+/*
+ * pp_pa_get_six_zone() - read the six-zone LUT, threshold, adjust registers
+ * and hold bits back to the user payload. The user LUT pointers are
+ * pre-checked with access_ok(); p0/p1 entries are staged in one kernel
+ * buffer before copy_to_user.
+ * Returns 0 on success, -EINVAL/-EFAULT/-ENOMEM on failure.
+ */
+static int pp_pa_get_six_zone(char __iomem *base_addr,
+			struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+			u32 pa_hold)
+{
+	uint32_t six_zone_sz = 0, six_zone_buf_sz = 0;
+	u32 data = 0;
+	char __iomem *addr = base_addr + PA_SIX_ZONE_LUT_OFF;
+	uint32_t *six_zone_read_buf = NULL;
+	uint32_t *six_zone_p0 = NULL, *six_zone_p1 = NULL;
+	uint32_t six_zone_hold = 0;
+	int ret = 0, i;
+
+	if (pa_data->six_zone_len != MDP_SIX_ZONE_LUT_SIZE) {
+		pr_err("Invalid six zone length %d\n",
+			pa_data->six_zone_len);
+		return -EINVAL;
+	}
+	six_zone_sz = pa_data->six_zone_len * sizeof(uint32_t);
+
+	if (!access_ok(VERIFY_WRITE, pa_data->six_zone_curve_p0,
+		       six_zone_sz)) {
+		pr_err("invalid six_zone_curve_p0 addr for sz %d\n",
+		       six_zone_sz);
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, pa_data->six_zone_curve_p1,
+		       six_zone_sz)) {
+		pr_err("invalid six_zone_curve_p1 addr for sz %d\n",
+		       six_zone_sz);
+		return -EFAULT;
+	}
+
+	/* Single allocation holds both the p0 and p1 tables back to back. */
+	six_zone_buf_sz = 2 * six_zone_sz;
+	six_zone_read_buf = kzalloc(six_zone_buf_sz, GFP_KERNEL);
+	if (!six_zone_read_buf) {
+		pr_err("allocation failed for six zone lut size %d\n",
+		       six_zone_buf_sz);
+		ret = -ENOMEM;
+		goto six_zone_exit;
+	}
+	six_zone_p0 = six_zone_read_buf;
+	six_zone_p1 = &six_zone_read_buf[MDP_SIX_ZONE_LUT_SIZE];
+
+	/* Arm auto-incrementing LUT reads before walking the entries. */
+	data = PA_SIX_ZONE_VALUE_UPDATE | PA_SIX_ZONE_INDEX_UPDATE;
+	writel_relaxed(data, addr);
+
+	for (i = 0; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+		six_zone_p1[i] = readl_relaxed(addr + 4) &
+				 PA_SIX_ZONE_CURVE_P1_MASK;
+		six_zone_p0[i] = readl_relaxed(addr) &
+				 PA_SIX_ZONE_CURVE_P0_MASK;
+	}
+
+	addr = base_addr + PA_SIX_ZONE_REGION_OFF;
+	pa_data->six_zone_thresh = readl_relaxed(addr);
+
+	addr = base_addr + PA_SIX_ZONE_ADJ_OFF;
+	pa_data->six_zone_adj_p0 = readl_relaxed(addr) &
+				   PA_SIX_ZONE_ADJ_P0_MASK;
+	addr += 4;
+	pa_data->six_zone_adj_p1 = readl_relaxed(addr);
+
+	if (copy_to_user(pa_data->six_zone_curve_p0, six_zone_p0,
+			 six_zone_sz)) {
+		pr_err("Failed to copy six zone p0 data\n");
+		ret = -EFAULT;
+		goto six_zone_memory_exit;
+	}
+	if (copy_to_user(pa_data->six_zone_curve_p1, six_zone_p1,
+			 six_zone_sz)) {
+		pr_err("Failed to copy six zone p1 data\n");
+		ret = -EFAULT;
+		goto six_zone_memory_exit;
+	}
+
+	six_zone_hold = pa_hold >> PA_HOLD_SIX_ZONE_SHIFT;
+	pa_data->six_zone_sat_hold = (six_zone_hold >> PA_HOLD_SAT_SHIFT) &
+				     PA_HOLD_MASK;
+	pa_data->six_zone_val_hold = (six_zone_hold >> PA_HOLD_VAL_SHIFT) &
+				     PA_HOLD_MASK;
+
+six_zone_memory_exit:
+	kfree(six_zone_read_buf);
+six_zone_exit:
+	return ret;
+}
+
+/*
+ * pp_pa_get_config() - top-level v1.7 PA read path for a DSPP or VIG block.
+ * Copies the user payload in, snapshots the HOLD register, reads six-zone
+ * (DSPP only, when requested), global adjust and memory-color state, then
+ * copies the filled payload back to userspace.
+ * Returns 0 on success or a negative errno on failure.
+ * NOTE(review): disp_num is unused; presumably kept for a shared ops-table
+ * signature - confirm against callers.
+ */
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+			u32 block_type, u32 disp_num)
+{
+	struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+	struct mdp_pa_data_v1_7 pa_data;
+	int ret = 0;
+	uint32_t pa_hold = 0;
+	char __iomem *pa_hold_addr = NULL;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+			base_addr, cfg_data);
+		return -EINVAL;
+	}
+	if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+	if (pa_cfg_data->version != mdp_pa_v1_7) {
+		pr_err("invalid pa version %d\n", pa_cfg_data->version);
+		return -EINVAL;
+	}
+
+	/* Pull in the payload so the user's LUT pointers/flags are known. */
+	if (copy_from_user(&pa_data, pa_cfg_data->cfg_payload,
+			   sizeof(pa_data))) {
+		pr_err("copy from user failed for pa data\n");
+		return -EFAULT;
+	}
+
+	if (block_type == DSPP)
+		pa_hold_addr = base_addr + PA_DSPP_HOLD_OFF;
+	else
+		pa_hold_addr = base_addr + PA_VIG_HOLD_OFF;
+	pa_hold = readl_relaxed(pa_hold_addr);
+
+	if ((block_type == DSPP) &&
+	    (pa_cfg_data->flags & MDP_PP_PA_SIX_ZONE_ENABLE)) {
+		ret = pp_pa_get_six_zone(base_addr,
+				   &pa_data,
+				   pa_cfg_data->flags,
+				   pa_hold);
+		if (ret) {
+			pr_err("six zone read failed ret %d\n", ret);
+			return ret;
+		}
+	}
+	pp_pa_get_global_adj_regs(base_addr, &pa_data, pa_cfg_data->flags,
+			block_type);
+	pp_pa_get_mem_col(base_addr, &pa_data, pa_cfg_data->flags,
+			block_type, pa_hold);
+
+	ret = copy_to_user(pa_cfg_data->cfg_payload, &pa_data, sizeof(pa_data));
+	if (ret) {
+		pr_err("Failed to copy PA data to user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * pp_pa_update_vig_opmode() - translate cached PA status bits (PP_STS_*)
+ * into the VIG PA op-mode register bits, always setting the block enable.
+ */
+static void pp_pa_update_vig_opmode(int pa_sts, u32 *opmode)
+{
+	*opmode |= PA_VIG_OP_ENABLE;
+	if (pa_sts & PP_STS_PA_HUE_MASK)
+		*opmode |= PA_VIG_OP_HUE_MASK;
+	if (pa_sts & PP_STS_PA_SAT_MASK)
+		*opmode |= PA_VIG_OP_SAT_MASK;
+	if (pa_sts & PP_STS_PA_VAL_MASK)
+		*opmode |= PA_VIG_OP_VAL_MASK;
+	if (pa_sts & PP_STS_PA_CONT_MASK)
+		*opmode |= PA_VIG_OP_CONT_MASK;
+	if (pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+		*opmode |= PA_VIG_OP_SAT_ZERO_EXP_EN;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+		*opmode |= PA_VIG_OP_MEM_COL_SKIN_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+		*opmode |= PA_VIG_OP_MEM_COL_FOL_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+		*opmode |= PA_VIG_OP_MEM_COL_SKY_MASK;
+	if (pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_HUE_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_SAT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_VAL_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_CONT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_BLEND_EN;
+}
+
+/*
+ * pp_pa_update_dspp_opmode() - translate cached PA status bits (PP_STS_*)
+ * into the DSPP PA op-mode register bits. Like the VIG variant but with the
+ * extra DSPP-only six-zone and six-zone memory-protect bits.
+ */
+static void pp_pa_update_dspp_opmode(int pa_sts, u32 *opmode)
+{
+	*opmode |= PA_DSPP_OP_ENABLE;
+	if (pa_sts & PP_STS_PA_HUE_MASK)
+		*opmode |= PA_DSPP_OP_HUE_MASK;
+	if (pa_sts & PP_STS_PA_SAT_MASK)
+		*opmode |= PA_DSPP_OP_SAT_MASK;
+	if (pa_sts & PP_STS_PA_VAL_MASK)
+		*opmode |= PA_DSPP_OP_VAL_MASK;
+	if (pa_sts & PP_STS_PA_CONT_MASK)
+		*opmode |= PA_DSPP_OP_CONT_MASK;
+	if (pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+		*opmode |= PA_DSPP_OP_SAT_ZERO_EXP_EN;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+		*opmode |= PA_DSPP_OP_MEM_COL_SKIN_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+		*opmode |= PA_DSPP_OP_MEM_COL_FOL_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+		*opmode |= PA_DSPP_OP_MEM_COL_SKY_MASK;
+	if (pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
+		*opmode |= PA_DSPP_OP_SIX_ZONE_HUE_MASK;
+	if (pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
+		*opmode |= PA_DSPP_OP_SIX_ZONE_SAT_MASK;
+	if (pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
+		*opmode |= PA_DSPP_OP_SIX_ZONE_VAL_MASK;
+	if (pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_HUE_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_SAT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_VAL_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_CONT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_BLEND_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_SIX_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_SIX_EN;
+}
+
+/*
+ * pp_igc_set_config() - program the v1.7 IGC LUT for an RGB/DMA/VIG/DSPP
+ * block. Writes entry 0 with the index-update bit to reset the HW index,
+ * then streams the remaining entries via the auto-incrementing c0/c1/c2
+ * ports, and finally updates the IGC status/split bits.
+ * Returns 0 on success or a negative errno on validation failure.
+ */
+static int pp_igc_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type)
+{
+	int ret = 0, i = 0;
+	struct mdp_igc_lut_data *lut_cfg_data = NULL;
+	struct mdp_igc_lut_data_v1_7 *lut_data = NULL;
+	char __iomem *c0 = NULL, *c1 = NULL, *c2 = NULL;
+	u32 data;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_igc_lut_data *) cfg_data;
+	if (lut_cfg_data->version != mdp_igc_v1_7 ||
+	    !lut_cfg_data->cfg_payload) {
+		pr_err_once("invalid igc version %d payload %pK\n",
+			    lut_cfg_data->version, lut_cfg_data->cfg_payload);
+		return -EINVAL;
+	}
+	if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+		pr_err("only read ops set for lut\n");
+		return ret;
+	}
+	if (lut_cfg_data->block > IGC_MASK_MAX) {
+		pr_err("invalid mask value for IGC %d", lut_cfg_data->block);
+		return -EINVAL;
+	}
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+		goto bail_out;
+	}
+	lut_data = lut_cfg_data->cfg_payload;
+	if (lut_data->len != IGC_LUT_ENTRIES || !lut_data->c0_c1_data ||
+	    !lut_data->c2_data) {
+		pr_err("invalid lut len %d c0_c1_data %pK c2_data %pK\n",
+		       lut_data->len, lut_data->c0_c1_data, lut_data->c2_data);
+		return -EINVAL;
+	}
+	/* c0 LUT port location depends on which pipe type owns the IGC. */
+	switch (block_type) {
+	case SSPP_RGB:
+		c0 = base_addr + IGC_RGB_C0_LUT;
+		break;
+	case SSPP_DMA:
+		c0 = base_addr + IGC_DMA_C0_LUT;
+		break;
+	case SSPP_VIG:
+	case DSPP:
+		c0 = base_addr + IGC_C0_LUT;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block_type);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		pr_err("igc table not updated ret %d\n", ret);
+		return ret;
+	}
+	c1 = c0 + 4;
+	c2 = c1 + 4;
+	/* c0_c1_data packs c0 in the low 16 bits and c1 in the high 16. */
+	data = IGC_INDEX_UPDATE | IGC_CONFIG_MASK(lut_cfg_data->block);
+	pr_debug("data %x block type %d mask %x\n",
+		  data, lut_cfg_data->block,
+		  IGC_CONFIG_MASK(lut_cfg_data->block));
+	writel_relaxed((lut_data->c0_c1_data[0] & IGC_DATA_MASK) | data, c0);
+	writel_relaxed(((lut_data->c0_c1_data[0] >> 16)
+			& IGC_DATA_MASK) | data, c1);
+	writel_relaxed((lut_data->c2_data[0] & IGC_DATA_MASK) | data, c2);
+	data &= ~IGC_INDEX_UPDATE;
+	/* update the index for c0, c1 , c2 */
+	for (i = 1; i < IGC_LUT_ENTRIES; i++) {
+		writel_relaxed((lut_data->c0_c1_data[i] & IGC_DATA_MASK)
+			       | data, c0);
+		writel_relaxed(((lut_data->c0_c1_data[i] >> 16)
+				& IGC_DATA_MASK) | data, c1);
+		writel_relaxed((lut_data->c2_data[i] & IGC_DATA_MASK)
+				| data, c2);
+	}
+bail_out:
+	if (!ret) {
+		if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE)
+			pp_sts->igc_sts &= ~PP_STS_ENABLE;
+		else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE)
+			pp_sts->igc_sts |= PP_STS_ENABLE;
+		pp_sts_set_split_bits(&pp_sts->igc_sts,
+				      lut_cfg_data->ops);
+	}
+	return ret;
+}
+
+/*
+ * pp_igc_get_config() - read the v1.7 IGC LUT back to userspace (DSPP only).
+ * Validates the request, verifies the user LUT buffers with access_ok(),
+ * arms index+value auto-increment reads, stages c0c1/c2 in one kernel
+ * buffer and copies both tables out.
+ * Returns 0 on success or a negative errno on failure.
+ * NOTE(review): disp_num is unused; presumably a shared ops-table
+ * signature - confirm against callers.
+ */
+static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
+			u32 block_type, u32 disp_num)
+{
+	int ret = 0, i = 0;
+	struct mdp_igc_lut_data *lut_cfg_data = NULL;
+	struct mdp_igc_lut_data_v1_7 lut_data_v1_7;
+	struct mdp_igc_lut_data_v1_7 *lut_data = &lut_data_v1_7;
+	char __iomem *c1 = NULL, *c2 = NULL;
+	u32 *c0c1_data = NULL, *c2_data = NULL;
+	u32 data = 0, sz = 0;
+
+	if (!base_addr || !cfg_data || block_type != DSPP) {
+		pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
+			base_addr, cfg_data, block_type);
+		return -EINVAL;
+	}
+	lut_cfg_data = (struct mdp_igc_lut_data *) cfg_data;
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+		pr_err("read ops not set for lut ops %d\n", lut_cfg_data->ops);
+		return ret;
+	}
+	if (lut_cfg_data->version != mdp_igc_v1_7 ||
+	    !lut_cfg_data->cfg_payload ||
+	    lut_cfg_data->block > IGC_MASK_MAX) {
+		pr_err("invalid igc version %d payload %pK block %d\n",
+		       lut_cfg_data->version, lut_cfg_data->cfg_payload,
+		       lut_cfg_data->block);
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+			   sizeof(*lut_data))) {
+		pr_err("copy from user failed for lut_data\n");
+		return -EFAULT;
+	}
+	if (lut_data->len != IGC_LUT_ENTRIES) {
+		pr_err("invalid lut len %d\n", lut_data->len);
+		ret = -EINVAL;
+		goto exit;
+	}
+	sz = IGC_LUT_ENTRIES * sizeof(u32);
+	if (!access_ok(VERIFY_WRITE, lut_data->c0_c1_data, sz) ||
+	    (!access_ok(VERIFY_WRITE, lut_data->c2_data, sz))) {
+		pr_err("invalid lut address for sz %d\n", sz);
+		ret = -EFAULT;
+		goto exit;
+	}
+	/* Allocate for c0c1 and c2 tables */
+	c0c1_data = kzalloc(sz * 2, GFP_KERNEL);
+	if (!c0c1_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	c2_data = &c0c1_data[IGC_LUT_ENTRIES];
+	data = IGC_INDEX_VALUE_UPDATE | IGC_CONFIG_MASK(lut_cfg_data->block);
+	pr_debug("data %x block type %d mask %x\n",
+		  data, lut_cfg_data->block,
+		  IGC_CONFIG_MASK(lut_cfg_data->block));
+	c1 = base_addr + 4;
+	c2 = c1 + 4;
+	/* Arm auto-incrementing reads on all three component ports. */
+	writel_relaxed(data, base_addr);
+	writel_relaxed(data, c1);
+	writel_relaxed(data, c2);
+	for (i = 0; i < IGC_LUT_ENTRIES; i++) {
+		c0c1_data[i] = readl_relaxed(base_addr) & IGC_DATA_MASK;
+		c0c1_data[i] |= (readl_relaxed(c1) & IGC_DATA_MASK) << 16;
+		c2_data[i] = readl_relaxed(c2) & IGC_DATA_MASK;
+	}
+	if (copy_to_user(lut_data->c0_c1_data, c0c1_data, sz)) {
+		pr_err("failed to copy the c0c1 data");
+		ret = -EFAULT;
+	}
+	if (!ret && copy_to_user(lut_data->c2_data, c2_data, sz)) {
+		pr_err("failed to copy the c2 data");
+		ret = -EFAULT;
+	}
+	kfree(c0c1_data);
+exit:
+	return ret;
+}
+
+
+/*
+ * pp_pgc_set_config() - program the v1.7 panel gamma (PGC/ARGC) LUT for a
+ * DSPP or layer-mixer block. Entries are written two at a time (even entry
+ * in the low half, odd entry shifted by PGC_ODD_SHIFT); DSPP additionally
+ * requests a LUT bank swap. The enable/disable op is applied last to the
+ * op-mode register and mirrored into the pgc/argc status word.
+ * Returns 0 on success or -EINVAL on validation failure.
+ */
+static int pp_pgc_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type)
+{
+	char __iomem *c0 = NULL, *c1 = NULL, *c2 = NULL;
+	u32 val = 0, i = 0, *sts = NULL;
+	struct mdp_pgc_lut_data *pgc_data = NULL;
+	struct mdp_pgc_lut_data_v1_7  *pgc_data_v17 = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if (block_type != DSPP && block_type != LM) {
+		pr_err("invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+	/* DSPP tracks status as pgc_sts, layer mixer as argc_sts. */
+	sts = (block_type == DSPP) ? &pp_sts->pgc_sts : &pp_sts->argc_sts;
+	pgc_data = (struct mdp_pgc_lut_data *) cfg_data;
+	if (pgc_data->version != mdp_pgc_v1_7) {
+		pr_err("invalid pgc version %d\n",
+			pgc_data->version);
+		return -EINVAL;
+	}
+	if (!(pgc_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_debug("only read ops is set %d", pgc_data->flags);
+		return 0;
+	}
+	if (pgc_data->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("disable GC\n");
+		goto set_ops;
+	}
+
+	pgc_data_v17 = (struct mdp_pgc_lut_data_v1_7 *) pgc_data->cfg_payload;
+	if (!pgc_data_v17) {
+		pr_err("invalid payload for GC %pK\n", pgc_data_v17);
+		return -EINVAL;
+	}
+
+	if (pgc_data_v17->len != PGC_LUT_ENTRIES || !pgc_data_v17->c0_data ||
+	    !pgc_data_v17->c1_data || !pgc_data_v17->c2_data) {
+		pr_err("Invalid params entries %d c0_data %pK c1_data %pK c2_data %pK\n",
+			pgc_data_v17->len, pgc_data_v17->c0_data,
+			pgc_data_v17->c1_data, pgc_data_v17->c2_data);
+		return -EINVAL;
+	}
+	c0 = base_addr + PGC_C0_LUT_INDEX;
+	c1 = c0 + PGC_C1C2_LUT_OFF;
+	c2 = c1 + PGC_C1C2_LUT_OFF;
+	/* set the indexes to zero */
+	writel_relaxed(0, c0 + PGC_INDEX_OFF);
+	writel_relaxed(0, c1 + PGC_INDEX_OFF);
+	writel_relaxed(0, c2 + PGC_INDEX_OFF);
+	for (i = 0; i < PGC_LUT_ENTRIES; i += 2) {
+		val = pgc_data_v17->c0_data[i] & PGC_DATA_MASK;
+		val |= (pgc_data_v17->c0_data[i + 1] & PGC_DATA_MASK) <<
+			PGC_ODD_SHIFT;
+		writel_relaxed(val, c0);
+		val = pgc_data_v17->c1_data[i] & PGC_DATA_MASK;
+		val |= (pgc_data_v17->c1_data[i + 1] & PGC_DATA_MASK) <<
+			PGC_ODD_SHIFT;
+		writel_relaxed(val, c1);
+		val = pgc_data_v17->c2_data[i] & PGC_DATA_MASK;
+		val |= (pgc_data_v17->c2_data[i + 1] & PGC_DATA_MASK) <<
+			PGC_ODD_SHIFT;
+		writel_relaxed(val, c2);
+	}
+	if (block_type == DSPP) {
+		val = PGC_SWAP;
+		writel_relaxed(val, base_addr + PGC_LUT_SWAP);
+	}
+
+set_ops:
+	if (pgc_data->flags & MDP_PP_OPS_DISABLE) {
+		*sts &= ~PP_STS_ENABLE;
+		writel_relaxed(0, base_addr + PGC_OPMODE_OFF);
+	} else if (pgc_data->flags & MDP_PP_OPS_ENABLE) {
+		val = PGC_ENABLE;
+		val |= (pgc_data->flags & MDP_PP_PGC_ROUNDING_ENABLE)
+			? BIT(1) : 0;
+		writel_relaxed(val, base_addr + PGC_OPMODE_OFF);
+		*sts |= PP_STS_ENABLE;
+	}
+	return 0;
+}
+
+/*
+ * pp_pgc_get_config() - read the v1.7 PGC LUT back to userspace. Verifies
+ * the user buffers with access_ok(), zeroes the HW LUT indexes, reads the
+ * packed even/odd entry pairs into one staged kernel buffer (c0|c1|c2 back
+ * to back) and copies each channel out.
+ * Returns 0 on success or a negative errno on failure.
+ * NOTE(review): on success only the local copy's len is set; nothing is
+ * copied back for it - presumably the caller relies only on the LUT
+ * buffers. Confirm against the ioctl path.
+ */
+static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
+			u32 block_type, u32 disp_num)
+{
+	int ret = 0;
+	char __iomem *c0 = NULL, *c1 = NULL, *c2 = NULL;
+	u32 *c0_data = NULL, *c1_data = NULL, *c2_data = NULL;
+	u32 val = 0, i = 0, sz = 0;
+	struct mdp_pgc_lut_data *pgc_data = NULL;
+	struct mdp_pgc_lut_data_v1_7 pgc_lut_data_v17;
+	struct mdp_pgc_lut_data_v1_7 *pgc_data_v17 = &pgc_lut_data_v17;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
+			base_addr, cfg_data, block_type);
+		return -EINVAL;
+	}
+	pgc_data = (struct mdp_pgc_lut_data *) cfg_data;
+	if (pgc_data->version != mdp_pgc_v1_7 || !pgc_data->cfg_payload) {
+		pr_err("invalid pgc version %d payload %pK\n",
+			pgc_data->version, pgc_data->cfg_payload);
+		return -EINVAL;
+	}
+	if (copy_from_user(pgc_data_v17, (void __user *) pgc_data->cfg_payload,
+			   sizeof(*pgc_data_v17))) {
+		pr_err("copy from user failed for pgc lut data\n");
+		return -EFAULT;
+	}
+	if (!(pgc_data->flags & MDP_PP_OPS_READ)) {
+		pr_info("read ops is not set %d", pgc_data->flags);
+		return -EINVAL;
+	}
+	sz = PGC_LUT_ENTRIES * sizeof(u32);
+	if (!access_ok(VERIFY_WRITE, pgc_data_v17->c0_data, sz) ||
+	    !access_ok(VERIFY_WRITE, pgc_data_v17->c1_data, sz) ||
+	    !access_ok(VERIFY_WRITE, pgc_data_v17->c2_data, sz)) {
+		pr_err("incorrect payload for PGC read size %d\n",
+			PGC_LUT_ENTRIES);
+		return -EFAULT;
+	}
+	/* One allocation stages all three channels contiguously. */
+	c0_data = kzalloc(sz * 3, GFP_KERNEL);
+	if (!c0_data)
+		return -ENOMEM;
+
+	c1_data = c0_data + PGC_LUT_ENTRIES;
+	c2_data = c1_data + PGC_LUT_ENTRIES;
+	c0 = base_addr + PGC_C0_LUT_INDEX;
+	c1 = c0 + PGC_C1C2_LUT_OFF;
+	c2 = c1 + PGC_C1C2_LUT_OFF;
+	/* set the indexes to zero */
+	writel_relaxed(0, c0 + 4);
+	writel_relaxed(0, c1 + 4);
+	writel_relaxed(0, c2 + 4);
+	/* Each 32-bit read yields an even/odd entry pair. */
+	for (i = 0; i < PGC_LUT_ENTRIES; i += 2) {
+		val = readl_relaxed(c0);
+		c0_data[i] = val & PGC_DATA_MASK;
+		c0_data[i + 1] = (val >> PGC_ODD_SHIFT) & PGC_DATA_MASK;
+		val = readl_relaxed(c1);
+		c1_data[i] = val & PGC_DATA_MASK;
+		c1_data[i + 1] = (val >> PGC_ODD_SHIFT) & PGC_DATA_MASK;
+		val = readl_relaxed(c2);
+		c2_data[i] = val & PGC_DATA_MASK;
+		c2_data[i + 1] = (val >> PGC_ODD_SHIFT) & PGC_DATA_MASK;
+	}
+	if (copy_to_user(pgc_data_v17->c0_data, c0_data, sz)) {
+		pr_err("failed to copyuser c0 data of sz %d\n", sz);
+		ret = -EFAULT;
+	}
+	if (!ret && copy_to_user(pgc_data_v17->c1_data, c1_data, sz)) {
+		pr_err("failed to copyuser c1 data of sz %d\n", sz);
+		ret = -EFAULT;
+	}
+	if (!ret && copy_to_user(pgc_data_v17->c2_data, c2_data, sz)) {
+		pr_err("failed to copyuser c2 data of sz %d\n", sz);
+		ret = -EFAULT;
+	}
+	if (!ret)
+		pgc_data_v17->len = PGC_LUT_ENTRIES;
+	kfree(c0_data);
+	return ret;
+}
+
+/*
+ * Version-query helpers: each reports the v1.7 revision constant for its
+ * PP feature (PCC, IGC, PGC, PA, gamut, dither, histogram LUT).
+ * Return 0 on success or -EINVAL when the output pointer is NULL.
+ */
+static int pp_pcc_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pcc_v1_7;
+	return 0;
+}
+
+static int pp_igc_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_igc_v1_7;
+	return 0;
+}
+
+static int pp_pgc_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pgc_v1_7;
+	return 0;
+}
+
+static int pp_pa_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pa_v1_7;
+	return 0;
+}
+
+static int pp_gamut_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_gamut_v1_7;
+	return 0;
+}
+
+static int pp_dither_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_dither_v1_7;
+	return 0;
+}
+
+static int pp_hist_lut_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_hist_lut_v1_7;
+	return 0;
+}
+
+/*
+ * pp_gamut_clock_gating_en() - switch the gamut block from partial to full
+ * clock gating, but only when the control register currently reads the
+ * partial-active value; silently no-ops for a NULL base or any other state.
+ */
+static void pp_gamut_clock_gating_en(char __iomem *base_addr)
+{
+	u32 val;
+
+	if (base_addr) {
+		val = readl_relaxed(base_addr + GAMUT_CLK_CTRL);
+		if (val == GAMUT_CLK_GATING_PARTIAL_ACTIVE)
+			writel_relaxed(GAMUT_CLK_GATING_ACTIVE,
+				       base_addr + GAMUT_CLK_CTRL);
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
new file mode 100644
index 0000000..7611ea4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_common.h"
+
+#define IGC_DSPP_OP_MODE_EN BIT(0)
+#define ENHIST_BIT_SHIFT 16
+/* PA related define */
+
+/* Offsets from DSPP/VIG base to PA block */
+#define PA_DSPP_BLOCK_REG_OFF 0x800
+#define PA_VIG_BLOCK_REG_OFF 0x1200
+
+/* Offsets to various subblocks from PA block
+ * in VIG/DSPP.
+ */
+#define PA_OP_MODE_REG_OFF 0x0
+#define PA_HIST_REG_OFF 0x4
+#define PA_LUTV_SWAP_REG_OFF 0x18
+#define PA_HSIC_REG_OFF 0x1C
+#define PA_DITHER_CTL_REG_OFF 0x2C
+#define PA_PWL_HOLD_REG_OFF 0x40
+
+/* Memory Color offsets */
+#define PA_MEM_COL_REG_OFF 0x80
+#define PA_MEM_SKIN_REG_OFF (PA_MEM_COL_REG_OFF)
+#define PA_MEM_SKY_REG_OFF (PA_MEM_SKIN_REG_OFF + \
+ JUMP_REGISTERS_OFF(5))
+#define PA_MEM_FOL_REG_OFF (PA_MEM_SKY_REG_OFF + \
+ JUMP_REGISTERS_OFF(5))
+#define PA_MEM_SKIN_ADJUST_P2_REG_OFF (PA_MEM_FOL_REG_OFF + \
+ JUMP_REGISTERS_OFF(5))
+#define PA_MEM_SKY_ADJUST_P2_REG_OFF (PA_MEM_SKIN_ADJUST_P2_REG_OFF + \
+ JUMP_REGISTERS_OFF(2))
+#define PA_MEM_FOL_ADJUST_P2_REG_OFF (PA_MEM_SKY_ADJUST_P2_REG_OFF + \
+ JUMP_REGISTERS_OFF(2))
+
+#define PA_SZONE_REG_OFF 0x100
+#define PA_LUTV_REG_OFF 0x200
+#define PA_HIST_RAM_REG_OFF 0x400
+
+#define PPB_GLOBAL_DITHER_REG_OFF 0x30E0
+#define DITHER_MATRIX_LEN 16
+#define DITHER_DEPTH_MAP_INDEX 9
+static u32 dither_matrix[DITHER_MATRIX_LEN] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+ 0, 0, 0, 0, 0, 1, 2, 3, 3};
+
+/* histogram prototypes */
+static int pp_get_hist_offset(u32 block, u32 *ctl_off);
+static int pp_hist_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+
+/* PA LUT prototypes */
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_hist_lut_get_version(u32 *version);
+static void pp_hist_lut_opmode_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts);
+
+static int pp_pa_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_pa_get_version(u32 *version);
+
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_dither_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_dither_get_version(u32 *version);
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+ u32 *opmode, int side);
+
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data, u32 flag);
+
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data, u32 flags);
+
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data,
+ u32 flags);
+
+static void pp_pa_opmode_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts);
+
+/*
+ * Populate the PP driver ops table for v3 hardware.
+ *
+ * Starts from the v1.7 ops (pp_get_driver_ops_v1_7()) and overrides the
+ * entries whose register layout changed in v3: PA, HIST_LUT, HIST and
+ * DITHER, plus the opmode/histogram-offset helpers.  Gamut clock gating
+ * is not needed on v3, so that hook is cleared.
+ *
+ * Returns the opaque config context from the v1.7 layer, NULL if that
+ * layer failed to initialise, or ERR_PTR(-EINVAL) for a NULL ops pointer.
+ */
+void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops)
+{
+	void *pp_cfg = NULL;
+
+	if (!ops) {
+		pr_err("PP driver ops invalid %pK\n", ops);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* inherit the v1.7 implementations, then patch in v3 overrides */
+	pp_cfg = pp_get_driver_ops_v1_7(ops);
+	if (IS_ERR_OR_NULL(pp_cfg))
+		return NULL;
+	/* PA ops */
+	ops->pp_ops[PA].pp_set_config = pp_pa_set_config;
+	ops->pp_ops[PA].pp_get_config = pp_pa_get_config;
+	ops->pp_ops[PA].pp_get_version = pp_pa_get_version;
+
+	/* HIST_LUT ops */
+	ops->pp_ops[HIST_LUT].pp_set_config = pp_hist_lut_set_config;
+	ops->pp_ops[HIST_LUT].pp_get_config = pp_hist_lut_get_config;
+	ops->pp_ops[HIST_LUT].pp_get_version = pp_hist_lut_get_version;
+
+	/* HIST ops (histogram collection has no separate version query) */
+	ops->pp_ops[HIST].pp_set_config = pp_hist_set_config;
+	ops->pp_ops[HIST].pp_get_config = pp_hist_get_config;
+	ops->pp_ops[HIST].pp_get_version = NULL;
+
+	/* Dither ops */
+	ops->pp_ops[DITHER].pp_set_config = pp_dither_set_config;
+	ops->pp_ops[DITHER].pp_get_config = pp_dither_get_config;
+	ops->pp_ops[DITHER].pp_get_version = pp_dither_get_version;
+
+	/* Set opmode pointers */
+	ops->pp_opmode_config = pp_opmode_config;
+
+	ops->get_hist_offset = pp_get_hist_offset;
+	ops->gamut_clk_gate_en = NULL;
+
+	return pp_cfg;
+}
+
+/*
+ * Return in *ctl_off the offset of the histogram control register for the
+ * given block type.  Only SSPP_VIG and DSPP carry a PA histogram; any
+ * other block is rejected with -EINVAL.
+ */
+static int pp_get_hist_offset(u32 block, u32 *ctl_off)
+{
+	int ret = 0;
+
+	if (!ctl_off) {
+		pr_err("invalid params ctl_off %pK\n", ctl_off);
+		return -EINVAL;
+	}
+
+	switch (block) {
+	case SSPP_VIG:
+		*ctl_off = PA_VIG_BLOCK_REG_OFF + PA_HIST_REG_OFF;
+		break;
+	case DSPP:
+		*ctl_off = PA_DSPP_BLOCK_REG_OFF + PA_HIST_REG_OFF;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Enable or disable histogram collection for a DSPP.
+ *
+ * Performs a read-modify-write of the PA op-mode register, toggling the
+ * histogram-enable bit (BIT(16)) according to cfg_data->col_en, and keeps
+ * pp_sts->hist_sts in sync.  Only the DSPP block type is supported.
+ * Returns 0 on success or -EINVAL on bad parameters.
+ */
+static int pp_hist_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data, u32 block_type)
+{
+	u32 opmode = 0;
+	struct pp_hist_col_info *hist_info = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	hist_info = (struct pp_hist_col_info *)cfg_data;
+	opmode = readl_relaxed(base_addr + PA_DSPP_BLOCK_REG_OFF +
+			PA_OP_MODE_REG_OFF);
+	/* set the hist_en bit */
+	if (hist_info->col_en) {
+		pp_sts->hist_sts |= PP_STS_ENABLE;
+		opmode |= BIT(16);
+	} else {
+		pp_sts->hist_sts &= ~PP_STS_ENABLE;
+		opmode &= ~BIT(16);
+	}
+
+	writel_relaxed(opmode, base_addr + PA_DSPP_BLOCK_REG_OFF +
+			PA_OP_MODE_REG_OFF);
+	return 0;
+}
+
+/*
+ * Read the HIST_V_SIZE histogram bins out of the DSPP histogram RAM into
+ * cfg_data->data[] (each bin is 24 bits wide) and bump hist_cnt_read.
+ *
+ * NOTE(review): unlike the other get_config handlers this returns the
+ * SUM of the bins on success (not 0), and -EINVAL on bad parameters —
+ * callers must not treat any non-zero value as an error.  The sum is
+ * accumulated in a u32 and returned as int; assumes the total pixel
+ * count fits — confirm against the expected frame sizes.
+ */
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	int i = 0;
+	u32 sum = 0;
+	struct pp_hist_col_info *hist_info = NULL;
+	char __iomem *hist_addr;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	hist_info = (struct pp_hist_col_info *) cfg_data;
+	hist_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_HIST_RAM_REG_OFF;
+
+	for (i = 0; i < HIST_V_SIZE; i++) {
+		hist_info->data[i] = readl_relaxed(hist_addr) & REG_MASK(24);
+		hist_addr += 0x4;
+		sum += hist_info->data[i];
+	}
+	hist_info->hist_cnt_read++;
+	return sum;
+}
+
+/*
+ * Read back the 256-entry histogram LUT from a DSPP into the user buffer
+ * described by cfg_data (a struct mdp_hist_lut_data whose cfg_payload is
+ * a user-space struct mdp_hist_lut_data_v1_7).
+ *
+ * Each 32-bit LUT register packs two 10-bit entries (low half and the
+ * half shifted by ENHIST_BIT_SHIFT), so the loop advances two entries
+ * per register read.  The result is staged in a kernel buffer and then
+ * copied to lut_data->data with copy_to_user().
+ *
+ * Returns 0 on success (or when MDP_PP_OPS_READ is not requested),
+ * -EINVAL for bad version/length, -EFAULT for user-copy failures,
+ * -ENOMEM if the staging buffer cannot be allocated.
+ */
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+
+	int ret = 0, i = 0;
+	char __iomem *hist_lut_addr;
+	u32 sz = 0, temp = 0, *data = NULL;
+	struct mdp_hist_lut_data_v1_7 lut_data_v1_7;
+	struct mdp_hist_lut_data_v1_7 *lut_data = &lut_data_v1_7;
+	struct mdp_hist_lut_data *lut_cfg_data = NULL;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+		pr_err("read ops not set for hist_lut %d\n", lut_cfg_data->ops);
+		return 0;
+	}
+	if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
+		!lut_cfg_data->cfg_payload) {
+		pr_err("invalid hist_lut version %d payload %pK\n",
+		       lut_cfg_data->version, lut_cfg_data->cfg_payload);
+		return -EINVAL;
+	}
+	/* cfg_payload is a user pointer here: copy the v1.7 header in */
+	if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+			sizeof(*lut_data))) {
+		pr_err("copy from user failed for lut_data\n");
+		return -EFAULT;
+	}
+	if (lut_data->len != ENHIST_LUT_ENTRIES) {
+		pr_err("invalid hist_lut len %d", lut_data->len);
+		return -EINVAL;
+	}
+	sz = ENHIST_LUT_ENTRIES * sizeof(u32);
+	/* legacy access_ok() signature on this kernel (VERIFY_WRITE) */
+	if (!access_ok(VERIFY_WRITE, lut_data->data, sz)) {
+		pr_err("invalid lut address for hist_lut sz %d\n", sz);
+		return -EFAULT;
+	}
+
+	hist_lut_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_REG_OFF;
+
+	data = kzalloc(sz, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* two 10-bit LUT entries are packed into each 32-bit register */
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+		temp = readl_relaxed(hist_lut_addr);
+		data[i] = temp & REG_MASK(10);
+		data[i + 1] =
+			(temp & REG_MASK_SHIFT(10, 16)) >> ENHIST_BIT_SHIFT;
+		hist_lut_addr += 4;
+	}
+	if (copy_to_user(lut_data->data, data, sz)) {
+		pr_err("failed to copy the hist_lut back to user\n");
+		ret = -EFAULT;
+	}
+	kfree(data);
+	return ret;
+}
+
+/*
+ * Program the 256-entry histogram LUT for a DSPP and update the status
+ * flags in pp_sts.
+ *
+ * Flow: validate version/block, bail early for read-only ops, skip the
+ * register writes for disable/non-write ops (status is still updated),
+ * otherwise pack two 10-bit entries per 32-bit register, trigger the
+ * double-buffer swap via the LUTV_SWAP register, and finally recompute
+ * the PA op-mode bits with pp_hist_lut_opmode_config().
+ *
+ * Unlike the get path, cfg_payload here is a kernel pointer (already
+ * copied in by the caller).  Returns 0 on success or -EINVAL.
+ */
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int ret = 0, i = 0;
+	u32 temp = 0;
+	struct mdp_hist_lut_data *lut_cfg_data = NULL;
+	struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+	char __iomem *hist_lut_addr = NULL, *swap_addr = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+	if (lut_cfg_data->version != mdp_hist_lut_v1_7) {
+		pr_err("invalid hist_lut version %d\n", lut_cfg_data->version);
+		return -EINVAL;
+	}
+
+	/* nothing to do when only the READ bit is set */
+	if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+		pr_err("only read ops set for lut\n");
+		return ret;
+	}
+	if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE ||
+		!(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+		goto hist_lut_set_sts;
+	}
+	lut_data = lut_cfg_data->cfg_payload;
+	if (!lut_data) {
+		pr_err("invalid hist_lut cfg_payload %pK\n", lut_data);
+		return -EINVAL;
+	}
+
+	if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
+		pr_err("invalid hist_lut len %d data %pK\n",
+		       lut_data->len, lut_data->data);
+		return -EINVAL;
+	}
+
+	hist_lut_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_REG_OFF;
+	swap_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_SWAP_REG_OFF;
+
+	/* two consecutive 10-bit entries share one 32-bit register */
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+		temp = (lut_data->data[i] & REG_MASK(10)) |
+			((lut_data->data[i + 1] & REG_MASK(10))
+			 << ENHIST_BIT_SHIFT);
+
+		writel_relaxed(temp, hist_lut_addr);
+		hist_lut_addr += 4;
+	}
+
+	/* latch the freshly written LUT (double-buffer swap) */
+	writel_relaxed(1, swap_addr);
+
+hist_lut_set_sts:
+	if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+		pp_sts->enhist_sts &= ~(PP_STS_ENABLE | PP_STS_PA_LUT_FIRST);
+	} else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE) {
+		pp_sts->enhist_sts |= PP_STS_ENABLE;
+		if (lut_cfg_data->hist_lut_first)
+			pp_sts->enhist_sts |= PP_STS_PA_LUT_FIRST;
+		else
+			pp_sts->enhist_sts &= ~PP_STS_PA_LUT_FIRST;
+	}
+
+	pp_hist_lut_opmode_config(base_addr + PA_DSPP_BLOCK_REG_OFF, pp_sts);
+	return ret;
+}
+
+/* Report the histogram LUT revision implemented by this driver (v1.7). */
+static int pp_hist_lut_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_hist_lut_v1_7;
+	return 0;
+}
+
+/*
+ * Recompute the histogram-LUT bits of the PA op-mode register from the
+ * cached enhist status in pp_sts (read-modify-write).
+ *
+ * NOTE(review): BIT(20) appears shared with PA enable — it is only
+ * cleared on the disable path when PA itself is not enabled; the exact
+ * bit assignments (19/20/21) come from the HW programming guide, which
+ * is not visible here — confirm before changing.
+ */
+static void pp_hist_lut_opmode_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts)
+{
+	u32 opmode = 0;
+
+	if (!base_addr || !pp_sts) {
+		pr_err("invalid params base_addr %pK pp_sts_type %pK\n",
+			base_addr, pp_sts);
+		return;
+	}
+	opmode = readl_relaxed(base_addr + PA_OP_MODE_REG_OFF);
+
+	/* set the hist_lutv_en and hist_lutv_first_en bits */
+	if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+		opmode |= BIT(19) | BIT(20);
+		opmode |= (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST) ?
+				BIT(21) : 0;
+	} else {
+		opmode &= ~(BIT(19) | BIT(21));
+		if (!(pp_sts->pa_sts & PP_STS_ENABLE))
+			opmode &= ~BIT(20);
+	}
+
+	writel_relaxed(opmode, base_addr + PA_OP_MODE_REG_OFF);
+}
+
+/*
+ * Apply a PA (picture adjustment) configuration to a DSPP or VIG pipe.
+ *
+ * Validates version and block type, resolves the PA sub-block base for
+ * the given block, then programs global HSIC adjust, memory-color and
+ * (DSPP only) six-zone registers before refreshing the status flags and
+ * the op-mode register.
+ *
+ * NOTE(review): on the DISABLE / non-WRITE path we jump to pa_set_sts
+ * with pa_data still NULL, and pa_data is then passed to pp_pa_set_sts()
+ * — assumes that helper tolerates a NULL payload for disable-only flag
+ * updates; confirm against pp_pa_set_sts() in mdss_mdp_pp_common.c.
+ */
+static int pp_pa_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type)
+{
+	struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+	struct mdp_pa_data_v1_7 *pa_data = NULL;
+	char __iomem *block_addr = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+	if (pa_cfg_data->version != mdp_pa_v1_7) {
+		pr_err("invalid pa version %d\n", pa_cfg_data->version);
+		return -EINVAL;
+	}
+	if (!(pa_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_info("only read ops is set %d", pa_cfg_data->flags);
+		return 0;
+	}
+
+	/* PA lives at a different offset inside DSPP vs VIG */
+	block_addr = base_addr +
+		((block_type == DSPP) ? PA_DSPP_BLOCK_REG_OFF :
+		 PA_VIG_BLOCK_REG_OFF);
+
+	if (pa_cfg_data->flags & MDP_PP_OPS_DISABLE ||
+	    !(pa_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("pa_cfg_data->flags = %d\n", pa_cfg_data->flags);
+		goto pa_set_sts;
+	}
+
+	pa_data = pa_cfg_data->cfg_payload;
+	if (!pa_data) {
+		pr_err("invalid payload for pa %pK\n", pa_data);
+		return -EINVAL;
+	}
+
+	pp_pa_set_global_adj_regs(block_addr, pa_data, pa_cfg_data->flags);
+	pp_pa_set_mem_col(block_addr, pa_data, pa_cfg_data->flags);
+	/* six-zone adjustment exists only in the DSPP PA block */
+	if (block_type == DSPP)
+		pp_pa_set_six_zone(block_addr, pa_data, pa_cfg_data->flags);
+
+pa_set_sts:
+	pp_pa_set_sts(pp_sts, pa_data, pa_cfg_data->flags, block_type);
+	pp_pa_opmode_config(block_addr, pp_sts);
+
+	return 0;
+}
+
+/* PA register readback is not implemented for this HW revision. */
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	return -ENOTSUPP;
+}
+
+/*
+ * Report the PA block revision implemented by this driver (v1.7).
+ * Fix: the error print was missing the trailing newline and the pointer
+ * argument, unlike every sibling *_get_version() in this driver.
+ */
+static int pp_pa_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pa_v1_7;
+	return 0;
+}
+
+/* Dither register readback is not implemented for this HW revision. */
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	return -ENOTSUPP;
+}
+
+/*
+ * Program the PPB global dither block.
+ *
+ * Validates version/flags, maps the user-requested per-channel depths
+ * through dither_depth_map, writes the fixed 4x4 bayer matrix (packed
+ * four 4-bit coefficients per register), then updates the status flags
+ * and the dither op-mode enable bit.
+ *
+ * Fix: the out-of-range error message labelled the second field
+ * "y_cb_depth" while the struct member being checked is b_cb_depth.
+ *
+ * Returns 0 on success, -EINVAL on bad parameters, -ENOTSUPP for any
+ * block other than PPB.
+ */
+static int pp_dither_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int i = 0;
+	u32 data;
+	struct mdp_dither_cfg_data *dither_cfg_data = NULL;
+	struct mdp_dither_data_v1_7 *dither_data = NULL;
+	char __iomem *dither_opmode = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if (block_type != PPB)
+		return -ENOTSUPP;
+	dither_opmode = base_addr + PPB_GLOBAL_DITHER_REG_OFF;
+	/* data registers start right after the op-mode register */
+	base_addr = dither_opmode + 4;
+
+	dither_cfg_data = (struct mdp_dither_cfg_data *) cfg_data;
+
+	if (dither_cfg_data->version != mdp_dither_v1_7) {
+		pr_err("invalid dither version %d\n", dither_cfg_data->version);
+		return -EINVAL;
+	}
+
+	if (dither_cfg_data->flags & MDP_PP_OPS_READ) {
+		pr_err("Invalid context for read operation\n");
+		return -EINVAL;
+	}
+
+	if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE ||
+	    !(dither_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", dither_cfg_data->flags);
+		goto dither_set_sts;
+	}
+
+	dither_data = dither_cfg_data->cfg_payload;
+	if (!dither_data) {
+		pr_err("invalid payload for dither %pK\n", dither_data);
+		return -EINVAL;
+	}
+
+	if ((dither_data->g_y_depth >= DITHER_DEPTH_MAP_INDEX) ||
+	    (dither_data->b_cb_depth >= DITHER_DEPTH_MAP_INDEX) ||
+	    (dither_data->r_cr_depth >= DITHER_DEPTH_MAP_INDEX)) {
+		pr_err("invalid data for dither, g_y_depth %d b_cb_depth %d r_cr_depth %d\n",
+			dither_data->g_y_depth, dither_data->b_cb_depth,
+			dither_data->r_cr_depth);
+		return -EINVAL;
+	}
+	/* pack the three mapped channel depths plus the temporal enable */
+	data = dither_depth_map[dither_data->g_y_depth];
+	data |= dither_depth_map[dither_data->b_cb_depth] << 2;
+	data |= dither_depth_map[dither_data->r_cr_depth] << 4;
+	data |= (dither_data->temporal_en) ? (1 << 8) : 0;
+	writel_relaxed(data, base_addr);
+	base_addr += 4;
+	/* four 4-bit matrix coefficients per 32-bit register */
+	for (i = 0; i < DITHER_MATRIX_LEN; i += 4) {
+		data = (dither_matrix[i] & REG_MASK(4)) |
+			((dither_matrix[i + 1] & REG_MASK(4)) << 4) |
+			((dither_matrix[i + 2] & REG_MASK(4)) << 8) |
+			((dither_matrix[i + 3] & REG_MASK(4)) << 12);
+		writel_relaxed(data, base_addr);
+		base_addr += 4;
+	}
+
+dither_set_sts:
+	pp_sts_set_split_bits(&pp_sts->dither_sts,
+			dither_cfg_data->flags);
+	if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+		pp_sts->dither_sts &= ~PP_STS_ENABLE;
+		writel_relaxed(0, dither_opmode);
+	} else if (dither_cfg_data->flags & MDP_PP_OPS_ENABLE) {
+		pp_sts->dither_sts |= PP_STS_ENABLE;
+		if (pp_sts_is_enabled(pp_sts->dither_sts, pp_sts->side_sts))
+			writel_relaxed(BIT(0), dither_opmode);
+	}
+	return 0;
+}
+
+/*
+ * Report the dither block revision implemented by this driver (v1.7).
+ * Fix: the error print was missing the trailing newline and the pointer
+ * argument, unlike every sibling *_get_version() in this driver.
+ */
+static int pp_dither_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_dither_v1_7;
+	return 0;
+}
+
+/*
+ * Fold the per-feature enable state from pp_sts into *opmode for the
+ * given pipeline location.  On v3 only the DSPP IGC enable bit is set
+ * here; SSPP_DMA/SSPP_VIG have no opmode contribution and LM PGC is
+ * handled elsewhere (only logged here).
+ */
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+		u32 *opmode, int side)
+{
+	if (!pp_sts || !opmode) {
+		pr_err("Invalid pp_sts %pK or opmode %pK\n", pp_sts, opmode);
+		return;
+	}
+	switch (location) {
+	case SSPP_DMA:
+		break;
+	case SSPP_VIG:
+		break;
+	case DSPP:
+		if (pp_sts_is_enabled(pp_sts->igc_sts, side))
+			*opmode |= IGC_DSPP_OP_MODE_EN;
+		break;
+	case LM:
+		if (pp_sts->argc_sts & PP_STS_ENABLE)
+			pr_debug("pgc in LM enabled\n");
+		break;
+	default:
+		pr_err("Invalid block type %d\n", location);
+		break;
+	}
+}
+
+/*
+ * Write the four global HSIC adjustment registers (hue, saturation,
+ * value, contrast — consecutive 32-bit registers starting at
+ * PA_HSIC_REG_OFF).  Each register is written only when its enable
+ * flag is set; values are masked to the field widths the HW accepts.
+ */
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data, u32 flags)
+{
+	char __iomem *addr = NULL;
+
+	addr = base_addr + PA_HSIC_REG_OFF;
+	if (flags & MDP_PP_PA_HUE_ENABLE)
+		writel_relaxed((pa_data->global_hue_adj &
+					REG_MASK(12)), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_SAT_ENABLE)
+		writel_relaxed((pa_data->global_sat_adj &
+					REG_MASK(16)), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_VAL_ENABLE)
+		writel_relaxed((pa_data->global_val_adj &
+					REG_MASK(8)), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_CONT_ENABLE)
+		writel_relaxed((pa_data->global_cont_adj &
+					REG_MASK(8)), addr);
+}
+
+/*
+ * Program the PA memory-color regions (skin, sky, foliage).
+ *
+ * For each region whose enable flag is set, writes the per-region
+ * register group (color adjust p0/p1, hue/sat/val region bounds, and the
+ * p2 adjust + blend gain pair) and accumulates that region's 2-bit
+ * sat/val hold fields into a combined hold word.  After all regions are
+ * handled, the shared PWL hold register is updated read-modify-write so
+ * only the enabled regions' hold bits change.
+ */
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data, u32 flags)
+{
+	char __iomem *mem_col_base = NULL, *mem_col_p2 = NULL;
+	struct mdp_pa_mem_col_data_v1_7 *mem_col_data = NULL;
+	uint32_t mask = 0, hold = 0, hold_mask = 0;
+	uint32_t hold_curr = 0;
+
+	/* only the three memory-color enables are relevant here */
+	flags &= (MDP_PP_PA_SKIN_ENABLE | MDP_PP_PA_SKY_ENABLE |
+		  MDP_PP_PA_FOL_ENABLE);
+	if (!flags)
+		return;
+	/* handle one region per iteration, clearing its flag as we go */
+	while (flags) {
+		if (flags & MDP_PP_PA_SKIN_ENABLE) {
+			flags &= ~MDP_PP_PA_SKIN_ENABLE;
+			mem_col_base = base_addr + PA_MEM_SKIN_REG_OFF;
+			mem_col_p2 = base_addr + PA_MEM_SKIN_ADJUST_P2_REG_OFF;
+			mem_col_data = &pa_data->skin_cfg;
+			hold |= pa_data->skin_cfg.sat_hold & REG_MASK(2);
+			hold |= (pa_data->skin_cfg.val_hold & REG_MASK(2))
+				 << 2;
+			hold_mask |= REG_MASK(4);
+		} else if (flags & MDP_PP_PA_SKY_ENABLE) {
+			flags &= ~MDP_PP_PA_SKY_ENABLE;
+			mem_col_base = base_addr + PA_MEM_SKY_REG_OFF;
+			mem_col_p2 = base_addr + PA_MEM_SKY_ADJUST_P2_REG_OFF;
+			mem_col_data = &pa_data->sky_cfg;
+			hold |= (pa_data->sky_cfg.sat_hold & REG_MASK(2)) << 4;
+			hold |= (pa_data->sky_cfg.val_hold & REG_MASK(2)) << 6;
+			hold_mask |= REG_MASK_SHIFT(4, 4);
+		} else if (flags & MDP_PP_PA_FOL_ENABLE) {
+			flags &= ~MDP_PP_PA_FOL_ENABLE;
+			mem_col_base = base_addr + PA_MEM_FOL_REG_OFF;
+			mem_col_p2 = base_addr + PA_MEM_FOL_ADJUST_P2_REG_OFF;
+			mem_col_data = &pa_data->fol_cfg;
+			hold |= (pa_data->fol_cfg.sat_hold & REG_MASK(2)) << 8;
+			hold |= (pa_data->fol_cfg.val_hold & REG_MASK(2)) << 10;
+			hold_mask |= REG_MASK_SHIFT(4, 8);
+		} else {
+			break;
+		}
+		mask = REG_MASK_SHIFT(16, 16) | REG_MASK(11);
+		writel_relaxed((mem_col_data->color_adjust_p0 & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		mask = U32_MAX;
+		writel_relaxed((mem_col_data->color_adjust_p1 & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		mask = REG_MASK_SHIFT(11, 16) | REG_MASK(11);
+		writel_relaxed((mem_col_data->hue_region & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		mask = REG_MASK(24);
+		writel_relaxed((mem_col_data->sat_region & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		/* mask is same for val and sat */
+		writel_relaxed((mem_col_data->val_region & mask),
+				mem_col_base);
+		mask = U32_MAX;
+		writel_relaxed((mem_col_data->color_adjust_p2 & mask),
+				mem_col_p2);
+		mem_col_p2 += 4;
+		writel_relaxed((mem_col_data->blend_gain & mask),
+				mem_col_p2);
+	}
+	/* merge the new hold bits into the shared PWL hold register */
+	hold_curr = readl_relaxed(base_addr + PA_PWL_HOLD_REG_OFF) &
+		      REG_MASK(16);
+	hold_curr &= ~hold_mask;
+	hold = hold_curr | (hold & hold_mask);
+	writel_relaxed(hold, (base_addr + PA_PWL_HOLD_REG_OFF));
+}
+
+/*
+ * Program the PA six-zone adjustment (DSPP only).
+ *
+ * Validates the curve tables, then loads the MDP_SIX_ZONE_LUT_SIZE-entry
+ * LUT: the first p0 write carries BIT(26) which resets the HW index to 0,
+ * and each subsequent p1/p0 pair auto-increments the index.  Afterwards
+ * the threshold and adjust registers are written and the six-zone sat/val
+ * hold bits are merged read-modify-write into the shared PWL hold
+ * register, mirroring pp_pa_set_mem_col().
+ * NOTE(review): the p1-before-p0 write order looks intentional (p0 write
+ * commits the pair) — confirm against the HW programming guide.
+ */
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data,
+				u32 flags)
+{
+	char __iomem *addr = base_addr + PA_SZONE_REG_OFF;
+	uint32_t mask_p0 = 0, mask_p1 = 0, hold = 0, hold_mask = 0;
+	uint32_t hold_curr = 0;
+	int i = 0;
+
+	if (!(flags & MDP_PP_PA_SIX_ZONE_ENABLE))
+		return;
+
+	if (pa_data->six_zone_len != MDP_SIX_ZONE_LUT_SIZE ||
+	    !pa_data->six_zone_curve_p0 ||
+	    !pa_data->six_zone_curve_p1) {
+		pr_err("Invalid six zone data: len %d curve_p0 %pK curve_p1 %pK\n",
+			pa_data->six_zone_len,
+			pa_data->six_zone_curve_p0,
+			pa_data->six_zone_curve_p1);
+		return;
+	}
+	mask_p0 = REG_MASK(12);
+	mask_p1 = REG_MASK(12) | REG_MASK_SHIFT(12, 16);
+	writel_relaxed((pa_data->six_zone_curve_p1[0] & mask_p1), addr + 4);
+	/* Update the index to 0 and write value */
+	writel_relaxed((pa_data->six_zone_curve_p0[0] & mask_p0) | BIT(26),
+			addr);
+	for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+		writel_relaxed((pa_data->six_zone_curve_p1[i] & mask_p1),
+				addr + 4);
+		writel_relaxed((pa_data->six_zone_curve_p0[i] & mask_p0), addr);
+	}
+	addr += 8;
+	writel_relaxed(pa_data->six_zone_thresh, addr);
+	addr += 4;
+	writel_relaxed(pa_data->six_zone_adj_p0 & REG_MASK(16), addr);
+	addr += 4;
+	writel_relaxed(pa_data->six_zone_adj_p1, addr);
+
+	/* six-zone hold bits occupy [15:12] of the shared hold register */
+	hold = (pa_data->six_zone_sat_hold & REG_MASK(2)) << 12;
+	hold |= (pa_data->six_zone_val_hold & REG_MASK(2)) << 14;
+	hold_mask = REG_MASK_SHIFT(4, 12);
+	hold_curr = readl_relaxed(base_addr + PA_PWL_HOLD_REG_OFF) &
+		      REG_MASK(16);
+	hold_curr &= ~hold_mask;
+	hold = hold_curr | (hold & hold_mask);
+	writel_relaxed(hold, (base_addr + PA_PWL_HOLD_REG_OFF));
+}
+
+/*
+ * Rebuild the PA op-mode register from scratch out of the cached status
+ * flags and write it.  Unlike pp_hist_lut_opmode_config() this is a full
+ * overwrite (no read-modify-write): every enable bit — global HSIC,
+ * memory color, six zone, protection, histogram and histogram LUT — is
+ * recomputed from pp_sts so a stale HW value cannot survive.
+ * NOTE(review): individual bit positions are taken from the HW
+ * programming guide, not derivable from this file.
+ */
+static void pp_pa_opmode_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts)
+{
+	uint32_t opmode = 0;
+
+	/* set the PA bits */
+	if (pp_sts->pa_sts & PP_STS_ENABLE) {
+		opmode |= BIT(20);
+
+		if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
+			opmode |= BIT(25);
+		if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
+			opmode |= BIT(26);
+		if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
+			opmode |= BIT(27);
+		if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
+			opmode |= BIT(28);
+		if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+			opmode |= BIT(1);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+			opmode |= BIT(5);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+			opmode |= BIT(6);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+			opmode |= BIT(7);
+		if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
+			opmode |= BIT(29);
+		if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
+			opmode |= BIT(30);
+		if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
+			opmode |= BIT(31);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+			opmode |= BIT(22);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+			opmode |= BIT(23);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+			opmode |= BIT(24);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+			opmode |= BIT(18);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+			opmode |= BIT(3);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_SIX_EN)
+			opmode |= BIT(17);
+	}
+
+	/* reset hist_en, hist_lutv_en and hist_lutv_first_en
+	 * bits based on the pp_sts
+	 */
+	if (pp_sts->hist_sts & PP_STS_ENABLE)
+		opmode |= BIT(16);
+	if (pp_sts->enhist_sts & PP_STS_ENABLE)
+		opmode |= BIT(19) | BIT(20);
+	if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
+		opmode |= BIT(21);
+
+	writel_relaxed(opmode, base_addr + PA_OP_MODE_REG_OFF);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
new file mode 100644
index 0000000..02ddced
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
@@ -0,0 +1,773 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+#include <linux/iommu.h>
+#include <linux/of_address.h>
+#include <linux/fb.h>
+#include <linux/dma-buf.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "splash.h"
+#include "mdss_mdp_splash_logo.h"
+#include "mdss_smmu.h"
+
+#define INVALID_PIPE_INDEX 0xFFFF
+#define MAX_FRAME_DONE_COUNT_WAIT 2
+
+/*
+ * Allocate and map the splash image buffer for the given framebuffer.
+ *
+ * Allocates ION system-heap memory, wraps it in a dma-buf, attaches and
+ * maps it through the unsecure SMMU domain, and kmaps it so the splash
+ * image can be copied in by the CPU.  On success the handle is released
+ * (the dma-buf holds the reference) and sinfo->{dma_buf,attachment,
+ * table,iova,splash_buffer,size} are populated.
+ *
+ * Fixes over the original:
+ *  - on dma_buf_kmap() failure, rc was set via IS_ERR() which yields a
+ *    positive 1 (looks like garbage, not a -errno); also a NULL return
+ *    (how kmap reports failure on many implementations) was not caught;
+ *  - the failure path now balances dma_buf_begin_cpu_access() with
+ *    dma_buf_end_cpu_access() before unwinding.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd,
+					uint32_t size)
+{
+	int rc;
+	struct msm_fb_splash_info *sinfo;
+	unsigned long buf_size = size;
+	struct mdss_data_type *mdata;
+	struct ion_handle *handle;
+
+	if (!mfd || !size)
+		return -EINVAL;
+
+	mdata = mfd_to_mdata(mfd);
+	sinfo = &mfd->splash_info;
+
+	if (!mdata || !mdata->iclient || sinfo->splash_buffer)
+		return -EINVAL;
+
+	handle = ion_alloc(mdata->iclient, size, SZ_4K,
+				ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("ion memory allocation failed\n");
+		rc = PTR_RET(handle);
+		goto end;
+	}
+
+	sinfo->size = size;
+	sinfo->dma_buf = ion_share_dma_buf(mdata->iclient, handle);
+	if (IS_ERR(sinfo->dma_buf)) {
+		rc = PTR_ERR(sinfo->dma_buf);
+		goto imap_err;
+	}
+
+	sinfo->attachment = mdss_smmu_dma_buf_attach(sinfo->dma_buf,
+			&mfd->pdev->dev, MDSS_IOMMU_DOMAIN_UNSECURE);
+	if (IS_ERR(sinfo->attachment)) {
+		rc = PTR_ERR(sinfo->attachment);
+		goto err_put;
+	}
+
+	sinfo->table = dma_buf_map_attachment(sinfo->attachment,
+			DMA_BIDIRECTIONAL);
+	if (IS_ERR(sinfo->table)) {
+		rc = PTR_ERR(sinfo->table);
+		goto err_detach;
+	}
+
+	rc = mdss_smmu_map_dma_buf(sinfo->dma_buf, sinfo->table,
+			MDSS_IOMMU_DOMAIN_UNSECURE, &sinfo->iova,
+			&buf_size, DMA_BIDIRECTIONAL);
+	if (rc) {
+		pr_err("mdss smmu map dma buf failed!\n");
+		goto err_unmap;
+	}
+	sinfo->size = buf_size;
+
+	dma_buf_begin_cpu_access(sinfo->dma_buf, DMA_BIDIRECTIONAL);
+	sinfo->splash_buffer = dma_buf_kmap(sinfo->dma_buf, 0);
+	if (IS_ERR_OR_NULL(sinfo->splash_buffer)) {
+		pr_err("ion kernel memory mapping failed\n");
+		/* NULL and ERR_PTR returns both mean failure here */
+		rc = sinfo->splash_buffer ?
+			PTR_ERR(sinfo->splash_buffer) : -ENOMEM;
+		sinfo->splash_buffer = NULL;
+		goto kmap_err;
+	}
+
+	/**
+	 * dma_buf has the reference
+	 */
+	ion_free(mdata->iclient, handle);
+
+	return rc;
+kmap_err:
+	/* balance the begin_cpu_access above before tearing down */
+	dma_buf_end_cpu_access(sinfo->dma_buf, DMA_BIDIRECTIONAL);
+	mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE,
+				DMA_BIDIRECTIONAL, sinfo->dma_buf);
+err_unmap:
+	dma_buf_unmap_attachment(sinfo->attachment, sinfo->table,
+			DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(sinfo->dma_buf, sinfo->attachment);
+err_put:
+	dma_buf_put(sinfo->dma_buf);
+imap_err:
+	ion_free(mdata->iclient, handle);
+end:
+	return rc;
+}
+
+/*
+ * Tear down the splash buffer created by mdss_mdp_splash_alloc_memory():
+ * kunmap, SMMU unmap, detach and release the dma-buf, then clear the
+ * splash_buffer pointer so a subsequent alloc is permitted.  Safe to
+ * call when nothing was allocated (dma_buf NULL check).
+ */
+static void mdss_mdp_splash_free_memory(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_splash_info *sinfo;
+	struct mdss_data_type *mdata;
+
+	if (!mfd)
+		return;
+
+	sinfo = &mfd->splash_info;
+	mdata = mfd_to_mdata(mfd);
+
+	if (!mdata || !mdata->iclient || !sinfo->dma_buf)
+		return;
+
+	dma_buf_end_cpu_access(sinfo->dma_buf, DMA_BIDIRECTIONAL);
+	dma_buf_kunmap(sinfo->dma_buf, 0, sinfo->splash_buffer);
+
+	mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE, 0,
+				sinfo->dma_buf);
+	dma_buf_unmap_attachment(sinfo->attachment, sinfo->table,
+			DMA_BIDIRECTIONAL);
+	dma_buf_detach(sinfo->dma_buf, sinfo->attachment);
+	dma_buf_put(sinfo->dma_buf);
+
+	sinfo->splash_buffer = NULL;
+}
+
+/*
+ * Dynamically attach the IOMMU with a 1:1 mapping of the bootloader
+ * splash region, so the display can keep scanning it out across the
+ * attach.  Returns -EPERM when dynamic attach is not applicable (already
+ * attached, HW lacks support, or no valid splash region), otherwise the
+ * result of the SMMU map.  On successful map + attach, marks
+ * iommu_dynamic_attached so the teardown path knows to unmap.
+ */
+static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc, ret;
+
+	/*
+	 * iommu dynamic attach for following conditions.
+	 * 1. it is still not attached
+	 * 2. MDP hardware version supports the feature
+	 * 3. configuration is with valid splash buffer
+	 */
+	if (mdata->mdss_util->iommu_attached() ||
+		!mfd->panel_info->cont_splash_enabled ||
+		!mdss_mdp_iommu_dyn_attach_supported(mdp5_data->mdata) ||
+		!mdp5_data->splash_mem_addr ||
+		!mdp5_data->splash_mem_size) {
+		pr_debug("dynamic attach is not supported\n");
+		return -EPERM;
+	}
+
+	/* identity-map the splash region (iova == phys) */
+	rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size,
+				IOMMU_READ | IOMMU_NOEXEC);
+	if (rc) {
+		pr_debug("iommu memory mapping failed rc=%d\n", rc);
+	} else {
+		ret = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE((unsigned long)ret)) {
+			pr_err("mdss iommu attach failed\n");
+			/* roll back the identity mapping on attach failure */
+			mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+					mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+		} else {
+			mfd->splash_info.iommu_dynamic_attached = true;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Undo mdss_mdp_splash_iommu_attach(): remove the 1:1 splash mapping,
+ * drop the iommu vote and clear the dynamic-attach flag.  No-op if the
+ * dynamic attach never happened.
+ */
+static void mdss_mdp_splash_unmap_splash_mem(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mfd->splash_info.iommu_dynamic_attached) {
+
+		mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size);
+		mdss_iommu_ctrl(0);
+
+		mfd->splash_info.iommu_dynamic_attached = false;
+	}
+}
+
+/*
+ * Release the (up to two) overlay pipes that were borrowed to display
+ * the splash image and clear the allocation flag.  No-op when no splash
+ * pipe was allocated; INVALID_PIPE_INDEX marks an unused slot.
+ */
+void mdss_mdp_release_splash_pipe(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd || !mfd->splash_info.splash_pipe_allocated)
+		return;
+
+	sinfo = &mfd->splash_info;
+
+	if (sinfo->pipe_ndx[0] != INVALID_PIPE_INDEX)
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+	if (sinfo->pipe_ndx[1] != INVALID_PIPE_INDEX)
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[1]);
+	sinfo->splash_pipe_allocated = false;
+}
+
+/*
+ * In order to free reserved memory from bootup we are not
+ * able to call the __init free functions, as we could be
+ * past the init boot sequence. As a result we need to
+ * free this memory ourselves using the
+ * free_reserved_page() function.
+ */
+/*
+ * Return a physically-contiguous bootloader-reserved region to the page
+ * allocator, one page at a time via free_reserved_page().  mem_addr and
+ * size are assumed page-aligned physical values — TODO confirm callers
+ * guarantee alignment, as any tail partial page is silently dropped.
+ */
+void mdss_free_bootmem(u32 mem_addr, u32 size)
+{
+	unsigned long pfn_start, pfn_end, pfn_idx;
+
+	pfn_start = mem_addr >> PAGE_SHIFT;
+	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
+	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+		free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+/*
+ * Tear down continuous-splash (bootloader display handoff) state for @mfd.
+ *
+ * When @use_borderfill is set, handed-off video-mode pipes are first
+ * programmed with border-fill so nothing scans out of the splash buffer
+ * before the iommu is attached. Afterwards the handed-off pipes are queued
+ * for cleanup and the bootloader's reserved splash memory is given back to
+ * the system (immediately, or deferred until the second DSI display's
+ * cleanup in the dual-DSI single-LM case).
+ *
+ * Returns 0 on success or the border-fill commit error.
+ */
+int mdss_mdp_splash_cleanup(struct msm_fb_data_type *mfd,
+	bool use_borderfill)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	/*
+	 * Persist across calls: DISPLAY_1's splash region is recorded here
+	 * so it can be freed later, when DISPLAY_2's cleanup runs (see the
+	 * "delay cleanup" path below).
+	 */
+	static u32 splash_mem_addr;
+	static u32 splash_mem_size;
+	int rc = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mfd)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -EINVAL;
+
+	ctl = mdp5_data->ctl;
+	if (!ctl)
+		return -EINVAL;
+
+	/*
+	 * Nothing to hand off: either continuous splash is disabled, or the
+	 * splash region is still dynamically mapped and the caller did not
+	 * ask for border-fill teardown. If the mapping is live and teardown
+	 * was requested, unmap and free the splash memory before leaving.
+	 */
+	if (!mfd->panel_info->cont_splash_enabled ||
+		(mfd->splash_info.iommu_dynamic_attached && !use_borderfill)) {
+		if (mfd->splash_info.iommu_dynamic_attached &&
+			use_borderfill) {
+			mdss_mdp_splash_unmap_splash_mem(mfd);
+			memblock_free(mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size);
+			mdss_free_bootmem(mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size);
+		}
+		goto end;
+	}
+
+	/* 1-to-1 mapping */
+	mdss_mdp_splash_iommu_attach(mfd);
+
+	if (use_borderfill && mdp5_data->handoff &&
+		!mfd->splash_info.iommu_dynamic_attached) {
+		/*
+		 * Set up border-fill on the handed off pipes.
+		 * This is needed to ensure that there are no memory
+		 * accesses prior to attaching iommu during continuous
+		 * splash screen case. However, for command mode
+		 * displays, this is not necessary since the panels can
+		 * refresh from their internal memory if no data is sent
+		 * out on the dsi lanes.
+		 */
+		if (mdp5_data->handoff && ctl && ctl->is_video_mode) {
+			rc = mdss_mdp_display_commit(ctl, NULL, NULL);
+			if (!IS_ERR_VALUE((unsigned long)rc)) {
+				mdss_mdp_display_wait4comp(ctl);
+			} else {
+				/*
+				 * Since border-fill setup failed, we
+				 * need to ensure that we turn off the
+				 * MDP timing generator before attaching
+				 * iommu
+				 */
+				pr_err("failed to set BF at handoff\n");
+				mdp5_data->handoff = false;
+			}
+		}
+	}
+
+	/* Drop the bootloader's pipe configuration regardless of BF result. */
+	if (rc || mdp5_data->handoff) {
+		/* Add all the handed off pipes to the cleanup list */
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
+	}
+
+	mdss_mdp_ctl_splash_finish(ctl, mdp5_data->handoff);
+
+	/* If DSI-1 interface is enabled by LK & split dsi is not enabled,
+	 * free cont_splash_mem for dsi during the cleanup for DSI-1.
+	 */
+	if (!mdata->splash_split_disp &&
+		(mdata->splash_intf_sel & MDSS_MDP_INTF_DSI1_SEL) &&
+		mfd->panel_info->pdest == DISPLAY_1) {
+		pr_debug("delay cleanup for display %d\n",
+			mfd->panel_info->pdest);
+		/* Stash region for the DISPLAY_2 pass to free. */
+		splash_mem_addr = mdp5_data->splash_mem_addr;
+		splash_mem_size = mdp5_data->splash_mem_size;
+
+		mdss_mdp_footswitch_ctrl_splash(0);
+		goto end;
+	}
+
+	/* DISPLAY_2 pass: free the region stashed by DISPLAY_1 above. */
+	if (!mdata->splash_split_disp &&
+		(mdata->splash_intf_sel & MDSS_MDP_INTF_DSI1_SEL) &&
+		mfd->panel_info->pdest == DISPLAY_2 &&
+		!mfd->splash_info.iommu_dynamic_attached) {
+		pr_debug("free splash mem for display %d\n",
+			mfd->panel_info->pdest);
+		/* Give back the reserved memory to the system */
+		memblock_free(splash_mem_addr, splash_mem_size);
+		mdss_free_bootmem(splash_mem_addr, splash_mem_size);
+
+		mdss_mdp_footswitch_ctrl_splash(0);
+		goto end;
+	}
+
+	/* Single-display case: free this display's own splash region. */
+	if (mdp5_data->splash_mem_addr &&
+		!mfd->splash_info.iommu_dynamic_attached) {
+		pr_debug("free splash mem for display %d\n",
+			mfd->panel_info->pdest);
+		/* Give back the reserved memory to the system */
+		memblock_free(mdp5_data->splash_mem_addr,
+			mdp5_data->splash_mem_size);
+		mdss_free_bootmem(mdp5_data->splash_mem_addr,
+			mdp5_data->splash_mem_size);
+	}
+
+	mdss_mdp_footswitch_ctrl_splash(0);
+end:
+	return rc;
+}
+
+/*
+ * Set up one overlay pipe per @req and attach the pre-copied splash logo
+ * buffer (sinfo->iova) to it as a single plane.
+ *
+ * Returns the configured pipe, or NULL on failure.
+ * NOTE(review): if mdss_mdp_pipe_map() fails the pipe obtained from
+ * overlay_pipe_setup() is not released here -- confirm the caller's error
+ * path covers it.
+ */
+static struct mdss_mdp_pipe *mdss_mdp_splash_get_pipe(
+	struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req)
+{
+	struct mdss_mdp_pipe *pipe;
+	int ret;
+	struct mdss_mdp_data *buf;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	uint32_t image_size = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT
+			* SPLASH_IMAGE_BPP;
+
+	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, true);
+	if (ret)
+		return NULL;
+
+	/* Hold a map reference on the pipe while attaching the buffer. */
+	if (mdss_mdp_pipe_map(pipe)) {
+		pr_err("unable to map base pipe\n");
+		return NULL;
+	}
+
+	/* Buffer list is shared with the overlay path; take list_lock. */
+	mutex_lock(&mdp5_data->list_lock);
+	buf = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	if (!buf) {
+		pr_err("unable to allocate memory for splash buffer\n");
+		mdss_mdp_pipe_unmap(pipe);
+		mutex_unlock(&mdp5_data->list_lock);
+		return NULL;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	/* Single plane pointing at the already-mapped splash image. */
+	buf->p[0].addr = mfd->splash_info.iova;
+	buf->p[0].len = image_size;
+	buf->num_planes = 1;
+	mdss_mdp_pipe_unmap(pipe);
+
+	return pipe;
+}
+
+/*
+ * Stage the splash logo on one or two pipes (depending on split-LM /
+ * source-split configuration) and kick off a display commit.
+ *
+ * @src_rect:  source rectangle within the logo image.
+ * @dest_rect: destination rectangle on the panel.
+ *
+ * Returns 0 on success; on failure all acquired pipes are released and
+ * sinfo->pipe_ndx[] is left at INVALID_PIPE_INDEX.
+ *
+ * Fix: the original returned -ENOMEM directly on kzalloc() failure while
+ * mdp5_data->ov_lock was still held, leaking the mutex; that path now
+ * goes through the common 'end' unwind.
+ */
+static int mdss_mdp_splash_kickoff(struct msm_fb_data_type *mfd,
+	struct mdss_rect *src_rect,
+	struct mdss_rect *dest_rect)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct fb_info *fbi;
+	struct mdp_overlay *req = NULL;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_mixer *mixer;
+	int ret;
+	bool use_single_pipe = false;
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	mdata = mfd_to_mdata(mfd);
+	sinfo = &mfd->splash_info;
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
+		return -EINVAL;
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto end;
+	}
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_err("unable to retrieve mixer\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	req = kzalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
+	if (!req) {
+		/* Must unwind through 'end' so ov_lock is released. */
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/*
+	 * use single pipe for
+	 * 1. split display disabled
+	 * 2. splash image is only on one side of panel
+	 * 3. source split is enabled and splash image is within line
+	 *    buffer boundary
+	 */
+	use_single_pipe =
+		!is_split_lm(mfd) ||
+		(is_split_lm(mfd) &&
+		((dest_rect->x + dest_rect->w) < mfd->split_fb_left ||
+		dest_rect->x > mfd->split_fb_left)) ||
+		(mdata->has_src_split &&
+		src_rect->w < min_t(u16, mixer->width,
+		mdss_mdp_line_buffer_width()) &&
+		dest_rect->w < min_t(u16, mixer->width,
+		mdss_mdp_line_buffer_width()));
+
+	req->src.width = src_rect->w;
+	if (use_single_pipe)
+		req->src_rect.w = src_rect->w;
+	else
+		/* Dual pipe: each side takes (at most) half the image. */
+		req->src_rect.w = min_t(u16, mixer->width, src_rect->w >> 1);
+	req->dst_rect.w = req->src_rect.w;
+	req->src.height = req->dst_rect.h = req->src_rect.h =
+		src_rect->h;
+	req->src.format = SPLASH_IMAGE_FORMAT;
+	req->id = MSMFB_NEW_REQUEST;
+	req->z_order = MDSS_MDP_STAGE_0;
+	req->alpha = 0xff;
+	req->transp_mask = MDP_TRANSP_NOP;
+	req->dst_rect.x = dest_rect->x;
+	req->dst_rect.y = dest_rect->y;
+
+	pipe = mdss_mdp_splash_get_pipe(mfd, req);
+	if (!pipe) {
+		pr_err("unable to allocate base pipe\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	sinfo->pipe_ndx[0] = pipe->ndx;
+
+	/* Second (right) pipe for the split-LM case. */
+	if (!use_single_pipe) {
+		req->id = MSMFB_NEW_REQUEST;
+		req->src_rect.x = src_rect->x + min_t(u16, mixer->width,
+			src_rect->w - req->src_rect.w);
+		req->dst_rect.x = mixer->width;
+		pipe = mdss_mdp_splash_get_pipe(mfd, req);
+		if (!pipe) {
+			pr_err("unable to allocate right base pipe\n");
+			mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+			ret = -EINVAL;
+			goto end;
+		}
+		sinfo->pipe_ndx[1] = pipe->ndx;
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	ret = mfd->mdp.kickoff_fnc(mfd, NULL);
+	if (ret) {
+		pr_err("error in displaying image\n");
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0] |
+			sinfo->pipe_ndx[1]);
+	}
+
+	kfree(req);
+	return ret;
+end:
+	kfree(req);
+	sinfo->pipe_ndx[0] = INVALID_PIPE_INDEX;
+	sinfo->pipe_ndx[1] = INVALID_PIPE_INDEX;
+	mutex_unlock(&mdp5_data->ov_lock);
+	return ret;
+}
+
+/*
+ * Allocate the splash buffer, copy in the built-in BGR888 logo, attach the
+ * iommu mapping and kick off a centered display of the image.
+ *
+ * Returns 0 on success (and marks the splash pipes as allocated), or a
+ * negative errno on parameter/allocation/kickoff failure.
+ */
+static int mdss_mdp_display_splash_image(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct fb_info *fbi;
+	uint32_t image_len = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT
+		* SPLASH_IMAGE_BPP;
+	struct mdss_rect src_rect, dest_rect;
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd || !mfd->fbi) {
+		pr_err("invalid input parameter\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	fbi = mfd->fbi;
+	sinfo = &mfd->splash_info;
+
+	/* Panel must be at least as large/deep as the logo. */
+	if (fbi->var.xres < SPLASH_IMAGE_WIDTH ||
+		fbi->var.yres < SPLASH_IMAGE_HEIGHT ||
+		(fbi->var.bits_per_pixel >> 3) < SPLASH_IMAGE_BPP) {
+		pr_err("invalid splash parameter configuration\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	sinfo->pipe_ndx[0] = INVALID_PIPE_INDEX;
+	sinfo->pipe_ndx[1] = INVALID_PIPE_INDEX;
+
+	/* Center the logo on the panel. */
+	src_rect.x = 0;
+	src_rect.y = 0;
+	dest_rect.w = src_rect.w = SPLASH_IMAGE_WIDTH;
+	dest_rect.h = src_rect.h = SPLASH_IMAGE_HEIGHT;
+	dest_rect.x = (fbi->var.xres >> 1) - (SPLASH_IMAGE_WIDTH >> 1);
+	dest_rect.y = (fbi->var.yres >> 1) - (SPLASH_IMAGE_HEIGHT >> 1);
+
+	rc = mdss_mdp_splash_alloc_memory(mfd, image_len);
+	if (rc) {
+		pr_err("splash buffer allocation failed\n");
+		goto end;
+	}
+
+	memcpy(sinfo->splash_buffer, splash_bgr888_image, image_len);
+
+	/* Attach failure is non-fatal: kickoff may still work if mapped. */
+	rc = mdss_mdp_splash_iommu_attach(mfd);
+	if (rc)
+		pr_debug("iommu dynamic attach failed\n");
+
+	rc = mdss_mdp_splash_kickoff(mfd, &src_rect, &dest_rect);
+	if (rc)
+		pr_err("splash image display failed\n");
+	else
+		sinfo->splash_pipe_allocated = true;
+end:
+	return rc;
+}
+
+/*
+ * CTL frame-done notifier for the splash screen.
+ *
+ * On the first frame done, the 1-to-1 splash mapping is torn down and the
+ * bootloader splash state is cleaned up. Once another client has staged
+ * pipes (splash_pipe_allocated cleared) and more than
+ * MAX_FRAME_DONE_COUNT_WAIT frames have completed, the splash thread is
+ * woken so it can release its resources.
+ */
+static int mdss_mdp_splash_ctl_cb(struct notifier_block *self,
+	unsigned long event, void *data)
+{
+	struct msm_fb_splash_info *sinfo = container_of(self,
+		struct msm_fb_splash_info, notifier);
+	struct msm_fb_data_type *mfd;
+
+	/* NOTE(review): container_of() of a registered notifier cannot be
+	 * NULL here; checks retained as defensive no-ops.
+	 */
+	if (!sinfo)
+		goto done;
+
+	mfd = container_of(sinfo, struct msm_fb_data_type, splash_info);
+
+	if (!mfd)
+		goto done;
+
+	if (event != MDP_NOTIFY_FRAME_DONE)
+		goto done;
+
+	if (!sinfo->frame_done_count) {
+		mdss_mdp_splash_unmap_splash_mem(mfd);
+		mdss_mdp_splash_cleanup(mfd, false);
+	/* wait for 2 frame done events before releasing memory */
+	} else if (sinfo->frame_done_count > MAX_FRAME_DONE_COUNT_WAIT &&
+		sinfo->splash_thread) {
+		complete(&sinfo->frame_done);
+		sinfo->splash_thread = NULL;
+	}
+
+	/* increase frame done count after pipes are staged from other client */
+	if (!sinfo->splash_pipe_allocated)
+		sinfo->frame_done_count++;
+done:
+	return NOTIFY_OK;
+}
+
+/*
+ * Kthread that unblanks the framebuffer, turns on a mid-level backlight,
+ * displays the splash logo, then waits for the frame-done notifier to
+ * signal (after a real client takes over) before freeing the splash
+ * buffer and unregistering the notifier.
+ *
+ * Fix: the original jumped to 'end' on fb_blank() failure while still
+ * holding the fb_info lock taken by lock_fb_info(); the lock is now
+ * released on that path as well.
+ */
+static int mdss_mdp_splash_thread(void *data)
+{
+	struct msm_fb_data_type *mfd = data;
+	struct mdss_overlay_private *mdp5_data;
+	int ret = -EINVAL;
+
+	if (!mfd) {
+		pr_err("invalid input parameter\n");
+		goto end;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	lock_fb_info(mfd->fbi);
+	ret = fb_blank(mfd->fbi, FB_BLANK_UNBLANK);
+	if (ret) {
+		pr_err("can't turn on fb!\n");
+		/* Drop the fb_info lock before bailing out. */
+		unlock_fb_info(mfd->fbi);
+		goto end;
+	}
+	unlock_fb_info(mfd->fbi);
+
+	mutex_lock(&mfd->bl_lock);
+	mfd->allow_bl_update = true;
+	mdss_fb_set_backlight(mfd, mfd->panel_info->bl_max >> 1);
+	mutex_unlock(&mfd->bl_lock);
+
+	init_completion(&mfd->splash_info.frame_done);
+
+	mfd->splash_info.notifier.notifier_call = mdss_mdp_splash_ctl_cb;
+	mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
+		&mfd->splash_info.notifier);
+
+	ret = mdss_mdp_display_splash_image(mfd);
+	if (ret) {
+		/*
+		 * keep thread alive to release dynamically allocated
+		 * resources
+		 */
+		pr_err("splash image display failed\n");
+	}
+
+	/* wait for second display complete to release splash resources */
+	ret = wait_for_completion_killable(&mfd->splash_info.frame_done);
+
+	mdss_mdp_splash_free_memory(mfd);
+
+	mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
+		&mfd->splash_info.notifier);
+end:
+	return ret;
+}
+
+/*
+ * Parse the splash-related device-tree properties for @mfd: the splash-logo
+ * enable flag and the bootloader-reserved splash memory region (either a
+ * legacy "qcom,memblock-reserve" <addr size> pair or a
+ * "qcom,cont-splash-memory" child with a "linux,contiguous-region" phandle).
+ *
+ * On success the region is recorded in mdp5 private data; if continuous
+ * splash is disabled the reservation is immediately returned to the system.
+ *
+ * Fix: "qcom,memblock-reserve" was read into u32 offsets[2] using the raw
+ * DT-derived element count, so a malformed property with more than two
+ * cells overflowed the stack buffer (and fewer than two left
+ * splash_mem_size uninitialized). The count is now validated first.
+ *
+ * Returns 0 on success or -EINVAL on missing/invalid DT data (only fatal
+ * when continuous splash is enabled).
+ */
+static __ref int mdss_mdp_splash_parse_dt(struct msm_fb_data_type *mfd)
+{
+	struct platform_device *pdev = mfd->pdev;
+	struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
+	int len = 0, rc = 0;
+	u32 offsets[2];
+	struct device_node *pnode, *child_node;
+
+	mfd->splash_info.splash_logo_enabled =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-fb-splash-logo-enabled");
+
+	of_find_property(pdev->dev.of_node, "qcom,memblock-reserve", &len);
+	if (len) {
+		len = len / sizeof(u32);
+		/* offsets[] holds exactly one <addr size> pair; reject
+		 * malformed entries instead of overflowing the buffer.
+		 */
+		if (len != ARRAY_SIZE(offsets)) {
+			pr_err("invalid mem reserve settings for fb\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,memblock-reserve", offsets, len);
+		if (rc) {
+			pr_err("error reading mem reserve settings for fb\n");
+			goto error;
+		}
+	} else {
+		child_node = of_get_child_by_name(pdev->dev.of_node,
+			"qcom,cont-splash-memory");
+		if (!child_node) {
+			pr_err("splash mem child node is not present\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		pnode = of_parse_phandle(child_node, "linux,contiguous-region",
+			0);
+		if (pnode != NULL) {
+			const u32 *addr;
+			u64 size;
+
+			addr = of_get_address(pnode, 0, &size, NULL);
+			if (!addr) {
+				pr_err("failed to parse the splash memory address\n");
+				of_node_put(pnode);
+				rc = -EINVAL;
+				goto error;
+			}
+			/* Address cells are <hi lo>; take the low word. */
+			offsets[0] = (u32) of_read_ulong(addr, 2);
+			offsets[1] = (u32) size;
+			of_node_put(pnode);
+		} else {
+			pr_err("mem reservation for splash screen fb not present\n");
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+	if (!memblock_is_reserved(offsets[0])) {
+		pr_debug("failed to reserve memory for fb splash\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	mdp5_mdata->splash_mem_addr = offsets[0];
+	mdp5_mdata->splash_mem_size = offsets[1];
+	pr_debug("memaddr=%x size=%x\n", mdp5_mdata->splash_mem_addr,
+		mdp5_mdata->splash_mem_size);
+
+error:
+	/* Without continuous splash the reservation is not needed at all;
+	 * hand it back. A parse failure is only fatal when continuous
+	 * splash is enabled.
+	 */
+	if (!rc && !mfd->panel_info->cont_splash_enabled &&
+		mdp5_mdata->splash_mem_addr) {
+		pr_debug("mem reservation not reqd if cont splash disabled\n");
+		memblock_free(mdp5_mdata->splash_mem_addr,
+			mdp5_mdata->splash_mem_size);
+		mdss_free_bootmem(mdp5_mdata->splash_mem_addr,
+			mdp5_mdata->splash_mem_size);
+	} else if (rc && mfd->panel_info->cont_splash_enabled) {
+		pr_err("no rsvd mem found in DT for splash screen\n");
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * One-time splash initialization for @mfd: parse the DT reservation and,
+ * when the splash logo is enabled, spawn the kthread that paints it.
+ * A thread-creation failure is logged but not propagated.
+ */
+int mdss_mdp_splash_init(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	if (!mfd)
+		return -EINVAL;
+
+	rc = mdss_mdp_splash_parse_dt(mfd);
+	if (rc) {
+		pr_err("splash memory reserve failed\n");
+		return rc;
+	}
+
+	if (!mfd->splash_info.splash_logo_enabled)
+		return -EINVAL;
+
+	mfd->splash_info.splash_thread = kthread_run(mdss_mdp_splash_thread,
+		mfd, "mdss_fb_splash");
+	if (IS_ERR(mfd->splash_info.splash_thread)) {
+		pr_err("unable to start splash thread %d\n", mfd->index);
+		mfd->splash_info.splash_thread = NULL;
+	}
+
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.h b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.h
new file mode 100644
index 0000000..205bb65
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_SPLASH_LOGO
+#define MDSS_MDP_SPLASH_LOGO
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+
+/* Per-framebuffer book-keeping for the boot splash-logo handoff. */
+struct msm_fb_splash_info {
+	struct task_struct *splash_thread;	/* kthread that paints the logo */
+	bool splash_logo_enabled;	/* DT: qcom,mdss-fb-splash-logo-enabled */
+	bool iommu_dynamic_attached;	/* 1-to-1 splash iommu mapping live */
+	struct notifier_block notifier;	/* CTL frame-done callback */
+	uint32_t frame_done_count;	/* frames since another client staged */
+	struct completion frame_done;	/* signalled to let the thread exit */
+
+	/* Backing buffer the logo image is copied into. */
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *table;
+	dma_addr_t iova;		/* device address handed to the pipe */
+	void *splash_buffer;		/* CPU mapping of the logo buffer */
+	int pipe_ndx[2];	/* left/right pipes; INVALID_PIPE_INDEX if unused */
+	bool splash_pipe_allocated;	/* pipes above are live */
+	uint32_t size;			/* allocated buffer size in bytes */
+};
+
+struct msm_fb_data_type;
+
+void mdss_mdp_release_splash_pipe(struct msm_fb_data_type *mfd);
+int mdss_mdp_splash_cleanup(struct msm_fb_data_type *mfd,
+	bool use_borderfill);
+int mdss_mdp_splash_init(struct msm_fb_data_type *mfd);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_trace.h b/drivers/video/fbdev/msm/mdss_mdp_trace.h
new file mode 100644
index 0000000..f8a6baf
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_trace.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define TRACE_MDSS_MDP_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdss
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mdss_mdp_trace
+
+#include <linux/tracepoint.h>
+#include "mdss_mdp.h"
+
+/*
+ * Template for SSPP (source pipe) tracepoints: records a pipe's mixer,
+ * stage, flags, format and src/dst rectangles at event time.
+ */
+DECLARE_EVENT_CLASS(mdp_sspp_template,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe),
+	TP_STRUCT__entry(
+		__field(u32, num)
+		__field(u32, play_cnt)
+		__field(u32, mixer)
+		__field(u32, stage)
+		__field(u32, flags)
+		__field(u32, format)
+		__field(u16, img_w)
+		__field(u16, img_h)
+		__field(u16, src_x)
+		__field(u16, src_y)
+		__field(u16, src_w)
+		__field(u16, src_h)
+		__field(u16, dst_x)
+		__field(u16, dst_y)
+		__field(u16, dst_w)
+		__field(u16, dst_h)
+	),
+	TP_fast_assign(
+		__entry->num = pipe->num;
+		__entry->play_cnt = pipe->play_cnt;
+		__entry->mixer = pipe->mixer_left->num;
+		__entry->stage = pipe->mixer_stage;
+		__entry->flags = pipe->flags;
+		__entry->format = pipe->src_fmt ?
+				pipe->src_fmt->format : -1;
+		__entry->img_w = pipe->img_width;
+		__entry->img_h = pipe->img_height;
+		__entry->src_x = pipe->src.x;
+		__entry->src_y = pipe->src.y;
+		__entry->src_w = pipe->src.w;
+		__entry->src_h = pipe->src.h;
+		__entry->dst_x = pipe->dst.x;
+		__entry->dst_y = pipe->dst.y;
+		__entry->dst_w = pipe->dst.w;
+		__entry->dst_h = pipe->dst.h;
+	),
+
+	TP_printk("pnum=%d mixer=%d play_cnt=%d flags=0x%x stage=%d format=%d img=%dx%d src=[%d,%d,%d,%d] dst=[%d,%d,%d,%d]",
+			__entry->num, __entry->mixer, __entry->play_cnt,
+			__entry->flags, __entry->stage,
+			__entry->format, __entry->img_w, __entry->img_h,
+			__entry->src_x, __entry->src_y,
+			__entry->src_w, __entry->src_h,
+			__entry->dst_x, __entry->dst_y,
+			__entry->dst_w, __entry->dst_h)
+);
+
+/* Pipe parameters programmed. */
+DEFINE_EVENT(mdp_sspp_template, mdp_sspp_set,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe)
+);
+
+/* Pipe parameters changed on an already-configured pipe. */
+DEFINE_EVENT(mdp_sspp_template, mdp_sspp_change,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe)
+);
+
+/* Per-pipe QoS LUT programming. */
+TRACE_EVENT(mdp_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 intf, u32 rot, u32 fl,
+		u32 lut, bool linear),
+	TP_ARGS(pnum, fmt, intf, rot, fl, lut, linear),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, fmt)
+		__field(u32, intf)
+		__field(u32, rot)
+		__field(u32, fl)
+		__field(u32, lut)
+		__field(bool, linear)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->fmt = fmt;
+		__entry->intf = intf;
+		__entry->rot = rot;
+		__entry->fl = fl;
+		__entry->lut = lut;
+		__entry->linear = linear;
+	),
+	TP_printk("pnum=%d fmt=%d intf=%d rot=%d fl:%d lut=0x%x lin:%d",
+		__entry->pnum, __entry->fmt,
+		__entry->intf, __entry->rot, __entry->fl,
+		__entry->lut, __entry->linear)
+);
+
+/* Per-pipe panic/robust LUT programming. */
+TRACE_EVENT(mdp_perf_set_panic_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 panic_lut,
+		u32 robust_lut),
+	TP_ARGS(pnum, fmt, mode, panic_lut, robust_lut),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, fmt)
+		__field(u32, mode)
+		__field(u32, panic_lut)
+		__field(u32, robust_lut)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->fmt = fmt;
+		__entry->mode = mode;
+		__entry->panic_lut = panic_lut;
+		__entry->robust_lut = robust_lut;
+	),
+	TP_printk("pnum=%d fmt=%d mode=%d luts[0x%x, 0x%x]",
+		__entry->pnum, __entry->fmt,
+		__entry->mode, __entry->panic_lut,
+		__entry->robust_lut)
+);
+
+/* Per-pipe watermark level / macro-block configuration. */
+TRACE_EVENT(mdp_perf_set_wm_levels,
+	TP_PROTO(u32 pnum, u32 use_space, u32 priority_bytes, u32 wm0, u32 wm1,
+		u32 wm2, u32 mb_cnt, u32 mb_size),
+	TP_ARGS(pnum, use_space, priority_bytes, wm0, wm1, wm2, mb_cnt,
+		mb_size),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, use_space)
+		__field(u32, priority_bytes)
+		__field(u32, wm0)
+		__field(u32, wm1)
+		__field(u32, wm2)
+		__field(u32, mb_cnt)
+		__field(u32, mb_size)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->use_space = use_space;
+		__entry->priority_bytes = priority_bytes;
+		__entry->wm0 = wm0;
+		__entry->wm1 = wm1;
+		__entry->wm2 = wm2;
+		__entry->mb_cnt = mb_cnt;
+		__entry->mb_size = mb_size;
+	),
+	TP_printk("pnum:%d useable_space:%d priority_bytes:%d watermark:[%d | %d | %d] nmb=%d mb_size=%d",
+		__entry->pnum, __entry->use_space,
+		__entry->priority_bytes, __entry->wm0, __entry->wm1,
+		__entry->wm2, __entry->mb_cnt, __entry->mb_size)
+);
+
+/* VBIF outstanding-transaction (OT) limit programming per xin. */
+TRACE_EVENT(mdp_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 is_vbif_rt),
+	TP_ARGS(pnum, xin_id, rd_lim, is_vbif_rt),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, xin_id)
+		__field(u32, rd_lim)
+		__field(u32, is_vbif_rt)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->xin_id = xin_id;
+		__entry->rd_lim = rd_lim;
+		__entry->is_vbif_rt = is_vbif_rt;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d rt:%d",
+		__entry->pnum, __entry->xin_id, __entry->rd_lim,
+		__entry->is_vbif_rt)
+);
+
+/* Intermediate terms of the per-pipe prefill bandwidth calculation. */
+TRACE_EVENT(mdp_perf_prefill_calc,
+	TP_PROTO(u32 pnum, u32 latency_buf, u32 ot, u32 y_buf, u32 y_scaler,
+		u32 pp_lines, u32 pp_bytes, u32 post_sc, u32 fbc_bytes,
+		u32 prefill_bytes),
+	TP_ARGS(pnum, latency_buf, ot, y_buf, y_scaler, pp_lines, pp_bytes,
+		post_sc, fbc_bytes, prefill_bytes),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, latency_buf)
+		__field(u32, ot)
+		__field(u32, y_buf)
+		__field(u32, y_scaler)
+		__field(u32, pp_lines)
+		__field(u32, pp_bytes)
+		__field(u32, post_sc)
+		__field(u32, fbc_bytes)
+		__field(u32, prefill_bytes)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->latency_buf = latency_buf;
+		__entry->ot = ot;
+		__entry->y_buf = y_buf;
+		__entry->y_scaler = y_scaler;
+		__entry->pp_lines = pp_lines;
+		__entry->pp_bytes = pp_bytes;
+		__entry->post_sc = post_sc;
+		__entry->fbc_bytes = fbc_bytes;
+		__entry->prefill_bytes = prefill_bytes;
+	),
+	TP_printk("pnum:%d latency_buf:%d ot:%d y_buf:%d y_scaler:%d pp_lines:%d, pp_bytes=%d post_sc:%d fbc_bytes:%d prefill:%d",
+		__entry->pnum, __entry->latency_buf, __entry->ot,
+		__entry->y_buf, __entry->y_scaler, __entry->pp_lines,
+		__entry->pp_bytes, __entry->post_sc,
+		__entry->fbc_bytes, __entry->prefill_bytes)
+);
+
+/* A layer mixer's configuration was (re)programmed. */
+TRACE_EVENT(mdp_mixer_update,
+	TP_PROTO(u32 mixer_num),
+	TP_ARGS(mixer_num),
+	TP_STRUCT__entry(
+		__field(u32, mixer_num)
+	),
+	TP_fast_assign(
+		__entry->mixer_num = mixer_num;
+	),
+	TP_printk("mixer=%d",
+		__entry->mixer_num)
+);
+
+/* A CTL commit, with the clock/bandwidth it requested. */
+TRACE_EVENT(mdp_commit,
+	TP_PROTO(struct mdss_mdp_ctl *ctl),
+	TP_ARGS(ctl),
+	TP_STRUCT__entry(
+		__field(u32, num)
+		__field(u32, play_cnt)
+		__field(u32, clk_rate)
+		__field(u64, bandwidth)
+	),
+	TP_fast_assign(
+		__entry->num = ctl->num;
+		__entry->play_cnt = ctl->play_cnt;
+		__entry->clk_rate = ctl->new_perf.mdp_clk_rate;
+		__entry->bandwidth = ctl->new_perf.bw_ctl;
+	),
+	TP_printk("num=%d play_cnt=%d bandwidth=%llu clk_rate=%u",
+			__entry->num,
+			__entry->play_cnt,
+			__entry->bandwidth,
+			__entry->clk_rate)
+);
+
+/* Video-mode underrun recovery completed; running underrun count. */
+TRACE_EVENT(mdp_video_underrun_done,
+	TP_PROTO(u32 ctl_num, u32 underrun_cnt),
+	TP_ARGS(ctl_num, underrun_cnt),
+	TP_STRUCT__entry(
+		__field(u32, ctl_num)
+		__field(u32, underrun_cnt)
+	),
+	TP_fast_assign(
+		__entry->ctl_num = ctl_num;
+		__entry->underrun_cnt = underrun_cnt;
+	),
+	TP_printk("ctl=%d count=%d",
+		__entry->ctl_num, __entry->underrun_cnt)
+);
+
+/* Bus-scaling vote (ab/ib quotas) requested for a client. */
+TRACE_EVENT(mdp_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+		unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+		__field(int, client)
+		__field(u64, ab_quota)
+		__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+		__entry->client = client;
+		__entry->ab_quota = ab_quota;
+		__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+		__entry->client,
+		__entry->ab_quota,
+		__entry->ib_quota)
+);
+
+/* MISR CRC captured for a hardware block at a given vsync count. */
+TRACE_EVENT(mdp_misr_crc,
+	TP_PROTO(u32 block_id, u32 vsync_cnt, u32 crc),
+	TP_ARGS(block_id, vsync_cnt, crc),
+	TP_STRUCT__entry(
+		__field(u32, block_id)
+		__field(u32, vsync_cnt)
+		__field(u32, crc)
+	),
+	TP_fast_assign(
+		__entry->block_id = block_id;
+		__entry->vsync_cnt = vsync_cnt;
+		__entry->crc = crc;
+	),
+	TP_printk("block_id:%d vsync_cnt:%d crc:0x%08x",
+		__entry->block_id, __entry->vsync_cnt, __entry->crc)
+);
+
+/* Command-mode ping-pong done IRQ; koff_cnt = kickoffs still pending. */
+TRACE_EVENT(mdp_cmd_pingpong_done,
+	TP_PROTO(struct mdss_mdp_ctl *ctl, u32 pp_num, int koff_cnt),
+	TP_ARGS(ctl, pp_num, koff_cnt),
+	TP_STRUCT__entry(
+		__field(u32, ctl_num)
+		__field(u32, intf_num)
+		__field(u32, pp_num)
+		__field(int, koff_cnt)
+	),
+	TP_fast_assign(
+		__entry->ctl_num = ctl->num;
+		__entry->intf_num = ctl->intf_num;
+		__entry->pp_num = pp_num;
+		__entry->koff_cnt = koff_cnt;
+	),
+	TP_printk("ctl num:%d intf_num:%d ctx:%d kickoff:%d",
+		__entry->ctl_num, __entry->intf_num, __entry->pp_num,
+		__entry->koff_cnt)
+);
+
+/* Command-mode bandwidth vote released after idle. */
+TRACE_EVENT(mdp_cmd_release_bw,
+	TP_PROTO(u32 ctl_num),
+	TP_ARGS(ctl_num),
+	TP_STRUCT__entry(
+		__field(u32, ctl_num)
+	),
+	TP_fast_assign(
+		__entry->ctl_num = ctl_num;
+	),
+	TP_printk("ctl num:%d", __entry->ctl_num)
+);
+
+/* Command-mode frame kickoff issued. */
+TRACE_EVENT(mdp_cmd_kickoff,
+	TP_PROTO(u32 ctl_num, int kickoff_cnt),
+	TP_ARGS(ctl_num, kickoff_cnt),
+	TP_STRUCT__entry(
+		__field(u32, ctl_num)
+		__field(int, kickoff_cnt)
+	),
+	TP_fast_assign(
+		__entry->ctl_num = ctl_num;
+		__entry->kickoff_cnt = kickoff_cnt;
+	),
+	TP_printk("kickoff ctl=%d cnt=%d",
+		__entry->ctl_num,
+		__entry->kickoff_cnt)
+);
+
+/* Waiting for command-mode ping-pong completion. */
+TRACE_EVENT(mdp_cmd_wait_pingpong,
+	TP_PROTO(u32 ctl_num, int kickoff_cnt),
+	TP_ARGS(ctl_num, kickoff_cnt),
+	TP_STRUCT__entry(
+		__field(u32, ctl_num)
+		__field(int, kickoff_cnt)
+	),
+	TP_fast_assign(
+		__entry->ctl_num = ctl_num;
+		__entry->kickoff_cnt = kickoff_cnt;
+	),
+	TP_printk("pingpong ctl=%d cnt=%d",
+		__entry->ctl_num,
+		__entry->kickoff_cnt)
+);
+
+/* Systrace-style begin/end marker ("B"/"E") emitted from driver code. */
+TRACE_EVENT(mdss_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+		__field(int, pid)
+		__string(trace_name, name)
+		__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+		__assign_str(trace_name, name);
+		__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+);
+
+/* Systrace-style counter sample.
+ * NOTE(review): records current->tgid and ignores the pid argument --
+ * looks deliberate (counter is attributed to the current process) but
+ * confirm against callers before relying on pid.
+ */
+TRACE_EVENT(mdp_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+		__field(int, pid)
+		__string(counter_name, name)
+		__field(int, value)
+	),
+	TP_fast_assign(
+		__entry->pid = current->tgid;
+		__assign_str(counter_name, name);
+		__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+		__get_str(counter_name), __entry->value)
+);
+
+/* Rotator bandwidth vote context switched (Active Only vs Active+Sleep). */
+TRACE_EVENT(rotator_bw_ao_as_context,
+	TP_PROTO(u32 state),
+	TP_ARGS(state),
+	TP_STRUCT__entry(
+		__field(u32, state)
+	),
+	TP_fast_assign(
+		__entry->state = state;
+	),
+	TP_printk("Rotator bw context %s",
+		__entry->state ? "Active Only" : "Active+Sleep")
+
+);
+
+#endif /* if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
new file mode 100644
index 0000000..44f53a7
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -0,0 +1,1321 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/msm_ion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <media/msm_media_info.h>
+
+#include <linux/dma-buf.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_formats.h"
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss_panel.h"
+
+#define PHY_ADDR_4G (1ULL<<32)
+
+void mdss_mdp_format_flag_removal(u32 *table, u32 num, u32 remove_bits)
+{
+	struct mdss_mdp_format_params *fmt = NULL;
+	int i, j;
+
+	if (table == NULL) {
+		pr_err("Null table provided\n");
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		for (j = 0; j < ARRAY_SIZE(mdss_mdp_format_map); j++) {
+			fmt = &mdss_mdp_format_map[j]; /* scan map by j */
+			if (table[i] == fmt->format) {
+				fmt->flag &= ~remove_bits;
+				break;
+			}
+		}
+	}
+}
+
+#define SET_BIT(value, bit_num) \
+ { \
+ value[bit_num >> 3] |= (1 << (bit_num & 7)); \
+ }
+static inline void __set_pipes_supported_fmt(struct mdss_mdp_pipe *pipe_list,
+ int count, struct mdss_mdp_format_params *fmt)
+{
+ struct mdss_mdp_pipe *pipe = pipe_list;
+ int i, j;
+
+ for (i = 0; i < count; i++, pipe += j)
+ for (j = 0; j < pipe->multirect.max_rects; j++)
+ SET_BIT(pipe[j].supported_formats, fmt->format);
+}
+
+void mdss_mdp_set_supported_formats(struct mdss_data_type *mdata)
+{
+ struct mdss_mdp_writeback *wb = mdata->wb;
+ bool has_tile = mdata->highest_bank_bit && !mdata->has_ubwc;
+ bool has_ubwc = mdata->has_ubwc;
+ int i;
+ int j;
+
+ for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_map); i++) {
+ struct mdss_mdp_format_params *fmt = &mdss_mdp_format_map[i];
+
+ if ((fmt->fetch_mode == MDSS_MDP_FETCH_TILE && has_tile) ||
+ (fmt->fetch_mode == MDSS_MDP_FETCH_LINEAR)) {
+ if (fmt->unpack_dx_format &&
+ !test_bit(MDSS_CAPS_10_BIT_SUPPORTED,
+ mdata->mdss_caps_map))
+ continue;
+
+ __set_pipes_supported_fmt(mdata->vig_pipes,
+ mdata->nvig_pipes, fmt);
+
+ if (fmt->flag & VALID_ROT_WB_FORMAT) {
+ for (j = 0; j < mdata->nwb; j++)
+ SET_BIT(wb[j].supported_input_formats,
+ fmt->format);
+ }
+ if (fmt->flag & VALID_MDP_WB_INTF_FORMAT) {
+ for (j = 0; j < mdata->nwb; j++)
+ SET_BIT(wb[j].supported_output_formats,
+ fmt->format);
+ }
+ if (fmt->flag & VALID_MDP_CURSOR_FORMAT &&
+ mdata->ncursor_pipes) {
+ __set_pipes_supported_fmt(mdata->cursor_pipes,
+ mdata->ncursor_pipes, fmt);
+ }
+
+ if (!fmt->is_yuv) {
+ __set_pipes_supported_fmt(mdata->rgb_pipes,
+ mdata->nrgb_pipes, fmt);
+ __set_pipes_supported_fmt(mdata->dma_pipes,
+ mdata->ndma_pipes, fmt);
+ }
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_ubwc_map) && has_ubwc; i++) {
+ struct mdss_mdp_format_params *fmt =
+ &mdss_mdp_format_ubwc_map[i].mdp_format;
+
+ if (fmt->unpack_dx_format &&
+ !test_bit(MDSS_CAPS_10_BIT_SUPPORTED,
+ mdata->mdss_caps_map))
+ continue;
+
+ __set_pipes_supported_fmt(mdata->vig_pipes,
+ mdata->nvig_pipes, fmt);
+
+ if (fmt->flag & VALID_ROT_WB_FORMAT) {
+ for (j = 0; j < mdata->nwb; j++)
+ SET_BIT(wb[j].supported_input_formats,
+ fmt->format);
+ }
+ if (fmt->flag & VALID_MDP_WB_INTF_FORMAT) {
+ for (j = 0; j < mdata->nwb; j++)
+ SET_BIT(wb[j].supported_output_formats,
+ fmt->format);
+ }
+ if (fmt->flag & VALID_MDP_CURSOR_FORMAT &&
+ mdata->ncursor_pipes) {
+ __set_pipes_supported_fmt(mdata->cursor_pipes,
+ mdata->ncursor_pipes, fmt);
+ }
+
+ if (!fmt->is_yuv) {
+ __set_pipes_supported_fmt(mdata->rgb_pipes,
+ mdata->nrgb_pipes, fmt);
+ __set_pipes_supported_fmt(mdata->dma_pipes,
+ mdata->ndma_pipes, fmt);
+ }
+ }
+}
+
+struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format)
+{
+ struct mdss_mdp_format_params *fmt = NULL;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int i;
+ bool fmt_found = false;
+
+ for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_map); i++) {
+ fmt = &mdss_mdp_format_map[i];
+ if (format == fmt->format) {
+ fmt_found = true;
+ break;
+ }
+ }
+
+ if (!fmt_found) {
+ for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_ubwc_map); i++) {
+ fmt = &mdss_mdp_format_ubwc_map[i].mdp_format;
+ if (format == fmt->format)
+ break;
+ }
+ }
+
+ return (mdss_mdp_is_ubwc_format(fmt) &&
+ !mdss_mdp_is_ubwc_supported(mdata)) ? NULL : fmt;
+}
+
+int mdss_mdp_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h)
+{
+ struct mdss_mdp_format_params_ubwc *fmt = NULL;
+ bool fmt_found = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_ubwc_map); i++) {
+ fmt = &mdss_mdp_format_ubwc_map[i];
+ if (format == fmt->mdp_format.format) {
+ fmt_found = true;
+ break;
+ }
+ }
+
+ if (!fmt_found)
+ return -EINVAL;
+
+ *w = fmt->micro.tile_width;
+ *h = fmt->micro.tile_height;
+ return 0;
+}
+
+void mdss_mdp_get_v_h_subsample_rate(u8 chroma_sample,
+ u8 *v_sample, u8 *h_sample)
+{
+ switch (chroma_sample) {
+ case MDSS_MDP_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case MDSS_MDP_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case MDSS_MDP_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
+
+void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
+ const struct mdss_rect *dst_rect,
+ const struct mdss_rect *sci_rect)
+{
+ int l = max(dst_rect->x, sci_rect->x);
+ int t = max(dst_rect->y, sci_rect->y);
+ int r = min((dst_rect->x + dst_rect->w), (sci_rect->x + sci_rect->w));
+ int b = min((dst_rect->y + dst_rect->h), (sci_rect->y + sci_rect->h));
+
+ if (r < l || b < t)
+ *res_rect = (struct mdss_rect){0, 0, 0, 0};
+ else
+ *res_rect = (struct mdss_rect){l, t, (r-l), (b-t)};
+}
+
+void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
+ struct mdss_rect *dst_rect,
+ const struct mdss_rect *sci_rect)
+{
+ struct mdss_rect res;
+
+ mdss_mdp_intersect_rect(&res, dst_rect, sci_rect);
+
+ if (res.w && res.h) {
+ if ((res.w != dst_rect->w) || (res.h != dst_rect->h)) {
+ src_rect->x = src_rect->x + (res.x - dst_rect->x);
+ src_rect->y = src_rect->y + (res.y - dst_rect->y);
+ src_rect->w = res.w;
+ src_rect->h = res.h;
+ }
+ *dst_rect = (struct mdss_rect)
+ {(res.x - sci_rect->x), (res.y - sci_rect->y),
+ res.w, res.h};
+ }
+}
+
+/*
+ * rect_copy_mdss_to_mdp() - copy mdss_rect struct to mdp_rect
+ * @mdp - pointer to mdp_rect, destination of the copy
+ * @mdss - pointer to mdss_rect, source of the copy
+ */
+void rect_copy_mdss_to_mdp(struct mdp_rect *mdp, struct mdss_rect *mdss)
+{
+ mdp->x = mdss->x;
+ mdp->y = mdss->y;
+ mdp->w = mdss->w;
+ mdp->h = mdss->h;
+}
+
+/*
+ * rect_copy_mdp_to_mdss() - copy mdp_rect struct to mdss_rect
+ * @mdp - pointer to mdp_rect, source of the copy
+ * @mdss - pointer to mdss_rect, destination of the copy
+ */
+void rect_copy_mdp_to_mdss(struct mdp_rect *mdp, struct mdss_rect *mdss)
+{
+ mdss->x = mdp->x;
+ mdss->y = mdp->y;
+ mdss->w = mdp->w;
+ mdss->h = mdp->h;
+}
+
+/*
+ * mdss_rect_cmp() - compares two rects
+ * @rect1 - rect value to compare
+ * @rect2 - rect value to compare
+ *
+ * Returns 1 if the rects are same, 0 otherwise.
+ */
+int mdss_rect_cmp(struct mdss_rect *rect1, struct mdss_rect *rect2)
+{
+ return rect1->x == rect2->x && rect1->y == rect2->y &&
+ rect1->w == rect2->w && rect1->h == rect2->h;
+}
+
+/*
+ * mdss_rect_overlap_check() - compare two rects and check if they overlap
+ * @rect1 - rect value to compare
+ * @rect2 - rect value to compare
+ *
+ * Returns true if rects overlap, false otherwise.
+ */
+bool mdss_rect_overlap_check(struct mdss_rect *rect1, struct mdss_rect *rect2)
+{
+ u32 rect1_left = rect1->x, rect1_right = rect1->x + rect1->w;
+ u32 rect1_top = rect1->y, rect1_bottom = rect1->y + rect1->h;
+ u32 rect2_left = rect2->x, rect2_right = rect2->x + rect2->w;
+ u32 rect2_top = rect2->y, rect2_bottom = rect2->y + rect2->h;
+
+ if ((rect1_right <= rect2_left) ||
+ (rect1_left >= rect2_right) ||
+ (rect1_bottom <= rect2_top) ||
+ (rect1_top >= rect2_bottom))
+ return false;
+
+ return true;
+}
+
+/*
+ * mdss_rect_split() - split roi into two with regards to split-point.
+ * @in_roi - input roi, non-split
+ * @l_roi - left roi after split
+ * @r_roi - right roi after split
+ *
+ * Split input ROI into left and right ROIs with respect to split-point. This
+ * is useful during partial update with ping-pong split enabled, where user-land
+ * program is aware of only one frame-buffer but physically there are two
+ * distinct panels which requires their own ROIs.
+ */
+void mdss_rect_split(struct mdss_rect *in_roi, struct mdss_rect *l_roi,
+ struct mdss_rect *r_roi, u32 splitpoint)
+{
+ memset(l_roi, 0x0, sizeof(*l_roi));
+ memset(r_roi, 0x0, sizeof(*r_roi));
+
+ /* left update needed */
+ if (in_roi->x < splitpoint) {
+ *l_roi = *in_roi;
+
+ if ((l_roi->x + l_roi->w) >= splitpoint)
+ l_roi->w = splitpoint - in_roi->x;
+ }
+
+ /* right update needed */
+ if ((in_roi->x + in_roi->w) > splitpoint) {
+ *r_roi = *in_roi;
+
+ if (in_roi->x < splitpoint) {
+ r_roi->x = 0;
+ r_roi->w = in_roi->x + in_roi->w - splitpoint;
+ } else {
+ r_roi->x = in_roi->x - splitpoint;
+ }
+ }
+
+ pr_debug("left: %d,%d,%d,%d right: %d,%d,%d,%d\n",
+ l_roi->x, l_roi->y, l_roi->w, l_roi->h,
+ r_roi->x, r_roi->y, r_roi->w, r_roi->h);
+}
+
+int mdss_mdp_get_rau_strides(u32 w, u32 h,
+ struct mdss_mdp_format_params *fmt,
+ struct mdss_mdp_plane_sizes *ps)
+{
+ if (fmt->is_yuv) {
+ ps->rau_cnt = DIV_ROUND_UP(w, 64);
+ ps->ystride[0] = 64 * 4;
+ ps->rau_h[0] = 4;
+ ps->rau_h[1] = 2;
+ if (fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)
+ ps->ystride[1] = 64 * 2;
+ else if (fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1) {
+ ps->ystride[1] = 32 * 4;
+ ps->rau_h[1] = 4;
+ } else
+ ps->ystride[1] = 32 * 2;
+
+ /* account for both chroma components */
+ ps->ystride[1] <<= 1;
+ } else if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+ ps->rau_cnt = DIV_ROUND_UP(w, 32);
+ ps->ystride[0] = 32 * 4 * fmt->bpp;
+ ps->ystride[1] = 0;
+ ps->rau_h[0] = 4;
+ ps->rau_h[1] = 0;
+ } else {
+ pr_err("Invalid format=%d\n", fmt->format);
+ return -EINVAL;
+ }
+
+ ps->ystride[0] *= ps->rau_cnt;
+ ps->ystride[1] *= ps->rau_cnt;
+ ps->num_planes = 2;
+
+ pr_debug("BWC rau_cnt=%d strides={%d,%d} heights={%d,%d}\n",
+ ps->rau_cnt, ps->ystride[0], ps->ystride[1],
+ ps->rau_h[0], ps->rau_h[1]);
+
+ return 0;
+}
+
+static int mdss_mdp_get_ubwc_plane_size(struct mdss_mdp_format_params *fmt,
+ u32 width, u32 height, struct mdss_mdp_plane_sizes *ps)
+{
+ int rc = 0;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_mdp_format_params_ubwc *fmt_ubwc =
+ (struct mdss_mdp_format_params_ubwc *)fmt;
+
+ if (!mdss_mdp_is_ubwc_supported(mdata)) {
+ pr_err("ubwc format is not supported for format: %d\n",
+ fmt->format);
+ return -EINVAL;
+ }
+
+ if (fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
+ fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
+ uint32_t y_stride_alignment = 0, uv_stride_alignment = 0;
+ uint32_t y_height_alignment = 0, uv_height_alignment = 0;
+ uint32_t y_tile_width = fmt_ubwc->micro.tile_width;
+ uint32_t y_tile_height = fmt_ubwc->micro.tile_height;
+ uint32_t uv_tile_width = y_tile_width / 2;
+ uint32_t uv_tile_height = y_tile_height;
+ uint32_t y_bpp_numer = 1, y_bpp_denom = 1;
+ uint32_t uv_bpp_numer = 1, uv_bpp_denom = 1;
+
+ ps->num_planes = 4;
+ if (fmt->format == MDP_Y_CBCR_H2V2_UBWC) {
+ y_stride_alignment = 128;
+ uv_stride_alignment = 64;
+ y_height_alignment = 32;
+ uv_height_alignment = 32;
+ y_bpp_numer = 1;
+ uv_bpp_numer = 2;
+ y_bpp_denom = 1;
+ uv_bpp_denom = 1;
+ } else if (fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
+ y_stride_alignment = 192;
+ uv_stride_alignment = 96;
+ y_height_alignment = 16;
+ uv_height_alignment = 16;
+ y_bpp_numer = 4;
+ uv_bpp_numer = 8;
+ y_bpp_denom = 3;
+ uv_bpp_denom = 3;
+ }
+
+ /* Y bitstream stride and plane size */
+ ps->ystride[0] = ALIGN(width, y_stride_alignment);
+ ps->ystride[0] = (ps->ystride[0] * y_bpp_numer) / y_bpp_denom;
+ ps->plane_size[0] = ALIGN(ps->ystride[0] *
+ ALIGN(height, y_height_alignment), 4096);
+
+ /* CbCr bitstream stride and plane size */
+ ps->ystride[1] = ALIGN(width / 2, uv_stride_alignment);
+ ps->ystride[1] = (ps->ystride[1] * uv_bpp_numer) / uv_bpp_denom;
+ ps->plane_size[1] = ALIGN(ps->ystride[1] *
+ ALIGN(height / 2, uv_height_alignment), 4096);
+
+ /* Y meta data stride and plane size */
+ ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, y_tile_width), 64);
+ ps->plane_size[2] = ALIGN(ps->ystride[2] *
+ ALIGN(DIV_ROUND_UP(height, y_tile_height), 16), 4096);
+
+ /* CbCr meta data stride and plane size */
+ ps->ystride[3] =
+ ALIGN(DIV_ROUND_UP(width / 2, uv_tile_width), 64);
+ ps->plane_size[3] = ALIGN(ps->ystride[3] * ALIGN(
+ DIV_ROUND_UP(height / 2, uv_tile_height), 16), 4096);
+ } else if (fmt->format == MDP_RGBA_8888_UBWC ||
+ fmt->format == MDP_RGBX_8888_UBWC ||
+ fmt->format == MDP_RGB_565_UBWC ||
+ fmt->format == MDP_RGBA_1010102_UBWC ||
+ fmt->format == MDP_RGBX_1010102_UBWC) {
+ uint32_t stride_alignment, bpp, aligned_bitstream_width;
+
+ if (fmt->format == MDP_RGB_565_UBWC) {
+ stride_alignment = 128;
+ bpp = 2;
+ } else {
+ stride_alignment = 64;
+ bpp = 4;
+ }
+ ps->num_planes = 2;
+
+ /* RGB bitstream stride and plane size */
+ aligned_bitstream_width = ALIGN(width, stride_alignment);
+ ps->ystride[0] = aligned_bitstream_width * bpp;
+ ps->plane_size[0] = ALIGN(bpp * aligned_bitstream_width *
+ ALIGN(height, 16), 4096);
+
+ /* RGB meta data stride and plane size */
+ ps->ystride[2] =
+ ALIGN(DIV_ROUND_UP(aligned_bitstream_width, 16), 64);
+ ps->plane_size[2] = ALIGN(ps->ystride[2] *
+ ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+ } else {
+ pr_err("%s: UBWC format not supported for fmt:%d\n",
+ __func__, fmt->format);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int mdss_mdp_get_plane_sizes(struct mdss_mdp_format_params *fmt, u32 w, u32 h,
+ struct mdss_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation)
+{
+ int i, rc = 0;
+ u32 bpp;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ if ((w > MAX_IMG_WIDTH) || (h > MAX_IMG_HEIGHT))
+ return -ERANGE;
+
+ bpp = fmt->bpp;
+ memset(ps, 0, sizeof(struct mdss_mdp_plane_sizes));
+
+ if (mdss_mdp_is_ubwc_format(fmt)) {
+ rc = mdss_mdp_get_ubwc_plane_size(fmt, w, h, ps);
+ } else if (bwc_mode) {
+ u32 height, meta_size;
+
+ rc = mdss_mdp_get_rau_strides(w, h, fmt, ps);
+ if (rc)
+ return rc;
+
+ height = DIV_ROUND_UP(h, ps->rau_h[0]);
+ meta_size = DIV_ROUND_UP(ps->rau_cnt, 8);
+ ps->ystride[1] += meta_size;
+ ps->ystride[0] += ps->ystride[1] + meta_size;
+ ps->plane_size[0] = ps->ystride[0] * height;
+
+ ps->ystride[1] = 2;
+ ps->plane_size[1] = 2 * ps->rau_cnt * height;
+
+ pr_debug("BWC data stride=%d size=%d meta size=%d\n",
+ ps->ystride[0], ps->plane_size[0], ps->plane_size[1]);
+ } else {
+ if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+ ps->num_planes = 1;
+ ps->plane_size[0] = w * h * bpp;
+ ps->ystride[0] = w * bpp;
+ } else if (fmt->format == MDP_Y_CBCR_H2V2_VENUS ||
+ fmt->format == MDP_Y_CRCB_H2V2_VENUS) {
+
+ int cf = (fmt->format == MDP_Y_CBCR_H2V2_VENUS) ?
+ COLOR_FMT_NV12 : COLOR_FMT_NV21;
+ ps->num_planes = 2;
+ ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
+ ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
+ ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
+ ps->ystride[0];
+ ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
+ ps->ystride[1];
+ } else {
+ u8 v_subsample, h_subsample, stride_align, height_align;
+ u32 chroma_samp;
+
+ chroma_samp = fmt->chroma_sample;
+
+ mdss_mdp_get_v_h_subsample_rate(chroma_samp,
+ &v_subsample, &h_subsample);
+
+ switch (fmt->format) {
+ case MDP_Y_CR_CB_GH2V2:
+ stride_align = 16;
+ height_align = 1;
+ break;
+ default:
+ stride_align = 1;
+ height_align = 1;
+ break;
+ }
+
+ w = w << fmt->unpack_dx_format;
+
+ ps->ystride[0] = ALIGN(w, stride_align);
+ ps->ystride[1] = ALIGN(w / h_subsample, stride_align);
+ ps->plane_size[0] = ps->ystride[0] *
+ ALIGN(h, height_align);
+ ps->plane_size[1] = ps->ystride[1] * (h / v_subsample);
+
+ if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
+ ps->num_planes = 2;
+ ps->plane_size[1] *= 2;
+ ps->ystride[1] *= 2;
+ } else { /* planar */
+ ps->num_planes = 3;
+ ps->plane_size[2] = ps->plane_size[1];
+ ps->ystride[2] = ps->ystride[1];
+ }
+ }
+ }
+
+ /* Safe to use MAX_PLANES as ps is memset at start of function */
+ for (i = 0; i < MAX_PLANES; i++)
+ ps->total_size += ps->plane_size[i];
+
+ return rc;
+}
+
+static int mdss_mdp_ubwc_data_check(struct mdss_mdp_data *data,
+ struct mdss_mdp_plane_sizes *ps,
+ struct mdss_mdp_format_params *fmt)
+{
+ int i, inc;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ unsigned long data_size = 0;
+ dma_addr_t base_addr;
+
+ if (!mdss_mdp_is_ubwc_supported(mdata)) {
+ pr_err("ubwc format is not supported for format: %d\n",
+ fmt->format);
+ return -ENOTSUPP;
+ }
+
+ if (data->p[0].len == ps->plane_size[0])
+ goto end;
+
+ /* From this point, assumption is plane 0 is to be divided */
+ data_size = data->p[0].len;
+ if (data_size < ps->total_size) {
+ pr_err("insufficient current mem len=%lu required mem len=%u\n",
+ data_size, ps->total_size);
+ return -ENOMEM;
+ }
+
+ base_addr = data->p[0].addr;
+
+ if (fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
+ fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** MDP PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Cbcr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ data->p[0].addr = base_addr + ps->plane_size[2];
+ data->p[0].len = ps->plane_size[0];
+
+ /* configure CbCr bitstream plane */
+ data->p[1].addr = base_addr + ps->plane_size[0]
+ + ps->plane_size[2] + ps->plane_size[3];
+ data->p[1].len = ps->plane_size[1];
+
+ /* configure Y metadata plane */
+ data->p[2].addr = base_addr;
+ data->p[2].len = ps->plane_size[2];
+
+ /* configure CbCr metadata plane */
+ data->p[3].addr = base_addr + ps->plane_size[0]
+ + ps->plane_size[2];
+ data->p[3].len = ps->plane_size[3];
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** MDP PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ /* configure RGB bitstream plane */
+ data->p[0].addr = base_addr + ps->plane_size[2];
+ data->p[0].len = ps->plane_size[0];
+
+ /* configure RGB metadata plane */
+ data->p[2].addr = base_addr;
+ data->p[2].len = ps->plane_size[2];
+ }
+ data->num_planes = ps->num_planes;
+
+end:
+ if (data->num_planes != ps->num_planes) {
+ pr_err("num_planes don't match: fmt:%d, data:%d, ps:%d\n",
+ fmt->format, data->num_planes, ps->num_planes);
+ return -EINVAL;
+ }
+
+ inc = ((fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
+ fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) ? 1 : 2);
+ for (i = 0; i < MAX_PLANES; i += inc) {
+ if (data->p[i].len != ps->plane_size[i]) {
+ pr_err("plane:%d fmt:%d, len does not match: data:%lu, ps:%d\n",
+ i, fmt->format, data->p[i].len,
+ ps->plane_size[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mdss_mdp_data_check(struct mdss_mdp_data *data,
+ struct mdss_mdp_plane_sizes *ps,
+ struct mdss_mdp_format_params *fmt)
+{
+ struct mdss_mdp_img_data *prev, *curr;
+ int i;
+
+ if (!ps)
+ return 0;
+
+ if (!data || data->num_planes == 0)
+ return -ENOMEM;
+
+ if (mdss_mdp_is_ubwc_format(fmt))
+ return mdss_mdp_ubwc_data_check(data, ps, fmt);
+
+ pr_debug("srcp0=%pa len=%lu frame_size=%u\n", &data->p[0].addr,
+ data->p[0].len, ps->total_size);
+
+ for (i = 0; i < ps->num_planes; i++) {
+ curr = &data->p[i];
+ if (i >= data->num_planes) {
+ u32 psize = ps->plane_size[i-1];
+
+ prev = &data->p[i-1];
+ if (prev->len > psize) {
+ curr->len = prev->len - psize;
+ prev->len = psize;
+ }
+ curr->addr = prev->addr + psize;
+ }
+ if (curr->len < ps->plane_size[i]) {
+ pr_err("insufficient mem=%lu p=%d len=%u\n",
+ curr->len, i, ps->plane_size[i]);
+ return -ENOMEM;
+ }
+ pr_debug("plane[%d] addr=%pa len=%lu\n", i,
+ &curr->addr, curr->len);
+ }
+ data->num_planes = ps->num_planes;
+
+ return 0;
+}
+
+int mdss_mdp_validate_offset_for_ubwc_format(
+	struct mdss_mdp_format_params *fmt, u16 x, u16 y)
+{
+	int ret;
+	u16 micro_w, micro_h;
+
+	ret = mdss_mdp_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
+	if (ret || !micro_w || !micro_h) {
+		pr_err("Could not get valid micro tile dimensions\n");
+		return -EINVAL;
+	}
+
+	if (x % (micro_w * UBWC_META_MACRO_W_H)) {
+		pr_err("x=%d does not align with meta width=%d\n", x,
+			micro_w * UBWC_META_MACRO_W_H);
+		return -EINVAL;
+	}
+
+	if (y % (micro_h * UBWC_META_MACRO_W_H)) {
+		pr_err("y=%d does not align with meta height=%d\n", y,
+			micro_h * UBWC_META_MACRO_W_H);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/* x and y are assumed to be valid, expected to line up with start of tiles */
+void mdss_mdp_ubwc_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+	struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u16 macro_w, micro_w, micro_h;
+	u32 offset;
+	int ret;
+
+	if (!mdss_mdp_is_ubwc_supported(mdata)) {
+		pr_err("ubwc format is not supported for format: %d\n",
+			fmt->format);
+		return;
+	}
+
+	ret = mdss_mdp_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
+	if (ret || !micro_w || !micro_h) {
+		pr_err("Could not get valid micro tile dimensions\n");
+		return;
+	}
+	macro_w = 4 * micro_w;
+
+	if (fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
+		fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
+		u16 chroma_macro_w = macro_w / 2;
+		u16 chroma_micro_w = micro_w / 2;
+
+		/* plane 1 and 3 are chroma, with sub sample of 2 */
+		offset = y * ps->ystride[0] +
+			(x / macro_w) * 4096;
+		if (offset < data->p[0].len) {
+			data->p[0].addr += offset;
+		} else {
+			ret = 1;
+			goto done;
+		}
+
+		offset = y / 2 * ps->ystride[1] +
+			((x / 2) / chroma_macro_w) * 4096;
+		if (offset < data->p[1].len) {
+			data->p[1].addr += offset;
+		} else {
+			ret = 2;
+			goto done;
+		}
+
+		offset = (y / micro_h) * ps->ystride[2] +
+			((x / micro_w) / UBWC_META_MACRO_W_H) *
+			UBWC_META_BLOCK_SIZE;
+		if (offset < data->p[2].len) {
+			data->p[2].addr += offset;
+		} else {
+			ret = 3;
+			goto done;
+		}
+
+		offset = ((y / 2) / micro_h) * ps->ystride[3] +
+			(((x / 2) / chroma_micro_w) / UBWC_META_MACRO_W_H) *
+			UBWC_META_BLOCK_SIZE;
+		if (offset < data->p[3].len) {
+			data->p[3].addr += offset;
+		} else {
+			ret = 4;
+			goto done;
+		}
+
+	} else {
+		offset = y * ps->ystride[0] +
+			(x / macro_w) * 4096;
+		if (offset < data->p[0].len) {
+			data->p[0].addr += offset;
+		} else {
+			ret = 1;
+			goto done;
+		}
+
+		offset = DIV_ROUND_UP(y, micro_h) * ps->ystride[2] +
+			((x / micro_w) / UBWC_META_MACRO_W_H) *
+			UBWC_META_BLOCK_SIZE;
+		if (offset < data->p[2].len) {
+			data->p[2].addr += offset;
+		} else {
+			ret = 3;
+			goto done;
+		}
+	}
+
+done:
+	if (ret) {
+		WARN(1, "idx %d, offsets:%u too large for buflen%lu\n",
+			(ret - 1), offset, data->p[(ret - 1)].len);
+	}
+}
+
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+ struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt)
+{
+ if ((x == 0) && (y == 0))
+ return;
+
+ if (mdss_mdp_is_ubwc_format(fmt)) {
+ mdss_mdp_ubwc_data_calc_offset(data, x, y, ps, fmt);
+ return;
+ }
+
+ data->p[0].addr += y * ps->ystride[0];
+
+ if (data->num_planes == 1) {
+ data->p[0].addr += x * fmt->bpp;
+ } else {
+ u16 xoff, yoff;
+ u8 v_subsample, h_subsample;
+
+ mdss_mdp_get_v_h_subsample_rate(fmt->chroma_sample,
+ &v_subsample, &h_subsample);
+
+ xoff = x / h_subsample;
+ yoff = y / v_subsample;
+
+ data->p[0].addr += x;
+ data->p[1].addr += xoff + (yoff * ps->ystride[1]);
+ if (data->num_planes == 2) /* pseudo planar */
+ data->p[1].addr += xoff;
+ else /* planar */
+ data->p[2].addr += xoff + (yoff * ps->ystride[2]);
+ }
+}
+
+static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
+ int dir)
+{
+ struct ion_client *iclient = mdss_get_ionclient();
+ u32 domain;
+
+ if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
+ pr_debug("fb mem buf=0x%pa\n", &data->addr);
+ fdput(data->srcp_f);
+ memset(&data->srcp_f, 0, sizeof(struct fd));
+ } else if (data->srcp_f.file) {
+ pr_debug("pmem buf=0x%pa\n", &data->addr);
+ memset(&data->srcp_f, 0, sizeof(struct fd));
+ } else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+ pr_debug("ion hdl=%pK buf=0x%pa\n", data->srcp_dma_buf,
+ &data->addr);
+ if (!iclient) {
+ pr_err("invalid ion client\n");
+ return -ENOMEM;
+ }
+ if (data->mapped) {
+ domain = mdss_smmu_get_domain_type(data->flags,
+ rotator);
+ mdss_smmu_unmap_dma_buf(data->srcp_table,
+ domain, dir,
+ data->srcp_dma_buf);
+ data->mapped = false;
+ }
+ if (!data->skip_detach) {
+ dma_buf_unmap_attachment(data->srcp_attachment,
+ data->srcp_table,
+ mdss_smmu_dma_data_direction(dir));
+ dma_buf_detach(data->srcp_dma_buf,
+ data->srcp_attachment);
+ dma_buf_put(data->srcp_dma_buf);
+ data->srcp_dma_buf = NULL;
+ }
+ } else if (data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ /*
+ * skip memory unmapping - secure display uses physical
+ * address which does not require buffer unmapping
+ *
+ * For LT targets in secure display usecase, srcp_dma_buf will
+ * be filled due to map call which will be unmapped above.
+ *
+ */
+ pr_debug("skip memory unmapping for secure display content\n");
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mdss_mdp_get_img(struct msmfb_data *img,
+ struct mdss_mdp_img_data *data, struct device *dev,
+ bool rotator, int dir)
+{
+ struct fd f;
+ int ret = -EINVAL;
+ int fb_num;
+ unsigned long *len;
+ u32 domain;
+ dma_addr_t *start;
+ struct ion_client *iclient = mdss_get_ionclient();
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ start = &data->addr;
+ len = &data->len;
+ data->flags |= img->flags;
+ data->offset = img->offset;
+ if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
+ f = fdget(img->memory_id);
+ if (f.file == NULL) {
+ pr_err("invalid framebuffer file (%d)\n",
+ img->memory_id);
+ return -EINVAL;
+ }
+ data->srcp_f = f;
+
+ if (MAJOR(f.file->f_path.dentry->d_inode->i_rdev) == FB_MAJOR) {
+ fb_num = MINOR(f.file->f_path.dentry->d_inode->i_rdev);
+ ret = mdss_fb_get_phys_info(start, len, fb_num);
+ if (ret)
+ pr_err("mdss_fb_get_phys_info() failed\n");
+ } else {
+ pr_err("invalid FB_MAJOR\n");
+ ret = -1;
+ }
+ } else if (iclient) {
+ if (mdss_mdp_is_map_needed(mdata, data)) {
+ data->srcp_dma_buf = dma_buf_get(img->memory_id);
+ if (IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+ pr_err("error on ion_import_fd\n");
+ ret = PTR_ERR(data->srcp_dma_buf);
+ data->srcp_dma_buf = NULL;
+ return ret;
+ }
+ domain = mdss_smmu_get_domain_type(data->flags,
+ rotator);
+
+ data->srcp_attachment =
+ mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
+ dev, domain);
+ if (IS_ERR(data->srcp_attachment)) {
+ ret = PTR_ERR(data->srcp_attachment);
+ goto err_put;
+ }
+
+ data->srcp_table =
+ dma_buf_map_attachment(data->srcp_attachment,
+ mdss_smmu_dma_data_direction(dir));
+ if (IS_ERR(data->srcp_table)) {
+ ret = PTR_ERR(data->srcp_table);
+ goto err_detach;
+ }
+
+ data->addr = 0;
+ data->len = 0;
+ data->mapped = false;
+ data->skip_detach = false;
+ /* return early, mapping will be done later */
+ ret = 0;
+ goto done;
+ } else {
+ struct ion_handle *ihandle = NULL;
+ struct sg_table *sg_ptr = NULL;
+
+ do {
+ ihandle = ion_import_dma_buf_fd(iclient,
+ img->memory_id);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ ret = -EINVAL;
+ pr_err("ion import buffer failed\n");
+ break;
+ }
+
+ sg_ptr = ion_sg_table(iclient, ihandle);
+ if (sg_ptr == NULL) {
+ pr_err("ion sg table get failed\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (sg_ptr->nents != 1) {
+ pr_err("ion buffer mapping failed\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (((uint64_t)sg_dma_address(sg_ptr->sgl) >=
+ PHY_ADDR_4G - sg_ptr->sgl->length)) {
+ pr_err("ion buffer mapped size is invalid\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ data->addr = sg_dma_address(sg_ptr->sgl);
+ data->len = sg_ptr->sgl->length;
+ data->mapped = true;
+ ret = 0;
+ } while (0);
+
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(iclient, ihandle);
+ return ret;
+ }
+ }
+ if (start && !*start) {
+ pr_err("start address is zero!\n");
+ mdss_mdp_put_img(data, rotator, dir);
+ return -ENOMEM;
+ }
+
+ if (!ret && (data->offset < data->len)) {
+ data->addr += data->offset;
+ data->len -= data->offset;
+
+ pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n",
+ img->memory_id, data->srcp_dma_buf, &data->addr,
+ data->len);
+ } else {
+ mdss_mdp_put_img(data, rotator, dir);
+ return ret ? : -EOVERFLOW;
+ }
+
+ return ret;
+err_detach:
+ dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+err_put:
+ dma_buf_put(data->srcp_dma_buf);
+done:
+ return ret;
+}
+
+static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data, bool rotator,
+ int dir)
+{
+ int ret = -EINVAL;
+ int domain;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct scatterlist *sg;
+ unsigned int i;
+ struct sg_table *table;
+
+ if (data->addr && data->len)
+ return 0;
+
+ if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+ if (mdss_res->mdss_util->iommu_attached() &&
+ (mdss_mdp_is_map_needed(mdata, data))) {
+ domain = mdss_smmu_get_domain_type(data->flags,
+ rotator);
+ data->dir = dir;
+ data->domain = domain;
+ ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+ data->srcp_table, domain,
+ &data->addr, &data->len, dir);
+ if (IS_ERR_VALUE((unsigned long)ret)) {
+ pr_err("smmu map dma buf failed: (%d)\n", ret);
+ goto err_unmap;
+ }
+ data->mapped = true;
+ } else {
+ data->addr = sg_phys(data->srcp_table->sgl);
+ data->len = 0;
+ table = data->srcp_table;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ data->len += sg->length;
+ }
+ ret = 0;
+ }
+ }
+
+ if (!data->addr) {
+ pr_err("start address is zero!\n");
+ mdss_mdp_put_img(data, rotator, dir);
+ return -ENOMEM;
+ }
+
+ if (!ret && (data->offset < data->len)) {
+ data->addr += data->offset;
+ data->len -= data->offset;
+
+ pr_debug("ihdl=%pK buf=0x%pa len=0x%lx\n",
+ data->srcp_dma_buf, &data->addr, data->len);
+ } else {
+ mdss_mdp_put_img(data, rotator, dir);
+ return ret ? : -EOVERFLOW;
+ }
+
+ return ret;
+
+err_unmap:
+ dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+ mdss_smmu_dma_data_direction(dir));
+ dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+ dma_buf_put(data->srcp_dma_buf);
+ return ret;
+}
+
+static int mdss_mdp_data_get(struct mdss_mdp_data *data,
+ struct msmfb_data *planes, int num_planes, u32 flags,
+ struct device *dev, bool rotator, int dir)
+{
+ int i, rc = 0;
+
+ if ((num_planes <= 0) || (num_planes > MAX_PLANES))
+ return -EINVAL;
+
+ for (i = 0; i < num_planes; i++) {
+ data->p[i].flags = flags;
+ rc = mdss_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
+ dir);
+ if (rc) {
+ pr_err("failed to get buf p=%d flags=%x\n", i, flags);
+ while (i > 0) {
+ i--;
+ mdss_mdp_put_img(&data->p[i], rotator, dir);
+ }
+ break;
+ }
+ }
+
+ data->num_planes = i;
+
+ return rc;
+}
+
+int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir)
+{
+ int i, rc = 0;
+
+ if (!data || !data->num_planes || data->num_planes > MAX_PLANES)
+ return -EINVAL;
+
+ for (i = 0; i < data->num_planes; i++) {
+ rc = mdss_mdp_map_buffer(&data->p[i], rotator, dir);
+ if (rc) {
+ pr_err("failed to map buf p=%d\n", i);
+ while (i > 0) {
+ i--;
+ mdss_mdp_put_img(&data->p[i], rotator, dir);
+ }
+ break;
+ }
+ }
+
+ return rc;
+}
+
+void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir)
+{
+ int i;
+
+ mdss_iommu_ctrl(1);
+ for (i = 0; i < data->num_planes && data->p[i].len; i++)
+ mdss_mdp_put_img(&data->p[i], rotator, dir);
+ memset(&data->p, 0, sizeof(struct mdss_mdp_img_data) * MAX_PLANES);
+ mdss_iommu_ctrl(0);
+
+ data->num_planes = 0;
+}
+
+int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
+ struct msmfb_data *planes, int num_planes, u32 flags,
+ struct device *dev, bool rotator, int dir,
+ struct mdp_layer_buffer *buffer)
+{
+ struct mdss_mdp_format_params *fmt;
+ struct mdss_mdp_plane_sizes ps;
+ int ret, i;
+ unsigned long total_buf_len = 0;
+
+ fmt = mdss_mdp_get_format_params(buffer->format);
+ if (!fmt) {
+ pr_err("Format %d not supported\n", buffer->format);
+ return -EINVAL;
+ }
+
+ ret = mdss_mdp_data_get(data, planes, num_planes,
+ flags, dev, rotator, dir);
+ if (ret)
+ return ret;
+
+ mdss_mdp_get_plane_sizes(fmt, buffer->width, buffer->height, &ps, 0, 0);
+
+ for (i = 0; i < num_planes ; i++) {
+ unsigned long plane_len = (data->p[i].srcp_dma_buf) ?
+ data->p[i].srcp_dma_buf->size : data->p[i].len;
+
+ if (plane_len < planes[i].offset) {
+ pr_err("Offset=%d larger than buffer size=%lu\n",
+ planes[i].offset, plane_len);
+ ret = -EINVAL;
+ goto buf_too_small;
+ }
+ total_buf_len += plane_len - planes[i].offset;
+ }
+
+ if (total_buf_len < ps.total_size) {
+ pr_err("Buffer size=%lu, expected size=%d\n", total_buf_len,
+ ps.total_size);
+ ret = -EINVAL;
+ goto buf_too_small;
+ }
+ return 0;
+
+buf_too_small:
+ mdss_mdp_data_free(data, rotator, dir);
+ return ret;
+}
+
+int mdss_mdp_calc_phase_step(u32 src, u32 dst, u32 *out_phase)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 unit, residue, result;
+
+ if (src == 0 || dst == 0)
+ return -EINVAL;
+
+ unit = 1 << PHASE_STEP_SHIFT;
+ *out_phase = mult_frac(unit, src, dst);
+
+ /* check if overflow is possible */
+ if (mdss_has_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG) && src > dst) {
+ residue = *out_phase - unit;
+ result = (residue * dst) + residue;
+
+ while (result > (unit + (unit >> 1)))
+ result -= unit;
+
+ if ((result > residue) && (result < unit))
+ return -EOVERFLOW;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.c b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
new file mode 100644
index 0000000..f11459b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
@@ -0,0 +1,483 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+
+#include "mdss_mdp_wfd.h"
+
+/*
+ * time out value for wfd to wait for any pending frames to finish
+ * assuming 30fps, and max 5 frames in the queue
+ */
+#define WFD_TIMEOUT_IN_MS 150
+
+struct mdss_mdp_wfd *mdss_mdp_wfd_init(struct device *device,
+ struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_wfd *wfd;
+
+ wfd = kzalloc(sizeof(struct mdss_mdp_wfd), GFP_KERNEL);
+ if (!wfd)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&wfd->lock);
+ INIT_LIST_HEAD(&wfd->data_queue);
+ init_completion(&wfd->comp);
+ wfd->ctl = ctl;
+ wfd->device = device;
+
+ return wfd;
+}
+
+void mdss_mdp_wfd_deinit(struct mdss_mdp_wfd *wfd)
+{
+ struct mdss_mdp_wfd_data *node, *temp;
+
+ list_for_each_entry_safe(node, temp, &wfd->data_queue, next)
+ mdss_mdp_wfd_remove_data(wfd, node);
+
+ kfree(wfd);
+}
+
+int mdss_mdp_wfd_wait_for_finish(struct mdss_mdp_wfd *wfd)
+{
+ int ret;
+
+ mutex_lock(&wfd->lock);
+ if (list_empty(&wfd->data_queue)) {
+ mutex_unlock(&wfd->lock);
+ return 0;
+ }
+ init_completion(&wfd->comp);
+ mutex_unlock(&wfd->lock);
+
+ ret = wait_for_completion_timeout(&wfd->comp,
+ msecs_to_jiffies(WFD_TIMEOUT_IN_MS));
+
+ if (ret == 0)
+ ret = -ETIME;
+ else if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+void mdss_mdp_wfd_destroy(struct mdss_mdp_wfd *wfd)
+{
+ struct mdss_mdp_ctl *ctl = wfd->ctl;
+
+ if (!ctl)
+ return;
+
+ if (ctl->ops.stop_fnc)
+ ctl->ops.stop_fnc(ctl, 0);
+
+ mdss_mdp_reset_mixercfg(ctl);
+
+ if (ctl->wb)
+ mdss_mdp_wb_free(ctl->wb);
+
+ if (ctl->mixer_left)
+ mdss_mdp_mixer_free(ctl->mixer_left);
+
+ if (ctl->mixer_right)
+ mdss_mdp_mixer_free(ctl->mixer_right);
+
+ ctl->mixer_left = NULL;
+ ctl->mixer_right = NULL;
+ ctl->wb = NULL;
+}
+
+bool mdss_mdp_wfd_is_config_same(struct msm_fb_data_type *mfd,
+ struct mdp_output_layer *layer)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_mdp_ctl *ctl = mdp5_data->wfd->ctl;
+ struct mdss_mdp_writeback *wb = NULL;
+
+ wb = ctl->wb;
+ if (!wb || !ctl->mixer_left)
+ return false;
+
+ if ((wb->num != layer->writeback_ndx)
+ || (ctl->width != layer->buffer.width)
+ || (ctl->height != layer->buffer.height)
+ || (ctl->dst_format != layer->buffer.format))
+ return false;
+
+ return true;
+}
+
+int mdss_mdp_wfd_setup(struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer)
+{
+ u32 wb_idx = layer->writeback_ndx;
+ struct mdss_mdp_ctl *ctl = wfd->ctl;
+ struct mdss_mdp_writeback *wb = NULL;
+ struct mdss_mdp_format_params *fmt = NULL;
+ int ret = 0;
+ u32 width, height, max_mixer_width;
+
+ if (!ctl)
+ return -EINVAL;
+
+ if (mdss_mdp_wfd_is_config_same(ctl->mfd, layer)) {
+ pr_debug("wfd prepared already\n");
+ return 0;
+ }
+
+ if (ctl->wb) {
+ pr_debug("config change, wait for pending buffer done\n");
+ ret = mdss_mdp_wfd_wait_for_finish(wfd);
+ if (ret) {
+ pr_err("fail to wait for outstanding request\n");
+ return ret;
+ }
+ mdss_mdp_wfd_destroy(wfd);
+ }
+ width = layer->buffer.width;
+ height = layer->buffer.height;
+ max_mixer_width = ctl->mdata->max_mixer_width;
+ pr_debug("widthxheight:%dx%d,wb_idx:%d, ctl:%d\n", width, height,
+ wb_idx, ctl->num);
+
+ wb = mdss_mdp_wb_assign(wb_idx, ctl->num);
+ if (!wb) {
+ pr_err("could not allocate wb\n");
+ ret = -EINVAL;
+ goto wfd_setup_error;
+ }
+ ctl->wb = wb;
+ ctl->dst_format = layer->buffer.format;
+ ctl->dst_comp_ratio = layer->buffer.comp_ratio;
+ ctl->width = width;
+ ctl->height = height;
+ ctl->roi = (struct mdss_rect) {0, 0, width, height};
+ ctl->is_secure = (layer->flags & MDP_LAYER_SECURE_SESSION);
+
+ fmt = mdss_mdp_get_format_params(layer->buffer.format);
+
+ if (fmt == NULL) {
+ pr_err("invalid buffer format\n");
+ ret = -EINVAL;
+ goto wfd_setup_error;
+ }
+
+	/* only 3 CSC types are supported */
+ if (fmt->is_yuv) {
+ switch (layer->color_space) {
+ case MDP_CSC_ITU_R_601:
+ ctl->csc_type = MDSS_MDP_CSC_RGB2YUV_601L;
+ break;
+ case MDP_CSC_ITU_R_709:
+ ctl->csc_type = MDSS_MDP_CSC_RGB2YUV_709L;
+ break;
+ case MDP_CSC_ITU_R_601_FR:
+ default:
+ ctl->csc_type = MDSS_MDP_CSC_RGB2YUV_601FR;
+ break;
+ }
+ } else {
+ ctl->csc_type = MDSS_MDP_CSC_RGB2RGB;
+ }
+
+ if (ctl->mdata->wfd_mode == MDSS_MDP_WFD_INTERFACE) {
+ ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
+ MDSS_MDP_MIXER_TYPE_INTF, (width > max_mixer_width), 0);
+ if (width > max_mixer_width) {
+ ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
+ MDSS_MDP_MIXER_TYPE_INTF, true, 0);
+ ctl->mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+ width = width / 2;
+ } else {
+ ctl->mfd->split_mode = MDP_SPLIT_MODE_NONE;
+ }
+	} else if (width > max_mixer_width) {
+		pr_err("width > max_mixer_width supported only in MDSS_MDP_WB_INTF\n");
+		ret = -EINVAL; goto wfd_setup_error;
+ } else if (ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) {
+ ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
+ MDSS_MDP_MIXER_TYPE_WRITEBACK, false, 0);
+ } else {
+ ctl->mixer_left = mdss_mdp_mixer_assign(wb->num, true, false);
+ }
+
+ if (!ctl->mixer_left ||
+ ((ctl->mfd->split_mode ==
+ MDP_DUAL_LM_SINGLE_DISPLAY) && (!ctl->mixer_right))) {
+ if (ctl->mixer_left)
+ mdss_mdp_mixer_free(ctl->mixer_left);
+ if (ctl->mixer_right)
+ mdss_mdp_mixer_free(ctl->mixer_right);
+ pr_err("could not allocate mixer(s) for ctl:%d\n", ctl->num);
+ ret = -ENODEV;
+ goto wfd_setup_error;
+ }
+
+ if (ctl->mixer_left->type == MDSS_MDP_MIXER_TYPE_INTF ||
+ ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) {
+ ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
+ } else {
+ switch (ctl->mixer_left->num) {
+ case MDSS_MDP_WB_LAYERMIXER0:
+ ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE;
+ break;
+ case MDSS_MDP_WB_LAYERMIXER1:
+ ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE;
+ break;
+ default:
+ pr_err("Incorrect writeback config num=%d\n",
+ ctl->mixer_left->num);
+ ret = -EINVAL;
+ goto wfd_setup_error;
+ }
+ ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_LINE;
+ }
+
+ ctl->mixer_left->width = width;
+ ctl->mixer_left->height = height;
+ ctl->mixer_left->roi = (struct mdss_rect) {0, 0, width, height};
+ ctl->mixer_left->ctl = ctl;
+ ctl->mixer_left->valid_roi = true;
+ ctl->mixer_left->roi_changed = true;
+
+ if (ctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) {
+ ctl->mixer_right->width = width;
+ ctl->mixer_right->height = height;
+ ctl->mixer_right->roi = (struct mdss_rect) {0, 0,
+ width, height};
+ ctl->mixer_right->valid_roi = true;
+ ctl->mixer_right->roi_changed = true;
+ ctl->mixer_right->ctl = ctl;
+ ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+ } else {
+ ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+ }
+
+ if (ctl->ops.start_fnc) {
+ ret = ctl->ops.start_fnc(ctl);
+ if (ret) {
+ pr_err("wfd start failed %d\n", ret);
+ goto wfd_setup_error;
+ }
+ }
+
+ return ret;
+
+wfd_setup_error:
+ mdss_mdp_wfd_destroy(wfd);
+ return ret;
+}
+
+static int mdss_mdp_wfd_import_data(struct device *device,
+ struct mdss_mdp_wfd_data *wfd_data)
+{
+ int i, ret = 0;
+ u32 flags = 0;
+ struct mdp_layer_buffer *buffer = &wfd_data->layer.buffer;
+ struct mdss_mdp_data *data = &wfd_data->data;
+ struct msmfb_data planes[MAX_PLANES];
+
+ if (wfd_data->layer.flags & MDP_LAYER_SECURE_SESSION)
+ flags = MDP_SECURE_OVERLAY_SESSION;
+
+ if (buffer->plane_count > MAX_PLANES) {
+ pr_err("buffer plane_count exceeds MAX_PLANES limit:%d",
+ buffer->plane_count);
+ return -EINVAL;
+ }
+
+ memset(planes, 0, sizeof(planes));
+
+ for (i = 0; i < buffer->plane_count; i++) {
+ planes[i].memory_id = buffer->planes[i].fd;
+ planes[i].offset = buffer->planes[i].offset;
+ }
+
+ ret = mdss_mdp_data_get_and_validate_size(data, planes,
+ buffer->plane_count, flags, device,
+ false, DMA_FROM_DEVICE, buffer);
+
+ return ret;
+}
+
+struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
+ struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer)
+{
+ int ret;
+ struct mdss_mdp_wfd_data *wfd_data;
+
+ if (!wfd->ctl || !wfd->ctl->wb) {
+ pr_err("wfd not setup\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ wfd_data = kzalloc(sizeof(struct mdss_mdp_wfd_data), GFP_KERNEL);
+ if (!wfd_data)
+ return ERR_PTR(-ENOMEM);
+
+ wfd_data->layer = *layer;
+ ret = mdss_mdp_wfd_import_data(wfd->device, wfd_data);
+ if (ret) {
+ pr_err("fail to import data\n");
+ mdss_mdp_data_free(&wfd_data->data, true, DMA_FROM_DEVICE);
+ kfree(wfd_data);
+ return ERR_PTR(ret);
+ }
+
+ mutex_lock(&wfd->lock);
+ list_add_tail(&wfd_data->next, &wfd->data_queue);
+ mutex_unlock(&wfd->lock);
+
+ return wfd_data;
+}
+
+void mdss_mdp_wfd_remove_data(struct mdss_mdp_wfd *wfd,
+ struct mdss_mdp_wfd_data *wfd_data)
+{
+ mutex_lock(&wfd->lock);
+ list_del_init(&wfd_data->next);
+ if (list_empty(&wfd->data_queue))
+ complete(&wfd->comp);
+ mutex_unlock(&wfd->lock);
+ mdss_mdp_data_free(&wfd_data->data, true, DMA_FROM_DEVICE);
+ kfree(wfd_data);
+}
+
+static int mdss_mdp_wfd_validate_out_configuration(struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer)
+{
+ struct mdss_mdp_format_params *fmt = NULL;
+ struct mdss_mdp_ctl *ctl = wfd->ctl;
+ u32 wb_idx = layer->writeback_ndx;
+
+ if (mdss_mdp_is_wb_mdp_intf(wb_idx, ctl->num)) {
+ fmt = mdss_mdp_get_format_params(layer->buffer.format);
+ if (fmt && !(fmt->flag & VALID_MDP_WB_INTF_FORMAT)) {
+ pr_err("wb=%d does not support dst fmt:%d\n", wb_idx,
+ layer->buffer.format);
+ return -EINVAL;
+ }
+
+ if (!ctl->mdata->has_wb_ubwc && mdss_mdp_is_ubwc_format(fmt)) {
+ pr_err("wb=%d does not support UBWC fmt:%d\n", wb_idx,
+ layer->buffer.format);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int mdss_mdp_wfd_validate(struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer)
+{
+ u32 wb_idx = layer->writeback_ndx;
+
+ if (mdss_mdp_wfd_validate_out_configuration(wfd, layer)) {
+ pr_err("failed to validate output config\n");
+ return -EINVAL;
+ }
+
+ if (wb_idx > wfd->ctl->mdata->nwb)
+ return -EINVAL;
+
+ return 0;
+}
+
+int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
+ struct mdss_mdp_commit_cb *commit_cb)
+{
+ struct mdss_mdp_ctl *ctl = wfd->ctl;
+ struct mdss_mdp_writeback_arg wb_args;
+ struct mdss_mdp_wfd_data *wfd_data;
+ int ret = 0;
+
+ if (!ctl) {
+ pr_err("no ctl\n");
+ return -EINVAL;
+ }
+
+ if (!ctl->wb) {
+ pr_err("wfd not prepared\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&wfd->lock);
+ if (list_empty(&wfd->data_queue)) {
+ pr_debug("no output buffer\n");
+ mutex_unlock(&wfd->lock);
+ mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_DONE);
+ return 0;
+ }
+ wfd_data = list_first_entry(&wfd->data_queue,
+ struct mdss_mdp_wfd_data, next);
+ mutex_unlock(&wfd->lock);
+
+ ret = mdss_mdp_data_map(&wfd_data->data, true, DMA_FROM_DEVICE);
+ if (ret) {
+ pr_err("fail to acquire output buffer\n");
+ goto kickoff_error;
+ }
+
+ memset(&wb_args, 0, sizeof(wb_args));
+ wb_args.data = &wfd_data->data;
+
+ ret = mdss_mdp_writeback_display_commit(ctl, &wb_args);
+ if (ret) {
+ pr_err("wfd commit error = %d, ctl=%d\n", ret, ctl->num);
+ goto kickoff_error;
+ }
+
+ if (commit_cb)
+ commit_cb->commit_cb_fnc(
+ MDP_COMMIT_STAGE_SETUP_DONE,
+ commit_cb->data);
+
+ ret = mdss_mdp_display_wait4comp(ctl);
+
+ if (commit_cb)
+ commit_cb->commit_cb_fnc(MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
+ commit_cb->data);
+
+kickoff_error:
+ mdss_mdp_wfd_commit_done(wfd);
+ return ret;
+}
+
+int mdss_mdp_wfd_commit_done(struct mdss_mdp_wfd *wfd)
+{
+ struct mdss_mdp_wfd_data *wfd_data;
+
+ mutex_lock(&wfd->lock);
+ if (list_empty(&wfd->data_queue)) {
+ pr_err("no output buffer\n");
+ mutex_unlock(&wfd->lock);
+ return -EINVAL;
+ }
+ wfd_data = list_first_entry(&wfd->data_queue,
+ struct mdss_mdp_wfd_data, next);
+ mutex_unlock(&wfd->lock);
+
+ mdss_mdp_wfd_remove_data(wfd, wfd_data);
+
+ return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.h b/drivers/video/fbdev/msm/mdss_mdp_wfd.h
new file mode 100644
index 0000000..b35feb7
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_MDP_WFD_H__
+#define __MDSS_MDP_WFD_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/msm_mdp_ext.h>
+
+#include "mdss_mdp.h"
+
+struct mdss_mdp_wfd_data {
+ struct mdp_output_layer layer;
+ struct mdss_mdp_data data;
+ bool signal_required;
+ struct list_head next;
+};
+
+struct mdss_mdp_wfd {
+ struct mutex lock;
+ struct list_head data_queue;
+ struct mdss_mdp_ctl *ctl;
+ struct device *device;
+ struct completion comp;
+};
+
+struct mdss_mdp_wfd *mdss_mdp_wfd_init(struct device *device,
+ struct mdss_mdp_ctl *ctl);
+
+void mdss_mdp_wfd_deinit(struct mdss_mdp_wfd *wfd);
+
+int mdss_mdp_wfd_setup(struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer);
+
+void mdss_mdp_wfd_destroy(struct mdss_mdp_wfd *wfd);
+
+struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
+ struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer);
+
+void mdss_mdp_wfd_remove_data(struct mdss_mdp_wfd *wfd,
+ struct mdss_mdp_wfd_data *data);
+
+int mdss_mdp_wfd_validate(struct mdss_mdp_wfd *wfd,
+ struct mdp_output_layer *layer);
+
+int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
+ struct mdss_mdp_commit_cb *commit_cb);
+
+int mdss_mdp_wfd_commit_done(struct mdss_mdp_wfd *wfd);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_panel.c b/drivers/video/fbdev/msm/mdss_panel.c
new file mode 100644
index 0000000..de69d63
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_panel.c
@@ -0,0 +1,986 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+
+#include "mdss_panel.h"
+
+#define NUM_INTF 2
+
+/*
+ * rc_buf_thresh = {896, 1792, 2688, 3548, 4480, 5376, 6272, 6720,
+ * 7168, 7616, 7744, 7872, 8000, 8064, 8192};
+ * (x >> 6) & 0x0ff)
+ */
+static u32 dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54,
+ 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e};
+static char dsc_rc_range_min_qp_1_1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5,
+ 5, 5, 7, 13};
+static char dsc_rc_range_min_qp_1_1_scr1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5,
+ 5, 5, 9, 12};
+static char dsc_rc_range_max_qp_1_1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11,
+ 12, 13, 13, 15};
+static char dsc_rc_range_max_qp_1_1_scr1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10,
+ 11, 11, 12, 13};
+static char dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8,
+ -8, -10, -10, -12, -12, -12, -12};
+
+int mdss_panel_debugfs_fbc_setup(struct mdss_panel_debugfs_info *debugfs_info,
+ struct mdss_panel_info *panel_info, struct dentry *parent)
+{
+ struct dentry *fbc_root;
+ struct fbc_panel_info *fbc = &debugfs_info->panel_info.fbc;
+
+ fbc_root = debugfs_create_dir("fbc", parent);
+ if (IS_ERR_OR_NULL(fbc_root)) {
+ pr_err("Debugfs create fbc dir failed with error: %ld\n",
+ PTR_ERR(fbc_root));
+ return -ENODEV;
+ }
+
+ debugfs_create_bool("enable", 0644, fbc_root,
+ (bool *)&fbc->enabled);
+ debugfs_create_u32("bpp", 0644, fbc_root,
+ (u32 *)&fbc->target_bpp);
+ debugfs_create_u32("packing", 0644, fbc_root,
+ (u32 *)&fbc->comp_mode);
+ debugfs_create_bool("quant_err", 0644, fbc_root,
+ (bool *)&fbc->qerr_enable);
+ debugfs_create_u32("bias", 0644, fbc_root,
+ (u32 *)&fbc->cd_bias);
+ debugfs_create_bool("pat_mode", 0644, fbc_root,
+ (bool *)&fbc->pat_enable);
+ debugfs_create_bool("vlc_mode", 0644, fbc_root,
+ (bool *)&fbc->vlc_enable);
+ debugfs_create_bool("bflc_mode", 0644, fbc_root,
+ (bool *)&fbc->bflc_enable);
+ debugfs_create_u32("hline_budget", 0644, fbc_root,
+ (u32 *)&fbc->line_x_budget);
+ debugfs_create_u32("budget_ctrl", 0644, fbc_root,
+ (u32 *)&fbc->block_x_budget);
+ debugfs_create_u32("block_budget", 0644, fbc_root,
+ (u32 *)&fbc->block_budget);
+ debugfs_create_u32("lossless_thd", 0644, fbc_root,
+ (u32 *)&fbc->lossless_mode_thd);
+ debugfs_create_u32("lossy_thd", 0644, fbc_root,
+ (u32 *)&fbc->lossy_mode_thd);
+ debugfs_create_u32("rgb_thd", 0644, fbc_root,
+ (u32 *)&fbc->lossy_rgb_thd);
+ debugfs_create_u32("lossy_mode_idx", 0644, fbc_root,
+ (u32 *)&fbc->lossy_mode_idx);
+ debugfs_create_u32("slice_height", 0644, fbc_root,
+ (u32 *)&fbc->slice_height);
+ debugfs_create_u32("pred_mode", 0644, fbc_root,
+ (u32 *)&fbc->pred_mode);
+ debugfs_create_u32("enc_mode", 0644, fbc_root,
+ (u32 *)&fbc->enc_mode);
+ debugfs_create_u32("max_pred_err", 0644, fbc_root,
+ (u32 *)&fbc->max_pred_err);
+
+ debugfs_info->panel_info.fbc = panel_info->fbc;
+
+ return 0;
+}
+
+struct array_data {
+ void *array;
+ u32 elements;
+ size_t size; /* size of each data in array */
+};
+
+static int panel_debugfs_array_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t panel_debugfs_array_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ char *buffer, *bufp;
+ int buf_size;
+ struct array_data *data = file->private_data;
+ int i = 0, elements = data->elements;
+ ssize_t ret = 0;
+
+ /*
+ * Max size:
+ * - 10 digits ("0x" + 8 digits value) + ' '/'\n' = 11 bytes per number
+ * - terminating NUL character
+ */
+ buf_size = elements*11 + 1;
+ buffer = kmalloc(buf_size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ bufp = buffer;
+ while (i < elements) {
+ char term = (i < elements-1) ? ' ' : '\n';
+
+ if (data->size == sizeof(u8)) {
+ u8 *array = (u8 *)data->array;
+
+ bufp += snprintf(bufp, buf_size-(bufp-buffer),
+ "0x%02x%c", array[i], term);
+ } else if (data->size == sizeof(u16)) {
+ u16 *array = (u16 *)data->array;
+
+ bufp += snprintf(bufp, buf_size-(bufp-buffer),
+ "0x%02x%c", array[i], term);
+ } else {
+ u32 *array = (u32 *)data->array;
+
+ bufp += snprintf(bufp, buf_size-(bufp-buffer),
+ "0x%02x%c", array[i], term);
+ }
+ i++;
+ }
+ *bufp = '\0';
+ ret = simple_read_from_buffer(buf, len, ppos,
+ buffer, bufp-buffer);
+
+ kfree(buffer);
+ return ret;
+}
+
+static ssize_t panel_debugfs_array_write(struct file *file,
+ const char __user *p, size_t count, loff_t *ppos)
+{
+ struct array_data *data = file->private_data;
+ char *buffer, *bufp;
+ int buf_size;
+ ssize_t res;
+ int i = 0, elements = data->elements;
+
+ /*
+ * Max size:
+ * - 10 digits ("0x" + 8 digits value) + ' '/'\n' = 11 bytes per number
+ * - terminating NUL character
+ */
+ buf_size = elements*11 + 1;
+ buffer = kmalloc(buf_size, GFP_KERNEL);
+ if (!buffer) {
+ pr_err("Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ res = simple_write_to_buffer(buffer, buf_size, ppos, p, count);
+ if (res)
+ *ppos += res;
+
+ buffer[buf_size-1] = '\0';
+ bufp = buffer;
+
+ while (i < elements) {
+ uint32_t value = 0;
+ int step = 0;
+
+ if (sscanf(bufp, "%x%n", &value, &step) > 0) {
+ if (data->size == sizeof(u8)) {
+ u8 *array = (u8 *)data->array;
+ *(array+i) = (u8)value;
+ } else if (data->size == sizeof(u16)) {
+ u16 *array = (u16 *)data->array;
+ *(array+i) = (u16)value;
+ } else {
+ u32 *array = (u32 *)data->array;
+ *(array+i) = (u32)value;
+ }
+ bufp += step;
+ }
+ i++;
+ }
+ kfree(buffer);
+ return res;
+}
+
+static const struct file_operations panel_debugfs_array_fops = {
+ .owner = THIS_MODULE,
+ .open = panel_debugfs_array_open,
+ .read = panel_debugfs_array_read,
+ .write = panel_debugfs_array_write,
+};
+
+struct dentry *panel_debugfs_create_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ void *array, size_t size, u32 elements)
+{
+ struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data == NULL)
+ return NULL;
+
+	/* only integers of 3 widths (u8, u16, u32) are supported */
+ if ((size != sizeof(u8)) &&
+ (size != sizeof(u16)) &&
+ (size != sizeof(u32))) {
+ pr_warn("Value size %zu bytes is not supported\n", size);
+ kfree(data);
+ return NULL;
+ }
+
+ data->array = array;
+ data->size = size;
+ data->elements = elements;
+
+ return debugfs_create_file(name, mode, parent, data,
+ &panel_debugfs_array_fops);
+}
+
+#define DEBUGFS_CREATE_ARRAY(name, node, array) \
+ panel_debugfs_create_array(name, 0644, node, array, \
+ sizeof(array[0]), ARRAY_SIZE(array))
+
+static int _create_phy_ctrl_nodes(struct mdss_panel_debugfs_info *debugfs_info,
+ struct dentry *node) {
+
+ struct mdss_panel_info *pinfo = &debugfs_info->panel_info;
+ struct dentry *phy_node;
+
+ phy_node = debugfs_create_dir("dsi_phy_ctrl", node);
+ if (IS_ERR_OR_NULL(phy_node)) {
+ pr_err("Debugfs create phy ctrl node failed with error: %ld\n",
+ PTR_ERR(phy_node));
+ return -ENODEV;
+ }
+
+ DEBUGFS_CREATE_ARRAY("regulator", phy_node,
+ pinfo->mipi.dsi_phy_db.regulator);
+ DEBUGFS_CREATE_ARRAY("strength", phy_node,
+ pinfo->mipi.dsi_phy_db.strength);
+ DEBUGFS_CREATE_ARRAY("bistctrl", phy_node,
+ pinfo->mipi.dsi_phy_db.bistctrl);
+ DEBUGFS_CREATE_ARRAY("lanecfg", phy_node,
+ pinfo->mipi.dsi_phy_db.lanecfg);
+ DEBUGFS_CREATE_ARRAY("timing", phy_node,
+ pinfo->mipi.dsi_phy_db.timing);
+
+ return 0;
+}
+
+static int _create_dsi_panel_nodes(struct mdss_panel_debugfs_info *dfs,
+ struct dentry *parent)
+{
+ struct dentry *lcdc_root, *mipi_root, *te_root;
+ struct mdss_panel_info *pinfo = &dfs->panel_info;
+
+ lcdc_root = debugfs_create_dir("lcdc", parent);
+ if (IS_ERR_OR_NULL(lcdc_root)) {
+ pr_err("Debugfs create lcdc dir failed with error: %ld\n",
+ PTR_ERR(lcdc_root));
+ return -ENODEV;
+ }
+ mipi_root = debugfs_create_dir("mipi", parent);
+ if (IS_ERR_OR_NULL(mipi_root)) {
+ pr_err("Debugfs create mipi dir failed with error: %ld\n",
+ PTR_ERR(mipi_root));
+ return -ENODEV;
+ }
+ te_root = debugfs_create_dir("te", parent);
+ if (IS_ERR_OR_NULL(te_root)) {
+ pr_err("Debugfs create te check dir failed with error: %ld\n",
+ PTR_ERR(te_root));
+ return -ENODEV;
+ }
+
+ debugfs_create_u32("partial_update_enabled", 0644, dfs->root,
+ (u32 *)&pinfo->partial_update_enabled);
+ debugfs_create_u32("partial_update_roi_merge", 0644, dfs->root,
+ (u32 *)&pinfo->partial_update_roi_merge);
+ debugfs_create_u32("dcs_cmd_by_left", 0644, dfs->root,
+ (u32 *)&pinfo->dcs_cmd_by_left);
+ debugfs_create_bool("ulps_feature_enabled", 0644, dfs->root,
+ &pinfo->ulps_feature_enabled);
+ debugfs_create_bool("ulps_suspend_enabled", 0644, dfs->root,
+ &pinfo->ulps_suspend_enabled);
+ debugfs_create_bool("esd_check_enabled", 0644, dfs->root,
+ &pinfo->esd_check_enabled);
+ debugfs_create_bool("panel_ack_disabled", 0644, dfs->root,
+ &pinfo->panel_ack_disabled);
+
+ debugfs_create_u32("hsync_skew", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.hsync_skew);
+ debugfs_create_u32("underflow_clr", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.underflow_clr);
+ debugfs_create_u32("border_clr", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.border_clr);
+ debugfs_create_u32("h_back_porch", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.h_back_porch);
+ debugfs_create_u32("h_front_porch", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.h_front_porch);
+ debugfs_create_u32("h_pulse_width", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.h_pulse_width);
+ debugfs_create_u32("v_back_porch", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.v_back_porch);
+ debugfs_create_u32("v_front_porch", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.v_front_porch);
+ debugfs_create_u32("v_pulse_width", 0644, lcdc_root,
+ (u32 *)&pinfo->lcdc.v_pulse_width);
+
+ /* Create mipi related nodes */
+ debugfs_create_u8("frame_rate", 0644, mipi_root,
+ (char *)&pinfo->mipi.frame_rate);
+ debugfs_create_u8("hfp_power_stop", 0644, mipi_root,
+ (char *)&pinfo->mipi.hfp_power_stop);
+ debugfs_create_u8("hsa_power_stop", 0644, mipi_root,
+ (char *)&pinfo->mipi.hsa_power_stop);
+ debugfs_create_u8("hbp_power_stop", 0644, mipi_root,
+ (char *)&pinfo->mipi.hbp_power_stop);
+ debugfs_create_u8("last_line_interleave_en", 0644, mipi_root,
+ (char *)&pinfo->mipi.last_line_interleave_en);
+ debugfs_create_u8("bllp_power_stop", 0644, mipi_root,
+ (char *)&pinfo->mipi.bllp_power_stop);
+ debugfs_create_u8("eof_bllp_power_stop", 0644, mipi_root,
+ (char *)&pinfo->mipi.eof_bllp_power_stop);
+ debugfs_create_u8("data_lane0", 0644, mipi_root,
+ (char *)&pinfo->mipi.data_lane0);
+ debugfs_create_u8("data_lane1", 0644, mipi_root,
+ (char *)&pinfo->mipi.data_lane1);
+ debugfs_create_u8("data_lane2", 0644, mipi_root,
+ (char *)&pinfo->mipi.data_lane2);
+ debugfs_create_u8("data_lane3", 0644, mipi_root,
+ (char *)&pinfo->mipi.data_lane3);
+ debugfs_create_u8("t_clk_pre", 0644, mipi_root,
+ (char *)&pinfo->mipi.t_clk_pre);
+ debugfs_create_u8("t_clk_post", 0644, mipi_root,
+ (char *)&pinfo->mipi.t_clk_post);
+ debugfs_create_u8("stream", 0644, mipi_root,
+ (char *)&pinfo->mipi.stream);
+ debugfs_create_u8("interleave_mode", 0644, mipi_root,
+ (char *)&pinfo->mipi.interleave_mode);
+ debugfs_create_u8("vsync_enable", 0644, mipi_root,
+ (char *)&pinfo->mipi.vsync_enable);
+ debugfs_create_u8("hw_vsync_mode", 0644, mipi_root,
+ (char *)&pinfo->mipi.hw_vsync_mode);
+ debugfs_create_u8("te_sel", 0644, mipi_root,
+ (char *)&pinfo->mipi.te_sel);
+ debugfs_create_u8("insert_dcs_cmd", 0644, mipi_root,
+ (char *)&pinfo->mipi.insert_dcs_cmd);
+ debugfs_create_u8("wr_mem_start", 0644, mipi_root,
+ (char *)&pinfo->mipi.wr_mem_start);
+ debugfs_create_u8("wr_mem_continue", 0644, mipi_root,
+ (char *)&pinfo->mipi.wr_mem_continue);
+ debugfs_create_u8("pulse_mode_hsa_he", 0644, mipi_root,
+ (char *)&pinfo->mipi.pulse_mode_hsa_he);
+ debugfs_create_u8("vc", 0644, mipi_root, (char *)&pinfo->mipi.vc);
+ debugfs_create_u8("lp11_init", 0644, mipi_root,
+ (char *)&pinfo->mipi.lp11_init);
+ debugfs_create_u32("init_delay", 0644, mipi_root,
+ (u32 *)&pinfo->mipi.init_delay);
+ debugfs_create_u8("rx_eot_ignore", 0644, mipi_root,
+ (char *)&pinfo->mipi.rx_eot_ignore);
+ debugfs_create_u8("tx_eot_append", 0644, mipi_root,
+ (char *)&pinfo->mipi.tx_eot_append);
+ debugfs_create_u32("adjust_timer_ms", 0644, mipi_root,
+ (u32 *)&pinfo->adjust_timer_delay_ms);
+
+	/* TE related nodes */
+ debugfs_create_u32("te_tear_check_en", 0644, te_root,
+ (u32 *)&pinfo->te.tear_check_en);
+ debugfs_create_u32("te_sync_cfg_height", 0644, te_root,
+ (u32 *)&pinfo->te.sync_cfg_height);
+ debugfs_create_u32("te_vsync_init_val", 0644, te_root,
+ (u32 *)&pinfo->te.vsync_init_val);
+ debugfs_create_u32("te_sync_threshold_start", 0644, te_root,
+ (u32 *)&pinfo->te.sync_threshold_start);
+ debugfs_create_u32("te_sync_threshold_continue", 0644, te_root,
+ (u32 *)&pinfo->te.sync_threshold_continue);
+	debugfs_create_u32("te_start_pos", 0644, te_root,
+		(u32 *)&pinfo->te.start_pos);
+ debugfs_create_u32("te_rd_ptr_irq", 0644, te_root,
+ (u32 *)&pinfo->te.rd_ptr_irq);
+ debugfs_create_u32("te_refx100", 0644, te_root,
+ (u32 *)&pinfo->te.refx100);
+
+ return 0;
+}
+
+int mdss_panel_debugfs_panel_setup(struct mdss_panel_debugfs_info *debugfs_info,
+ struct mdss_panel_info *panel_info, struct dentry *parent)
+{
+ /* create panel info nodes */
+ debugfs_create_u32("xres", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.xres);
+ debugfs_create_u32("yres", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.yres);
+ debugfs_create_u32("dynamic_fps", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.dynamic_fps);
+ debugfs_create_u32("physical_width", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.physical_width);
+ debugfs_create_u32("physical_height", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.physical_height);
+ debugfs_create_u32("min_refresh_rate", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.min_fps);
+ debugfs_create_u32("max_refresh_rate", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.max_fps);
+ debugfs_create_u64("clk_rate", 0644, debugfs_info->root,
+ (u64 *)&debugfs_info->panel_info.clk_rate);
+ debugfs_create_u32("bl_min", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.bl_min);
+ debugfs_create_u32("bl_max", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.bl_max);
+ debugfs_create_u32("brightness_max", 0644, debugfs_info->root,
+ (u32 *)&debugfs_info->panel_info.brightness_max);
+
+ if ((panel_info->type == MIPI_CMD_PANEL) ||
+ (panel_info->type == MIPI_VIDEO_PANEL)) {
+ _create_dsi_panel_nodes(debugfs_info, debugfs_info->root);
+ _create_phy_ctrl_nodes(debugfs_info, debugfs_info->root);
+ }
+
+ debugfs_info->panel_info = *panel_info;
+ return 0;
+}
+
+int mdss_panel_debugfs_setup(struct mdss_panel_info *panel_info, struct dentry
+ *parent, char *intf_str)
+{
+ struct mdss_panel_debugfs_info *debugfs_info;
+
+ debugfs_info = kzalloc(sizeof(*debugfs_info), GFP_KERNEL);
+ if (!debugfs_info)
+ return -ENOMEM;
+
+ debugfs_info->parent = parent;
+ debugfs_info->root = debugfs_create_dir(intf_str, parent);
+ if (IS_ERR_OR_NULL(debugfs_info->root)) {
+ pr_err("Debugfs create dir failed with error: %ld\n",
+ PTR_ERR(debugfs_info->root));
+ kfree(debugfs_info);
+ return -ENODEV;
+ }
+
+ debugfs_create_u32("override_flag", 0644, parent,
+ (u32 *)&debugfs_info->override_flag);
+
+ mdss_panel_debugfs_fbc_setup(debugfs_info, panel_info,
+ debugfs_info->root);
+ mdss_panel_debugfs_panel_setup(debugfs_info, panel_info,
+ debugfs_info->root);
+
+ debugfs_info->override_flag = 0;
+
+ panel_info->debugfs_info = debugfs_info;
+ return 0;
+}
+
+int mdss_panel_debugfs_init(struct mdss_panel_info *panel_info,
+ char const *panel_name)
+{
+ struct mdss_panel_data *pdata;
+ struct dentry *parent;
+ char intf_str[10];
+ int intf_index = 0;
+ int rc = 0;
+
+ if (panel_info->type != MIPI_VIDEO_PANEL
+ && panel_info->type != MIPI_CMD_PANEL)
+ return -ENOTSUPP;
+
+ pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
+ parent = debugfs_create_dir(panel_name, NULL);
+ if (IS_ERR_OR_NULL(parent)) {
+ pr_err("Debugfs create dir failed with error: %ld\n",
+ PTR_ERR(parent));
+ return -ENODEV;
+ }
+
+ do {
+ snprintf(intf_str, sizeof(intf_str), "intf%d", intf_index++);
+ rc = mdss_panel_debugfs_setup(&pdata->panel_info, parent,
+ intf_str);
+ if (rc) {
+ pr_err("error in initilizing panel debugfs\n");
+ mdss_panel_debugfs_cleanup(&pdata->panel_info);
+ return rc;
+ }
+ pdata = pdata->next;
+ } while (pdata && intf_index < NUM_INTF);
+
+ pr_debug("Initilized mdss_panel_debugfs_info\n");
+ return 0;
+}
+
+/*
+ * mdss_panel_debugfs_cleanup() - tear down debugfs state for a panel chain
+ * @panel_info: panel info embedded in the first mdss_panel_data of the chain
+ *
+ * Frees every per-interface mdss_panel_debugfs_info along the ->next chain
+ * and removes the shared debugfs directory tree. The first non-NULL
+ * debugfs_info supplies @parent, which all interfaces share.
+ *
+ * NOTE(review): pdata->panel_info.debugfs_info is kfree()d but not set to
+ * NULL, leaving a dangling pointer behind — confirm no later reader
+ * dereferences it.
+ */
+void mdss_panel_debugfs_cleanup(struct mdss_panel_info *panel_info)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_debugfs_info *debugfs_info;
+	struct dentry *parent = NULL;
+
+	pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
+	do {
+		debugfs_info = pdata->panel_info.debugfs_info;
+		if (debugfs_info && !parent)
+			parent = debugfs_info->parent;
+		kfree(debugfs_info);
+		pdata = pdata->next;
+	} while (pdata);
+	/* no-op if parent is still NULL */
+	debugfs_remove_recursive(parent);
+	pr_debug("Cleaned up mdss_panel_debugfs_info\n");
+}
+
+/*
+ * mdss_panel_override_te_params() - force SW tear-check parameters
+ * @pinfo: panel whose tearcheck configuration is rewritten
+ *
+ * Reprograms the tear-check block for software-generated TE (simulator
+ * panels): sync height is the full vertical total, and the counter
+ * init/start/read-pointer-irq positions are derived from the panel height.
+ */
+void mdss_panel_override_te_params(struct mdss_panel_info *pinfo)
+{
+	pinfo->te.sync_cfg_height = mdss_panel_get_vtotal(pinfo);
+	pinfo->te.vsync_init_val = pinfo->yres;
+	pinfo->te.start_pos = pinfo->yres;
+	/* fire the read-pointer irq one line past the active region */
+	pinfo->te.rd_ptr_irq = pinfo->yres + 1;
+	pr_debug("SW TE override: read_ptr:%d,start_pos:%d,height:%d,init_val:%d\n",
+		pinfo->te.rd_ptr_irq, pinfo->te.start_pos,
+		pinfo->te.sync_cfg_height,
+		pinfo->te.vsync_init_val);
+}
+
+/*
+ * mdss_panel_debugfsinfo_to_panelinfo() - apply debugfs overrides to panels
+ * @panel_info: panel info embedded in the first mdss_panel_data of the chain
+ *
+ * Copies the user-editable shadow values from each interface's
+ * debugfs_info back into the live mdss_panel_info, for every pdata in the
+ * ->next chain. MIPI-specific fields are copied only for MIPI panels.
+ *
+ * NOTE(review): dfs_info is dereferenced without a NULL check; assumes
+ * mdss_panel_debugfs_init() succeeded for every interface — confirm.
+ * NOTE(review): the SW-TE check/override at the bottom of the loop acts on
+ * @panel_info (the first panel) rather than the per-iteration pinfo —
+ * verify this is intentional for split-display chains.
+ */
+void mdss_panel_debugfsinfo_to_panelinfo(struct mdss_panel_info *panel_info)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	struct mdss_panel_debugfs_info *dfs_info;
+
+	pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
+
+	do {
+		pinfo = &pdata->panel_info;
+		dfs_info = pinfo->debugfs_info;
+
+		/* generic fields, valid for all panel types */
+		pinfo->xres = dfs_info->panel_info.xres;
+		pinfo->yres = dfs_info->panel_info.yres;
+		pinfo->dynamic_fps = dfs_info->panel_info.dynamic_fps;
+		pinfo->physical_width = dfs_info->panel_info.physical_width;
+		pinfo->physical_height = dfs_info->panel_info.physical_height;
+		pinfo->min_fps = dfs_info->panel_info.min_fps;
+		pinfo->max_fps = dfs_info->panel_info.max_fps;
+		pinfo->clk_rate = dfs_info->panel_info.clk_rate;
+		pinfo->bl_min = dfs_info->panel_info.bl_min;
+		pinfo->bl_max = dfs_info->panel_info.bl_max;
+		pinfo->brightness_max = dfs_info->panel_info.brightness_max;
+		pinfo->adjust_timer_delay_ms =
+			dfs_info->panel_info.adjust_timer_delay_ms;
+
+		/* MIPI-only fields */
+		if ((pinfo->type == MIPI_CMD_PANEL) ||
+		    (pinfo->type == MIPI_VIDEO_PANEL)) {
+			pinfo->fbc = dfs_info->panel_info.fbc;
+			pinfo->lcdc = dfs_info->panel_info.lcdc;
+			pinfo->mipi = dfs_info->panel_info.mipi;
+			pinfo->te = dfs_info->panel_info.te;
+			pinfo->partial_update_enabled =
+				dfs_info->panel_info.partial_update_enabled;
+			pinfo->partial_update_roi_merge =
+				dfs_info->panel_info.partial_update_roi_merge;
+			pinfo->dcs_cmd_by_left =
+				dfs_info->panel_info.dcs_cmd_by_left;
+			pinfo->ulps_feature_enabled =
+				dfs_info->panel_info.ulps_feature_enabled;
+			pinfo->ulps_suspend_enabled =
+				dfs_info->panel_info.ulps_suspend_enabled;
+			pinfo->esd_check_enabled =
+				dfs_info->panel_info.esd_check_enabled;
+			pinfo->panel_ack_disabled =
+				dfs_info->panel_info.panel_ack_disabled;
+		}
+
+		pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
+
+		/* override te parameters if panel is in sw te mode */
+		if (panel_info->sim_panel_mode == SIM_SW_TE_MODE)
+			mdss_panel_override_te_params(panel_info);
+
+		pdata = pdata->next;
+	} while (pdata);
+}
+
+/*
+ * mdss_panel_get_timing_by_name() - look up a panel timing by its name
+ * @pdata: panel data holding the timings_list to search
+ * @name: timing name to match (exact strcmp match)
+ *
+ * Return: matching mdss_panel_timing, or NULL if @pdata/@name is NULL or
+ * no timing with that name exists.
+ */
+struct mdss_panel_timing *mdss_panel_get_timing_by_name(
+		struct mdss_panel_data *pdata,
+		const char *name)
+{
+	struct mdss_panel_timing *pt;
+
+	if (pdata && name) {
+		list_for_each_entry(pt, &pdata->timings_list, list)
+			if (pt->name && !strcmp(pt->name, name))
+				return pt;
+	}
+
+	return NULL;
+}
+
+/*
+ * mdss_panel_info_from_timing() - populate panel info from a timing entry
+ * @pt: source timing (resolution, porches, compression, tearcheck, ...)
+ * @pinfo: destination panel info to update in place
+ *
+ * Copies the resolution, horizontal/vertical blanking, borders, layer-mixer
+ * widths, frame rate and compression (DSC/FBC) configuration from @pt into
+ * @pinfo. If the panel is a SW-TE simulator panel the tearcheck parameters
+ * are then re-derived via mdss_panel_override_te_params(). NULL arguments
+ * are ignored.
+ */
+void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
+		struct mdss_panel_info *pinfo)
+{
+	if (!pt || !pinfo)
+		return;
+
+	/* horizontal timing */
+	pinfo->clk_rate = pt->clk_rate;
+	pinfo->xres = pt->xres;
+	pinfo->lcdc.h_front_porch = pt->h_front_porch;
+	pinfo->lcdc.h_back_porch = pt->h_back_porch;
+	pinfo->lcdc.h_pulse_width = pt->h_pulse_width;
+
+	/* vertical timing */
+	pinfo->yres = pt->yres;
+	pinfo->lcdc.v_front_porch = pt->v_front_porch;
+	pinfo->lcdc.v_back_porch = pt->v_back_porch;
+	pinfo->lcdc.v_pulse_width = pt->v_pulse_width;
+
+	/* border colors plus derived total padding per axis */
+	pinfo->lcdc.border_bottom = pt->border_bottom;
+	pinfo->lcdc.border_top = pt->border_top;
+	pinfo->lcdc.border_left = pt->border_left;
+	pinfo->lcdc.border_right = pt->border_right;
+	pinfo->lcdc.xres_pad = pt->border_left + pt->border_right;
+	pinfo->lcdc.yres_pad = pt->border_top + pt->border_bottom;
+
+	pinfo->lm_widths[0] = pt->lm_widths[0];
+	pinfo->lm_widths[1] = pt->lm_widths[1];
+
+	pinfo->mipi.frame_rate = pt->frame_rate;
+	pinfo->edp.frame_rate = pinfo->mipi.frame_rate;
+
+	/* compression configuration for this timing */
+	pinfo->dsc = pt->dsc;
+	pinfo->dsc_enc_total = pt->dsc_enc_total;
+	pinfo->fbc = pt->fbc;
+	pinfo->compression_mode = pt->compression_mode;
+
+	pinfo->roi_alignment = pt->roi_alignment;
+	pinfo->te = pt->te;
+
+	/* override te parameters if panel is in sw te mode */
+	if (pinfo->sim_panel_mode == SIM_SW_TE_MODE)
+		mdss_panel_override_te_params(pinfo);
+}
+
+/*
+ * All the calculations done by this routine only depend on slice_width
+ * and slice_height. They are independent of picture dimension and dsc_merge.
+ * Thus this function should be called only when slice dimension changes.
+ * Since currently we don't support dynamic slice dimension changes, this
+ * routine shall be called only during probe.
+ *
+ * Inputs read from @dsc: version, scr_rev, bpp, bpc, slice_width,
+ * slice_height. Everything else in @dsc is (re)computed here. The fixed
+ * constants (rc_model_size, flatness QPs, rate-control tables, etc.) match
+ * the DSC v1.1 rate-control defaults, with a QTI SCR variant selected for
+ * version 1.1 scr_rev 1.
+ *
+ * NOTE(review): nfl_bpg_offset divides by (slice_height - 1); a slice
+ * height of 1 would divide by zero — confirm callers never pass that.
+ * NOTE(review): slice_last_group_size evaluates to 3 (not 0) when
+ * slice_width is a multiple of 3 — confirm this matches hardware intent.
+ */
+void mdss_panel_dsc_parameters_calc(struct dsc_desc *dsc)
+{
+	int bpp, bpc;
+	int mux_words_size;
+	int groups_per_line, groups_total;
+	int min_rate_buffer_size;
+	int hrd_delay;
+	int pre_num_extra_mux_bits, num_extra_mux_bits;
+	int slice_bits;
+	int target_bpp_x16;
+	int data;
+	int final_value, final_scale;
+
+	/* fixed rate-control parameters (DSC 1.1 defaults) */
+	dsc->rc_model_size = 8192;	/* rate_buffer_size */
+	if (dsc->version == 0x11 && dsc->scr_rev == 0x1)
+		dsc->first_line_bpg_offset = 15;
+	else
+		dsc->first_line_bpg_offset = 12;
+	dsc->min_qp_flatness = 3;
+	dsc->max_qp_flatness = 12;
+	dsc->line_buf_depth = 9;
+
+	dsc->edge_factor = 6;
+	dsc->quant_incr_limit0 = 11;
+	dsc->quant_incr_limit1 = 11;
+	dsc->tgt_offset_hi = 3;
+	dsc->tgt_offset_lo = 3;
+
+	/* rate-control tables; SCR variant for v1.1 scr rev 1 */
+	dsc->buf_thresh = dsc_rc_buf_thresh;
+	if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
+		dsc->range_min_qp = dsc_rc_range_min_qp_1_1_scr1;
+		dsc->range_max_qp = dsc_rc_range_max_qp_1_1_scr1;
+	} else {
+		dsc->range_min_qp = dsc_rc_range_min_qp_1_1;
+		dsc->range_max_qp = dsc_rc_range_max_qp_1_1;
+	}
+	dsc->range_bpg_offset = dsc_rc_range_bpg_offset;
+
+	bpp = dsc->bpp;
+	bpc = dsc->bpc;
+
+	/* only bpp 8 and 12 are handled here */
+	if (bpp == 8)
+		dsc->initial_offset = 6144;
+	else
+		dsc->initial_offset = 2048;	/* bpp = 12 */
+
+	if (bpc == 8)
+		mux_words_size = 48;
+	else
+		mux_words_size = 64;		/* bpc == 12 */
+
+	dsc->slice_last_group_size = 3 - (dsc->slice_width % 3);
+
+	dsc->det_thresh_flatness = 7 + 2*(bpc - 8);
+
+	dsc->initial_xmit_delay = dsc->rc_model_size / (2 * bpp);
+
+	/* a "group" is 3 pixels wide */
+	groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+
+	/* bytes per slice line, rounded up */
+	dsc->chunk_size = dsc->slice_width * bpp / 8;
+	if ((dsc->slice_width * bpp) % 8)
+		dsc->chunk_size++;
+
+	/* rbs-min */
+	min_rate_buffer_size =  dsc->rc_model_size - dsc->initial_offset +
+			dsc->initial_xmit_delay * bpp +
+			groups_per_line * dsc->first_line_bpg_offset;
+
+	hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, bpp);
+
+	dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
+
+	dsc->initial_scale_value = 8 * dsc->rc_model_size /
+			(dsc->rc_model_size - dsc->initial_offset);
+
+	slice_bits = 8 * dsc->chunk_size * dsc->slice_height;
+
+	groups_total = groups_per_line * dsc->slice_height;
+
+	data = dsc->first_line_bpg_offset * 2048;
+
+	dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
+
+	pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * bpc + 4) - 2);
+
+	num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
+		((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
+
+	data = 2048 * (dsc->rc_model_size - dsc->initial_offset
+		+ num_extra_mux_bits);
+	dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+
+	/* bpp * 16 + 0.5 */
+	data = bpp * 16;
+	data *= 2;
+	data++;
+	data /= 2;
+	target_bpp_x16 = data;
+
+	data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+	final_value =  dsc->rc_model_size - data + num_extra_mux_bits;
+
+	final_scale = 8 * dsc->rc_model_size /
+		(dsc->rc_model_size - final_value);
+
+	dsc->final_offset = final_value;
+
+	data = (final_scale - 9) * (dsc->nfl_bpg_offset +
+		dsc->slice_bpg_offset);
+	dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
+
+	dsc->scale_decrement_interval = groups_per_line /
+		(dsc->initial_scale_value - 8);
+
+	pr_debug("initial_xmit_delay=%d\n", dsc->initial_xmit_delay);
+	pr_debug("bpg_offset, nfl=%d slice=%d\n",
+		dsc->nfl_bpg_offset, dsc->slice_bpg_offset);
+	pr_debug("groups_per_line=%d chunk_size=%d\n",
+		groups_per_line, dsc->chunk_size);
+	pr_debug("min_rate_buffer_size=%d hrd_delay=%d\n",
+		min_rate_buffer_size, hrd_delay);
+	pr_debug("initial_dec_delay=%d initial_scale_value=%d\n",
+		dsc->initial_dec_delay, dsc->initial_scale_value);
+	pr_debug("slice_bits=%d, groups_total=%d\n", slice_bits, groups_total);
+	pr_debug("first_line_bgp_offset=%d slice_height=%d\n",
+		dsc->first_line_bpg_offset, dsc->slice_height);
+	pr_debug("final_value=%d final_scale=%d\n", final_value, final_scale);
+	/* NOTE(review): "sacle" typo in the log string below left as-is */
+	pr_debug("sacle_increment_interval=%d scale_decrement_interval=%d\n",
+		dsc->scale_increment_interval, dsc->scale_decrement_interval);
+}
+
+/*
+ * mdss_panel_dsc_update_pic_dim() - set the DSC picture dimensions
+ * @dsc: descriptor whose pic_width/pic_height are updated
+ * @pic_width: new picture width in pixels
+ * @pic_height: new picture height in pixels
+ *
+ * Validates that both dimensions are non-zero and integer multiples of the
+ * configured slice dimensions before storing them. On invalid input the
+ * descriptor is left unchanged and an error is logged.
+ */
+void mdss_panel_dsc_update_pic_dim(struct dsc_desc *dsc,
+	int pic_width, int pic_height)
+{
+	if (!dsc || !pic_width || !pic_height) {
+		pr_err("Error: invalid input. pic_width=%d pic_height=%d\n",
+			pic_width, pic_height);
+		return;
+	}
+
+	if ((pic_width % dsc->slice_width) ||
+	    (pic_height % dsc->slice_height)) {
+		pr_err("Error: pic_dim=%dx%d has to be multiple of slice_dim=%dx%d\n",
+			pic_width, pic_height,
+			dsc->slice_width, dsc->slice_height);
+		return;
+	}
+
+	dsc->pic_width = pic_width;
+	dsc->pic_height = pic_height;
+}
+
+/*
+ * mdss_panel_dsc_initial_line_calc() - compute dsc->initial_lines
+ * @dsc: descriptor; slice_width, bpc and initial_xmit_delay must be set
+ * @enc_ip_width: encoder input width in pixels (>= slice_width)
+ *
+ * Derives the number of initial lines the encoder needs from the sub-stream
+ * multiplexer delay (84 pixels for bpc < 10, else 92), the transmit delay,
+ * and a fixed 47-pixel overhead; doubled ssm delay is added when more than
+ * one soft slice fits in the encoder input. Invalid input (including an
+ * initial_xmit_delay above 512) is rejected with an error log.
+ */
+void mdss_panel_dsc_initial_line_calc(struct dsc_desc *dsc, int enc_ip_width)
+{
+	int ssm_delay, total_pixels, soft_slice_per_enc;
+
+#define MAX_XMIT_DELAY 512
+	if (!dsc || !enc_ip_width || !dsc->slice_width ||
+	    (enc_ip_width < dsc->slice_width) ||
+	    (dsc->initial_xmit_delay > MAX_XMIT_DELAY)) {
+		pr_err("Error: invalid input\n");
+		return;
+	}
+#undef MAX_XMIT_DELAY
+
+	soft_slice_per_enc = enc_ip_width / dsc->slice_width;
+	ssm_delay = ((dsc->bpc < 10) ? 84 : 92);
+	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
+	if (soft_slice_per_enc > 1)
+		total_pixels += (ssm_delay * 3);
+
+	dsc->initial_lines = DIV_ROUND_UP(total_pixels, dsc->slice_width);
+}
+
+/*
+ * mdss_panel_dsc_pclk_param_calc() - derive per-line byte/clock parameters
+ * @dsc: descriptor; slice_width, slice_per_pkt and bpp must be set
+ * @intf_width: active interface width in pixels (>= slice_width)
+ *
+ * Computes bytes_in_slice, bytes_per_pkt, eol_byte_num, pclk_per_line and
+ * pkt_per_line from the slice layout across the interface. slice_per_pkt
+ * falls back to 1 when it exceeds the slices that fit in @intf_width
+ * (e.g. during partial update).
+ *
+ * NOTE(review): pkt_per_line uses integer division, so a remainder of
+ * slices per interface is truncated — confirm slice_per_pkt always divides
+ * slice_per_intf in supported configurations.
+ */
+void mdss_panel_dsc_pclk_param_calc(struct dsc_desc *dsc, int intf_width)
+{
+	int slice_per_pkt, slice_per_intf;
+	int bytes_in_slice, total_bytes_per_intf;
+
+	if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
+	    (intf_width < dsc->slice_width)) {
+		pr_err("Error: invalid input. intf_width=%d slice_width=%d\n",
+			intf_width,
+			dsc ? dsc->slice_width : -1);
+		return;
+	}
+
+	slice_per_pkt = dsc->slice_per_pkt;
+	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
+
+	/*
+	 * If slice_per_pkt is greater than slice_per_intf then default to 1.
+	 * This can happen during partial update.
+	 */
+	if (slice_per_pkt > slice_per_intf)
+		slice_per_pkt = 1;
+
+	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
+	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
+
+	/* 3 bytes of compressed data per pixel clock */
+	dsc->eol_byte_num = total_bytes_per_intf % 3;
+	dsc->pclk_per_line =  DIV_ROUND_UP(total_bytes_per_intf, 3);
+	dsc->bytes_in_slice = bytes_in_slice;
+	dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
+	dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
+
+	pr_debug("slice_per_pkt=%d slice_per_intf=%d bytes_in_slice=%d total_bytes_per_intf=%d\n",
+		slice_per_pkt, slice_per_intf,
+		bytes_in_slice, total_bytes_per_intf);
+}
+
+/*
+ * mdss_panel_dsc_prepare_pps_buf() - serialize a DSC Picture Parameter Set
+ * @dsc: fully-populated DSC configuration to serialize
+ * @buf: destination buffer; caller must provide at least DSC_PPS_LEN (128)
+ *       bytes (bytes pps88-pps127 are reserved and are not written here,
+ *       so the caller should supply a zeroed buffer — TODO confirm)
+ * @pps_id: PPS identifier stored in pps1
+ *
+ * Packs the dsc_desc fields into the 128-byte PPS wire layout (pps0-pps87
+ * written, remainder reserved), including the 14-entry buf_thresh table and
+ * the 15-entry rate-control range parameters.
+ *
+ * Return: DSC_PPS_LEN (128), the total PPS length.
+ */
+int mdss_panel_dsc_prepare_pps_buf(struct dsc_desc *dsc, char *buf,
+	int pps_id)
+{
+	char *bp;
+	char data;
+	int i, bpp;
+
+	bp = buf;
+	*bp++ = (dsc->version & 0xff);		/* pps0 */
+	*bp++ = (pps_id & 0xff);		/* pps1 */
+	bp++;					/* pps2, reserved */
+
+	data = dsc->line_buf_depth & 0x0f;
+	data |= ((dsc->bpc & 0xf) << 4);
+	*bp++ = data;				 /* pps3 */
+
+	/* bpp is carried as a 6.4 fixed-point value split over pps4/pps5 */
+	bpp = dsc->bpp;
+	bpp <<= 4;	/* 4 fraction bits */
+	data = (bpp >> 8);
+	data &= 0x03;		/* upper two bits */
+	data |= ((dsc->block_pred_enable & 0x1) << 5);
+	data |= ((dsc->convert_rgb & 0x1) << 4);
+	data |= ((dsc->enable_422 & 0x1) << 3);
+	data |= ((dsc->vbr_enable & 0x1) << 2);
+	*bp++ = data;				/* pps4 */
+	*bp++ = (bpp & 0xff);			/* pps5 */
+
+	/* 16-bit big-endian picture and slice dimensions */
+	*bp++ = ((dsc->pic_height >> 8) & 0xff); /* pps6 */
+	*bp++ = (dsc->pic_height & 0x0ff);	/* pps7 */
+	*bp++ = ((dsc->pic_width >> 8) & 0xff);	/* pps8 */
+	*bp++ = (dsc->pic_width & 0x0ff);	/* pps9 */
+
+	*bp++ = ((dsc->slice_height >> 8) & 0xff);/* pps10 */
+	*bp++ = (dsc->slice_height & 0x0ff);	/* pps11 */
+	*bp++ = ((dsc->slice_width >> 8) & 0xff); /* pps12 */
+	*bp++ = (dsc->slice_width & 0x0ff);	/* pps13 */
+
+	*bp++ = ((dsc->chunk_size >> 8) & 0xff);/* pps14 */
+	*bp++ = (dsc->chunk_size & 0x0ff);	/* pps15 */
+
+	*bp++ = (dsc->initial_xmit_delay >> 8) & 0x3; /* pps16, bit 0, 1 */
+	*bp++ = (dsc->initial_xmit_delay & 0xff);/* pps17 */
+
+	*bp++ = ((dsc->initial_dec_delay >> 8) & 0xff); /* pps18 */
+	*bp++ = (dsc->initial_dec_delay & 0xff);/* pps19 */
+
+	bp++;					/* pps20, reserved */
+
+	*bp++ = (dsc->initial_scale_value & 0x3f); /* pps21 */
+
+	*bp++ = ((dsc->scale_increment_interval >> 8) & 0xff); /* pps22 */
+	*bp++ = (dsc->scale_increment_interval & 0xff); /* pps23 */
+
+	*bp++ = ((dsc->scale_decrement_interval >> 8) & 0xf); /* pps24 */
+	*bp++ = (dsc->scale_decrement_interval & 0x0ff);/* pps25 */
+
+	bp++;			/* pps26, reserved */
+
+	*bp++ = (dsc->first_line_bpg_offset & 0x1f);/* pps27 */
+
+	*bp++ = ((dsc->nfl_bpg_offset >> 8) & 0xff);/* pps28 */
+	*bp++ = (dsc->nfl_bpg_offset & 0x0ff);	/* pps29 */
+	*bp++ = ((dsc->slice_bpg_offset >> 8) & 0xff);/* pps30 */
+	*bp++ = (dsc->slice_bpg_offset & 0x0ff);/* pps31 */
+
+	*bp++ = ((dsc->initial_offset >> 8) & 0xff);/* pps32 */
+	*bp++ = (dsc->initial_offset & 0x0ff);	/* pps33 */
+
+	*bp++ = ((dsc->final_offset >> 8) & 0xff);/* pps34 */
+	*bp++ = (dsc->final_offset & 0x0ff);	/* pps35 */
+
+	*bp++ = (dsc->min_qp_flatness & 0x1f);	/* pps36 */
+	*bp++ = (dsc->max_qp_flatness & 0x1f);	/* pps37 */
+
+	*bp++ = ((dsc->rc_model_size >> 8) & 0xff);/* pps38 */
+	*bp++ = (dsc->rc_model_size & 0x0ff);	/* pps39 */
+
+	*bp++ = (dsc->edge_factor & 0x0f);	/* pps40 */
+
+	*bp++ = (dsc->quant_incr_limit0 & 0x1f);	/* pps41 */
+	*bp++ = (dsc->quant_incr_limit1 & 0x1f);	/* pps42 */
+
+	data = ((dsc->tgt_offset_hi & 0xf) << 4);
+	data |= (dsc->tgt_offset_lo & 0x0f);
+	*bp++ = data;				/* pps43 */
+
+	for (i = 0; i < 14; i++)
+		*bp++ = (dsc->buf_thresh[i] & 0xff);/* pps44 - pps57 */
+
+	/* each RC range entry packs min_qp(5) max_qp(5) bpg_offset(6) */
+	for (i = 0; i < 15; i++) {		/* pps58 - pps87 */
+		data = (dsc->range_min_qp[i] & 0x1f); /* 5 bits */
+		data <<= 3;
+		data |= ((dsc->range_max_qp[i] >> 2) & 0x07); /* 3 bits */
+		*bp++ = data;
+		data = (dsc->range_max_qp[i] & 0x03); /* 2 bits */
+		data <<= 6;
+		data |= (dsc->range_bpg_offset[i] & 0x3f); /* 6 bits */
+		*bp++ = data;
+	}
+
+	/* pps88 to pps127 are reserved */
+
+	return DSC_PPS_LEN;	/* 128 */
+}
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
new file mode 100644
index 0000000..53db752
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -0,0 +1,1207 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_PANEL_H
+#define MDSS_PANEL_H
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+
+#define KHZ_TO_HZ 1000
+
+/* panel id type */
+struct panel_id {
+ u16 id;
+ u16 type;
+};
+
+enum fps_resolution {
+ FPS_RESOLUTION_DEFAULT,
+ FPS_RESOLUTION_HZ,
+ FPS_RESOLUTION_KHZ,
+};
+
+#define DEFAULT_FRAME_RATE 60
+#define DEFAULT_ROTATOR_FRAME_RATE 120
+#define ROTATOR_LOW_FRAME_RATE 30
+#define MDSS_DSI_RST_SEQ_LEN 10
+/* worst case prefill lines for all chipsets including all vertical blank */
+#define MDSS_MDP_MAX_PREFILL_FETCH 25
+
+#define OVERRIDE_CFG "override"
+#define SIM_PANEL "sim"
+#define SIM_SW_TE_PANEL "sim-swte"
+#define SIM_HW_TE_PANEL "sim-hwte"
+
+/* panel type list */
+#define NO_PANEL 0xffff /* No Panel */
+#define MDDI_PANEL 1 /* MDDI */
+#define EBI2_PANEL 2 /* EBI2 */
+#define LCDC_PANEL 3 /* internal LCDC type */
+#define EXT_MDDI_PANEL 4 /* Ext.MDDI */
+#define TV_PANEL 5 /* TV */
+#define HDMI_PANEL 6 /* HDMI TV */
+#define DTV_PANEL 7 /* DTV */
+#define MIPI_VIDEO_PANEL 8 /* MIPI */
+#define MIPI_CMD_PANEL 9 /* MIPI */
+#define WRITEBACK_PANEL 10 /* Wifi display */
+#define LVDS_PANEL 11 /* LVDS */
+#define EDP_PANEL 12 /* LVDS */
+
+#define DSC_PPS_LEN 128
+
+/* HDR properties count */
+#define DISPLAY_PRIMARIES_COUNT 8 /* WRGB x and y values*/
+
+/*
+ * mdss_panel2str() - map a *_PANEL type id to a printable name
+ * @panel: one of the panel type defines above (e.g. MIPI_VIDEO_PANEL)
+ *
+ * Return: a static string such as "MIPI_VIDEO", or "UNKNOWN" for ids with
+ * no entry in the table. Never returns NULL.
+ */
+static inline const char *mdss_panel2str(u32 panel)
+{
+	static const char * const names[] = {
+#define PANEL_NAME(n)[n ## _PANEL] = __stringify(n)
+		PANEL_NAME(MIPI_VIDEO),
+		PANEL_NAME(MIPI_CMD),
+		PANEL_NAME(EDP),
+		PANEL_NAME(HDMI),
+		PANEL_NAME(DTV),
+		PANEL_NAME(WRITEBACK),
+#undef PANEL_NAME
+	};
+
+	/* sparse table: unlisted ids have NULL entries */
+	if (panel >= ARRAY_SIZE(names) || !names[panel])
+		return "UNKNOWN";
+
+	return names[panel];
+}
+
+/* panel class */
+enum {
+ DISPLAY_LCD = 0, /* lcd = ebi2/mddi */
+ DISPLAY_LCDC, /* lcdc */
+ DISPLAY_TV, /* TV Out */
+ DISPLAY_EXT_MDDI, /* External MDDI */
+ DISPLAY_WRITEBACK,
+};
+
+/* panel device location */
+enum {
+ DISPLAY_1 = 0, /* attached as first device */
+ DISPLAY_2, /* attached on second device */
+ DISPLAY_3, /* attached on third device */
+ DISPLAY_4, /* attached on fourth device */
+ MAX_PHYS_TARGET_NUM,
+};
+
+enum {
+ MDSS_PANEL_INTF_INVALID = -1,
+ MDSS_PANEL_INTF_DSI,
+ MDSS_PANEL_INTF_EDP,
+ MDSS_PANEL_INTF_HDMI,
+};
+
+enum {
+ MDSS_PANEL_POWER_OFF = 0,
+ MDSS_PANEL_POWER_ON,
+ MDSS_PANEL_POWER_LP1,
+ MDSS_PANEL_POWER_LP2,
+};
+
+enum {
+ MDSS_PANEL_LOW_PERSIST_MODE_OFF = 0,
+ MDSS_PANEL_LOW_PERSIST_MODE_ON,
+};
+
+enum {
+ MODE_GPIO_NOT_VALID = 0,
+ MODE_GPIO_HIGH,
+ MODE_GPIO_LOW,
+};
+
+/*
+ * enum sim_panel_modes - Different panel modes for simulator panels
+ *
+ * @SIM_MODE: Disables all host reads for video mode simulator panels.
+ * @SIM_SW_TE_MODE:	Disables all host reads and generates the SW TE. Used
+ * for cmd mode simulator panels.
+ * @SIM_HW_TE_MODE: Disables all host reads and expects TE from hardware
+ * (terminator card). Used for cmd mode simulator panels.
+ */
+enum {
+ SIM_MODE = 1,
+ SIM_SW_TE_MODE,
+ SIM_HW_TE_MODE,
+};
+
+struct mdss_rect {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+#define MDSS_MAX_PANEL_LEN 256
+#define MDSS_INTF_MAX_NAME_LEN 5
+#define MDSS_DISPLAY_ID_MAX_LEN 16
+struct mdss_panel_intf {
+ char name[MDSS_INTF_MAX_NAME_LEN];
+ int type;
+};
+
+struct mdss_panel_cfg {
+ char arg_cfg[MDSS_MAX_PANEL_LEN + 1];
+ int pan_intf;
+ bool lk_cfg;
+ bool init_done;
+};
+
+#define MDP_INTF_DSI_CMD_FIFO_UNDERFLOW 0x0001
+#define MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW 0x0002
+
+
+enum {
+ MDP_INTF_CALLBACK_DSI_WAIT,
+};
+
+struct mdss_intf_recovery {
+ int (*fxn)(void *ctx, int event);
+ void *data;
+};
+
+/**
+ * enum mdss_intf_events - Different events generated by MDP core
+ *
+ * @MDSS_EVENT_RESET: MDP control path is being (re)initialized.
+ * @MDSS_EVENT_LINK_READY Interface data path inited to ready state.
+ * @MDSS_EVENT_UNBLANK: Sent before first frame update from MDP is
+ * sent to panel.
+ * @MDSS_EVENT_PANEL_ON: After first frame update from MDP.
+ * @MDSS_EVENT_POST_PANEL_ON send 2nd phase panel on commands to panel
+ * @MDSS_EVENT_BLANK: MDP has no contents to display only blank screen
+ * is shown in panel. Sent before panel off.
+ * @MDSS_EVENT_PANEL_OFF: MDP has suspended frame updates, panel should be
+ * completely shutdown after this call.
+ * @MDSS_EVENT_CLOSE: MDP has tore down entire session.
+ * @MDSS_EVENT_SUSPEND: Propagation of power suspend event.
+ * @MDSS_EVENT_RESUME: Propagation of power resume event.
+ * @MDSS_EVENT_CHECK_PARAMS: Event generated when a panel reconfiguration is
+ * requested including when resolution changes.
+ * The event handler receives pointer to
+ * struct mdss_panel_info and should return one of:
+ * - negative if the configuration is invalid
+ * - 0 if there is no panel reconfig needed
+ * - 1 if reconfig is needed to take effect
+ * @MDSS_EVENT_CONT_SPLASH_BEGIN: Special event used to handle transition of
+ * display state from boot loader to panel driver.
+ * The event handler will disable the panel.
+ * @MDSS_EVENT_CONT_SPLASH_FINISH: Special event used to handle transition of
+ * display state from boot loader to panel driver.
+ * The event handler will enable the panel and
+ * vote for the display clocks.
+ * @MDSS_EVENT_PANEL_UPDATE_FPS: Event to update the frame rate of the panel.
+ * @MDSS_EVENT_FB_REGISTERED: Called after fb dev driver has been registered,
+ * panel driver gets ptr to struct fb_info which
+ * holds fb dev information.
+ * @MDSS_EVENT_PANEL_CLK_CTRL: panel clock control
+ * - 0 clock disable
+ * - 1 clock enable
+ * @MDSS_EVENT_DSI_CMDLIST_KOFF: acquire dsi_mdp_busy lock before kickoff.
+ * @MDSS_EVENT_ENABLE_PARTIAL_ROI: Event to update ROI of the panel.
+ * @MDSS_EVENT_DSC_PPS_SEND: Event to send DSC PPS command to panel.
+ * @MDSS_EVENT_DSI_STREAM_SIZE: Event to update DSI controller's stream size
+ * @MDSS_EVENT_DSI_UPDATE_PANEL_DATA: Event to update the dsi driver structures
+ * based on the dsi mode passed as argument.
+ * - 0: update to video mode
+ * - 1: update to command mode
+ * @MDSS_EVENT_REGISTER_RECOVERY_HANDLER: Event to recover the interface in
+ * case there was any errors detected.
+ * @MDSS_EVENT_REGISTER_MDP_CALLBACK: Event to register callback to MDP driver.
+ * @MDSS_EVENT_DSI_PANEL_STATUS: Event to check the panel status
+ * <= 0: panel check fail
+ * > 0: panel check success
+ * @MDSS_EVENT_DSI_DYNAMIC_SWITCH: Send DCS command to panel to initiate
+ * switching panel to new mode
+ * - MIPI_VIDEO_PANEL: switch to video mode
+ * - MIPI_CMD_PANEL: switch to command mode
+ * @MDSS_EVENT_DSI_RECONFIG_CMD: Setup DSI controller in new mode
+ * - MIPI_VIDEO_PANEL: switch to video mode
+ * - MIPI_CMD_PANEL: switch to command mode
+ * @MDSS_EVENT_DSI_RESET_WRITE_PTR: Reset the write pointer coordinates on
+ * the panel.
+ * @MDSS_EVENT_PANEL_TIMING_SWITCH: Panel timing switch is requested.
+ * Argument provided is new panel timing.
+ */
+enum mdss_intf_events {
+ MDSS_EVENT_RESET = 1,
+ MDSS_EVENT_LINK_READY,
+ MDSS_EVENT_UNBLANK,
+ MDSS_EVENT_PANEL_ON,
+ MDSS_EVENT_POST_PANEL_ON,
+ MDSS_EVENT_BLANK,
+ MDSS_EVENT_PANEL_OFF,
+ MDSS_EVENT_CLOSE,
+ MDSS_EVENT_SUSPEND,
+ MDSS_EVENT_RESUME,
+ MDSS_EVENT_CHECK_PARAMS,
+ MDSS_EVENT_CONT_SPLASH_BEGIN,
+ MDSS_EVENT_CONT_SPLASH_FINISH,
+ MDSS_EVENT_PANEL_UPDATE_FPS,
+ MDSS_EVENT_FB_REGISTERED,
+ MDSS_EVENT_PANEL_CLK_CTRL,
+ MDSS_EVENT_DSI_CMDLIST_KOFF,
+ MDSS_EVENT_ENABLE_PARTIAL_ROI,
+ MDSS_EVENT_DSC_PPS_SEND,
+ MDSS_EVENT_DSI_STREAM_SIZE,
+ MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
+ MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+ MDSS_EVENT_REGISTER_MDP_CALLBACK,
+ MDSS_EVENT_DSI_PANEL_STATUS,
+ MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+ MDSS_EVENT_DSI_RECONFIG_CMD,
+ MDSS_EVENT_DSI_RESET_WRITE_PTR,
+ MDSS_EVENT_PANEL_TIMING_SWITCH,
+ MDSS_EVENT_UPDATE_PARAMS,
+ MDSS_EVENT_MAX,
+};
+
+struct lcd_panel_info {
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 h_pulse_width;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 v_pulse_width;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+ u32 border_top;
+ u32 border_bottom;
+ u32 border_left;
+ u32 border_right;
+ /* Pad width */
+ u32 xres_pad;
+ /* Pad height */
+ u32 yres_pad;
+ u32 frame_rate;
+};
+
+
+/* DSI PHY configuration */
+struct mdss_dsi_phy_ctrl {
+ char regulator[7]; /* 8996, 1 * 5 */
+ char timing[12];
+ char ctrl[4];
+ char strength[10]; /* 8996, 2 * 5 */
+ char bistctrl[6];
+ uint32_t pll[21];
+ char lanecfg[45]; /* 8996, 4 * 5 */
+ bool reg_ldo_mode;
+
+ char timing_8996[40];/* 8996, 8 * 5 */
+ char regulator_len;
+ char strength_len;
+ char lanecfg_len;
+};
+
+/**
+ * enum dynamic_mode_switch - Dynamic mode switch methods
+ * @DYNAMIC_MODE_SWITCH_DISABLED: Dynamic mode switch is not supported
+ * @DYNAMIC_MODE_SWITCH_SUSPEND_RESUME: Switch requires panel suspend/resume
+ * @DYNAMIC_MODE_SWITCH_IMMEDIATE: Supports video/cmd mode switch immediately
+ * @DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE: Panel supports display resolution
+ * switch immediately.
+ **/
+enum dynamic_mode_switch {
+ DYNAMIC_MODE_SWITCH_DISABLED = 0,
+ DYNAMIC_MODE_SWITCH_SUSPEND_RESUME,
+ DYNAMIC_MODE_SWITCH_IMMEDIATE,
+ DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE,
+};
+
+/**
+ * enum dynamic_switch_modes - Type of dynamic mode switch to be given as
+ * argument to MDSS_EVENT_DSI_DYNAMIC_SWITCH event
+ * @SWITCH_TO_CMD_MODE: Switch from DSI video mode to command mode
+ * @SWITCH_TO_VIDEO_MODE: Switch from DSI command mode to video mode
+ * @SWITCH_RESOLUTION: Switch only display resolution
+ **/
+enum dynamic_switch_modes {
+ SWITCH_MODE_UNKNOWN = 0,
+ SWITCH_TO_CMD_MODE,
+ SWITCH_TO_VIDEO_MODE,
+ SWITCH_RESOLUTION,
+};
+
+/**
+ * struct mdss_panel_timing - structure for panel timing information
+ *
+ * NOTE(review): this kernel-doc describes struct mdss_panel_timing, but the
+ * declaration immediately following it is struct mipi_panel_info. The
+ * comment appears to have been separated from its struct; confirm and move
+ * it next to the mdss_panel_timing definition.
+ *
+ * @list:		List head ptr to track within panel data timings list
+ * @name:		A unique name of this timing that can be used to identify it
+ * @xres:		Panel width
+ * @yres:		Panel height
+ * @h_back_porch:	Horizontal back porch
+ * @h_front_porch:	Horizontal front porch
+ * @h_pulse_width:	Horizontal pulse width
+ * @hsync_skew:		Horizontal sync skew
+ * @v_back_porch:	Vertical back porch
+ * @v_front_porch:	Vertical front porch
+ * @v_pulse_width:	Vertical pulse width
+ * @border_top:		Border color on top
+ * @border_bottom:	Border color on bottom
+ * @border_left:	Border color on left
+ * @border_right:	Border color on right
+ * @clk_rate:		Pixel clock rate of this panel timing
+ * @frame_rate:		Display refresh rate
+ * @fbc:		Framebuffer compression parameters for this display timing
+ * @te:			Tearcheck parameters for this display timing
+ **/
+struct mipi_panel_info {
+ char boot_mode; /* identify if mode switched from starting mode */
+ char mode; /* video/cmd */
+ char interleave_mode;
+ char crc_check;
+ char ecc_check;
+ char dst_format; /* shared by video and command */
+ char data_lane0;
+ char data_lane1;
+ char data_lane2;
+ char data_lane3;
+ char rgb_swap;
+ char b_sel;
+ char g_sel;
+ char r_sel;
+ char rx_eot_ignore;
+ char tx_eot_append;
+ char t_clk_post; /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
+ char t_clk_pre; /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
+ char vc; /* virtual channel */
+ struct mdss_dsi_phy_ctrl dsi_phy_db;
+ /* video mode */
+ char pulse_mode_hsa_he;
+ char hfp_power_stop;
+ char hbp_power_stop;
+ char hsa_power_stop;
+ char eof_bllp_power_stop;
+ char last_line_interleave_en;
+ char bllp_power_stop;
+ char traffic_mode;
+ char frame_rate;
+ /* command mode */
+ char frame_rate_idle;
+ char interleave_max;
+ char insert_dcs_cmd;
+ char wr_mem_continue;
+ char wr_mem_start;
+ char te_sel;
+ char stream; /* 0 or 1 */
+ char mdp_trigger;
+ char dma_trigger;
+ /* Dynamic Switch Support */
+ enum dynamic_mode_switch dms_mode;
+
+ u32 pixel_packing;
+ u32 dsi_pclk_rate;
+	/* The packet-size should not be changed */
+ char no_max_pkt_size;
+ /* Clock required during LP commands */
+ bool force_clk_lane_hs;
+
+ char vsync_enable;
+ char hw_vsync_mode;
+
+ char lp11_init;
+ u32 init_delay;
+ u32 post_init_delay;
+ u8 default_lanes;
+};
+
+struct edp_panel_info {
+ char frame_rate; /* fps */
+};
+
+/**
+ * struct dynamic_fps_data - defines dynamic fps related data
+ * @hfp: horizontal front porch
+ * @hbp: horizontal back porch
+ * @hpw: horizontal pulse width
+ * @clk_rate: panel clock rate in HZ
+ * @fps: frames per second
+ */
+struct dynamic_fps_data {
+ u32 hfp;
+ u32 hbp;
+ u32 hpw;
+ u32 clk_rate;
+ u32 fps;
+};
+
+/**
+ * enum dynamic_fps_update - defines fps update modes
+ * @DFPS_SUSPEND_RESUME_MODE: suspend/resume mode
+ * @DFPS_IMMEDIATE_CLK_UPDATE_MODE: update fps using clock
+ * @DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP: update fps using vertical timings
+ * @DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP: update fps using horizontal timings
+ * @DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP: update fps using both horizontal
+ * timings and clock.
+ * @DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK: update fps using both
+ *         horizontal timings, clock needs to be calculated based on new clock and
+ * porches.
+ * @DFPS_MODE_MAX: defines maximum limit of supported modes.
+ */
+enum dynamic_fps_update {
+ DFPS_SUSPEND_RESUME_MODE,
+ DFPS_IMMEDIATE_CLK_UPDATE_MODE,
+ DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP,
+ DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP,
+ DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP,
+ DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK,
+ DFPS_MODE_MAX
+};
+
+enum lvds_mode {
+ LVDS_SINGLE_CHANNEL_MODE,
+ LVDS_DUAL_CHANNEL_MODE,
+};
+
+struct lvds_panel_info {
+ enum lvds_mode channel_mode;
+ /* Channel swap in dual mode */
+ char channel_swap;
+};
+
+enum {
+ COMPRESSION_NONE,
+ COMPRESSION_DSC,
+ COMPRESSION_FBC
+};
+
+struct dsc_desc {
+ u8 version; /* top 4 bits major and lower 4 bits minor version */
+ u8 scr_rev; /* 8 bit value for dsc scr revision */
+
+ /*
+ * Following parameters can change per frame if partial update is on
+ */
+ int pic_height;
+ int pic_width;
+ int initial_lines;
+
+ /*
+ * Following parameters are used for DSI and not for MDP. They can
+ * change per frame if partial update is enabled.
+ */
+ int pkt_per_line;
+ int bytes_in_slice;
+ int bytes_per_pkt;
+ int eol_byte_num;
+ int pclk_per_line; /* width */
+
+ /*
+ * Following parameters only changes when slice dimensions are changed.
+ */
+ int full_frame_slices; /* denotes number of slice per intf */
+ int slice_height;
+ int slice_width;
+ int chunk_size;
+
+ int slice_last_group_size;
+ int bpp; /* target bits per pixel */
+ int bpc; /* uncompressed bits per component */
+ int line_buf_depth;
+ bool config_by_manufacture_cmd;
+ bool block_pred_enable;
+ int vbr_enable;
+ int enable_422;
+ int convert_rgb;
+ int input_10_bits;
+ int slice_per_pkt;
+
+ int initial_dec_delay;
+ int initial_xmit_delay;
+
+ int initial_scale_value;
+ int scale_decrement_interval;
+ int scale_increment_interval;
+
+ int first_line_bpg_offset;
+ int nfl_bpg_offset;
+ int slice_bpg_offset;
+
+ int initial_offset;
+ int final_offset;
+
+ int rc_model_size; /* rate_buffer_size */
+
+ int det_thresh_flatness;
+ int max_qp_flatness;
+ int min_qp_flatness;
+ int edge_factor;
+ int quant_incr_limit0;
+ int quant_incr_limit1;
+ int tgt_offset_hi;
+ int tgt_offset_lo;
+ u32 *buf_thresh;
+ char *range_min_qp;
+ char *range_max_qp;
+ char *range_bpg_offset;
+};
+
+struct fbc_panel_info {
+ u32 enabled;
+ u32 target_bpp;
+ u32 comp_mode;
+ u32 qerr_enable;
+ u32 cd_bias;
+ u32 pat_enable;
+ u32 vlc_enable;
+ u32 bflc_enable;
+
+ u32 line_x_budget;
+ u32 block_x_budget;
+ u32 block_budget;
+
+ u32 lossless_mode_thd;
+ u32 lossy_mode_thd;
+ u32 lossy_rgb_thd;
+ u32 lossy_mode_idx;
+
+ u32 slice_height;
+ bool pred_mode;
+ bool enc_mode;
+ u32 max_pred_err;
+};
+
+struct mdss_mdp_pp_tear_check {
+ u32 tear_check_en;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u32 wr_ptr_irq;
+ u32 refx100;
+};
+
+struct mdss_panel_roi_alignment {
+ u32 xstart_pix_align;
+ u32 width_pix_align;
+ u32 ystart_pix_align;
+ u32 height_pix_align;
+ u32 min_width;
+ u32 min_height;
+};
+
+struct mdss_panel_hdr_properties {
+ bool hdr_enabled;
+
+ /* WRGB X and y values arrayed in format */
+ /* [WX, WY, RX, RY, GX, GY, BX, BY] */
+ u32 display_primaries[DISPLAY_PRIMARIES_COUNT];
+
+ /* peak brightness supported by panel */
+ u32 peak_brightness;
+ /* Blackness level supported by panel */
+ u32 blackness_level;
+};
+
+/*
+ * struct mdss_panel_info - master description of a display panel.
+ *
+ * Aggregates resolution, timing, backlight, dynamic-fps, partial-update,
+ * compression (DSC/FBC), interface-specific (MIPI/LVDS/eDP) and HDR
+ * information for one panel. Owned by struct mdss_panel_data (it is
+ * retrieved from this struct via container_of() in several helpers below,
+ * so it must always be embedded there, never allocated stand-alone).
+ */
+struct mdss_panel_info {
+	u32 xres;			/* active width in pixels */
+	u32 yres;			/* active height in pixels */
+	u32 physical_width;		/* panel width in mm */
+	u32 physical_height;		/* panel height in mm */
+	u32 bpp;			/* bits per pixel */
+	u32 type;			/* panel interface type, e.g. MIPI_VIDEO_PANEL */
+	u32 wait_cycle;
+	u32 pdest;			/* display destination index */
+	u32 brightness_max;
+	u32 bl_max;			/* max backlight level */
+	u32 bl_min;			/* min backlight level */
+	u32 fb_num;
+	u64 clk_rate;			/* pixel clock in Hz */
+	u32 clk_min;
+	u64 clk_max;
+	u32 mdp_transfer_time_us;
+	u32 frame_count;
+	u32 is_3d_panel;
+	u32 out_format;
+	u32 rst_seq[MDSS_DSI_RST_SEQ_LEN];	/* reset GPIO toggle sequence */
+	u32 rst_seq_len;
+	u32 vic; /* video identification code */
+	struct mdss_rect roi;		/* current partial-update region */
+	int pwm_pmic_gpio;
+	int pwm_lpg_chan;
+	int pwm_period;
+	bool dynamic_fps;		/* panel supports runtime fps change */
+	bool ulps_feature_enabled;
+	bool ulps_suspend_enabled;
+	bool panel_ack_disabled;
+	bool esd_check_enabled;
+	bool allow_phy_power_off;
+	char dfps_update;		/* dynamic-fps update method */
+	/* new requested fps before it is updated in hw */
+	int new_fps;
+	/* stores initial fps after boot */
+	u32 default_fps;
+	/* stores initial vtotal (vfp-method) or htotal (hfp-method) */
+	u32 saved_total;
+	/* stores initial vfp (vfp-method) or hfp (hfp-method) */
+	u32 saved_fporch;
+	/* current fps, once is programmed in hw */
+	int current_fps;
+
+	int panel_max_fps;
+	int panel_max_vtotal;
+	u32 mode_gpio_state;
+	u32 min_fps;
+	u32 max_fps;
+	u32 prg_fet;
+	struct mdss_panel_roi_alignment roi_alignment;
+
+	u32 cont_splash_enabled;	/* bootloader splash still on screen */
+	bool esd_rdy;
+	bool partial_update_supported; /* value from dts if pu is supported */
+	bool partial_update_enabled; /* is pu currently allowed */
+	u32 dcs_cmd_by_left;
+	u32 partial_update_roi_merge;
+	struct ion_handle *splash_ihdl;
+	int panel_power_state;		/* MDSS_PANEL_POWER_* state */
+	int compression_mode;		/* COMPRESSION_DSC / FBC / none */
+
+	uint32_t panel_dead;
+	u32 panel_force_dead;
+	u32 panel_orientation;
+	bool dynamic_switch_pending;
+	bool is_lpm_mode;
+	bool is_split_display; /* two DSIs in one display, pp split or not */
+	bool use_pingpong_split;
+
+	/*
+	 * index[0] = left layer mixer, value of 0 not valid
+	 * index[1] = right layer mixer, 0 is possible
+	 *
+	 * Ex(1): 1080x1920 display using single DSI and single lm, [1080 0]
+	 * Ex(2): 1440x2560 display using two DSIs and two lms,
+	 *        each with 720x2560, [720 0]
+	 * Ex(3): 1440x2560 display using single DSI w/ compression and
+	 *        single lm, [1440 0]
+	 * Ex(4): 1440x2560 display using single DSI w/ compression and
+	 *        two lms, [720 720]
+	 * Ex(5): 1080x1920 display using single DSI and two lm, [540 540]
+	 * Ex(6): 1080x1920 display using single DSI and two lm,
+	 *        [880 400] - not practical but possible
+	 */
+	u32 lm_widths[2];
+
+	bool is_prim_panel;
+	bool is_pluggable;
+	char display_id[MDSS_DISPLAY_ID_MAX_LEN];
+	bool is_cec_supported;
+
+	/* refer sim_panel_modes enum for different modes */
+	u8 sim_panel_mode;
+
+	/* opaque handles owned by the EDID/DBA/CEC sub-drivers */
+	void *edid_data;
+	void *dba_data;
+	void *cec_data;
+
+	char panel_name[MDSS_MAX_PANEL_LEN];
+	struct mdss_mdp_pp_tear_check te;
+
+	/*
+	 * Value of 2 only when single DSI is configured with 2 DSC
+	 * encoders. When 2 encoders are used, currently both use
+	 * same configuration.
+	 */
+	u8 dsc_enc_total; /* max 2 */
+	struct dsc_desc dsc;
+
+	/*
+	 * To determine, if DSC panel requires the pps to be sent
+	 * before or after the switch, during dynamic resolution switching
+	 */
+	bool send_pps_before_switch;
+
+	struct lcd_panel_info lcdc;
+	struct fbc_panel_info fbc;
+	struct mipi_panel_info mipi;
+	struct lvds_panel_info lvds;
+	struct edp_panel_info edp;
+
+	bool is_dba_panel;
+
+	/*
+	 * Delay(in ms) to accommodate s/w delay while
+	 * configuring the event timer wakeup logic.
+	 */
+	u32 adjust_timer_delay_ms;
+
+	/* debugfs structure for the panel */
+	struct mdss_panel_debugfs_info *debugfs_info;
+
+	/* persistence mode on/off */
+	bool persist_mode;
+
+	/* HDR properties of display panel*/
+	struct mdss_panel_hdr_properties hdr_properties;
+};
+
+/*
+ * struct mdss_panel_timing - one selectable timing/resolution mode.
+ *
+ * Panels may expose several modes; each is kept on the panel_data
+ * timings_list and looked up by @name (see
+ * mdss_panel_get_timing_by_name()). Relevant fields are copied into
+ * mdss_panel_info when a mode is activated.
+ */
+struct mdss_panel_timing {
+	struct list_head list;		/* entry in mdss_panel_data.timings_list */
+	const char *name;		/* lookup key for this mode */
+
+	u32 xres;
+	u32 yres;
+
+	/* horizontal and vertical blanking */
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 h_pulse_width;
+	u32 hsync_skew;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 v_pulse_width;
+
+	u32 border_top;
+	u32 border_bottom;
+	u32 border_left;
+	u32 border_right;
+
+	u32 lm_widths[2];		/* layer mixer widths, see mdss_panel_info */
+
+	u64 clk_rate;			/* pixel clock in Hz */
+	char frame_rate;
+
+	/* compression configuration for this mode */
+	u8 dsc_enc_total;
+	struct dsc_desc dsc;
+	struct fbc_panel_info fbc;
+	u32 compression_mode;
+
+	struct mdss_mdp_pp_tear_check te;
+	struct mdss_panel_roi_alignment roi_alignment;
+};
+
+/*
+ * struct mdss_panel_data - per-panel driver data and callbacks.
+ *
+ * Registered with the MDP core via mdss_register_panel(). panel_info MUST
+ * remain embedded as the first member reference point: helpers such as
+ * mdss_panel_get_framerate() recover this struct from a panel_info pointer
+ * with container_of().
+ */
+struct mdss_panel_data {
+	struct mdss_panel_info panel_info;
+	void (*set_backlight)(struct mdss_panel_data *pdata, u32 bl_level);
+	int (*apply_display_setting)(struct mdss_panel_data *pdata, u32 mode);
+	unsigned char *mmss_cc_base;
+
+	/**
+	 * event_handler() - callback handler for MDP core events
+	 * @pdata:	Pointer referring to the panel struct associated to this
+	 *		event. Can be used to retrieve panel info.
+	 * @e:		Event being generated, see enum mdss_intf_events
+	 * @arg:	Optional argument to pass some info from some events.
+	 *
+	 * Used to register handler to be used to propagate different events
+	 * happening in MDP core driver. Panel driver can listen for any of
+	 * these events to perform appropriate actions for panel initialization
+	 * and teardown.
+	 */
+	int (*event_handler)(struct mdss_panel_data *pdata, int e, void *arg);
+	struct device_node *(*get_fb_node)(struct platform_device *pdev);
+	bool (*get_idle)(struct mdss_panel_data *pdata);
+
+	/* supported timing modes; current_timing points into timings_list */
+	struct list_head timings_list;
+	struct mdss_panel_timing *current_timing;
+	bool active;
+
+	/* To store dsc cfg name passed by bootloader */
+	char dsc_cfg_np_name[MDSS_MAX_PANEL_LEN];
+	struct mdss_panel_data *next;	/* next panel in a split setup */
+
+	int panel_te_gpio;		/* GPIO carrying the TE signal */
+	struct completion te_done;	/* signalled on TE, see SW TE handling */
+};
+
+/*
+ * struct mdss_panel_debugfs_info - debugfs override state for one panel.
+ *
+ * Holds a shadow copy of panel_info editable through debugfs; override_flag
+ * selects whether the shadow values are applied to the live panel_info.
+ */
+struct mdss_panel_debugfs_info {
+	struct dentry *root;
+	struct dentry *parent;
+	struct mdss_panel_info panel_info;	/* debugfs shadow copy */
+	u32 override_flag;
+	struct mdss_panel_debugfs_info *next;
+};
+
+/**
+ * mdss_get_panel_framerate() - get panel frame rate based on panel information
+ * @panel_info: Pointer to panel info containing all panel information
+ */
+static inline u32 mdss_panel_get_framerate(struct mdss_panel_info *panel_info,
+ u32 flags)
+{
+ u32 frame_rate, pixel_total;
+ u64 rate;
+ struct mdss_panel_data *panel_data =
+ container_of(panel_info, typeof(*panel_data),
+ panel_info);
+ bool idle = false;
+
+ if (panel_info == NULL) {
+ frame_rate = DEFAULT_FRAME_RATE;
+ goto end;
+ }
+
+ switch (panel_info->type) {
+ case MIPI_VIDEO_PANEL:
+ case MIPI_CMD_PANEL:
+ frame_rate = panel_info->mipi.frame_rate;
+ if (panel_data->get_idle)
+ idle = panel_data->get_idle(panel_data);
+ if (idle)
+ frame_rate = panel_info->mipi.frame_rate_idle;
+ else
+ frame_rate = panel_info->mipi.frame_rate;
+ break;
+ case EDP_PANEL:
+ frame_rate = panel_info->edp.frame_rate;
+ break;
+ case WRITEBACK_PANEL:
+ frame_rate = DEFAULT_FRAME_RATE;
+ break;
+ case DTV_PANEL:
+ if (panel_info->dynamic_fps) {
+ frame_rate = panel_info->lcdc.frame_rate;
+ break;
+ }
+ default:
+ pixel_total = (panel_info->lcdc.h_back_porch +
+ panel_info->lcdc.h_front_porch +
+ panel_info->lcdc.h_pulse_width +
+ panel_info->xres) *
+ (panel_info->lcdc.v_back_porch +
+ panel_info->lcdc.v_front_porch +
+ panel_info->lcdc.v_pulse_width +
+ panel_info->yres);
+ if (pixel_total) {
+ rate = panel_info->clk_rate * KHZ_TO_HZ;
+ do_div(rate, pixel_total);
+ frame_rate = (u32)rate;
+ } else {
+ frame_rate = DEFAULT_FRAME_RATE;
+ }
+ break;
+ }
+end:
+ if (flags == FPS_RESOLUTION_KHZ) {
+ if (!(frame_rate / KHZ_TO_HZ))
+ frame_rate *= KHZ_TO_HZ;
+ } else if (flags == FPS_RESOLUTION_HZ) {
+ if (frame_rate / KHZ_TO_HZ)
+ frame_rate /= KHZ_TO_HZ;
+ }
+
+ return frame_rate;
+}
+
+/*
+ * mdss_panel_get_vtotal() - return panel vertical height
+ * @pinfo: Pointer to panel info containing all panel information
+ *
+ * Returns the total height of the panel including any blanking regions
+ * which are not visible to user but used to calculate panel pixel clock.
+ */
+static inline int mdss_panel_get_vtotal(struct mdss_panel_info *pinfo)
+{
+ return pinfo->yres + pinfo->lcdc.v_back_porch +
+ pinfo->lcdc.v_front_porch +
+ pinfo->lcdc.v_pulse_width+
+ pinfo->lcdc.border_top +
+ pinfo->lcdc.border_bottom;
+}
+
+/*
+ * mdss_panel_get_htotal() - return panel horizontal width
+ * @pinfo: Pointer to panel info containing all panel information
+ * @compression: true to factor fbc settings, false to ignore.
+ *
+ * Returns the total width of the panel including any blanking regions
+ * which are not visible to user but used for calculations. For certain
+ * usescases where the fbc parameters need to be ignored like bandwidth
+ * calculation, the appropriate flag can be passed.
+ */
+static inline int mdss_panel_get_htotal(struct mdss_panel_info *pinfo, bool
+ compression)
+{
+ struct dsc_desc *dsc = NULL;
+
+ int adj_xres = pinfo->xres + pinfo->lcdc.border_left +
+ pinfo->lcdc.border_right;
+
+ if (compression) {
+ if (pinfo->compression_mode == COMPRESSION_DSC) {
+ dsc = &pinfo->dsc;
+ adj_xres = dsc->pclk_per_line;
+ } else if (pinfo->fbc.enabled) {
+ adj_xres = mult_frac(adj_xres,
+ pinfo->fbc.target_bpp, pinfo->bpp);
+ }
+ }
+
+ return adj_xres + pinfo->lcdc.h_back_porch +
+ pinfo->lcdc.h_front_porch +
+ pinfo->lcdc.h_pulse_width;
+}
+
+/*
+ * is_dsc_compression() - check whether the panel uses DSC compression
+ * @pinfo:	panel info to inspect, may be NULL
+ *
+ * Returns false for a NULL @pinfo.
+ */
+static inline bool is_dsc_compression(struct mdss_panel_info *pinfo)
+{
+	if (pinfo)
+		return (pinfo->compression_mode == COMPRESSION_DSC);
+
+	return false;
+}
+
+int mdss_register_panel(struct platform_device *pdev,
+ struct mdss_panel_data *pdata);
+
+/*
+ * mdss_panel_is_power_off: - checks if a panel is off
+ * @panel_power_state: enum identifying the power state to be checked
+ */
+static inline bool mdss_panel_is_power_off(int panel_power_state)
+{
+ return (panel_power_state == MDSS_PANEL_POWER_OFF);
+}
+
+/**
+ * mdss_panel_is_power_on_interactive: - checks if a panel is on and interactive
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * This function returns true only is the panel is fully interactive and
+ * opertaing in normal mode.
+ */
+static inline bool mdss_panel_is_power_on_interactive(int panel_power_state)
+{
+ return (panel_power_state == MDSS_PANEL_POWER_ON);
+}
+
+/**
+ * mdss_panel_is_panel_power_on: - checks if a panel is on
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * A panel is considered to be on as long as it can accept any commands
+ * or data. Sometimes it is possible to program the panel to be in a low
+ * power non-interactive state. This function returns false only if panel
+ * has explicitly been turned off.
+ */
+static inline bool mdss_panel_is_power_on(int panel_power_state)
+{
+ return !mdss_panel_is_power_off(panel_power_state);
+}
+
+/**
+ * mdss_panel_is_panel_power_on_lp: - checks if a panel is in a low power mode
+ * @pdata: pointer to the panel struct associated to the panel
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * This function returns true if the panel is in an intermediate low power
+ * state where it is still on but not fully interactive. It may or may not
+ * accept any commands and display updates.
+ */
+static inline bool mdss_panel_is_power_on_lp(int panel_power_state)
+{
+ return !mdss_panel_is_power_off(panel_power_state) &&
+ !mdss_panel_is_power_on_interactive(panel_power_state);
+}
+
+/**
+ * mdss_panel_is_panel_power_on_ulp: - checks if panel is in
+ * ultra low power mode
+ * @pdata: pointer to the panel struct associated to the panel
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * This function returns true if the panel is in a ultra low power
+ * state where it is still on but cannot receive any display updates.
+ */
+static inline bool mdss_panel_is_power_on_ulp(int panel_power_state)
+{
+ return panel_power_state == MDSS_PANEL_POWER_LP2;
+}
+
+/**
+ * mdss_panel_update_clk_rate() - update the clock rate based on panel timing
+ * information.
+ * @panel_info: Pointer to panel info containing all panel information
+ * @fps: frame rate of the panel
+ */
+static inline void mdss_panel_update_clk_rate(struct mdss_panel_info *pinfo,
+ u32 fps)
+{
+ struct lcd_panel_info *lcdc = &pinfo->lcdc;
+ u32 htotal, vtotal;
+
+ if (pinfo->type == DTV_PANEL) {
+ htotal = pinfo->xres + lcdc->h_front_porch +
+ lcdc->h_back_porch + lcdc->h_pulse_width;
+ vtotal = pinfo->yres + lcdc->v_front_porch +
+ lcdc->v_back_porch + lcdc->v_pulse_width;
+
+ pinfo->clk_rate = mult_frac(htotal * vtotal, fps, 1000);
+
+ pr_debug("vtotal %d, htotal %d, rate %llu\n",
+ vtotal, htotal, pinfo->clk_rate);
+ }
+}
+
+/**
+ * mdss_panel_calc_frame_rate() - calculate panel frame rate based
+ * on panel timing information.
+ * @panel_info: Pointer to panel info containing all panel information
+ */
+static inline u8 mdss_panel_calc_frame_rate(struct mdss_panel_info *pinfo)
+{
+ u32 pixel_total = 0;
+ u8 frame_rate = 0;
+ unsigned long pclk_rate = pinfo->mipi.dsi_pclk_rate;
+ u32 xres;
+
+ xres = pinfo->xres;
+ if (pinfo->compression_mode == COMPRESSION_DSC)
+ xres /= 3;
+
+ pixel_total = (pinfo->lcdc.h_back_porch +
+ pinfo->lcdc.h_front_porch +
+ pinfo->lcdc.h_pulse_width +
+ xres) *
+ (pinfo->lcdc.v_back_porch +
+ pinfo->lcdc.v_front_porch +
+ pinfo->lcdc.v_pulse_width +
+ pinfo->yres);
+
+ if (pclk_rate && pixel_total)
+ frame_rate =
+ DIV_ROUND_CLOSEST(pclk_rate, pixel_total);
+ else
+ frame_rate = pinfo->panel_max_fps;
+
+ return frame_rate;
+}
+
+/**
+ * mdss_panel_intf_type: - checks if a given intf type is primary
+ * @intf_val: panel interface type of the individual controller
+ *
+ * Individual controller queries with MDP to check if it is
+ * configured as the primary interface.
+ *
+ * returns a pointer to the configured structure mdss_panel_cfg
+ * to the controller that's configured as the primary panel interface.
+ * returns NULL on error or if @intf_val is not the configured
+ * controller.
+ */
+struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val);
+
+/**
+ * mdss_is_ready() - checks if mdss is probed and ready
+ *
+ * Checks if mdss resources have been initialized
+ *
+ * returns true if mdss is ready, else returns false.
+ */
+bool mdss_is_ready(void);
+int mdss_rect_cmp(struct mdss_rect *rect1, struct mdss_rect *rect2);
+
+/**
+ * mdss_panel_override_te_params() - overrides TE params to enable SW TE
+ * @pinfo: panel info
+ */
+void mdss_panel_override_te_params(struct mdss_panel_info *pinfo);
+
+/**
+ * mdss_panel_dsc_parameters_calc: calculate DSC parameters
+ * @dsc: pointer to DSC structure associated with panel_info
+ */
+void mdss_panel_dsc_parameters_calc(struct dsc_desc *dsc);
+
+/**
+ * mdss_panel_dsc_update_pic_dim: update DSC structure with picture dimension
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @pic_width: Picture width
+ * @pic_height: Picture height
+ */
+void mdss_panel_dsc_update_pic_dim(struct dsc_desc *dsc,
+ int pic_width, int pic_height);
+
+/**
+ * mdss_panel_dsc_initial_line_calc: update DSC initial line buffering
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @enc_ip_width: uncompressed input width for DSC enc represented by @dsc
+ * i.e.
+ * * 720 for full frame single_display_dual_lm: 1440x2560
+ * * 1080 for full frame dual_display_dual_lm: 2160x3840
+ * * 360 for partial frame single_display_dual_lm: 360x2560
+ */
+void mdss_panel_dsc_initial_line_calc(struct dsc_desc *dsc, int enc_ip_width);
+
+/**
+ * mdss_panel_dsc_pclk_param_calc: calculate DSC params related to DSI
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @intf_width: Uncompressed width per interface
+ * i.e.
+ * * 1440 for full frame single_display_dual_lm: 1440x2560
+ * * 1080 for full frame dual_display_dual_lm: 2160x3840
+ * * 720 for partial frame single_display_dual_lm: 720x2560
+ */
+void mdss_panel_dsc_pclk_param_calc(struct dsc_desc *dsc, int intf_width);
+
+/**
+ * mdss_panel_dsc_prepare_pps_buf - prepares Picture Parameter Set to be
+ * sent to panel
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @buf: buffer that holds PPS
+ * @pps_id: pps_identifier
+ *
+ * returns length of the PPS buffer.
+ */
+int mdss_panel_dsc_prepare_pps_buf(struct dsc_desc *dsc, char *buf,
+ int pps_id);
+#ifdef CONFIG_FB_MSM_MDSS
+int mdss_panel_debugfs_init(struct mdss_panel_info *panel_info,
+ char const *panel_name);
+void mdss_panel_debugfs_cleanup(struct mdss_panel_info *panel_info);
+void mdss_panel_debugfsinfo_to_panelinfo(struct mdss_panel_info *panel_info);
+
+/*
+ * mdss_panel_info_from_timing() - populate panel info from panel timing
+ * @pt: pointer to source panel timing
+ * @pinfo: pointer to destination panel info
+ *
+ * Populates relevant data from panel timing into panel info
+ */
+void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
+ struct mdss_panel_info *pinfo);
+
+/**
+ * mdss_panel_get_timing_by_name() - return panel timing with matching name
+ * @pdata: pointer to panel data struct containing list of panel timings
+ * @name: name of the panel timing to be returned
+ *
+ * Looks through list of timings provided in panel data and returns pointer
+ * to panel timing matching it. If none is found, NULL is returned.
+ */
+struct mdss_panel_timing *mdss_panel_get_timing_by_name(
+ struct mdss_panel_data *pdata,
+ const char *name);
+#else
+/* No-op stubs used when the MDSS framebuffer driver is not built in. */
+static inline int mdss_panel_debugfs_init(
+			struct mdss_panel_info *panel_info,
+			char const *panel_name) { return 0; };
+static inline void mdss_panel_debugfs_cleanup(
+			struct mdss_panel_info *panel_info) { };
+static inline void mdss_panel_debugfsinfo_to_panelinfo(
+			struct mdss_panel_info *panel_info) { };
+static inline void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
+			struct mdss_panel_info *pinfo) { };
+static inline struct mdss_panel_timing *mdss_panel_get_timing_by_name(
+		struct mdss_panel_data *pdata,
+		const char *name) { return NULL; };
+#endif
+#endif /* MDSS_PANEL_H */
diff --git a/drivers/video/fbdev/msm/mdss_qpic.c b/drivers/video/fbdev/msm/mdss_qpic.c
new file mode 100644
index 0000000..3e0ca75
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic.c
@@ -0,0 +1,820 @@
+/* Copyright (c) 2013-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk/msm-clk.h>
+
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+
+#include "mdss_fb.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static int mdss_qpic_probe(struct platform_device *pdev);
+static int mdss_qpic_remove(struct platform_device *pdev);
+static void qpic_interrupt_en(u32 en);
+
+/* Global driver state; allocated/initialized in probe. */
+struct qpic_data_type *qpic_res;
+
+/* for debugging */
+static u32 use_bam = true;	/* send pixel data through the BAM DMA engine */
+static u32 use_irq = true;	/* use interrupts instead of register polling */
+static u32 use_vsync;
+
+static const struct of_device_id mdss_qpic_dt_match[] = {
+	{ .compatible = "qcom,mdss_qpic",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_qpic_dt_match);
+
+/* Platform driver registered under the "mdp" name to emulate the MDP HW. */
+static struct platform_driver mdss_qpic_driver = {
+	.probe = mdss_qpic_probe,
+	.remove = mdss_qpic_remove,
+	.shutdown = NULL,
+	.driver = {
+		/*
+		 * Simulate mdp hw
+		 */
+		.name = "mdp",
+		.of_match_table = mdss_qpic_dt_match,
+	},
+};
+
+/*
+ * mdss_qpic_clk_ctrl() - enable or disable the QPIC core and AHB clocks.
+ * @enable: true to prepare+enable both clocks, false to disable+unprepare.
+ *
+ * Either clock pointer may be NULL (not provided by the platform) and is
+ * then skipped. Disable order is the reverse of enable order.
+ */
+static void mdss_qpic_clk_ctrl(bool enable)
+{
+	if (enable) {
+		if (qpic_res->qpic_clk)
+			clk_prepare_enable(qpic_res->qpic_clk);
+		if (qpic_res->qpic_a_clk)
+			clk_prepare_enable(qpic_res->qpic_a_clk);
+	} else {
+		if (qpic_res->qpic_a_clk)
+			clk_disable_unprepare(qpic_res->qpic_a_clk);
+		if (qpic_res->qpic_clk)
+			clk_disable_unprepare(qpic_res->qpic_clk);
+	}
+}
+
+/*
+ * qpic_on() - power-on hook for the framebuffer layer.
+ * @mfd: framebuffer device data (unused here; state lives in qpic_res)
+ *
+ * Enables the QPIC clocks and powers the panel on. Marks the controller as
+ * on even if panel power-on fails; the panel error is returned to the caller.
+ */
+int qpic_on(struct msm_fb_data_type *mfd)
+{
+	int ret;
+
+	mdss_qpic_clk_ctrl(true);
+
+	ret = mdss_qpic_panel_on(qpic_res->panel_data, &qpic_res->panel_io);
+	qpic_res->qpic_is_on = true;
+	return ret;
+}
+
+/*
+ * qpic_off() - power-off hook for the framebuffer layer.
+ * @mfd: framebuffer device data (unused here; state lives in qpic_res)
+ *
+ * Powers the panel off, masks the QPIC interrupt if it was in use, and
+ * gates the clocks. Returns the panel-off result.
+ */
+int qpic_off(struct msm_fb_data_type *mfd)
+{
+	int ret;
+
+	ret = mdss_qpic_panel_off(qpic_res->panel_data, &qpic_res->panel_io);
+	if (use_irq)
+		qpic_interrupt_en(false);
+
+	mdss_qpic_clk_ctrl(false);
+	qpic_res->qpic_is_on = false;
+
+	return ret;
+}
+
+/*
+ * msm_qpic_bus_set_vote() - cast a bus-bandwidth vote for QPIC transfers.
+ * @vote: bus scaling usecase index (0 = release bandwidth)
+ *
+ * Silently succeeds when no bus handle was registered at probe time.
+ */
+static int msm_qpic_bus_set_vote(u32 vote)
+{
+	int ret;
+
+	if (!qpic_res->bus_handle)
+		return 0;
+	ret = msm_bus_scale_client_update_request(qpic_res->bus_handle,
+			vote);
+	if (ret)
+		pr_err("msm_bus_scale_client_update_request() failed, bus_handle=0x%x, vote=%d, err=%d\n",
+				qpic_res->bus_handle, vote, ret);
+	return ret;
+}
+
+/*
+ * mdss_qpic_pan_display() - push the current framebuffer contents to panel.
+ * @mfd: framebuffer device whose visible region is to be displayed
+ *
+ * Computes the pan offset from the fb_info xoffset/yoffset, votes for bus
+ * bandwidth for the duration of the transfer and sends one full frame.
+ * In BAM mode the physical address is used; in SW mode the kernel virtual
+ * address is used.
+ */
+static void mdss_qpic_pan_display(struct msm_fb_data_type *mfd)
+{
+
+	struct fb_info *fbi;
+	u32 offset, fb_offset, size;
+	int bpp;
+
+	if (!mfd) {
+		pr_err("%s: mfd is NULL!", __func__);
+		return;
+	}
+
+	if (!qpic_res->qpic_is_on) {
+		pr_err("%s: Failed since panel is not ON\n", __func__);
+		return;
+	}
+
+	fbi = mfd->fbi;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+			offset, fbi->fix.smem_len);
+		return;
+	}
+	/*
+	 * NOTE(review): both casts truncate to 32 bits; the screen_base
+	 * pointer cast in particular is unsafe on 64-bit - confirm this
+	 * driver is 32-bit only.
+	 */
+	if (use_bam)
+		fb_offset = (u32)fbi->fix.smem_start + offset;
+	else
+		fb_offset = (u32)mfd->fbi->screen_base + offset;
+
+	msm_qpic_bus_set_vote(1);
+	mdss_qpic_panel_on(qpic_res->panel_data, &qpic_res->panel_io);
+	size = fbi->var.xres * fbi->var.yres * bpp;
+
+	qpic_send_frame(0, 0, fbi->var.xres - 1, fbi->var.yres - 1,
+		(u32 *)fb_offset, size);
+	msm_qpic_bus_set_vote(0);
+}
+
+/*
+ * mdss_qpic_alloc_fb_mem() - allocate DMA-coherent framebuffer and command
+ * buffers for fb index 0.
+ * @mfd: framebuffer device to fill in (smem_start/screen_base/smem_len)
+ *
+ * Secondary fb indices get a zeroed mapping. Both allocations are done at
+ * most once and cached in qpic_res. Returns 0 on success, -EINVAL if probe
+ * never completed, -ENOMEM on allocation failure.
+ */
+int mdss_qpic_alloc_fb_mem(struct msm_fb_data_type *mfd)
+{
+	size_t size;
+	u32 yres = mfd->fbi->var.yres_virtual;
+
+	size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
+
+	if (!qpic_res->res_init)
+		return -EINVAL;
+
+	/* only the primary framebuffer gets backing memory */
+	if (mfd->index != 0) {
+		mfd->fbi->fix.smem_start = 0;
+		mfd->fbi->screen_base = NULL;
+		mfd->fbi->fix.smem_len = 0;
+		mfd->iova = 0;
+		return 0;
+	}
+
+	if (!qpic_res->fb_virt) {
+		qpic_res->fb_virt = (void *)dmam_alloc_coherent(
+						&qpic_res->pdev->dev,
+						size,
+						&qpic_res->fb_phys,
+						GFP_KERNEL);
+		/* NOTE(review): %d with a size_t argument - should be %zu */
+		pr_debug("%s size=%d vir_addr=%x phys_addr=%x",
+			__func__, size, (int)qpic_res->fb_virt,
+			(int)qpic_res->fb_phys);
+		if (!qpic_res->fb_virt) {
+			pr_err("%s fb allocation failed", __func__);
+			return -ENOMEM;
+		}
+	}
+
+	if (!qpic_res->cmd_buf_virt) {
+		qpic_res->cmd_buf_virt = dma_alloc_writecombine(
+			NULL, QPIC_MAX_CMD_BUF_SIZE,
+			&qpic_res->cmd_buf_phys, GFP_KERNEL);
+		pr_debug("%s cmd_buf virt=%x phys=%x", __func__,
+			(int)qpic_res->cmd_buf_virt,
+			qpic_res->cmd_buf_phys);
+		if (!qpic_res->cmd_buf_virt) {
+			pr_err("%s cmd buf allocation failed", __func__);
+			return -ENOMEM;
+		}
+	}
+	mfd->fbi->fix.smem_start = qpic_res->fb_phys;
+	mfd->fbi->screen_base = qpic_res->fb_virt;
+	mfd->fbi->fix.smem_len = size;
+	mfd->iova = 0;
+	return 0;
+}
+
+/*
+ * mdss_qpic_fb_stride() - framebuffer line stride in bytes.
+ * @fb_index: framebuffer index (unused; stride is the same for all)
+ * @xres: line width in pixels
+ * @bpp: bytes per pixel
+ */
+u32 mdss_qpic_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	return xres * bpp;
+}
+
+/*
+ * mdss_qpic_overlay_init() - wire QPIC callbacks into the mdss_fb interface.
+ * @mfd: framebuffer device whose mdp interface is being populated
+ *
+ * Only on/off and the pan-display DMA hook are supported; histogram,
+ * cursor, ioctl and kickoff handlers are intentionally absent.
+ */
+int mdss_qpic_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct msm_mdp_interface *qpic_interface = &mfd->mdp;
+
+	qpic_interface->on_fnc = qpic_on;
+	qpic_interface->off_fnc = qpic_off;
+	qpic_interface->do_histogram = NULL;
+	qpic_interface->cursor_update = NULL;
+	qpic_interface->dma_fnc = mdss_qpic_pan_display;
+	qpic_interface->ioctl_handler = NULL;
+	qpic_interface->kickoff_fnc = NULL;
+	return 0;
+}
+
+/*
+ * qpic_register_panel() - register a panel and spawn its mdss_fb device.
+ * @pdata: panel driver data; stored in qpic_res on success
+ *
+ * Creates an "mdss_fb" platform device carrying @pdata as platform data.
+ * On platform_device_add() failure the device reference is dropped.
+ * Returns 0 on success, -ENODEV before probe, -ENOMEM or the add error
+ * otherwise.
+ */
+int qpic_register_panel(struct mdss_panel_data *pdata)
+{
+	struct platform_device *mdss_fb_dev = NULL;
+	int rc;
+
+	if (!qpic_res)
+		return -ENODEV;
+
+	mdss_fb_dev = platform_device_alloc("mdss_fb", pdata->panel_info.pdest);
+	if (!mdss_fb_dev) {
+		pr_err("unable to allocate mdss_fb device\n");
+		return -ENOMEM;
+	}
+
+	mdss_fb_dev->dev.platform_data = pdata;
+
+	rc = platform_device_add(mdss_fb_dev);
+	if (rc) {
+		platform_device_put(mdss_fb_dev);
+		pr_err("unable to probe mdss_fb device (%d)\n", rc);
+		return rc;
+	}
+
+	qpic_res->panel_data = pdata;
+
+	return rc;
+}
+
+/*
+ * qpic_init_sps() - set up the SPS/BAM pipe used for pixel-data DMA.
+ * @pdev: QPIC platform device (owns the coherent descriptor FIFO)
+ * @end_point: endpoint state to initialize (pipe handle, config, completion)
+ *
+ * Idempotent: returns immediately once qpic_res->sps_init is set. The BAM
+ * register block sits at a fixed 0x4000 offset inside the QPIC region, and
+ * the BAM IRQ is derived from the QPIC IRQ (offset -4) - both offsets are
+ * per the QPIC HW layout; confirm against the SoC documentation. An already
+ * registered BAM (sps_phy2h() hit) is reused instead of re-registered.
+ * On error the endpoint is disconnected/freed as appropriate via the
+ * goto-cleanup chain.
+ */
+int qpic_init_sps(struct platform_device *pdev,
+				struct qpic_sps_endpt *end_point)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_connect *sps_config = &end_point->config;
+	struct sps_register_event *sps_event = &end_point->bam_event;
+	struct sps_bam_props bam = {0};
+	unsigned long bam_handle = 0;
+
+	if (qpic_res->sps_init)
+		return 0;
+	bam.phys_addr = qpic_res->qpic_phys + 0x4000;
+	bam.virt_addr = qpic_res->qpic_base + 0x4000;
+	bam.irq = qpic_res->irq - 4;
+	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+	/* reuse an existing BAM registration when possible */
+	rc = sps_phy2h(bam.phys_addr, &bam_handle);
+	if (rc)
+		rc = sps_register_bam_device(&bam, &bam_handle);
+	if (rc) {
+		pr_err("%s bam_handle is NULL", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		pr_err("sps_alloc_endpoint() failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = sps_get_config(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_get_config() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	/* WRITE CASE: source - system memory; destination - BAM */
+	sps_config->source = SPS_DEV_HANDLE_MEM;
+	sps_config->destination = bam_handle;
+	sps_config->mode = SPS_MODE_DEST;
+	sps_config->dest_pipe_index = 6;
+
+	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	sps_config->lock_group = 0;
+	/*
+	 * Descriptor FIFO is a cyclic FIFO. If 64 descriptors
+	 * are allowed to be submitted before we get any ack for any of them,
+	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+	 * sizeof(struct sps_iovec).
+	 */
+	sps_config->desc.size = (64) *
+				sizeof(struct sps_iovec);
+	sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
+					sps_config->desc.size,
+					&sps_config->desc.phys_base,
+					GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		pr_err("dmam_alloc_coherent() failed for size %x\n",
+				sps_config->desc.size);
+		rc = -ENOMEM;
+		goto free_endpoint;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	rc = sps_connect(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_connect() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	/* transfer completion is signalled through end_point->completion */
+	init_completion(&end_point->completion);
+	sps_event->mode = SPS_TRIGGER_WAIT;
+	sps_event->options = SPS_O_EOT;
+	sps_event->xfer_done = &end_point->completion;
+	sps_event->user = (void *)qpic_res;
+
+	rc = sps_register_event(pipe_handle, sps_event);
+	if (rc) {
+		pr_err("sps_register_event() failed %d\n", rc);
+		goto sps_disconnect;
+	}
+
+	end_point->handle = pipe_handle;
+	qpic_res->sps_init = true;
+	goto out;
+sps_disconnect:
+	sps_disconnect(pipe_handle);
+free_endpoint:
+	sps_free_endpoint(pipe_handle);
+out:
+	return rc;
+}
+
+/*
+ * mdss_qpic_reset() - soft-reset the QPIC LCDC block.
+ *
+ * Triggers the reset bit, then polls the status register (bit 8 = reset
+ * done) for up to QPIC_MAX_VSYNC_WAIT_TIME ms. Logs and gives up on
+ * timeout rather than returning an error.
+ */
+void mdss_qpic_reset(void)
+{
+	u32 time_end;
+
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_RESET, 1 << 0);
+	/* wait 100 us after reset as suggested by hw */
+	usleep_range(100, 110);
+	time_end = (u32)ktime_to_ms(ktime_get()) +
+		QPIC_MAX_VSYNC_WAIT_TIME;
+	while (((QPIC_INP(QPIC_REG_QPIC_LCDC_STTS) & (1 << 8)) == 0)) {
+		if ((u32)ktime_to_ms(ktime_get()) > time_end) {
+			pr_err("%s reset not finished", __func__);
+			break;
+		}
+		/* yield 100 us for next polling by experiment*/
+		usleep_range(100, 110);
+	}
+}
+
+/*
+ * qpic_interrupt_en() - enable or disable QPIC interrupt delivery.
+ * @en: nonzero to enable, zero to disable
+ *
+ * Pending IRQ status is always cleared first. On enable, only the Linux
+ * IRQ line is unmasked here; the LCDC IRQ_EN register is programmed by the
+ * wait paths (qpic_wait_for_fifo/eof) as needed. On disable both the LCDC
+ * IRQ_EN register and the Linux IRQ line are masked.
+ */
+static void qpic_interrupt_en(u32 en)
+{
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	if (en) {
+		if (!qpic_res->irq_ena) {
+			init_completion(&qpic_res->fifo_eof_comp);
+			qpic_res->irq_ena = true;
+			enable_irq(qpic_res->irq);
+		}
+	} else {
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+		disable_irq(qpic_res->irq);
+		qpic_res->irq_ena = false;
+	}
+}
+
+/*
+ * qpic_irq_handler() - QPIC LCDC interrupt handler.
+ *
+ * Reads and clears pending status, masks further interrupts, and completes
+ * fifo_eof_comp when an EOF (bit 2) or FIFO-threshold (bit 4) interrupt
+ * fired - the bits qpic_wait_for_eof()/qpic_wait_for_fifo() sleep on.
+ */
+static irqreturn_t qpic_irq_handler(int irq, void *ptr)
+{
+	u32 data;
+
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+
+	if (data & ((1 << 2) | (1 << 4)))
+		complete(&qpic_res->fifo_eof_comp);
+	return IRQ_HANDLED;
+}
+
+/*
+ * qpic_send_pkt_bam() - send a command/data packet via the BAM DMA pipe.
+ * @cmd: LCD controller opcode, programmed into CFG2's low byte
+ * @len: payload length in bytes
+ * @param: payload; for memory-write opcodes this is treated as a physical
+ *         address, otherwise it is copied into the bounce command buffer
+ *
+ * The payload is split into <= 0x7FF0-byte BAM transfers with EOT set on
+ * the last one, then the EOT completion is awaited.
+ *
+ * NOTE(review): sps_transfer_one() errors are logged but the loop keeps
+ * going, and on wait timeout ret is <= 0 and returned as-is - a timeout of
+ * exactly 0 reads as success to the caller. Confirm intended.
+ */
+static int qpic_send_pkt_bam(u32 cmd, u32 len, u8 *param)
+{
+	int ret = 0;
+	u32 phys_addr, cfg2, block_len, flags;
+
+	if ((cmd != OP_WRITE_MEMORY_START) &&
+		(cmd != OP_WRITE_MEMORY_CONTINUE)) {
+		memcpy((u8 *)qpic_res->cmd_buf_virt, param, len);
+		phys_addr = qpic_res->cmd_buf_phys;
+	} else {
+		phys_addr = (u32)param;
+	}
+	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	cfg2 &= ~0xFF;
+	cfg2 |= cmd;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	block_len = 0x7FF0;
+	while (len > 0)  {
+		if (len <= 0x7FF0) {
+			flags = SPS_IOVEC_FLAG_EOT;
+			block_len = len;
+		} else {
+			flags = 0;
+		}
+		ret = sps_transfer_one(qpic_res->qpic_endpt.handle,
+				phys_addr, block_len, NULL, flags);
+		if (ret)
+			pr_err("failed to submit command %x ret %d\n",
+				cmd, ret);
+		phys_addr += block_len;
+		len -= block_len;
+	}
+	ret = wait_for_completion_timeout(
+		&qpic_res->qpic_endpt.completion,
+		msecs_to_jiffies(100 * 4));
+	if (ret <= 0)
+		pr_err("%s timeout %x", __func__, ret);
+	else
+		ret = 0;
+	return ret;
+}
+
+/* qpic_dump_reg() - dump the QPIC LCDC register set for debugging. */
+void qpic_dump_reg(void)
+{
+	pr_info("%s\n", __func__);
+	pr_info("QPIC_REG_QPIC_LCDC_CTRL = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL));
+	pr_info("QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT));
+	pr_info("QPIC_REG_QPIC_LCDC_CFG0 = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_CFG0));
+	pr_info("QPIC_REG_QPIC_LCDC_CFG1 = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_CFG1));
+	pr_info("QPIC_REG_QPIC_LCDC_CFG2 = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2));
+	pr_info("QPIC_REG_QPIC_LCDC_IRQ_EN = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_EN));
+	pr_info("QPIC_REG_QPIC_LCDC_IRQ_STTS = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS));
+	pr_info("QPIC_REG_QPIC_LCDC_STTS = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_STTS));
+	pr_info("QPIC_REG_QPIC_LCDC_FIFO_SOF = %x\n",
+		QPIC_INP(QPIC_REG_QPIC_LCDC_FIFO_SOF));
+}
+
+/*
+ * qpic_wait_for_fifo() - wait until the LCDC output FIFO has drained.
+ *
+ * FIFO fill level is the low 6 bits of the STTS register; zero means empty.
+ * In IRQ mode, arms the FIFO interrupt (bit 4) and sleeps on
+ * fifo_eof_comp; in polling mode, spins with 10 us sleeps. Returns 0 on
+ * empty FIFO, -ETIMEDOUT/-EBUSY on timeout.
+ */
+static int qpic_wait_for_fifo(void)
+{
+	u32 data, time_end;
+	int ret = 0;
+
+	if (use_irq) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
+		data &= 0x3F;
+		if (data == 0)
+			return ret;
+		reinit_completion(&qpic_res->fifo_eof_comp);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, (1 << 4));
+		ret = wait_for_completion_timeout(&qpic_res->fifo_eof_comp,
+				msecs_to_jiffies(QPIC_MAX_VSYNC_WAIT_TIME));
+		if (ret > 0) {
+			ret = 0;
+		} else {
+			pr_err("%s timeout %x\n", __func__, ret);
+			ret = -ETIMEDOUT;
+		}
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+	} else {
+		time_end = (u32)ktime_to_ms(ktime_get()) +
+			QPIC_MAX_VSYNC_WAIT_TIME;
+		while (1) {
+			data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
+			data &= 0x3F;
+			if (data == 0)
+				break;
+			/* yield 10 us for next polling by experiment*/
+			usleep_range(10, 11);
+			if (ktime_to_ms(ktime_get()) > time_end) {
+				pr_err("%s time out", __func__);
+				ret = -EBUSY;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * qpic_wait_for_eof() - wait for the end-of-frame interrupt status bit.
+ *
+ * EOF is bit 2 of IRQ_STTS. In IRQ mode, arms the EOF interrupt and sleeps
+ * on fifo_eof_comp; in polling mode, spins with 10 us sleeps and dumps the
+ * registers on timeout. Returns 0 on EOF, -ETIMEDOUT/-EBUSY on timeout.
+ */
+static int qpic_wait_for_eof(void)
+{
+	u32 data, time_end;
+	int ret = 0;
+
+	if (use_irq) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+		if (data & (1 << 2))
+			return ret;
+		reinit_completion(&qpic_res->fifo_eof_comp);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, (1 << 2));
+		ret = wait_for_completion_timeout(&qpic_res->fifo_eof_comp,
+				msecs_to_jiffies(QPIC_MAX_VSYNC_WAIT_TIME));
+		if (ret > 0) {
+			ret = 0;
+		} else {
+			pr_err("%s timeout %x\n", __func__, ret);
+			ret = -ETIMEDOUT;
+		}
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+	} else {
+		time_end = (u32)ktime_to_ms(ktime_get()) +
+			QPIC_MAX_VSYNC_WAIT_TIME;
+		while (1) {
+			data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+			if (data & (1 << 2))
+				break;
+			/* yield 10 us for next polling by experiment*/
+			usleep_range(10, 11);
+			if (ktime_to_ms(ktime_get()) > time_end) {
+				pr_err("%s wait for eof time out\n", __func__);
+				qpic_dump_reg();
+				ret = -EBUSY;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * qpic_send_pkt_sw() - send a command/data packet by programmed I/O.
+ * @cmd: LCD controller opcode
+ * @len: payload length in bytes
+ * @param: payload bytes, may be NULL when len <= 4
+ *
+ * Short packets (<= 4 bytes) are packed into a single command-register
+ * write; longer ones are streamed through the LCDC FIFO in 16-word bursts,
+ * waiting for FIFO space and finally for EOF. Transparent mode is enabled
+ * for non-memory-write opcodes.
+ */
+static int qpic_send_pkt_sw(u32 cmd, u32 len, u8 *param)
+{
+	u32 bytes_left, space, data, cfg2;
+	int i, ret = 0;
+
+	if (len <= 4) {
+		len = (len + 3) / 4; /* len in dwords */
+		data = 0;
+		if (param) {
+			/*
+			 * NOTE(review): the loop bound is now the dword
+			 * count, but param[] is indexed by byte - for an
+			 * original len of 2..4 only param[0] is packed.
+			 * Confirm against the HW programming model.
+			 */
+			for (i = 0; i < len; i++)
+				data |= (u32)param[i] << (8 * i);
+		}
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, len);
+		QPIC_OUTP(QPIC_REG_LCD_DEVICE_CMD0 + (4 * cmd), data);
+		return 0;
+	}
+
+	/* FIFO writes are 16-bit minimum; round odd lengths up */
+	if ((len & 0x1) != 0) {
+		pr_debug("%s: number of bytes needs be even", __func__);
+		len = (len + 1) & (~0x1);
+	}
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, 0);
+	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	if ((cmd != OP_WRITE_MEMORY_START) &&
+		(cmd != OP_WRITE_MEMORY_CONTINUE))
+		cfg2 |= (1 << 24); /* transparent mode */
+	else
+		cfg2 &= ~(1 << 24);
+
+	cfg2 &= ~0xFF;
+	cfg2 |= cmd;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_SOF, 0x0);
+	bytes_left = len;
+
+	while (bytes_left > 0) {
+		/* block until the FIFO can take another burst */
+		ret = qpic_wait_for_fifo();
+		if (ret)
+			goto exit_send_cmd_sw;
+
+		space = 16;
+
+		while ((space > 0) && (bytes_left > 0)) {
+			/* write to fifo */
+			if (bytes_left >= 4) {
+				QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+					*(u32 *)param);
+				param += 4;
+				bytes_left -= 4;
+				space--;
+			} else if (bytes_left == 2) {
+				QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+					*(u16 *)param);
+				bytes_left -= 2;
+			}
+		}
+	}
+	/* finished */
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_EOF, 0x0);
+	ret = qpic_wait_for_eof();
+exit_send_cmd_sw:
+	cfg2 &= ~(1 << 24);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	return ret;
+}
+
+int qpic_send_pkt(u32 cmd, u8 *param, u32 len)
+{
+ if (!use_bam || ((cmd != OP_WRITE_MEMORY_CONTINUE) &&
+ (cmd != OP_WRITE_MEMORY_START)))
+ return qpic_send_pkt_sw(cmd, len, param);
+ else
+ return qpic_send_pkt_bam(cmd, len, param);
+}
+
/*
 * Bring the QPIC LCDC block to its operational configuration: reset the
 * block, program thresholds and enable bits in LCDC_CTRL, request the
 * interrupt (first call only), set pixel format in CFG2 and optionally
 * enable BAM mode and the TE/vsync input.
 *
 * Returns 0 on success or the devm_request_irq() error (the driver then
 * falls back to polling by clearing use_irq).
 */
int mdss_qpic_init(void)
{
	int ret = 0;
	u32 data;

	mdss_qpic_reset();

	pr_info("%s version=%x", __func__, QPIC_INP(QPIC_REG_LCDC_VERSION));
	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
	/* clear vsync wait , bam mode = 0*/
	data &= ~(3 << 0);
	data &= ~(0x1f << 3);
	data |= (1 << 3); /* threshold */
	data |= (1 << 8); /* lcd_en */
	data &= ~(0x1f << 9);
	data |= (1 << 9); /* threshold */
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);

	/* one-time IRQ setup; kept disabled until qpic_interrupt_en() */
	if (use_irq && (!qpic_res->irq_requested)) {
		/*
		 * NOTE(review): IRQF_DISABLED is a no-op/removed in modern
		 * kernels -- confirm this flag is still meaningful in this
		 * tree.  irq_requested is set even when the request fails,
		 * which is harmless only because use_irq is cleared too.
		 */
		ret = devm_request_irq(&qpic_res->pdev->dev,
			qpic_res->irq, qpic_irq_handler,
			IRQF_DISABLED, "QPIC", qpic_res);
		if (ret) {
			pr_err("qpic request_irq() failed!\n");
			use_irq = false;
		} else {
			disable_irq(qpic_res->irq);
		}
		qpic_res->irq_requested = true;
	}

	qpic_interrupt_en(use_irq);

	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG0, 0x02108501);
	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
	data &= ~(0xFFF);
	data |= 0x200; /* XRGB */
	data |= 0x2C;
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, data);

	if (use_bam) {
		qpic_init_sps(qpic_res->pdev, &qpic_res->qpic_endpt);
		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
		data |= (1 << 1); /* bam mode */
		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
	}
	/* TE enable */
	if (use_vsync) {
		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
		data |= (1 << 0);
		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
	}

	return ret;
}
+
+u32 qpic_read_data(u32 cmd_index, u32 size)
+{
+ u32 data = 0;
+
+ if (size <= 4) {
+ QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, size);
+ data = QPIC_INP(QPIC_REG_LCD_DEVICE_CMD0 + (cmd_index * 4));
+ }
+ return data;
+}
+
+static int msm_qpic_bus_register(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *use_cases;
+
+ use_cases = msm_bus_cl_get_pdata(pdev);
+ if (!use_cases) {
+ pr_err("msm_bus_cl_get_pdata failed\n");
+ return -EINVAL;
+ }
+ qpic_res->bus_handle =
+ msm_bus_scale_register_client(use_cases);
+ if (!qpic_res->bus_handle) {
+ ret = -EINVAL;
+ pr_err("msm_bus_scale_register_client failed\n");
+ }
+ return ret;
+}
+
/*
 * Platform probe for the QPIC display controller.
 *
 * Allocates the driver-global qpic_res, maps the "qpic_base" register
 * region, records the IRQ, grabs the core clocks, initializes panel
 * GPIO/regulator IO and registers the MDP/fb interface plus the bus
 * scale client.  DT-only: fails with -ENOTSUPP without an of_node.
 */
static int mdss_qpic_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc = 0;
	static struct msm_mdp_interface qpic_interface = {
		.init_fnc = mdss_qpic_overlay_init,
		.fb_mem_alloc_fnc = mdss_qpic_alloc_fb_mem,
		.fb_stride = mdss_qpic_fb_stride,
	};


	if (!pdev->dev.of_node) {
		pr_err("qpic driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	/* single global instance, shared by all entry points */
	if (!qpic_res)
		qpic_res = devm_kzalloc(&pdev->dev,
			sizeof(*qpic_res), GFP_KERNEL);

	if (!qpic_res)
		return -ENOMEM;

	/* res_init guards against a second probe of the same block */
	if (qpic_res->res_init) {
		pr_err("qpic already initialized\n");
		return -EINVAL;
	}

	pdev->id = 0;

	qpic_res->pdev = pdev;
	platform_set_drvdata(pdev, qpic_res);

	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "qpic_base");
	if (!res) {
		/* NOTE(review): -ENODEV would be more conventional here */
		pr_err("unable to get QPIC reg base address\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	qpic_res->qpic_reg_size = resource_size(res);
	qpic_res->qpic_base = devm_ioremap(&pdev->dev, res->start,
		qpic_res->qpic_reg_size);
	if (unlikely(!qpic_res->qpic_base)) {
		pr_err("unable to map MDSS QPIC base\n");
		rc = -ENOMEM;
		goto probe_done;
	}
	qpic_res->qpic_phys = res->start;
	pr_info("MDSS QPIC HW Base phy_Address=0x%x virt=0x%x\n",
		(int) res->start,
		(int) qpic_res->qpic_base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get QPIC irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	/*
	 * NOTE(review): non-devm clk_get() -- the clocks are not released
	 * on probe failure or driver removal; ERR_PTR values are stored
	 * and only logged.  Users must tolerate IS_ERR() handles.
	 */
	qpic_res->qpic_a_clk = clk_get(&pdev->dev, "core_a_clk");
	if (IS_ERR(qpic_res->qpic_a_clk))
		pr_err("%s: Can't find core_a_clk", __func__);

	qpic_res->qpic_clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(qpic_res->qpic_clk))
		pr_err("%s: Can't find core_clk", __func__);

	qpic_res->irq = res->start;
	qpic_res->res_init = true;

	mdss_qpic_panel_io_init(pdev, &qpic_res->panel_io);

	rc = mdss_fb_register_mdp_instance(&qpic_interface);
	if (rc)
		pr_err("unable to register QPIC instance\n");

	/* bus registration failure is logged inside, not fatal */
	msm_qpic_bus_register(pdev);
probe_done:
	return rc;
}
+
+static int mdss_qpic_remove(struct platform_device *pdev)
+{
+ if (qpic_res->bus_handle)
+ msm_bus_scale_unregister_client(qpic_res->bus_handle);
+ qpic_res->bus_handle = 0;
+ return 0;
+}
+
+static int __init mdss_qpic_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mdss_qpic_driver);
+ if (ret)
+ pr_err("mdss_qpic_register_driver() failed!\n");
+ return ret;
+}
+
+module_init(mdss_qpic_driver_init);
+
+
diff --git a/drivers/video/fbdev/msm/mdss_qpic.h b/drivers/video/fbdev/msm/mdss_qpic.h
new file mode 100644
index 0000000..5fc7f9d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_H
+#define MDSS_QPIC_H
+
+#include <linux/list.h>
+#include <linux/msm-sps.h>
+
+#include <linux/pinctrl/consumer.h>
+#include "mdss_panel.h"
+#include "mdss_qpic_panel.h"
+
+#define QPIC_REG_QPIC_LCDC_CTRL 0x22000
+#define QPIC_REG_LCDC_VERSION 0x22004
+#define QPIC_REG_QPIC_LCDC_IRQ_EN 0x22008
+#define QPIC_REG_QPIC_LCDC_IRQ_STTS 0x2200C
+#define QPIC_REG_QPIC_LCDC_IRQ_CLR 0x22010
+#define QPIC_REG_QPIC_LCDC_STTS 0x22014
+#define QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT 0x22018
+#define QPIC_REG_QPIC_LCDC_CFG0 0x22020
+#define QPIC_REG_QPIC_LCDC_CFG1 0x22024
+#define QPIC_REG_QPIC_LCDC_CFG2 0x22028
+#define QPIC_REG_QPIC_LCDC_RESET 0x2202C
+#define QPIC_REG_QPIC_LCDC_FIFO_SOF 0x22100
+#define QPIC_REG_LCD_DEVICE_CMD0 0x23000
+#define QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0 0x22140
+#define QPIC_REG_QPIC_LCDC_FIFO_EOF 0x22180
+
+#define QPIC_OUTP(off, data) \
+ writel_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_OUTPW(off, data) \
+ writew_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_INP(off) \
+ readl_relaxed(qpic_res->qpic_base + (off))
+
+#define QPIC_MAX_VSYNC_WAIT_TIME 500
+#define QPIC_MAX_CMD_BUF_SIZE 512
+
+int mdss_qpic_init(void);
+int qpic_send_pkt(u32 cmd, u8 *param, u32 len);
+u32 qpic_read_data(u32 cmd_index, u32 size);
+u32 msm_qpic_get_bam_hdl(struct sps_bam_props *bam);
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata,
+ struct qpic_panel_io_desc *panel_io);
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata,
+ struct qpic_panel_io_desc *panel_io);
+int qpic_register_panel(struct mdss_panel_data *pdata);
+
/* Structure that defines an SPS end point for a BAM pipe. */
struct qpic_sps_endpt {
	struct sps_pipe *handle;		/* SPS pipe handle */
	struct sps_connect config;		/* pipe connection settings */
	struct sps_register_event bam_event;	/* registered pipe event */
	struct completion completion;		/* presumably completed on
						 * transfer done -- see users
						 */
};
+
/* Driver-global state for the QPIC LCDC block (single instance). */
struct qpic_data_type {
	u32 rev;			/* hardware revision (unused here) */
	struct platform_device *pdev;	/* owning platform device */
	size_t qpic_reg_size;		/* size of mapped register region */
	u32 qpic_phys;			/* physical base of register region */
	char __iomem *qpic_base;	/* ioremapped base (QPIC_INP/OUTP) */
	u32 irq;			/* QPIC interrupt number */
	u32 irq_ena;			/* irq-enabled bookkeeping flag */
	u32 res_init;			/* set once probe mapped resources */
	void *fb_virt;			/* framebuffer VA (from name; set
					 * by fb alloc path -- confirm) */
	u32 fb_phys;			/* framebuffer PA */
	void *cmd_buf_virt;		/* command buffer VA */
	u32 cmd_buf_phys;		/* command buffer PA */
	struct qpic_sps_endpt qpic_endpt;	/* BAM/SPS pipe endpoint */
	u32 sps_init;			/* SPS pipe initialized flag */
	u32 irq_requested;		/* devm_request_irq() attempted */
	struct mdss_panel_data *panel_data;	/* registered panel */
	struct qpic_panel_io_desc panel_io;	/* panel GPIO/regulator IO */
	u32 bus_handle;			/* msm bus-scale client handle */
	struct completion fifo_eof_comp;	/* completed on EOF irq */
	u32 qpic_is_on;			/* power state flag */
	struct clk *qpic_clk;		/* "core_clk" */
	struct clk *qpic_a_clk;		/* "core_a_clk" */
};
+
+u32 qpic_send_frame(
+ u32 x_start,
+ u32 y_start,
+ u32 x_end,
+ u32 y_end,
+ u32 *data,
+ u32 total_bytes);
+
+u32 qpic_panel_get_framerate(void);
+
+#endif /* MDSS_QPIC_H */
diff --git a/drivers/video/fbdev/msm/mdss_qpic_panel.c b/drivers/video/fbdev/msm/mdss_qpic_panel.c
new file mode 100644
index 0000000..3637716
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic_panel.c
@@ -0,0 +1,305 @@
+/* Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <linux/msm-sps.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static u32 panel_is_on;
+static u32 panel_refresh_rate;
+
+static int (*qpic_panel_on)(struct qpic_panel_io_desc *qpic_panel_io);
+static void (*qpic_panel_off)(struct qpic_panel_io_desc *qpic_panel_io);
+
+static int mdss_qpic_pinctrl_init(struct platform_device *pdev,
+ struct qpic_panel_io_desc *qpic_panel_io);
+
/* Return the panel refresh rate parsed from DT ("qcom,refresh_rate"). */
u32 qpic_panel_get_framerate(void)
{
	return panel_refresh_rate;
}
+
+/* write a frame of pixels to a MIPI screen */
+u32 qpic_send_frame(u32 x_start,
+ u32 y_start,
+ u32 x_end,
+ u32 y_end,
+ u32 *data,
+ u32 total_bytes)
+{
+ u8 param[4];
+ u32 status;
+ u32 start_0_7;
+ u32 end_0_7;
+ u32 start_8_15;
+ u32 end_8_15;
+
+ /* convert to 16 bit representation */
+ x_start = x_start & 0xffff;
+ y_start = y_start & 0xffff;
+ x_end = x_end & 0xffff;
+ y_end = y_end & 0xffff;
+
+ /* set column/page */
+ start_0_7 = x_start & 0xff;
+ end_0_7 = x_end & 0xff;
+ start_8_15 = (x_start >> 8) & 0xff;
+ end_8_15 = (x_end >> 8) & 0xff;
+ param[0] = start_8_15;
+ param[1] = start_0_7;
+ param[2] = end_8_15;
+ param[3] = end_0_7;
+ status = qpic_send_pkt(OP_SET_COLUMN_ADDRESS, param, 4);
+ if (status) {
+ pr_err("Failed to set column address");
+ return status;
+ }
+
+ start_0_7 = y_start & 0xff;
+ end_0_7 = y_end & 0xff;
+ start_8_15 = (y_start >> 8) & 0xff;
+ end_8_15 = (y_end >> 8) & 0xff;
+ param[0] = start_8_15;
+ param[1] = start_0_7;
+ param[2] = end_8_15;
+ param[3] = end_0_7;
+ status = qpic_send_pkt(OP_SET_PAGE_ADDRESS, param, 4);
+ if (status) {
+ pr_err("Failed to set page address");
+ return status;
+ }
+
+ status = qpic_send_pkt(OP_WRITE_MEMORY_START, (u8 *)data, total_bytes);
+ if (status) {
+ pr_err("Failed to start memory write");
+ return status;
+ }
+ return 0;
+}
+
+static int mdss_qpic_pinctrl_init(struct platform_device *pdev,
+ struct qpic_panel_io_desc *qpic_panel_io)
+{
+ qpic_panel_io->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.pinctrl)) {
+ pr_err("%s: failed to get pinctrl\n", __func__);
+ return PTR_ERR(qpic_panel_io->pin_res.pinctrl);
+ }
+
+ qpic_panel_io->pin_res.gpio_state_active
+ = pinctrl_lookup_state(qpic_panel_io->pin_res.pinctrl,
+ MDSS_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.gpio_state_active))
+ pr_warn("%s: cannot get default pinstate\n", __func__);
+
+ qpic_panel_io->pin_res.gpio_state_suspend
+ = pinctrl_lookup_state(qpic_panel_io->pin_res.pinctrl,
+ MDSS_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.gpio_state_suspend))
+ pr_warn("%s: cannot get sleep pinstate\n", __func__);
+
+ return 0;
+}
+
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata,
+ struct qpic_panel_io_desc *panel_io)
+{
+ int rc = 0;
+
+ if (panel_is_on)
+ return 0;
+ mdss_qpic_init();
+
+ if (qpic_panel_on)
+ rc = qpic_panel_on(panel_io);
+ if (rc)
+ return rc;
+ panel_is_on = true;
+ return 0;
+}
+
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata,
+ struct qpic_panel_io_desc *panel_io)
+{
+ if (qpic_panel_off)
+ qpic_panel_off(panel_io);
+ panel_is_on = false;
+ return 0;
+}
+
+int mdss_qpic_panel_io_init(struct platform_device *pdev,
+ struct qpic_panel_io_desc *qpic_panel_io)
+{
+ int rc = 0;
+ struct device_node *np = pdev->dev.of_node;
+ int rst_gpio, cs_gpio, te_gpio, ad8_gpio, bl_gpio;
+ struct regulator *vdd_vreg;
+ struct regulator *avdd_vreg;
+
+ rc = mdss_qpic_pinctrl_init(pdev, qpic_panel_io);
+ if (rc)
+ pr_warn("%s: failed to get pin resources\n", __func__);
+
+ rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+ cs_gpio = of_get_named_gpio(np, "qcom,cs-gpio", 0);
+ ad8_gpio = of_get_named_gpio(np, "qcom,ad8-gpio", 0);
+ te_gpio = of_get_named_gpio(np, "qcom,te-gpio", 0);
+ bl_gpio = of_get_named_gpio(np, "qcom,bl-gpio", 0);
+
+ if (!gpio_is_valid(rst_gpio))
+ pr_warn("%s: reset gpio not specified\n", __func__);
+ else
+ qpic_panel_io->rst_gpio = rst_gpio;
+
+ if (!gpio_is_valid(cs_gpio))
+ pr_warn("%s: cs gpio not specified\n", __func__);
+ else
+ qpic_panel_io->cs_gpio = cs_gpio;
+
+ if (!gpio_is_valid(ad8_gpio))
+ pr_warn("%s: ad8 gpio not specified\n", __func__);
+ else
+ qpic_panel_io->ad8_gpio = ad8_gpio;
+
+ if (!gpio_is_valid(te_gpio))
+ pr_warn("%s: te gpio not specified\n", __func__);
+ else
+ qpic_panel_io->te_gpio = te_gpio;
+
+ if (!gpio_is_valid(bl_gpio))
+ pr_warn("%s: te gpio not specified\n", __func__);
+ else
+ qpic_panel_io->bl_gpio = bl_gpio;
+
+ vdd_vreg = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(vdd_vreg))
+ pr_err("%s could not get vdd,", __func__);
+ else
+ qpic_panel_io->vdd_vreg = vdd_vreg;
+
+ avdd_vreg = devm_regulator_get(&pdev->dev, "avdd");
+ if (IS_ERR(avdd_vreg))
+ pr_err("%s could not get avdd,", __func__);
+ else
+ qpic_panel_io->avdd_vreg = avdd_vreg;
+
+ return 0;
+}
+
+static int mdss_panel_parse_dt(struct platform_device *pdev,
+ struct mdss_panel_data *panel_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 res[6], tmp;
+ int rc;
+
+ rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+ if (rc) {
+ pr_err("%s:%d, panel resolution not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ pr_debug("panel res %d %d\n", res[0], res[1]);
+ panel_data->panel_info.xres = (!rc ? res[0] : 320);
+ panel_data->panel_info.yres = (!rc ? res[1] : 480);
+ rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+ if (rc) {
+ pr_err("%s:%d, panel bpp not specified\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ pr_debug("panel bpp %d\n", tmp);
+ panel_data->panel_info.bpp = (!rc ? tmp : 18);
+ of_property_read_u32(np, "qcom,refresh_rate", &panel_refresh_rate);
+
+ panel_data->panel_info.type = EBI2_PANEL;
+ panel_data->panel_info.pdest = DISPLAY_1;
+
+ return rc;
+}
+
+static int mdss_qpic_panel_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ static struct mdss_panel_data vendor_pdata;
+ static const char *panel_name;
+
+ pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+ if (!panel_name)
+ pr_info("%s:%d, panel name not specified\n",
+ __func__, __LINE__);
+ else
+ pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+
+ rc = mdss_panel_parse_dt(pdev, &vendor_pdata);
+ if (rc)
+ return rc;
+
+ /* select panel according to label */
+ if (panel_name && !strcmp(panel_name, "ili qvga lcdc panel")) {
+ qpic_panel_on = ili9341_on;
+ qpic_panel_off = ili9341_off;
+ } else {
+ /* select default panel driver */
+ pr_info("%s: select default panel driver\n", __func__);
+ qpic_panel_on = ili9341_on;
+ qpic_panel_off = ili9341_off;
+ }
+
+
+ rc = qpic_register_panel(&vendor_pdata);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
/* OF match table: binds this driver to "qcom,mdss-qpic-panel" nodes. */
static const struct of_device_id mdss_qpic_panel_match[] = {
	{.compatible = "qcom,mdss-qpic-panel"},
	{}
};

/* QPIC panel platform driver (probe only; no remove callback). */
static struct platform_driver this_driver = {
	.probe = mdss_qpic_panel_probe,
	.driver = {
		.name = "qpic_panel",
		.of_match_table = mdss_qpic_panel_match,
	},
};
+
/* Module entry point: register the QPIC panel platform driver. */
static int __init mdss_qpic_panel_init(void)
{
	return platform_driver_register(&this_driver);
}
MODULE_DEVICE_TABLE(of, mdss_qpic_panel_match);
module_init(mdss_qpic_panel_init);
diff --git a/drivers/video/fbdev/msm/mdss_qpic_panel.h b/drivers/video/fbdev/msm/mdss_qpic_panel.h
new file mode 100644
index 0000000..e570d76
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic_panel.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_PANEL_H
+#define MDSS_QPIC_PANEL_H
+
+#include <linux/list.h>
+#include <linux/msm-sps.h>
+
+#include "mdss_panel.h"
+
+#define LCDC_INTERNAL_BUFFER_SIZE 30
+
/**
 * Macros for coding MIPI commands
 */

/* Sentinel: size of argument to MIPI command is variable */
#define INV_SIZE	0xFFFF

/*
 * Build a MIPI {command, argument size} tuple: command in bits [31:16],
 * size in bits [15:0].  Arguments are parenthesized so compound
 * expressions (e.g. OP_SIZE_PAIR(a + b, n)) group correctly.
 */
#define OP_SIZE_PAIR(op, size)	(((op) << 16) | (size))

/* extract size from command identifier */
#define LCDC_EXTRACT_OP_SIZE(op_identifier)	((op_identifier) & 0xFFFF)

/* extract command id from command identifier */
#define LCDC_EXTRACT_OP_CMD(op_identifier)	(((op_identifier) >> 16) & 0xFFFF)
+
+
/* MIPI standard definitions */
+#define LCDC_ADDRESS_MODE_ORDER_BOTTOM_TO_TOP 0x80
+#define LCDC_ADDRESS_MODE_ORDER_RIGHT_TO_LEFT 0x40
+#define LCDC_ADDRESS_MODE_ORDER_REVERSE 0x20
+#define LCDC_ADDRESS_MODE_ORDER_REFRESH_BOTTOM_TO_TOP 0x10
+#define LCDC_ADDRESS_MODE_ORDER_BGER_RGB 0x08
+#define LCDC_ADDRESS_MODE_ORDER_REFERESH_RIGHT_TO_LEFT 0x04
+#define LCDC_ADDRESS_MODE_FLIP_HORIZONTAL 0x02
+#define LCDC_ADDRESS_MODE_FLIP_VERTICAL 0x01
+
+#define LCDC_PIXEL_FORMAT_3_BITS_PER_PIXEL 0x1
+#define LCDC_PIXEL_FORMAT_8_BITS_PER_PIXEL 0x2
+#define LCDC_PIXEL_FORMAT_12_BITS_PER_PIXEL 0x3
+#define LCDC_PIXEL_FORMAT_16_BITS_PER_PIXEL 0x5
+#define LCDC_PIXEL_FORMAT_18_BITS_PER_PIXEL 0x6
+#define LCDC_PIXEL_FORMAT_24_BITS_PER_PIXEL 0x7
+
/*
 * Build the DCS "set pixel format" (3Ah) argument: DPI format in bits
 * [6:4], DBI format in bits [2:0] (per the LCDC_PIXEL_FORMAT_* values
 * above).  The previous expression used dpi_format twice and ignored
 * dbi_format entirely; unchanged for the common case dpi == dbi.
 */
#define LCDC_CREATE_PIXEL_FORMAT(dpi_format, dbi_format) \
	(((dpi_format) << 4) | (dbi_format))
+
+#define POWER_MODE_IDLE_ON 0x40
+#define POWER_MODE_PARTIAL_ON 0x20
+#define POWER_MODE_SLEEP_ON 0x10
+#define POWER_MODE_NORMAL_ON 0x08
+#define POWER_MODE_DISPLAY_ON 0x04
+
+#define LCDC_DISPLAY_MODE_SCROLLING_ON 0x80
+#define LCDC_DISPLAY_MODE_INVERSION_ON 0x20
+#define LCDC_DISPLAY_MODE_GAMMA_MASK 0x07
+
+/**
+ * LDCc MIPI Type B supported commands
+ */
+#define OP_ENTER_IDLE_MODE 0x39
+#define OP_ENTER_INVERT_MODE 0x21
+#define OP_ENTER_NORMAL_MODE 0x13
+#define OP_ENTER_PARTIAL_MODE 0x12
+#define OP_ENTER_SLEEP_MODE 0x10
+#define OP_EXIT_INVERT_MODE 0x20
+#define OP_EXIT_SLEEP_MODE 0x11
+#define OP_EXIT_IDLE_MODE 0x38
+#define OP_GET_ADDRESS_MODE 0x0B /* size 1 */
+#define OP_GET_BLUE_CHANNEL 0x08 /* size 1 */
+#define OP_GET_DIAGNOSTIC 0x0F /* size 2 */
+#define OP_GET_DISPLAY_MODE 0x0D /* size 1 */
+#define OP_GET_GREEN_CHANNEL 0x07 /* size 1 */
+#define OP_GET_PIXEL_FORMAT 0x0C /* size 1 */
+#define OP_GET_POWER_MODE 0x0A /* size 1 */
+#define OP_GET_RED_CHANNEL 0x06 /* size 1 */
+#define OP_GET_SCANLINE 0x45 /* size 1 */
+#define OP_GET_SIGNAL_MODE 0x0E /* size 1 */
+#define OP_NOP 0x00
+#define OP_READ_DDB_CONTINUE 0xA8 /* size not fixed */
+#define OP_READ_DDB_START 0xA1 /* size not fixed */
+#define OP_READ_MEMORY_CONTINUE 0x3E /* size not fixed */
+#define OP_READ_MEMORY_START 0x2E /* size not fixed */
+#define OP_SET_ADDRESS_MODE 0x36 /* size 1 */
+#define OP_SET_COLUMN_ADDRESS 0x2A /* size 4 */
+#define OP_SET_DISPLAY_OFF 0x28
+#define OP_SET_DISPLAY_ON 0x29
+#define OP_SET_GAMMA_CURVE 0x26 /* size 1 */
+#define OP_SET_PAGE_ADDRESS 0x2B /* size 4 */
+#define OP_SET_PARTIAL_COLUMNS 0x31 /* size 4 */
+#define OP_SET_PARTIAL_ROWS 0x30 /* size 4 */
+#define OP_SET_PIXEL_FORMAT 0x3A /* size 1 */
+#define OP_SOFT_RESET 0x01
+#define OP_WRITE_MEMORY_CONTINUE 0x3C /* size not fixed */
+#define OP_WRITE_MEMORY_START 0x2C /* size not fixed */
+
+/**
+ * ILI9341 commands
+ */
+#define OP_ILI9341_INTERFACE_CONTROL 0xf6
+#define OP_ILI9341_TEARING_EFFECT_LINE_ON 0x35
+
/* Pinctrl handle plus the active/suspend states looked up at init. */
struct qpic_pinctrl_res {
	struct pinctrl *pinctrl;			/* devm pinctrl handle */
	struct pinctrl_state *gpio_state_active;	/* MDSS_PINCTRL_STATE_DEFAULT */
	struct pinctrl_state *gpio_state_suspend;	/* MDSS_PINCTRL_STATE_SLEEP */
};
+
/* Per-panel GPIO, regulator and pinctrl resources parsed from DT. */
struct qpic_panel_io_desc {
	int rst_gpio;			/* "qcom,rst-gpio": panel reset */
	int cs_gpio;			/* "qcom,cs-gpio": chip select */
	int ad8_gpio;			/* "qcom,ad8-gpio" */
	int te_gpio;			/* "qcom,te-gpio": tearing effect */
	int bl_gpio;			/* "qcom,bl-gpio": backlight */
	struct regulator *vdd_vreg;	/* "vdd" supply */
	struct regulator *avdd_vreg;	/* "avdd" supply */
	u32 init;			/* init-done flag */
	struct qpic_pinctrl_res pin_res;	/* pinctrl states */
};
+
+int mdss_qpic_panel_io_init(struct platform_device *pdev,
+ struct qpic_panel_io_desc *qpic_panel_io);
+u32 qpic_panel_get_cmd(u32 command, u32 size);
+int ili9341_on(struct qpic_panel_io_desc *qpic_panel_io);
+void ili9341_off(struct qpic_panel_io_desc *qpic_panel_io);
+
+#endif /* MDSS_QPIC_PANEL_H */
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
new file mode 100644
index 0000000..2dc9a1f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -0,0 +1,3025 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/regulator/consumer.h>
+
+#include "mdss_rotator_internal.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+#include "mdss_sync.h"
+
+/* waiting for hw time out, 3 vsync for 30fps*/
+#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
+
+/* acquire fence time out, following other driver fence time out practice */
+#define ROT_FENCE_WAIT_TIMEOUT MSEC_PER_SEC
+/*
+ * Max rotator hw blocks possible. Used for upper array limits instead of
+ * alloc and freeing small array
+ */
+#define ROT_MAX_HW_BLOCKS 2
+
+#define ROT_CHECK_BOUNDS(offset, size, max_size) \
+ (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+#define CLASS_NAME "rotator"
+#define DRIVER_NAME "mdss_rotator"
+
+#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_AMPSS_M0, \
+ .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
/* ib value matching a 19.2 MHz register clock (per the macro name) */
#define BUS_VOTE_19_MHZ 153600000

/* Two register-bus usecases: index 0 = off, index 1 = 19 MHz vote */
static struct msm_bus_vectors rot_reg_bus_vectors[] = {
	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
};
static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
	rot_reg_bus_vectors)];
/* active_only: this vote is dropped automatically when the CPU idles */
static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
	.usecase = rot_reg_bus_usecases,
	.num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
	.name = "mdss_rot_reg",
	.active_only = 1,
};
+
+static struct mdss_rot_mgr *rot_mgr;
+static void mdss_rotator_wq_handler(struct work_struct *work);
+
/*
 * Vote @quota bytes/s of rotator data-bus bandwidth.
 *
 * quota == 0 selects usecase 0 (no vote); otherwise the ab values of
 * the next non-zero usecase slot are rewritten (quota split evenly
 * across the AXI ports) and that slot is voted.
 *
 * Returns 0 on success or a negative errno.
 */
static int mdss_rotator_bus_scale_set_quota(struct mdss_rot_bus_data_type *bus,
		u64 quota)
{
	int new_uc_idx;
	int ret;

	if (bus->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", bus->bus_hdl);
		return -EINVAL;
	}

	/* avoid a redundant bus call when the vote is unchanged */
	if (bus->curr_quota_val == quota) {
		pr_debug("bw request already requested\n");
		return 0;
	}

	if (!quota) {
		new_uc_idx = 0;
	} else {
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			bus->bus_scale_pdata;
		u64 port_quota = quota;
		u32 total_axi_port_cnt;
		int i;

		/*
		 * Rotate through the non-zero usecase slots (NOTE(review):
		 * presumably so the bus driver always sees an index change
		 * -- confirm).
		 */
		new_uc_idx = (bus->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
		if (total_axi_port_cnt == 0) {
			pr_err("Number of bw paths is 0\n");
			return -ENODEV;
		}
		/* split the quota evenly across the AXI ports */
		do_div(port_quota, total_axi_port_cnt);

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = port_quota;
			vect->ib = 0;
		}
	}
	bus->curr_bw_uc_idx = new_uc_idx;
	bus->curr_quota_val = quota;

	pr_debug("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
	MDSS_XLOG(new_uc_idx, ((quota >> 32) & 0xFFFFFFFF),
		(quota & 0xFFFFFFFF));
	ATRACE_BEGIN("msm_bus_scale_req_rot");
	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
		new_uc_idx);
	ATRACE_END("msm_bus_scale_req_rot");
	return ret;
}
+
+static int mdss_rotator_enable_reg_bus(struct mdss_rot_mgr *mgr, u64 quota)
+{
+ int ret = 0, changed = 0;
+ u32 usecase_ndx = 0;
+
+ if (!mgr || !mgr->reg_bus.bus_hdl)
+ return 0;
+
+ if (quota)
+ usecase_ndx = 1;
+
+ if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) {
+ mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
+ changed++;
+ }
+
+ pr_debug("%s, changed=%d register bus %s\n", __func__, changed,
+ quota ? "Enable":"Disable");
+
+ if (changed) {
+ ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
+ ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
+ usecase_ndx);
+ ATRACE_END("msm_bus_scale_req_rot_reg");
+ }
+
+ return ret;
+}
+
/*
 * Clock rate of all open sessions working a particular hw block
 * are added together to get the required rate for that hw block.
 * The max of each hw block becomes the final clock rate voted for
 */
static unsigned long mdss_rotator_clk_rate_calc(
	struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private)
{
	struct mdss_rot_perf *perf;
	unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0};
	unsigned long total_clk_rate = 0;
	int i, wb_idx;

	/* lock order: private->perf_lock, then each perf->work_dis_lock */
	mutex_lock(&private->perf_lock);
	list_for_each_entry(perf, &private->perf_list, list) {
		bool rate_accounted_for = false;

		mutex_lock(&perf->work_dis_lock);
		/*
		 * If there is one session that has two work items across
		 * different hw blocks rate is accounted for in both blocks.
		 */
		for (i = 0; i < mgr->queue_count; i++) {
			if (perf->work_distribution[i]) {
				clk_rate[i] += perf->clk_rate;
				rate_accounted_for = true;
			}
		}

		/*
		 * Sessions that are open but not distributed on any hw block
		 * Still need to be accounted for. Rate is added to last known
		 * wb idx.
		 */
		wb_idx = perf->last_wb_idx;
		if ((!rate_accounted_for) && (wb_idx >= 0) &&
			(wb_idx < mgr->queue_count))
			clk_rate[wb_idx] += perf->clk_rate;
		mutex_unlock(&perf->work_dis_lock);
	}
	mutex_unlock(&private->perf_lock);

	/* vote the highest per-block aggregate */
	for (i = 0; i < mgr->queue_count; i++)
		total_clk_rate = max(clk_rate[i], total_clk_rate);

	pr_debug("Total clk rate calc=%lu\n", total_clk_rate);
	return total_clk_rate;
}
+
+static struct clk *mdss_rotator_get_clk(struct mdss_rot_mgr *mgr, u32 clk_idx)
+{
+ if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
+ pr_err("Invalid clk index:%u", clk_idx);
+ return NULL;
+ }
+
+ return mgr->rot_clk[clk_idx];
+}
+
/*
 * Round @rate to a supported frequency and apply it to the selected
 * rotator clock under clk_lock.  A no-op (plus error log) when the
 * clock index is invalid; skips clk_set_rate() when the rounded rate
 * already matches the current one.
 */
static void mdss_rotator_set_clk_rate(struct mdss_rot_mgr *mgr,
		unsigned long rate, u32 clk_idx)
{
	unsigned long clk_rate;
	struct clk *clk = mdss_rotator_get_clk(mgr, clk_idx);
	int ret;

	if (clk) {
		mutex_lock(&mgr->clk_lock);
		/*
		 * NOTE(review): clk_round_rate() returns (signed) long;
		 * the error is detected via IS_ERR_VALUE() on the unsigned
		 * copy -- confirm this matches the tree's IS_ERR_VALUE
		 * semantics.
		 */
		clk_rate = clk_round_rate(clk, rate);
		if (IS_ERR_VALUE(clk_rate)) {
			pr_err("unable to round rate err=%ld\n", clk_rate);
		} else if (clk_rate != clk_get_rate(clk)) {
			ret = clk_set_rate(clk, clk_rate);
			if (IS_ERR_VALUE((unsigned long)ret)) {
				pr_err("clk_set_rate failed, err:%d\n", ret);
			} else {
				pr_debug("rotator clk rate=%lu\n", clk_rate);
				MDSS_XLOG(clk_rate);
			}
		}
		mutex_unlock(&mgr->clk_lock);
	} else {
		pr_err("rotator clk not setup properly\n");
	}
}
+
+static void mdss_rotator_footswitch_ctrl(struct mdss_rot_mgr *mgr, bool on)
+{
+ int ret;
+
+ if (mgr->regulator_enable == on) {
+ pr_err("Regulators already in selected mode on=%d\n", on);
+ return;
+ }
+
+ pr_debug("%s: rotator regulators", on ? "Enable" : "Disable");
+ ret = msm_mdss_enable_vreg(mgr->module_power.vreg_config,
+ mgr->module_power.num_vreg, on);
+ if (ret) {
+ pr_warn("Rotator regulator failed to %s\n",
+ on ? "enable" : "disable");
+ return;
+ }
+
+ mgr->regulator_enable = on;
+}
+
/*
 * Reference-counted enable/disable of all rotator clocks.
 *
 * Clocks are actually toggled only on the 0->1 / 1->0 transitions of
 * rot_enable_clk_cnt.  On each transition the data-bus vote context is
 * switched: Active+Sleep while the clocks run, Active Only otherwise.
 * Lock order: clk_lock, then bus_lock.
 *
 * Returns 0 on success or the first failing clk_prepare_enable() error
 * (already-enabled clocks are rolled back).
 */
static int mdss_rotator_clk_ctrl(struct mdss_rot_mgr *mgr, int enable)
{
	struct clk *clk;
	int ret = 0;
	int i, changed = 0;

	mutex_lock(&mgr->clk_lock);
	if (enable) {
		if (mgr->rot_enable_clk_cnt == 0)
			changed++;
		mgr->rot_enable_clk_cnt++;
	} else {
		if (mgr->rot_enable_clk_cnt) {
			mgr->rot_enable_clk_cnt--;
			if (mgr->rot_enable_clk_cnt == 0)
				changed++;
		} else {
			/* unbalanced disable: log and ignore */
			pr_err("Can not be turned off\n");
		}
	}

	if (changed) {
		pr_debug("Rotator clk %s\n", enable ? "enable" : "disable");
		for (i = 0; i < MDSS_CLK_ROTATOR_END_IDX; i++) {
			clk = mgr->rot_clk[i];
			if (enable) {
				ret = clk_prepare_enable(clk);
				if (ret) {
					pr_err("enable failed clk_idx %d\n", i);
					goto error;
				}
			} else {
				clk_disable_unprepare(clk);
			}
		}
		mutex_lock(&mgr->bus_lock);
		if (enable) {
			/* Active+Sleep */
			msm_bus_scale_client_update_context(
				mgr->data_bus.bus_hdl, false,
				mgr->data_bus.curr_bw_uc_idx);
			trace_rotator_bw_ao_as_context(0);
		} else {
			/* Active Only */
			msm_bus_scale_client_update_context(
				mgr->data_bus.bus_hdl, true,
				mgr->data_bus.curr_bw_uc_idx);
			trace_rotator_bw_ao_as_context(1);
		}
		mutex_unlock(&mgr->bus_lock);
	}
	mutex_unlock(&mgr->clk_lock);

	return ret;
error:
	/* roll back the clocks enabled before the failure */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(mgr->rot_clk[i]);
	mutex_unlock(&mgr->clk_lock);
	return ret;
}
+
+int mdss_rotator_resource_ctrl(struct mdss_rot_mgr *mgr, int enable)
+{
+ int changed = 0;
+ int ret = 0;
+
+ mutex_lock(&mgr->clk_lock);
+ if (enable) {
+ if (mgr->res_ref_cnt == 0)
+ changed++;
+ mgr->res_ref_cnt++;
+ } else {
+ if (mgr->res_ref_cnt) {
+ mgr->res_ref_cnt--;
+ if (mgr->res_ref_cnt == 0)
+ changed++;
+ } else {
+ pr_err("Rot resource already off\n");
+ }
+ }
+
+ pr_debug("%s: res_cnt=%d changed=%d enable=%d\n",
+ __func__, mgr->res_ref_cnt, changed, enable);
+ MDSS_XLOG(mgr->res_ref_cnt, changed, enable);
+
+ if (changed) {
+ if (enable)
+ mdss_rotator_footswitch_ctrl(mgr, true);
+ else
+ mdss_rotator_footswitch_ctrl(mgr, false);
+ }
+ mutex_unlock(&mgr->clk_lock);
+ return ret;
+}
+
+/* caller is expected to hold perf->work_dis_lock lock */
+static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_perf *perf)
+{
+ int i;
+
+ for (i = 0; i < mgr->queue_count; i++) {
+ if (perf->work_distribution[i]) {
+ pr_debug("Work is still scheduled to complete\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
+{
+ int ret = 0, fd;
+ u32 val;
+ struct mdss_fence *fence;
+ struct mdss_rot_timeline *rot_timeline;
+
+ if (!entry->queue)
+ return -EINVAL;
+
+ rot_timeline = &entry->queue->timeline;
+
+ mutex_lock(&rot_timeline->lock);
+ val = 1;
+
+ fence = mdss_get_sync_fence(rot_timeline->timeline,
+ rot_timeline->fence_name, NULL, val);
+ if (fence == NULL) {
+ pr_err("cannot create sync point\n");
+ goto sync_pt_create_err;
+ }
+ fd = mdss_get_sync_fence_fd(fence);
+ if (fd < 0) {
+ pr_err("get_unused_fd_flags failed error:0x%x\n", fd);
+ ret = fd;
+ goto get_fd_err;
+ }
+
+ rot_timeline->next_value++;
+ mutex_unlock(&rot_timeline->lock);
+
+ entry->output_fence_fd = fd;
+ entry->output_fence = fence;
+ pr_debug("output sync point created at %s:val=%u\n",
+ mdss_get_sync_fence_name(fence), val);
+
+ return 0;
+
+get_fd_err:
+ mdss_put_sync_fence(fence);
+sync_pt_create_err:
+ mutex_unlock(&rot_timeline->lock);
+ return ret;
+}
+
+static void mdss_rotator_clear_fence(struct mdss_rot_entry *entry)
+{
+ struct mdss_rot_timeline *rot_timeline;
+
+ if (entry->input_fence) {
+ mdss_put_sync_fence(entry->input_fence);
+ entry->input_fence = NULL;
+ }
+
+ rot_timeline = &entry->queue->timeline;
+
+ /* fence failed to copy to user space */
+ if (entry->output_fence) {
+ mdss_put_sync_fence(entry->output_fence);
+ entry->output_fence = NULL;
+ put_unused_fd(entry->output_fence_fd);
+
+ mutex_lock(&rot_timeline->lock);
+ rot_timeline->next_value--;
+ mutex_unlock(&rot_timeline->lock);
+ }
+}
+
+static int mdss_rotator_signal_output(struct mdss_rot_entry *entry)
+{
+ struct mdss_rot_timeline *rot_timeline;
+
+ if (!entry->queue)
+ return -EINVAL;
+
+ rot_timeline = &entry->queue->timeline;
+
+ if (entry->output_signaled) {
+ pr_debug("output already signaled\n");
+ return 0;
+ }
+
+ mutex_lock(&rot_timeline->lock);
+ mdss_inc_timeline(rot_timeline->timeline, 1);
+ mutex_unlock(&rot_timeline->lock);
+
+ entry->output_signaled = true;
+
+ return 0;
+}
+
+static int mdss_rotator_wait_for_input(struct mdss_rot_entry *entry)
+{
+ int ret;
+
+ if (!entry->input_fence) {
+ pr_debug("invalid input fence, no wait\n");
+ return 0;
+ }
+
+ ret = mdss_wait_sync_fence(entry->input_fence, ROT_FENCE_WAIT_TIMEOUT);
+ mdss_put_sync_fence(entry->input_fence);
+ entry->input_fence = NULL;
+ return ret;
+}
+
+static int mdss_rotator_import_buffer(struct mdp_layer_buffer *buffer,
+ struct mdss_mdp_data *data, u32 flags, struct device *dev, bool input)
+{
+ int i, ret = 0;
+ struct msmfb_data planes[MAX_PLANES];
+ int dir = DMA_TO_DEVICE;
+
+ if (!input)
+ dir = DMA_FROM_DEVICE;
+
+ memset(planes, 0, sizeof(planes));
+
+ if (buffer->plane_count > MAX_PLANES) {
+ pr_err("buffer plane_count exceeds MAX_PLANES limit:%d\n",
+ buffer->plane_count);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < buffer->plane_count; i++) {
+ planes[i].memory_id = buffer->planes[i].fd;
+ planes[i].offset = buffer->planes[i].offset;
+ }
+
+ ret = mdss_mdp_data_get_and_validate_size(data, planes,
+ buffer->plane_count, flags, dev, true, dir, buffer);
+ data->state = MDP_BUF_STATE_READY;
+ data->last_alloc = local_clock();
+
+ return ret;
+}
+
/*
 * mdss_rotator_map_and_check_data() - map the entry's src/dst buffers and
 * validate their sizes against the configured formats.
 *
 * Takes an iommu vote for the duration, maps both buffers, then checks each
 * buffer against the plane sizes implied by its format and dimensions.
 * On failure the buffers remain mapped; per the comment below, the caller
 * is responsible for releasing the data.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mdss_rotator_map_and_check_data(struct mdss_rot_entry *entry)
{
	int ret;
	struct mdp_layer_buffer *input;
	struct mdp_layer_buffer *output;
	struct mdss_mdp_format_params *fmt;
	struct mdss_mdp_plane_sizes ps;
	bool rotation;

	input = &entry->item.input;
	output = &entry->item.output;

	rotation = (entry->item.flags & MDP_ROTATION_90) ? true : false;

	ATRACE_BEGIN(__func__);
	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		ATRACE_END(__func__);
		return ret;
	}

	/* if error during map, the caller will release the data */
	entry->src_buf.state = MDP_BUF_STATE_ACTIVE;
	ret = mdss_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE);
	if (ret) {
		pr_err("source buffer mapping failed ret:%d\n", ret);
		goto end;
	}

	entry->dst_buf.state = MDP_BUF_STATE_ACTIVE;
	ret = mdss_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE);
	if (ret) {
		pr_err("destination buffer mapping failed ret:%d\n", ret);
		goto end;
	}

	fmt = mdss_mdp_get_format_params(input->format);
	if (!fmt) {
		pr_err("invalid input format:%d\n", input->format);
		ret = -EINVAL;
		goto end;
	}

	/* plane sizes depend on whether a 90-degree rotation is requested */
	ret = mdss_mdp_get_plane_sizes(
			fmt, input->width, input->height, &ps, 0, rotation);
	if (ret) {
		pr_err("fail to get input plane size ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_data_check(&entry->src_buf, &ps, fmt);
	if (ret) {
		pr_err("fail to check input data ret=%d\n", ret);
		goto end;
	}

	fmt = mdss_mdp_get_format_params(output->format);
	if (!fmt) {
		pr_err("invalid output format:%d\n", output->format);
		ret = -EINVAL;
		goto end;
	}

	ret = mdss_mdp_get_plane_sizes(
			fmt, output->width, output->height, &ps, 0, rotation);
	if (ret) {
		pr_err("fail to get output plane size ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_data_check(&entry->dst_buf, &ps, fmt);
	if (ret) {
		pr_err("fail to check output data ret=%d\n", ret);
		goto end;
	}

end:
	/* always drop the iommu vote taken above */
	mdss_iommu_ctrl(0);
	ATRACE_END(__func__);

	return ret;
}
+
+static struct mdss_rot_perf *__mdss_rotator_find_session(
+ struct mdss_rot_file_private *private,
+ u32 session_id)
+{
+ struct mdss_rot_perf *perf, *perf_next;
+ bool found = false;
+
+ list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
+ if (perf->config.session_id == session_id) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ perf = NULL;
+ return perf;
+}
+
/*
 * mdss_rotator_find_session() - locked wrapper around
 * __mdss_rotator_find_session(); takes perf_lock for the lookup.
 * Returns the matching perf node or NULL.
 */
static struct mdss_rot_perf *mdss_rotator_find_session(
	struct mdss_rot_file_private *private,
	u32 session_id)
{
	struct mdss_rot_perf *perf;

	mutex_lock(&private->perf_lock);
	perf = __mdss_rotator_find_session(private, session_id);
	mutex_unlock(&private->perf_lock);
	return perf;
}
+
+static void mdss_rotator_release_data(struct mdss_rot_entry *entry)
+{
+ struct mdss_mdp_data *src_buf = &entry->src_buf;
+ struct mdss_mdp_data *dst_buf = &entry->dst_buf;
+
+ mdss_mdp_data_free(src_buf, true, DMA_TO_DEVICE);
+ src_buf->last_freed = local_clock();
+ src_buf->state = MDP_BUF_STATE_UNUSED;
+
+ mdss_mdp_data_free(dst_buf, true, DMA_FROM_DEVICE);
+ dst_buf->last_freed = local_clock();
+ dst_buf->state = MDP_BUF_STATE_UNUSED;
+}
+
+static int mdss_rotator_import_data(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_entry *entry)
+{
+ int ret;
+ struct mdp_layer_buffer *input;
+ struct mdp_layer_buffer *output;
+ u32 flag = 0;
+
+ input = &entry->item.input;
+ output = &entry->item.output;
+
+ if (entry->item.flags & MDP_ROTATION_SECURE)
+ flag = MDP_SECURE_OVERLAY_SESSION;
+
+ ret = mdss_rotator_import_buffer(input, &entry->src_buf, flag,
+ &mgr->pdev->dev, true);
+ if (ret) {
+ pr_err("fail to import input buffer\n");
+ return ret;
+ }
+
+ /*
+ * driver assumes output buffer is ready to be written
+ * immediately
+ */
+ ret = mdss_rotator_import_buffer(output, &entry->dst_buf, flag,
+ &mgr->pdev->dev, false);
+ if (ret) {
+ pr_err("fail to import output buffer\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct mdss_rot_hw_resource *mdss_rotator_hw_alloc(
+ struct mdss_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
+{
+ struct mdss_rot_hw_resource *hw;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 pipe_ndx, offset = mdss_mdp_get_wb_ctl_support(mdata, true);
+ int ret = 0;
+
+ hw = devm_kzalloc(&mgr->pdev->dev, sizeof(struct mdss_rot_hw_resource),
+ GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ hw->ctl = mdss_mdp_ctl_alloc(mdata, offset);
+ if (IS_ERR_OR_NULL(hw->ctl)) {
+ pr_err("unable to allocate ctl\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (wb_id == MDSS_ROTATION_HW_ANY)
+ hw->wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, hw->ctl->num);
+ else
+ hw->wb = mdss_mdp_wb_assign(wb_id, hw->ctl->num);
+
+ if (IS_ERR_OR_NULL(hw->wb)) {
+ pr_err("unable to allocate wb\n");
+ ret = -ENODEV;
+ goto error;
+ }
+ hw->ctl->wb = hw->wb;
+ hw->mixer = mdss_mdp_mixer_assign(hw->wb->num, true, true);
+
+ if (IS_ERR_OR_NULL(hw->mixer)) {
+ pr_err("unable to allocate wb mixer\n");
+ ret = -ENODEV;
+ goto error;
+ }
+ hw->ctl->mixer_left = hw->mixer;
+ hw->mixer->ctl = hw->ctl;
+
+ hw->mixer->rotator_mode = true;
+
+ switch (hw->mixer->num) {
+ case MDSS_MDP_WB_LAYERMIXER0:
+ hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
+ break;
+ case MDSS_MDP_WB_LAYERMIXER1:
+ hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE;
+ break;
+ default:
+ pr_err("invalid layer mixer=%d\n", hw->mixer->num);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hw->ctl->ops.start_fnc = mdss_mdp_writeback_start;
+ hw->ctl->power_state = MDSS_PANEL_POWER_ON;
+ hw->ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
+
+
+ if (hw->ctl->ops.start_fnc)
+ ret = hw->ctl->ops.start_fnc(hw->ctl);
+
+ if (ret)
+ goto error;
+
+ if (pipe_id >= mdata->ndma_pipes)
+ goto error;
+
+ pipe_ndx = mdata->dma_pipes[pipe_id].ndx;
+ hw->pipe = mdss_mdp_pipe_assign(mdata, hw->mixer,
+ pipe_ndx, MDSS_MDP_PIPE_RECT0);
+ if (IS_ERR_OR_NULL(hw->pipe)) {
+ pr_err("dma pipe allocation failed\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hw->pipe->mixer_left = hw->mixer;
+ hw->pipe_id = hw->wb->num;
+ hw->wb_id = hw->wb->num;
+
+ return hw;
+error:
+ if (!IS_ERR_OR_NULL(hw->pipe))
+ mdss_mdp_pipe_destroy(hw->pipe);
+ if (!IS_ERR_OR_NULL(hw->ctl)) {
+ if (hw->ctl->ops.stop_fnc)
+ hw->ctl->ops.stop_fnc(hw->ctl, MDSS_PANEL_POWER_OFF);
+ mdss_mdp_ctl_free(hw->ctl);
+ }
+ devm_kfree(&mgr->pdev->dev, hw);
+
+ return ERR_PTR(ret);
+}
+
/*
 * mdss_rotator_free_hw() - release hw allocated by mdss_rotator_hw_alloc().
 * Destroys the DMA pipe first, then stops and frees the writeback ctl
 * after switching the mixer's ctl back to the block writeback type.
 */
static void mdss_rotator_free_hw(struct mdss_rot_mgr *mgr,
	struct mdss_rot_hw_resource *hw)
{
	struct mdss_mdp_mixer *mixer;
	struct mdss_mdp_ctl *ctl;

	/* cache the mixer before the pipe (which references it) is gone */
	mixer = hw->pipe->mixer_left;

	mdss_mdp_pipe_destroy(hw->pipe);

	ctl = mdss_mdp_ctl_mixer_switch(mixer->ctl,
		MDSS_MDP_WB_CTL_TYPE_BLOCK);
	if (ctl) {
		if (ctl->ops.stop_fnc)
			ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);
		mdss_mdp_ctl_free(ctl);
	}

	devm_kfree(&mgr->pdev->dev, hw);
}
+
+struct mdss_rot_hw_resource *mdss_rotator_get_hw_resource(
+ struct mdss_rot_queue *queue, struct mdss_rot_entry *entry)
+{
+ struct mdss_rot_hw_resource *hw = queue->hw;
+
+ if (!hw) {
+ pr_err("no hw in the queue\n");
+ return NULL;
+ }
+
+ mutex_lock(&queue->hw_lock);
+
+ if (hw->workload) {
+ hw = ERR_PTR(-EBUSY);
+ goto get_hw_resource_err;
+ }
+ hw->workload = entry;
+
+get_hw_resource_err:
+ mutex_unlock(&queue->hw_lock);
+ return hw;
+}
+
/* Release hw ownership; pairs with mdss_rotator_get_hw_resource(). */
static void mdss_rotator_put_hw_resource(struct mdss_rot_queue *queue,
	struct mdss_rot_hw_resource *hw)
{
	mutex_lock(&queue->hw_lock);
	hw->workload = NULL;
	mutex_unlock(&queue->hw_lock);
}
+
+/*
+ * caller will need to call mdss_rotator_deinit_queue when
+ * the function returns error
+ */
+static int mdss_rotator_init_queue(struct mdss_rot_mgr *mgr)
+{
+ int i, size, ret = 0;
+ char name[32];
+
+ size = sizeof(struct mdss_rot_queue) * mgr->queue_count;
+ mgr->queues = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+ if (!mgr->queues)
+ return -ENOMEM;
+
+ for (i = 0; i < mgr->queue_count; i++) {
+ snprintf(name, sizeof(name), "rot_workq_%d", i);
+ pr_debug("work queue name=%s\n", name);
+ mgr->queues[i].rot_work_queue = alloc_ordered_workqueue("%s",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
+ if (!mgr->queues[i].rot_work_queue) {
+ ret = -EPERM;
+ break;
+ }
+
+ snprintf(name, sizeof(name), "rot_timeline_%d", i);
+ pr_debug("timeline name=%s\n", name);
+ mgr->queues[i].timeline.timeline =
+ mdss_create_timeline(name);
+ if (!mgr->queues[i].timeline.timeline) {
+ ret = -EPERM;
+ break;
+ }
+
+ size = sizeof(mgr->queues[i].timeline.fence_name);
+ snprintf(mgr->queues[i].timeline.fence_name, size,
+ "rot_fence_%d", i);
+ mutex_init(&mgr->queues[i].timeline.lock);
+
+ mutex_init(&mgr->queues[i].hw_lock);
+ }
+
+ return ret;
+}
+
+static void mdss_rotator_deinit_queue(struct mdss_rot_mgr *mgr)
+{
+ int i;
+
+ if (!mgr->queues)
+ return;
+
+ for (i = 0; i < mgr->queue_count; i++) {
+ if (mgr->queues[i].rot_work_queue)
+ destroy_workqueue(mgr->queues[i].rot_work_queue);
+
+ if (mgr->queues[i].timeline.timeline) {
+ struct mdss_timeline *obj;
+
+ obj = (struct mdss_timeline *)
+ mgr->queues[i].timeline.timeline;
+ mdss_destroy_timeline(obj);
+ }
+ }
+ devm_kfree(&mgr->pdev->dev, mgr->queues);
+ mgr->queue_count = 0;
+}
+
+/*
+ * mdss_rotator_assign_queue() - Function assign rotation work onto hw
+ * @mgr: Rotator manager.
+ * @entry: Contains details on rotator work item being requested
+ * @private: Private struct used for access rot session performance struct
+ *
+ * This Function allocates hw required to complete rotation work item
+ * requested.
+ *
+ * Caller is responsible for calling cleanup function if error is returned
+ */
+static int mdss_rotator_assign_queue(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_entry *entry,
+ struct mdss_rot_file_private *private)
+{
+ struct mdss_rot_perf *perf;
+ struct mdss_rot_queue *queue;
+ struct mdss_rot_hw_resource *hw;
+ struct mdp_rotation_item *item = &entry->item;
+ u32 wb_idx = item->wb_idx;
+ u32 pipe_idx = item->pipe_idx;
+ int ret = 0;
+
+ /*
+ * todo: instead of always assign writeback block 0, we can
+ * apply some load balancing logic in the future
+ */
+ if (wb_idx == MDSS_ROTATION_HW_ANY) {
+ wb_idx = 0;
+ pipe_idx = 0;
+ }
+
+ if (wb_idx >= mgr->queue_count) {
+ pr_err("Invalid wb idx = %d\n", wb_idx);
+ return -EINVAL;
+ }
+
+ queue = mgr->queues + wb_idx;
+
+ mutex_lock(&queue->hw_lock);
+
+ if (!queue->hw) {
+ hw = mdss_rotator_hw_alloc(mgr, pipe_idx, wb_idx);
+ if (IS_ERR_OR_NULL(hw)) {
+ pr_err("fail to allocate hw\n");
+ ret = PTR_ERR(hw);
+ } else {
+ queue->hw = hw;
+ }
+ }
+
+ if (queue->hw) {
+ entry->queue = queue;
+ queue->hw->pending_count++;
+ }
+
+ mutex_unlock(&queue->hw_lock);
+
+ perf = mdss_rotator_find_session(private, item->session_id);
+ if (!perf) {
+ pr_err("Could not find session based on rotation work item\n");
+ return -EINVAL;
+ }
+
+ entry->perf = perf;
+ perf->last_wb_idx = wb_idx;
+
+ return ret;
+}
+
+static void mdss_rotator_unassign_queue(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_entry *entry)
+{
+ struct mdss_rot_queue *queue = entry->queue;
+
+ if (!queue)
+ return;
+
+ entry->queue = NULL;
+
+ mutex_lock(&queue->hw_lock);
+
+ if (!queue->hw) {
+ pr_err("entry assigned a queue with no hw\n");
+ mutex_unlock(&queue->hw_lock);
+ return;
+ }
+
+ queue->hw->pending_count--;
+ if (queue->hw->pending_count == 0) {
+ mdss_rotator_free_hw(mgr, queue->hw);
+ queue->hw = NULL;
+ }
+
+ mutex_unlock(&queue->hw_lock);
+}
+
/*
 * mdss_rotator_queue_request() - account a request's entries into the
 * per-session work distribution, recompute the rotator clock rate, then
 * queue every entry's commit work on its assigned workqueue.
 */
static void mdss_rotator_queue_request(struct mdss_rot_mgr *mgr,
	struct mdss_rot_file_private *private,
	struct mdss_rot_entry_container *req)
{
	struct mdss_rot_entry *entry;
	struct mdss_rot_queue *queue;
	unsigned long clk_rate;
	u32 wb_idx;
	int i;

	/* first pass: bump work distribution before any work is queued */
	for (i = 0; i < req->count; i++) {
		entry = req->entries + i;
		queue = entry->queue;
		wb_idx = queue->hw->wb_id;
		mutex_lock(&entry->perf->work_dis_lock);
		entry->perf->work_distribution[wb_idx]++;
		mutex_unlock(&entry->perf->work_dis_lock);
		entry->work_assigned = true;
	}

	clk_rate = mdss_rotator_clk_rate_calc(mgr, private);
	mdss_rotator_set_clk_rate(mgr, clk_rate, MDSS_CLK_ROTATOR_CORE);

	/* second pass: hand the entries to their ordered workqueues */
	for (i = 0; i < req->count; i++) {
		entry = req->entries + i;
		queue = entry->queue;
		entry->output_fence = NULL;
		queue_work(queue->rot_work_queue, &entry->commit_work);
	}
}
+
/*
 * mdss_rotator_calc_perf() - derive clock rate and bandwidth demand from
 * a session's configuration.
 *
 * clk_rate = width * height * fps / 4 (rotator processes 4 pixels/clock),
 * with explicit overflow guards on both multiplications.
 * bw = read + write bandwidth, each scaled for chroma subsampling or bpp
 * and then by the compression ratio.
 *
 * NOTE(review): read_bw/write_bw are u32; the bpp/1.5x scaling itself is
 * not overflow-guarded for very large configs — confirm upstream intent.
 */
static int mdss_rotator_calc_perf(struct mdss_rot_perf *perf)
{
	struct mdp_rotation_config *config = &perf->config;
	u32 read_bw, write_bw;
	struct mdss_mdp_format_params *in_fmt, *out_fmt;

	in_fmt = mdss_mdp_get_format_params(config->input.format);
	if (!in_fmt) {
		pr_err("invalid input format\n");
		return -EINVAL;
	}
	out_fmt = mdss_mdp_get_format_params(config->output.format);
	if (!out_fmt) {
		pr_err("invalid output format\n");
		return -EINVAL;
	}
	/* guard width * height against u32 overflow */
	if (!config->input.width ||
		(0xffffffff/config->input.width < config->input.height))
		return -EINVAL;

	perf->clk_rate = config->input.width * config->input.height;

	/* guard (width * height) * frame_rate against u32 overflow */
	if (!perf->clk_rate ||
		(0xffffffff/perf->clk_rate < config->frame_rate))
		return -EINVAL;

	perf->clk_rate *= config->frame_rate;
	/* rotator processes 4 pixels per clock */
	perf->clk_rate /= 4;

	read_bw = config->input.width * config->input.height *
		config->frame_rate;
	if (in_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
		read_bw = (read_bw * 3) / 2;
	else
		read_bw *= in_fmt->bpp;

	write_bw = config->output.width * config->output.height *
		config->frame_rate;
	if (out_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
		write_bw = (write_bw * 3) / 2;
	else
		write_bw *= out_fmt->bpp;

	read_bw = apply_comp_ratio_factor(read_bw, in_fmt,
		&config->input.comp_ratio);
	write_bw = apply_comp_ratio_factor(write_bw, out_fmt,
		&config->output.comp_ratio);

	perf->bw = read_bw + write_bw;
	return 0;
}
+
+static int mdss_rotator_update_perf(struct mdss_rot_mgr *mgr)
+{
+ struct mdss_rot_file_private *priv;
+ struct mdss_rot_perf *perf;
+ int not_in_suspend_mode;
+ u64 total_bw = 0;
+
+ ATRACE_BEGIN(__func__);
+
+ not_in_suspend_mode = !atomic_read(&mgr->device_suspended);
+
+ if (not_in_suspend_mode) {
+ mutex_lock(&mgr->file_lock);
+ list_for_each_entry(priv, &mgr->file_list, list) {
+ mutex_lock(&priv->perf_lock);
+ list_for_each_entry(perf, &priv->perf_list, list) {
+ total_bw += perf->bw;
+ }
+ mutex_unlock(&priv->perf_lock);
+ }
+ mutex_unlock(&mgr->file_lock);
+ }
+
+ mutex_lock(&mgr->bus_lock);
+ total_bw += mgr->pending_close_bw_vote;
+ mdss_rotator_enable_reg_bus(mgr, total_bw);
+ mdss_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
+ mutex_unlock(&mgr->bus_lock);
+
+ ATRACE_END(__func__);
+ return 0;
+}
+
/*
 * mdss_rotator_release_from_work_distribution() - retire an entry from its
 * session's work distribution.
 *
 * Decrements the per-writeback work counter. If this was the last pending
 * work item and the perf node is already unlinked from the session list
 * (close offloaded the free to us), drop the pending-close bw vote, the
 * resource/clk references, and free the perf node.
 *
 * Lock order here: mgr->lock -> perf->work_dis_lock, and separately
 * mgr->bus_lock for the bw vote.
 */
static void mdss_rotator_release_from_work_distribution(
	struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	if (entry->work_assigned) {
		bool free_perf = false;
		u32 wb_idx = entry->queue->hw->wb_id;

		mutex_lock(&mgr->lock);
		mutex_lock(&entry->perf->work_dis_lock);
		if (entry->perf->work_distribution[wb_idx])
			entry->perf->work_distribution[wb_idx]--;

		if (!entry->perf->work_distribution[wb_idx]
				&& list_empty(&entry->perf->list)) {
			/* close session has offloaded perf free to us */
			free_perf = true;
		}
		mutex_unlock(&entry->perf->work_dis_lock);
		entry->work_assigned = false;
		if (free_perf) {
			mutex_lock(&mgr->bus_lock);
			mgr->pending_close_bw_vote -= entry->perf->bw;
			mutex_unlock(&mgr->bus_lock);
			mdss_rotator_resource_ctrl(mgr, false);
			devm_kfree(&mgr->pdev->dev,
				entry->perf->work_distribution);
			devm_kfree(&mgr->pdev->dev, entry->perf);
			mdss_rotator_update_perf(mgr);
			mdss_rotator_clk_ctrl(mgr, false);
			entry->perf = NULL;
		}
		mutex_unlock(&mgr->lock);
	}
}
+
/*
 * mdss_rotator_release_entry() - undo everything acquired for an entry:
 * work-distribution accounting, fences, buffers, then the queue
 * assignment. Queue release comes last because clear_fence still reads
 * entry->queue.
 */
static void mdss_rotator_release_entry(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry *entry)
{
	mdss_rotator_release_from_work_distribution(mgr, entry);
	mdss_rotator_clear_fence(entry);
	mdss_rotator_release_data(entry);
	mdss_rotator_unassign_queue(mgr, entry);
}
+
+static int mdss_rotator_config_dnsc_factor(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_entry *entry)
+{
+ int ret = 0;
+ u16 src_w, src_h, dst_w, dst_h, bit;
+ struct mdp_rotation_item *item = &entry->item;
+ struct mdss_mdp_format_params *fmt;
+
+ src_w = item->src_rect.w;
+ src_h = item->src_rect.h;
+
+ if (item->flags & MDP_ROTATION_90) {
+ dst_w = item->dst_rect.h;
+ dst_h = item->dst_rect.w;
+ } else {
+ dst_w = item->dst_rect.w;
+ dst_h = item->dst_rect.h;
+ }
+
+ if (!mgr->has_downscale &&
+ (src_w != dst_w || src_h != dst_h)) {
+ pr_err("rotator downscale not supported\n");
+ ret = -EINVAL;
+ goto dnsc_err;
+ }
+
+ entry->dnsc_factor_w = 0;
+ entry->dnsc_factor_h = 0;
+
+ if ((src_w != dst_w) || (src_h != dst_h)) {
+ if ((src_w % dst_w) || (src_h % dst_h)) {
+ ret = -EINVAL;
+ goto dnsc_err;
+ }
+ entry->dnsc_factor_w = src_w / dst_w;
+ bit = fls(entry->dnsc_factor_w);
+ /*
+ * New Chipsets supports downscale upto 1/64
+ * change the Bit check from 5 to 7 to support 1/64 down scale
+ */
+ if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 7)) {
+ ret = -EINVAL;
+ goto dnsc_err;
+ }
+ entry->dnsc_factor_h = src_h / dst_h;
+ bit = fls(entry->dnsc_factor_h);
+ if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 7)) {
+ ret = -EINVAL;
+ goto dnsc_err;
+ }
+ }
+
+ fmt = mdss_mdp_get_format_params(item->output.format);
+ if (mdss_mdp_is_ubwc_format(fmt) &&
+ (entry->dnsc_factor_h || entry->dnsc_factor_w)) {
+ pr_err("ubwc not supported with downscale %d\n",
+ item->output.format);
+ ret = -EINVAL;
+ }
+
+dnsc_err:
+
+ /* Downscaler does not support asymmetrical dnsc */
+ if (entry->dnsc_factor_w != entry->dnsc_factor_h)
+ ret = -EINVAL;
+
+ if (ret) {
+ pr_err("Invalid rotator downscale ratio %dx%d->%dx%d\n",
+ src_w, src_h, dst_w, dst_h);
+ entry->dnsc_factor_w = 0;
+ entry->dnsc_factor_h = 0;
+ }
+ return ret;
+}
+
+static bool mdss_rotator_verify_format(struct mdss_rot_mgr *mgr,
+ struct mdss_mdp_format_params *in_fmt,
+ struct mdss_mdp_format_params *out_fmt, bool rotation)
+{
+ u8 in_v_subsample, in_h_subsample;
+ u8 out_v_subsample, out_h_subsample;
+
+ if (!mgr->has_ubwc && (mdss_mdp_is_ubwc_format(in_fmt) ||
+ mdss_mdp_is_ubwc_format(out_fmt))) {
+ pr_err("Rotator doesn't allow ubwc\n");
+ return -EINVAL;
+ }
+
+ if (!(out_fmt->flag & VALID_ROT_WB_FORMAT)) {
+ pr_err("Invalid output format\n");
+ return false;
+ }
+
+ if (in_fmt->is_yuv != out_fmt->is_yuv) {
+ pr_err("Rotator does not support CSC\n");
+ return false;
+ }
+
+ /* Forcing same pixel depth */
+ if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) {
+ /* Exception is that RGB can drop alpha or add X */
+ if (in_fmt->is_yuv || out_fmt->alpha_enable ||
+ (in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) ||
+ (in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) ||
+ (in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) {
+ pr_err("Bit format does not match\n");
+ return false;
+ }
+ }
+
+ /* Need to make sure that sub-sampling persists through rotation */
+ if (rotation) {
+ mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
+ &in_v_subsample, &in_h_subsample);
+ mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
+ &out_v_subsample, &out_h_subsample);
+
+ if ((in_v_subsample != out_h_subsample) ||
+ (in_h_subsample != out_v_subsample)) {
+ pr_err("Rotation has invalid subsampling\n");
+ return false;
+ }
+ } else {
+ if (in_fmt->chroma_sample != out_fmt->chroma_sample) {
+ pr_err("Format subsampling mismatch\n");
+ return false;
+ }
+ }
+
+ pr_debug("in_fmt=%0d, out_fmt=%d, has_ubwc=%d\n",
+ in_fmt->format, out_fmt->format, mgr->has_ubwc);
+ return true;
+}
+
+static int mdss_rotator_verify_config(struct mdss_rot_mgr *mgr,
+ struct mdp_rotation_config *config)
+{
+ struct mdss_mdp_format_params *in_fmt, *out_fmt;
+ u8 in_v_subsample, in_h_subsample;
+ u8 out_v_subsample, out_h_subsample;
+ u32 input, output;
+ bool rotation;
+
+ input = config->input.format;
+ output = config->output.format;
+ rotation = (config->flags & MDP_ROTATION_90) ? true : false;
+
+ in_fmt = mdss_mdp_get_format_params(input);
+ if (!in_fmt) {
+ pr_err("Unrecognized input format:%u\n", input);
+ return -EINVAL;
+ }
+
+ out_fmt = mdss_mdp_get_format_params(output);
+ if (!out_fmt) {
+ pr_err("Unrecognized output format:%u\n", output);
+ return -EINVAL;
+ }
+
+ mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
+ &in_v_subsample, &in_h_subsample);
+ mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
+ &out_v_subsample, &out_h_subsample);
+
+ /* Dimension of image needs to be divisible by subsample rate */
+ if ((config->input.height % in_v_subsample) ||
+ (config->input.width % in_h_subsample)) {
+ pr_err("In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
+ config->input.width, config->input.height,
+ in_v_subsample, in_h_subsample);
+ return -EINVAL;
+ }
+
+ if ((config->output.height % out_v_subsample) ||
+ (config->output.width % out_h_subsample)) {
+ pr_err("Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
+ config->output.width, config->output.height,
+ out_v_subsample, out_h_subsample);
+ return -EINVAL;
+ }
+
+ if (!mdss_rotator_verify_format(mgr, in_fmt,
+ out_fmt, rotation)) {
+ pr_err("Rot format pairing invalid, in_fmt:%d, out_fmt:%d\n",
+ input, output);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mdss_rotator_validate_item_matches_session(
+ struct mdp_rotation_config *config, struct mdp_rotation_item *item)
+{
+ int ret;
+
+ ret = __compare_session_item_rect(&config->input,
+ &item->src_rect, item->input.format, true);
+ if (ret)
+ return ret;
+
+ ret = __compare_session_item_rect(&config->output,
+ &item->dst_rect, item->output.format, false);
+ if (ret)
+ return ret;
+
+ ret = __compare_session_rotations(config->flags, item->flags);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mdss_rotator_validate_img_roi(struct mdp_rotation_item *item)
+{
+ struct mdss_mdp_format_params *fmt;
+ uint32_t width, height;
+ int ret = 0;
+
+ width = item->input.width;
+ height = item->input.height;
+ if (item->flags & MDP_ROTATION_DEINTERLACE) {
+ width *= 2;
+ height /= 2;
+ }
+
+ /* Check roi bounds */
+ if (ROT_CHECK_BOUNDS(item->src_rect.x, item->src_rect.w, width) ||
+ ROT_CHECK_BOUNDS(item->src_rect.y, item->src_rect.h,
+ height)) {
+ pr_err("invalid src flag=%08x img wh=%dx%d rect=%d,%d,%d,%d\n",
+ item->flags, width, height, item->src_rect.x,
+ item->src_rect.y, item->src_rect.w, item->src_rect.h);
+ return -EINVAL;
+ }
+ if (ROT_CHECK_BOUNDS(item->dst_rect.x, item->dst_rect.w,
+ item->output.width) ||
+ ROT_CHECK_BOUNDS(item->dst_rect.y, item->dst_rect.h,
+ item->output.height)) {
+ pr_err("invalid dst img wh=%dx%d rect=%d,%d,%d,%d\n",
+ item->output.width, item->output.height,
+ item->dst_rect.x, item->dst_rect.y, item->dst_rect.w,
+ item->dst_rect.h);
+ return -EINVAL;
+ }
+
+ fmt = mdss_mdp_get_format_params(item->output.format);
+ if (!fmt) {
+ pr_err("invalid output format:%d\n", item->output.format);
+ return -EINVAL;
+ }
+
+ if (mdss_mdp_is_ubwc_format(fmt))
+ ret = mdss_mdp_validate_offset_for_ubwc_format(fmt,
+ item->dst_rect.x, item->dst_rect.y);
+
+ return ret;
+}
+
+static int mdss_rotator_validate_fmt_and_item_flags(
+ struct mdp_rotation_config *config, struct mdp_rotation_item *item)
+{
+ struct mdss_mdp_format_params *fmt;
+
+ fmt = mdss_mdp_get_format_params(item->input.format);
+ if ((item->flags & MDP_ROTATION_DEINTERLACE) &&
+ mdss_mdp_is_ubwc_format(fmt)) {
+ pr_err("cannot perform mdp deinterlace on tiled formats\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mdss_rotator_validate_entry(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_file_private *private,
+ struct mdss_rot_entry *entry)
+{
+ int ret;
+ struct mdp_rotation_item *item;
+ struct mdss_rot_perf *perf;
+
+ item = &entry->item;
+
+ if (item->wb_idx != item->pipe_idx) {
+ pr_err("invalid writeback and pipe idx\n");
+ return -EINVAL;
+ }
+
+ if (item->wb_idx != MDSS_ROTATION_HW_ANY &&
+ item->wb_idx > mgr->queue_count) {
+ pr_err("invalid writeback idx\n");
+ return -EINVAL;
+ }
+
+ perf = mdss_rotator_find_session(private, item->session_id);
+ if (!perf) {
+ pr_err("Could not find session:%u\n", item->session_id);
+ return -EINVAL;
+ }
+
+ ret = mdss_rotator_validate_item_matches_session(&perf->config, item);
+ if (ret) {
+ pr_err("Work item does not match session:%u\n",
+ item->session_id);
+ return ret;
+ }
+
+ ret = mdss_rotator_validate_img_roi(item);
+ if (ret) {
+ pr_err("Image roi is invalid\n");
+ return ret;
+ }
+
+ ret = mdss_rotator_validate_fmt_and_item_flags(&perf->config, item);
+ if (ret)
+ return ret;
+
+ ret = mdss_rotator_config_dnsc_factor(mgr, entry);
+ if (ret) {
+ pr_err("fail to configure downscale factor\n");
+ return ret;
+ }
+ return ret;
+}
+
+/*
+ * Upon failure from the function, caller needs to make sure
+ * to call mdss_rotator_remove_request to clean up resources.
+ */
+static int mdss_rotator_add_request(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_file_private *private,
+ struct mdss_rot_entry_container *req)
+{
+ struct mdss_rot_entry *entry;
+ struct mdp_rotation_item *item;
+ u32 flag = 0;
+ int i, ret;
+
+ for (i = 0; i < req->count; i++) {
+ entry = req->entries + i;
+ item = &entry->item;
+
+ if (item->flags & MDP_ROTATION_SECURE)
+ flag = MDP_SECURE_OVERLAY_SESSION;
+
+ ret = mdss_rotator_validate_entry(mgr, private, entry);
+ if (ret) {
+ pr_err("fail to validate the entry\n");
+ return ret;
+ }
+
+ ret = mdss_rotator_import_data(mgr, entry);
+ if (ret) {
+ pr_err("fail to import the data\n");
+ return ret;
+ }
+
+ if (item->input.fence >= 0) {
+ entry->input_fence = mdss_get_fd_sync_fence(
+ item->input.fence);
+ if (!entry->input_fence) {
+ pr_err("invalid input fence fd\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = mdss_rotator_assign_queue(mgr, entry, private);
+ if (ret) {
+ pr_err("fail to assign queue to entry\n");
+ return ret;
+ }
+
+ entry->request = req;
+
+ INIT_WORK(&entry->commit_work, mdss_rotator_wq_handler);
+
+ ret = mdss_rotator_create_fence(entry);
+ if (ret) {
+ pr_err("fail to create fence\n");
+ return ret;
+ }
+ item->output.fence = entry->output_fence_fd;
+
+ pr_debug("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
+ "dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
+ item->src_rect.x, item->src_rect.y,
+ item->src_rect.w, item->src_rect.h, item->input.format,
+ item->dst_rect.x, item->dst_rect.y,
+ item->dst_rect.w, item->dst_rect.h, item->output.format,
+ item->session_id);
+ }
+
+ mutex_lock(&private->req_lock);
+ list_add(&req->list, &private->req_list);
+ mutex_unlock(&private->req_lock);
+
+ return 0;
+}
+
+static void mdss_rotator_remove_request(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_file_private *private,
+ struct mdss_rot_entry_container *req)
+{
+ int i;
+
+ mutex_lock(&private->req_lock);
+ for (i = 0; i < req->count; i++)
+ mdss_rotator_release_entry(mgr, req->entries + i);
+ list_del_init(&req->list);
+ mutex_unlock(&private->req_lock);
+}
+
/*
 * mdss_rotator_cancel_request() - cancel all queued work of a request,
 * signal its fences in order, release its entries, and free it.
 * This function should be called with req_lock held.
 */
static void mdss_rotator_cancel_request(struct mdss_rot_mgr *mgr,
	struct mdss_rot_entry_container *req)
{
	struct mdss_rot_entry *entry;
	int i;

	/*
	 * To avoid signal the rotation entry output fence in the wrong
	 * order, all the entries in the same request needs to be cancelled
	 * first, before signaling the output fence.
	 */
	for (i = req->count - 1; i >= 0; i--) {
		entry = req->entries + i;
		cancel_work_sync(&entry->commit_work);
	}

	for (i = req->count - 1; i >= 0; i--) {
		entry = req->entries + i;
		mdss_rotator_signal_output(entry);
		mdss_rotator_release_entry(mgr, entry);
	}

	list_del_init(&req->list);
	devm_kfree(&mgr->pdev->dev, req);
}
+
+static void mdss_rotator_cancel_all_requests(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_file_private *private)
+{
+ struct mdss_rot_entry_container *req, *req_next;
+
+ pr_debug("Canceling all rotator requests\n");
+
+ mutex_lock(&private->req_lock);
+ list_for_each_entry_safe(req, req_next, &private->req_list, list)
+ mdss_rotator_cancel_request(mgr, req);
+ mutex_unlock(&private->req_lock);
+}
+
+static void mdss_rotator_free_competed_request(struct mdss_rot_mgr *mgr,
+ struct mdss_rot_file_private *private)
+{
+ struct mdss_rot_entry_container *req, *req_next;
+
+ mutex_lock(&private->req_lock);
+ list_for_each_entry_safe(req, req_next, &private->req_list, list) {
+ if (atomic_read(&req->pending_count) == 0) {
+ list_del_init(&req->list);
+ devm_kfree(&mgr->pdev->dev, req);
+ }
+ }
+ mutex_unlock(&private->req_lock);
+}
+
+/*
+ * mdss_rotator_release_rotator_perf_session - tear down one file handle's
+ * state: cancel all of its outstanding requests, then free every perf
+ * session still on its perf_list.
+ */
+static void mdss_rotator_release_rotator_perf_session(
+	struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private)
+{
+	struct mdss_rot_perf *perf, *perf_next;
+
+	pr_debug("Releasing all rotator request\n");
+	mdss_rotator_cancel_all_requests(mgr, private);
+
+	mutex_lock(&private->perf_lock);
+	list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) {
+		list_del_init(&perf->list);
+		/* work_distribution is a separate allocation, free it first */
+		devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+		devm_kfree(&mgr->pdev->dev, perf);
+	}
+	mutex_unlock(&private->perf_lock);
+}
+
+/*
+ * mdss_rotator_release_all - tear down every open rotator file handle.
+ *
+ * Used on driver removal: releases each handle's sessions, drops its
+ * resource vote and frees the per-file private data.
+ */
+static void mdss_rotator_release_all(struct mdss_rot_mgr *mgr)
+{
+	struct mdss_rot_file_private *priv, *priv_next;
+
+	mutex_lock(&mgr->file_lock);
+	list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
+		mdss_rotator_release_rotator_perf_session(mgr, priv);
+		mdss_rotator_resource_ctrl(mgr, false);
+		list_del_init(&priv->list);
+		priv->file->private_data = NULL;
+		devm_kfree(&mgr->pdev->dev, priv);
+	}
+	/* unlock through 'mgr', the same expression used for the lock above */
+	mutex_unlock(&mgr->file_lock);
+
+	mdss_rotator_update_perf(mgr);
+}
+
+/*
+ * mdss_rotator_prepare_hw - switch the pipe's mixer to the writeback ctl.
+ *
+ * On success the ctl's shared_lock (if any) is left HELD; it is released
+ * later by mdss_rotator_wait_for_entry().
+ *
+ * Return: 0 on success, -EINVAL if no writeback ctl is available.
+ */
+static int mdss_rotator_prepare_hw(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_pipe *pipe = hw->pipe;
+	struct mdss_mdp_ctl *orig_ctl = pipe->mixer_left->ctl;
+	struct mdss_mdp_ctl *rot_ctl;
+
+	if (orig_ctl->shared_lock)
+		mutex_lock(orig_ctl->shared_lock);
+
+	rot_ctl = mdss_mdp_ctl_mixer_switch(orig_ctl,
+			MDSS_MDP_WB_CTL_TYPE_BLOCK);
+	if (!rot_ctl) {
+		if (orig_ctl->shared_lock)
+			mutex_unlock(orig_ctl->shared_lock);
+		return -EINVAL;
+	}
+
+	hw->ctl = rot_ctl;
+	pipe->mixer_left = rot_ctl->mixer_left;
+
+	return 0;
+}
+
+/* Copy a user-ABI mdp_rect into the driver-internal mdss_rect. */
+static void mdss_rotator_translate_rect(struct mdss_rect *dst,
+	struct mdp_rect *src)
+{
+	dst->x = src->x;
+	dst->y = src->y;
+	dst->w = src->w;
+	dst->h = src->h;
+}
+
+/*
+ * mdss_rotator_translate_flags - map MDP_ROTATION_* ioctl flags to the
+ * MDP_* pipe flags used internally. Unknown bits are dropped.
+ */
+static u32 mdss_rotator_translate_flags(u32 input)
+{
+	static const struct {
+		u32 rot_flag;
+		u32 pipe_flag;
+	} flag_map[] = {
+		{ MDP_ROTATION_NOP,         MDP_ROT_NOP },
+		{ MDP_ROTATION_FLIP_LR,     MDP_FLIP_LR },
+		{ MDP_ROTATION_FLIP_UD,     MDP_FLIP_UD },
+		{ MDP_ROTATION_90,          MDP_ROT_90 },
+		{ MDP_ROTATION_DEINTERLACE, MDP_DEINTERLACE },
+		{ MDP_ROTATION_SECURE,      MDP_SECURE_OVERLAY_SESSION },
+		{ MDP_ROTATION_BWC_EN,      MDP_BWC_EN },
+	};
+	u32 output = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(flag_map); i++)
+		if (input & flag_map[i].rot_flag)
+			output |= flag_map[i].pipe_flag;
+
+	return output;
+}
+
+/*
+ * mdss_rotator_config_hw - program the writeback pipe for one entry.
+ *
+ * Translates the user rotation item into pipe parameters, reserves SMP,
+ * configures scaling and queues the source buffer on the pipe.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdp_rotation_item *item;
+	struct mdss_rot_perf *perf;
+	int ret;
+
+	ATRACE_BEGIN(__func__);
+	pipe = hw->pipe;
+	item = &entry->item;
+	perf = entry->perf;
+
+	pipe->flags = mdss_rotator_translate_flags(item->flags);
+	pipe->src_fmt = mdss_mdp_get_format_params(item->input.format);
+	pipe->img_width = item->input.width;
+	pipe->img_height = item->input.height;
+	mdss_rotator_translate_rect(&pipe->src, &item->src_rect);
+	/*
+	 * NOTE(review): pipe->dst is programmed from src_rect as well, not
+	 * dst_rect. This looks intentional (the rotator pipe covers the full
+	 * source rect) but confirm against the MDP writeback programming model.
+	 */
+	mdss_rotator_translate_rect(&pipe->dst, &item->src_rect);
+	pipe->scaler.enable = 0;
+	pipe->frame_rate = perf->config.frame_rate;
+
+	/* force the pipe programming to be flushed on the next kickoff */
+	pipe->params_changed++;
+
+	mdss_mdp_smp_release(pipe);
+
+	ret = mdss_mdp_smp_reserve(pipe);
+	if (ret) {
+		pr_err("unable to mdss_mdp_smp_reserve rot data\n");
+		goto done;
+	}
+
+	ret = mdss_mdp_overlay_setup_scaling(pipe);
+	if (ret) {
+		pr_err("scaling setup failed %d\n", ret);
+		goto done;
+	}
+
+	/* queue the source buffer; its status is returned via 'ret' below */
+	ret = mdss_mdp_pipe_queue_data(pipe, &entry->src_buf);
+	pr_debug("Config pipe. src{%u,%u,%u,%u}f=%u\n"
+		"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
+		item->src_rect.x, item->src_rect.y,
+		item->src_rect.w, item->src_rect.h, item->input.format,
+		item->dst_rect.x, item->dst_rect.y,
+		item->dst_rect.w, item->dst_rect.h, item->output.format,
+		item->session_id);
+	MDSS_XLOG(item->input.format, pipe->img_width, pipe->img_height,
+		pipe->flags);
+done:
+	ATRACE_END(__func__);
+	return ret;
+}
+
+/* Kick off the writeback commit that performs this rotation entry. */
+static int mdss_rotator_kickoff_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_writeback_arg wb_args = {
+		.data = &entry->dst_buf,
+		.priv_data = entry,
+	};
+
+	return mdss_mdp_writeback_display_commit(hw->ctl, &wb_args);
+}
+
+/*
+ * Wait for the committed rotation to complete, then drop the shared
+ * ctl lock taken in mdss_rotator_prepare_hw().
+ */
+static int mdss_rotator_wait_for_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_ctl *ctl = hw->ctl;
+	int rc;
+
+	rc = mdss_mdp_display_wait4comp(ctl);
+	if (ctl->shared_lock)
+		mutex_unlock(ctl->shared_lock);
+	return rc;
+}
+
+/*
+ * mdss_rotator_commit_entry - run one entry through the full hw sequence:
+ * prepare (mixer switch), configure, kickoff, wait for completion.
+ *
+ * NOTE(review): prepare_hw leaves the ctl shared_lock held; it is only
+ * released in wait_for_entry. If config or kickoff fails here we return
+ * with that lock still held — confirm whether this is handled elsewhere.
+ */
+static int mdss_rotator_commit_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	int ret;
+
+	ret = mdss_rotator_prepare_hw(hw, entry);
+	if (ret) {
+		pr_err("fail to prepare hw resource %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_config_hw(hw, entry);
+	if (ret) {
+		pr_err("fail to configure hw resource %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_kickoff_entry(hw, entry);
+	if (ret) {
+		pr_err("fail to do kickoff %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_wait_for_entry(hw, entry);
+	if (ret) {
+		pr_err("fail to wait for completion %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_handle_entry - process one rotation entry end to end:
+ * wait for the input fence, map/validate the buffers, then commit to hw.
+ *
+ * Return: 0 on success, negative error code on the first failing step.
+ */
+static int mdss_rotator_handle_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	int ret;
+
+	/* block until the producer signals the input buffer fence */
+	ret = mdss_rotator_wait_for_input(entry);
+	if (ret) {
+		pr_err("wait for input buffer failed %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_map_and_check_data(entry);
+	if (ret) {
+		pr_err("fail to prepare input/output data %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_commit_entry(hw, entry);
+	if (ret)
+		pr_err("rotator commit failed %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_wq_handler - workqueue handler for one rotation entry.
+ *
+ * Runs the entry on the hw resource of its queue, then always signals
+ * the output fence, releases the entry and decrements the request's
+ * pending_count — even on failure, so waiters cannot hang.
+ *
+ * Uses the global rot_mgr because workqueue context carries no mgr arg.
+ */
+static void mdss_rotator_wq_handler(struct work_struct *work)
+{
+	struct mdss_rot_entry *entry;
+	struct mdss_rot_entry_container *request;
+	struct mdss_rot_hw_resource *hw;
+	int ret;
+
+	entry = container_of(work, struct mdss_rot_entry, commit_work);
+	request = entry->request;
+
+	if (!request) {
+		pr_err("fatal error, no request with entry\n");
+		return;
+	}
+
+	hw = mdss_rotator_get_hw_resource(entry->queue, entry);
+	if (!hw) {
+		pr_err("no hw for the queue\n");
+		goto get_hw_res_err;
+	}
+
+	ret = mdss_rotator_handle_entry(hw, entry);
+	if (ret) {
+		struct mdp_rotation_item *item = &entry->item;
+
+		pr_err("Rot req fail. src{%u,%u,%u,%u}f=%u\n"
+		"dst{%u,%u,%u,%u}f=%u session_id=%u, wbidx%d, pipe_id=%d\n",
+			item->src_rect.x, item->src_rect.y,
+			item->src_rect.w, item->src_rect.h, item->input.format,
+			item->dst_rect.x, item->dst_rect.y,
+			item->dst_rect.w, item->dst_rect.h, item->output.format,
+			item->session_id, item->wb_idx, item->pipe_idx);
+	}
+
+	mdss_rotator_put_hw_resource(entry->queue, hw);
+
+get_hw_res_err:
+	/* unconditional cleanup: fence, entry resources, request accounting */
+	mdss_rotator_signal_output(entry);
+	mdss_rotator_release_entry(rot_mgr, entry);
+	atomic_dec(&request->pending_count);
+}
+
+/*
+ * Validate every entry of a request; reject the whole request on the
+ * first invalid entry. Returns 0 when all entries pass.
+ */
+static int mdss_rotator_validate_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req)
+{
+	int idx;
+
+	for (idx = 0; idx < req->count; idx++) {
+		int rc = mdss_rotator_validate_entry(mgr, private,
+				req->entries + idx);
+
+		if (rc) {
+			pr_err("fail to validate the entry\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/* Hand out a unique session id; mgr->lock serializes concurrent opens. */
+static u32 mdss_rotator_generator_session_id(struct mdss_rot_mgr *mgr)
+{
+	u32 session_id;
+
+	mutex_lock(&mgr->lock);
+	session_id = mgr->session_id_generator++;
+	mutex_unlock(&mgr->lock);
+
+	return session_id;
+}
+
+/*
+ * mdss_rotator_open_session - service MDSS_ROTATION_OPEN.
+ *
+ * Copies the session config from userspace, allocates per-session perf
+ * bookkeeping, assigns a new session id (returned through the same user
+ * struct) and votes for rotator resources, clocks and bandwidth.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mdss_rotator_open_session(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdp_rotation_config config;
+	struct mdss_rot_perf *perf;
+	int ret;
+
+	ret = copy_from_user(&config, (void __user *)arg, sizeof(config));
+	if (ret) {
+		pr_err("fail to copy session data\n");
+		/* copy_from_user() returns bytes left, not an errno */
+		return -EFAULT;
+	}
+
+	ret = mdss_rotator_verify_config(mgr, &config);
+	if (ret) {
+		pr_err("Rotator verify format failed\n");
+		return ret;
+	}
+
+	perf = devm_kzalloc(&mgr->pdev->dev, sizeof(*perf), GFP_KERNEL);
+	if (!perf)
+		return -ENOMEM;
+
+	ATRACE_BEGIN(__func__); /* Open session votes for bw */
+	perf->work_distribution = devm_kzalloc(&mgr->pdev->dev,
+		sizeof(u32) * mgr->queue_count, GFP_KERNEL);
+	if (!perf->work_distribution) {
+		ret = -ENOMEM;
+		goto alloc_err;
+	}
+
+	config.session_id = mdss_rotator_generator_session_id(mgr);
+	perf->config = config;
+	perf->last_wb_idx = -1;
+	mutex_init(&perf->work_dis_lock);
+
+	INIT_LIST_HEAD(&perf->list);
+
+	ret = mdss_rotator_calc_perf(perf);
+	if (ret) {
+		pr_err("error setting the session%d\n", ret);
+		goto copy_user_err;
+	}
+
+	/* hand the generated session_id back to the caller */
+	ret = copy_to_user((void __user *)arg, &config, sizeof(config));
+	if (ret) {
+		pr_err("fail to copy to user\n");
+		ret = -EFAULT;
+		goto copy_user_err;
+	}
+
+	mutex_lock(&private->perf_lock);
+	list_add(&perf->list, &private->perf_list);
+	mutex_unlock(&private->perf_lock);
+
+	ret = mdss_rotator_resource_ctrl(mgr, true);
+	if (ret) {
+		pr_err("Failed to aqcuire rotator resources\n");
+		goto resource_err;
+	}
+
+	/* use the passed-in mgr consistently, not the global rot_mgr */
+	mdss_rotator_clk_ctrl(mgr, true);
+	ret = mdss_rotator_update_perf(mgr);
+	if (ret) {
+		pr_err("fail to open session, not enough clk/bw\n");
+		goto perf_err;
+	}
+	pr_debug("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+		config.session_id, config.input.width, config.input.height,
+		config.input.format, config.output.width, config.output.height,
+		config.output.format);
+
+	goto done;
+perf_err:
+	mdss_rotator_clk_ctrl(mgr, false);
+	mdss_rotator_resource_ctrl(mgr, false);
+resource_err:
+	mutex_lock(&private->perf_lock);
+	list_del_init(&perf->list);
+	mutex_unlock(&private->perf_lock);
+copy_user_err:
+	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+alloc_err:
+	devm_kfree(&mgr->pdev->dev, perf);
+done:
+	ATRACE_END(__func__);
+	return ret;
+}
+
+/*
+ * mdss_rotator_close_session - service MDSS_ROTATION_CLOSE.
+ *
+ * If work is still pending for the session, the bandwidth vote is moved
+ * to pending_close_bw_vote and the actual free is deferred to the wq;
+ * otherwise the session is torn down immediately.
+ */
+static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdss_rot_perf *perf;
+	bool offload_release_work = false;
+	u32 id;
+
+	id = (u32)arg;
+	mutex_lock(&mgr->lock);
+	mutex_lock(&private->perf_lock);
+	perf = __mdss_rotator_find_session(private, id);
+	if (!perf) {
+		mutex_unlock(&private->perf_lock);
+		mutex_unlock(&mgr->lock);
+		pr_err("Trying to close session that does not exist\n");
+		return -EINVAL;
+	}
+
+	ATRACE_BEGIN(__func__);
+	mutex_lock(&perf->work_dis_lock);
+	if (mdss_rotator_is_work_pending(mgr, perf)) {
+		pr_debug("Work is still pending, offload free to wq\n");
+		mutex_lock(&mgr->bus_lock);
+		mgr->pending_close_bw_vote += perf->bw;
+		mutex_unlock(&mgr->bus_lock);
+		offload_release_work = true;
+	}
+	list_del_init(&perf->list);
+	mutex_unlock(&perf->work_dis_lock);
+	mutex_unlock(&private->perf_lock);
+
+	if (offload_release_work)
+		goto done;
+
+	mdss_rotator_resource_ctrl(mgr, false);
+	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+	devm_kfree(&mgr->pdev->dev, perf);
+	mdss_rotator_update_perf(mgr);
+	/* use the passed-in mgr consistently, not the global rot_mgr */
+	mdss_rotator_clk_ctrl(mgr, false);
+done:
+	pr_debug("Closed session id:%u\n", id);
+	ATRACE_END(__func__);
+	mutex_unlock(&mgr->lock);
+	return 0;
+}
+
+/*
+ * mdss_rotator_config_session - service MDSS_ROTATION_CONFIG.
+ *
+ * Replaces an existing session's configuration and recomputes its
+ * performance (clock/bandwidth) requirements.
+ */
+static int mdss_rotator_config_session(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	int ret = 0;
+	struct mdss_rot_perf *perf;
+	struct mdp_rotation_config config;
+
+	ret = copy_from_user(&config, (void __user *)arg,
+			     sizeof(config));
+	if (ret) {
+		pr_err("fail to copy session data\n");
+		/* copy_from_user() returns bytes left, not an errno */
+		return -EFAULT;
+	}
+
+	ret = mdss_rotator_verify_config(mgr, &config);
+	if (ret) {
+		pr_err("Rotator verify format failed\n");
+		return ret;
+	}
+
+	mutex_lock(&mgr->lock);
+	perf = mdss_rotator_find_session(private, config.session_id);
+	if (!perf) {
+		pr_err("No session with id=%u could be found\n",
+			config.session_id);
+		mutex_unlock(&mgr->lock);
+		return -EINVAL;
+	}
+
+	ATRACE_BEGIN(__func__);
+	mutex_lock(&private->perf_lock);
+	perf->config = config;
+	ret = mdss_rotator_calc_perf(perf);
+	mutex_unlock(&private->perf_lock);
+
+	if (ret) {
+		pr_err("error in configuring the session %d\n", ret);
+		goto done;
+	}
+
+	ret = mdss_rotator_update_perf(mgr);
+
+	pr_debug("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+		config.session_id, config.input.width, config.input.height,
+		config.input.format, config.output.width, config.output.height,
+		config.output.format);
+done:
+	ATRACE_END(__func__);
+	mutex_unlock(&mgr->lock);
+	return ret;
+}
+
+/*
+ * mdss_rotator_req_init - allocate and initialize a request container.
+ *
+ * The container and its entries array are carved from ONE allocation;
+ * req->entries points just past the container struct. pending_count
+ * starts at 'count' and is decremented by the wq handler per entry.
+ *
+ * Return: the new container, or ERR_PTR(-EINVAL/-ENOMEM) on failure
+ * (never NULL).
+ */
+struct mdss_rot_entry_container *mdss_rotator_req_init(
+	struct mdss_rot_mgr *mgr, struct mdp_rotation_item *items,
+	u32 count, u32 flags)
+{
+	struct mdss_rot_entry_container *req;
+	int size, i;
+
+	/*
+	 * Check input and output plane_count from each given item
+	 * are within the MAX_PLANES limit
+	 */
+	for (i = 0 ; i < count; i++) {
+		if ((items[i].input.plane_count > MAX_PLANES) ||
+				(items[i].output.plane_count > MAX_PLANES)) {
+			pr_err("Input/Output plane_count exceeds MAX_PLANES limit, input:%d, output:%d\n",
+					items[i].input.plane_count,
+					items[i].output.plane_count);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* single allocation: container followed by 'count' entries */
+	size = sizeof(struct mdss_rot_entry_container);
+	size += sizeof(struct mdss_rot_entry) * count;
+	req = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+
+	INIT_LIST_HEAD(&req->list);
+	req->count = count;
+	req->entries = (struct mdss_rot_entry *)
+		((void *)req + sizeof(struct mdss_rot_entry_container));
+	req->flags = flags;
+	atomic_set(&req->pending_count, count);
+
+	/* 'count' is bounded by MAX_LAYER_COUNT at every call site */
+	for (i = 0; i < count; i++)
+		req->entries[i].item = items[i];
+
+	return req;
+}
+
+/*
+ * mdss_rotator_handle_request_common - shared path of the native and
+ * compat request ioctls: reap finished requests, add the new one, and
+ * reflect the per-entry output fences back into the caller's items[].
+ */
+static int mdss_rotator_handle_request_common(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req,
+	struct mdp_rotation_item *items)
+{
+	int i, ret;
+
+	/* opportunistically free requests that have fully completed */
+	mdss_rotator_free_competed_request(mgr, private);
+
+	ret = mdss_rotator_add_request(mgr, private, req);
+	if (ret) {
+		pr_err("fail to add rotation request\n");
+		/* undo the partial add so no entry leaks fences/buffers */
+		mdss_rotator_remove_request(mgr, private, req);
+		return ret;
+	}
+
+	/* output fences were created during add; copy them for the user */
+	for (i = 0; i < req->count; i++)
+		items[i].output.fence =
+			req->entries[i].item.output.fence;
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_handle_request - service MDSS_ROTATION_REQUEST.
+ *
+ * Copies the request and its items from userspace, builds the entry
+ * container, then either validates only (MDSS_ROTATION_REQUEST_VALIDATE)
+ * or queues the entries and returns the output fences to the caller.
+ */
+static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdp_rotation_request user_req;
+	struct mdp_rotation_item *items = NULL;
+	struct mdss_rot_entry_container *req = NULL;
+	int size, ret;
+	uint32_t req_count;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->handoff_pending) {
+		pr_err("Rotator request failed. Handoff pending\n");
+		return -EPERM;
+	}
+
+	if (mdss_get_sd_client_cnt()) {
+		pr_err("rot request not permitted during secure display session\n");
+		return -EPERM;
+	}
+
+	ret = copy_from_user(&user_req, (void __user *)arg,
+			     sizeof(user_req));
+	if (ret) {
+		pr_err("fail to copy rotation request\n");
+		/* copy_from_user() returns bytes left, not an errno */
+		return -EFAULT;
+	}
+
+	req_count = user_req.count;
+	if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
+		pr_err("invalid rotator req count :%d\n", req_count);
+		return -EINVAL;
+	}
+
+	/*
+	 * here, we make a copy of the items so that we can copy
+	 * all the output fences to the client in one call. Otherwise,
+	 * we will have to call multiple copy_to_user
+	 */
+	size = sizeof(struct mdp_rotation_item) * req_count;
+	items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+	if (!items) {
+		pr_err("fail to allocate rotation items\n");
+		return -ENOMEM;
+	}
+	ret = copy_from_user(items, user_req.list, size);
+	if (ret) {
+		pr_err("fail to copy rotation items\n");
+		ret = -EFAULT;
+		goto handle_request_err;
+	}
+
+	req = mdss_rotator_req_init(mgr, items, user_req.count, user_req.flags);
+	if (IS_ERR_OR_NULL(req)) {
+		pr_err("fail to allocate rotation request\n");
+		/* PTR_ERR(NULL) would be 0; never report success here */
+		ret = req ? PTR_ERR(req) : -ENOMEM;
+		/* req is ERR_PTR or NULL: must not reach devm_kfree below */
+		req = NULL;
+		goto handle_request_err;
+	}
+
+	mutex_lock(&mgr->lock);
+
+	if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
+		/* validate-only: req is freed below, nothing is queued */
+		ret = mdss_rotator_validate_request(mgr, private, req);
+		goto handle_request_err1;
+	}
+
+	ret = mdss_rotator_handle_request_common(mgr, private, req, items);
+	if (ret) {
+		pr_err("fail to handle request\n");
+		goto handle_request_err1;
+	}
+
+	ret = copy_to_user(user_req.list, items, size);
+	if (ret) {
+		pr_err("fail to copy output fence to user\n");
+		ret = -EFAULT;
+		mdss_rotator_remove_request(mgr, private, req);
+		goto handle_request_err1;
+	}
+
+	mdss_rotator_queue_request(mgr, private, req);
+
+	mutex_unlock(&mgr->lock);
+
+	devm_kfree(&mgr->pdev->dev, items);
+	return ret;
+
+handle_request_err1:
+	mutex_unlock(&mgr->lock);
+handle_request_err:
+	devm_kfree(&mgr->pdev->dev, items);
+	/* req may be NULL (alloc failure path); devm_kfree cannot take NULL */
+	if (req)
+		devm_kfree(&mgr->pdev->dev, req);
+	return ret;
+}
+
+/*
+ * mdss_rotator_open - char device open: allocate per-file private data
+ * and register it on the manager's file list.
+ */
+static int mdss_rotator_open(struct inode *inode, struct file *file)
+{
+	struct mdss_rot_file_private *priv;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	/* refuse new handles while the device is suspended */
+	if (atomic_read(&rot_mgr->device_suspended))
+		return -EPERM;
+
+	priv = devm_kzalloc(&rot_mgr->pdev->dev, sizeof(*priv),
+		GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_init(&priv->req_lock);
+	mutex_init(&priv->perf_lock);
+	INIT_LIST_HEAD(&priv->req_list);
+	INIT_LIST_HEAD(&priv->perf_list);
+	INIT_LIST_HEAD(&priv->list);
+
+	mutex_lock(&rot_mgr->file_lock);
+	list_add(&priv->list, &rot_mgr->file_list);
+	file->private_data = priv;
+	priv->file = file;
+	mutex_unlock(&rot_mgr->file_lock);
+
+	return 0;
+}
+
+/*
+ * Check that @priv is still a live handle on the manager's file list.
+ * Guards ioctl/close against stale or forged private_data pointers.
+ */
+static bool mdss_rotator_file_priv_allowed(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *priv)
+{
+	struct mdss_rot_file_private *cur;
+	bool found = false;
+
+	mutex_lock(&mgr->file_lock);
+	/* nothing is deleted here, the plain iterator is sufficient */
+	list_for_each_entry(cur, &mgr->file_list, list) {
+		if (cur == priv) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&mgr->file_lock);
+	return found;
+}
+
+/*
+ * mdss_rotator_close - char device release: tear down the handle's
+ * sessions and requests, unlink and free its private data, then
+ * recompute the aggregate performance vote.
+ */
+static int mdss_rotator_close(struct inode *inode, struct file *file)
+{
+	struct mdss_rot_file_private *private;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	if (!file->private_data)
+		return -EINVAL;
+
+	private = (struct mdss_rot_file_private *)file->private_data;
+
+	/* reject private_data that is not on the manager's file list */
+	if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
+		pr_err("Calling close with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	mdss_rotator_release_rotator_perf_session(rot_mgr, private);
+
+	mutex_lock(&rot_mgr->file_lock);
+	list_del_init(&private->list);
+	devm_kfree(&rot_mgr->pdev->dev, private);
+	file->private_data = NULL;
+	mutex_unlock(&rot_mgr->file_lock);
+
+	mdss_rotator_update_perf(rot_mgr);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * mdss_rotator_handle_request32 - compat (32-bit userspace) variant of
+ * mdss_rotator_handle_request(); differs only in the user struct layout
+ * and compat_ptr() dereferencing of the item list.
+ */
+static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdp_rotation_request32 user_req32;
+	struct mdp_rotation_item *items = NULL;
+	struct mdss_rot_entry_container *req = NULL;
+	int size, ret;
+	uint32_t req_count;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/* mirror the native path: no rotation while handoff is pending */
+	if (mdata->handoff_pending) {
+		pr_err("Rotator request failed. Handoff pending\n");
+		return -EPERM;
+	}
+
+	if (mdss_get_sd_client_cnt()) {
+		pr_err("rot request not permitted during secure display session\n");
+		return -EPERM;
+	}
+
+	ret = copy_from_user(&user_req32, (void __user *)arg,
+			     sizeof(user_req32));
+	if (ret) {
+		pr_err("fail to copy rotation request\n");
+		/* copy_from_user() returns bytes left, not an errno */
+		return -EFAULT;
+	}
+
+	req_count = user_req32.count;
+	if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
+		pr_err("invalid rotator req count :%d\n", req_count);
+		return -EINVAL;
+	}
+
+	size = sizeof(struct mdp_rotation_item) * req_count;
+	items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+	if (!items) {
+		pr_err("fail to allocate rotation items\n");
+		return -ENOMEM;
+	}
+	ret = copy_from_user(items, compat_ptr(user_req32.list), size);
+	if (ret) {
+		pr_err("fail to copy rotation items\n");
+		ret = -EFAULT;
+		goto handle_request32_err;
+	}
+
+	req = mdss_rotator_req_init(mgr, items, user_req32.count,
+		user_req32.flags);
+	if (IS_ERR_OR_NULL(req)) {
+		pr_err("fail to allocate rotation request\n");
+		/* PTR_ERR(NULL) would be 0; never report success here */
+		ret = req ? PTR_ERR(req) : -ENOMEM;
+		/* req is ERR_PTR or NULL: must not reach devm_kfree below */
+		req = NULL;
+		goto handle_request32_err;
+	}
+
+	mutex_lock(&mgr->lock);
+
+	if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
+		/* validate-only: req is freed below, nothing is queued */
+		ret = mdss_rotator_validate_request(mgr, private, req);
+		goto handle_request32_err1;
+	}
+
+	ret = mdss_rotator_handle_request_common(mgr, private, req, items);
+	if (ret) {
+		pr_err("fail to handle request\n");
+		goto handle_request32_err1;
+	}
+
+	ret = copy_to_user(compat_ptr(user_req32.list), items, size);
+	if (ret) {
+		pr_err("fail to copy output fence to user\n");
+		ret = -EFAULT;
+		mdss_rotator_remove_request(mgr, private, req);
+		goto handle_request32_err1;
+	}
+
+	mdss_rotator_queue_request(mgr, private, req);
+
+	mutex_unlock(&mgr->lock);
+
+	devm_kfree(&mgr->pdev->dev, items);
+	return ret;
+
+handle_request32_err1:
+	mutex_unlock(&mgr->lock);
+handle_request32_err:
+	devm_kfree(&mgr->pdev->dev, items);
+	/* req may be NULL (alloc failure path); devm_kfree cannot take NULL */
+	if (req)
+		devm_kfree(&mgr->pdev->dev, req);
+	return ret;
+}
+
+/*
+ * Map a compat (32-bit) rotator ioctl number to its native equivalent;
+ * unknown commands pass through unchanged.
+ */
+static unsigned int __do_compat_ioctl_rot(unsigned int cmd32)
+{
+	switch (cmd32) {
+	case MDSS_ROTATION_REQUEST32:
+		return MDSS_ROTATION_REQUEST;
+	case MDSS_ROTATION_OPEN32:
+		return MDSS_ROTATION_OPEN;
+	case MDSS_ROTATION_CLOSE32:
+		return MDSS_ROTATION_CLOSE;
+	case MDSS_ROTATION_CONFIG32:
+		return MDSS_ROTATION_CONFIG;
+	default:
+		return cmd32;
+	}
+}
+
+/*
+ * mdss_rotator_compat_ioctl - 32-bit-userspace ioctl entry point.
+ *
+ * Validates the file handle, translates the compat command number, then
+ * dispatches; only REQUEST needs a dedicated compat handler, the other
+ * commands share the native implementations.
+ */
+static long mdss_rotator_compat_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct mdss_rot_file_private *private;
+	int ret = -EINVAL;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	if (atomic_read(&rot_mgr->device_suspended))
+		return -EPERM;
+
+	if (!file->private_data)
+		return -EINVAL;
+
+	private = (struct mdss_rot_file_private *)file->private_data;
+
+	/* reject private_data that is not on the manager's file list */
+	if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
+		pr_err("Calling ioctl with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	cmd = __do_compat_ioctl_rot(cmd);
+
+	switch (cmd) {
+	case MDSS_ROTATION_REQUEST:
+		ATRACE_BEGIN("rotator_request32");
+		ret = mdss_rotator_handle_request32(rot_mgr, private, arg);
+		ATRACE_END("rotator_request32");
+		break;
+	case MDSS_ROTATION_OPEN:
+		ret = mdss_rotator_open_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CLOSE:
+		ret = mdss_rotator_close_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CONFIG:
+		ret = mdss_rotator_config_session(rot_mgr, private, arg);
+		break;
+	default:
+		pr_err("unexpected IOCTL %d\n", cmd);
+	}
+
+	if (ret)
+		pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
+	return ret;
+
+}
+
+/*
+ * mdss_rotator_ioctl - native ioctl entry point.
+ *
+ * Validates manager/suspend/handle state, then dispatches the four
+ * rotator commands (request, open, close, config session).
+ */
+static long mdss_rotator_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct mdss_rot_file_private *private;
+	int ret = -EINVAL;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	if (atomic_read(&rot_mgr->device_suspended))
+		return -EPERM;
+
+	if (!file->private_data)
+		return -EINVAL;
+
+	private = (struct mdss_rot_file_private *)file->private_data;
+
+	/* reject private_data that is not on the manager's file list */
+	if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
+		pr_err("Calling ioctl with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case MDSS_ROTATION_REQUEST:
+		ATRACE_BEGIN("rotator_request");
+		ret = mdss_rotator_handle_request(rot_mgr, private, arg);
+		ATRACE_END("rotator_request");
+		break;
+	case MDSS_ROTATION_OPEN:
+		ret = mdss_rotator_open_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CLOSE:
+		ret = mdss_rotator_close_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CONFIG:
+		ret = mdss_rotator_config_session(rot_mgr, private, arg);
+		break;
+	default:
+		pr_err("unexpected IOCTL %d\n", cmd);
+	}
+
+	if (ret)
+		pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
+	return ret;
+}
+
+/*
+ * mdss_rotator_show_capabilities - sysfs "caps" read handler; reports
+ * writeback queue count and downscale support.
+ */
+static ssize_t mdss_rotator_show_capabilities(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	size_t len = PAGE_SIZE;
+	int cnt = 0;
+
+	if (!rot_mgr)
+		return cnt;
+
+#define SPRINT(fmt, ...) \
+	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("wb_count=%d\n", rot_mgr->queue_count);
+	SPRINT("downscale=%d\n", rot_mgr->has_downscale);
+
+/* keep the helper macro local to this function */
+#undef SPRINT
+
+	return cnt;
+}
+
+/* Read-only "caps" sysfs node exposing rotator capabilities. */
+static DEVICE_ATTR(caps, 0444, mdss_rotator_show_capabilities, NULL);
+
+static struct attribute *mdss_rotator_fs_attrs[] = {
+	&dev_attr_caps.attr,
+	NULL
+};
+
+static struct attribute_group mdss_rotator_fs_attr_group = {
+	.attrs = mdss_rotator_fs_attrs
+};
+
+/* Char-device entry points for the rotator device node. */
+static const struct file_operations mdss_rotator_fops = {
+	.owner = THIS_MODULE,
+	.open = mdss_rotator_open,
+	.release = mdss_rotator_close,
+	.unlocked_ioctl = mdss_rotator_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mdss_rotator_compat_ioctl,
+#endif
+};
+
+/*
+ * mdss_rotator_parse_dt_bus - read data-bus and (optional) register-bus
+ * scaling tables from devicetree.
+ *
+ * On any failure the corresponding bus_scale_pdata pointer is forced to
+ * NULL so later code that checks "if (pdata)" never dereferences an
+ * ERR_PTR.
+ */
+static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr,
+	struct platform_device *dev)
+{
+	struct device_node *node;
+	int ret = 0, i;
+	bool register_bus_needed;
+	int usecases;
+
+	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
+	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
+		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
+		if (!ret)
+			ret = -EINVAL;
+		pr_err("msm_bus_cl_get_pdata failed. ret=%d\n", ret);
+		/* never leave an ERR_PTR behind for a later NULL-check */
+		mgr->data_bus.bus_scale_pdata = NULL;
+	}
+
+	register_bus_needed = of_property_read_bool(dev->dev.of_node,
+		"qcom,mdss-has-reg-bus");
+	if (register_bus_needed) {
+		node = of_get_child_by_name(
+			dev->dev.of_node, "qcom,mdss-rot-reg-bus");
+		if (!node) {
+			/* no DT node: fall back to the static default table */
+			mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
+			usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
+			for (i = 0; i < usecases; i++) {
+				rot_reg_bus_usecases[i].num_paths = 1;
+				rot_reg_bus_usecases[i].vectors =
+					&rot_reg_bus_vectors[i];
+			}
+		} else {
+			mgr->reg_bus.bus_scale_pdata =
+				msm_bus_pdata_from_node(dev, node);
+			if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
+				ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
+				if (!ret)
+					ret = -EINVAL;
+				pr_err("reg_rot_bus failed rc=%d\n", ret);
+				mgr->reg_bus.bus_scale_pdata = NULL;
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * mdss_rotator_parse_dt - read rotator properties (wb count, downscale,
+ * UBWC support) and bus tables from devicetree.
+ */
+static int mdss_rotator_parse_dt(struct mdss_rot_mgr *mgr,
+	struct platform_device *dev)
+{
+	int ret = 0;
+	u32 data;
+
+	ret = of_property_read_u32(dev->dev.of_node,
+		"qcom,mdss-wb-count", &data);
+	if (ret) {
+		pr_err("Error in device tree\n");
+		return ret;
+	}
+	if (data > ROT_MAX_HW_BLOCKS) {
+		pr_err("Err, num of wb block (%d) larger than sw max %d\n",
+			data, ROT_MAX_HW_BLOCKS);
+		return -EINVAL;
+	}
+
+	/* operate on the mgr argument, not the global rot_mgr alias */
+	mgr->queue_count = data;
+	mgr->has_downscale = of_property_read_bool(dev->dev.of_node,
+		"qcom,mdss-has-downscale");
+	mgr->has_ubwc = of_property_read_bool(dev->dev.of_node,
+		"qcom,mdss-has-ubwc");
+
+	ret = mdss_rotator_parse_dt_bus(mgr, dev);
+	if (ret)
+		pr_err("Failed to parse bus data\n");
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_put_dt_vreg_data - undo mdss_rotator_get_dt_vreg_data():
+ * release the regulator configuration and free the vreg table.
+ */
+static void mdss_rotator_put_dt_vreg_data(struct device *dev,
+	struct mdss_module_power *mp)
+{
+	if (!mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	msm_mdss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
+	/* guard: devm_kfree() must not be called with a NULL pointer */
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+}
+
+/*
+ * mdss_rotator_get_dt_vreg_data - build the regulator table from the
+ * "qcom,supply-names" devicetree property and configure the supplies.
+ *
+ * Return: 0 on success (including "no vreg property"), negative error
+ * code otherwise.
+ */
+static int mdss_rotator_get_dt_vreg_data(struct device *dev,
+	struct mdss_module_power *mp)
+{
+	const char *st = NULL;
+	struct device_node *of_node = NULL;
+	int dt_vreg_total = 0;
+	int i;
+	/* initialized: with zero supplies the loop never assigns rc */
+	int rc = 0;
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total < 0) {
+		/* supplies are optional: absence is not an error */
+		DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		return 0;
+	}
+	mp->num_vreg = dt_vreg_total;
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) *
+		dt_vreg_total, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		DEV_ERR("%s: can't alloc vreg mem\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* vreg-name */
+	for (i = 0; i < dt_vreg_total; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,supply-names", i, &st);
+		if (rc) {
+			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
+	}
+	msm_mdss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
+
+	for (i = 0; i < dt_vreg_total; i++) {
+		DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE]);
+	}
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+	return rc;
+}
+
+/* Drop the data-bus and register-bus scaling clients, if registered. */
+static void mdss_rotator_bus_scale_unregister(struct mdss_rot_mgr *mgr)
+{
+	u32 data_hdl = mgr->data_bus.bus_hdl;
+	u32 reg_hdl = mgr->reg_bus.bus_hdl;
+
+	pr_debug("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
+		data_hdl, reg_hdl);
+
+	if (data_hdl)
+		msm_bus_scale_unregister_client(data_hdl);
+
+	if (reg_hdl)
+		msm_bus_scale_unregister_client(reg_hdl);
+}
+
+/*
+ * mdss_rotator_bus_scale_register - register the mandatory data-bus
+ * client and, when a table exists, the optional register-bus client.
+ *
+ * Return: 0 on success, -EINVAL on missing table or registration failure.
+ */
+static int mdss_rotator_bus_scale_register(struct mdss_rot_mgr *mgr)
+{
+	if (!mgr->data_bus.bus_scale_pdata) {
+		pr_err("Scale table is NULL\n");
+		return -EINVAL;
+	}
+
+	mgr->data_bus.bus_hdl =
+		msm_bus_scale_register_client(
+		mgr->data_bus.bus_scale_pdata);
+	if (!mgr->data_bus.bus_hdl) {
+		pr_err("bus_client register failed\n");
+		return -EINVAL;
+	}
+	pr_debug("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
+
+	/* register bus is optional: only present when DT provided a table */
+	if (mgr->reg_bus.bus_scale_pdata) {
+		mgr->reg_bus.bus_hdl =
+			msm_bus_scale_register_client(
+			mgr->reg_bus.bus_scale_pdata);
+		if (!mgr->reg_bus.bus_hdl) {
+			pr_err("register bus_client register failed\n");
+			/* roll back the data-bus registration above */
+			mdss_rotator_bus_scale_unregister(mgr);
+			return -EINVAL;
+		}
+		pr_debug("registered register bus_hdl=%x\n",
+			mgr->reg_bus.bus_hdl);
+	}
+
+	return 0;
+}
+
+/*
+ * Look up a named clock and store it in mgr->rot_clk[clk_idx].
+ * Fails if the index is out of range or already populated.
+ */
+static int mdss_rotator_clk_register(struct platform_device *pdev,
+	struct mdss_rot_mgr *mgr, char *clk_name, u32 clk_idx)
+{
+	struct clk *clk;
+
+	pr_debug("registered clk_reg\n");
+
+	if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+	if (mgr->rot_clk[clk_idx]) {
+		pr_err("Stomping on clk prev registered:%d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	clk = devm_clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(clk)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(clk);
+	}
+
+	mgr->rot_clk[clk_idx] = clk;
+	return 0;
+}
+
+/*
+ * mdss_rotator_res_init - acquire regulators, clocks and bus clients.
+ * On any clock/bus failure the regulator data acquired first is released.
+ */
+static int mdss_rotator_res_init(struct platform_device *pdev,
+	struct mdss_rot_mgr *mgr)
+{
+	int ret;
+
+	ret = mdss_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	if (ret)
+		return ret;
+
+	ret = mdss_rotator_clk_register(pdev, mgr,
+		"iface_clk", MDSS_CLK_ROTATOR_AHB);
+	if (ret)
+		goto error;
+
+	ret = mdss_rotator_clk_register(pdev, mgr,
+		"rot_core_clk", MDSS_CLK_ROTATOR_CORE);
+	if (ret)
+		goto error;
+
+	ret = mdss_rotator_bus_scale_register(mgr);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	/* clocks are devm-managed; only the vreg data needs explicit undo */
+	mdss_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	return ret;
+}
+
+/*
+ * mdss_rotator_probe - driver probe: parse DT, set up the work queues,
+ * create the char device + sysfs node and acquire hw resources.
+ * All earlier steps are unwound in reverse order on failure.
+ */
+static int mdss_rotator_probe(struct platform_device *pdev)
+{
+	int ret;
+	bool sysfs_created = false;
+
+	rot_mgr = devm_kzalloc(&pdev->dev, sizeof(struct mdss_rot_mgr),
+		GFP_KERNEL);
+	if (!rot_mgr)
+		return -ENOMEM;
+
+	rot_mgr->pdev = pdev;
+	ret = mdss_rotator_parse_dt(rot_mgr, pdev);
+	if (ret) {
+		pr_err("fail to parse the dt\n");
+		goto error_parse_dt;
+	}
+
+	mutex_init(&rot_mgr->lock);
+	mutex_init(&rot_mgr->clk_lock);
+	mutex_init(&rot_mgr->bus_lock);
+	atomic_set(&rot_mgr->device_suspended, 0);
+	ret = mdss_rotator_init_queue(rot_mgr);
+	if (ret) {
+		pr_err("fail to init queue\n");
+		goto error_get_dev_num;
+	}
+
+	mutex_init(&rot_mgr->file_lock);
+	INIT_LIST_HEAD(&rot_mgr->file_list);
+
+	platform_set_drvdata(pdev, rot_mgr);
+
+	ret = alloc_chrdev_region(&rot_mgr->dev_num, 0, 1, DRIVER_NAME);
+	if (ret  < 0) {
+		pr_err("alloc_chrdev_region failed ret = %d\n", ret);
+		goto error_get_dev_num;
+	}
+
+	rot_mgr->class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(rot_mgr->class)) {
+		ret = PTR_ERR(rot_mgr->class);
+		pr_err("couldn't create class rc = %d\n", ret);
+		goto error_class_create;
+	}
+
+	rot_mgr->device = device_create(rot_mgr->class, NULL,
+		rot_mgr->dev_num, NULL, DRIVER_NAME);
+	if (IS_ERR(rot_mgr->device)) {
+		ret = PTR_ERR(rot_mgr->device);
+		pr_err("device_create failed %d\n", ret);
+		goto error_class_device_create;
+	}
+
+	cdev_init(&rot_mgr->cdev, &mdss_rotator_fops);
+	ret = cdev_add(&rot_mgr->cdev,
+			MKDEV(MAJOR(rot_mgr->dev_num), 0), 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto error_cdev_add;
+	}
+
+	/* sysfs failure is non-fatal; remember whether the group exists */
+	ret = sysfs_create_group(&rot_mgr->device->kobj,
+			&mdss_rotator_fs_attr_group);
+	if (ret)
+		pr_err("unable to register rotator sysfs nodes\n");
+	else
+		sysfs_created = true;
+
+	ret = mdss_rotator_res_init(pdev, rot_mgr);
+	if (ret < 0) {
+		pr_err("res_init failed %d\n", ret);
+		goto error_res_init;
+	}
+	return 0;
+
+error_res_init:
+	/* previously leaked: remove the sysfs group created above */
+	if (sysfs_created)
+		sysfs_remove_group(&rot_mgr->device->kobj,
+				&mdss_rotator_fs_attr_group);
+	cdev_del(&rot_mgr->cdev);
+error_cdev_add:
+	device_destroy(rot_mgr->class, rot_mgr->dev_num);
+error_class_device_create:
+	class_destroy(rot_mgr->class);
+error_class_create:
+	unregister_chrdev_region(rot_mgr->dev_num, 1);
+error_get_dev_num:
+	mdss_rotator_deinit_queue(rot_mgr);
+error_parse_dt:
+	devm_kfree(&pdev->dev, rot_mgr);
+	rot_mgr = NULL;
+	return ret;
+}
+
+/*
+ * mdss_rotator_remove - driver removal: release every open handle and
+ * undo everything probe set up. Uses the drvdata 'mgr' consistently
+ * instead of mixing it with the global rot_mgr.
+ */
+static int mdss_rotator_remove(struct platform_device *dev)
+{
+	struct mdss_rot_mgr *mgr;
+
+	mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
+	if (!mgr)
+		return -ENODEV;
+
+	sysfs_remove_group(&mgr->device->kobj, &mdss_rotator_fs_attr_group);
+
+	mdss_rotator_release_all(mgr);
+
+	mdss_rotator_put_dt_vreg_data(&dev->dev, &mgr->module_power);
+	mdss_rotator_bus_scale_unregister(mgr);
+	cdev_del(&mgr->cdev);
+	device_destroy(mgr->class, mgr->dev_num);
+	class_destroy(mgr->class);
+	unregister_chrdev_region(mgr->dev_num, 1);
+
+	mdss_rotator_deinit_queue(mgr);
+	devm_kfree(&dev->dev, mgr);
+	rot_mgr = NULL;
+	return 0;
+}
+
+/*
+ * mdss_rotator_suspend_cancel_rot_work - cancel every pending request of
+ * every open handle; used when entering suspend.
+ */
+static void mdss_rotator_suspend_cancel_rot_work(struct mdss_rot_mgr *mgr)
+{
+	struct mdss_rot_file_private *priv, *priv_next;
+
+	mutex_lock(&mgr->file_lock);
+	list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
+		mdss_rotator_cancel_all_requests(mgr, priv);
+	}
+	/* unlock through 'mgr', the same expression used for the lock above */
+	mutex_unlock(&mgr->file_lock);
+}
+
+#if defined(CONFIG_PM)
+/* Suspend: block new work, flush pending rotations, drop perf votes. */
+static int mdss_rotator_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct mdss_rot_mgr *mgr = platform_get_drvdata(dev);
+
+	if (!mgr)
+		return -ENODEV;
+
+	atomic_inc(&mgr->device_suspended);
+	mdss_rotator_suspend_cancel_rot_work(mgr);
+	mdss_rotator_update_perf(mgr);
+	return 0;
+}
+
+/* Resume: re-enable ioctls and restore the performance vote. */
+static int mdss_rotator_resume(struct platform_device *dev)
+{
+	struct mdss_rot_mgr *mgr = platform_get_drvdata(dev);
+
+	if (!mgr)
+		return -ENODEV;
+
+	atomic_dec(&mgr->device_suspended);
+	mdss_rotator_update_perf(mgr);
+	return 0;
+}
+#endif
+
+/* Devicetree match table: binds this driver to "qcom,mdss_rotator". */
+static const struct of_device_id mdss_rotator_dt_match[] = {
+	{ .compatible = "qcom,mdss_rotator",},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, mdss_rotator_dt_match);
+
+/* Platform driver using legacy suspend/resume callbacks (pm ops unused). */
+static struct platform_driver mdss_rotator_driver = {
+	.probe = mdss_rotator_probe,
+	.remove = mdss_rotator_remove,
+#if defined(CONFIG_PM)
+	.suspend = mdss_rotator_suspend,
+	.resume = mdss_rotator_resume,
+#endif
+	.driver = {
+		.name = "mdss_rotator",
+		.of_match_table = mdss_rotator_dt_match,
+		.pm = NULL,
+	}
+};
+
+/* Module init: register the platform driver. */
+static int __init mdss_rotator_init(void)
+{
+	return platform_driver_register(&mdss_rotator_driver);
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit mdss_rotator_exit(void)
+{
+	/* 'return <void expression>;' is not valid ISO C — call it plainly */
+	platform_driver_unregister(&mdss_rotator_driver);
+}
+
+module_init(mdss_rotator_init);
+module_exit(mdss_rotator_exit);
+
+MODULE_DESCRIPTION("MSM Rotator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/msm/mdss_rotator_internal.h b/drivers/video/fbdev/msm/mdss_rotator_internal.h
new file mode 100644
index 0000000..88f530a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_rotator_internal.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_ROTATOR_INTERNAL_H
+#define MDSS_MDP_ROTATOR_INTERNAL_H
+
+#include <linux/list.h>
+#include <linux/file.h>
+#include <linux/mdss_rotator.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+#include "mdss_mdp.h"
+
+/*
+ * Defining characteristics about rotation work, that has corresponding
+ * fmt and roi checks in open session
+ */
+#define MDSS_MDP_DEFINING_FLAG_BITS MDP_ROTATION_90
+
+struct mdss_rot_entry;
+struct mdss_rot_perf;
+
+enum mdss_rotator_clk_type {
+ MDSS_CLK_ROTATOR_AHB,
+ MDSS_CLK_ROTATOR_CORE,
+ MDSS_CLK_ROTATOR_END_IDX,
+};
+
+/*
+ * placeholder for performance profiling
+ * or debug support, not used currently
+ */
+struct mdss_rot_entry_cb_intf {
+ void (*pre_commit)(struct mdss_rot_entry *entry, void *data);
+ void (*post_commit)(struct mdss_rot_entry *entry,
+ void *data, int status);
+};
+
+struct mdss_rot_timeline {
+ struct mutex lock;
+ struct mdss_timeline *timeline;
+ u32 next_value;
+ char fence_name[32];
+};
+
+struct mdss_rot_hw_resource {
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_mdp_mixer *mixer;
+ struct mdss_mdp_pipe *pipe;
+ struct mdss_mdp_writeback *wb;
+ u32 pipe_id;
+ u32 wb_id;
+
+ u32 pending_count;
+ struct mdss_rot_entry *workload;
+};
+
+struct mdss_rot_queue {
+ struct workqueue_struct *rot_work_queue;
+ struct mdss_rot_timeline timeline;
+
+ struct mutex hw_lock;
+ struct mdss_rot_hw_resource *hw;
+};
+
+struct mdss_rot_entry_container {
+ struct list_head list;
+ u32 flags;
+ u32 count;
+ atomic_t pending_count;
+ struct mdss_rot_entry *entries;
+};
+
+struct mdss_rot_entry {
+ struct mdp_rotation_item item;
+ struct work_struct commit_work;
+
+ struct mdss_rot_queue *queue;
+ struct mdss_rot_entry_container *request;
+
+ struct mdss_mdp_data src_buf;
+ struct mdss_mdp_data dst_buf;
+
+ struct mdss_fence *input_fence;
+
+ int output_fence_fd;
+ struct mdss_fence *output_fence;
+ bool output_signaled;
+
+ u32 dnsc_factor_w;
+ u32 dnsc_factor_h;
+
+ struct mdss_rot_entry_cb_intf intf;
+ void *intf_data;
+
+ struct mdss_rot_perf *perf;
+ bool work_assigned; /* Used when cleaning up work_distribution */
+};
+
+struct mdss_rot_perf {
+ struct list_head list;
+ struct mdp_rotation_config config;
+ unsigned long clk_rate;
+ u64 bw;
+ struct mutex work_dis_lock;
+ u32 *work_distribution;
+ int last_wb_idx; /* last known wb index, used when above count is 0 */
+};
+
+struct mdss_rot_file_private {
+ struct list_head list;
+
+ struct mutex req_lock;
+ struct list_head req_list;
+
+ struct mutex perf_lock;
+ struct list_head perf_list;
+
+ struct file *file;
+};
+
+struct mdss_rot_bus_data_type {
+ struct msm_bus_scale_pdata *bus_scale_pdata;
+ u32 bus_hdl;
+ u32 curr_bw_uc_idx;
+ u64 curr_quota_val;
+};
+
+struct mdss_rot_mgr {
+	struct mutex lock;
+
+	/* >0 while the rotator platform device is suspended */
+	atomic_t device_suspended;
+
+	u32 session_id_generator;
+
+	struct platform_device *pdev;
+
+	/* char-device bookkeeping for the rotator device node */
+	dev_t dev_num;
+	struct cdev cdev;
+	struct class *class;
+	struct device *device;
+
+	/*
+	 * managing rotation queues, depends on
+	 * how many hw pipes available on the system
+	 */
+	int queue_count;
+	struct mdss_rot_queue *queues;
+
+	struct mutex file_lock;
+	/*
+	 * managing all the open file sessions to bw calculations,
+	 * and resource clean up during suspend
+	 */
+	struct list_head file_list;
+
+	struct mutex bus_lock;
+	u64 pending_close_bw_vote;
+	struct mdss_rot_bus_data_type data_bus;
+	struct mdss_rot_bus_data_type reg_bus;
+
+	/* Module power is only used for regulator management */
+	struct mdss_module_power module_power;
+	bool regulator_enable;
+
+	struct mutex clk_lock;
+	int res_ref_cnt;
+	struct clk *rot_clk[MDSS_CLK_ROTATOR_END_IDX];
+	int rot_enable_clk_cnt;
+
+	bool has_downscale;
+	bool has_ubwc;
+};
+
+#ifdef CONFIG_COMPAT
+
+/* open a rotation session */
+#define MDSS_ROTATION_OPEN32 \
+ _IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 1, compat_caddr_t)
+
+/* change the rotation session configuration */
+#define MDSS_ROTATION_CONFIG32 \
+ _IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 2, compat_caddr_t)
+
+/* queue the rotation request */
+#define MDSS_ROTATION_REQUEST32 \
+ _IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 3, compat_caddr_t)
+
+/* close a rotation session with the specified rotation session ID */
+#define MDSS_ROTATION_CLOSE32 \
+ _IOW(MDSS_ROTATOR_IOCTL_MAGIC, 4, unsigned int)
+
+struct mdp_rotation_request32 {
+ uint32_t version;
+ uint32_t flags;
+ uint32_t count;
+ compat_caddr_t list;
+ uint32_t reserved[6];
+};
+#endif
+
+/*
+ * Verify that a rotation item's rect and format match the session's
+ * buffer configuration.  Returns 0 on match, -EINVAL (with an error
+ * log identifying src/dst) on any mismatch.
+ */
+static inline int __compare_session_item_rect(
+	struct mdp_rotation_buf_info *s_rect,
+	struct mdp_rect *i_rect, uint32_t i_fmt, bool src)
+{
+	/* width, height and pixel format must all agree with the session */
+	if ((s_rect->width != i_rect->w) || (s_rect->height != i_rect->h) ||
+			(s_rect->format != i_fmt)) {
+		pr_err("%s: session{%u,%u}f:%u mismatch from item{%u,%u}f:%u\n",
+			(src ? "src":"dst"), s_rect->width, s_rect->height,
+			s_rect->format, i_rect->w, i_rect->h, i_fmt);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Compare all important flag bits associated with rotation between session
+ * config and item request. Format and roi validation is done during open
+ * session and is based certain defining bits. If these defining bits are
+ * different in item request, there is a possibility that rotation item
+ * is not a valid configuration.
+ */
+static inline int __compare_session_rotations(uint32_t cfg_flag,
+	uint32_t item_flag)
+{
+	/* compare only the rotation-defining bits (MDP_ROTATION_90) */
+	cfg_flag &= MDSS_MDP_DEFINING_FLAG_BITS;
+	item_flag &= MDSS_MDP_DEFINING_FLAG_BITS;
+	if (cfg_flag != item_flag) {
+		pr_err("Rotation degree request different from open session\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
new file mode 100644
index 0000000..7a44824
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -0,0 +1,918 @@
+/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/of_platform.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <asm/dma-iommu.h>
+#include "soc/qcom/secure_buffer.h"
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_smmu.h"
+#include "mdss_debug.h"
+
+#define SZ_4G 0xF0000000
+
+#ifdef CONFIG_QCOM_IOMMU
+#include <linux/qcom_iommu.h>
+static inline struct bus_type *mdss_mmu_get_bus(struct device *dev)
+{
+ return msm_iommu_get_bus(dev);
+}
+static inline struct device *mdss_mmu_get_ctx(const char *name)
+{
+ return msm_iommu_get_ctx(name);
+}
+#else
+static inline struct bus_type *mdss_mmu_get_bus(struct device *dev)
+{
+ return &platform_bus_type;
+}
+static inline struct device *mdss_mmu_get_ctx(const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+static DEFINE_MUTEX(mdp_iommu_lock);
+
+void mdss_iommu_lock(void)
+{
+ mutex_lock(&mdp_iommu_lock);
+}
+
+void mdss_iommu_unlock(void)
+{
+ mutex_unlock(&mdp_iommu_lock);
+}
+
+/*
+ * Parse "clock-names"/"clock-rate" from the context-bank DT node into
+ * @mp->clk_config.  Clocks with a zero rate are classified as AHB (bus)
+ * clocks, all others as PCLK.  Returns 0 on success or -ENOMEM.
+ */
+static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
+		struct mdss_module_power *mp)
+{
+	u32 i = 0;
+	/* rc must be int: a u32 cannot legitimately hold -ENOMEM */
+	int rc = 0;
+	const char *clock_name;
+	u32 clock_rate;
+	int num_clk;
+
+	num_clk = of_property_count_strings(pdev->dev.of_node,
+		"clock-names");
+	if (num_clk <= 0) {
+		/*
+		 * NOTE(review): rc stays 0 here, so a missing "clock-names"
+		 * property is only logged, not treated as a probe failure.
+		 * Confirm this soft-fail is intended before changing it.
+		 */
+		pr_err("clocks are not defined\n");
+		goto clk_err;
+	}
+
+	mp->num_clk = num_clk;
+	mp->clk_config = devm_kzalloc(&pdev->dev,
+		sizeof(struct mdss_clk) * mp->num_clk, GFP_KERNEL);
+	if (!mp->clk_config) {
+		rc = -ENOMEM;
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < mp->num_clk; i++) {
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+			i, &clock_name);
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+			sizeof(mp->clk_config[i].clk_name));
+
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+			i, &clock_rate);
+		mp->clk_config[i].rate = clock_rate;
+
+		/* rate of 0 marks an AHB (bus) clock, otherwise a pixel clk */
+		if (!clock_rate)
+			mp->clk_config[i].type = DSS_CLK_AHB;
+		else
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+clk_err:
+	return rc;
+}
+
+static int mdss_smmu_clk_register(struct platform_device *pdev,
+ struct mdss_module_power *mp)
+{
+ int i, ret;
+ struct clk *clk;
+
+ ret = mdss_smmu_util_parse_dt_clock(pdev, mp);
+ if (ret) {
+ pr_err("unable to parse clocks\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mp->num_clk; i++) {
+ clk = devm_clk_get(&pdev->dev,
+ mp->clk_config[i].clk_name);
+ if (IS_ERR(clk)) {
+ pr_err("unable to get clk: %s\n",
+ mp->clk_config[i].clk_name);
+ return PTR_ERR(clk);
+ }
+ mp->clk_config[i].clk = clk;
+ }
+ return 0;
+}
+
+/*
+ * Enable or disable the power resources of one SMMU context bank:
+ * regulators, reg-bus vote and clocks (in that order on enable, reverse
+ * order on disable).  On an enable failure the already-acquired
+ * resources are rolled back.  Returns 0 on success or a negative errno.
+ */
+static int mdss_smmu_enable_power(struct mdss_smmu_client *mdss_smmu,
+	bool enable)
+{
+	int rc = 0;
+	struct mdss_module_power *mp;
+
+	if (!mdss_smmu)
+		return -EINVAL;
+
+	mp = &mdss_smmu->mp;
+
+	/* nothing to do for banks with no regulators and no clocks */
+	if (!mp->num_vreg && !mp->num_clk)
+		return 0;
+
+	if (enable) {
+		rc = msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, true);
+		if (rc) {
+			pr_err("vreg enable failed - rc:%d\n", rc);
+			goto end;
+		}
+		mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
+			VOTE_INDEX_LOW);
+		rc = msm_mdss_enable_clk(mp->clk_config, mp->num_clk, true);
+		if (rc) {
+			pr_err("clock enable failed - rc:%d\n", rc);
+			/* roll back the bus vote and regulators taken above */
+			mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+			msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg,
+			false);
+			goto end;
+		}
+	} else {
+		/* disable in reverse order: clocks, bus vote, regulators */
+		msm_mdss_enable_clk(mp->clk_config, mp->num_clk, false);
+		mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
+			VOTE_INDEX_DISABLE);
+		msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
+	}
+end:
+	return rc;
+}
+
+/*
+ * mdss_smmu_v2_attach()
+ *
+ * Associates each configured VA range with the corresponding smmu context
+ * bank device. Enables the clks as smmu_v2 requires voting it before the usage.
+ * And iommu attach is done only once during the initial attach and it is never
+ * detached as smmu v2 uses a feature called 'retention'.
+ */
+static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
+{
+	struct mdss_smmu_client *mdss_smmu;
+	int i, rc = 0;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		if (!mdss_smmu_is_valid_domain_type(mdata, i))
+			continue;
+
+		mdss_smmu = mdss_smmu_get_cb(i);
+		if (mdss_smmu && mdss_smmu->dev) {
+			/*
+			 * Skip the power-enable when a handoff is pending:
+			 * the bank was left powered by probe in that case.
+			 */
+			if (!mdss_smmu->handoff_pending) {
+				rc = mdss_smmu_enable_power(mdss_smmu, true);
+				if (rc) {
+					pr_err("power enable failed - domain:[%d] rc:%d\n",
+						i, rc);
+					goto err;
+				}
+			}
+			mdss_smmu->handoff_pending = false;
+
+			/*
+			 * Attach only once; the mapping is never detached
+			 * afterwards (smmu v2 retention, see header comment).
+			 */
+			if (!mdss_smmu->domain_attached) {
+				rc = arm_iommu_attach_device(mdss_smmu->dev,
+						mdss_smmu->mmu_mapping);
+				if (rc) {
+					pr_err("iommu attach device failed for domain[%d] with err:%d\n",
+						i, rc);
+					mdss_smmu_enable_power(mdss_smmu,
+						false);
+					goto err;
+				}
+				mdss_smmu->domain_attached = true;
+				pr_debug("iommu v2 domain[%i] attached\n", i);
+			}
+		} else {
+			pr_err("iommu device not attached for domain[%d]\n", i);
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+
+err:
+	/* unwind every domain attached/powered in earlier iterations */
+	for (i--; i >= 0; i--) {
+		mdss_smmu = mdss_smmu_get_cb(i);
+		if (mdss_smmu && mdss_smmu->dev) {
+			arm_iommu_detach_device(mdss_smmu->dev);
+			mdss_smmu_enable_power(mdss_smmu, false);
+			mdss_smmu->domain_attached = false;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_smmu_v2_detach()
+ *
+ * Only disables the clks as it is not required to detach the iommu mapped
+ * VA range from the device in smmu_v2 as explained in the mdss_smmu_v2_attach
+ */
+static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
+{
+	struct mdss_smmu_client *mdss_smmu;
+	int i;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		if (!mdss_smmu_is_valid_domain_type(mdata, i))
+			continue;
+
+		mdss_smmu = mdss_smmu_get_cb(i);
+		/* leave power untouched while this bank's handoff is pending */
+		if (mdss_smmu && mdss_smmu->dev && !mdss_smmu->handoff_pending)
+			mdss_smmu_enable_power(mdss_smmu, false);
+	}
+
+	return 0;
+}
+
+static int mdss_smmu_get_domain_id_v2(u32 type)
+{
+ return type;
+}
+
+/*
+ * mdss_smmu_dma_buf_attach_v2()
+ *
+ * Same as mdss_smmu_dma_buf_attach except that the device is got from
+ * the configured smmu v2 context banks.
+ */
+static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
+ struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return NULL;
+ }
+
+ return dma_buf_attach(dma_buf, mdss_smmu->dev);
+}
+
+/*
+ * mdss_smmu_map_dma_buf_v2()
+ *
+ * Maps existing buffer (by struct scatterlist) into SMMU context bank device.
+ * From which we can take the virtual address and size allocated.
+ * msm_map_dma_buf is deprecated with smmu v2 and it uses dma_map_sg instead
+ */
+static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
+		struct sg_table *table, int domain, dma_addr_t *iova,
+		unsigned long *size, int dir)
+{
+	int rc;
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+	ATRACE_BEGIN("map_buffer");
+	rc = msm_dma_map_sg_lazy(mdss_smmu->dev, table->sgl, table->nents, dir,
+			dma_buf);
+	if (rc != table->nents) {
+		/* close the trace event on the error path too */
+		ATRACE_END("map_buffer");
+		pr_err("dma map sg failed\n");
+		return -ENOMEM;
+	}
+	ATRACE_END("map_buffer");
+	/* report the mapped address/length from the first sg entry */
+	*iova = table->sgl->dma_address;
+	*size = table->sgl->dma_length;
+	return 0;
+}
+
+static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
+ int dir, struct dma_buf *dma_buf)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ ATRACE_BEGIN("unmap_buffer");
+ msm_dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir,
+ dma_buf);
+ ATRACE_END("unmap_buffer");
+}
+
+/*
+ * mdss_smmu_dma_alloc_coherent_v2()
+ *
+ * Allocates buffer same as mdss_smmu_dma_alloc_coherent_v1, but in addition it
+ * also maps to the SMMU domain with the help of the respective SMMU context
+ * bank device
+ */
+static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
+ gfp_t gfp, int domain)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ *cpu_addr = dma_alloc_coherent(mdss_smmu->dev, size, iova, gfp);
+ if (!*cpu_addr) {
+ pr_err("dma alloc coherent failed!\n");
+ return -ENOMEM;
+ }
+ *phys = iommu_iova_to_phys(mdss_smmu->mmu_mapping->domain,
+ *iova);
+ return 0;
+}
+
+static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
+}
+
+/*
+ * mdss_smmu_map_v2()
+ *
+ * Same as mdss_smmu_map_v1, just that it maps to the appropriate domain
+ * referred by the smmu context bank handles.
+ */
+static int mdss_smmu_map_v2(int domain, phys_addr_t iova, phys_addr_t phys,
+ int gfp_order, int prot)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ return iommu_map(mdss_smmu->mmu_mapping->domain,
+ iova, phys, gfp_order, prot);
+}
+
+static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ iommu_unmap(mdss_smmu->mmu_mapping->domain, iova, gfp_order);
+}
+
+/*
+ * mdss_smmu_dsi_alloc_buf_v2()
+ *
+ * Allocates the buffer and mapping is done later
+ */
+static char *mdss_smmu_dsi_alloc_buf_v2(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp)
+{
+ char *data;
+
+ data = kzalloc(size, GFP_KERNEL | GFP_DMA);
+ if (data)
+ *dmap = (dma_addr_t) virt_to_phys(data);
+
+ return data;
+}
+
+/*
+ * mdss_smmu_dsi_map_buffer_v2()
+ *
+ * Maps the buffer allocated in mdss_smmu_dsi_alloc_buffer_v2 with the SMMU
+ * domain and uses dma_map_single as msm_iommu_map_contig_buffer is deprecated
+ * in smmu v2.
+ */
+static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
+ unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
+ int dir)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ *dma_addr = dma_map_single(mdss_smmu->dev, cpu_addr, size, dir);
+ if (IS_ERR_VALUE(*dma_addr)) {
+ pr_err("dma map single failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ if (is_mdss_iommu_attached())
+ dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
+}
+
+/*
+ * IOMMU fault handler installed for context banks whose register space
+ * ("mmu_cb") was mapped at probe.  Logs the faulting IOVA together with
+ * the master ID extracted from the FSYNR1 register, then dumps per-MID
+ * debug state.  Always returns -ENODEV, i.e. the fault is never treated
+ * as resolved here.
+ */
+int mdss_smmu_fault_handler(struct iommu_domain *domain, struct device *dev,
+		unsigned long iova, int flags, void *user_data)
+{
+	struct mdss_smmu_client *mdss_smmu =
+		(struct mdss_smmu_client *)user_data;
+	u32 fsynr1, mid, i;
+
+	if (!mdss_smmu || !mdss_smmu->mmu_base)
+		goto end;
+
+	/* master id lives in the low byte of FSYNR1 */
+	fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
+	mid = fsynr1 & 0xff;
+	pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
+		iova, flags, fsynr1, mid);
+
+	/* get domain id information */
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		if (mdss_smmu == mdss_smmu_get_cb(i))
+			break;
+	}
+
+	if (i == MDSS_IOMMU_MAX_DOMAIN)
+		goto end;
+
+	mdss_mdp_debug_mid(mid);
+end:
+	return -ENODEV;
+}
+
+static void mdss_smmu_deinit_v2(struct mdss_data_type *mdata)
+{
+ int i;
+ struct mdss_smmu_client *mdss_smmu;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ mdss_smmu = mdss_smmu_get_cb(i);
+ if (mdss_smmu && mdss_smmu->dev)
+ arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+ }
+}
+
+/*
+ * sg_clone - Duplicate an existing chained sgl
+ * @orig_sgl: Original sg list to be duplicated
+ * @len: Total length of sg while taking chaining into account
+ * @gfp_mask: GFP allocation mask
+ * @padding: specifies if padding is required
+ *
+ * Description:
+ * Clone a chained sgl. This cloned copy may be modified in some ways while
+ * keeping the original sgl intact. Also allow the cloned copy to have
+ * a smaller length than the original which may reduce the sgl total
+ * sg entries and also allows cloned copy to have one extra sg entry on
+ * either sides of sgl.
+ *
+ * Returns:
+ * Pointer to new kmalloced sg list, ERR_PTR() on error
+ *
+ */
+static struct scatterlist *sg_clone(struct scatterlist *orig_sgl, u64 len,
+		gfp_t gfp_mask, bool padding)
+{
+	int nents;
+	bool last_entry;
+	struct scatterlist *sgl, *head;
+
+	nents = sg_nents(orig_sgl);
+	if (nents < 0)
+		return ERR_PTR(-EINVAL);
+	/* reserve one extra entry on each side of the cloned list */
+	if (padding)
+		nents += 2;
+
+	head = kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
+	if (!head)
+		return ERR_PTR(-ENOMEM);
+
+	sgl = head;
+
+	sg_init_table(sgl, nents);
+
+	if (padding) {
+		/* duplicate the first original entry into the pad slot */
+		*sgl = *orig_sgl;
+		if (sg_is_chain(orig_sgl)) {
+			orig_sgl = sg_next(orig_sgl);
+			*sgl = *orig_sgl;
+		}
+		/* clear chain/end markers (low two bits of page_link) */
+		sgl->page_link &= (unsigned long)(~0x03);
+		sgl = sg_next(sgl);
+	}
+
+	for (; sgl; orig_sgl = sg_next(orig_sgl), sgl = sg_next(sgl)) {
+
+		last_entry = sg_is_last(sgl);
+
+		/*
+		 * If page_link is pointing to a chained sgl then set
+		 * the sg entry in the cloned list to the next sg entry
+		 * in the original sg list as chaining is already taken
+		 * care.
+		 */
+
+		if (sg_is_chain(orig_sgl))
+			orig_sgl = sg_next(orig_sgl);
+
+		if (padding)
+			last_entry = sg_is_last(orig_sgl);
+
+		*sgl = *orig_sgl;
+		/* clear chain/end markers; end is re-marked explicitly below */
+		sgl->page_link &= (unsigned long)(~0x03);
+
+		if (last_entry) {
+			if (padding) {
+				len -= sg_dma_len(sgl);
+				sgl = sg_next(sgl);
+				*sgl = *orig_sgl;
+			}
+			/* remaining length (or SZ_4K floor) in the last entry */
+			sg_dma_len(sgl) = len ? len : SZ_4K;
+			/* Set bit 1 to indicate end of sgl */
+			sgl->page_link |= 0x02;
+		} else {
+			len -= sg_dma_len(sgl);
+		}
+	}
+
+	return head;
+}
+
+/*
+ * sg_table_clone - Duplicate an existing sg_table including chained sgl
+ * @orig_table: Original sg_table to be duplicated
+ * @len: Total length of sg while taking chaining into account
+ * @gfp_mask: GFP allocation mask
+ * @padding: specifies if padding is required
+ *
+ * Description:
+ * Clone a sg_table along with chained sgl. This cloned copy may be
+ * modified in some ways while keeping the original table and sgl intact.
+ * Also allow the cloned sgl copy to have a smaller length than the original
+ * which may reduce the sgl total sg entries.
+ *
+ * Returns:
+ * Pointer to new kmalloced sg_table, ERR_PTR() on error
+ *
+ */
+static struct sg_table *sg_table_clone(struct sg_table *orig_table,
+		gfp_t gfp_mask, bool padding)
+{
+	struct sg_table *table;
+	struct scatterlist *sg = orig_table->sgl;
+	u64 len = 0;
+
+	/* total payload length across the (possibly chained) original sgl */
+	for (len = 0; sg; sg = sg_next(sg))
+		len += sg->length;
+
+	table = kmalloc(sizeof(struct sg_table), gfp_mask);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	table->sgl = sg_clone(orig_table->sgl, len, gfp_mask, padding);
+	if (IS_ERR(table->sgl)) {
+		/* propagate sg_clone()'s error (may be -EINVAL, not only -ENOMEM) */
+		long rc = PTR_ERR(table->sgl);
+
+		kfree(table);
+		return ERR_PTR(rc);
+	}
+
+	table->nents = table->orig_nents = sg_nents(table->sgl);
+
+	return table;
+}
+
+static void mdss_smmu_ops_init(struct mdss_data_type *mdata)
+{
+ mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
+ mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
+ mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
+ mdata->smmu_ops.smmu_dma_buf_attach =
+ mdss_smmu_dma_buf_attach_v2;
+ mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
+ mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
+ mdata->smmu_ops.smmu_dma_alloc_coherent =
+ mdss_smmu_dma_alloc_coherent_v2;
+ mdata->smmu_ops.smmu_dma_free_coherent =
+ mdss_smmu_dma_free_coherent_v2;
+ mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
+ mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
+ mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
+ mdata->smmu_ops.smmu_dsi_map_buffer =
+ mdss_smmu_dsi_map_buffer_v2;
+ mdata->smmu_ops.smmu_dsi_unmap_buffer =
+ mdss_smmu_dsi_unmap_buffer_v2;
+ mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
+ mdata->smmu_ops.smmu_sg_table_clone = sg_table_clone;
+}
+
+/*
+ * mdss_smmu_device_create()
+ * @dev: mdss_mdp device
+ *
+ * For smmu_v2, each context bank is a separate child device of mdss_mdp.
+ * Platform devices are created for those smmu related child devices of
+ * mdss_mdp here. This would facilitate probes to happen for these devices in
+ * which the smmu mapping and initialization is handled.
+ */
+void mdss_smmu_device_create(struct device *dev)
+{
+ struct device_node *parent, *child;
+
+ parent = dev->of_node;
+ for_each_child_of_node(parent, child) {
+ char name[MDSS_SMMU_COMPAT_STR_LEN] = {};
+
+ strlcpy(name, child->name, sizeof(name));
+ if (is_mdss_smmu_compatible_device(name))
+ of_platform_device_create(child, NULL, dev);
+ }
+}
+
+int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
+{
+ mdss_smmu_device_create(dev);
+ mdss_smmu_ops_init(mdata);
+ mdata->mdss_util->iommu_lock = mdss_iommu_lock;
+ mdata->mdss_util->iommu_unlock = mdss_iommu_unlock;
+ return 0;
+}
+
+static struct mdss_smmu_domain mdss_mdp_unsec = {
+ "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
+static struct mdss_smmu_domain mdss_rot_unsec = {
+ NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
+static struct mdss_smmu_domain mdss_mdp_sec = {
+ "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
+static struct mdss_smmu_domain mdss_rot_sec = {
+ NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
+
+static const struct of_device_id mdss_smmu_dt_match[] = {
+ { .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
+ { .compatible = "qcom,smmu_rot_unsec", .data = &mdss_rot_unsec},
+ { .compatible = "qcom,smmu_mdp_sec", .data = &mdss_mdp_sec},
+ { .compatible = "qcom,smmu_rot_sec", .data = &mdss_rot_sec},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdss_smmu_dt_match);
+
+/*
+ * mdss_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clks as each context bank has its own clks, for which voting
+ * has to be done every time before using that context bank.
+ */
+int mdss_smmu_probe(struct platform_device *pdev)
+{
+	struct device *dev;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_smmu_client *mdss_smmu;
+	int rc = 0;
+	struct mdss_smmu_domain smmu_domain;
+	const struct of_device_id *match;
+	struct mdss_module_power *mp;
+	char name[MAX_CLIENT_NAME_LEN];
+	const __be32 *address = NULL, *size = NULL;
+
+	/* defer until the main mdss_mdp driver has set up mdata */
+	if (!mdata) {
+		pr_err("probe failed as mdata is not initialized\n");
+		return -EPROBE_DEFER;
+	}
+
+	/* resolve which context-bank domain this node describes */
+	match = of_match_device(mdss_smmu_dt_match, &pdev->dev);
+	if (!match || !match->data) {
+		pr_err("probe failed as match data is invalid\n");
+		return -EINVAL;
+	}
+
+	smmu_domain = *(struct mdss_smmu_domain *) (match->data);
+	if (smmu_domain.domain >= MDSS_IOMMU_MAX_DOMAIN) {
+		pr_err("no matching device found\n");
+		return -EINVAL;
+	}
+
+	/* "iommus" present => new-style binding: this node IS the cb device */
+	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
+		dev = &pdev->dev;
+	} else {
+		/*
+		 * For old iommu driver we query the context bank device
+		 * rather than getting it from dt.
+		 */
+		dev = mdss_mmu_get_ctx(smmu_domain.ctx_name);
+		if (!dev) {
+			pr_err("Invalid SMMU ctx for domain:%d\n",
+				smmu_domain.domain);
+			return -EINVAL;
+		}
+	}
+
+	mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain];
+	mp = &mdss_smmu->mp;
+	memset(mp, 0, sizeof(struct mdss_module_power));
+
+	/* optional gdsc supply; when absent num_vreg stays 0 */
+	if (of_find_property(pdev->dev.of_node,
+		"gdsc-mmagic-mdss-supply", NULL)) {
+
+		mp->vreg_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct mdss_vreg), GFP_KERNEL);
+		if (!mp->vreg_config)
+			return -ENOMEM;
+
+		strlcpy(mp->vreg_config->vreg_name, "gdsc-mmagic-mdss",
+			sizeof(mp->vreg_config->vreg_name));
+		mp->num_vreg = 1;
+	}
+
+	rc = msm_mdss_config_vreg(&pdev->dev, mp->vreg_config,
+		mp->num_vreg, true);
+	if (rc) {
+		pr_err("vreg config failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_smmu_clk_register(pdev, mp);
+	if (rc) {
+		pr_err("smmu clk register failed for domain[%d] with err:%d\n",
+			smmu_domain.domain, rc);
+		msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+			false);
+		return rc;
+	}
+
+	/* per-domain reg-bus client used by mdss_smmu_enable_power() */
+	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
+	mdss_smmu->reg_bus_clt = mdss_reg_bus_vote_client_create(name);
+	if (IS_ERR(mdss_smmu->reg_bus_clt)) {
+		pr_err("mdss bus client register failed\n");
+		msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+			false);
+		return PTR_ERR(mdss_smmu->reg_bus_clt);
+	}
+
+	rc = mdss_smmu_enable_power(mdss_smmu, true);
+	if (rc) {
+		pr_err("power enable failed - domain:[%d] rc:%d\n",
+			smmu_domain.domain, rc);
+		goto bus_client_destroy;
+	}
+
+	/* create the VA-range mapping; attach happens later in attach_v2 */
+	mdss_smmu->mmu_mapping = arm_iommu_create_mapping(
+		mdss_mmu_get_bus(dev), smmu_domain.start, smmu_domain.size);
+	if (IS_ERR(mdss_smmu->mmu_mapping)) {
+		pr_err("iommu create mapping failed for domain[%d]\n",
+			smmu_domain.domain);
+		rc = PTR_ERR(mdss_smmu->mmu_mapping);
+		goto disable_power;
+	}
+
+	/* secure domains carry the CP_PIXEL VMID attribute */
+	if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
+		smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
+		int secure_vmid = VMID_CP_PIXEL;
+
+		rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
+			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+		if (rc) {
+			pr_err("couldn't set secure pixel vmid\n");
+			goto release_mapping;
+		}
+	}
+
+	/* keep the bank powered across a pending handoff (see attach_v2) */
+	if (!mdata->handoff_pending)
+		mdss_smmu_enable_power(mdss_smmu, false);
+	else
+		mdss_smmu->handoff_pending = true;
+
+	mdss_smmu->dev = dev;
+
+	/* optional cb register space: enables the custom fault handler */
+	address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0);
+	if (address) {
+		size = address + 1;
+		mdss_smmu->mmu_base = ioremap(be32_to_cpu(*address),
+			be32_to_cpu(*size));
+		if (mdss_smmu->mmu_base)
+			iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
+				mdss_smmu_fault_handler, mdss_smmu);
+	} else {
+		pr_debug("unable to map context bank base\n");
+	}
+
+	pr_info("iommu v2 domain[%d] mapping and clk register successful!\n",
+		smmu_domain.domain);
+	return 0;
+
+release_mapping:
+	arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+disable_power:
+	mdss_smmu_enable_power(mdss_smmu, false);
+bus_client_destroy:
+	mdss_reg_bus_vote_client_destroy(mdss_smmu->reg_bus_clt);
+	mdss_smmu->reg_bus_clt = NULL;
+	msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+		false);
+	return rc;
+}
+
+/*
+ * Release the iommu mapping of the context bank owned by this platform
+ * device.  NOTE(review): the mmu_base ioremap done in probe is not
+ * iounmapped here — confirm whether that is intentional.
+ */
+int mdss_smmu_remove(struct platform_device *pdev)
+{
+	int i;
+	struct mdss_smmu_client *mdss_smmu;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		mdss_smmu = mdss_smmu_get_cb(i);
+		/* only touch the bank that belongs to this pdev */
+		if (mdss_smmu && mdss_smmu->dev &&
+			(mdss_smmu->dev == &pdev->dev))
+			arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+	}
+	return 0;
+}
+
+static struct platform_driver mdss_smmu_driver = {
+ .probe = mdss_smmu_probe,
+ .remove = mdss_smmu_remove,
+ .shutdown = NULL,
+ .driver = {
+ .name = "mdss_smmu",
+ .of_match_table = mdss_smmu_dt_match,
+ },
+};
+
+static int mdss_smmu_register_driver(void)
+{
+ return platform_driver_register(&mdss_smmu_driver);
+}
+
+static int __init mdss_smmu_driver_init(void)
+{
+ int ret;
+
+ ret = mdss_smmu_register_driver();
+ if (ret)
+ pr_err("mdss_smmu_register_driver() failed!\n");
+
+ return ret;
+}
+module_init(mdss_smmu_driver_init);
+
+static void __exit mdss_smmu_driver_cleanup(void)
+{
+ platform_driver_unregister(&mdss_smmu_driver);
+}
+module_exit(mdss_smmu_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MDSS SMMU driver");
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
new file mode 100644
index 0000000..091af3b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -0,0 +1,319 @@
+/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_SMMU_H
+#define MDSS_SMMU_H
+
+#include <linux/msm_ion.h>
+#include <linux/msm_mdp.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#define MDSS_SMMU_COMPATIBLE "qcom,smmu"
+#define MDSS_SMMU_COMPAT_STR_LEN 10
+#define SMMU_CBN_FSYNR1 0x6c
+
+struct mdss_iommu_map_type {
+ char *client_name;
+ char *ctx_name;
+ unsigned long start;
+ unsigned long size;
+};
+
+struct mdss_smmu_domain {
+ char *ctx_name;
+ int domain;
+ unsigned long start;
+ unsigned long size;
+};
+
+void mdss_smmu_register(struct device *dev);
+int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
+
+static inline int mdss_smmu_dma_data_direction(int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ return (mdss_has_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR)) ?
+ DMA_BIDIRECTIONAL : dir;
+}
+
+static inline bool is_mdss_smmu_compatible_device(const char *str)
+{
+ /* check the prefix */
+ return (!strcmp(str, MDSS_SMMU_COMPATIBLE)) ? true : false;
+}
+
+/*
+ * mdss_smmu_is_valid_domain_type()
+ *
+ * Used to check if rotator smmu domain is defined or not by checking if
+ * vbif base is defined and wb rotator exists. As those are associated.
+ */
+static inline bool mdss_smmu_is_valid_domain_type(struct mdss_data_type *mdata,
+ int domain_type)
+{
+ if ((domain_type == MDSS_IOMMU_DOMAIN_ROT_UNSECURE ||
+ domain_type == MDSS_IOMMU_DOMAIN_ROT_SECURE) &&
+ (!mdss_mdp_is_wb_rotator_supported(mdata) ||
+ !mdss_mdp_is_nrt_vbif_base_defined(mdata)))
+ return false;
+ return true;
+}
+
+static inline struct mdss_smmu_client *mdss_smmu_get_cb(u32 domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdss_smmu_is_valid_domain_type(mdata, domain))
+ return NULL;
+
+ return (domain >= MDSS_IOMMU_MAX_DOMAIN) ? NULL :
+ &mdata->mdss_smmu[domain];
+}
+
+static inline struct ion_client *mdss_get_ionclient(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ return mdata ? mdata->iclient : NULL;
+}
+
+static inline int is_mdss_iommu_attached(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ return mdata ? mdata->iommu_attached : false;
+}
+
+static inline int mdss_smmu_get_domain_type(u32 flags, bool rotator)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int type;
+
+ if (flags & MDP_SECURE_OVERLAY_SESSION) {
+ type = (rotator &&
+ mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_SECURE].dev) ?
+ MDSS_IOMMU_DOMAIN_ROT_SECURE : MDSS_IOMMU_DOMAIN_SECURE;
+ } else {
+ type = (rotator &&
+ mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_UNSECURE].dev) ?
+ MDSS_IOMMU_DOMAIN_ROT_UNSECURE :
+ MDSS_IOMMU_DOMAIN_UNSECURE;
+ }
+ return type;
+}
+
+static inline int mdss_smmu_attach(struct mdss_data_type *mdata)
+{
+ int rc;
+
+ mdata->mdss_util->iommu_lock();
+ MDSS_XLOG(mdata->iommu_attached);
+
+ if (mdata->iommu_attached) {
+ pr_debug("mdp iommu already attached\n");
+ rc = 0;
+ goto end;
+ }
+
+ if (!mdata->smmu_ops.smmu_attach) {
+ rc = -ENODEV;
+ goto end;
+ }
+
+ rc = mdata->smmu_ops.smmu_attach(mdata);
+ if (!rc)
+ mdata->iommu_attached = true;
+
+end:
+ mdata->mdss_util->iommu_unlock();
+ return rc;
+}
+
+static inline int mdss_smmu_detach(struct mdss_data_type *mdata)
+{
+	int rc;
+
+	mdata->mdss_util->iommu_lock();
+	MDSS_XLOG(mdata->iommu_attached);
+
+	if (!mdata->iommu_attached) {
+		pr_debug("mdp iommu already detached\n");
+		rc = 0;
+		goto end;
+	}
+
+	if (!mdata->smmu_ops.smmu_detach) {
+		rc = -ENODEV;
+		goto end;
+	}
+
+	rc = mdata->smmu_ops.smmu_detach(mdata);
+	if (!rc)
+		mdata->iommu_attached = false;
+
+end:
+	mdata->mdss_util->iommu_unlock();
+	return rc;
+}
+
+static inline int mdss_smmu_get_domain_id(u32 type)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	/* validate mdata before any helper dereferences it */
+	if (!mdata || !mdata->smmu_ops.smmu_get_domain_id
+		|| type >= MDSS_IOMMU_MAX_DOMAIN)
+		return -ENODEV;
+
+	if (!mdss_smmu_is_valid_domain_type(mdata, type))
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_get_domain_id(type);
+}
+
+static inline struct dma_buf_attachment *mdss_smmu_dma_buf_attach(
+ struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata || !mdata->smmu_ops.smmu_dma_buf_attach)
+ return NULL;
+
+ return mdata->smmu_ops.smmu_dma_buf_attach(dma_buf, dev, domain);
+}
+
+static inline int mdss_smmu_map_dma_buf(struct dma_buf *dma_buf,
+		struct sg_table *table, int domain, dma_addr_t *iova,
+		unsigned long *size, int dir)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	/* guard mdata like the other smmu_ops wrappers do */
+	if (!mdata || !mdata->smmu_ops.smmu_map_dma_buf)
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_map_dma_buf(dma_buf, table,
+			domain, iova, size,
+			mdss_smmu_dma_data_direction(dir));
+}
+
+static inline void mdss_smmu_unmap_dma_buf(struct sg_table *table, int domain,
+ int dir, struct dma_buf *dma_buf)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (mdata->smmu_ops.smmu_unmap_dma_buf)
+ mdata->smmu_ops.smmu_unmap_dma_buf(table, domain,
+ mdss_smmu_dma_data_direction(dir), dma_buf);
+}
+
+static inline int mdss_smmu_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
+ gfp_t gfp, int domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata || !mdata->smmu_ops.smmu_dma_alloc_coherent)
+ return -ENODEV;
+
+ return mdata->smmu_ops.smmu_dma_alloc_coherent(dev, size,
+ phys, iova, cpu_addr, gfp, domain);
+}
+
+static inline void mdss_smmu_dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (mdata && mdata->smmu_ops.smmu_dma_free_coherent)
+ mdata->smmu_ops.smmu_dma_free_coherent(dev, size, cpu_addr,
+ phys, iova, domain);
+}
+
+static inline int mdss_smmu_map(int domain, phys_addr_t iova, phys_addr_t phys,
+ int gfp_order, int prot)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata->smmu_ops.smmu_map)
+ return -ENODEV;
+
+ return mdata->smmu_ops.smmu_map(domain, iova, phys, gfp_order, prot);
+}
+
+static inline void mdss_smmu_unmap(int domain, unsigned long iova,
+ int gfp_order)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (mdata->smmu_ops.smmu_unmap)
+ mdata->smmu_ops.smmu_unmap(domain, iova, gfp_order);
+}
+
+static inline char *mdss_smmu_dsi_alloc_buf(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata->smmu_ops.smmu_dsi_alloc_buf)
+ return NULL;
+
+ return mdata->smmu_ops.smmu_dsi_alloc_buf(dev, size, dmap, gfp);
+}
+
+static inline int mdss_smmu_dsi_map_buffer(phys_addr_t phys,
+ unsigned int domain, unsigned long size, dma_addr_t *dma_addr,
+ void *cpu_addr, int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata->smmu_ops.smmu_dsi_map_buffer)
+ return -ENODEV;
+
+ return mdata->smmu_ops.smmu_dsi_map_buffer(phys, domain, size,
+ dma_addr, cpu_addr,
+ mdss_smmu_dma_data_direction(dir));
+}
+
+static inline void mdss_smmu_dsi_unmap_buffer(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (mdata->smmu_ops.smmu_dsi_unmap_buffer)
+ mdata->smmu_ops.smmu_dsi_unmap_buffer(dma_addr, domain,
+ size, mdss_smmu_dma_data_direction(dir));
+}
+
+static inline void mdss_smmu_deinit(struct mdss_data_type *mdata)
+{
+ if (mdata->smmu_ops.smmu_deinit)
+ mdata->smmu_ops.smmu_deinit(mdata);
+}
+
+static inline struct sg_table *mdss_smmu_sg_table_clone(struct sg_table
+ *orig_table, gfp_t gfp_mask, bool padding)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!mdata || !mdata->smmu_ops.smmu_sg_table_clone)
+ return NULL;
+
+ return mdata->smmu_ops.smmu_sg_table_clone(orig_table,
+ gfp_mask, padding);
+}
+
+#endif /* MDSS_SMMU_H */
diff --git a/drivers/video/fbdev/msm/mdss_sync.c b/drivers/video/fbdev/msm/mdss_sync.c
new file mode 100644
index 0000000..ed611e7
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_sync.c
@@ -0,0 +1,453 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+#include <linux/sync_file.h>
+
+#include "mdss_sync.h"
+
+#define MDSS_SYNC_NAME_SIZE 64
+#define MDSS_SYNC_DRIVER_NAME "mdss"
+
+/**
+ * struct mdss_fence - sync fence context
+ * @base: base sync fence object
+ * @name: name of this sync fence
+ * @fence_list: linked list of outstanding sync fence
+ */
+struct mdss_fence {
+ struct fence base;
+ char name[MDSS_SYNC_NAME_SIZE];
+ struct list_head fence_list;
+};
+
+/**
+ * struct mdss_timeline - sync timeline context
+ * @kref: reference count of timeline
+ * @lock: serialization lock for timeline and fence update
+ * @name: name of timeline
+ * @fence_name: fence name prefix
+ * @next_value: next commit sequence number
+ * @value: current retired sequence number
+ * @context: fence context identifier
+ * @fence_list_head: linked list of outstanding sync fence
+ */
+struct mdss_timeline {
+ struct kref kref;
+ spinlock_t lock;
+ char name[MDSS_SYNC_NAME_SIZE];
+ u32 next_value;
+ u32 value;
+ u64 context;
+ struct list_head fence_list_head;
+};
+
+/*
+ * to_mdss_fence - get mdss fence from fence base object
+ * @fence: Pointer to fence base object
+ */
+static struct mdss_fence *to_mdss_fence(struct fence *fence)
+{
+ return container_of(fence, struct mdss_fence, base);
+}
+
+/*
+ * to_mdss_timeline - get mdss timeline from fence base object
+ * @fence: Pointer to fence base object
+ */
+static struct mdss_timeline *to_mdss_timeline(struct fence *fence)
+{
+ return container_of(fence->lock, struct mdss_timeline, lock);
+}
+
+/*
+ * mdss_free_timeline - Free the given timeline object
+ * @kref: Pointer to timeline kref object.
+ */
+static void mdss_free_timeline(struct kref *kref)
+{
+ struct mdss_timeline *tl =
+ container_of(kref, struct mdss_timeline, kref);
+
+ kfree(tl);
+}
+
+/*
+ * mdss_put_timeline - Put the given timeline object
+ * @tl: Pointer to timeline object.
+ */
+static void mdss_put_timeline(struct mdss_timeline *tl)
+{
+ if (!tl) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ kref_put(&tl->kref, mdss_free_timeline);
+}
+
+/*
+ * mdss_get_timeline - Get the given timeline object
+ * @tl: Pointer to timeline object.
+ */
+static void mdss_get_timeline(struct mdss_timeline *tl)
+{
+ if (!tl) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ kref_get(&tl->kref);
+}
+
+static const char *mdss_fence_get_driver_name(struct fence *fence)
+{
+ return MDSS_SYNC_DRIVER_NAME;
+}
+
+static const char *mdss_fence_get_timeline_name(struct fence *fence)
+{
+ struct mdss_timeline *tl = to_mdss_timeline(fence);
+
+ return tl->name;
+}
+
+static bool mdss_fence_enable_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static bool mdss_fence_signaled(struct fence *fence)
+{
+ struct mdss_timeline *tl = to_mdss_timeline(fence);
+ bool status;
+
+ status = ((s32) (tl->value - fence->seqno)) >= 0;
+ pr_debug("status:%d fence seq:%d and timeline:%s:%d next %d\n",
+ status, fence->seqno, tl->name,
+ tl->value, tl->next_value);
+ return status;
+}
+
+static void mdss_fence_release(struct fence *fence)
+{
+ struct mdss_fence *f = to_mdss_fence(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (!list_empty(&f->fence_list))
+ list_del(&f->fence_list);
+ spin_unlock_irqrestore(fence->lock, flags);
+ mdss_put_timeline(to_mdss_timeline(fence));
+ kfree_rcu(f, base.rcu);
+}
+
+static void mdss_fence_value_str(struct fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+static void mdss_fence_timeline_value_str(struct fence *fence, char *str,
+ int size)
+{
+ struct mdss_timeline *tl = to_mdss_timeline(fence);
+
+ snprintf(str, size, "%u", tl->value);
+}
+
+static const struct fence_ops mdss_fence_ops = {
+	.get_driver_name = mdss_fence_get_driver_name,
+	.get_timeline_name = mdss_fence_get_timeline_name,
+	.enable_signaling = mdss_fence_enable_signaling,
+	.signaled = mdss_fence_signaled,
+	.wait = fence_default_wait,
+	.release = mdss_fence_release,
+	.fence_value_str = mdss_fence_value_str,
+	.timeline_value_str = mdss_fence_timeline_value_str,
+};
+
+/*
+ * mdss_create_timeline - Create timeline object with the given name
+ * @name: Pointer to name character string.
+ */
+struct mdss_timeline *mdss_create_timeline(const char *name)
+{
+ struct mdss_timeline *tl;
+
+ if (!name) {
+ pr_err("invalid parameters\n");
+ return NULL;
+ }
+
+ tl = kzalloc(sizeof(struct mdss_timeline), GFP_KERNEL);
+ if (!tl)
+ return NULL;
+
+ kref_init(&tl->kref);
+ snprintf(tl->name, sizeof(tl->name), "%s", name);
+ spin_lock_init(&tl->lock);
+ tl->context = fence_context_alloc(1);
+ INIT_LIST_HEAD(&tl->fence_list_head);
+
+ return tl;
+}
+
+/*
+ * mdss_destroy_timeline - Destroy the given timeline object
+ * @tl: Pointer to timeline object.
+ */
+void mdss_destroy_timeline(struct mdss_timeline *tl)
+{
+ mdss_put_timeline(tl);
+}
+
+/*
+ * mdss_inc_timeline_locked - Increment timeline by given amount
+ * @tl: Pointer to timeline object.
+ * @increment: the amount to increase the timeline by.
+ */
+static int mdss_inc_timeline_locked(struct mdss_timeline *tl,
+ int increment)
+{
+ struct mdss_fence *f, *next;
+
+ tl->value += increment;
+ list_for_each_entry_safe(f, next, &tl->fence_list_head, fence_list) {
+ if (fence_is_signaled_locked(&f->base)) {
+ pr_debug("%s signaled\n", f->name);
+ list_del_init(&f->fence_list);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * mdss_resync_timeline - Resync timeline to last committed value
+ * @tl: Pointer to timeline object.
+ */
+void mdss_resync_timeline(struct mdss_timeline *tl)
+{
+ unsigned long flags;
+ s32 val;
+
+ if (!tl) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ spin_lock_irqsave(&tl->lock, flags);
+ val = tl->next_value - tl->value;
+ if (val > 0) {
+ pr_warn("flush %s:%d\n", tl->name, val);
+ mdss_inc_timeline_locked(tl, val);
+ }
+ spin_unlock_irqrestore(&tl->lock, flags);
+}
+
+/*
+ * mdss_get_sync_fence - Create fence object from the given timeline
+ * @tl: Pointer to timeline object. @fence_name: prefix for the fence name.
+ * @timestamp: Pointer to timestamp of the returned fence. Null if not required.
+ * Return: pointer to the fence created on the given timeline.
+ */
+struct mdss_fence *mdss_get_sync_fence(
+		struct mdss_timeline *tl, const char *fence_name,
+		u32 *timestamp, int offset)
+{
+	struct mdss_fence *f;
+	unsigned long flags;
+	u32 val;
+
+	if (!tl) {
+		pr_err("invalid parameters\n");
+		return NULL;
+	}
+
+	f = kzalloc(sizeof(struct mdss_fence), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	INIT_LIST_HEAD(&f->fence_list);
+	spin_lock_irqsave(&tl->lock, flags);
+	val = tl->next_value + offset;
+	tl->next_value += 1;
+	fence_init(&f->base, &mdss_fence_ops, &tl->lock, tl->context, val);
+	list_add_tail(&f->fence_list, &tl->fence_list_head);
+	mdss_get_timeline(tl);
+	spin_unlock_irqrestore(&tl->lock, flags);
+	snprintf(f->name, sizeof(f->name), "%s_%u", fence_name, val);
+
+	if (timestamp)
+		*timestamp = val;
+
+	pr_debug("fence created at val=%u tl->name %s next_value %d value %d offset %d\n",
+			val, tl->name, tl->next_value, tl->value, offset);
+
+	return f; /* no cast needed; f already has the right type */
+}
+
+/*
+ * mdss_inc_timeline - Increment timeline by given amount
+ * @tl: Pointer to timeline object.
+ * @increment: the amount to increase the timeline by.
+ */
+int mdss_inc_timeline(struct mdss_timeline *tl, int increment)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!tl) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tl->lock, flags);
+ rc = mdss_inc_timeline_locked(tl, increment);
+ spin_unlock_irqrestore(&tl->lock, flags);
+
+ return rc;
+}
+
+/*
+ * mdss_get_timeline_commit_ts - Return commit tick of given timeline
+ * @tl: Pointer to timeline object.
+ */
+u32 mdss_get_timeline_commit_ts(struct mdss_timeline *tl)
+{
+ if (!tl) {
+ pr_err("invalid parameters\n");
+ return 0;
+ }
+
+ return tl->next_value;
+}
+
+/*
+ * mdss_get_timeline_retire_ts - Return retire tick of given timeline
+ * @tl: Pointer to timeline object.
+ */
+u32 mdss_get_timeline_retire_ts(struct mdss_timeline *tl)
+{
+ if (!tl) {
+ pr_err("invalid parameters\n");
+ return 0;
+ }
+
+ return tl->value;
+}
+
+/*
+ * mdss_put_sync_fence - Destroy given fence object
+ * @fence: Pointer to fence object.
+ */
+void mdss_put_sync_fence(struct mdss_fence *fence)
+{
+ if (!fence) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ fence_put((struct fence *) fence);
+}
+
+/*
+ * mdss_wait_sync_fence - Wait until fence signal or timeout
+ * @fence: Pointer to fence object.
+ * @timeout: maximum wait time, in msec, for fence to signal.
+ */
+int mdss_wait_sync_fence(struct mdss_fence *fence,
+ long timeout)
+{
+ int rc;
+
+ if (!fence) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ rc = fence_wait_timeout((struct fence *) fence, false,
+ msecs_to_jiffies(timeout));
+ if (rc > 0) {
+ pr_debug("fence signaled\n");
+ rc = 0;
+ } else if (rc == 0) {
+ pr_debug("fence timeout\n");
+ rc = -ETIMEDOUT;
+ }
+
+ return rc;
+}
+
+/*
+ * mdss_get_fd_sync_fence - Get fence object of given file descriptor
+ * @fd: File description of fence object.
+ */
+struct mdss_fence *mdss_get_fd_sync_fence(int fd)
+{
+ return (struct mdss_fence *) sync_file_get_fence(fd);
+}
+
+/*
+ * mdss_get_sync_fence_fd - Get file descriptor of given fence object
+ * @fence: Pointer to fence object.
+ * Return: File descriptor on success, or error code on error
+ */
+int mdss_get_sync_fence_fd(struct mdss_fence *fence)
+{
+ int fd;
+ struct sync_file *sync_file;
+
+ if (!fence) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ pr_err("fail to get unused fd\n");
+ return fd;
+ }
+
+ sync_file = sync_file_create((struct fence *) fence);
+ if (!sync_file) {
+ put_unused_fd(fd);
+ pr_err("failed to create sync file\n");
+ return -ENOMEM;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return fd;
+}
+
+/*
+ * mdss_get_sync_fence_name - Return name of the given fence object
+ * @fence: Pointer to fence object.
+ * Return: fence name, or NULL if fence is invalid
+ */
+const char *mdss_get_sync_fence_name(struct mdss_fence *fence)
+{
+ if (!fence) {
+ pr_err("invalid parameters\n");
+ return NULL;
+ }
+
+ return fence->name;
+}
diff --git a/drivers/video/fbdev/msm/mdss_sync.h b/drivers/video/fbdev/msm/mdss_sync.h
new file mode 100644
index 0000000..39a1aa7b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_sync.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_SYNC_H
+#define MDSS_SYNC_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct mdss_fence;
+struct mdss_timeline;
+
+#if defined(CONFIG_SYNC_FILE)
+struct mdss_timeline *mdss_create_timeline(const char *name);
+
+void mdss_destroy_timeline(struct mdss_timeline *tl);
+
+struct mdss_fence *mdss_get_sync_fence(
+ struct mdss_timeline *tl, const char *fence_name,
+ u32 *timestamp, int offset);
+
+void mdss_resync_timeline(struct mdss_timeline *tl);
+
+u32 mdss_get_timeline_commit_ts(struct mdss_timeline *tl);
+
+u32 mdss_get_timeline_retire_ts(struct mdss_timeline *tl);
+
+int mdss_inc_timeline(struct mdss_timeline *tl, int increment);
+
+void mdss_put_sync_fence(struct mdss_fence *fence);
+
+int mdss_wait_sync_fence(struct mdss_fence *fence,
+ long timeout);
+
+struct mdss_fence *mdss_get_fd_sync_fence(int fd);
+
+int mdss_get_sync_fence_fd(struct mdss_fence *fence);
+const char *mdss_get_sync_fence_name(struct mdss_fence *fence);
+#else
+static inline
+struct mdss_timeline *mdss_create_timeline(const char *name)
+{
+ return NULL;
+}
+
+static inline
+void mdss_destroy_timeline(struct mdss_timeline *tl)
+{
+}
+
+static inline
+struct mdss_fence *mdss_get_sync_fence(
+ struct mdss_timeline *tl, const char *fence_name,
+ u32 *timestamp, int offset)
+{
+ return NULL;
+}
+
+static inline
+void mdss_resync_timeline(struct mdss_timeline *tl)
+{
+}
+
+static inline
+int mdss_inc_timeline(struct mdss_timeline *tl, int increment)
+{
+ return 0;
+}
+
+static inline
+u32 mdss_get_timeline_commit_ts(struct mdss_timeline *tl)
+{
+ return 0;
+}
+
+static inline
+u32 mdss_get_timeline_retire_ts(struct mdss_timeline *tl)
+{
+ return 0;
+}
+
+static inline
+void mdss_put_sync_fence(struct mdss_fence *fence)
+{
+}
+
+static inline
+int mdss_wait_sync_fence(struct mdss_fence *fence,
+ long timeout)
+{
+ return 0;
+}
+
+static inline
+struct mdss_fence *mdss_get_fd_sync_fence(int fd)
+{
+ return NULL;
+}
+
+static inline
+int mdss_get_sync_fence_fd(struct mdss_fence *fence)
+{
+ return -EBADF;
+}
+static inline
+const char *mdss_get_sync_fence_name(struct mdss_fence *fence)
+{
+	return NULL;
+}
+#endif
+
+#endif /* MDSS_SYNC_H */
diff --git a/drivers/video/fbdev/msm/mdss_util.c b/drivers/video/fbdev/msm/mdss_util.c
new file mode 100644
index 0000000..30fcf28
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_util.c
@@ -0,0 +1,256 @@
+
+/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include "mdss_mdp.h"
+
+struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
+static DEFINE_SPINLOCK(mdss_lock);
+
+int mdss_register_irq(struct mdss_hw *hw)
+{
+ unsigned long irq_flags;
+ u32 ndx_bit;
+
+ if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return -EINVAL;
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ spin_lock_irqsave(&mdss_lock, irq_flags);
+ if (!mdss_irq_handlers[hw->hw_ndx])
+ mdss_irq_handlers[hw->hw_ndx] = hw;
+ else
+ pr_err("panel %d's irq at %pK is already registered\n",
+ hw->hw_ndx, hw->irq_handler);
+ spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+ return 0;
+}
+
+void mdss_enable_irq(struct mdss_hw *hw)
+{
+ unsigned long irq_flags;
+ u32 ndx_bit;
+
+ if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return;
+
+ if (!mdss_irq_handlers[hw->hw_ndx]) {
+ pr_err("failed. First register the irq then enable it.\n");
+ return;
+ }
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+ hw->irq_info->irq_ena, hw->irq_info->irq_mask);
+
+ spin_lock_irqsave(&mdss_lock, irq_flags);
+ if (hw->irq_info->irq_mask & ndx_bit) {
+ pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
+ hw->hw_ndx, hw->irq_info->irq_mask);
+ } else {
+ hw->irq_info->irq_mask |= ndx_bit;
+ if (!hw->irq_info->irq_ena) {
+ hw->irq_info->irq_ena = true;
+ enable_irq(hw->irq_info->irq);
+ }
+ }
+ spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+void mdss_disable_irq(struct mdss_hw *hw)
+{
+ unsigned long irq_flags;
+ u32 ndx_bit;
+
+ if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return;
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+ hw->irq_info->irq_ena, hw->irq_info->irq_mask);
+
+ spin_lock_irqsave(&mdss_lock, irq_flags);
+ if (!(hw->irq_info->irq_mask & ndx_bit)) {
+ pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+ } else {
+ hw->irq_info->irq_mask &= ~ndx_bit;
+ if (hw->irq_info->irq_mask == 0) {
+ hw->irq_info->irq_ena = false;
+ disable_irq_nosync(hw->irq_info->irq);
+ }
+ }
+ spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+/* called from interrupt context */
+void mdss_disable_irq_nosync(struct mdss_hw *hw)
+{
+ u32 ndx_bit;
+
+ if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return;
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+ hw->irq_info->irq_ena, hw->irq_info->irq_mask);
+
+ spin_lock(&mdss_lock);
+ if (!(hw->irq_info->irq_mask & ndx_bit)) {
+ pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+ } else {
+ hw->irq_info->irq_mask &= ~ndx_bit;
+ if (hw->irq_info->irq_mask == 0) {
+ hw->irq_info->irq_ena = false;
+ disable_irq_nosync(hw->irq_info->irq);
+ }
+ }
+ spin_unlock(&mdss_lock);
+}
+
+int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
+{
+ struct mdss_hw *hw;
+ int rc = -ENODEV;
+
+ spin_lock(&mdss_lock);
+ hw = mdss_irq_handlers[hw_ndx];
+ spin_unlock(&mdss_lock);
+
+ if (hw)
+ rc = hw->irq_handler(irq, hw->ptr);
+
+ return rc;
+}
+
+void mdss_enable_irq_wake(struct mdss_hw *hw)
+{
+ unsigned long irq_flags;
+ u32 ndx_bit;
+
+ if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return;
+
+ if (!mdss_irq_handlers[hw->hw_ndx]) {
+ pr_err("failed. First register the irq then enable it.\n");
+ return;
+ }
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+ hw->irq_info->irq_wake_ena,
+ hw->irq_info->irq_wake_mask);
+
+ spin_lock_irqsave(&mdss_lock, irq_flags);
+ if (hw->irq_info->irq_wake_mask & ndx_bit) {
+ pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
+ hw->hw_ndx, hw->irq_info->irq_wake_mask);
+ } else {
+ hw->irq_info->irq_wake_mask |= ndx_bit;
+ if (!hw->irq_info->irq_wake_ena) {
+ hw->irq_info->irq_wake_ena = true;
+ enable_irq_wake(hw->irq_info->irq);
+ }
+ }
+ spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+void mdss_disable_irq_wake(struct mdss_hw *hw)
+{
+ unsigned long irq_flags;
+ u32 ndx_bit;
+
+ if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+ return;
+
+ ndx_bit = BIT(hw->hw_ndx);
+
+ pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+ hw->irq_info->irq_wake_ena,
+ hw->irq_info->irq_wake_mask);
+
+ spin_lock_irqsave(&mdss_lock, irq_flags);
+ if (!(hw->irq_info->irq_wake_mask & ndx_bit)) {
+ pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+ } else {
+ hw->irq_info->irq_wake_mask &= ~ndx_bit;
+ if (hw->irq_info->irq_wake_ena) {
+ hw->irq_info->irq_wake_ena = false;
+ disable_irq_wake(hw->irq_info->irq);
+ }
+ }
+ spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+static bool check_display(char *param_string)
+{
+	char *str = NULL;
+	bool display_disable = false;
+
+	str = strnstr(param_string, ";", MDSS_MAX_PANEL_LEN);
+	if (!str)
+		return display_disable;
+
+	str = strnstr(str, ":", MDSS_MAX_PANEL_LEN);
+	if (!str)
+		return display_disable;
+	else if (str[1] == '1')
+		display_disable = true;
+
+	return display_disable;
+}
+
+struct mdss_util_intf mdss_util = {
+ .register_irq = mdss_register_irq,
+ .enable_irq = mdss_enable_irq,
+ .disable_irq = mdss_disable_irq,
+ .enable_wake_irq = mdss_enable_irq_wake,
+ .disable_wake_irq = mdss_disable_irq_wake,
+ .disable_irq_nosync = mdss_disable_irq_nosync,
+ .irq_dispatch = mdss_irq_dispatch,
+ .get_iommu_domain = NULL,
+ .iommu_attached = NULL,
+ .iommu_ctrl = NULL,
+ .bus_bandwidth_ctrl = NULL,
+ .bus_scale_set_quota = NULL,
+ .panel_intf_type = NULL,
+ .panel_intf_status = NULL,
+ .mdp_probe_done = false,
+ .param_check = check_display,
+ .display_disabled = false
+};
+
+struct mdss_util_intf *mdss_get_util_intf(void)
+{
+	return &mdss_util;
+}
+EXPORT_SYMBOL(mdss_get_util_intf);
+
+/* This routine should only be called from interrupt context */
+bool mdss_get_irq_enable_state(struct mdss_hw *hw)
+{
+ bool is_irq_enabled;
+
+ spin_lock(&mdss_lock);
+ is_irq_enabled = hw->irq_info->irq_ena;
+ spin_unlock(&mdss_lock);
+
+ return is_irq_enabled;
+}
diff --git a/drivers/video/fbdev/msm/mdss_wb.c b/drivers/video/fbdev/msm/mdss_wb.c
new file mode 100644
index 0000000..c8c5d47
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_wb.c
@@ -0,0 +1,222 @@
+/* Copyright (c) 2011-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/msm_mdp.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/extcon.h>
+#include <linux/module.h>
+
+#include "mdss_panel.h"
+#include "mdss_wb.h"
+
+/**
+ * mdss_wb_check_params - check new panel info params
+ * @pdata: current panel information
+ * @new: updates to panel info
+ *
+ * Checks if there are any changes that require panel reconfiguration
+ * in order to be reflected on writeback buffer.
+ *
+ * Return negative errno if invalid input, zero if there is no panel reconfig
+ * needed and non-zero if reconfiguration is needed.
+ */
+static int mdss_wb_check_params(struct mdss_panel_data *pdata,
+ struct mdss_panel_info *new)
+{
+ struct mdss_panel_info *old;
+
+ if (!pdata || !new) {
+ pr_err("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ if (new->xres >= 4096 || new->yres >= 4096) {
+ pr_err("%s: Invalid resolutions\n", __func__);
+ return -EINVAL;
+ }
+
+ old = &pdata->panel_info;
+
+ if ((old->xres != new->xres) || (old->yres != new->yres))
+ return 1;
+
+ return 0;
+}
+
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+ int event, void *arg)
+{
+ int rc = 0;
+
+ switch (event) {
+ case MDSS_EVENT_CHECK_PARAMS:
+ rc = mdss_wb_check_params(pdata, (struct mdss_panel_info *)arg);
+ break;
+ default:
+ pr_debug("%s: panel event (%d) not handled\n", __func__, event);
+ break;
+ }
+ return rc;
+}
+
+static int mdss_wb_parse_dt(struct platform_device *pdev,
+ struct mdss_panel_data *pdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 res[2], tmp;
+ int rc;
+
+ rc = of_property_read_u32_array(np, "qcom,mdss_pan_res", res, 2);
+ pdata->panel_info.xres = (!rc ? res[0] : 1280);
+ pdata->panel_info.yres = (!rc ? res[1] : 720);
+
+ rc = of_property_read_u32(np, "qcom,mdss_pan_bpp", &tmp);
+ pdata->panel_info.bpp = (!rc ? tmp : 24);
+
+ return 0;
+}
+
+static const unsigned int mdss_wb_disp_supported_cable[] = {
+ EXTCON_DISP_HMD + 1, /* For WFD */
+ EXTCON_NONE,
+};
+
+static int mdss_wb_dev_init(struct mdss_wb_ctrl *wb_ctrl)
+{
+	int rc = 0;
+
+	if (!wb_ctrl) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	memset(&wb_ctrl->sdev, 0x0, sizeof(wb_ctrl->sdev));
+	wb_ctrl->sdev.supported_cable = mdss_wb_disp_supported_cable;
+	wb_ctrl->sdev.dev.parent = &wb_ctrl->pdev->dev;
+	wb_ctrl->sdev.name = "wfd";
+	rc = extcon_dev_register(&wb_ctrl->sdev);
+	if (rc) {
+		pr_err("Failed to setup switch dev for writeback panel\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int mdss_wb_dev_uninit(struct mdss_wb_ctrl *wb_ctrl)
+{
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ extcon_dev_unregister(&wb_ctrl->sdev);
+ return 0;
+}
+
+static int mdss_wb_probe(struct platform_device *pdev)
+{
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_wb_ctrl *wb_ctrl = NULL;
+	int rc = 0;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	wb_ctrl = devm_kzalloc(&pdev->dev, sizeof(*wb_ctrl), GFP_KERNEL);
+	if (!wb_ctrl)
+		return -ENOMEM;
+
+	pdata = &wb_ctrl->pdata;
+	wb_ctrl->pdev = pdev;
+	platform_set_drvdata(pdev, wb_ctrl);
+
+	rc = mdss_wb_parse_dt(pdev, pdata);	/* 0 on success */
+	if (rc)
+		goto error_no_mem;
+
+	rc = mdss_wb_dev_init(wb_ctrl);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to set up device nodes for writeback panel\n");
+		goto error_no_mem;
+	}
+
+	pdata->panel_info.type = WRITEBACK_PANEL;
+	pdata->panel_info.clk_rate = 74250000;
+	pdata->panel_info.pdest = DISPLAY_4;
+	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
+
+	pdata->event_handler = mdss_wb_event_handler;
+	pdev->dev.platform_data = pdata;
+
+	rc = mdss_register_panel(pdev, pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to register writeback panel\n");
+		goto error_init;
+	}
+
+	return rc;
+
+error_init:
+	mdss_wb_dev_uninit(wb_ctrl);
+error_no_mem:
+	devm_kfree(&pdev->dev, wb_ctrl);
+	return rc;
+}
+
+static int mdss_wb_remove(struct platform_device *pdev)
+{
+ struct mdss_wb_ctrl *wb_ctrl = platform_get_drvdata(pdev);
+
+ if (!wb_ctrl) {
+ pr_err("%s: no driver data\n", __func__);
+ return -ENODEV;
+ }
+
+ mdss_wb_dev_uninit(wb_ctrl);
+ devm_kfree(&wb_ctrl->pdev->dev, wb_ctrl);
+ return 0;
+}
+
+static const struct of_device_id mdss_wb_match[] = {
+ { .compatible = "qcom,mdss_wb", },
+ { { 0 } }
+};
+
+static struct platform_driver mdss_wb_driver = {
+ .probe = mdss_wb_probe,
+ .remove = mdss_wb_remove,
+ .driver = {
+ .name = "mdss_wb",
+ .of_match_table = mdss_wb_match,
+ },
+};
+
+static int __init mdss_wb_driver_init(void)
+{
+ int rc = 0;
+
+ rc = platform_driver_register(&mdss_wb_driver);
+ return rc;
+}
+
+module_init(mdss_wb_driver_init);
diff --git a/drivers/video/fbdev/msm/mdss_wb.h b/drivers/video/fbdev/msm/mdss_wb.h
new file mode 100644
index 0000000..010a123
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_wb.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_WB_H
+#define MDSS_WB_H
+
+#include <linux/extcon.h>
+
+struct mdss_wb_ctrl {
+ struct platform_device *pdev;
+ struct mdss_panel_data pdata;
+ struct extcon_dev sdev;
+};
+
+#endif
diff --git a/drivers/video/fbdev/msm/mhl_msc.c b/drivers/video/fbdev/msm/mhl_msc.c
new file mode 100644
index 0000000..ca9e74a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mhl_msc.c
@@ -0,0 +1,729 @@
+/* Copyright (c) 2013-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mhl_8334.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include "mhl_msc.h"
+#include "mdss_hdmi_mhl.h"
+
+static struct mhl_tx_ctrl *mhl_ctrl;
+static DEFINE_MUTEX(msc_send_workqueue_mutex);
+
+const char *devcap_reg_name[] = {
+ "DEV_STATE ",
+ "MHL_VERSION ",
+ "DEV_CAT ",
+ "ADOPTER_ID_H ",
+ "ADOPTER_ID_L ",
+ "VID_LINK_MODE ",
+ "AUD_LINK_MODE ",
+ "VIDEO_TYPE ",
+ "LOG_DEV_MAP ",
+ "BANDWIDTH ",
+ "FEATURE_FLAG ",
+ "DEVICE_ID_H ",
+ "DEVICE_ID_L ",
+ "SCRATCHPAD_SIZE ",
+ "INT_STAT_SIZE ",
+ "Reserved ",
+};
+
+static bool mhl_check_tmds_enabled(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ if (mhl_ctrl && mhl_ctrl->hdmi_mhl_ops) {
+ struct msm_hdmi_mhl_ops *ops = mhl_ctrl->hdmi_mhl_ops;
+ struct platform_device *pdev = mhl_ctrl->pdata->hdmi_pdev;
+
+ return (ops->tmds_enabled(pdev) == true);
+ }
+ pr_err("%s: invalid input\n", __func__);
+ return false;
+}
+
+static void mhl_print_devcap(u8 offset, u8 devcap)
+{
+ switch (offset) {
+ case DEVCAP_OFFSET_DEV_CAT:
+ pr_debug("DCAP: %02X %s: %02X DEV_TYPE=%X POW=%s\n",
+ offset, devcap_reg_name[offset], devcap,
+ devcap & 0x0F, (devcap & 0x10) ? "y" : "n");
+ break;
+ case DEVCAP_OFFSET_FEATURE_FLAG:
+ pr_debug("DCAP: %02X %s: %02X RCP=%s RAP=%s SP=%s\n",
+ offset, devcap_reg_name[offset], devcap,
+ (devcap & 0x01) ? "y" : "n",
+ (devcap & 0x02) ? "y" : "n",
+ (devcap & 0x04) ? "y" : "n");
+ break;
+ default:
+ pr_debug("DCAP: %02X %s: %02X\n",
+ offset, devcap_reg_name[offset], devcap);
+ break;
+ }
+}
+
+static bool mhl_qualify_path_enable(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ int rc = false;
+
+ if (!mhl_ctrl)
+ return rc;
+
+ if (mhl_ctrl->tmds_en_state ||
+ /* Identify sink with non-standard INT STAT SIZE */
+ (mhl_ctrl->devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 &&
+ mhl_ctrl->devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44))
+ rc = true;
+
+ return rc;
+}
+
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
+{
+ if (ctrl)
+ mhl_ctrl = ctrl;
+}
+
+static int mhl_flag_scrpd_burst_req(struct mhl_tx_ctrl *mhl_ctrl,
+ struct msc_command_struct *req)
+{
+ int postpone_send = 0;
+
+ if ((req->command == MHL_SET_INT) &&
+ (req->offset == MHL_RCHANGE_INT)) {
+ if (mhl_ctrl->scrpd_busy) {
+ /* reduce priority */
+ if (req->payload.data[0] == MHL_INT_REQ_WRT)
+ postpone_send = 1;
+ } else {
+ if (req->payload.data[0] == MHL_INT_REQ_WRT) {
+ mhl_ctrl->scrpd_busy = true;
+ mhl_ctrl->wr_burst_pending = true;
+ } else if (req->payload.data[0] == MHL_INT_GRT_WRT) {
+ mhl_ctrl->scrpd_busy = true;
+ }
+ }
+ }
+ return postpone_send;
+}
+
+void mhl_msc_send_work(struct work_struct *work)
+{
+ struct mhl_tx_ctrl *mhl_ctrl =
+ container_of(work, struct mhl_tx_ctrl, mhl_msc_send_work);
+ struct msc_cmd_envelope *cmd_env;
+ int ret, postpone_send;
+ /*
+ * Remove item from the queue
+ * and schedule it
+ */
+ mutex_lock(&msc_send_workqueue_mutex);
+ while (!list_empty(&mhl_ctrl->list_cmd)) {
+ cmd_env = list_first_entry(&mhl_ctrl->list_cmd,
+ struct msc_cmd_envelope,
+ msc_queue_envelope);
+ list_del(&cmd_env->msc_queue_envelope);
+ mutex_unlock(&msc_send_workqueue_mutex);
+
+ postpone_send = mhl_flag_scrpd_burst_req(
+ mhl_ctrl,
+ &cmd_env->msc_cmd_msg);
+ if (postpone_send) {
+ if (cmd_env->msc_cmd_msg.retry-- > 0) {
+ mutex_lock(&msc_send_workqueue_mutex);
+ list_add_tail(
+ &cmd_env->msc_queue_envelope,
+ &mhl_ctrl->list_cmd);
+ mutex_unlock(&msc_send_workqueue_mutex);
+ } else {
+ pr_err("%s: max scrpd retry out\n",
+ __func__);
+ }
+ } else {
+ ret = mhl_send_msc_command(mhl_ctrl,
+ &cmd_env->msc_cmd_msg);
+ if (ret == -EAGAIN) {
+ int retry = 2;
+
+ while (retry--) {
+ ret = mhl_send_msc_command(
+ mhl_ctrl,
+ &cmd_env->msc_cmd_msg);
+ if (ret != -EAGAIN)
+ break;
+ }
+ }
+ if (ret == -EAGAIN)
+ pr_err("%s: send_msc_command retry out!\n",
+ __func__);
+ vfree(cmd_env);
+ }
+
+ mutex_lock(&msc_send_workqueue_mutex);
+ }
+ mutex_unlock(&msc_send_workqueue_mutex);
+}
+
+int mhl_queue_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+ struct msc_command_struct *req,
+ int priority_send)
+{
+ struct msc_cmd_envelope *cmd_env;
+
+ mutex_lock(&msc_send_workqueue_mutex);
+ cmd_env = vmalloc(sizeof(struct msc_cmd_envelope));
+ if (!cmd_env) {
+ pr_err("%s: out of memory!\n", __func__);
+ mutex_unlock(&msc_send_workqueue_mutex);
+ return -ENOMEM;
+ }
+
+ memcpy(&cmd_env->msc_cmd_msg, req,
+ sizeof(struct msc_command_struct));
+
+ if (priority_send)
+ list_add(&cmd_env->msc_queue_envelope,
+ &mhl_ctrl->list_cmd);
+ else
+ list_add_tail(&cmd_env->msc_queue_envelope,
+ &mhl_ctrl->list_cmd);
+ mutex_unlock(&msc_send_workqueue_mutex);
+ queue_work(mhl_ctrl->msc_send_workqueue, &mhl_ctrl->mhl_msc_send_work);
+
+ return 0;
+}
+
+static int mhl_update_devcap(struct mhl_tx_ctrl *mhl_ctrl,
+ int offset, u8 devcap)
+{
+ if (!mhl_ctrl)
+ return -EFAULT;
+ if (offset < 0 || offset > 15)
+ return -EFAULT;
+ mhl_ctrl->devcap[offset] = devcap;
+ mhl_print_devcap(offset, mhl_ctrl->devcap[offset]);
+
+ return 0;
+}
+
+int mhl_msc_clear(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ if (!mhl_ctrl)
+ return -EFAULT;
+
+ memset(mhl_ctrl->devcap, 0, 16);
+ mhl_ctrl->devcap_state = 0;
+ mhl_ctrl->path_en_state = 0;
+ mhl_ctrl->status[0] = 0;
+ mhl_ctrl->status[1] = 0;
+ mhl_ctrl->scrpd_busy = 0;
+ mhl_ctrl->wr_burst_pending = 0;
+
+ return 0;
+}
+
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+ struct msc_command_struct *req)
+{
+ bool dongle_pwr_en = false;
+
+ switch (req->command) {
+ case MHL_WRITE_STAT:
+ if (req->offset == MHL_STATUS_REG_LINK_MODE) {
+ if (req->payload.data[0]
+ & MHL_STATUS_PATH_ENABLED) {
+ /* Enable TMDS output */
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ if (mhl_ctrl->devcap_state == MHL_DEVCAP_ALL) {
+ dongle_pwr_en = mhl_ctrl->devcap[
+ MHL_DEV_CATEGORY_OFFSET] &
+ MHL_DEV_CATEGORY_POW_BIT;
+ if (dongle_pwr_en)
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ }
+ } else {
+ /* Disable TMDS output */
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+ mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+ }
+ }
+ break;
+ case MHL_READ_DEVCAP:
+ mhl_update_devcap(mhl_ctrl,
+ req->offset, req->retval);
+ mhl_ctrl->devcap_state |= BIT(req->offset);
+ switch (req->offset) {
+ case MHL_DEV_CATEGORY_OFFSET:
+ if (req->retval & MHL_DEV_CATEGORY_POW_BIT)
+ pr_debug("%s: devcap pow bit set\n",
+ __func__);
+ else
+ pr_debug("%s: devcap pow bit unset\n",
+ __func__);
+ break;
+ case DEVCAP_OFFSET_RESERVED:
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ break;
+ case DEVCAP_OFFSET_MHL_VERSION:
+ case DEVCAP_OFFSET_INT_STAT_SIZE:
+ if (mhl_qualify_path_enable(mhl_ctrl))
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ break;
+ }
+ break;
+ case MHL_WRITE_BURST:
+ mhl_msc_send_set_int(
+ mhl_ctrl,
+ MHL_RCHANGE_INT,
+ MHL_INT_DSCR_CHG,
+ MSC_PRIORITY_SEND);
+ break;
+ }
+ return 0;
+}
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 mask, u8 prior)
+{
+ struct msc_command_struct req;
+
+ req.command = MHL_SET_INT;
+ req.offset = offset;
+ req.payload.data[0] = mask;
+ return mhl_queue_msc_command(mhl_ctrl, &req, prior);
+}
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 value)
+{
+ struct msc_command_struct req;
+
+ req.command = MHL_WRITE_STAT;
+ req.offset = offset;
+ req.payload.data[0] = value;
+ return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+static int mhl_msc_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 *data, u8 length)
+{
+ struct msc_command_struct req;
+
+ if (!mhl_ctrl)
+ return -EFAULT;
+
+ if (!mhl_ctrl->wr_burst_pending)
+ return -EFAULT;
+
+ req.command = MHL_WRITE_BURST;
+ req.offset = offset;
+ req.length = length;
+ req.payload.burst_data = data;
+ mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
+ mhl_ctrl->wr_burst_pending = false;
+ return 0;
+}
+
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 sub_cmd, u8 cmd_data)
+{
+ struct msc_command_struct req;
+
+ req.command = MHL_MSC_MSG;
+ req.payload.data[0] = sub_cmd;
+ req.payload.data[1] = cmd_data;
+ return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+/*
+ * Certain MSC msgs such as RCPK, RCPE and RAPK
+ * should be transmitted as a high priority
+ * because these msgs should be sent within
+ * 1000ms of a receipt of RCP/RAP. So such msgs can
+ * be added to the head of msc cmd queue.
+ */
+static int mhl_msc_send_prior_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 sub_cmd, u8 cmd_data)
+{
+ struct msc_command_struct req;
+
+ req.command = MHL_MSC_MSG;
+ req.payload.data[0] = sub_cmd;
+ req.payload.data[1] = cmd_data;
+ return mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
+}
+
+int mhl_msc_read_devcap(struct mhl_tx_ctrl *mhl_ctrl, u8 offset)
+{
+	struct msc_command_struct req;
+
+	if (offset >= DEVCAP_SIZE)	/* u8 can never be negative */
+		return -EFAULT;
+	req.command = MHL_READ_DEVCAP;
+	req.offset = offset;
+	req.payload.data[0] = 0;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+int mhl_msc_read_devcap_all(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int offset;
+	int ret = 0;	/* don't return garbage if the loop never runs */
+
+	for (offset = 0; offset < DEVCAP_SIZE; offset++) {
+		ret = mhl_msc_read_devcap(mhl_ctrl, offset);
+		if (ret == -EBUSY)
+			pr_err("%s: queue busy!\n", __func__);
+	}
+	return ret;
+}
+
+static void mhl_handle_input(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 key_code, u16 input_key_code)
+{
+ int key_press = (key_code & 0x80) == 0;
+
+ pr_debug("%s: send key events[%x][%x][%d]\n",
+ __func__, key_code, input_key_code, key_press);
+ input_report_key(mhl_ctrl->input, input_key_code, key_press);
+ input_sync(mhl_ctrl->input);
+}
+
+int mhl_rcp_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 key_code)
+{
+	u8 index = key_code & 0x7f;
+	u16 input_key_code;
+
+	if (!mhl_ctrl->rcp_key_code_tbl) {
+		pr_err("%s: RCP Key Code Table not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	/* read the table only when index is in bounds (avoids OOB read) */
+	input_key_code = (index < mhl_ctrl->rcp_key_code_tbl_len) ?
+		mhl_ctrl->rcp_key_code_tbl[index] : 0;
+	if (input_key_code > 0) {
+		/* prior send rcpk */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+
+		if (mhl_ctrl->input)
+			mhl_handle_input(mhl_ctrl, key_code, input_key_code);
+	} else {
+		/* prior send rcpe */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPE,
+			MHL_RCPE_INEFFECTIVE_KEY_CODE);
+
+		/* send rcpk after rcpe send */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+	}
+	return 0;
+}
+
+static int mhl_rap_action(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
+{
+ switch (action_code) {
+ case MHL_RAP_CONTENT_ON:
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ break;
+ case MHL_RAP_CONTENT_OFF:
+ /*
+ * instead of only disabling tmds
+ * send power button press - CONTENT_OFF
+ */
+ input_report_key(mhl_ctrl->input, KEY_VENDOR, 1);
+ input_sync(mhl_ctrl->input);
+ input_report_key(mhl_ctrl->input, KEY_VENDOR, 0);
+ input_sync(mhl_ctrl->input);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int mhl_rap_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
+{
+ u8 error_code;
+ bool tmds_en;
+
+ tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
+ switch (action_code) {
+ case MHL_RAP_POLL:
+ if (tmds_en)
+ error_code = MHL_RAPK_NO_ERROR;
+ else
+ error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
+ break;
+ case MHL_RAP_CONTENT_ON:
+ case MHL_RAP_CONTENT_OFF:
+ if (tmds_en) {
+ mhl_rap_action(mhl_ctrl, action_code);
+ error_code = MHL_RAPK_NO_ERROR;
+ } else {
+ error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
+ }
+ break;
+ default:
+ error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE;
+ break;
+ }
+ /* prior send rapk */
+ return mhl_msc_send_prior_msc_msg(
+ mhl_ctrl,
+ MHL_MSC_MSG_RAPK,
+ error_code);
+}
+
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 sub_cmd, u8 cmd_data)
+{
+ int rc = 0;
+
+ switch (sub_cmd) {
+ case MHL_MSC_MSG_RCP:
+ pr_debug("MHL: receive RCP(0x%02x)\n", cmd_data);
+ rc = mhl_rcp_recv(mhl_ctrl, cmd_data);
+ break;
+ case MHL_MSC_MSG_RCPK:
+ pr_debug("MHL: receive RCPK(0x%02x)\n", cmd_data);
+ break;
+ case MHL_MSC_MSG_RCPE:
+ pr_debug("MHL: receive RCPE(0x%02x)\n", cmd_data);
+ break;
+ case MHL_MSC_MSG_RAP:
+ pr_debug("MHL: receive RAP(0x%02x)\n", cmd_data);
+ rc = mhl_rap_recv(mhl_ctrl, cmd_data);
+ break;
+ case MHL_MSC_MSG_RAPK:
+ pr_debug("MHL: receive RAPK(0x%02x)\n", cmd_data);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 set_int)
+{
+ int prior;
+
+ if (offset >= 2)
+ return -EFAULT;
+
+ switch (offset) {
+ case 0:
+ if (set_int & MHL_INT_DCAP_CHG) {
+ /* peer dcap has changed */
+ mhl_ctrl->devcap_state = 0;
+ mhl_msc_read_devcap_all(mhl_ctrl);
+ }
+ if (set_int & MHL_INT_DSCR_CHG) {
+ /* peer's scratchpad reg changed */
+ pr_debug("%s: dscr chg\n", __func__);
+ mhl_read_scratchpad(mhl_ctrl);
+ mhl_ctrl->scrpd_busy = false;
+ }
+ if (set_int & MHL_INT_REQ_WRT) {
+ /* SET_INT: REQ_WRT */
+ if (mhl_ctrl->scrpd_busy) {
+ prior = MSC_NORMAL_SEND;
+ } else {
+ prior = MSC_PRIORITY_SEND;
+ mhl_ctrl->scrpd_busy = true;
+ }
+ mhl_msc_send_set_int(
+ mhl_ctrl,
+ MHL_RCHANGE_INT,
+ MHL_INT_GRT_WRT,
+ prior);
+ }
+ if (set_int & MHL_INT_GRT_WRT) {
+ /* SET_INT: GRT_WRT */
+ pr_debug("%s: recvd req to permit/grant write",
+ __func__);
+ complete_all(&mhl_ctrl->req_write_done);
+ mhl_msc_write_burst(
+ mhl_ctrl,
+ MHL_SCRATCHPAD_OFFSET,
+ mhl_ctrl->scrpd.data,
+ mhl_ctrl->scrpd.length);
+ }
+ break;
+ case 1:
+ if (set_int & MHL_INT_EDID_CHG) {
+ /* peer EDID has changed
+ * toggle HPD to read EDID
+ */
+ pr_debug("%s: EDID CHG\n", __func__);
+ mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+ msleep(110);
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ }
+ }
+ return 0;
+}
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 value)
+{
+ bool tmds_en;
+
+ if (offset >= 2)
+ return -EFAULT;
+
+ switch (offset) {
+ case 0:
+ /*
+ * connected device bits
+ * changed and DEVCAP READY
+ */
+ if (((value ^ mhl_ctrl->status[offset]) &
+ MHL_STATUS_DCAP_RDY)) {
+ if (value & MHL_STATUS_DCAP_RDY) {
+ mhl_ctrl->devcap_state = 0;
+ mhl_msc_read_devcap_all(mhl_ctrl);
+ } else {
+ /*
+ * peer dcap turned not ready
+ * use old devap state
+ */
+ pr_debug("%s: DCAP RDY bit cleared\n",
+ __func__);
+ }
+ }
+ break;
+ case 1:
+ /*
+ * connected device bits
+ * changed and PATH ENABLED
+ * bit set
+ */
+ tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
+ if ((value ^ mhl_ctrl->status[offset])
+ & MHL_STATUS_PATH_ENABLED) {
+ if (value & MHL_STATUS_PATH_ENABLED) {
+ if (tmds_en &&
+ (mhl_ctrl->devcap[offset] &
+ MHL_FEATURE_RAP_SUPPORT)) {
+ mhl_msc_send_msc_msg(
+ mhl_ctrl,
+ MHL_MSC_MSG_RAP,
+ MHL_RAP_CONTENT_ON);
+ }
+ mhl_ctrl->path_en_state
+ |= (MHL_STATUS_PATH_ENABLED |
+ MHL_STATUS_CLK_MODE_NORMAL);
+ mhl_msc_send_write_stat(
+ mhl_ctrl,
+ MHL_STATUS_REG_LINK_MODE,
+ mhl_ctrl->path_en_state);
+ } else {
+ mhl_ctrl->path_en_state
+ &= ~(MHL_STATUS_PATH_ENABLED |
+ MHL_STATUS_CLK_MODE_NORMAL);
+ mhl_msc_send_write_stat(
+ mhl_ctrl,
+ MHL_STATUS_REG_LINK_MODE,
+ mhl_ctrl->path_en_state);
+ }
+ }
+ break;
+ }
+ mhl_ctrl->status[offset] = value;
+ return 0;
+}
+
+static int mhl_request_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
+				   u8 start_reg,
+				   u8 length, u8 *data)
+{
+	int i, reg;
+	int timeout, retry = 20;
+
+	if (!(mhl_ctrl->devcap[DEVCAP_OFFSET_FEATURE_FLAG] &
+	      MHL_FEATURE_SP_SUPPORT)) {
+		pr_debug("MHL: SCRATCHPAD_NOT_SUPPORTED\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * scratchpad remains busy as long as a peer's permission or
+	 * write bursts are pending; experimentally it was found that
+	 * 50ms is optimal
+	 */
+	while (mhl_ctrl->scrpd_busy && retry--)
+		msleep(50);
+	if (mhl_ctrl->scrpd_busy) {	/* retry-- exhausts to -1, test state */
+		pr_debug("MHL: scratchpad_busy\n");
+		return -EBUSY;
+	}
+
+	for (i = 0, reg = start_reg; (i < length) &&
+	     (reg < MHL_SCRATCHPAD_SIZE); i++, reg++)
+		mhl_ctrl->scrpd.data[reg] = data[i];
+	mhl_ctrl->scrpd.length = length;
+	mhl_ctrl->scrpd.offset = start_reg;
+
+	retry = 5;
+	do {
+		init_completion(&mhl_ctrl->req_write_done);
+		mhl_msc_send_set_int(
+			mhl_ctrl,
+			MHL_RCHANGE_INT,
+			MHL_INT_REQ_WRT,
+			MSC_PRIORITY_SEND);
+		timeout = wait_for_completion_interruptible_timeout(
+			&mhl_ctrl->req_write_done,
+			msecs_to_jiffies(MHL_BURST_WAIT));
+		if (!timeout)
+			mhl_ctrl->scrpd_busy = false;
+	} while (retry-- && timeout == 0);
+	if (!timeout) {
+		pr_err("%s: timed out!\n", __func__);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/* write scratchpad entry */
+int mhl_write_scratchpad(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 length, u8 *data)
+{
+ int rc;
+
+ if ((length < ADOPTER_ID_SIZE) ||
+ (length > MAX_SCRATCHPAD_TRANSFER_SIZE) ||
+ (offset > (MAX_SCRATCHPAD_TRANSFER_SIZE - ADOPTER_ID_SIZE)) ||
+ ((offset + length) > MAX_SCRATCHPAD_TRANSFER_SIZE)) {
+ pr_debug("MHL: write_burst (0x%02x)\n", -EINVAL);
+ return -EINVAL;
+ }
+
+ rc = mhl_request_write_burst(mhl_ctrl, offset, length, data);
+
+ return rc;
+}
diff --git a/drivers/video/fbdev/msm/mhl_msc.h b/drivers/video/fbdev/msm/mhl_msc.h
new file mode 100644
index 0000000..59fbd25
--- /dev/null
+++ b/drivers/video/fbdev/msm/mhl_msc.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MHL_MSC_H__
+#define __MHL_MSC_H__
+#include <linux/mhl_8334.h>
+
+#define MAX_RCP_KEYS_SUPPORTED 256
+
+#define MSC_NORMAL_SEND 0
+#define MSC_PRIORITY_SEND 1
+
+#define TMDS_ENABLE 1
+#define TMDS_DISABLE 0
+
+/******************************************************************/
+/* the below APIs are implemented by the MSC functionality */
+int mhl_msc_clear(struct mhl_tx_ctrl *mhl_ctrl);
+
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+ struct msc_command_struct *req);
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 mask, u8 priority);
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 value);
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 sub_cmd, u8 cmd_data);
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 set_int);
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 offset, u8 value);
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+ u8 sub_cmd, u8 cmd_data);
+void mhl_msc_send_work(struct work_struct *work);
+
+/******************************************************************/
+/* Tx should implement these APIs */
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+ struct msc_command_struct *req);
+void mhl_read_scratchpad(struct mhl_tx_ctrl *mhl_ctrl);
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state);
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *ctrl, uint8_t on);
+/******************************************************************/
+/* MHL driver registers ctrl with MSC */
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl);
+
+#endif /* __MHL_MSC_H__ */
diff --git a/drivers/video/fbdev/msm/mhl_sii8334.c b/drivers/video/fbdev/msm/mhl_sii8334.c
new file mode 100644
index 0000000..cf45eb6
--- /dev/null
+++ b/drivers/video/fbdev/msm/mhl_sii8334.c
@@ -0,0 +1,2097 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/mhl_8334.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss_fb.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mhl_msc.h"
+#include "mdss_hdmi_mhl.h"
+
+#define MHL_DRIVER_NAME "sii8334"
+#define COMPATIBLE_NAME "qcom,mhl-sii8334"
+#define MAX_CURRENT 700000
+
+#define pr_debug_intr(...)
+
+#define MSC_START_BIT_MSC_CMD (0x01 << 0)
+#define MSC_START_BIT_VS_CMD (0x01 << 1)
+#define MSC_START_BIT_READ_REG (0x01 << 2)
+#define MSC_START_BIT_WRITE_REG (0x01 << 3)
+#define MSC_START_BIT_WRITE_BURST (0x01 << 4)
+
+/*
+ * MHL RCP (Remote Control Protocol) key code -> Linux input keycode
+ * map, indexed by the received RCP code (0x00..0x7F).  Codes the
+ * sink may send but we do not handle map to KEY_UNKNOWN; codes the
+ * MHL spec leaves undefined map to KEY_RESERVED.
+ */
+u16 support_rcp_key_code_tbl[] = {
+	KEY_ENTER,		/* 0x00 Select */
+	KEY_UP,			/* 0x01 Up */
+	KEY_DOWN,		/* 0x02 Down */
+	KEY_LEFT,		/* 0x03 Left */
+	KEY_RIGHT,		/* 0x04 Right */
+	KEY_UNKNOWN,		/* 0x05 Right-up */
+	KEY_UNKNOWN,		/* 0x06 Right-down */
+	KEY_UNKNOWN,		/* 0x07 Left-up */
+	KEY_UNKNOWN,		/* 0x08 Left-down */
+	KEY_MENU,		/* 0x09 Root Menu */
+	KEY_OPTION,		/* 0x0A Setup Menu */
+	KEY_UNKNOWN,		/* 0x0B Contents Menu */
+	KEY_UNKNOWN,		/* 0x0C Favorite Menu */
+	KEY_EXIT,		/* 0x0D Exit */
+	KEY_RESERVED,		/* 0x0E */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x1F */
+	KEY_NUMERIC_0,		/* 0x20 NUMERIC_0 */
+	KEY_NUMERIC_1,		/* 0x21 NUMERIC_1 */
+	KEY_NUMERIC_2,		/* 0x22 NUMERIC_2 */
+	KEY_NUMERIC_3,		/* 0x23 NUMERIC_3 */
+	KEY_NUMERIC_4,		/* 0x24 NUMERIC_4 */
+	KEY_NUMERIC_5,		/* 0x25 NUMERIC_5 */
+	KEY_NUMERIC_6,		/* 0x26 NUMERIC_6 */
+	KEY_NUMERIC_7,		/* 0x27 NUMERIC_7 */
+	KEY_NUMERIC_8,		/* 0x28 NUMERIC_8 */
+	KEY_NUMERIC_9,		/* 0x29 NUMERIC_9 */
+	KEY_DOT,		/* 0x2A Dot */
+	KEY_ENTER,		/* 0x2B Enter */
+	KEY_ESC,		/* 0x2C Clear */
+	KEY_RESERVED,		/* 0x2D */
+	KEY_RESERVED,		/* 0x2E */
+	KEY_RESERVED,		/* 0x2F */
+	KEY_UNKNOWN,		/* 0x30 Channel Up */
+	KEY_UNKNOWN,		/* 0x31 Channel Down */
+	KEY_UNKNOWN,		/* 0x32 Previous Channel */
+	KEY_UNKNOWN,		/* 0x33 Sound Select */
+	KEY_UNKNOWN,		/* 0x34 Input Select */
+	KEY_UNKNOWN,		/* 0x35 Show Information */
+	KEY_UNKNOWN,		/* 0x36 Help */
+	KEY_UNKNOWN,		/* 0x37 Page Up */
+	KEY_UNKNOWN,		/* 0x38 Page Down */
+	KEY_RESERVED,		/* 0x39 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x3F */
+	KEY_RESERVED,		/* 0x40 */
+	KEY_VOLUMEUP,		/* 0x41 Volume Up */
+	KEY_VOLUMEDOWN,		/* 0x42 Volume Down */
+	KEY_MUTE,		/* 0x43 Mute */
+	KEY_PLAY,		/* 0x44 Play */
+	KEY_STOP,		/* 0x45 Stop */
+	KEY_PAUSE,		/* 0x46 Pause */
+	KEY_UNKNOWN,		/* 0x47 Record */
+	KEY_REWIND,		/* 0x48 Rewind */
+	KEY_FASTFORWARD,	/* 0x49 Fast Forward */
+	KEY_UNKNOWN,		/* 0x4A Eject */
+	KEY_FORWARD,		/* 0x4B Forward */
+	KEY_BACK,		/* 0x4C Backward */
+	KEY_RESERVED,		/* 0x4D */
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x4F */
+	KEY_UNKNOWN,		/* 0x50 Angle */
+	KEY_UNKNOWN,		/* 0x51 Subtitle */
+	KEY_RESERVED,		/* 0x52 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x5F */
+	KEY_PLAYPAUSE,		/* 0x60 Play Function */
+	KEY_PLAYPAUSE,		/* 0x61 Pause_Play Function */
+	KEY_UNKNOWN,		/* 0x62 Record Function */
+	KEY_PAUSE,		/* 0x63 Pause Record Function */
+	KEY_STOP,		/* 0x64 Stop Function */
+	KEY_MUTE,		/* 0x65 Mute Function */
+	KEY_UNKNOWN,		/* 0x66 Restore Volume Function */
+	KEY_UNKNOWN,		/* 0x67 Tune Function */
+	KEY_UNKNOWN,		/* 0x68 Select Media Function */
+	KEY_RESERVED,		/* 0x69 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x70 */
+	KEY_BLUE,		/* 0x71 F1 */
+	KEY_RED,		/* 0x72 F2 */
+	KEY_GREEN,		/* 0x73 F3 */
+	KEY_YELLOW,		/* 0x74 F4 */
+	KEY_UNKNOWN,		/* 0x75 F5 */
+	KEY_RESERVED,		/* 0x76 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x7D */
+	KEY_VENDOR,		/* Vendor Specific */
+	KEY_RESERVED,		/* 0x7F */
+};
+
+
+/*
+ * I2C slave address for each logical register page of the SiI8334.
+ * Indexed by the slave_addr_index / TX_PAGE_* values used by the
+ * mhl_i2c_reg_* helpers below.
+ */
+uint8_t slave_addrs[MAX_PAGES] = {
+	DEV_PAGE_TPI_0,
+	DEV_PAGE_TX_L0_0,
+	DEV_PAGE_TX_L1_0,
+	DEV_PAGE_TX_2_0,
+	DEV_PAGE_TX_3_0,
+	DEV_PAGE_CBUS,
+	DEV_PAGE_DDC_EDID,
+	DEV_PAGE_DDC_SEGM,
+};
+
+static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
+ enum mhl_st_type to_mode, bool hpd_off);
+static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
+ bool mhl_disc_en);
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on);
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on);
+
+/*
+ * Read one byte from @reg_offset on the register page selected by
+ * @slave_addr_index (index into slave_addrs[]).
+ *
+ * Return: the register value (0..255) on success, or the negative
+ * error code from mdss_i2c_byte_read() on failure.  Callers that use
+ * the result directly as a byte must check for a negative value first.
+ */
+int mhl_i2c_reg_read(struct i2c_client *client,
+	uint8_t slave_addr_index, uint8_t reg_offset)
+{
+	int rc = -1;
+	uint8_t buffer = 0;
+
+	rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index],
+				reg_offset, &buffer);
+	if (rc) {
+		pr_err("%s: slave=%x, off=%x\n",
+		       __func__, slave_addrs[slave_addr_index], reg_offset);
+		return rc;
+	}
+	return buffer;
+}
+
+
+/*
+ * Write one byte to @reg_offset on the register page selected by
+ * @slave_addr_index.  Return: 0 on success, negative errno otherwise.
+ */
+int mhl_i2c_reg_write(struct i2c_client *client,
+	uint8_t slave_addr_index, uint8_t reg_offset,
+	uint8_t value)
+{
+	return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index],
+				 reg_offset, &value);
+}
+
+/*
+ * Read-modify-write: clear the bits in @mask, then set (@mask & @val).
+ *
+ * NOTE(review): if the read fails, mhl_i2c_reg_read() returns a
+ * negative errno which is truncated into @temp and written back --
+ * confirm whether errors should abort the modify instead.
+ */
+void mhl_i2c_reg_modify(struct i2c_client *client,
+	uint8_t slave_addr_index, uint8_t reg_offset,
+	uint8_t mask, uint8_t val)
+{
+	uint8_t temp;
+
+	temp = mhl_i2c_reg_read(client, slave_addr_index, reg_offset);
+	temp &= (~mask);
+	temp |= (mask & val);
+	mhl_i2c_reg_write(client, slave_addr_index, reg_offset, temp);
+}
+
+
+/* Parse one named gpio from DT into a freshly devm-allocated mdss_gpio.
+ * Returns NULL (after releasing the allocation) when the property is
+ * missing or allocation fails.
+ */
+static struct mdss_gpio *mhl_tx_parse_gpio(struct device *dev,
+	struct device_node *of_node, const char *name)
+{
+	struct mdss_gpio *g;
+	int dt_gpio;
+
+	g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+	if (!g) {
+		pr_err("%s: can't alloc gpio mem for %s\n", __func__, name);
+		return NULL;
+	}
+
+	dt_gpio = of_get_named_gpio(of_node, name, 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get %s\n", __func__, name);
+		devm_kfree(dev, g);
+		return NULL;
+	}
+
+	g->gpio = dt_gpio;
+	snprintf(g->gpio_name, 32, "%s", name);
+	return g;
+}
+
+/*
+ * mhl_tx_get_dt_data() - parse the sii8334 device-tree node
+ * @dev: i2c client device carrying the of_node
+ * @pdata: platform data to populate (reset/power/intr gpios, hdmi pdev)
+ *
+ * Return: 0 on success, -EINVAL on any missing DT property, lookup
+ * failure or allocation failure.  (The previous version left rc at 0
+ * on every "goto error" path, so all parse failures were silently
+ * reported as success to the probe path.)
+ */
+static int mhl_tx_get_dt_data(struct device *dev,
+	struct mhl_tx_platform_data *pdata)
+{
+	int i;
+	struct device_node *of_node = NULL;
+	struct platform_device *hdmi_pdev = NULL;
+	struct device_node *hdmi_tx_node = NULL;
+
+	if (!dev || !pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+	if (!of_node) {
+		pr_err("%s: invalid of_node\n", __func__);
+		goto error;
+	}
+
+	pr_debug("%s: id=%d\n", __func__, dev->id);
+
+	/* RESET */
+	pdata->gpios[MHL_TX_RESET_GPIO] =
+		mhl_tx_parse_gpio(dev, of_node, "mhl-rst-gpio");
+	if (!pdata->gpios[MHL_TX_RESET_GPIO])
+		goto error;
+	pr_debug("%s: rst gpio=[%d]\n", __func__,
+		 pdata->gpios[MHL_TX_RESET_GPIO]->gpio);
+
+	/* PWR */
+	pdata->gpios[MHL_TX_PMIC_PWR_GPIO] =
+		mhl_tx_parse_gpio(dev, of_node, "mhl-pwr-gpio");
+	if (!pdata->gpios[MHL_TX_PMIC_PWR_GPIO])
+		goto error;
+	pr_debug("%s: pmic gpio=[%d]\n", __func__,
+		 pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio);
+
+	/* INTR */
+	pdata->gpios[MHL_TX_INTR_GPIO] =
+		mhl_tx_parse_gpio(dev, of_node, "mhl-intr-gpio");
+	if (!pdata->gpios[MHL_TX_INTR_GPIO])
+		goto error;
+	pr_debug("%s: intr gpio=[%d]\n", __func__,
+		 pdata->gpios[MHL_TX_INTR_GPIO]->gpio);
+
+	/* parse phandle for hdmi tx */
+	hdmi_tx_node = of_parse_phandle(of_node, "qcom,hdmi-tx-map", 0);
+	if (!hdmi_tx_node) {
+		pr_err("%s: can't find hdmi phandle\n", __func__);
+		goto error;
+	}
+
+	hdmi_pdev = of_find_device_by_node(hdmi_tx_node);
+	if (!hdmi_pdev) {
+		pr_err("%s: can't find the device by node\n", __func__);
+		goto error;
+	}
+	/* %pK instead of casting the pointer to unsigned int: the cast
+	 * truncates on 64-bit and leaks a raw kernel address.
+	 */
+	pr_debug("%s: hdmi_pdev [%pK] to pdata->pdev\n",
+		 __func__, hdmi_pdev);
+
+	pdata->hdmi_pdev = hdmi_pdev;
+
+	return 0;
+error:
+	pr_err("%s: ret due to err\n", __func__);
+	for (i = 0; i < MHL_TX_MAX_GPIO; i++)
+		if (pdata->gpios[i]) {
+			devm_kfree(dev, pdata->gpios[i]);
+			/* don't leave dangling pointers behind */
+			pdata->gpios[i] = NULL;
+		}
+	return -EINVAL;
+} /* mhl_tx_get_dt_data */
+
+/* Drive the tx reset line to @on (1 = out of reset).  The gpio is
+ * optional; nothing happens when it was not provided in DT.
+ */
+static int mhl_sii_reset_pin(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	struct mdss_gpio *rst = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+
+	if (rst)
+		gpio_set_value(rst->gpio, on);
+
+	return 0;
+}
+
+
+/*
+ * Block (up to 3 s) until the RGND-done completion is signalled by the
+ * discovery interrupt path.  Return: 0 when RGND completed or we are
+ * already in MHL mode, -ENODEV on timeout (non-MHL peer / USB).
+ */
+static int mhl_sii_wait_for_rgnd(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int timeout;
+
+	pr_debug("%s:%u\n", __func__, __LINE__);
+
+	if (mhl_ctrl->mhl_mode) {
+		pr_debug("%s: already in mhl mode\n", __func__);
+		return 0;
+	}
+
+	reinit_completion(&mhl_ctrl->rgnd_done);
+	/*
+	 * after toggling reset line and enabling disc
+	 * tx can take a while to generate intr
+	 */
+	timeout = wait_for_completion_timeout
+		(&mhl_ctrl->rgnd_done, HZ * 3);
+	if (!timeout) {
+		/*
+		 * most likely nothing plugged in USB
+		 * USB HOST connected or already in USB mode
+		 */
+		pr_warn("%s:%u timedout\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Power the tx hardware interface up or down: regulators, gpios and
+ * the threaded i2c interrupt, in that order on the way up and the
+ * reverse on the way down.  irq_req_done makes the call idempotent.
+ * Return: 0 on success or if nothing to do, -EINVAL/-ENODEV on error.
+ */
+static int mhl_sii_config(struct mhl_tx_ctrl *mhl_ctrl, bool on)
+{
+	int rc = 0;
+	struct i2c_client *client = NULL;
+
+	if (!mhl_ctrl) {
+		pr_err("%s: ctrl is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	client = mhl_ctrl->i2c_handle;
+
+	if (on && !mhl_ctrl->irq_req_done) {
+		rc = mhl_vreg_config(mhl_ctrl, 1);
+		if (rc) {
+			pr_err("%s: vreg init failed [%d]\n",
+				__func__, rc);
+			return -ENODEV;
+		}
+
+		rc = mhl_gpio_config(mhl_ctrl, 1);
+		if (rc) {
+			pr_err("%s: gpio init failed [%d]\n",
+				__func__, rc);
+			return -ENODEV;
+		}
+
+		rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+			&mhl_tx_isr, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+			client->dev.driver->name, mhl_ctrl);
+		if (rc) {
+			pr_err("%s: request_threaded_irq failed, status: %d\n",
+				__func__, rc);
+			return -ENODEV;
+		}
+		mhl_ctrl->irq_req_done = true;
+	} else if (!on && mhl_ctrl->irq_req_done) {
+		/* tear down in reverse order of bring-up */
+		free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+		mhl_gpio_config(mhl_ctrl, 0);
+		mhl_vreg_config(mhl_ctrl, 0);
+		mhl_ctrl->irq_req_done = false;
+	}
+
+	return rc;
+}
+
+/* Deferred disconnect handler, queued from dev_detect_isr(): recover
+ * the ctrl from the embedded work item and power the interface down
+ * (free irq, gpios, regulators) outside interrupt context.
+ */
+static void mhl_sii_disc_intr_work(struct work_struct *work)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(work, struct mhl_tx_ctrl, mhl_intr_work);
+
+	mhl_sii_config(mhl_ctrl, false);
+}
+
+/* USB_HANDSHAKING FUNCTIONS */
+/*
+ * USB-handshake callback: decide whether the attached peer is an MHL
+ * sink or a plain USB device.  Registers the USB-online notifier on
+ * first use, powers up the interface if needed, and runs (or waits on)
+ * RGND discovery.  Return: 0 for MHL, 1 for USB, id when the USB ID
+ * pin is high, negative errno on failure.
+ */
+static int mhl_sii_device_discovery(void *data, int id,
+			     void (*usb_notify_cb)(void *, int), void *ctx)
+{
+	int rc;
+	struct mhl_tx_ctrl *mhl_ctrl = data;
+	unsigned long flags;
+
+	if (id) {
+		/* When MHL cable is disconnected we get a sii8334
+		 * mhl_disconnect interrupt which is handled separately.
+		 */
+		pr_debug("%s: USB ID pin high\n", __func__);
+		return id;
+	}
+
+	if (!mhl_ctrl || !usb_notify_cb) {
+		pr_warn("%s: cb || ctrl is NULL\n", __func__);
+		/* return "USB" so caller can proceed */
+		/* NOTE(review): the comment above says "USB" but -EINVAL is
+		 * returned, not 1 -- confirm how callers treat this value.
+		 */
+		return -EINVAL;
+	}
+
+	if (!mhl_ctrl->notify_usb_online) {
+		mhl_ctrl->notify_usb_online = usb_notify_cb;
+		mhl_ctrl->notify_ctx = ctx;
+	}
+
+	/* make sure a pending deferred power-down has finished */
+	flush_work(&mhl_ctrl->mhl_intr_work);
+
+	if (!mhl_ctrl->irq_req_done) {
+		rc = mhl_sii_config(mhl_ctrl, true);
+		if (rc) {
+			pr_err("%s: Failed to config vreg/gpio\n", __func__);
+			return rc;
+		}
+
+		/* wait for i2c interrupt line to be activated */
+		msleep(100);
+	}
+
+	if (!mhl_ctrl->disc_enabled) {
+		spin_lock_irqsave(&mhl_ctrl->lock, flags);
+		mhl_ctrl->tx_powered_off = false;
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		/* toggle reset, then re-init registers with discovery on */
+		mhl_sii_reset_pin(mhl_ctrl, 0);
+		msleep(50);
+		mhl_sii_reset_pin(mhl_ctrl, 1);
+		/* chipset PR recommends waiting for at least 100 ms
+		 * the chipset needs longer to come out of D3 state.
+		 */
+		msleep(100);
+		mhl_init_reg_settings(mhl_ctrl, true);
+		/* allow tx to enable dev disc after D3 state */
+		msleep(100);
+		if (mhl_sii_wait_for_rgnd(mhl_ctrl)) {
+			pr_err("%s: discovery timeout\n", __func__);
+
+			mhl_sii_config(mhl_ctrl, false);
+
+			return -EAGAIN;
+		}
+	} else {
+		if (mhl_ctrl->cur_state == POWER_STATE_D3) {
+			mhl_sii_wait_for_rgnd(mhl_ctrl);
+		} else {
+			/* in MHL mode */
+			pr_debug("%s:%u\n", __func__, __LINE__);
+		}
+	}
+
+	rc = mhl_ctrl->mhl_mode ? 0 : 1;
+
+	pr_debug("%s: ret result: %s\n", __func__, rc ? "usb" : " mhl");
+	return rc;
+}
+
+/*
+ * power_supply get_property callback for the "ext-vbus" style MHL
+ * supply: reports vbus presence, online (vbus + MHL mode) and the
+ * negotiated max current.  Return: 0, or -EINVAL for unsupported psp.
+ */
+static int mhl_power_get_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  union power_supply_propval *val)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(psy, struct mhl_tx_ctrl, mhl_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = mhl_ctrl->current_val;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = mhl_ctrl->vbus_active;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = mhl_ctrl->vbus_active && mhl_ctrl->mhl_mode;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * power_supply set_property callback: PRESENT toggles vbus_active and
+ * sets current_val to MAX_CURRENT (700 mA) or 0 accordingly, then
+ * notifies listeners.  ONLINE / CURRENT_MAX are accepted but ignored.
+ */
+static int mhl_power_set_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *val)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(psy, struct mhl_tx_ctrl, mhl_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		mhl_ctrl->vbus_active = val->intval;
+		if (mhl_ctrl->vbus_active)
+			mhl_ctrl->current_val = MAX_CURRENT;
+		else
+			mhl_ctrl->current_val = 0;
+		power_supply_changed(psy);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Consumers of the MHL power supply (the USB charger path). */
+static char *mhl_pm_power_supplied_to[] = {
+	"usb",
+};
+
+/* Properties exposed through mhl_power_get/set_property above. */
+static enum power_supply_property mhl_pm_power_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+/*
+ * Soft-reset the CBUS block and (re)arm the interrupt masks.  Also
+ * caches the chip revision, which gates the INTR5 mask below and the
+ * SCDT workaround in dev_detect_isr().
+ */
+static void cbus_reset(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Read the chip rev ID */
+	mhl_ctrl->chip_rev_id = MHL_SII_PAGE0_RD(0x04);
+	pr_debug("MHL: chip rev ID read=[%x]\n", mhl_ctrl->chip_rev_id);
+
+	/*
+	 * REG_SRST: pulse the CBUS reset bit
+	 */
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, BIT3);
+	msleep(20);
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, 0x00);
+	/*
+	 * REG_INTR1 and REG_INTR4
+	 */
+	MHL_SII_REG_NAME_WR(REG_INTR1_MASK, BIT6);
+	MHL_SII_REG_NAME_WR(REG_INTR4_MASK,
+		BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	/* INTR5 only needed on rev-0 silicon (SCDT/FIFO workaround) */
+	if (mhl_ctrl->chip_rev_id < 1)
+		MHL_SII_REG_NAME_WR(REG_INTR5_MASK, BIT3 | BIT4);
+	else
+		MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
+
+	/* Unmask CBUS1 Intrs */
+	MHL_SII_REG_NAME_WR(REG_CBUS_INTR_ENABLE,
+		BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	/* Unmask CBUS2 Intrs */
+	MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_ENABLE, BIT2 | BIT3);
+
+	for (i = 0; i < 4; i++) {
+		/*
+		 * Enable WRITE_STAT interrupt for writes to
+		 * all 4 MSC Status registers.
+		 */
+		MHL_SII_CBUS_WR((0xE0 + i), 0xFF);
+
+		/*
+		 * Enable SET_INT interrupt for writes to
+		 * all 4 MSC Interrupt registers.
+		 */
+		MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
+	}
+}
+
+/*
+ * Program the CBUS timing registers and publish our device
+ * capabilities (DEVCAP) into the 0x80.. register window so the peer
+ * can read them over MSC.
+ */
+static void init_cbus_regs(struct i2c_client *client)
+{
+	uint8_t regval;
+
+	/* Increase DDC translation layer timer*/
+	MHL_SII_CBUS_WR(0x0007, 0xF2);
+	/* Drive High Time */
+	MHL_SII_CBUS_WR(0x0036, 0x0B);
+	/* Use programmed timing */
+	MHL_SII_CBUS_WR(0x0039, 0x30);
+	/* CBUS Drive Strength */
+	MHL_SII_CBUS_WR(0x0040, 0x03);
+	/*
+	 * Write initial default settings
+	 * to devcap regs: default settings
+	 */
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L);
+	MHL_SII_CBUS_WR(0x0080 | DEVCAP_OFFSET_VID_LINK_MODE,
+			DEVCAP_VAL_VID_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_AUD_LINK_MODE,
+			DEVCAP_VAL_AUD_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_SCRATCHPAD_SIZE,
+			DEVCAP_VAL_SCRATCHPAD_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_INT_STAT_SIZE,
+			DEVCAP_VAL_INT_STAT_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED);
+
+	/* Make bits 2,3 (initiator timeout) to 1,1
+	 * for register CBUS_LINK_CONTROL_2
+	 * REG_CBUS_LINK_CONTROL_2
+	 */
+	regval = MHL_SII_CBUS_RD(0x0031);
+	regval = (regval | 0x0C);
+	/* REG_CBUS_LINK_CONTROL_2 */
+	MHL_SII_CBUS_WR(0x0031, regval);
+	/* REG_MSC_TIMEOUT_LIMIT */
+	MHL_SII_CBUS_WR(0x0022, 0x0F);
+	/* REG_CBUS_LINK_CONTROL_1 */
+	MHL_SII_CBUS_WR(0x0030, 0x01);
+	/* disallow vendor specific commands */
+	MHL_SII_CBUS_MOD(0x002E, BIT4, BIT4);
+}
+
+/*
+ * Full register bring-up of the SiI8334: power, PLLs, TMDS, EQ and
+ * discovery control, followed by a CBUS reset and DEVCAP programming.
+ * @mhl_disc_en selects whether MHL device discovery is armed.  The
+ * write sequence follows the vendor-recommended init order -- do not
+ * reorder.
+ */
+static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
+	bool mhl_disc_en)
+{
+	uint8_t regval;
+
+	/*
+	 * ============================================
+	 * POWER UP
+	 * ============================================
+	 */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Power up 1.2V core */
+	MHL_SII_PAGE1_WR(0x003D, 0x3F);
+	/* Enable Tx PLL Clock */
+	MHL_SII_PAGE2_WR(0x0011, 0x01);
+	/* Enable Tx Clock Path and Equalizer */
+	MHL_SII_PAGE2_WR(0x0012, 0x11);
+	/* Tx Source Termination ON */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	/* Enable 1X MHL Clock output */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL6, 0xBC);
+	/* Tx Differential Driver Config */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL2, 0x3C);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL4, 0xC8);
+	/* PLL Bandwidth Control */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL7, 0x03);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL8, 0x0A);
+	/*
+	 * ============================================
+	 * Analog PLL Control
+	 * ============================================
+	 */
+	/* Enable Rx PLL clock */
+	MHL_SII_REG_NAME_WR(REG_TMDS_CCTRL, 0x08);
+	MHL_SII_PAGE0_WR(0x00F8, 0x8C);
+	MHL_SII_PAGE0_WR(0x0085, 0x02);
+	MHL_SII_PAGE2_WR(0x0000, 0x00);
+	regval = MHL_SII_PAGE2_RD(0x0005);
+	regval &= ~BIT5;
+	MHL_SII_PAGE2_WR(0x0005, regval);
+	MHL_SII_PAGE2_WR(0x0013, 0x60);
+	/* PLL Cal ref sel */
+	MHL_SII_PAGE2_WR(0x0017, 0x03);
+	/* VCO Cal */
+	MHL_SII_PAGE2_WR(0x001A, 0x20);
+	/* Auto EQ */
+	MHL_SII_PAGE2_WR(0x0022, 0xE0);
+	MHL_SII_PAGE2_WR(0x0023, 0xC0);
+	MHL_SII_PAGE2_WR(0x0024, 0xA0);
+	MHL_SII_PAGE2_WR(0x0025, 0x80);
+	MHL_SII_PAGE2_WR(0x0026, 0x60);
+	MHL_SII_PAGE2_WR(0x0027, 0x40);
+	MHL_SII_PAGE2_WR(0x0028, 0x20);
+	MHL_SII_PAGE2_WR(0x0029, 0x00);
+	/* Rx PLL Bandwidth 4MHz */
+	MHL_SII_PAGE2_WR(0x0031, 0x0A);
+	/* Rx PLL Bandwidth value from I2C */
+	MHL_SII_PAGE2_WR(0x0045, 0x06);
+	MHL_SII_PAGE2_WR(0x004B, 0x06);
+	MHL_SII_PAGE2_WR(0x004C, 0x60);
+	/* Manual zone control */
+	MHL_SII_PAGE2_WR(0x004C, 0xE0);
+	/* PLL Mode value */
+	MHL_SII_PAGE2_WR(0x004D, 0x00);
+	MHL_SII_PAGE0_WR(0x0008, 0x35);
+	/*
+	 * Discovery Control and Status regs
+	 * Setting De-glitch time to 50 ms (default)
+	 * Switch Control Disabled
+	 */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL2, 0xAD);
+	/* 1.8V CBUS VTH */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL5, 0x57);
+	/* RGND and single Discovery attempt */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL6, 0x11);
+	/* Ignore VBUS */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL8, 0x82);
+
+	/* Enable CBUS Discovery */
+	if (mhl_disc_en) {
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x24);
+		/* Enable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+		/* Pull-up resistance off for IDLE state */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	} else {
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x26);
+		/* Disable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x26);
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	}
+
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL7, 0x20);
+	/* MHL CBUS Discovery - immediate comm. */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+
+	MHL_SII_PAGE3_WR(0x3C, 0x80);
+
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL,
+		(BIT6 | BIT5 | BIT4), (BIT6 | BIT4));
+
+	/* Enable Auto Soft RESET */
+	MHL_SII_REG_NAME_WR(REG_SRST, 0x084);
+	/* HDMI Transcode mode enable */
+	MHL_SII_PAGE0_WR(0x000D, 0x1C);
+
+	cbus_reset(mhl_ctrl);
+	init_cbus_regs(client);
+}
+
+
+/*
+ * Transition the tx between power states:
+ *  - D0_NO_MHL: full re-init with discovery on, TPI power state D0;
+ *  - D0_MHL:    just record the state (connection path does the rest);
+ *  - D3:        drop HPD, disable TMDS, high-Z the link and optionally
+ *               release the upstream HDMI hpd (@hpd_off).
+ * Always clears tx_powered_off first (under the ctrl spinlock).
+ */
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl, enum mhl_st_type to_mode,
+			bool hpd_off)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	unsigned long flags;
+	int rc;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = mhl_ctrl->hdmi_mhl_ops;
+
+	pr_debug("%s: tx pwr on\n", __func__);
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	mhl_ctrl->tx_powered_off = false;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	switch (to_mode) {
+	case POWER_STATE_D0_NO_MHL:
+		mhl_ctrl->cur_state = to_mode;
+		mhl_init_reg_settings(mhl_ctrl, true);
+		/* REG_DISC_CTRL1 */
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, BIT0);
+
+		/* TPI_DEVICE_POWER_STATE_CTRL_REG */
+		mhl_i2c_reg_modify(client, TX_PAGE_TPI, 0x001E, BIT1 | BIT0,
+			0x00);
+		break;
+	case POWER_STATE_D0_MHL:
+		mhl_ctrl->cur_state = to_mode;
+		break;
+	case POWER_STATE_D3:
+		if (mhl_ctrl->cur_state == POWER_STATE_D3) {
+			pr_debug("%s: mhl tx already in low power mode\n",
+				__func__);
+			break;
+		}
+
+		/* Force HPD to 0 when not in MHL mode. */
+		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+		/*
+		 * Change TMDS termination to high impedance
+		 * on disconnection.
+		 */
+		MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+		msleep(50);
+		if (!mhl_ctrl->disc_enabled)
+			MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, 0x00);
+		if (hdmi_mhl_ops && hpd_off) {
+			rc = hdmi_mhl_ops->set_upstream_hpd(
+				mhl_ctrl->pdata->hdmi_pdev, 0);
+			pr_debug("%s: hdmi unset hpd %s\n", __func__,
+				 rc ? "failed" : "passed");
+		}
+		mhl_ctrl->cur_state = POWER_STATE_D3;
+		mhl_ctrl->mhl_mode = 0;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Report whether the tx is powered, i.e. tx_powered_off is clear.
+ * The flag is sampled under the ctrl spinlock.
+ */
+static bool is_mhl_powered(void *mhl_ctx)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)mhl_ctx;
+	unsigned long flags;
+	bool powered;
+
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	powered = !mhl_ctrl->tx_powered_off;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	pr_debug("%s: ret pwr state as %x\n", __func__, powered);
+	return powered;
+}
+
+/* Gate the TMDS output via BIT4 of TMDS_CCTRL and mirror the new
+ * state in tmds_en_state.
+ */
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, on ? BIT4 : 0x00);
+	mhl_ctrl->tmds_en_state = on ? true : false;
+}
+
+/*
+ * Drive the hot-plug-detect line: HPD_UP releases the HPD override so
+ * the line floats (and marks the tx powered); HPD_DOWN forces HPD low
+ * via the override bits of REG_INT_CTRL.
+ */
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	unsigned long flags;
+
+	pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
+	if (to_state == HPD_UP) {
+		/*
+		 * Drive HPD to UP state
+		 * Set HPD_OUT_OVR_EN = HPD State
+		 * EDID read and Un-force HPD (from low)
+		 * propagate to src let HPD float by clearing
+		 * HPD OUT OVRRD EN
+		 */
+		spin_lock_irqsave(&mhl_ctrl->lock, flags);
+		mhl_ctrl->tx_powered_off = false;
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0);
+	} else {
+		/* Drive HPD to DOWN state */
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, (BIT4 | BIT5), BIT4);
+	}
+}
+
+/*
+ * MHL_EST handler: move to D0_MHL, restore Tx termination, re-enable
+ * discovery, then announce DCAP_RDY / DCAP_CHG to the peer over MSC.
+ */
+static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t val;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	pr_debug("%s: cur st [0x%x]\n", __func__,
+		mhl_ctrl->cur_state);
+
+	if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+		/* Already in D0 - MHL power state */
+		pr_err("%s: cur st not D0\n", __func__);
+		return;
+	}
+	switch_mode(mhl_ctrl, POWER_STATE_D0_MHL, true);
+
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	MHL_SII_CBUS_WR(0x07, 0xF2);
+
+	/*
+	 * Keep the discovery enabled. Need RGND interrupt
+	 * Possibly chip disables discovery after MHL_EST??
+	 * Need to re-enable here
+	 */
+	val = MHL_SII_PAGE3_RD(0x10);
+	MHL_SII_PAGE3_WR(0x10, val | BIT0);
+
+	/*
+	 * indicate DCAP_RDY and DCAP_CHG
+	 * to the peer only after
+	 * msm conn has been established
+	 */
+	mhl_msc_send_write_stat(mhl_ctrl,
+				MHL_STATUS_REG_CONNECTED_RDY,
+				MHL_STATUS_DCAP_RDY);
+
+	mhl_msc_send_set_int(mhl_ctrl,
+			     MHL_RCHANGE_INT,
+			     MHL_INT_DCAP_CHG,
+			     MSC_PRIORITY_SEND);
+
+}
+
+/*
+ * Disconnect handler: high-Z the Tx termination, drop to D3 and flush
+ * any queued MSC commands.
+ */
+static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* disabling Tx termination */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+	switch_mode(mhl_ctrl, POWER_STATE_D3, true);
+	mhl_msc_clear(mhl_ctrl);
+}
+
+/*
+ * Read the RGND impedance code latched by the discovery FSM (DISC
+ * STATUS 2, bits 1:0) and classify the peer: code 0x02 means an MHL
+ * sink -- raise the upstream HDMI hpd and go online; anything else is
+ * treated as USB and the tx drops back to D3.  Completes rgnd_done
+ * for waiters in mhl_sii_wait_for_rgnd().
+ *
+ * Return: MHL_DISCOVERY_RESULT_MHL or MHL_DISCOVERY_RESULT_USB.
+ */
+static int mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t rgnd_imp;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = mhl_ctrl->hdmi_mhl_ops;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	mhl_ctrl->tx_powered_off = false;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	/* DISC STATUS REG 2 */
+	rgnd_imp = (mhl_i2c_reg_read(client, TX_PAGE_3, 0x001C) &
+		    (BIT1 | BIT0));
+	pr_debug("imp range read=%02X\n", (int)rgnd_imp);
+
+	/* fix: was "rgnd_impi", an undeclared identifier */
+	if (rgnd_imp == 0x02) {
+		pr_debug("%s: mhl sink\n", __func__);
+		if (hdmi_mhl_ops) {
+			rc = hdmi_mhl_ops->set_upstream_hpd(
+				mhl_ctrl->pdata->hdmi_pdev, 1);
+			pr_debug("%s: hdmi set hpd %s\n", __func__,
+				 rc ? "failed" : "passed");
+		}
+		mhl_ctrl->mhl_mode = 1;
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 1);
+	} else {
+		pr_debug("%s: non-mhl sink\n", __func__);
+		mhl_ctrl->mhl_mode = 0;
+		switch_mode(mhl_ctrl, POWER_STATE_D3, true);
+	}
+	complete(&mhl_ctrl->rgnd_done);
+	return mhl_ctrl->mhl_mode ?
+		MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+}
+
+/*
+ * CBUS-lockout recovery, step 1: disable discovery, force the USB ID
+ * switch open and pull HPD low.  Paired with
+ * release_usb_switch_open() below.
+ */
+static void force_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*disable discovery*/
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, 0);
+	/* force USB ID switch to open*/
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, BIT6);
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+	/* force HPD to 0 when not in mhl mode. */
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+}
+
+/*
+ * CBUS-lockout recovery, step 2: after a 50 ms settle, release the
+ * forced-open USB ID switch and re-enable discovery.
+ */
+static void release_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	msleep(50);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, 0x00);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, BIT0);
+}
+
+/*
+ * SCDT status-change workaround (rev-0 silicon only, see caller):
+ * when SCDT is asserted and the MHL FIFO reports under/overflow,
+ * clear the condition and pulse a FIFO reset through REG_SRST.
+ */
+static void scdt_st_chg(struct i2c_client *client)
+{
+	uint8_t tmds_cstat;
+	uint8_t mhl_fifo_status;
+
+	/* tmds cstat */
+	tmds_cstat = MHL_SII_PAGE3_RD(0x0040);
+	pr_debug("%s: tmds cstat: 0x%02x\n", __func__,
+		 tmds_cstat);
+
+	if (!(tmds_cstat & BIT1))
+		return;
+
+	mhl_fifo_status = MHL_SII_REG_NAME_RD(REG_INTR5);
+	pr_debug("%s: mhl fifo st: 0x%02x\n", __func__,
+		 mhl_fifo_status);
+	if (mhl_fifo_status & 0x0C) {
+		MHL_SII_REG_NAME_WR(REG_INTR5, 0x0C);
+		pr_debug("%s: mhl fifo rst\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x94);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+	}
+}
+
+
+/*
+ * INTR4 dispatcher: handles SCDT change (rev-0), MHL established,
+ * uUSB-A detect, MHL disconnect, RGND ready and CBUS lockout, in
+ * that order.  Several branches return early deliberately -- e.g.
+ * on disconnect the status is cleared inline and the deferred
+ * power-down work is queued before returning.  Always returns 0.
+ */
+static int dev_detect_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t status, reg;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* INTR_STATUS4 */
+	status = MHL_SII_REG_NAME_RD(REG_INTR4);
+	pr_debug("%s: reg int4 st=%02X\n", __func__, status);
+
+	if ((status == 0x00) &&
+	    (mhl_ctrl->cur_state == POWER_STATE_D3)) {
+		pr_warn("%s: invalid intr\n", __func__);
+		return 0;
+	}
+
+	if (status == 0xFF) {
+		/* 0xFF usually means the chip is off the bus; just ack */
+		pr_warn("%s: invalid intr 0xff\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_INTR4, status);
+		return 0;
+	}
+
+	if ((status & BIT0) && (mhl_ctrl->chip_rev_id < 1)) {
+		pr_debug("%s: scdt intr\n", __func__);
+		scdt_st_chg(client);
+	}
+
+	if (status & BIT1)
+		pr_debug("mhl: int4 bit1 set\n");
+
+	/* mhl_est interrupt */
+	if (status & BIT2) {
+		pr_debug("%s: mhl_est st=%02X\n", __func__,
+			 (int) status);
+		mhl_msm_connection(mhl_ctrl);
+	} else if (status & BIT3) {
+		pr_debug("%s: uUSB-a type dev detct\n", __func__);
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		return 0;
+	}
+
+	if (status & BIT5) {
+		/* clr intr - reg int4 */
+		pr_debug("%s: mhl discon: int4 st=%02X\n", __func__,
+			 (int)status);
+		mhl_ctrl->mhl_det_discon = true;
+
+		reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+		MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+		mhl_msm_disconnection(mhl_ctrl);
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+
+		/* hw teardown must run outside irq context */
+		queue_work(mhl_ctrl->mhl_workq, &mhl_ctrl->mhl_intr_work);
+
+		return 0;
+	}
+
+	if ((mhl_ctrl->cur_state != POWER_STATE_D0_NO_MHL) &&
+	    (status & BIT6)) {
+		/* rgnd rdy Intr */
+		pr_debug("%s: rgnd ready intr\n", __func__);
+		switch_mode(mhl_ctrl, POWER_STATE_D0_NO_MHL, true);
+		mhl_msm_read_rgnd_int(mhl_ctrl);
+	}
+
+	/* Can't succeed at these in D3 */
+	if ((mhl_ctrl->cur_state != POWER_STATE_D3) &&
+	    (status & BIT4)) {
+		/* cbus lockout interrupt?
+		 * Hardware detection mechanism figures that
+		 * CBUS line is latched and raises this intr
+		 * where we force usb switch open and release
+		 */
+		pr_warn("%s: cbus locked out!\n", __func__);
+		force_usb_switch_open(mhl_ctrl);
+		release_usb_switch_open(mhl_ctrl);
+	}
+	MHL_SII_REG_NAME_WR(REG_INTR4, status);
+	return 0;
+}
+
+/*
+ * INTR5 handler: nothing to do beyond acking -- FIFO under/overflow
+ * is auto-recovered by the 8334 hardware.
+ */
+static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_5_stat;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*
+	 * Clear INT 5
+	 * INTR5 is related to FIFO underflow/overflow reset
+	 * which is handled in 8334 by auto FIFO reset
+	 */
+	intr_5_stat = MHL_SII_REG_NAME_RD(REG_INTR5);
+	MHL_SII_REG_NAME_WR(REG_INTR5, intr_5_stat);
+}
+
+/*
+ * Power the tx fully down: enter D3, ack and mask INTR1/INTR4, drop
+ * the 1.2 V core enable bit, mark tx_powered_off and quiesce the irq
+ * line.  Called from the HPD path once a disconnect is confirmed.
+ */
+static void mhl_tx_down(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	unsigned long flags;
+	uint8_t reg;
+
+	switch_mode(mhl_ctrl, POWER_STATE_D3, true);
+
+	/* ack any pending INTR1/INTR4 before masking them */
+	reg = MHL_SII_REG_NAME_RD(REG_INTR1);
+	MHL_SII_REG_NAME_WR(REG_INTR1, reg);
+
+	reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+	MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+
+	/* disable INTR1 and INTR4 */
+	MHL_SII_REG_NAME_MOD(REG_INTR1_MASK, BIT6, 0x0);
+	MHL_SII_REG_NAME_MOD(REG_INTR4_MASK,
+		(BIT0 | BIT1 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6), 0x0);
+
+	MHL_SII_PAGE1_MOD(0x003D, BIT0, 0x00);
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	mhl_ctrl->tx_powered_off = true;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+	pr_debug("%s: disabled\n", __func__);
+	disable_irq_nosync(client->irq);
+}
+
+/*
+ * INTR1 handler: tracks downstream HPD changes reported over CBUS.
+ * On an HPD edge (BIT6 of cbus status vs. the cached dwnstream_hpd)
+ * the upstream HPD is driven to match; a falling edge following a
+ * detected disconnect additionally powers the tx down.
+ */
+static void mhl_hpd_stat_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_1_stat, cbus_stat, t;
+	unsigned long flags;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (!is_mhl_powered(mhl_ctrl))
+		return;
+
+	/* INTR STATUS 1 */
+	intr_1_stat = MHL_SII_PAGE0_RD(0x0071);
+
+	if (!intr_1_stat)
+		return;
+
+	/* Clear interrupts */
+	MHL_SII_PAGE0_WR(0x0071, intr_1_stat);
+
+	if (BIT6 & intr_1_stat) {
+		/*
+		 * HPD status change event is pending
+		 * Read CBUS HPD status for this info
+		 * MSC REQ ABRT REASON
+		 */
+		cbus_stat = MHL_SII_CBUS_RD(0x0D);
+		pr_debug("%s: cbus_stat=[0x%02x] cur_pwr=[%u]\n",
+			 __func__, cbus_stat, mhl_ctrl->cur_state);
+
+		spin_lock_irqsave(&mhl_ctrl->lock, flags);
+		t = mhl_ctrl->dwnstream_hpd;
+		pr_debug("%s: %u: dwnstrm_hpd=0x%02x\n",
+			 __func__, __LINE__, mhl_ctrl->dwnstream_hpd);
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+		/* act only when the HPD bit actually changed */
+		if (BIT6 & (cbus_stat ^ t)) {
+			u8 status = cbus_stat & BIT6;
+
+			mhl_drive_hpd(mhl_ctrl, status ? HPD_UP : HPD_DOWN);
+			if (!status && mhl_ctrl->mhl_det_discon) {
+				pr_debug("%s:%u: power_down\n",
+					 __func__, __LINE__);
+				mhl_tx_down(mhl_ctrl);
+			}
+			spin_lock_irqsave(&mhl_ctrl->lock, flags);
+			mhl_ctrl->dwnstream_hpd = cbus_stat;
+			pr_debug("%s: %u: dwnstrm_hpd=0x%02x\n",
+				 __func__, __LINE__, mhl_ctrl->dwnstream_hpd);
+			spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+			mhl_ctrl->mhl_det_discon = false;
+		}
+	}
+}
+
+/*
+ * Decode and log CBUS error interrupts: DDC abort (BIT2), MSC requester
+ * abort (BIT5) and MSC responder abort (BIT6).  The two latched
+ * abort-reason registers are written with 0xFF afterwards to clear them.
+ */
+static void mhl_sii_cbus_process_errors(struct i2c_client *client,
+					u8 int_status)
+{
+	u8 abort_reason = 0;
+
+	if (int_status & BIT2) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_DDC_ABORT_REASON);
+		pr_debug("%s: CBUS DDC Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+	}
+	if (int_status & BIT5) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_PRI_XFR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Requestor Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_PRI_XFR_ABORT_REASON, 0xFF);
+	}
+	if (int_status & BIT6) {
+		abort_reason = MHL_SII_REG_NAME_RD(
+			REG_CBUS_PRI_FWR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Responder Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_FWR_ABORT_REASON, 0xFF);
+	}
+}
+
+/**
+ * mhl_send_msc_command() - issue one MSC command over CBUS and wait
+ * @mhl_ctrl: driver context; must be in POWER_STATE_D0_MHL
+ * @req: command descriptor (opcode, offset, payload; burst buffer and
+ *       length for MHL_WRITE_BURST)
+ *
+ * Programs the CBUS primary-command registers for the requested opcode,
+ * starts the transfer and blocks up to T_ABORT_NEXT ms for the
+ * completion signalled from the CBUS ISR.  For MHL_READ_DEVCAP the read
+ * value is returned in req->retval.
+ *
+ * Return: 0 on success, -EAGAIN if an MSC_MSG was NACKed by the peer,
+ * -EFAULT on bad state/argument, unknown command or timeout.
+ */
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req)
+{
+	int timeout;
+	u8 start_bit = 0x00;
+	u8 *burst_data;
+	int i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (mhl_ctrl->cur_state != POWER_STATE_D0_MHL) {
+		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
+			 __func__,
+			 mhl_ctrl->cur_state,
+			 MHL_SII_REG_NAME_RD(REG_CBUS_BUS_STATUS));
+		return -EFAULT;
+	}
+
+	if (!req)
+		return -EFAULT;
+
+	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
+		 __func__,
+		 req->command,
+		 req->offset,
+		 req->payload.data[0],
+		 req->payload.data[1]);
+
+	/* REG_CBUS_PRI_ADDR_CMD = REQ CBUS CMD or OFFSET */
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->offset);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_1ST,
+			    req->payload.data[0]);
+
+	switch (req->command) {
+	case MHL_SET_INT:
+	case MHL_WRITE_STAT:
+		start_bit = MSC_START_BIT_WRITE_REG;
+		break;
+	case MHL_READ_DEVCAP:
+		start_bit = MSC_START_BIT_READ_REG;
+		break;
+	case MHL_GET_STATE:
+	case MHL_GET_VENDOR_ID:
+	case MHL_SET_HPD:
+	case MHL_CLR_HPD:
+	case MHL_GET_SC1_ERRORCODE:
+	case MHL_GET_DDC_ERRORCODE:
+	case MHL_GET_MSC_ERRORCODE:
+	case MHL_GET_SC3_ERRORCODE:
+		start_bit = MSC_START_BIT_MSC_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_MSC_MSG:
+		start_bit = MSC_START_BIT_VS_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_2ND,
+				    req->payload.data[1]);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_WRITE_BURST:
+		start_bit = MSC_START_BIT_WRITE_BURST;
+		MHL_SII_REG_NAME_WR(REG_MSC_WRITE_BURST_LEN, req->length - 1);
+		if (!(req->payload.burst_data)) {
+			pr_err("%s: burst data is null!\n", __func__);
+			goto cbus_send_fail;
+		}
+		burst_data = req->payload.burst_data;
+		for (i = 0; i < req->length; i++, burst_data++)
+			MHL_SII_REG_NAME_WR(REG_CBUS_SCRATCHPAD_0 + i,
+					    *burst_data);
+		break;
+	default:
+		pr_err("%s: unknown command! (%02x)\n",
+		       __func__, req->command);
+		goto cbus_send_fail;
+	}
+
+	/* the CBUS ISR completes this on MSC_REQ_DONE */
+	reinit_completion(&mhl_ctrl->msc_cmd_done);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_START, start_bit);
+	timeout = wait_for_completion_timeout
+		(&mhl_ctrl->msc_cmd_done, msecs_to_jiffies(T_ABORT_NEXT));
+	if (!timeout) {
+		pr_err("%s: cbus_command_send timed out!\n", __func__);
+		goto cbus_send_fail;
+	}
+
+	switch (req->command) {
+	case MHL_READ_DEVCAP:
+		req->retval = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_RD_DATA_1ST);
+		break;
+	case MHL_MSC_MSG:
+		/* check if MSC_MSG NACKed */
+		if (MHL_SII_REG_NAME_RD(REG_MSC_WRITE_BURST_LEN) & BIT6)
+			return -EAGAIN;
+		/* not NACKed: fall through to report success */
+	default:
+		req->retval = 0;
+		break;
+	}
+	mhl_msc_command_done(mhl_ctrl, req);
+	pr_debug("%s: msc cmd done\n", __func__);
+	return 0;
+
+cbus_send_fail:
+	return -EFAULT;
+}
+
+/* Copy the peer's CBUS scratchpad registers into the local cache. */
+void mhl_read_scratchpad(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int idx;
+
+	for (idx = 0; idx < MHL_SCRATCHPAD_SIZE; idx++)
+		mhl_ctrl->scrpd.data[idx] =
+			MHL_SII_REG_NAME_RD(REG_CBUS_SCRATCHPAD_0 + idx);
+}
+
+/*
+ * Service CBUS interrupt sources: incoming MSC_MSG (RCP/RAP), abort
+ * errors, request completion, and the peer's SET_INT / WRITE_STAT
+ * register updates (CBUS_MSC_INT2).
+ */
+static void mhl_cbus_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t regval;
+	int req_done = 0;
+	uint8_t sub_cmd = 0x0;
+	uint8_t cmd_data = 0x0;
+	int msc_msg_recved = 0;
+	int rc = -1;
+	unsigned long flags;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* all-ones status treated as invalid — presumably the device is
+	 * no longer accessible (disconnect/power-off); confirm
+	 */
+	regval = MHL_SII_REG_NAME_RD(REG_CBUS_INTR_STATUS);
+	if (regval == 0xff)
+		return;
+
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_INTR_STATUS, regval);
+
+	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);
+
+	/* MSC_MSG (RCP/RAP): latch the message, dispatch after the other
+	 * sources below have been handled
+	 */
+	if (regval & BIT3) {
+		sub_cmd = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_CMD);
+		cmd_data = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_DATA);
+		msc_msg_recved = 1;
+	}
+	/* MSC_MT_ABRT/MSC_MR_ABRT/DDC_ABORT */
+	if (regval & (BIT6 | BIT5 | BIT2))
+		mhl_sii_cbus_process_errors(client, regval);
+
+	/* MSC_REQ_DONE */
+	if (regval & BIT4)
+		req_done = 1;
+
+	/* look for interrupts on CBUS_MSC_INT2 */
+	regval = MHL_SII_REG_NAME_RD(REG_CBUS_MSC_INT2_STATUS);
+
+	/* clear all interrupts */
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_STATUS, regval);
+
+	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
+
+	/* received SET_INT */
+	if (regval & BIT2) {
+		uint8_t intr;
+
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_0);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_0, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 0, intr);
+		if (intr & MHL_INT_DCAP_CHG) {
+			/* No need to go to low power mode */
+			spin_lock_irqsave(&mhl_ctrl->lock, flags);
+			mhl_ctrl->dwnstream_hpd = 0x00;
+			pr_debug("%s: %u: dwnstrm_hpd=0x%02x\n",
+				 __func__, __LINE__, mhl_ctrl->dwnstream_hpd);
+			spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		}
+
+		pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr);
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_1);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_1, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 1, intr);
+
+		pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_3, 0xFF);
+	}
+
+	/* received WRITE_STAT */
+	if (regval & BIT3) {
+		uint8_t stat;
+
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_0);
+		mhl_msc_recv_write_stat(mhl_ctrl, 0, stat);
+
+		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_1);
+		mhl_msc_recv_write_stat(mhl_ctrl, 1, stat);
+		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
+
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_0, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_1, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_3, 0xFF);
+	}
+
+	/* received MSC_MSG */
+	if (msc_msg_recved) {
+		/*mhl msc recv msc msg*/
+		rc = mhl_msc_recv_msc_msg(mhl_ctrl, sub_cmd, cmd_data);
+		if (rc)
+			pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc);
+	}
+	/* complete last command */
+	if (req_done)
+		complete_all(&mhl_ctrl->msc_cmd_done);
+
+}
+
+/*
+ * Top-level threaded IRQ handler: bails out early if the tx has been
+ * powered off (flag read under mhl_ctrl->lock), then dispatches to the
+ * detection, misc, CBUS and HPD sub-handlers in that order.
+ */
+static irqreturn_t mhl_tx_isr(int irq, void *data)
+{
+	int rc;
+	struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)data;
+	unsigned long flags;
+
+	pr_debug("%s: Getting Interrupts\n", __func__);
+
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	if (mhl_ctrl->tx_powered_off) {
+		pr_warn("%s: powered off\n", __func__);
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	/*
+	 * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
+	 * interrupts. In D3, we get only RGND
+	 */
+	rc = dev_detect_isr(mhl_ctrl);
+	if (rc)
+		pr_debug("%s: dev_detect_isr rc=[%d]\n", __func__, rc);
+
+	pr_debug("%s: cur pwr state is [0x%x]\n",
+		 __func__, mhl_ctrl->cur_state);
+
+	/*
+	 * If dev_detect_isr() didn't move the tx to D3
+	 * on disconnect, continue to check other
+	 * interrupt sources.
+	 */
+	mhl_misc_isr(mhl_ctrl);
+
+	/*
+	 * Check for any peer messages for DCAP_CHG, MSC etc
+	 * Dispatch to have the CBUS module working only
+	 * once connected.
+	 */
+	mhl_cbus_isr(mhl_ctrl);
+	mhl_hpd_stat_isr(mhl_ctrl);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * mhl_sii_reg_config() - acquire/enable or disable/release the MHL rails
+ * @client: i2c client whose device owns the regulator supplies
+ * @enable: true to get+enable all supplies, false to disable+put them
+ *
+ * The regulator handles live in function-static pointers so a disable
+ * call releases exactly what a previous enable call acquired.  Every
+ * failure path now resets its static handle to NULL: the previous code
+ * left ERR_PTR()/stale values behind, so a retry skipped regulator_get()
+ * and then operated on a dangling or invalid handle.  Cleanup also no
+ * longer calls regulator_disable() on a supply whose enable failed.
+ *
+ * Return: 0 on success (including when all rails are already up — the
+ * old code returned a stale -EINVAL in that case), negative errno on
+ * failure.
+ */
+static int mhl_sii_reg_config(struct i2c_client *client, bool enable)
+{
+	static struct regulator *reg_8941_l24;
+	static struct regulator *reg_8941_l02;
+	static struct regulator *reg_8941_smps3a;
+	static struct regulator *reg_8941_vdda;
+	int rc;
+
+	pr_debug("%s\n", __func__);
+
+	if (!enable) {
+		/* tear down in reverse order of bring-up */
+		if (reg_8941_vdda) {
+			regulator_disable(reg_8941_vdda);
+			regulator_put(reg_8941_vdda);
+			reg_8941_vdda = NULL;
+		}
+
+		if (reg_8941_smps3a) {
+			regulator_disable(reg_8941_smps3a);
+			regulator_put(reg_8941_smps3a);
+			reg_8941_smps3a = NULL;
+		}
+
+		if (reg_8941_l02) {
+			regulator_disable(reg_8941_l02);
+			regulator_put(reg_8941_l02);
+			reg_8941_l02 = NULL;
+		}
+
+		if (reg_8941_l24) {
+			regulator_disable(reg_8941_l24);
+			regulator_put(reg_8941_l24);
+			reg_8941_l24 = NULL;
+		}
+		return 0;
+	}
+
+	/* enable is known true below, so the old dead
+	 * "if (enable) ... else regulator_disable(...)" branches are gone
+	 */
+	if (!reg_8941_l24) {
+		reg_8941_l24 = regulator_get(&client->dev, "avcc_18");
+		if (IS_ERR(reg_8941_l24)) {
+			pr_err("could not get 8941 l24, rc = %ld\n",
+				PTR_ERR(reg_8941_l24));
+			reg_8941_l24 = NULL;
+			return -ENODEV;
+		}
+		rc = regulator_enable(reg_8941_l24);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+				"avcc_1.8V", enable, rc);
+			regulator_put(reg_8941_l24);
+			reg_8941_l24 = NULL;
+			return -EINVAL;
+		}
+		pr_debug("%s: vreg L24 %s\n",
+			 __func__, (enable ? "enabled" : "disabled"));
+	}
+
+	if (!reg_8941_l02) {
+		reg_8941_l02 = regulator_get(&client->dev, "avcc_12");
+		if (IS_ERR(reg_8941_l02)) {
+			pr_err("could not get reg_8941_l02, rc = %ld\n",
+				PTR_ERR(reg_8941_l02));
+			reg_8941_l02 = NULL;
+			goto l24_fail;
+		}
+		rc = regulator_enable(reg_8941_l02);
+		if (rc) {
+			/* was pr_debug in the original; failures belong in
+			 * the error log like the sibling rails
+			 */
+			pr_err("'%s' regulator configure[%u] failed, rc=%d\n",
+				"avcc_1.2V", enable, rc);
+			regulator_put(reg_8941_l02);
+			reg_8941_l02 = NULL;
+			goto l24_fail;
+		}
+		pr_debug("%s: vreg L02 %s\n",
+			 __func__, (enable ? "enabled" : "disabled"));
+	}
+
+	if (!reg_8941_smps3a) {
+		reg_8941_smps3a = regulator_get(&client->dev, "smps3a");
+		if (IS_ERR(reg_8941_smps3a)) {
+			pr_err("could not get vreg smps3a, rc = %ld\n",
+				PTR_ERR(reg_8941_smps3a));
+			reg_8941_smps3a = NULL;
+			goto l02_fail;
+		}
+		rc = regulator_enable(reg_8941_smps3a);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+				"SMPS3A", enable, rc);
+			regulator_put(reg_8941_smps3a);
+			reg_8941_smps3a = NULL;
+			goto l02_fail;
+		}
+		pr_debug("%s: vreg SMPS3A %s\n",
+			 __func__, (enable ? "enabled" : "disabled"));
+	}
+
+	if (!reg_8941_vdda) {
+		reg_8941_vdda = regulator_get(&client->dev, "vdda");
+		if (IS_ERR(reg_8941_vdda)) {
+			pr_err("could not get vreg vdda, rc = %ld\n",
+				PTR_ERR(reg_8941_vdda));
+			reg_8941_vdda = NULL;
+			goto smps3a_fail;
+		}
+		rc = regulator_enable(reg_8941_vdda);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+				"VDDA", enable, rc);
+			regulator_put(reg_8941_vdda);
+			reg_8941_vdda = NULL;
+			goto smps3a_fail;
+		}
+		pr_debug("%s: vreg VDDA %s\n",
+			 __func__, (enable ? "enabled" : "disabled"));
+	}
+
+	return 0;
+
+smps3a_fail:
+	regulator_disable(reg_8941_smps3a);
+	regulator_put(reg_8941_smps3a);
+	reg_8941_smps3a = NULL;
+l02_fail:
+	regulator_disable(reg_8941_l02);
+	regulator_put(reg_8941_l02);
+	reg_8941_l02 = NULL;
+l24_fail:
+	regulator_disable(reg_8941_l24);
+	regulator_put(reg_8941_l24);
+	reg_8941_l24 = NULL;
+
+	return -EINVAL;
+}
+
+
+/*
+ * mhl_vreg_config() - power the MHL chip on or off
+ * @mhl_ctrl: driver context
+ * @on: non-zero to request the PMIC power GPIO and enable the supplies,
+ *      zero to disable the supplies and free the GPIO
+ *
+ * Return: 0 on success, negative errno on failure.  The original code
+ * collapsed every failure to -EINVAL; the real error code is now
+ * propagated to the caller.
+ */
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	int ret;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int pwr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio;
+
+	pr_debug("%s\n", __func__);
+	if (!on) {
+		pr_warn("%s: turning off pwr controls\n", __func__);
+		mhl_sii_reg_config(client, false);
+		gpio_free(pwr_gpio);
+		pr_debug("%s: successful\n", __func__);
+		return 0;
+	}
+
+	ret = gpio_request(pwr_gpio,
+		mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio_name);
+	if (ret < 0) {
+		pr_err("%s: mhl pwr gpio req failed: %d\n",
+		       __func__, ret);
+		return ret;
+	}
+	/* drive the power-enable GPIO high before turning on supplies */
+	ret = gpio_direction_output(pwr_gpio, 1);
+	if (ret < 0) {
+		pr_err("%s: set gpio MHL_PWR_EN dircn failed: %d\n",
+		       __func__, ret);
+		goto vreg_config_failed;
+	}
+
+	ret = mhl_sii_reg_config(client, true);
+	if (ret) {
+		pr_err("%s: regulator enable failed\n", __func__);
+		goto vreg_config_failed;
+	}
+	pr_debug("%s: mhl sii power on successful\n", __func__);
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+
+vreg_config_failed:
+	gpio_free(pwr_gpio);
+	return ret;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+/*
+ * mhl_gpio_config() - request/configure (on) or free (off) the reset
+ * and interrupt GPIOs, and derive the client IRQ from the intr GPIO.
+ *
+ * Error paths now free any GPIO already requested — the original code
+ * leaked the reset GPIO (and, on direction-set failure, the intr GPIO)
+ * which made a subsequent probe retry fail with -EBUSY.
+ *
+ * Return: 0 on success, -EBUSY on any request/configure failure.
+ */
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	int ret;
+	struct mdss_gpio *temp_reset_gpio, *temp_intr_gpio;
+
+	/* caused too many line spills */
+	temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+	temp_intr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_INTR_GPIO];
+
+	if (!on) {
+		pr_warn("%s: freeing gpios\n", __func__);
+		gpio_free(temp_intr_gpio->gpio);
+		gpio_free(temp_reset_gpio->gpio);
+		pr_debug("%s: successful\n", __func__);
+		return 0;
+	}
+
+	if (gpio_is_valid(temp_reset_gpio->gpio)) {
+		ret = gpio_request(temp_reset_gpio->gpio,
+				   temp_reset_gpio->gpio_name);
+		if (ret < 0) {
+			pr_err("%s:rst_gpio=[%d] req failed:%d\n",
+			       __func__, temp_reset_gpio->gpio, ret);
+			return -EBUSY;
+		}
+		ret = gpio_direction_output(temp_reset_gpio->gpio, 0);
+		if (ret < 0) {
+			pr_err("%s: set dirn rst failed: %d\n",
+			       __func__, ret);
+			gpio_free(temp_reset_gpio->gpio);
+			return -EBUSY;
+		}
+	}
+	if (gpio_is_valid(temp_intr_gpio->gpio)) {
+		ret = gpio_request(temp_intr_gpio->gpio,
+				   temp_intr_gpio->gpio_name);
+		if (ret < 0) {
+			pr_err("%s: intr_gpio req failed: %d\n",
+			       __func__, ret);
+			goto err_free_reset;
+		}
+		ret = gpio_direction_input(temp_intr_gpio->gpio);
+		if (ret < 0) {
+			pr_err("%s: set dirn intr failed: %d\n",
+			       __func__, ret);
+			gpio_free(temp_intr_gpio->gpio);
+			goto err_free_reset;
+		}
+		mhl_ctrl->i2c_handle->irq = gpio_to_irq(
+			temp_intr_gpio->gpio);
+		pr_debug("%s: gpio_to_irq=%d\n",
+			 __func__, mhl_ctrl->i2c_handle->irq);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+
+err_free_reset:
+	if (gpio_is_valid(temp_reset_gpio->gpio))
+		gpio_free(temp_reset_gpio->gpio);
+	return -EBUSY;
+}
+
+/*
+ * mhl_i2c_probe() - i2c probe: parse DT, set up workqueues, the RCP
+ * input device, the ext-vbus power supply, HDMI registration and the
+ * USB ext notification hook.
+ *
+ * Fixes over the original: the vmalloc() failure path used to 'return
+ * -ENOMEM' directly, leaking both workqueues and the allocated input
+ * device; the failure labels now also reclaim the input device, the
+ * RCP key table and the workqueues.
+ */
+static int mhl_i2c_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct mhl_tx_platform_data *pdata = NULL;
+	struct mhl_tx_ctrl *mhl_ctrl;
+	struct usb_ext_notification *mhl_info = NULL;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = NULL;
+
+	mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
+	if (!mhl_ctrl) {
+		rc = -ENOMEM;
+		goto failed_no_mem;
+	}
+
+	/* NOTE(review): without an of_node, pdata stays NULL and the
+	 * pdata->hdmi_pdev dereference below would crash; in practice the
+	 * driver only binds via of_match_table — confirm
+	 */
+	if (client->dev.of_node) {
+		pdata = devm_kzalloc(&client->dev,
+			sizeof(struct mhl_tx_platform_data), GFP_KERNEL);
+		if (!pdata) {
+			rc = -ENOMEM;
+			goto failed_no_mem;
+		}
+
+		rc = mhl_tx_get_dt_data(&client->dev, pdata);
+		if (rc) {
+			pr_err("%s: FAILED: parsing device tree data; rc=%d\n",
+			       __func__, rc);
+			goto failed_dt_data;
+		}
+		mhl_ctrl->i2c_handle = client;
+		mhl_ctrl->pdata = pdata;
+		i2c_set_clientdata(client, mhl_ctrl);
+	}
+
+	/*
+	 * Other initializations
+	 * such tx specific
+	 */
+	mhl_ctrl->disc_enabled = false;
+	INIT_WORK(&mhl_ctrl->mhl_msc_send_work, mhl_msc_send_work);
+	mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+	INIT_LIST_HEAD(&mhl_ctrl->list_cmd);
+	init_completion(&mhl_ctrl->msc_cmd_done);
+	spin_lock_init(&mhl_ctrl->lock);
+	/* NOTE(review): workqueue creation can return NULL; that is not
+	 * checked here (pre-existing) — the failure label below guards
+	 * against NULL before destroying them
+	 */
+	mhl_ctrl->msc_send_workqueue = create_singlethread_workqueue
+		("mhl_msc_cmd_queue");
+	mhl_ctrl->mhl_workq = create_singlethread_workqueue("mhl_workq");
+
+	INIT_WORK(&mhl_ctrl->mhl_intr_work, mhl_sii_disc_intr_work);
+
+	mhl_ctrl->input = input_allocate_device();
+	if (mhl_ctrl->input) {
+		int i;
+		struct input_dev *input = mhl_ctrl->input;
+
+		mhl_ctrl->rcp_key_code_tbl = vmalloc(
+			sizeof(support_rcp_key_code_tbl));
+		if (!mhl_ctrl->rcp_key_code_tbl) {
+			/* was a bare 'return -ENOMEM' that leaked the
+			 * workqueues and this input device
+			 */
+			input_free_device(input);
+			mhl_ctrl->input = NULL;
+			rc = -ENOMEM;
+			goto failed_probe;
+		}
+
+		mhl_ctrl->rcp_key_code_tbl_len = sizeof(
+			support_rcp_key_code_tbl);
+		memcpy(mhl_ctrl->rcp_key_code_tbl,
+		       &support_rcp_key_code_tbl[0],
+		       mhl_ctrl->rcp_key_code_tbl_len);
+
+		input->phys = "cbus/input0";
+		input->id.bustype = BUS_VIRTUAL;
+		input->id.vendor = 0x1095;
+		input->id.product = 0x8334;
+		input->id.version = 0xA;
+
+		input->name = "mhl-rcp";
+
+		/* NOTE(review): keycode points at the static table while
+		 * the vmalloc'ed copy is kept separately — confirm the
+		 * copy is actually needed
+		 */
+		input->keycode = support_rcp_key_code_tbl;
+		input->keycodesize = sizeof(u16);
+		input->keycodemax = ARRAY_SIZE(support_rcp_key_code_tbl);
+
+		input->evbit[0] = EV_KEY;
+		for (i = 0; i < ARRAY_SIZE(support_rcp_key_code_tbl); i++) {
+			if (support_rcp_key_code_tbl[i] > 1)
+				input_set_capability(input, EV_KEY,
+					support_rcp_key_code_tbl[i]);
+		}
+
+		if (input_register_device(input) < 0) {
+			pr_warn("%s: failed to register input device\n",
+				__func__);
+			input_free_device(input);
+			mhl_ctrl->input = NULL;
+		}
+	}
+
+	mhl_ctrl->dwnstream_hpd = 0;
+	mhl_ctrl->tx_powered_off = false;
+
+
+	init_completion(&mhl_ctrl->rgnd_done);
+
+
+	mhl_ctrl->mhl_psy.name = "ext-vbus";
+	mhl_ctrl->mhl_psy.type = POWER_SUPPLY_TYPE_USB_DCP;
+	mhl_ctrl->mhl_psy.supplied_to = mhl_pm_power_supplied_to;
+	mhl_ctrl->mhl_psy.num_supplicants = ARRAY_SIZE(
+		mhl_pm_power_supplied_to);
+	mhl_ctrl->mhl_psy.properties = mhl_pm_power_props;
+	mhl_ctrl->mhl_psy.num_properties = ARRAY_SIZE(mhl_pm_power_props);
+	mhl_ctrl->mhl_psy.get_property = mhl_power_get_property;
+	mhl_ctrl->mhl_psy.set_property = mhl_power_set_property;
+
+	rc = power_supply_register(&client->dev, &mhl_ctrl->mhl_psy);
+	if (rc < 0) {
+		dev_err(&client->dev, "%s:power_supply_register ext_vbus_psy failed\n",
+			__func__);
+		goto failed_probe;
+	}
+
+	hdmi_mhl_ops = devm_kzalloc(&client->dev,
+				    sizeof(struct msm_hdmi_mhl_ops),
+				    GFP_KERNEL);
+	if (!hdmi_mhl_ops) {
+		pr_err("%s: alloc hdmi mhl ops failed\n", __func__);
+		rc = -ENOMEM;
+		goto failed_probe_pwr;
+	}
+
+	pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+	if (mhl_ctrl->pdata->hdmi_pdev) {
+		rc = msm_hdmi_register_mhl(mhl_ctrl->pdata->hdmi_pdev,
+					   hdmi_mhl_ops, mhl_ctrl);
+		if (rc) {
+			pr_err("%s: register with hdmi failed\n", __func__);
+			rc = -EPROBE_DEFER;
+			goto failed_probe_pwr;
+		}
+	}
+
+	if (!hdmi_mhl_ops->tmds_enabled ||
+	    !hdmi_mhl_ops->set_mhl_max_pclk) {
+		pr_err("%s: func ptr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto failed_probe_pwr;
+	}
+	mhl_ctrl->hdmi_mhl_ops = hdmi_mhl_ops;
+
+	rc = hdmi_mhl_ops->set_mhl_max_pclk(
+		mhl_ctrl->pdata->hdmi_pdev, MAX_MHL_PCLK);
+	if (rc) {
+		pr_err("%s: can't set max mhl pclk\n", __func__);
+		goto failed_probe_pwr;
+	}
+
+	mhl_info = devm_kzalloc(&client->dev, sizeof(*mhl_info), GFP_KERNEL);
+	if (!mhl_info) {
+		rc = -ENOMEM;
+		goto failed_probe_pwr;
+	}
+
+	mhl_info->ctxt = mhl_ctrl;
+	mhl_info->notify = mhl_sii_device_discovery;
+	if (msm_register_usb_ext_notification(mhl_info)) {
+		pr_err("%s: register for usb notifcn failed\n", __func__);
+		rc = -EPROBE_DEFER;
+		goto failed_probe_pwr;
+	}
+	mhl_ctrl->mhl_info = mhl_info;
+	mhl_register_msc(mhl_ctrl);
+	return 0;
+
+failed_probe_pwr:
+	power_supply_unregister(&mhl_ctrl->mhl_psy);
+failed_probe:
+	mhl_sii_config(mhl_ctrl, false);
+	/* reclaim resources acquired before the failure; every call here
+	 * is guarded/NULL-safe so all gotos above may land on this label
+	 */
+	if (mhl_ctrl->input) {
+		input_unregister_device(mhl_ctrl->input);
+		mhl_ctrl->input = NULL;
+	}
+	vfree(mhl_ctrl->rcp_key_code_tbl);
+	if (mhl_ctrl->mhl_workq)
+		destroy_workqueue(mhl_ctrl->mhl_workq);
+	if (mhl_ctrl->msc_send_workqueue)
+		destroy_workqueue(mhl_ctrl->msc_send_workqueue);
+	/* do not deep-free */
+	if (mhl_info)
+		devm_kfree(&client->dev, mhl_info);
+failed_dt_data:
+	if (pdata)
+		devm_kfree(&client->dev, pdata);
+failed_no_mem:
+	if (mhl_ctrl)
+		devm_kfree(&client->dev, mhl_ctrl);
+	mhl_info = NULL;
+	pdata = NULL;
+	mhl_ctrl = NULL;
+	pr_err("%s: PROBE FAILED, rc=%d\n", __func__, rc);
+	return rc;
+}
+
+
+/*
+ * mhl_i2c_remove() - undo everything probe set up.
+ *
+ * Fixes over the original: also destroys the MSC send workqueue,
+ * unregisters the RCP input device and the ext-vbus power supply, and
+ * vfrees the RCP key table — all of which probe allocates but the old
+ * remove leaked.
+ */
+static int mhl_i2c_remove(struct i2c_client *client)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+	if (!mhl_ctrl) {
+		pr_warn("%s: i2c get client data failed\n", __func__);
+		return -EINVAL;
+	}
+
+	mhl_sii_config(mhl_ctrl, false);
+
+	destroy_workqueue(mhl_ctrl->mhl_workq);
+	if (mhl_ctrl->msc_send_workqueue)
+		destroy_workqueue(mhl_ctrl->msc_send_workqueue);
+
+	if (mhl_ctrl->input)
+		input_unregister_device(mhl_ctrl->input);
+	vfree(mhl_ctrl->rcp_key_code_tbl);
+
+	power_supply_unregister(&mhl_ctrl->mhl_psy);
+
+	if (mhl_ctrl->mhl_info)
+		devm_kfree(&client->dev, mhl_ctrl->mhl_info);
+	if (mhl_ctrl->pdata)
+		devm_kfree(&client->dev, mhl_ctrl->pdata);
+	devm_kfree(&client->dev, mhl_ctrl);
+	return 0;
+}
+
+/* i2c id table; const — the i2c core only ever reads it */
+static const struct i2c_device_id mhl_sii_i2c_id[] = {
+	{ MHL_DRIVER_NAME, 0 },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(i2c, mhl_sii_i2c_id);
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
+/*
+ * Common suspend path: if an MHL session is active, drop mhl_mode,
+ * notify the power-supply and USB stacks, and power the chip down.
+ * Always reports success so system suspend is never blocked.
+ */
+static int mhl_i2c_suspend_sub(struct i2c_client *client)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+	pr_debug("%s\n", __func__);
+
+	if (!mhl_ctrl) {
+		pr_err("%s: invalid ctrl data\n", __func__);
+		return 0;
+	}
+	if (!mhl_ctrl->mhl_mode)
+		return 0;
+
+	mhl_ctrl->mhl_mode = 0;
+	power_supply_changed(&mhl_ctrl->mhl_psy);
+	if (mhl_ctrl->notify_usb_online)
+		mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+	mhl_sii_config(mhl_ctrl, false);
+
+	return 0;
+}
+
+/* Resume is a no-op: no state is restored here (presumably the chip is
+ * re-initialised on the next connection event — confirm).
+ */
+static int mhl_i2c_resume_sub(struct i2c_client *client)
+{
+	pr_debug("%s\n", __func__);
+
+	return 0;
+}
+#endif /* defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP) */
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+/* Legacy (pre-dev_pm_ops) suspend entry; 'state' is unused. */
+static int mhl_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl suspend\n", __func__);
+	return mhl_i2c_suspend_sub(client);
+}
+
+/* Legacy (pre-dev_pm_ops) resume entry. */
+static int mhl_i2c_resume(struct i2c_client *client)
+{
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl resume\n", __func__);
+	return mhl_i2c_resume_sub(client);
+}
+#else
+#define mhl_i2c_suspend NULL
+#define mhl_i2c_resume NULL
+#endif /* defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_PM_SLEEP
+/* dev_pm_ops suspend hook: route to the shared suspend implementation */
+static int mhl_i2c_pm_suspend(struct device *dev)
+{
+	/* to_i2c_client() is the idiomatic form of the open-coded
+	 * container_of() the original used
+	 */
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl pm suspend\n", __func__);
+	return mhl_i2c_suspend_sub(client);
+}
+
+/* dev_pm_ops resume hook: route to the shared resume implementation */
+static int mhl_i2c_pm_resume(struct device *dev)
+{
+	/* idiomatic replacement for the open-coded container_of() */
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl pm resume\n", __func__);
+	return mhl_i2c_resume_sub(client);
+}
+
+/* only system-sleep callbacks are populated; no runtime PM hooks */
+static const struct dev_pm_ops mhl_i2c_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mhl_i2c_pm_suspend, mhl_i2c_pm_resume)
+};
+#endif /* CONFIG_PM_SLEEP */
+
+/* NOTE(review): not static, and no MODULE_DEVICE_TABLE(of, ...) for
+ * module autoload — if no other translation unit references this table
+ * it should be made static and exported to the device table; confirm.
+ */
+const struct of_device_id mhl_match_table[] = {
+	{.compatible = COMPATIBLE_NAME,},
+	{ },
+};
+
+/* i2c driver glue: legacy suspend/resume are used only when dev_pm_ops
+ * is unavailable (CONFIG_PM without CONFIG_PM_SLEEP)
+ */
+static struct i2c_driver mhl_sii_i2c_driver = {
+	.driver = {
+		.name = MHL_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mhl_match_table,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &mhl_i2c_pm_ops,
+#endif /* CONFIG_PM_SLEEP */
+	},
+	.probe = mhl_i2c_probe,
+	.remove = mhl_i2c_remove,
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+	.suspend = mhl_i2c_suspend,
+	.resume = mhl_i2c_resume,
+#endif /* defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) */
+	.id_table = mhl_sii_i2c_id,
+};
+
+/* generates module init/exit boilerplate and registers the driver */
+module_i2c_driver(mhl_sii_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHL SII 8334 TX Driver");
diff --git a/drivers/video/fbdev/msm/msm_dba/Kconfig b/drivers/video/fbdev/msm/msm_dba/Kconfig
new file mode 100644
index 0000000..69894cd
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/Kconfig
@@ -0,0 +1,24 @@
+#
+# MSM DBA
+#
+
+config MSM_DBA
+ bool "MSM Display Bridge Abstraction support"
+ depends on ARM || ARM64
+ ---help---
+ Support for MSM display bridge abstraction interface. MSM display
+ drivers can use the same interface to interact with different third
+ party bridge chips. Drivers implemented for third party bridge chips
+	  should support this interface to allow the display driver to control the
+ bridge chip. The MSM DBA driver maintains a list of devices supported
+	  on the platform and allows clients to register and access these
+ devices.
+
+config MSM_DBA_ADV7533
+ bool "ADV7533 driver support through MSM DBA interface"
+ depends on MSM_DBA
+ default n
+ ---help---
+ Support for ADV7533 DSI to HDMI display bridge driver. The driver
+ controls the ADV7533 HW through the I2C interface and configures
+ the DSI input and HDMI output video format.
diff --git a/drivers/video/fbdev/msm/msm_dba/Makefile b/drivers/video/fbdev/msm/msm_dba/Makefile
new file mode 100644
index 0000000..cf28ad4
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MSM_DBA) += msm_dba.o msm_dba_init.o msm_dba_helpers.o msm_dba_debug.o
+obj-$(CONFIG_MSM_DBA_ADV7533) += adv7533.o
+# NOTE(review): kbuild never invokes a per-directory 'clean' target
+# (objects are removed by the top-level 'make clean'); confirm whether
+# the rule below can be dropped.
+clean:
+	rm *.o
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
new file mode 100644
index 0000000..5b44a49
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -0,0 +1,2143 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include "msm_dba_internal.h"
+#include <linux/mdss_io_util.h>
+
+#define ADV7533_REG_CHIP_REVISION (0x00)
+#define ADV7533_DSI_CEC_I2C_ADDR_REG (0xE1)
+/* delay in ms applied around chip reset */
+#define ADV7533_RESET_DELAY (10)
+
+#define PINCTRL_STATE_ACTIVE "pmx_adv7533_active"
+#define PINCTRL_STATE_SUSPEND "pmx_adv7533_suspend"
+
+#define MDSS_MAX_PANEL_LEN 256
+#define EDID_SEG_SIZE 0x100
+/* size of audio and speaker info Block */
+#define AUDIO_DATA_SIZE 32
+
+/* 0x94 interrupts */
+#define HPD_INT_ENABLE BIT(7)
+#define MONITOR_SENSE_INT_ENABLE BIT(6)
+#define ACTIVE_VSYNC_EDGE BIT(5)
+#define AUDIO_FIFO_FULL BIT(4)
+#define EDID_READY_INT_ENABLE BIT(2)
+#define HDCP_AUTHENTICATED BIT(1)
+#define HDCP_RI_READY BIT(0)
+
+#define MAX_WAIT_TIME (100)
+#define MAX_RW_TRIES (3)
+
+/* 0x95 interrupts */
+#define HDCP_ERROR BIT(7)
+#define HDCP_BKSV_FLAG BIT(6)
+#define CEC_TX_READY BIT(5)
+#define CEC_TX_ARB_LOST BIT(4)
+#define CEC_TX_RETRY_TIMEOUT BIT(3)
+#define CEC_TX_RX_BUF3_READY BIT(2)
+#define CEC_TX_RX_BUF2_READY BIT(1)
+#define CEC_TX_RX_BUF1_READY BIT(0)
+
+/* convenience groupings for interrupt enable/disable configuration */
+#define HPD_INTERRUPTS (HPD_INT_ENABLE | \
+			MONITOR_SENSE_INT_ENABLE)
+#define EDID_INTERRUPTS EDID_READY_INT_ENABLE
+#define HDCP_INTERRUPTS1 HDCP_AUTHENTICATED
+#define HDCP_INTERRUPTS2 (HDCP_BKSV_FLAG | \
+			HDCP_ERROR)
+#define CEC_INTERRUPTS (CEC_TX_READY | \
+			CEC_TX_ARB_LOST | \
+			CEC_TX_RETRY_TIMEOUT | \
+			CEC_TX_RX_BUF3_READY | \
+			CEC_TX_RX_BUF2_READY | \
+			CEC_TX_RX_BUF1_READY)
+
+#define CFG_HPD_INTERRUPTS BIT(0)
+#define CFG_EDID_INTERRUPTS BIT(1)
+#define CFG_HDCP_INTERRUPTS BIT(2)
+#define CFG_CEC_INTERRUPTS BIT(3)
+
+#define MAX_OPERAND_SIZE 14
+/* CEC frame: header + opcode + up to MAX_OPERAND_SIZE operands */
+#define CEC_MSG_SIZE (MAX_OPERAND_SIZE + 2)
+
+/* default 7-bit slave addresses of the two ADV7533 register maps */
+enum adv7533_i2c_addr {
+	I2C_ADDR_MAIN = 0x3D,
+	I2C_ADDR_CEC_DSI = 0x3C,
+};
+
+/* the three hardware CEC receive buffers */
+enum adv7533_cec_buf {
+	ADV7533_CEC_BUF1,
+	ADV7533_CEC_BUF2,
+	ADV7533_CEC_BUF3,
+	ADV7533_CEC_BUF_MAX,
+};
+
+/* one register write: target map/address, register, value, and an
+ * optional post-write delay in ms
+ */
+struct adv7533_reg_cfg {
+	u8 i2c_addr;
+	u8 reg;
+	u8 val;
+	int sleep_in_ms;
+};
+
+/* one buffered CEC message with its hardware timestamp */
+struct adv7533_cec_msg {
+	u8 buf[CEC_MSG_SIZE];
+	u8 timestamp;
+	bool pending;
+};
+
+/* per-device driver state for one ADV7533 bridge */
+struct adv7533 {
+	u8 main_i2c_addr;
+	u8 cec_dsi_i2c_addr;
+	u8 video_mode;
+	int irq;
+	u32 irq_gpio;
+	u32 irq_flags;
+	u32 hpd_irq_gpio;
+	u32 hpd_irq_flags;
+	u32 switch_gpio;
+	u32 switch_flags;
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	bool audio;
+	bool disable_gpios;
+	struct mdss_module_power power_data;
+	bool hdcp_enabled;
+	bool cec_enabled;
+	bool is_power_on;
+	void *edid_data;
+	u8 edid_buf[EDID_SEG_SIZE];
+	u8 audio_spkr_data[AUDIO_DATA_SIZE];
+	struct workqueue_struct *workq;
+	struct delayed_work adv7533_intr_work_id;
+	struct msm_dba_device_info dev_info;
+	struct adv7533_cec_msg cec_msg[ADV7533_CEC_BUF_MAX];
+	struct i2c_client *i2c_client;
+	struct mutex ops_mutex;
+};
+
+static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
+
+static struct adv7533_reg_cfg adv7533_init_setup[] = {
+ /* power down */
+ {I2C_ADDR_MAIN, 0x41, 0x50, 5},
+ /* HPD override */
+ {I2C_ADDR_MAIN, 0xD6, 0x48, 5},
+ /* color space */
+ {I2C_ADDR_MAIN, 0x16, 0x20, 0},
+ /* Fixed */
+ {I2C_ADDR_MAIN, 0x9A, 0xE0, 0},
+ /* HDCP */
+ {I2C_ADDR_MAIN, 0xBA, 0x70, 0},
+ /* Fixed */
+ {I2C_ADDR_MAIN, 0xDE, 0x82, 0},
+ /* V1P2 */
+ {I2C_ADDR_MAIN, 0xE4, 0x40, 0},
+ /* Fixed */
+ {I2C_ADDR_MAIN, 0xE5, 0x80, 0},
+ /* Fixed */
+ {I2C_ADDR_CEC_DSI, 0x15, 0xD0, 0},
+ /* Fixed */
+ {I2C_ADDR_CEC_DSI, 0x17, 0xD0, 0},
+ /* Fixed */
+ {I2C_ADDR_CEC_DSI, 0x24, 0x20, 0},
+ /* Fixed */
+ {I2C_ADDR_CEC_DSI, 0x57, 0x11, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_video_en[] = {
+ /* Timing Generator Enable */
+ {I2C_ADDR_CEC_DSI, 0x27, 0xCB, 0},
+ {I2C_ADDR_CEC_DSI, 0x27, 0x8B, 0},
+ {I2C_ADDR_CEC_DSI, 0x27, 0xCB, 0},
+ /* power up */
+ {I2C_ADDR_MAIN, 0x41, 0x10, 0},
+ /* hdmi enable */
+ {I2C_ADDR_CEC_DSI, 0x03, 0x89, 0},
+ /* color depth */
+ {I2C_ADDR_MAIN, 0x4C, 0x04, 0},
+ /* down dither */
+ {I2C_ADDR_MAIN, 0x49, 0x02, 0},
+ /* Audio and CEC clock gate */
+ {I2C_ADDR_CEC_DSI, 0x05, 0xC8, 0},
+ /* GC packet enable */
+ {I2C_ADDR_MAIN, 0x40, 0x80, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_cec_en[] = {
+ /* Fixed, clock gate disable */
+ {I2C_ADDR_CEC_DSI, 0x05, 0xC8, 0},
+ /* read divider(7:2) from calc */
+ {I2C_ADDR_CEC_DSI, 0xBE, 0x01, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_cec_tg_init[] = {
+ /* TG programming for 19.2MHz, divider 25 */
+ {I2C_ADDR_CEC_DSI, 0xBE, 0x61, 0},
+ {I2C_ADDR_CEC_DSI, 0xC1, 0x0D, 0},
+ {I2C_ADDR_CEC_DSI, 0xC2, 0x80, 0},
+ {I2C_ADDR_CEC_DSI, 0xC3, 0x0C, 0},
+ {I2C_ADDR_CEC_DSI, 0xC4, 0x9A, 0},
+ {I2C_ADDR_CEC_DSI, 0xC5, 0x0E, 0},
+ {I2C_ADDR_CEC_DSI, 0xC6, 0x66, 0},
+ {I2C_ADDR_CEC_DSI, 0xC7, 0x0B, 0},
+ {I2C_ADDR_CEC_DSI, 0xC8, 0x1A, 0},
+ {I2C_ADDR_CEC_DSI, 0xC9, 0x0A, 0},
+ {I2C_ADDR_CEC_DSI, 0xCA, 0x33, 0},
+ {I2C_ADDR_CEC_DSI, 0xCB, 0x0C, 0},
+ {I2C_ADDR_CEC_DSI, 0xCC, 0x00, 0},
+ {I2C_ADDR_CEC_DSI, 0xCD, 0x07, 0},
+ {I2C_ADDR_CEC_DSI, 0xCE, 0x33, 0},
+ {I2C_ADDR_CEC_DSI, 0xCF, 0x05, 0},
+ {I2C_ADDR_CEC_DSI, 0xD0, 0xDA, 0},
+ {I2C_ADDR_CEC_DSI, 0xD1, 0x08, 0},
+ {I2C_ADDR_CEC_DSI, 0xD2, 0x8D, 0},
+ {I2C_ADDR_CEC_DSI, 0xD3, 0x01, 0},
+ {I2C_ADDR_CEC_DSI, 0xD4, 0xCD, 0},
+ {I2C_ADDR_CEC_DSI, 0xD5, 0x04, 0},
+ {I2C_ADDR_CEC_DSI, 0xD6, 0x80, 0},
+ {I2C_ADDR_CEC_DSI, 0xD7, 0x05, 0},
+ {I2C_ADDR_CEC_DSI, 0xD8, 0x66, 0},
+ {I2C_ADDR_CEC_DSI, 0xD9, 0x03, 0},
+ {I2C_ADDR_CEC_DSI, 0xDA, 0x26, 0},
+ {I2C_ADDR_CEC_DSI, 0xDB, 0x0A, 0},
+ {I2C_ADDR_CEC_DSI, 0xDC, 0xCD, 0},
+ {I2C_ADDR_CEC_DSI, 0xDE, 0x00, 0},
+ {I2C_ADDR_CEC_DSI, 0xDF, 0xC0, 0},
+ {I2C_ADDR_CEC_DSI, 0xE1, 0x00, 0},
+ {I2C_ADDR_CEC_DSI, 0xE2, 0xE6, 0},
+ {I2C_ADDR_CEC_DSI, 0xE3, 0x02, 0},
+ {I2C_ADDR_CEC_DSI, 0xE4, 0xB3, 0},
+ {I2C_ADDR_CEC_DSI, 0xE5, 0x03, 0},
+ {I2C_ADDR_CEC_DSI, 0xE6, 0x9A, 0},
+};
+
+/*
+ * Powers up the CEC block, forces HPD override, triggers an EDID re-read
+ * and programs logical address 0 (0x04 = Playback Device 1).
+ */
+static struct adv7533_reg_cfg adv7533_cec_power[] = {
+	/* cec power up */
+	{I2C_ADDR_MAIN, 0xE2, 0x00, 0},
+	/* hpd override */
+	{I2C_ADDR_MAIN, 0xD6, 0x48, 0},
+	/* edid reread */
+	{I2C_ADDR_MAIN, 0xC9, 0x13, 0},
+	/* read all CEC Rx Buffers */
+	{I2C_ADDR_CEC_DSI, 0xBA, 0x08, 0},
+	/* logical address0 0x04 */
+	{I2C_ADDR_CEC_DSI, 0xBC, 0x04, 0},
+	/* select logical address0 */
+	{I2C_ADDR_CEC_DSI, 0xBB, 0x10, 0},
+};
+
+/* I2S audio configuration: 16-bit, 48kHz, 2 channels, N = 6144. */
+static struct adv7533_reg_cfg I2S_cfg[] = {
+	{I2C_ADDR_MAIN, 0x0D, 0x18, 0}, /* Bit width = 16Bits*/
+	{I2C_ADDR_MAIN, 0x15, 0x20, 0}, /* Sampling Frequency = 48kHz*/
+	{I2C_ADDR_MAIN, 0x02, 0x18, 0}, /* N value 6144 --> 0x1800*/
+	{I2C_ADDR_MAIN, 0x14, 0x02, 0}, /* Word Length = 16Bits*/
+	{I2C_ADDR_MAIN, 0x73, 0x01, 0}, /* Channel Count = 2 channels */
+};
+
+/*
+ * adv7533_write() - write one byte to a register on the ADV7533.
+ * @pdata:  driver data (holds i2c client and programmed slave addresses)
+ * @offset: I2C_ADDR_MAIN or I2C_ADDR_CEC_DSI select the corresponding
+ *          register map; any other value is used directly as the slave
+ *          address (used for EDID segment reads elsewhere in this file)
+ * @reg:    register offset within the selected map
+ * @val:    byte to write
+ *
+ * Return: 0 on success or the msm_dba_helper_i2c_write_byte() error.
+ */
+static int adv7533_write(struct adv7533 *pdata, u8 offset, u8 reg, u8 val)
+{
+	u8 addr = 0;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_debug("%s: Invalid argument\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Map the logical offset to the actual programmed slave address. */
+	if (offset == I2C_ADDR_MAIN)
+		addr = pdata->main_i2c_addr;
+	else if (offset == I2C_ADDR_CEC_DSI)
+		addr = pdata->cec_dsi_i2c_addr;
+	else
+		addr = offset;
+
+	ret = msm_dba_helper_i2c_write_byte(pdata->i2c_client, addr, reg, val);
+	if (ret)
+		pr_err_ratelimited("%s: wr err: addr 0x%x, reg 0x%x, val 0x%x\n",
+			__func__, addr, reg, val);
+	return ret;
+}
+
+/*
+ * adv7533_read() - read @size bytes starting at @reg into @buf.
+ * @offset: same slave-address selection convention as adv7533_write().
+ *
+ * Return: 0 on success or the msm_dba_helper_i2c_read() error.
+ */
+static int adv7533_read(struct adv7533 *pdata, u8 offset,
+	u8 reg, char *buf, u32 size)
+{
+	u8 addr = 0;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_debug("%s: Invalid argument\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Map the logical offset to the actual programmed slave address. */
+	if (offset == I2C_ADDR_MAIN)
+		addr = pdata->main_i2c_addr;
+	else if (offset == I2C_ADDR_CEC_DSI)
+		addr = pdata->cec_dsi_i2c_addr;
+	else
+		addr = offset;
+
+	ret = msm_dba_helper_i2c_read(pdata->i2c_client, addr, reg, buf, size);
+	if (ret)
+		pr_err_ratelimited("%s: read err: addr 0x%x, reg 0x%x, size 0x%x\n",
+			__func__, addr, reg, size);
+	return ret;
+}
+
+/*
+ * adv7533_dump_debug_info() - dump both register maps (main and CEC/DSI,
+ * offsets 0x00-0xFF) to the kernel log at error level.
+ * @flags: unused.
+ *
+ * Return: status of the last register read.
+ */
+static int adv7533_dump_debug_info(struct msm_dba_device_info *dev, u32 flags)
+{
+	int rc = 0;
+	u8 byte_val = 0;
+	u16 addr = 0;	/* u16 so "addr <= 0xFF" cannot wrap and loop forever */
+	struct adv7533 *pdata = NULL;
+
+	if (!dev) {
+		pr_err("%s: dev is NULL\n", __func__);
+		return -EINVAL;
+	}
+	pdata = container_of(dev, struct adv7533, dev_info);
+	if (!pdata) {
+		pr_err("%s: pdata is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* dump main addr*/
+	pr_err("========Main I2C=0x%02x Start==========\n",
+		pdata->main_i2c_addr);
+	for (addr = 0; addr <= 0xFF; addr++) {
+		rc = adv7533_read(pdata, I2C_ADDR_MAIN,
+			(u8)addr, &byte_val, 1);
+		if (rc)
+			pr_err("%s: read reg=0x%02x failed @ addr=0x%02x\n",
+				__func__, addr, pdata->main_i2c_addr);
+		else
+			pr_err("0x%02x -> 0x%02X\n", addr, byte_val);
+	}
+	pr_err("========Main I2C=0x%02x End==========\n",
+		pdata->main_i2c_addr);
+	/* dump CEC addr*/
+	pr_err("=======CEC I2C=0x%02x Start=========\n",
+		pdata->cec_dsi_i2c_addr);
+	for (addr = 0; addr <= 0xFF; addr++) {
+		rc = adv7533_read(pdata, I2C_ADDR_CEC_DSI,
+			(u8)addr, &byte_val, 1);
+		if (rc)
+			pr_err("%s: read reg=0x%02x failed @ addr=0x%02x\n",
+				__func__, addr, pdata->cec_dsi_i2c_addr);
+		else
+			pr_err("0x%02x -> 0x%02X\n", addr, byte_val);
+	}
+	pr_err("========CEC I2C=0x%02x End==========\n",
+		pdata->cec_dsi_i2c_addr);
+
+	return rc;
+}
+
+/*
+ * adv7533_write_array() - apply a table of register writes.
+ * @cfg:  table of {map, reg, val, sleep} entries
+ * @size: size of @cfg in BYTES (callers pass sizeof(table)); converted
+ *        to an element count below
+ *
+ * Stops at the first failed write. An optional per-entry delay
+ * (sleep_in_ms) is honored after each successful write.
+ *
+ * Return: 0 on success, first error code otherwise.
+ */
+static int adv7533_write_array(struct adv7533 *pdata,
+	struct adv7533_reg_cfg *cfg, int size)
+{
+	int ret = 0;
+	int i;
+
+	/* convert byte count to number of table entries */
+	size = size / sizeof(struct adv7533_reg_cfg);
+	for (i = 0; i < size; i++) {
+		switch (cfg[i].i2c_addr) {
+		case I2C_ADDR_MAIN:
+			ret = adv7533_write(pdata, I2C_ADDR_MAIN,
+				cfg[i].reg, cfg[i].val);
+			if (ret != 0)
+				pr_err("%s: adv7533_write_byte returned %d\n",
+					__func__, ret);
+			break;
+		case I2C_ADDR_CEC_DSI:
+			ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI,
+				cfg[i].reg, cfg[i].val);
+			if (ret != 0)
+				pr_err("%s: adv7533_write_byte returned %d\n",
+					__func__, ret);
+			break;
+		default:
+			/* table entry with an unknown register map */
+			ret = -EINVAL;
+			pr_err("%s: Default case? BUG!\n", __func__);
+			break;
+		}
+		if (ret != 0) {
+			pr_err("%s: adv7533 reg writes failed. ", __func__);
+			pr_err("Last write %02X to %02X\n",
+				cfg[i].val, cfg[i].reg);
+			goto w_regs_fail;
+		}
+		if (cfg[i].sleep_in_ms)
+			msleep(cfg[i].sleep_in_ms);
+	}
+
+w_regs_fail:
+	if (ret != 0)
+		pr_err("%s: Exiting with ret = %d after %d writes\n",
+			__func__, ret, i);
+	return ret;
+}
+
+/*
+ * adv7533_read_device_rev() - read the chip revision register.
+ *
+ * The revision value itself is discarded; only the I2C transaction
+ * status is returned — presumably used as a chip-presence check at
+ * probe time (TODO confirm against caller).
+ */
+static int adv7533_read_device_rev(struct adv7533 *pdata)
+{
+	u8 rev = 0;
+	int ret;
+
+	ret = adv7533_read(pdata, I2C_ADDR_MAIN, ADV7533_REG_CHIP_REVISION,
+		&rev, 1);
+
+	return ret;
+}
+
+/*
+ * adv7533_program_i2c_addr() - program a non-default CEC/DSI slave
+ * address into the chip.
+ *
+ * The register expects the 8-bit (shifted) form of the 7-bit address,
+ * hence the << 1. A no-op when DT uses the default I2C_ADDR_CEC_DSI.
+ */
+static int adv7533_program_i2c_addr(struct adv7533 *pdata)
+{
+	u8 i2c_8bits = pdata->cec_dsi_i2c_addr << 1;
+	int ret = 0;
+
+	if (pdata->cec_dsi_i2c_addr != I2C_ADDR_CEC_DSI) {
+		ret = adv7533_write(pdata, I2C_ADDR_MAIN,
+			ADV7533_DSI_CEC_I2C_ADDR_REG,
+			i2c_8bits);
+
+		if (ret)
+			pr_err("%s: write err CEC_ADDR[0x%02x] main_addr=0x%02x\n",
+				__func__, ADV7533_DSI_CEC_I2C_ADDR_REG,
+				pdata->main_i2c_addr);
+	}
+
+	return ret;
+}
+
+/*
+ * adv7533_parse_vreg_dt() - parse regulator configuration from DT into
+ * @mp ("qcom,supply-names" plus the parallel min/max-voltage and
+ * enable/disable-load u32 arrays, all indexed per supply).
+ *
+ * On any parse failure the partially-filled config is freed and
+ * mp->num_vreg is reset to 0, so callers see an all-or-nothing result.
+ * "qcom,post-on-sleep" is optional and only logged when absent.
+ */
+static void adv7533_parse_vreg_dt(struct device *dev,
+	struct mdss_module_power *mp)
+{
+	int i, rc = 0;
+	int dt_vreg_total = 0;
+	struct device_node *of_node = NULL;
+	u32 *val_array = NULL;
+
+	of_node = dev->of_node;
+
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total <= 0) {
+		pr_warn("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		goto end;
+	}
+	mp->num_vreg = dt_vreg_total;
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) *
+		dt_vreg_total, GFP_KERNEL);
+	if (!mp->vreg_config)
+		goto end;
+
+	/* scratch buffer reused for each u32-array property below */
+	val_array = devm_kzalloc(dev, sizeof(u32) * dt_vreg_total, GFP_KERNEL);
+	if (!val_array)
+		goto end;
+
+	for (i = 0; i < dt_vreg_total; i++) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string_index(of_node,
+			"qcom,supply-names", i, &st);
+		if (rc) {
+			pr_warn("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto end;
+		}
+		snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
+
+		/* vreg-min-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,min-voltage-level", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read min volt. rc=%d\n",
+				__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].min_voltage = val_array[i];
+
+		/* vreg-max-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,max-voltage-level", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read max volt. rc=%d\n",
+				__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].max_voltage = val_array[i];
+
+		/* vreg-op-mode */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,enable-load", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read enable load. rc=%d\n",
+				__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].enable_load = val_array[i];
+
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,disable-load", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read disable load. rc=%d\n",
+				__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].disable_load = val_array[i];
+
+		/* post-on-sleep (optional property) */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,post-on-sleep", val_array,
+			dt_vreg_total);
+		if (rc)
+			pr_warn("%s: error read post on sleep. rc=%d\n",
+				__func__, rc);
+		else
+			mp->vreg_config[i].post_on_sleep = val_array[i];
+
+		pr_debug("%s: %s min=%d, max=%d, enable=%d disable=%d post-on-sleep=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].enable_load,
+			mp->vreg_config[i].disable_load,
+			mp->vreg_config[i].post_on_sleep);
+	}
+
+	devm_kfree(dev, val_array);
+	return;
+
+end:
+	/* roll back to a clean "no regulators" state on any failure */
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+
+	if (val_array)
+		devm_kfree(dev, val_array);
+}
+
+/*
+ * adv7533_parse_dt() - parse all DT properties into @pdata: instance id,
+ * I2C addresses, video mode, audio flag, regulators, pinctrl states and
+ * (unless "adi,disable-gpios") the irq/hpd-irq/switch GPIOs.
+ *
+ * Return: 0 on success; a negative error for a missing mandatory
+ * property or pinctrl failure (the error is carried to "end").
+ */
+static int adv7533_parse_dt(struct device *dev,
+	struct adv7533 *pdata)
+{
+	struct device_node *np = dev->of_node;
+	u32 temp_val = 0;
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "instance_id", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "instance_id",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->dev_info.instance_id = temp_val;
+
+	ret = of_property_read_u32(np, "adi,main-addr", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "adi,main-addr",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->main_i2c_addr = (u8)temp_val;
+
+	ret = of_property_read_u32(np, "adi,cec-dsi-addr", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "adi,cec-dsi-addr",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->cec_dsi_i2c_addr = (u8)temp_val;
+
+	ret = of_property_read_u32(np, "adi,video-mode", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "adi,video-mode",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->video_mode = (u8)temp_val;
+
+	pdata->audio = of_property_read_bool(np, "adi,enable-audio");
+
+	adv7533_parse_vreg_dt(dev, &pdata->power_data);
+
+	/* Get pinctrl if target uses pinctrl */
+	pdata->ts_pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(pdata->ts_pinctrl)) {
+		ret = PTR_ERR(pdata->ts_pinctrl);
+		pr_err("%s: Pincontrol DT property returned %X\n",
+			__func__, ret);
+	} else {
+		/*
+		 * Only look up states on a valid handle; the previous code
+		 * called pinctrl_lookup_state() even on an ERR_PTR handle,
+		 * dereferencing an error pointer.
+		 */
+		pdata->pinctrl_state_active =
+			pinctrl_lookup_state(pdata->ts_pinctrl,
+				"pmx_adv7533_active");
+		if (IS_ERR_OR_NULL(pdata->pinctrl_state_active)) {
+			ret = PTR_ERR(pdata->pinctrl_state_active);
+			pr_err("Can not lookup %s pinstate %d\n",
+				PINCTRL_STATE_ACTIVE, ret);
+		}
+
+		pdata->pinctrl_state_suspend =
+			pinctrl_lookup_state(pdata->ts_pinctrl,
+				"pmx_adv7533_suspend");
+		if (IS_ERR_OR_NULL(pdata->pinctrl_state_suspend)) {
+			ret = PTR_ERR(pdata->pinctrl_state_suspend);
+			pr_err("Can not lookup %s pinstate %d\n",
+				PINCTRL_STATE_SUSPEND, ret);
+		}
+	}
+
+	pdata->disable_gpios = of_property_read_bool(np,
+		"adi,disable-gpios");
+
+	if (!(pdata->disable_gpios)) {
+		pdata->irq_gpio = of_get_named_gpio_flags(np,
+			"adi,irq-gpio", 0, &pdata->irq_flags);
+
+		pdata->hpd_irq_gpio = of_get_named_gpio_flags(np,
+			"adi,hpd-irq-gpio", 0,
+			&pdata->hpd_irq_flags);
+
+		pdata->switch_gpio = of_get_named_gpio_flags(np,
+			"adi,switch-gpio", 0, &pdata->switch_flags);
+	}
+
+end:
+	return ret;
+}
+
+/*
+ * adv7533_gpio_configure() - request and configure (on=true) or release
+ * (on=false) the IRQ, HPD-IRQ and switch GPIOs.
+ *
+ * On the acquire path, failures unwind everything acquired so far via
+ * the err_* labels. No-op when "adi,disable-gpios" is set in DT.
+ *
+ * Fixes vs. previous version:
+ *  - restored the lost "else" so the teardown code and the error labels
+ *    are inside the function body (the stray brace unbalanced it);
+ *  - a missing irq gpio now returns -EINVAL instead of falling through
+ *    to err_none with ret still 0 (silent success);
+ *  - switch-gpio request failure now logs switch_gpio, not irq_gpio.
+ */
+static int adv7533_gpio_configure(struct adv7533 *pdata, bool on)
+{
+	int ret = 0;
+
+	if (pdata->disable_gpios)
+		return 0;
+
+	if (on) {
+		if (gpio_is_valid(pdata->irq_gpio)) {
+			ret = gpio_request(pdata->irq_gpio, "adv7533_irq_gpio");
+			if (ret) {
+				pr_err("%d unable to request gpio [%d] ret=%d\n",
+					__LINE__, pdata->irq_gpio, ret);
+				goto err_none;
+			}
+			ret = gpio_direction_input(pdata->irq_gpio);
+			if (ret) {
+				pr_err("unable to set dir for gpio[%d]\n",
+					pdata->irq_gpio);
+				goto err_irq_gpio;
+			}
+		} else {
+			pr_err("irq gpio not provided\n");
+			ret = -EINVAL;
+			goto err_none;
+		}
+
+		/* HPD irq gpio is optional; only warn when absent */
+		if (gpio_is_valid(pdata->hpd_irq_gpio)) {
+			ret = gpio_request(pdata->hpd_irq_gpio,
+				"adv7533_hpd_irq_gpio");
+			if (ret) {
+				pr_err("unable to request gpio [%d]\n",
+					pdata->hpd_irq_gpio);
+				goto err_irq_gpio;
+			}
+			ret = gpio_direction_input(pdata->hpd_irq_gpio);
+			if (ret) {
+				pr_err("unable to set dir for gpio[%d]\n",
+					pdata->hpd_irq_gpio);
+				goto err_hpd_irq_gpio;
+			}
+		} else {
+			pr_warn("hpd irq gpio not provided\n");
+		}
+
+		if (gpio_is_valid(pdata->switch_gpio)) {
+			ret = gpio_request(pdata->switch_gpio,
+				"adv7533_switch_gpio");
+			if (ret) {
+				pr_err("%d unable to request gpio [%d] ret=%d\n",
+					__LINE__, pdata->switch_gpio, ret);
+				goto err_hpd_irq_gpio;
+			}
+
+			ret = gpio_direction_output(pdata->switch_gpio, 0);
+			if (ret) {
+				pr_err("unable to set dir for gpio [%d]\n",
+					pdata->switch_gpio);
+				goto err_switch_gpio;
+			}
+
+			/* drive the switch to its active level */
+			gpio_set_value(pdata->switch_gpio,
+				!pdata->switch_flags);
+			msleep(ADV7533_RESET_DELAY);
+		}
+
+		return 0;
+	} else {
+		/* teardown: release whatever was acquired */
+		if (gpio_is_valid(pdata->irq_gpio))
+			gpio_free(pdata->irq_gpio);
+		if (gpio_is_valid(pdata->hpd_irq_gpio))
+			gpio_free(pdata->hpd_irq_gpio);
+		if (gpio_is_valid(pdata->switch_gpio))
+			gpio_free(pdata->switch_gpio);
+
+		return 0;
+	}
+
+err_switch_gpio:
+	if (gpio_is_valid(pdata->switch_gpio))
+		gpio_free(pdata->switch_gpio);
+err_hpd_irq_gpio:
+	if (gpio_is_valid(pdata->hpd_irq_gpio))
+		gpio_free(pdata->hpd_irq_gpio);
+err_irq_gpio:
+	if (gpio_is_valid(pdata->irq_gpio))
+		gpio_free(pdata->irq_gpio);
+err_none:
+	return ret;
+}
+
+/*
+ * adv7533_notify_clients() - invoke every registered client callback
+ * with @event.
+ *
+ * NOTE(review): list locking, if any, is expected to be handled by the
+ * caller — nothing is taken here; confirm against the msm_dba core.
+ */
+static void adv7533_notify_clients(struct msm_dba_device_info *dev,
+	enum msm_dba_callback_event event)
+{
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+
+	if (!dev) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	list_for_each(pos, &dev->client_list) {
+		/* list_entry() never yields NULL; the c check below is
+		 * effectively just a cb check */
+		c = list_entry(pos, struct msm_dba_client_info, list);
+
+		pr_debug("%s: notifying event %d to client %s\n", __func__,
+			event, c->client_name);
+
+		if (c && c->cb)
+			c->cb(c->cb_data, event);
+	}
+}
+
+/*
+ * adv7533_read_edid() - read @size bytes of EDID in two half-sized
+ * chunks from the segment address published in main-map register 0x43
+ * (stored as an 8-bit address, hence the >> 1).
+ *
+ * Previously every I2C error was silently discarded and 0 (success) was
+ * always returned; failures are now propagated so the caller's
+ * "if (ret)" check actually fires.
+ *
+ * Return: 0 on success, non-zero (the cast I2C error) on failure.
+ */
+u32 adv7533_read_edid(struct adv7533 *pdata, u32 size, char *edid_buf)
+{
+	u32 read_size = size / 2;
+	u8 edid_addr = 0;
+	int ndx;
+	int rc;
+
+	if (!pdata || !edid_buf)
+		return 0;
+
+	pr_debug("%s: size %d\n", __func__, size);
+
+	rc = adv7533_read(pdata, I2C_ADDR_MAIN, 0x43, &edid_addr, 1);
+	if (rc)
+		return (u32)rc;
+
+	pr_debug("%s: edid address 0x%x\n", __func__, edid_addr);
+
+	rc = adv7533_read(pdata, edid_addr >> 1, 0x00, edid_buf, read_size);
+	if (rc)
+		return (u32)rc;
+
+	rc = adv7533_read(pdata, edid_addr >> 1, read_size,
+		edid_buf + read_size, read_size);
+	if (rc)
+		return (u32)rc;
+
+	for (ndx = 0; ndx < size; ndx += 4)
+		pr_debug("%s: EDID[%02x-%02x] %02x %02x %02x %02x\n",
+			__func__, ndx, ndx + 3,
+			edid_buf[ndx + 0], edid_buf[ndx + 1],
+			edid_buf[ndx + 2], edid_buf[ndx + 3]);
+
+	return 0;
+}
+
+/*
+ * adv7533_cec_prepare_msg() - load a CEC message into the chip's Tx
+ * registers: header at 0x70, opcode at 0x71, up to MAX_OPERAND_SIZE
+ * operands from 0x72, then the frame length at 0x80.
+ *
+ * Fixes vs. previous version: ret was never set to 0, so the function
+ * reported -EINVAL even on success; write errors are now propagated;
+ * the opcode is only written when the message actually has one
+ * (size >= 2), avoiding a one-byte overread of @msg.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int adv7533_cec_prepare_msg(struct adv7533 *pdata, u8 *msg, u32 size)
+{
+	int i, ret = -EINVAL;
+	int op_sz;
+
+	if (!pdata || !msg) {
+		pr_err("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	if (size == 0 || size > CEC_MSG_SIZE) {
+		pr_err("%s: ERROR: invalid msg size\n", __func__);
+		goto end;
+	}
+
+	/* operand size = total size - header size - opcode size */
+	op_sz = size - 2;
+
+	/* write header */
+	ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x70, msg[0]);
+	if (ret)
+		goto end;
+
+	/* write opcode (absent for header-only polling messages) */
+	if (size >= 2) {
+		ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x71, msg[1]);
+		if (ret)
+			goto end;
+	}
+
+	/* write operands */
+	for (i = 0; i < op_sz && i < MAX_OPERAND_SIZE; i++) {
+		pr_debug("%s: writing operands\n", __func__);
+		ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x72 + i,
+			msg[i + 2]);
+		if (ret)
+			goto end;
+	}
+
+	/* program the frame length to kick off transmission */
+	ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x80, size);
+
+end:
+	return ret;
+}
+
+/*
+ * adv7533_rd_cec_msg() - read one CEC Rx buffer (1..3, register bases
+ * 0x85/0x97/0xA8) into @cec_buf (CEC_MSG_SIZE bytes).
+ *
+ * Fix vs. previous version: the read status is now returned instead of
+ * an unconditional -EINVAL.
+ *
+ * Return: 0 on success, negative error on bad args / invalid buffer
+ * number / I2C failure.
+ */
+static int adv7533_rd_cec_msg(struct adv7533 *pdata, u8 *cec_buf, int msg_num)
+{
+	int ret = -EINVAL;
+	u8 reg = 0;
+
+	if (!pdata || !cec_buf) {
+		pr_err("%s: Invalid input\n", __func__);
+		goto end;
+	}
+
+	if (msg_num == ADV7533_CEC_BUF1)
+		reg = 0x85;
+	else if (msg_num == ADV7533_CEC_BUF2)
+		reg = 0x97;
+	else if (msg_num == ADV7533_CEC_BUF3)
+		reg = 0xA8;
+	else
+		pr_err("%s: Invalid msg_num %d\n", __func__, msg_num);
+
+	if (!reg)
+		goto end;
+
+	ret = adv7533_read(pdata, I2C_ADDR_CEC_DSI, reg, cec_buf,
+		CEC_MSG_SIZE);
+end:
+	return ret;
+}
+
+/*
+ * adv7533_handle_hdcp_intr() - decode HDCP status bits: BIT(6) = BKSV
+ * ready, BIT(7) = HDCP error, with the detailed DDC error code in the
+ * high nibble of main-map register 0xC8.
+ *
+ * Fix vs. previous version: "ddc_status & 0xF0 >> 4" parsed as
+ * "ddc_status & (0xF0 >> 4)" because >> binds tighter than &, so the
+ * LOW nibble was being switched on. The intended high-nibble extract is
+ * now explicitly parenthesized.
+ */
+static void adv7533_handle_hdcp_intr(struct adv7533 *pdata, u8 hdcp_status)
+{
+	u8 ddc_status = 0;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input\n", __func__);
+		goto end;
+	}
+
+	/* HDCP ready for read */
+	if (hdcp_status & BIT(6))
+		pr_debug("%s: BKSV FLAG\n", __func__);
+
+	/* check for HDCP error */
+	if (hdcp_status & BIT(7)) {
+		pr_err("%s: HDCP ERROR\n", __func__);
+
+		/* get error details */
+		adv7533_read(pdata, I2C_ADDR_MAIN, 0xC8, &ddc_status, 1);
+
+		switch ((ddc_status & 0xF0) >> 4) {
+		case 0:
+			pr_debug("%s: DDC: NO ERROR\n", __func__);
+			break;
+		case 1:
+			pr_err("%s: DDC: BAD RX BKSV\n", __func__);
+			break;
+		case 2:
+			pr_err("%s: DDC: Ri MISMATCH\n", __func__);
+			break;
+		case 3:
+			pr_err("%s: DDC: Pj MISMATCH\n", __func__);
+			break;
+		case 4:
+			pr_err("%s: DDC: I2C ERROR\n", __func__);
+			break;
+		case 5:
+			pr_err("%s: DDC: TIMED OUT DS DONE\n", __func__);
+			break;
+		case 6:
+			pr_err("%s: DDC: MAX CAS EXC\n", __func__);
+			break;
+		default:
+			pr_debug("%s: DDC: UNKNOWN ERROR\n", __func__);
+		}
+	}
+end:
+	return;
+}
+
+/*
+ * adv7533_handle_cec_intr() - service CEC interrupts: Tx status bits
+ * (5:3) are only logged; Rx-ready bits (2:0) cause each ready buffer to
+ * be drained into pdata->cec_msg[], clients to be notified, and the Rx
+ * interrupts to be cleared/re-armed via register 0xBA.
+ * Register order here is deliberate — do not reorder the reads/writes.
+ */
+static void adv7533_handle_cec_intr(struct adv7533 *pdata, u8 cec_status)
+{
+	/* default 0xBA value used if the read below fails or is skipped */
+	u8 cec_int_clear = 0x08;
+	bool cec_rx_intr = false;
+	u8 cec_rx_ready = 0;
+	u8 cec_rx_timestamp = 0;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input\n", __func__);
+		goto end;
+	}
+
+	/* bits 2:0 of cec_status flag pending Rx buffers */
+	if (cec_status & 0x07) {
+		cec_rx_intr = true;
+		adv7533_read(pdata, I2C_ADDR_CEC_DSI, 0xBA, &cec_int_clear, 1);
+	}
+
+	if (cec_status & BIT(5))
+		pr_debug("%s: CEC TX READY\n", __func__);
+
+	if (cec_status & BIT(4))
+		pr_debug("%s: CEC TX Arbitration lost\n", __func__);
+
+	if (cec_status & BIT(3))
+		pr_debug("%s: CEC TX retry timout\n", __func__);
+
+	if (!cec_rx_intr)
+		return;
+
+
+	adv7533_read(pdata, I2C_ADDR_CEC_DSI, 0xB9, &cec_rx_ready, 1);
+
+	/* 0x96 packs a 2-bit arrival timestamp per Rx buffer */
+	adv7533_read(pdata, I2C_ADDR_CEC_DSI, 0x96, &cec_rx_timestamp, 1);
+
+	if (cec_rx_ready & BIT(0)) {
+		pr_debug("%s: CEC Rx buffer 1 ready\n", __func__);
+		adv7533_rd_cec_msg(pdata,
+			pdata->cec_msg[ADV7533_CEC_BUF1].buf,
+			ADV7533_CEC_BUF1);
+
+		pdata->cec_msg[ADV7533_CEC_BUF1].pending = true;
+
+		pdata->cec_msg[ADV7533_CEC_BUF1].timestamp =
+			cec_rx_timestamp & (BIT(0) | BIT(1));
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_CEC_READ_PENDING);
+	}
+
+	if (cec_rx_ready & BIT(1)) {
+		pr_debug("%s: CEC Rx buffer 2 ready\n", __func__);
+		adv7533_rd_cec_msg(pdata,
+			pdata->cec_msg[ADV7533_CEC_BUF2].buf,
+			ADV7533_CEC_BUF2);
+
+		pdata->cec_msg[ADV7533_CEC_BUF2].pending = true;
+
+		pdata->cec_msg[ADV7533_CEC_BUF2].timestamp =
+			cec_rx_timestamp & (BIT(2) | BIT(3));
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_CEC_READ_PENDING);
+	}
+
+	if (cec_rx_ready & BIT(2)) {
+		pr_debug("%s: CEC Rx buffer 3 ready\n", __func__);
+		adv7533_rd_cec_msg(pdata,
+			pdata->cec_msg[ADV7533_CEC_BUF3].buf,
+			ADV7533_CEC_BUF3);
+
+		pdata->cec_msg[ADV7533_CEC_BUF3].pending = true;
+
+		pdata->cec_msg[ADV7533_CEC_BUF3].timestamp =
+			cec_rx_timestamp & (BIT(4) | BIT(5));
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_CEC_READ_PENDING);
+	}
+
+	/* set then clear the Rx interrupt bits to acknowledge them */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0xBA,
+		cec_int_clear | (cec_status & 0x07));
+
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0xBA, cec_int_clear & ~0x07);
+
+end:
+	return;
+}
+
+/*
+ * adv7533_edid_read_init() - kick off an EDID read in the chip
+ * (power-up via 0x41, EDID re-read request via 0xC9).
+ *
+ * Fix vs. previous version: the function unconditionally returned
+ * -EINVAL; it now returns the status of the register writes.
+ */
+static int adv7533_edid_read_init(struct adv7533 *pdata)
+{
+	int ret = -EINVAL;
+
+	if (!pdata) {
+		pr_err("%s: invalid pdata\n", __func__);
+		goto end;
+	}
+
+	/* initiate edid read in adv7533 */
+	ret = adv7533_write(pdata, I2C_ADDR_MAIN, 0x41, 0x10);
+	if (ret)
+		goto end;
+	ret = adv7533_write(pdata, I2C_ADDR_MAIN, 0xC9, 0x13);
+
+end:
+	return ret;
+}
+
+/*
+ * adv7533_handle_hpd_intr() - read HPD state (main-map 0x42): connected
+ * when both BIT(5) and BIT(6) are set, disconnected when neither is;
+ * disconnect notifies clients.
+ *
+ * Return: an ERR_PTR-encoded int — ERR_PTR(1) for connected, ERR_PTR(0)
+ * otherwise. The caller decodes it with PTR_ERR()/IS_ERR(); values 0/1
+ * are not in the error range, so IS_ERR() stays false on success.
+ */
+static void *adv7533_handle_hpd_intr(struct adv7533 *pdata)
+{
+	int ret = 0;
+	u8 hpd_state;
+	u8 connected = 0, disconnected = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid pdata\n", __func__);
+		goto end;
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x42, &hpd_state, 1);
+
+	connected = (hpd_state & BIT(5)) && (hpd_state & BIT(6));
+	disconnected = !(hpd_state & (BIT(5) | BIT(6)));
+
+	if (connected) {
+		pr_debug("%s: Rx CONNECTED\n", __func__);
+	} else if (disconnected) {
+		pr_debug("%s: Rx DISCONNECTED\n", __func__);
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_HPD_DISCONNECT);
+	} else {
+		/* only one of the two HPD bits set */
+		pr_debug("%s: HPD Intermediate state\n", __func__);
+	}
+
+	ret = connected ? 1 : 0;
+end:
+	return ERR_PTR(ret);
+}
+
+/*
+ * adv7533_enable_interrupts() - set the interrupt-enable bits selected
+ * by the CFG_* mask in main-map registers 0x94 (HPD/EDID/HDCP part 1)
+ * and 0x95 (HDCP part 2 / CEC). Read-modify-write; each register is
+ * only written when its value actually changes.
+ *
+ * Fix vs. previous version: the "&reg_val" arguments had been mangled
+ * into the "registered" sign by an HTML-entity conversion; restored.
+ *
+ * Return: 0 (I2C errors are logged by the helpers).
+ */
+static int adv7533_enable_interrupts(struct adv7533 *pdata, int interrupts)
+{
+	int ret = 0;
+	u8 reg_val, init_reg_val;
+
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x94, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HPD_INTERRUPTS)
+		reg_val |= HPD_INTERRUPTS;
+
+	if (interrupts & CFG_EDID_INTERRUPTS)
+		reg_val |= EDID_INTERRUPTS;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val |= HDCP_INTERRUPTS1;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: enabling 0x94 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x94, reg_val);
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x95, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val |= HDCP_INTERRUPTS2;
+
+	if (interrupts & CFG_CEC_INTERRUPTS)
+		reg_val |= CEC_INTERRUPTS;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: enabling 0x95 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x95, reg_val);
+	}
+end:
+	return ret;
+}
+
+/*
+ * adv7533_disable_interrupts() - mirror of adv7533_enable_interrupts():
+ * clears the selected CFG_* interrupt-enable bits in registers 0x94 and
+ * 0x95, writing each register only when its value changes.
+ *
+ * Fix vs. previous version: restored the "&reg_val" arguments that had
+ * been mangled by an HTML-entity conversion.
+ *
+ * Return: 0 (I2C errors are logged by the helpers).
+ */
+static int adv7533_disable_interrupts(struct adv7533 *pdata, int interrupts)
+{
+	int ret = 0;
+	u8 reg_val, init_reg_val;
+
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x94, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HPD_INTERRUPTS)
+		reg_val &= ~HPD_INTERRUPTS;
+
+	if (interrupts & CFG_EDID_INTERRUPTS)
+		reg_val &= ~EDID_INTERRUPTS;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val &= ~HDCP_INTERRUPTS1;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: disabling 0x94 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x94, reg_val);
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x95, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val &= ~HDCP_INTERRUPTS2;
+
+	if (interrupts & CFG_CEC_INTERRUPTS)
+		reg_val &= ~CEC_INTERRUPTS;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: disabling 0x95 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x95, reg_val);
+	}
+end:
+	return ret;
+}
+
+/*
+ * adv7533_intr_work() - deferred interrupt handler (queued from
+ * adv7533_irq). Reads the two interrupt status registers (0x96/0x97),
+ * dispatches HPD / EDID-ready / HDCP / CEC handling, acknowledges the
+ * interrupts by writing the statuses back, then re-enables whatever
+ * interrupt sources adv7533_irq disabled.
+ */
+static void adv7533_intr_work(struct work_struct *work)
+{
+	int ret;
+	u8 int_status = 0xFF;
+	u8 hdcp_cec_status = 0xFF;
+	u32 interrupts = 0;
+	int connected = false;
+	struct adv7533 *pdata;
+	struct delayed_work *dw = to_delayed_work(work);
+
+	pdata = container_of(dw, struct adv7533,
+			adv7533_intr_work_id);
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* READ Interrupt registers */
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x96, &int_status, 1);
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x97, &hdcp_cec_status, 1);
+
+	/* bits 7:6 of 0x96 flag an HPD state change */
+	if (int_status & (BIT(6) | BIT(7))) {
+		void *ptr_val = adv7533_handle_hpd_intr(pdata);
+
+		/* ERR_PTR-encoded: 1 = connected, 0 = not, <0 = error */
+		ret = PTR_ERR(ptr_val);
+		if (IS_ERR(ptr_val)) {
+			pr_err("%s: error in hpd handing: %d\n",
+				__func__, ret);
+			goto reset;
+		}
+		connected = ret;
+	}
+
+	/* EDID ready for read */
+	if ((int_status & BIT(2)) && pdata->is_power_on) {
+		pr_debug("%s: EDID READY\n", __func__);
+
+		ret = adv7533_read_edid(pdata, sizeof(pdata->edid_buf),
+			pdata->edid_buf);
+		if (ret)
+			pr_err("%s: edid read failed\n", __func__);
+
+		/* connect is reported once the EDID is available */
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_HPD_CONNECT);
+	}
+
+	if (pdata->hdcp_enabled)
+		adv7533_handle_hdcp_intr(pdata, hdcp_cec_status);
+
+	if (pdata->cec_enabled)
+		adv7533_handle_cec_intr(pdata, hdcp_cec_status);
+reset:
+	/* Clear HPD/EDID interrupts */
+	adv7533_write(pdata, I2C_ADDR_MAIN, 0x96, int_status);
+
+	/* Clear HDCP/CEC interrupts */
+	adv7533_write(pdata, I2C_ADDR_MAIN, 0x97, hdcp_cec_status);
+
+	/* Re-enable HPD interrupts */
+	interrupts |= CFG_HPD_INTERRUPTS;
+
+	/* Re-enable EDID interrupts */
+	interrupts |= CFG_EDID_INTERRUPTS;
+
+	/* Re-enable HDCP interrupts */
+	if (pdata->hdcp_enabled)
+		interrupts |= CFG_HDCP_INTERRUPTS;
+
+	/* Re-enable CEC interrupts */
+	if (pdata->cec_enabled)
+		interrupts |= CFG_CEC_INTERRUPTS;
+
+	if (adv7533_enable_interrupts(pdata, interrupts))
+		pr_err("%s: err enabling interrupts\n", __func__);
+
+	/* initialize EDID read after cable connected */
+	if (connected)
+		adv7533_edid_read_init(pdata);
+}
+
+/*
+ * adv7533_irq() - hard IRQ handler. Masks the active interrupt sources
+ * (I2C access happens here, so this is presumably a threaded/level
+ * setup — confirm against request_irq flags at probe) and defers the
+ * real work to adv7533_intr_work, which re-enables them.
+ */
+static irqreturn_t adv7533_irq(int irq, void *data)
+{
+	struct adv7533 *pdata = data;
+	u32 interrupts = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	/* disable HPD interrupts */
+	interrupts |= CFG_HPD_INTERRUPTS;
+
+	/* disable EDID interrupts */
+	interrupts |= CFG_EDID_INTERRUPTS;
+
+	/* disable HDCP interrupts */
+	if (pdata->hdcp_enabled)
+		interrupts |= CFG_HDCP_INTERRUPTS;
+
+	/* disable CEC interrupts */
+	if (pdata->cec_enabled)
+		interrupts |= CFG_CEC_INTERRUPTS;
+
+	if (adv7533_disable_interrupts(pdata, interrupts))
+		pr_err("%s: err disabling interrupts\n", __func__);
+
+	queue_delayed_work(pdata->workq, &pdata->adv7533_intr_work_id, 0);
+
+	return IRQ_HANDLED;
+}
+
+/* I2C device ids this driver binds to (terminated by the empty entry). */
+static struct i2c_device_id adv7533_id[] = {
+	{ "adv7533", 0},
+	{}
+};
+
+/*
+ * adv7533_get_platform_data() - recover the driver data from an opaque
+ * msm_dba client handle (client -> device info -> enclosing adv7533).
+ *
+ * Return: the adv7533 instance, or NULL on invalid client/device.
+ */
+static struct adv7533 *adv7533_get_platform_data(void *client)
+{
+	struct adv7533 *pdata = NULL;
+	struct msm_dba_device_info *dev;
+	struct msm_dba_client_info *cinfo =
+		(struct msm_dba_client_info *)client;
+
+	if (!cinfo) {
+		pr_err("%s: invalid client data\n", __func__);
+		goto end;
+	}
+
+	dev = cinfo->dev;
+	if (!dev) {
+		pr_err("%s: invalid device data\n", __func__);
+		goto end;
+	}
+
+	pdata = container_of(dev, struct adv7533, dev_info);
+	if (!pdata)
+		pr_err("%s: invalid platform data\n", __func__);
+
+end:
+	return pdata;
+}
+
+/*
+ * adv7533_cec_enable() - msm_dba op: turn the CEC engine on (program
+ * the enable/timing-generator/power tables, then unmask CEC
+ * interrupts) or off (mask CEC interrupts).
+ * @flags: unused.
+ *
+ * Return: interrupt (un)masking status, or -EINVAL for a bad client.
+ */
+static int adv7533_cec_enable(void *client, bool cec_on, u32 flags)
+{
+	int ret = -EINVAL;
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		goto end;
+	}
+
+	if (cec_on) {
+		adv7533_write_array(pdata, adv7533_cec_en,
+			sizeof(adv7533_cec_en));
+		adv7533_write_array(pdata, adv7533_cec_tg_init,
+			sizeof(adv7533_cec_tg_init));
+		adv7533_write_array(pdata, adv7533_cec_power,
+			sizeof(adv7533_cec_power));
+
+		pdata->cec_enabled = true;
+
+		ret = adv7533_enable_interrupts(pdata, CFG_CEC_INTERRUPTS);
+
+	} else {
+		pdata->cec_enabled = false;
+		ret = adv7533_disable_interrupts(pdata, CFG_CEC_INTERRUPTS);
+	}
+end:
+	return ret;
+}
+/*
+ * adv7533_set_audio_block() - msm_dba op: cache up to AUDIO_DATA_SIZE
+ * bytes of audio (speaker-allocation) data under ops_mutex; the cache
+ * is zeroed first so a short @size leaves no stale tail bytes.
+ */
+static void adv7533_set_audio_block(void *client, u32 size, void *buf)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	size = min_t(u32, size, AUDIO_DATA_SIZE);
+
+	memset(pdata->audio_spkr_data, 0, AUDIO_DATA_SIZE);
+	memcpy(pdata->audio_spkr_data, buf, size);
+
+	mutex_unlock(&pdata->ops_mutex);
+}
+
+/*
+ * adv7533_get_audio_block() - msm_dba op: copy up to AUDIO_DATA_SIZE
+ * bytes of the cached audio data into @buf, under ops_mutex.
+ */
+static void adv7533_get_audio_block(void *client, u32 size, void *buf)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	size = min_t(u32, size, AUDIO_DATA_SIZE);
+
+	memcpy(buf, pdata->audio_spkr_data, size);
+
+	mutex_unlock(&pdata->ops_mutex);
+}
+
+/*
+ * adv7533_check_hpd() - msm_dba op: poll current cable state from
+ * main-map register 0x42 (BIT(6)); if connected, clear pending
+ * interrupts, arm HPD/EDID interrupts and start an EDID read.
+ * @flags: unused.
+ *
+ * Fix vs. previous version: restored the "&reg_val" argument that had
+ * been mangled by an HTML-entity conversion.
+ *
+ * Return: non-zero when a cable is connected, 0 otherwise, -EINVAL on
+ * a bad client handle.
+ */
+static int adv7533_check_hpd(void *client, u32 flags)
+{
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+	u8 reg_val = 0;
+	u8 intr_status;
+	int connected = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check if cable is already connected.
+	 * Since adv7533_irq line is edge triggered,
+	 * if cable is already connected by this time
+	 * it won't trigger HPD interrupt.
+	 */
+	mutex_lock(&pdata->ops_mutex);
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x42, &reg_val, 1);
+
+	connected = (reg_val & BIT(6));
+	if (connected) {
+		pr_debug("%s: cable is connected\n", __func__);
+		/* Clear the interrupts before initiating EDID read */
+		adv7533_read(pdata, I2C_ADDR_MAIN, 0x96, &intr_status, 1);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x96, intr_status);
+		adv7533_enable_interrupts(pdata, (CFG_EDID_INTERRUPTS |
+				CFG_HPD_INTERRUPTS));
+
+		adv7533_edid_read_init(pdata);
+	}
+	mutex_unlock(&pdata->ops_mutex);
+
+	return connected;
+}
+
+/* Device Operations */
+/*
+ * adv7533_power_on() - msm_dba op: power the chip up (apply the init
+ * register table and arm HPD interrupts) or down (main-map 0x41 =
+ * 0x50, then notify clients of the disconnect).
+ * @flags: unused.
+ *
+ * NOTE(review): when on=true and the chip is already powered, ret is
+ * returned unchanged as -EINVAL — looks unintentional; confirm callers
+ * tolerate it. Power-down likewise returns -EINVAL.
+ */
+static int adv7533_power_on(void *client, bool on, u32 flags)
+{
+	int ret = -EINVAL;
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return ret;
+	}
+
+	pr_debug("%s: %d\n", __func__, on);
+	mutex_lock(&pdata->ops_mutex);
+
+	if (on && !pdata->is_power_on) {
+		adv7533_write_array(pdata, adv7533_init_setup,
+			sizeof(adv7533_init_setup));
+
+		ret = adv7533_enable_interrupts(pdata, CFG_HPD_INTERRUPTS);
+		if (ret) {
+			pr_err("%s: Failed: enable HPD intr %d\n",
+				__func__, ret);
+			goto end;
+		}
+		pdata->is_power_on = true;
+	} else if (!on) {
+		/* power down hdmi */
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x41, 0x50);
+		pdata->is_power_on = false;
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_HPD_DISCONNECT);
+	}
+end:
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_video_setup() - program DSI timing into the CEC/DSI map
+ * (registers 0x28-0x37). Each quantity is a 12-bit value split across
+ * two registers: high 8 bits in the first, low 4 bits in the upper
+ * nibble of the second.
+ */
+static void adv7533_video_setup(struct adv7533 *pdata,
+	struct msm_dba_video_cfg *cfg)
+{
+	u32 h_total, hpw, hfp, hbp;
+	u32 v_total, vpw, vfp, vbp;
+
+	if (!pdata || !cfg) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	h_total = cfg->h_active + cfg->h_front_porch +
+	      cfg->h_pulse_width + cfg->h_back_porch;
+	v_total = cfg->v_active + cfg->v_front_porch +
+	      cfg->v_pulse_width + cfg->v_back_porch;
+
+	hpw = cfg->h_pulse_width;
+	hfp = cfg->h_front_porch;
+	hbp = cfg->h_back_porch;
+
+	vpw = cfg->v_pulse_width;
+	vfp = cfg->v_front_porch;
+	vbp = cfg->v_back_porch;
+
+	pr_debug("h_total 0x%x, h_active 0x%x, hfp 0x%x, hpw 0x%x, hbp 0x%x\n",
+		h_total, cfg->h_active, cfg->h_front_porch,
+		cfg->h_pulse_width, cfg->h_back_porch);
+
+	pr_debug("v_total 0x%x, v_active 0x%x, vfp 0x%x, vpw 0x%x, vbp 0x%x\n",
+		v_total, cfg->v_active, cfg->v_front_porch,
+		cfg->v_pulse_width, cfg->v_back_porch);
+
+
+	/* h_width */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x28, ((h_total & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x29, ((h_total & 0xF) << 4));
+
+	/* hsync_width */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2A, ((hpw & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2B, ((hpw & 0xF) << 4));
+
+	/* hfp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2C, ((hfp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2D, ((hfp & 0xF) << 4));
+
+	/* hbp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2E, ((hbp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2F, ((hbp & 0xF) << 4));
+
+	/* v_total */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x30, ((v_total & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x31, ((v_total & 0xF) << 4));
+
+	/* vsync_width */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x32, ((vpw & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x33, ((vpw & 0xF) << 4));
+
+	/* vfp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x34, ((vfp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x35, ((vfp & 0xF) << 4));
+
+	/* vbp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x36, ((vbp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x37, ((vbp & 0xF) << 4));
+}
+
+/*
+ * adv7533_config_vreg() - configure (@enable != 0) or deconfigure the
+ * regulators parsed from DT. Having no regulators is not an error.
+ *
+ * Return: 0 on success or the msm_mdss_config_vreg() error.
+ */
+static int adv7533_config_vreg(struct adv7533 *pdata, int enable)
+{
+	struct mdss_module_power *power_data;
+	int rc;
+
+	if (!pdata) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	power_data = &pdata->power_data;
+	if (!power_data->num_vreg) {
+		pr_warn("%s: Error: invalid power data\n", __func__);
+		return 0;
+	}
+
+	/* single call; the last argument selects config vs. deconfig */
+	rc = msm_mdss_config_vreg(&pdata->i2c_client->dev,
+			power_data->vreg_config,
+			power_data->num_vreg, enable ? 1 : 0);
+	if (rc) {
+		if (enable)
+			pr_err("%s: Failed to config vreg. Err=%d\n",
+				__func__, rc);
+		else
+			pr_err("%s: Failed to deconfig vreg. Err=%d\n",
+				__func__, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * adv7533_enable_vreg() - enable (@enable != 0) or disable the
+ * regulators parsed from DT. Having no regulators is not an error.
+ *
+ * Return: 0 on success or the msm_mdss_enable_vreg() error.
+ */
+static int adv7533_enable_vreg(struct adv7533 *pdata, int enable)
+{
+	struct mdss_module_power *power_data;
+	int rc;
+
+	if (!pdata) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	power_data = &pdata->power_data;
+	if (!power_data->num_vreg) {
+		pr_warn("%s: Error: invalid power data\n", __func__);
+		return 0;
+	}
+
+	/* single call; the last argument selects enable vs. disable */
+	rc = msm_mdss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, enable ? 1 : 0);
+	if (rc) {
+		if (enable)
+			pr_err("%s: Failed to enable vreg. Err=%d\n",
+				__func__, rc);
+		else
+			pr_err("%s: Failed to disable vreg. Err=%d\n",
+				__func__, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * adv7533_video_on() - msm_dba op: program DSI lane count, video
+ * timing, HDMI/DVI mode, AVI scan info and aspect ratio, then apply
+ * the video-enable table.
+ * @flags: unused.
+ *
+ * Fix vs. previous version: restored the "&reg_val" argument that had
+ * been mangled by an HTML-entity conversion.
+ *
+ * Return: 0, or -EINVAL for bad client/cfg.
+ */
+static int adv7533_video_on(void *client, bool on,
+	struct msm_dba_video_cfg *cfg, u32 flags)
+{
+	int ret = 0;
+	u8 lanes;
+	u8 reg_val = 0;
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+
+	if (!pdata || !cfg) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	/* DSI lane configuration */
+	lanes = (cfg->num_of_input_lanes << 4);
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x1C, lanes);
+
+	adv7533_video_setup(pdata, cfg);
+
+	/* hdmi/dvi mode */
+	if (cfg->hdmi_mode)
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0xAF, 0x06);
+	else
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0xAF, 0x04);
+
+	/* set scan info for AVI Infoframe*/
+	if (cfg->scaninfo) {
+		adv7533_read(pdata, I2C_ADDR_MAIN, 0x55, &reg_val, 1);
+		reg_val |= cfg->scaninfo & (BIT(1) | BIT(0));
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x55, reg_val);
+	}
+
+	/*
+	 * aspect ratio and sync polarity set up.
+	 * Currently adv only supports 16:9 or 4:3 aspect ratio
+	 * configuration.
+	 */
+	if (cfg->h_active * 3 - cfg->v_active * 4) {
+		/* anything that is not exactly 4:3 is programmed as 16:9 */
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x17, 0x02);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x56, 0x28);
+	} else {
+		/* 4:3 aspect ratio */
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x17, 0x00);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x56, 0x18);
+	}
+
+	adv7533_write_array(pdata, adv7533_video_en,
+		sizeof(adv7533_video_en));
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_hdcp_enable() - toggle HDCP and HDMI encryption bits (reg 0xAF).
+ * @client:  opaque DBA client handle
+ * @hdcp_on: enable HDCP engine (bit 7)
+ * @enc_on:  enable encryption (bit 4)
+ * @flags:   unused
+ *
+ * Also enables/disables the HDCP interrupt sources to track
+ * authentication state. Returns 0 on success, -EINVAL on bad input.
+ */
+static int adv7533_hdcp_enable(void *client, bool hdcp_on,
+	bool enc_on, u32 flags)
+{
+	int ret = -EINVAL;
+	u8 reg_val;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0xAF, &reg_val, 1);
+
+	if (hdcp_on)
+		reg_val |= BIT(7);
+	else
+		reg_val &= ~BIT(7);
+
+	if (enc_on)
+		reg_val |= BIT(4);
+	else
+		reg_val &= ~BIT(4);
+
+	adv7533_write(pdata, I2C_ADDR_MAIN, 0xAF, reg_val);
+
+	pdata->hdcp_enabled = hdcp_on;
+
+	if (pdata->hdcp_enabled)
+		adv7533_enable_interrupts(pdata, CFG_HDCP_INTERRUPTS);
+	else
+		adv7533_disable_interrupts(pdata, CFG_HDCP_INTERRUPTS);
+
+	/* register sequence completed - report success to the caller */
+	ret = 0;
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_configure_audio() - build and program the audio register set.
+ * @client: opaque DBA client handle
+ * @cfg:    audio configuration (channel status, rates, ACR N/CTS, ...)
+ * @flags:  unused
+ *
+ * The register image is assembled in reg_cfg[] and written in one pass.
+ * Returns the result of the final register-array write, or -EINVAL on
+ * bad input.
+ */
+static int adv7533_configure_audio(void *client,
+	struct msm_dba_audio_cfg *cfg, u32 flags)
+{
+	int ret = -EINVAL;
+	int sampling_rate = 0;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+	struct adv7533_reg_cfg reg_cfg[] = {
+		{I2C_ADDR_MAIN, 0x12, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x13, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x14, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x15, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x0A, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x0C, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x0D, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x03, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x02, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x01, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x09, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x08, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x07, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x73, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x76, 0x00, 0},
+	};
+
+	if (!pdata || !cfg) {
+		pr_err("%s: invalid data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	/* channel status byte 0: copyright / pre-emphasis / accuracy */
+	if (cfg->copyright == MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED)
+		reg_cfg[0].val |= BIT(5);
+
+	if (cfg->pre_emphasis == MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us)
+		reg_cfg[0].val |= BIT(2);
+
+	if (cfg->clock_accuracy == MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1)
+		reg_cfg[0].val |= BIT(0);
+	else if (cfg->clock_accuracy == MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3)
+		reg_cfg[0].val |= BIT(1);
+
+	reg_cfg[1].val = cfg->channel_status_category_code;
+
+	reg_cfg[2].val = (cfg->channel_status_word_length & 0xF) << 0 |
+		(cfg->channel_status_source_number & 0xF) << 4;
+
+	/* IEC 60958 sampling frequency code for channel status byte 3 */
+	if (cfg->sampling_rate == MSM_DBA_AUDIO_32KHZ)
+		sampling_rate = 0x3;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_44P1KHZ)
+		sampling_rate = 0x0;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_48KHZ)
+		sampling_rate = 0x2;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_88P2KHZ)
+		sampling_rate = 0x8;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_96KHZ)
+		sampling_rate = 0xA;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_176P4KHZ)
+		sampling_rate = 0xC;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_192KHZ)
+		sampling_rate = 0xE;
+
+	reg_cfg[3].val = (sampling_rate & 0xF) << 4;
+
+	if (cfg->mode == MSM_DBA_AUDIO_MODE_MANUAL)
+		reg_cfg[4].val |= BIT(7);
+
+	if (cfg->interface == MSM_DBA_AUDIO_SPDIF_INTERFACE)
+		reg_cfg[4].val |= BIT(4);
+
+	if (cfg->interface == MSM_DBA_AUDIO_I2S_INTERFACE) {
+		/* i2s enable */
+		reg_cfg[5].val |= BIT(2);
+
+		/* audio samp freq select */
+		reg_cfg[5].val |= BIT(7);
+	}
+
+	/* format */
+	reg_cfg[5].val |= cfg->i2s_fmt & 0x3;
+
+	/* channel status override */
+	reg_cfg[5].val |= (cfg->channel_status_source & 0x1) << 6;
+
+	/* sample word lengths, default 24 */
+	reg_cfg[6].val |= 0x18;
+
+	/* endian order of incoming I2S data */
+	if (cfg->word_endianness == MSM_DBA_AUDIO_WORD_LITTLE_ENDIAN)
+		reg_cfg[6].val |= 0x1 << 7;
+
+	/* compressed audio v - bit */
+	reg_cfg[6].val |= (cfg->channel_status_v_bit & 0x1) << 5;
+
+	/* ACR - N (20 bit value split over three registers) */
+	reg_cfg[7].val |= (cfg->n & 0x000FF) >> 0;
+	reg_cfg[8].val |= (cfg->n & 0x0FF00) >> 8;
+	reg_cfg[9].val |= (cfg->n & 0xF0000) >> 16;
+
+	/* ACR - CTS */
+	reg_cfg[10].val |= (cfg->cts & 0x000FF) >> 0;
+	reg_cfg[11].val |= (cfg->cts & 0x0FF00) >> 8;
+	reg_cfg[12].val |= (cfg->cts & 0xF0000) >> 16;
+
+	/* channel count */
+	reg_cfg[13].val |= (cfg->channels & 0x3);
+
+	/* CA */
+	reg_cfg[14].val = cfg->channel_allocation;
+
+	/* propagate the write result; previously -EINVAL was always returned */
+	ret = adv7533_write_array(pdata, reg_cfg, sizeof(reg_cfg));
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_hdmi_cec_write() - queue a CEC message and trigger transmission.
+ * @client: opaque DBA client handle
+ * @size:   message length in bytes
+ * @buf:    CEC message payload (must be non-NULL)
+ * @flags:  unused
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int adv7533_hdmi_cec_write(void *client, u32 size,
+	char *buf, u32 flags)
+{
+	int ret = -EINVAL;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	/* buf is dereferenced by adv7533_cec_prepare_msg - validate it too */
+	if (!pdata || !buf) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	ret = adv7533_cec_prepare_msg(pdata, buf, size);
+	if (ret)
+		goto end;
+
+	/* Enable CEC msg tx with NACK 3 retries */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x81, 0x07);
+end:
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_hdmi_cec_read() - fetch the oldest pending received CEC message.
+ * @client: opaque DBA client handle
+ * @size:   out: number of valid bytes copied to @buf (0 if none)
+ * @buf:    destination buffer, at least CEC_MSG_SIZE bytes
+ * @flags:  unused
+ *
+ * Returns 0 when a message was copied, -EINVAL otherwise.
+ */
+static int adv7533_hdmi_cec_read(void *client, u32 *size, char *buf, u32 flags)
+{
+	int ret = -EINVAL;
+	int i;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	/* both size and buf are written below - validate them up front */
+	if (!pdata || !size || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	/* scan the ring for the first message still marked pending */
+	for (i = 0; i < ADV7533_CEC_BUF_MAX; i++) {
+		struct adv7533_cec_msg *msg = &pdata->cec_msg[i];
+
+		if (msg->pending && msg->timestamp) {
+			memcpy(buf, msg->buf, CEC_MSG_SIZE);
+			msg->pending = false;
+			break;
+		}
+	}
+
+	if (i < ADV7533_CEC_BUF_MAX) {
+		*size = CEC_MSG_SIZE;
+		ret = 0;
+	} else {
+		pr_err("%s: no pending cec msg\n", __func__);
+		*size = 0;
+	}
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_get_edid_size() - report the size of the cached EDID block.
+ * @client: opaque DBA client handle
+ * @size:   out: EDID segment size in bytes
+ * @flags:  unused
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+static int adv7533_get_edid_size(void *client, u32 *size, u32 flags)
+{
+	int ret = 0;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		/* previously returned 0 (success) here by mistake */
+		return -EINVAL;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	if (!size) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	*size = EDID_SEG_SIZE;
+end:
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/*
+ * adv7533_get_raw_edid() - copy the cached raw EDID into a caller buffer.
+ * @client: opaque DBA client handle
+ * @size:   caller buffer size; the copy is clamped to the cached EDID size
+ * @buf:    destination buffer
+ * @flags:  unused
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ *
+ * Note: the original error path jumped past mutex_lock() to a label that
+ * called mutex_unlock() on a never-locked mutex (and dereferenced a NULL
+ * pdata); the invalid-argument case now returns directly.
+ */
+static int adv7533_get_raw_edid(void *client,
+	u32 size, char *buf, u32 flags)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	size = min_t(u32, size, sizeof(pdata->edid_buf));
+
+	memcpy(buf, pdata->edid_buf, size);
+
+	mutex_unlock(&pdata->ops_mutex);
+	return 0;
+}
+
+/*
+ * adv7533_write_reg() - debug hook: write one chip register.
+ * @dev: DBA device wrapper embedded in struct adv7533
+ * @reg: register offset; bit 8 selects the CEC/DSI i2c page,
+ *       otherwise the main i2c page
+ * @val: byte value to program (low 8 bits used)
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+static int adv7533_write_reg(struct msm_dba_device_info *dev,
+		u32 reg, u32 val)
+{
+	struct adv7533 *pdata;
+	int ret = -EINVAL;
+	u8 i2ca = 0;
+
+	if (!dev)
+		goto end;
+
+	/* container_of() on a valid dev pointer can never yield NULL */
+	pdata = container_of(dev, struct adv7533, dev_info);
+
+	i2ca = ((reg & 0x100) ? pdata->cec_dsi_i2c_addr : pdata->main_i2c_addr);
+
+	adv7533_write(pdata, i2ca, (u8)(reg & 0xFF), (u8)(val & 0xFF));
+	/* previously ret was left at -EINVAL even after a successful write */
+	ret = 0;
+end:
+	return ret;
+}
+
+/*
+ * adv7533_read_reg() - debug hook: read one chip register.
+ * @dev: DBA device wrapper embedded in struct adv7533
+ * @reg: register offset; bit 8 selects the CEC/DSI i2c page,
+ *       otherwise the main i2c page
+ * @val: out: byte read, zero-extended to u32
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments. The original
+ * returned 0 even when @dev was NULL and *val was never written.
+ */
+static int adv7533_read_reg(struct msm_dba_device_info *dev,
+		u32 reg, u32 *val)
+{
+	int ret = -EINVAL;
+	u8 byte_val = 0;
+	u8 i2ca = 0;
+	struct adv7533 *pdata;
+
+	if (!dev || !val)
+		goto end;
+
+	/* container_of() on a valid dev pointer can never yield NULL */
+	pdata = container_of(dev, struct adv7533, dev_info);
+
+	i2ca = ((reg & 0x100) ? pdata->cec_dsi_i2c_addr : pdata->main_i2c_addr);
+
+	adv7533_read(pdata, i2ca, (u8)(reg & 0xFF), &byte_val, 1);
+
+	*val = (u32)byte_val;
+	ret = 0;
+end:
+	return ret;
+}
+
+/*
+ * adv7533_register_dba() - hook this chip into the MSM DBA framework.
+ * @pdata: driver private data
+ *
+ * Fills in the client-facing ops table and the device (debug) ops table,
+ * names the device "adv7533", initializes the device mutex and client
+ * list, and finally registers with the DBA core.
+ *
+ * Returns 0 on success or the error from msm_dba_add_probed_device().
+ */
+static int adv7533_register_dba(struct adv7533 *pdata)
+{
+	struct msm_dba_ops *client_ops;
+	struct msm_dba_device_ops *dev_ops;
+
+	if (!pdata)
+		return -EINVAL;
+
+	client_ops = &pdata->dev_info.client_ops;
+	dev_ops = &pdata->dev_info.dev_ops;
+
+	/* ops exposed to DBA clients (display/audio/CEC paths) */
+	client_ops->power_on        = adv7533_power_on;
+	client_ops->video_on        = adv7533_video_on;
+	client_ops->configure_audio = adv7533_configure_audio;
+	client_ops->hdcp_enable     = adv7533_hdcp_enable;
+	client_ops->hdmi_cec_on     = adv7533_cec_enable;
+	client_ops->hdmi_cec_write  = adv7533_hdmi_cec_write;
+	client_ops->hdmi_cec_read   = adv7533_hdmi_cec_read;
+	client_ops->get_edid_size   = adv7533_get_edid_size;
+	client_ops->get_raw_edid    = adv7533_get_raw_edid;
+	client_ops->check_hpd       = adv7533_check_hpd;
+	client_ops->get_audio_block = adv7533_get_audio_block;
+	client_ops->set_audio_block = adv7533_set_audio_block;
+
+	/* ops used by the DBA debug/sysfs layer */
+	dev_ops->write_reg = adv7533_write_reg;
+	dev_ops->read_reg = adv7533_read_reg;
+	dev_ops->dump_debug_info = adv7533_dump_debug_info;
+
+	strlcpy(pdata->dev_info.chip_name, "adv7533",
+		sizeof(pdata->dev_info.chip_name));
+
+	mutex_init(&pdata->dev_info.dev_mutex);
+
+	INIT_LIST_HEAD(&pdata->dev_info.client_list);
+
+	return msm_dba_add_probed_device(&pdata->dev_info);
+}
+
+/*
+ * adv7533_unregister_dba() - remove this chip from the MSM DBA framework.
+ * @pdata: driver private data; NULL is tolerated (no-op)
+ */
+static void adv7533_unregister_dba(struct adv7533 *pdata)
+{
+	if (!pdata)
+		return;
+
+	msm_dba_remove_probed_device(&pdata->dev_info);
+}
+
+
+/*
+ * adv7533_probe() - i2c probe: power up, identify and register the chip.
+ * @client: i2c client created from the DT node
+ * @id:     matched device id (unused)
+ *
+ * Sequence: parse DT -> regulators on -> read revision -> program i2c
+ * map -> register with DBA -> pinctrl/GPIO -> IRQ -> sysfs -> workqueue
+ * -> optional default audio config. Each failure unwinds everything
+ * set up before it.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int adv7533_probe(struct i2c_client *client,
+	 const struct i2c_device_id *id)
+{
+	/*
+	 * Was "static struct adv7533 *pdata": a function-static pointer
+	 * would be shared by every probed instance, breaking multi-device
+	 * support and leaving a stale pointer after remove.
+	 */
+	struct adv7533 *pdata;
+	int ret = 0;
+
+	if (!client || !client->dev.of_node) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = devm_kzalloc(&client->dev,
+		sizeof(struct adv7533), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = adv7533_parse_dt(&client->dev, pdata);
+	if (ret) {
+		pr_err("%s: Failed to parse DT\n", __func__);
+		goto err_dt_parse;
+	}
+
+	pdata->i2c_client = client;
+
+	ret = adv7533_config_vreg(pdata, 1);
+	if (ret) {
+		pr_err("%s: Failed to config vreg\n", __func__);
+		return -EPROBE_DEFER;
+	}
+	adv7533_enable_vreg(pdata, 1);
+
+	mutex_init(&pdata->ops_mutex);
+
+	ret = adv7533_read_device_rev(pdata);
+	if (ret) {
+		pr_err("%s: Failed to read chip rev\n", __func__);
+		goto err_i2c_prog;
+	}
+
+	ret = adv7533_program_i2c_addr(pdata);
+	if (ret != 0) {
+		pr_err("%s: Failed to program i2c addr\n", __func__);
+		goto err_i2c_prog;
+	}
+
+	ret = adv7533_register_dba(pdata);
+	if (ret) {
+		pr_err("%s: Error registering with DBA %d\n",
+			__func__, ret);
+		goto err_dba_reg;
+	}
+
+	/* pinctrl failure is logged but not fatal */
+	ret = pinctrl_select_state(pdata->ts_pinctrl,
+		pdata->pinctrl_state_active);
+	if (ret < 0)
+		pr_err("%s: Failed to select %s pinstate %d\n",
+			__func__, PINCTRL_STATE_ACTIVE, ret);
+
+	ret = adv7533_gpio_configure(pdata, true);
+	if (ret) {
+		pr_err("%s: Failed to configure GPIOs\n", __func__);
+		goto err_gpio_cfg;
+	}
+
+	if (gpio_is_valid(pdata->switch_gpio))
+		gpio_set_value(pdata->switch_gpio, pdata->switch_flags);
+
+	pdata->irq = gpio_to_irq(pdata->irq_gpio);
+
+	ret = request_threaded_irq(pdata->irq, NULL, adv7533_irq,
+		IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "adv7533", pdata);
+	if (ret) {
+		pr_err("%s: Failed to enable ADV7533 interrupt\n",
+			__func__);
+		goto err_irq;
+	}
+
+	dev_set_drvdata(&client->dev, &pdata->dev_info);
+	ret = msm_dba_helper_sysfs_init(&client->dev);
+	if (ret) {
+		pr_err("%s: sysfs init failed\n", __func__);
+		goto err_dba_helper;
+	}
+
+	pdata->workq = create_workqueue("adv7533_workq");
+	if (!pdata->workq) {
+		pr_err("%s: workqueue creation failed.\n", __func__);
+		ret = -EPERM;
+		goto err_workqueue;
+	}
+
+	if (pdata->audio) {
+		pr_debug("%s: enabling default audio configs\n", __func__);
+		/*
+		 * Capture the error: the original left ret == 0 here, so
+		 * a failed I2S setup tore everything down and then
+		 * reported probe success.
+		 */
+		ret = adv7533_write_array(pdata, I2S_cfg, sizeof(I2S_cfg));
+		if (ret)
+			goto end;
+	}
+
+	INIT_DELAYED_WORK(&pdata->adv7533_intr_work_id, adv7533_intr_work);
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_active(&client->dev);
+
+	return 0;
+end:
+	if (pdata->workq)
+		destroy_workqueue(pdata->workq);
+err_workqueue:
+	msm_dba_helper_sysfs_remove(&client->dev);
+err_dba_helper:
+	disable_irq(pdata->irq);
+	free_irq(pdata->irq, pdata);
+err_irq:
+	adv7533_gpio_configure(pdata, false);
+err_gpio_cfg:
+	adv7533_unregister_dba(pdata);
+err_dba_reg:
+err_i2c_prog:
+	adv7533_enable_vreg(pdata, 0);
+	adv7533_config_vreg(pdata, 0);
+err_dt_parse:
+	devm_kfree(&client->dev, pdata);
+	return ret;
+}
+
+/*
+ * adv7533_remove() - i2c remove: undo everything adv7533_probe() set up.
+ * @client: i2c client being removed
+ *
+ * The original skipped several probe-time acquisitions (workqueue,
+ * sysfs group, DBA registration, regulator enable); they are released
+ * here in reverse probe order.
+ *
+ * Returns the gpio deconfiguration result, or -EINVAL on bad input.
+ */
+static int adv7533_remove(struct i2c_client *client)
+{
+	int ret = -EINVAL;
+	struct msm_dba_device_info *dev;
+	struct adv7533 *pdata;
+
+	if (!client)
+		goto end;
+
+	dev = dev_get_drvdata(&client->dev);
+	if (!dev)
+		goto end;
+
+	pdata = container_of(dev, struct adv7533, dev_info);
+	if (!pdata)
+		goto end;
+
+	pm_runtime_disable(&client->dev);
+	disable_irq(pdata->irq);
+	free_irq(pdata->irq, pdata);
+
+	/* release probe-time resources in reverse order */
+	if (pdata->workq)
+		destroy_workqueue(pdata->workq);
+	msm_dba_helper_sysfs_remove(&client->dev);
+	adv7533_unregister_dba(pdata);
+
+	adv7533_enable_vreg(pdata, 0);
+	adv7533_config_vreg(pdata, 0);
+	ret = adv7533_gpio_configure(pdata, false);
+
+	mutex_destroy(&pdata->ops_mutex);
+
+	devm_kfree(&client->dev, pdata);
+
+end:
+	return ret;
+}
+
+/* i2c driver glue; matched against adv7533_id (declared elsewhere) */
+static struct i2c_driver adv7533_driver = {
+	.driver = {
+		.name = "adv7533",
+		.owner = THIS_MODULE,
+	},
+	.probe = adv7533_probe,
+	.remove = adv7533_remove,
+	.id_table = adv7533_id,
+};
+
+/* module entry: register the i2c driver with the core */
+static int __init adv7533_init(void)
+{
+	return i2c_add_driver(&adv7533_driver);
+}
+
+/* module exit: unregister the i2c driver */
+static void __exit adv7533_exit(void)
+{
+	i2c_del_driver(&adv7533_driver);
+}
+
+/* "panel=" module parameter mirrors the MDSS boot-time panel string */
+module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0000);
+
+module_init(adv7533_init);
+module_exit(adv7533_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("adv7533 driver");
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba.c b/drivers/video/fbdev/msm/msm_dba/msm_dba.c
new file mode 100644
index 0000000..cffefae
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <video/msm_dba.h>
+#include <msm_dba_internal.h>
+
+static DEFINE_MUTEX(register_mutex);
+
+/*
+ * msm_dba_register_client() - attach a display client to a probed DBA device.
+ * @info: lookup key (chip name + instance id) plus client callbacks
+ * @ops:  out: filled with the device's client ops table
+ *
+ * On success returns an opaque client handle (pass it back to
+ * msm_dba_deregister_client()); on failure returns ERR_PTR().
+ *
+ * Note: the original text had "&register_mutex" mis-encoded as
+ * "(R)ister_mutex" (an HTML-entity garble of "&reg"); restored here.
+ */
+void *msm_dba_register_client(struct msm_dba_reg_info *info,
+			      struct msm_dba_ops *ops)
+{
+	int rc = 0;
+	struct msm_dba_device_info *device = NULL;
+	struct msm_dba_client_info *client = NULL;
+
+	pr_debug("%s: ENTER\n", __func__);
+
+	if (!info || !ops) {
+		pr_err("%s: Invalid params\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&register_mutex);
+
+	pr_debug("%s: Client(%s) Chip(%s) Instance(%d)\n", __func__,
+		 info->client_name, info->chip_name, info->instance_id);
+
+	rc = msm_dba_get_probed_device(info, &device);
+	if (rc) {
+		pr_err("%s: Device not found (%s, %d)\n", __func__,
+		       info->chip_name,
+		       info->instance_id);
+		mutex_unlock(&register_mutex);
+		return ERR_PTR(rc);
+	}
+
+	pr_debug("%s: Client(%s) device found\n", __func__, info->client_name);
+
+	/* kzalloc already zero-fills; no extra memset needed */
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client) {
+		mutex_unlock(&register_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	client->dev = device;
+	strlcpy(client->client_name, info->client_name,
+		MSM_DBA_CLIENT_NAME_LEN);
+
+	client->cb = info->cb;
+	client->cb_data = info->cb_data;
+
+	mutex_lock_nested(&device->dev_mutex, SINGLE_DEPTH_NESTING);
+	list_add(&client->list, &device->client_list);
+	*ops = device->client_ops;
+	mutex_unlock(&device->dev_mutex);
+
+	if (device->reg_fxn) {
+		rc = device->reg_fxn(client);
+		if (rc) {
+			pr_err("%s: Client register failed (%s, %d)\n",
+			       __func__, info->chip_name, info->instance_id);
+			/* remove the client from list before freeing */
+			mutex_lock_nested(&device->dev_mutex,
+					  SINGLE_DEPTH_NESTING);
+			list_del(&client->list);
+			mutex_unlock(&device->dev_mutex);
+			kfree(client);
+			mutex_unlock(&register_mutex);
+			return ERR_PTR(rc);
+		}
+	}
+
+	mutex_unlock(&register_mutex);
+
+	pr_debug("%s: EXIT\n", __func__);
+	return client;
+}
+EXPORT_SYMBOL(msm_dba_register_client);
+EXPORT_SYMBOL(msm_dba_register_client);
+
+/*
+ * msm_dba_deregister_client() - detach a client handle and free it.
+ * @client: handle returned by msm_dba_register_client()
+ *
+ * Calls the device's optional dereg hook, unlinks the client from the
+ * device list under dev_mutex, and frees the handle.
+ *
+ * Note: the original text had "&register_mutex" mis-encoded as
+ * "(R)ister_mutex" (an HTML-entity garble of "&reg"); restored here.
+ *
+ * Returns the dereg hook result (0 if none or on success).
+ */
+int msm_dba_deregister_client(void *client)
+{
+	int rc = 0;
+	struct msm_dba_client_info *handle = client;
+	struct msm_dba_client_info *node = NULL;
+	struct list_head *tmp = NULL;
+	struct list_head *position = NULL;
+
+	pr_debug("%s: ENTER\n", __func__);
+
+	if (!handle) {
+		pr_err("%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&register_mutex);
+
+	pr_debug("%s: Client(%s) Chip(%s) Instance(%d)\n", __func__,
+		 handle->client_name, handle->dev->chip_name,
+		 handle->dev->instance_id);
+
+	if (handle->dev->dereg_fxn) {
+		rc = handle->dev->dereg_fxn(handle);
+		if (rc) {
+			pr_err("%s: Client deregister failed (%s)\n",
+			       __func__, handle->client_name);
+		}
+	}
+
+	mutex_lock_nested(&handle->dev->dev_mutex, SINGLE_DEPTH_NESTING);
+
+	/* safe walk: the matching node is unlinked inside the loop */
+	list_for_each_safe(position, tmp, &handle->dev->client_list) {
+
+		node = list_entry(position, struct msm_dba_client_info, list);
+
+		if (node == handle) {
+			list_del(&node->list);
+			break;
+		}
+	}
+
+	mutex_unlock(&handle->dev->dev_mutex);
+
+	kfree(handle);
+
+	mutex_unlock(&register_mutex);
+
+	pr_debug("%s: EXIT (%d)\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(msm_dba_deregister_client);
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_debug.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_debug.c
new file mode 100644
index 0000000..f59b7f5
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_debug.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+
+#include <video/msm_dba.h>
+#include "msm_dba_internal.h"
+
+/*
+ * to_dba_dev() - fetch the DBA device info stashed in drvdata.
+ * Returns NULL (after logging) when @dev is NULL.
+ */
+static inline struct msm_dba_device_info *to_dba_dev(struct device *dev)
+{
+	if (!dev) {
+		pr_err("%s: dev is NULL\n", __func__);
+		return NULL;
+	}
+	return dev_get_drvdata(dev);
+}
+
+/* sysfs read: "<chip_name>:<instance_id>" */
+static ssize_t device_name_rda_attr(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+	ssize_t len;
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	len = snprintf(buf, PAGE_SIZE, "%s:%d\n", dba_dev->chip_name,
+		       dba_dev->instance_id);
+	return len;
+}
+
+/* sysfs read: one registered client name per line */
+static ssize_t client_list_rda_attr(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+	struct msm_dba_client_info *node;
+	ssize_t len = 0;
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* walk under dev_mutex so the list cannot change mid-print */
+	mutex_lock(&dba_dev->dev_mutex);
+	list_for_each_entry(node, &dba_dev->client_list, list)
+		len += snprintf(buf + len, (PAGE_SIZE - len), "%s\n",
+				node->client_name);
+	mutex_unlock(&dba_dev->dev_mutex);
+
+	return len;
+}
+
+/* sysfs read: device power status followed by per-client status lines */
+static ssize_t power_status_rda_attr(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+	struct msm_dba_client_info *node;
+	ssize_t len;
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dba_dev->dev_mutex);
+	len = snprintf(buf, PAGE_SIZE, "power_status:%d\n",
+		       dba_dev->power_status);
+	list_for_each_entry(node, &dba_dev->client_list, list)
+		len += snprintf(buf + len, (PAGE_SIZE - len),
+				"client: %s, status = %d\n",
+				node->client_name, node->power_on);
+	mutex_unlock(&dba_dev->dev_mutex);
+
+	return len;
+}
+
+/* sysfs read: device video status followed by per-client status lines */
+static ssize_t video_status_rda_attr(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+	struct msm_dba_client_info *node;
+	ssize_t len;
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dba_dev->dev_mutex);
+	len = snprintf(buf, PAGE_SIZE, "video_status:%d\n",
+		       dba_dev->video_status);
+	list_for_each_entry(node, &dba_dev->client_list, list)
+		len += snprintf(buf + len, (PAGE_SIZE - len),
+				"client: %s, status = %d\n",
+				node->client_name, node->video_on);
+	mutex_unlock(&dba_dev->dev_mutex);
+
+	return len;
+}
+
+/* sysfs read: device audio status followed by per-client status lines */
+static ssize_t audio_status_rda_attr(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+	struct msm_dba_client_info *node;
+	ssize_t len;
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dba_dev->dev_mutex);
+	len = snprintf(buf, PAGE_SIZE, "audio_status:%d\n",
+		       dba_dev->audio_status);
+	list_for_each_entry(node, &dba_dev->client_list, list)
+		len += snprintf(buf + len, (PAGE_SIZE - len),
+				"client: %s, status = %d\n",
+				node->client_name, node->audio_on);
+	mutex_unlock(&dba_dev->dev_mutex);
+
+	return len;
+}
+
+/*
+ * write_reg_wta_attr() - sysfs write: "<reg>:<val>" triggers a register write.
+ *
+ * Fixes: "&reg" was garbled to an (R) entity in the kstrtol call, and a
+ * missing ':' in the input left valstr NULL, which kstrtol would then
+ * dereference.
+ */
+static ssize_t write_reg_wta_attr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	char *regstr, *valstr, *ptr;
+	char str[20];
+	long reg = 0;
+	long val = 0;
+	int rc = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* strlcpy bounds the copy and always NUL-terminates */
+	strlcpy(str, buf, sizeof(str));
+
+	ptr = str;
+	regstr = strsep(&ptr, ":");
+	valstr = strsep(&ptr, ":");
+
+	/* valstr is NULL when the input has no ':' separator */
+	if (!regstr || !valstr) {
+		pr_err("%s: expected <reg>:<val>\n", __func__);
+		return count;
+	}
+
+	rc = kstrtol(regstr, 0, &reg);
+	if (rc) {
+		pr_err("%s: kstrol error %d\n", __func__, rc);
+	} else {
+		rc = kstrtol(valstr, 0, &val);
+		if (rc)
+			pr_err("%s: kstrol error for val %d\n", __func__, rc);
+	}
+
+	if (!rc) {
+		mutex_lock(&device->dev_mutex);
+
+		if (device->dev_ops.write_reg) {
+			rc = device->dev_ops.write_reg(device,
+						       (u32)reg,
+						       (u32)val);
+
+			if (rc) {
+				pr_err("%s: failed to write reg %d", __func__,
+				       rc);
+			}
+		} else {
+			pr_err("%s: not supported\n", __func__);
+		}
+
+		mutex_unlock(&device->dev_mutex);
+	}
+
+	return count;
+}
+
+/* sysfs read: report the value cached by the last read_reg write */
+static ssize_t read_reg_rda_attr(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+	ssize_t len;
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dba_dev->dev_mutex);
+	len = snprintf(buf, PAGE_SIZE, "0x%x\n", dba_dev->register_val);
+	mutex_unlock(&dba_dev->dev_mutex);
+
+	return len;
+}
+
+/*
+ * read_reg_wta_attr() - sysfs write: "<reg>" reads that register and
+ * caches the value in device->register_val for read_reg_rda_attr().
+ *
+ * Fixes: "&reg" was garbled to an (R) entity in the kstrtol call, and
+ * the error message wrongly said "write" in this read path.
+ */
+static ssize_t read_reg_wta_attr(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	long reg = 0;
+	int rc = 0;
+	u32 val = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return count;
+	}
+
+	rc = kstrtol(buf, 0, &reg);
+	if (rc) {
+		pr_err("%s: kstrol error %d\n", __func__, rc);
+	} else {
+		mutex_lock(&device->dev_mutex);
+
+		if (device->dev_ops.read_reg) {
+			rc = device->dev_ops.read_reg(device,
+						      (u32)reg,
+						      &val);
+
+			if (rc) {
+				pr_err("%s: failed to read reg %d", __func__,
+				       rc);
+			} else {
+				device->register_val = val;
+			}
+		} else {
+			pr_err("%s: not supported\n", __func__);
+		}
+
+		mutex_unlock(&device->dev_mutex);
+	}
+
+	return count;
+}
+
+/* sysfs write: any value triggers the device's debug-dump hook */
+static ssize_t dump_info_wta_attr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct msm_dba_device_info *dba_dev = to_dba_dev(dev);
+
+	if (!dba_dev) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!dba_dev->dev_ops.dump_debug_info) {
+		pr_err("%s: not supported\n", __func__);
+		return count;
+	}
+
+	if (dba_dev->dev_ops.dump_debug_info(dba_dev, 0x00))
+		pr_err("%s: failed to dump debug data\n", __func__);
+
+	return count;
+}
+
+/* read-only status attributes */
+static DEVICE_ATTR(device_name, 0444, device_name_rda_attr, NULL);
+static DEVICE_ATTR(client_list, 0444, client_list_rda_attr, NULL);
+static DEVICE_ATTR(power_status, 0444, power_status_rda_attr, NULL);
+static DEVICE_ATTR(video_status, 0444, video_status_rda_attr, NULL);
+static DEVICE_ATTR(audio_status, 0444, audio_status_rda_attr, NULL);
+/* write-only / read-write debug register access */
+static DEVICE_ATTR(write_reg, 0200, NULL, write_reg_wta_attr);
+static DEVICE_ATTR(read_reg, 0644, read_reg_rda_attr,
+		   read_reg_wta_attr);
+static DEVICE_ATTR(dump_info, 0200, NULL, dump_info_wta_attr);
+
+/* all DBA debug attributes, created/removed as one sysfs group */
+static struct attribute *msm_dba_sysfs_attrs[] = {
+	&dev_attr_device_name.attr,
+	&dev_attr_client_list.attr,
+	&dev_attr_power_status.attr,
+	&dev_attr_video_status.attr,
+	&dev_attr_audio_status.attr,
+	&dev_attr_write_reg.attr,
+	&dev_attr_read_reg.attr,
+	&dev_attr_dump_info.attr,
+	NULL,
+};
+
+static struct attribute_group msm_dba_sysfs_attr_grp = {
+	.attrs = msm_dba_sysfs_attrs,
+};
+
+/*
+ * msm_dba_helper_sysfs_init() - create the DBA debug sysfs group on @dev.
+ * Returns 0 on success or the sysfs_create_group() error.
+ */
+int msm_dba_helper_sysfs_init(struct device *dev)
+{
+	int ret;
+
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = sysfs_create_group(&dev->kobj, &msm_dba_sysfs_attr_grp);
+	if (ret)
+		pr_err("%s: sysfs group creation failed %d\n", __func__, ret);
+
+	return ret;
+}
+
+/* msm_dba_helper_sysfs_remove() - tear down the DBA debug sysfs group */
+void msm_dba_helper_sysfs_remove(struct device *dev)
+{
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return;
+	}
+
+	sysfs_remove_group(&dev->kobj, &msm_dba_sysfs_attr_grp);
+}
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
new file mode 100644
index 0000000..5074624
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+#include <video/msm_dba.h>
+#include "msm_dba_internal.h"
+
+static void msm_dba_helper_hdcp_handler(struct work_struct *work)
+{
+ struct msm_dba_device_info *dev;
+ int rc = 0;
+
+ if (!work) {
+ pr_err("%s: Invalid params\n", __func__);
+ return;
+ }
+
+ dev = container_of(work, struct msm_dba_device_info, hdcp_work);
+
+ mutex_lock(&dev->dev_mutex);
+ if (dev->hdcp_status) {
+ pr_debug("%s[%s:%d] HDCP is authenticated\n", __func__,
+ dev->chip_name, dev->instance_id);
+ mutex_unlock(&dev->dev_mutex);
+ return;
+ }
+
+ if (dev->dev_ops.hdcp_reset) {
+ rc = dev->dev_ops.hdcp_reset(dev);
+ if (rc)
+ pr_err("%s[%s:%d] HDCP reset failed\n", __func__,
+ dev->chip_name, dev->instance_id);
+ }
+
+ if (dev->dev_ops.hdcp_retry) {
+ rc = dev->dev_ops.hdcp_retry(dev, MSM_DBA_ASYNC_FLAG);
+ if (rc)
+ pr_err("%s[%s:%d] HDCP retry failed\n", __func__,
+ dev->chip_name, dev->instance_id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static void msm_dba_helper_issue_cb(struct msm_dba_device_info *dev,
+ struct msm_dba_client_info *client,
+ enum msm_dba_callback_event event)
+{
+ struct msm_dba_client_info *c;
+ struct list_head *pos = NULL;
+ u32 user_mask = 0;
+
+ list_for_each(pos, &dev->client_list) {
+ c = list_entry(pos, struct msm_dba_client_info, list);
+ if (client && client == c)
+ continue;
+
+ user_mask = c->event_mask & event;
+ if (c->cb && user_mask)
+ c->cb(c->cb_data, user_mask);
+ }
+}
+
+static irqreturn_t msm_dba_helper_irq_handler(int irq, void *dev)
+{
+	struct msm_dba_device_info *device = dev;
+	u32 mask = 0;
+	int rc = 0;
+	bool ret;
+
+	mutex_lock(&device->dev_mutex);
+	if (device->dev_ops.handle_interrupts) {
+		rc = device->dev_ops.handle_interrupts(device, &mask);
+		if (rc)
+			pr_err("%s: interrupt handler failed\n", __func__);
+	}
+
+	pr_debug("%s(%s:%d): Eventmask = 0x%x\n", __func__, device->chip_name,
+		 device->instance_id, mask);
+	if (mask)
+		msm_dba_helper_issue_cb(device, NULL, mask);
+
+	if ((mask & MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED) &&
+	    device->hdcp_monitor_on) {
+		ret = queue_work(device->hdcp_wq, &device->hdcp_work);
+		if (!ret)
+			pr_err("%s: hdcp work already queued\n", __func__);
+	}
+
+	if (device->dev_ops.unmask_interrupts)
+		rc = device->dev_ops.unmask_interrupts(device, mask);
+
+	mutex_unlock(&device->dev_mutex);
+	return IRQ_HANDLED;
+}
+
+int msm_dba_helper_i2c_write_byte(struct i2c_client *client,
+ u8 addr,
+ u8 reg,
+ u8 val)
+{
+ int rc = 0;
+ struct i2c_msg msg;
+ u8 buf[2] = {reg, val};
+
+ if (!client) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: [%s:0x%02x] : W[0x%02x, 0x%02x]\n", __func__,
+ client->name, addr, reg, val);
+ client->addr = addr;
+
+ msg.addr = addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buf;
+
+ if (i2c_transfer(client->adapter, &msg, 1) < 1) {
+ pr_err("%s: i2c write failed\n", __func__);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int msm_dba_helper_i2c_write_buffer(struct i2c_client *client,
+				    u8 addr,
+				    u8 *buf,
+				    u32 size)
+{
+	int rc = 0;
+	struct i2c_msg msg;
+
+	if (!client || !buf) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s: [%s:0x%02x] : W %u bytes\n", __func__,
+		 client->name, addr, size);
+
+	client->addr = addr;
+
+	msg.addr = addr;
+	msg.flags = 0;
+	msg.len = size;
+	msg.buf = buf;
+
+	if (i2c_transfer(client->adapter, &msg, 1) != 1) {
+		pr_err("%s: i2c write failed\n", __func__);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+int msm_dba_helper_i2c_read(struct i2c_client *client,
+			    u8 addr,
+			    u8 reg,
+			    char *buf,
+			    u32 size)
+{
+	int rc = 0;
+	struct i2c_msg msg[2];
+
+	if (!client || !buf) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	client->addr = addr;
+
+	msg[0].addr = addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &reg;
+
+	msg[1].addr = addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = size;
+	msg[1].buf = buf;
+
+	if (i2c_transfer(client->adapter, msg, 2) != 2) {
+		pr_err("%s: i2c read failed\n", __func__);
+		rc = -EIO;
+	}
+
+	pr_debug("%s: [%s:0x%02x] : R[0x%02x, 0x%02x]\n", __func__,
+		 client->name, addr, reg, *buf);
+	return rc;
+}
+
+int msm_dba_helper_power_on(void *client, bool on, u32 flags)
+{
+	int rc = 0;
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+	struct msm_dba_client_info *node;
+	struct list_head *pos = NULL;
+	bool power_on = false;
+
+	if (!c) {
+		pr_err("%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	/*
+	 * Power on the device if atleast one client powers on the device. But
+	 * power off will be done only after all the clients have called power
+	 * off
+	 */
+	if (on == device->power_status) {
+		c->power_on = on;
+	} else if (on) {
+		rc = device->dev_ops.dev_power_on(device, on);
+		if (rc)
+			pr_err("%s:%s: power on failed\n", device->chip_name,
+			       __func__);
+		else
+			c->power_on = on;
+	} else {
+		c->power_on = false;
+
+		list_for_each(pos, &device->client_list) {
+			node = list_entry(pos, struct msm_dba_client_info,
+					  list);
+			if (node->power_on) {
+				power_on = true;
+				break;
+			}
+		}
+
+		if (!power_on) {
+			rc = device->dev_ops.dev_power_on(device, false);
+			if (rc) {
+				pr_err("%s:%s: power off failed\n",
+				       device->chip_name, __func__);
+				c->power_on = true;
+			}
+		}
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return rc;
+}
+
+int msm_dba_helper_video_on(void *client, bool on,
+			    struct msm_dba_video_cfg *cfg, u32 flags)
+{
+	int rc = 0;
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+	struct msm_dba_client_info *node;
+	struct list_head *pos = NULL;
+	bool video_on = false;
+
+	if (!c) {
+		pr_err("%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	/*
+	 * Video will be turned on if at least one client turns on video. But
+	 * video off will be done only after all the clients have called video
+	 * off
+	 */
+	if (on == device->video_status) {
+		c->video_on = on;
+	} else if (on) {
+		rc = device->dev_ops.dev_video_on(device, cfg, on);
+		if (rc)
+			pr_err("%s:%s: video on failed\n", device->chip_name,
+			       __func__);
+		else
+			c->video_on = on;
+	} else {
+		c->video_on = false;
+
+		list_for_each(pos, &device->client_list) {
+			node = list_entry(pos, struct msm_dba_client_info,
+					  list);
+			if (node->video_on) {
+				video_on = true;
+				break;
+			}
+		}
+
+		if (!video_on) {
+			rc = device->dev_ops.dev_video_on(device, cfg, false);
+			if (rc) {
+				pr_err("%s:%s: video off failed\n",
+				       device->chip_name, __func__);
+				c->video_on = true;
+			}
+		}
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return rc;
+}
+
+int msm_dba_helper_interrupts_enable(void *client, bool on,
+ u32 event_mask, u32 flags)
+{
+ struct msm_dba_client_info *c = client;
+ struct msm_dba_device_info *device;
+
+ if (!c) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ device = c->dev;
+ mutex_lock(&device->dev_mutex);
+
+ if (on)
+ c->event_mask = event_mask;
+ else
+ c->event_mask = 0;
+
+ mutex_unlock(&device->dev_mutex);
+ return 0;
+}
+
+int msm_dba_helper_register_irq(struct msm_dba_device_info *dev,
+ u32 irq, u32 irq_flags)
+{
+ int rc;
+
+ if (!dev) {
+ pr_err("%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->dev_mutex);
+
+ rc = request_threaded_irq(irq, NULL, msm_dba_helper_irq_handler,
+ irq_flags, dev->chip_name, dev);
+
+ if (rc)
+ pr_err("%s:%s: Failed to register irq\n", dev->chip_name,
+ __func__);
+
+ mutex_unlock(&dev->dev_mutex);
+ return rc;
+}
+
+int msm_dba_helper_get_caps(void *client, struct msm_dba_capabilities *caps)
+{
+ struct msm_dba_client_info *c = client;
+ struct msm_dba_device_info *device;
+
+ if (!c || !caps) {
+ pr_err("%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ device = c->dev;
+ mutex_lock(&device->dev_mutex);
+
+ memcpy(caps, &device->caps, sizeof(*caps));
+
+ mutex_unlock(&device->dev_mutex);
+ return 0;
+}
+
+int msm_dba_register_hdcp_monitor(struct msm_dba_device_info *dev, bool enable)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ dev->hdcp_wq = alloc_workqueue("hdcp_monitor(%s:%d)", 0, 0,
+ dev->chip_name,
+ dev->instance_id);
+ if (!dev->hdcp_wq) {
+ pr_err("%s: failed to allocate wq\n", __func__);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_WORK(&dev->hdcp_work, msm_dba_helper_hdcp_handler);
+ dev->hdcp_monitor_on = true;
+ } else if (!enable && dev->hdcp_wq) {
+ destroy_workqueue(dev->hdcp_wq);
+ dev->hdcp_wq = NULL;
+ dev->hdcp_monitor_on = false;
+ }
+
+fail:
+ return rc;
+}
+
+int msm_dba_helper_force_reset(void *client, u32 flags)
+{
+ struct msm_dba_client_info *c = client;
+ struct msm_dba_device_info *device;
+ int rc = 0;
+
+ if (!c) {
+ pr_err("%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ device = c->dev;
+ mutex_lock(&device->dev_mutex);
+
+ msm_dba_helper_issue_cb(device, c, MSM_DBA_CB_PRE_RESET);
+
+ if (device->dev_ops.force_reset)
+ rc = device->dev_ops.force_reset(device, flags);
+
+ if (rc)
+ pr_err("%s: Force reset failed\n", __func__);
+
+ msm_dba_helper_issue_cb(device, c, MSM_DBA_CB_POST_RESET);
+
+ mutex_unlock(&device->dev_mutex);
+ return rc;
+}
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_init.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_init.c
new file mode 100644
index 0000000..0c40faf
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_init.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015, 2018,The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <video/msm_dba.h>
+#include "msm_dba_internal.h"
+
+struct msm_dba_device_list {
+ struct msm_dba_device_info *dev;
+ struct list_head list;
+};
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(init_mutex);
+
+int msm_dba_add_probed_device(struct msm_dba_device_info *dev)
+{
+ struct msm_dba_device_list *node;
+
+ if (!dev) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&init_mutex);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ mutex_unlock(&init_mutex);
+ return -ENOMEM;
+ }
+
+ memset(node, 0x0, sizeof(*node));
+ node->dev = dev;
+ list_add(&node->list, &device_list);
+
+ pr_debug("%s: Added new device (%s, %d)\n", __func__, dev->chip_name,
+ dev->instance_id);
+ mutex_unlock(&init_mutex);
+
+ return 0;
+}
+
+int msm_dba_get_probed_device(struct msm_dba_reg_info *reg,
+ struct msm_dba_device_info **dev)
+{
+ int rc = 0;
+ struct msm_dba_device_list *node;
+ struct list_head *position = NULL;
+
+ if (!reg || !dev) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&init_mutex);
+
+ *dev = NULL;
+ list_for_each(position, &device_list) {
+ node = list_entry(position, struct msm_dba_device_list, list);
+ if (!strcmp(reg->chip_name, node->dev->chip_name) &&
+ reg->instance_id == node->dev->instance_id) {
+ pr_debug("%s: Found device (%s, %d)\n", __func__,
+ reg->chip_name,
+ reg->instance_id);
+ *dev = node->dev;
+ break;
+ }
+ }
+
+ if (!*dev) {
+ pr_err("%s: Device not found (%s, %d)\n", __func__,
+ reg->chip_name,
+ reg->instance_id);
+ rc = -ENODEV;
+ }
+
+ mutex_unlock(&init_mutex);
+
+ return rc;
+}
+
+int msm_dba_remove_probed_device(struct msm_dba_device_info *dev)
+{
+ struct msm_dba_device_list *node;
+ struct list_head *position = NULL;
+ struct list_head *temp = NULL;
+
+ if (!dev) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&init_mutex);
+
+ list_for_each_safe(position, temp, &device_list) {
+ node = list_entry(position, struct msm_dba_device_list, list);
+ if (node->dev == dev) {
+ list_del(&node->list);
+ pr_debug("%s: Removed device (%s, %d)\n", __func__,
+ dev->chip_name,
+ dev->instance_id);
+ kfree(node);
+ break;
+ }
+ }
+
+ mutex_unlock(&init_mutex);
+
+ return 0;
+}
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_internal.h b/drivers/video/fbdev/msm/msm_dba/msm_dba_internal.h
new file mode 100644
index 0000000..78c6d2a
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_internal.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DBA_INTERNAL_H
+#define _MSM_DBA_INTERNAL_H
+
+#include <video/msm_dba.h>
+
+struct msm_dba_client_info;
+struct msm_dba_device_info;
+
+/**
+ * struct msm_dba_device_ops - Function pointers to device specific operations
+ * @dev_power_on: Power on operation called by msm_dba_helper_power_on. Mutex
+ * protection is handled by the caller.
+ * @dev_video_on: Video on operation called by msm_dba_helper_video_on. Mutex
+ * protection is handled by the caller.
+ * @handle_interrupts: Function pointer called when an interrupt is fired. If
+ * the bridge driver uses msm_dba_helper_register_irq
+ * for handling interrupts, irq handler will call
+ * handle_interrupts to figure out the event mask.
+ * @unmask_interrupts: Function pointer called by irq handler for unmasking
+ * interrupts.
+ * @hdcp_reset: Function pointer to reset the HDCP block. This needs to be valid
+ * if HDCP monitor is used.
+ * @hdcp_retry: Function pointer to retry HDCP authentication. This needs to be
+ * valid if HDCP monitor is used.
+ * @write_reg: Function pointer to write to device specific register.
+ * @read_reg: Function pointer to read device specific register.
+ * @force_reset: Function pointer to force reset the device.
+ * @dump_debug_info: Function pointer to trigger a dump to dmesg.
+ *
+ * The device operation function pointers are used if bridge driver uses helper
+ * functions in place of some client operations. If used, the helper functions
+ * will call the device function pointers to perform device specific
+ * programming.
+ */
+struct msm_dba_device_ops {
+ int (*dev_power_on)(struct msm_dba_device_info *dev, bool on);
+ int (*dev_video_on)(struct msm_dba_device_info *dev,
+ struct msm_dba_video_cfg *cfg, bool on);
+ int (*handle_interrupts)(struct msm_dba_device_info *dev, u32 *mask);
+ int (*unmask_interrupts)(struct msm_dba_device_info *dev, u32 mask);
+ int (*hdcp_reset)(struct msm_dba_device_info *dev);
+ int (*hdcp_retry)(struct msm_dba_device_info *dev, u32 flags);
+ int (*write_reg)(struct msm_dba_device_info *dev, u32 reg, u32 val);
+ int (*read_reg)(struct msm_dba_device_info *dev, u32 reg, u32 *val);
+ int (*force_reset)(struct msm_dba_device_info *dev, u32 flags);
+ int (*dump_debug_info)(struct msm_dba_device_info *dev, u32 flags);
+};
+
+/**
+ * struct msm_dba_device_info - Device specific information
+ * @chip_name: chip name
+ * @instance_id: Instance id
+ * @caps: Capabilities of the bridge chip
+ * @dev_ops: function pointers to device specific operations
+ * @client_ops: function pointers to client operations
+ * @dev_mutex: mutex for protecting device access
+ * @hdcp_wq: HDCP workqueue for handling failures.
+ * @client_list: list head for client list
+ * @reg_fxn: Function pointer called when a client registers with dba driver
+ * @dereg_fxn: Function pointer called when a client deregisters.
+ * @power_status: current power status of device
+ * @video_status: current video status of device
+ * @audio_status: current audio status of device
+ * @hdcp_on: hdcp enable status.
+ * @enc_on: encryption enable status.
+ * @hdcp_status: hdcp link status.
+ * @hdcp_monitor_on: hdcp monitor status
+ * @register_val: debug field used to support read register.
+ *
+ * Structure containing device specific information. This structure is allocated
+ * by the bridge driver. This structure should be unique to each device.
+ *
+ */
+struct msm_dba_device_info {
+ char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+ u32 instance_id;
+ struct msm_dba_capabilities caps;
+ struct msm_dba_device_ops dev_ops;
+ struct msm_dba_ops client_ops;
+ struct mutex dev_mutex;
+ struct workqueue_struct *hdcp_wq;
+ struct work_struct hdcp_work;
+ struct list_head client_list;
+ int (*reg_fxn)(struct msm_dba_client_info *client);
+ int (*dereg_fxn)(struct msm_dba_client_info *client);
+
+ bool power_status;
+ bool video_status;
+ bool audio_status;
+ bool hdcp_on;
+ bool enc_on;
+ bool hdcp_status;
+ bool hdcp_monitor_on;
+
+ /* Debug info */
+ u32 register_val;
+};
+
+/**
+ * struct msm_dba_client_info - Client specific information
+ * @dev: pointer to device information
+ * @client_name: client name
+ * @power_on: client power on status
+ * @video_on: client video on status
+ * @audio_on: client audio on status
+ * @event_mask: client event mask for callbacks.
+ * @cb: callback function for the client
+ * @cb_data: callback data pointer.
+ * @list: list pointer
+ *
+ * This structure is used to uniquely identify a client for a bridge chip. The
+ * pointer to this structure is returned as a handle from
+ * msm_dba_register_client.
+ */
+struct msm_dba_client_info {
+ struct msm_dba_device_info *dev;
+ char client_name[MSM_DBA_CLIENT_NAME_LEN];
+ bool power_on;
+ bool video_on;
+ bool audio_on;
+ u32 event_mask;
+ msm_dba_cb cb;
+ void *cb_data;
+ struct list_head list;
+};
+
+/**
+ * msm_dba_add_probed_device() - Add a new device to the probed devices list.
+ * @info: Pointer to structure containing the device information. This should be
+ * allocated by the specific bridge driver and kept until
+ * msm_dba_remove_probed_device() is called.
+ *
+ * Once a bridge chip is initialized and probed, it should add its device to the
+ * existing list of all probed display bridge chips. This list is maintained by
+ * the MSM DBA driver and is checked whenever there is a client register
+ * request.
+ */
+int msm_dba_add_probed_device(struct msm_dba_device_info *info);
+
+/**
+ * msm_dba_remove_probed_device() - Remove a device from the probed devices list
+ * @info: Pointer to structure containing the device info. This should be the
+ * same pointer used for msm_dba_add_probed_device().
+ *
+ * Bridge chip driver should call this to remove device from probed list.
+ */
+int msm_dba_remove_probed_device(struct msm_dba_device_info *info);
+
+/**
+ * msm_dba_get_probed_device() - Check if a device is present in the device list
+ * @reg: Pointer to structure containing the chip info received from the client
+ * driver
+ * @info: Pointer to the device info pointer that will be returned if the device
+ * has been found in the device list
+ *
+ * When clients of the MSM DBA driver call msm_dba_register_client(), the MSM
+ * DBA driver will use this function to check if the specific device requested
+ * by the client has been probed. If probed, function will return a pointer to
+ * the device information structure.
+ */
+int msm_dba_get_probed_device(struct msm_dba_reg_info *reg,
+ struct msm_dba_device_info **info);
+
+/**
+ * msm_dba_helper_i2c_read() - perform an i2c read transaction
+ * @client: i2c client pointer
+ * @addr: i2c slave address
+ * @reg: register where the data should be read from
+ * @buf: buffer where the read data is stored.
+ * @size: bytes to read from slave. buffer should be atleast size bytes.
+ *
+ * Helper function to perform a read from an i2c slave. Internally this calls
+ * i2c_transfer().
+ */
+int msm_dba_helper_i2c_read(struct i2c_client *client,
+ u8 addr,
+ u8 reg,
+ char *buf,
+ u32 size);
+
+/**
+ * msm_dba_helper_i2c_write_buffer() - write buffer to i2c slave.
+ * @client: i2c client pointer
+ * @addr: i2c slave address
+ * @buf: buffer where the data will be read from.
+ * @size: bytes to write.
+ *
+ * Helper function to perform a write to an i2c slave. Internally this calls
+ * i2c_transfer().
+ */
+int msm_dba_helper_i2c_write_buffer(struct i2c_client *client,
+ u8 addr,
+ u8 *buf,
+ u32 size);
+
+/**
+ * msm_dba_helper_i2c_write_byte() - write to a register on an i2c slave.
+ * @client: i2c client pointer
+ * @addr: i2c slave address
+ * @reg: slave register to write to
+ * @val: data to write.
+ *
+ * Helper function to perform a write to an i2c slave. Internally this calls
+ * i2c_transfer().
+ */
+int msm_dba_helper_i2c_write_byte(struct i2c_client *client,
+ u8 addr,
+ u8 reg,
+ u8 val);
+
+/**
+ * msm_dba_helper_power_on() - power on bridge chip
+ * @client: client handle
+ * @on: on/off
+ * @flags: flags
+ *
+ * This helper function can be used as power_on() function defined in struct
+ * msm_dba_ops. Internally, this function does some bookkeeping to figure out
+ * when to actually power on/off the device. If used, bridge driver should
+ * provide a dev_power_on to do the device specific power change.
+ */
+int msm_dba_helper_power_on(void *client, bool on, u32 flags);
+
+/**
+ * msm_dba_helper_video_on() - video on bridge chip
+ * @client: client handle
+ * @on: on/off
+ * @flags: flags
+ *
+ * This helper function can be used as video_on() function defined in struct
+ * msm_dba_ops. Internally, this function does some bookkeeping to figure out
+ * when to actually video on/off the device. If used, bridge driver should
+ * provide a dev_video_on to do the device specific video change.
+ */
+int msm_dba_helper_video_on(void *client, bool on,
+ struct msm_dba_video_cfg *cfg, u32 flags);
+
+/**
+ * msm_dba_helper_interrupts_enable() - manage interrupt callbacks
+ * @client: client handle
+ * @on: on/off
+ * @events_mask: events on which callbacks are required.
+ * @flags: flags
+ *
+ * This helper function provides the functionality needed for interrupts_enable
+ * function pointer in struct msm_dba_ops.
+ */
+int msm_dba_helper_interrupts_enable(void *client, bool on,
+ u32 events_mask, u32 flags);
+
+/**
+ * msm_dba_helper_get_caps() - return device capabilities
+ * @client: client handle
+ * @flags: flags
+ *
+ * Helper function to replace get_caps function pointer in struct msm_dba_ops
+ * structure.
+ */
+int msm_dba_helper_get_caps(void *client, struct msm_dba_capabilities *caps);
+
+/**
+ * msm_dba_helper_register_irq() - register irq and handle interrupts.
+ * @dev: pointer to device structure
+ * @irq: irq number
+ * @irq_flags: irq_flags.
+ *
+ * Helper function register an irq and handling interrupts. This will attach a
+ * threaded interrupt handler to the irq provided as input. When the irq
+ * handler is triggered, handler will call handle_interrupts in the device
+ * specific functions pointers so that bridge driver can parse the interrupt
+ * status registers and return the event mask. IRQ handler will use this event
+ * mask to provide callbacks to the clients. Once the callbacks are done,
+ * handler will call unmask_interrupts() before returning.
+ */
+int msm_dba_helper_register_irq(struct msm_dba_device_info *dev,
+ u32 irq, u32 irq_flags);
+
+/**
+ * msm_dba_register_hdcp_monitor() - kicks off monitoring for hdcp failures
+ * @dev: pointer to device structure.
+ * @enable: enable/disable
+ *
+ * Helper function to enable HDCP monitoring. This should be called only if irq
+ * is handled through msm dba helper functions.
+ */
+int msm_dba_register_hdcp_monitor(struct msm_dba_device_info *dev, bool enable);
+
+/**
+ * msm_dba_helper_sysfs_init() - create sysfs attributes for debugging
+ * @dev: pointer to struct device structure.
+ *
+ */
+int msm_dba_helper_sysfs_init(struct device *dev);
+
+/**
+ * msm_dba_helper_sysfs_remove() - remove sysfs attributes
+ * @dev: pointer to struct device structure.
+ *
+ */
+void msm_dba_helper_sysfs_remove(struct device *dev);
+
+/**
+ * msm_dba_helper_force_reset() - force reset bridge chip
+ * @client: client handle
+ * @flags: flags
+ *
+ * Helper function to replace force_reset function pointer in struct msm_dba_ops
+ * structure. Driver should set dev_ops.force_reset to a valid function.
+ */
+int msm_dba_helper_force_reset(void *client, u32 flags);
+#endif /* _MSM_DBA_INTERNAL_H */
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
new file mode 100644
index 0000000..39d26a4
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -0,0 +1,2714 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/iopoll.h>
+#include <linux/kthread.h>
+
+#include "mdss_dsi.h"
+#include "mdss_edp.h"
+#include "mdss_dsi_phy.h"
+
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_0 0x00
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_1 0x04
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_2 0x08
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_3 0x0c
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_4 0x10
+#define MDSS_DSI_DSIPHY_REGULATOR_CAL_PWR_CFG 0x18
+#define MDSS_DSI_DSIPHY_LDO_CNTRL 0x1dc
+#define MDSS_DSI_DSIPHY_REGULATOR_TEST 0x294
+#define MDSS_DSI_DSIPHY_STRENGTH_CTRL_0 0x184
+#define MDSS_DSI_DSIPHY_STRENGTH_CTRL_1 0x188
+#define MDSS_DSI_DSIPHY_STRENGTH_CTRL_2 0x18c
+#define MDSS_DSI_DSIPHY_TIMING_CTRL_0 0x140
+#define MDSS_DSI_DSIPHY_GLBL_TEST_CTRL 0x1d4
+#define MDSS_DSI_DSIPHY_CTRL_0 0x170
+#define MDSS_DSI_DSIPHY_CTRL_1 0x174
+
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+#define PWRDN_B BIT(7)
+
+/* 8996 */
+#define DATALANE_OFFSET_FROM_BASE_8996 0x100
+#define DSIPHY_CMN_PLL_CNTRL 0x0048
+#define DATALANE_SIZE_8996 0x80
+
+#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
+#define DSIPHY_CMN_CTRL_0 0x001c
+#define DSIPHY_CMN_CTRL_1 0x0020
+#define DSIPHY_CMN_LDO_CNTRL 0x004c
+#define DSIPHY_PLL_CLKBUFLR_EN 0x041c
+#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+
+#define DSIPHY_LANE_STRENGTH_CTRL_1 0x003c
+#define DSIPHY_LANE_VREG_CNTRL 0x0064
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 0x214
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 0x218
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 0x21C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 0x220
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 0x224
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 0x228
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 0x22C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 0x230
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 0x234
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 0x238
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 0x23C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 0x240
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 0x244
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 0x248
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 0x24C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 0x250
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 0x254
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 0x258
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 0x25C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 0x260
+/* (duplicate DSI_DYNAMIC_REFRESH_PLL_CTRL19 definition removed) */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 0x264
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 0x268
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 0x26C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 0x270
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 0x274
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 0x278
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 0x27C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 0x280
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 0x284
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 0x288
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 0x28C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 0x290
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR 0x294
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 0x298
+
+#define DSIPHY_DLN0_CFG1 0x0104
+#define DSIPHY_DLN0_TIMING_CTRL_4 0x0118
+#define DSIPHY_DLN0_TIMING_CTRL_5 0x011C
+#define DSIPHY_DLN0_TIMING_CTRL_6 0x0120
+#define DSIPHY_DLN0_TIMING_CTRL_7 0x0124
+#define DSIPHY_DLN0_TIMING_CTRL_8 0x0128
+
+#define DSIPHY_DLN1_CFG1 0x0184
+#define DSIPHY_DLN1_TIMING_CTRL_4 0x0198
+#define DSIPHY_DLN1_TIMING_CTRL_5 0x019C
+#define DSIPHY_DLN1_TIMING_CTRL_6 0x01A0
+#define DSIPHY_DLN1_TIMING_CTRL_7 0x01A4
+#define DSIPHY_DLN1_TIMING_CTRL_8 0x01A8
+
+#define DSIPHY_DLN2_CFG1 0x0204
+#define DSIPHY_DLN2_TIMING_CTRL_4 0x0218
+#define DSIPHY_DLN2_TIMING_CTRL_5 0x021C
+#define DSIPHY_DLN2_TIMING_CTRL_6 0x0220
+#define DSIPHY_DLN2_TIMING_CTRL_7 0x0224
+#define DSIPHY_DLN2_TIMING_CTRL_8 0x0228
+
+#define DSIPHY_DLN3_CFG1 0x0284
+#define DSIPHY_DLN3_TIMING_CTRL_4 0x0298
+#define DSIPHY_DLN3_TIMING_CTRL_5 0x029C
+#define DSIPHY_DLN3_TIMING_CTRL_6 0x02A0
+#define DSIPHY_DLN3_TIMING_CTRL_7 0x02A4
+#define DSIPHY_DLN3_TIMING_CTRL_8 0x02A8
+
+#define DSIPHY_CKLN_CFG1 0x0304
+#define DSIPHY_CKLN_TIMING_CTRL_4 0x0318
+#define DSIPHY_CKLN_TIMING_CTRL_5 0x031C
+#define DSIPHY_CKLN_TIMING_CTRL_6 0x0320
+#define DSIPHY_CKLN_TIMING_CTRL_7 0x0324
+#define DSIPHY_CKLN_TIMING_CTRL_8 0x0328
+
+#define DSIPHY_PLL_RESETSM_CNTRL5 0x043c
+
+#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
+ (((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
+ ((data0) << 8) | (((addr0)/4) & 0xFF))
+
+#define MDSS_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1) \
+ writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+ (base) + (offset))
+
+/*
+ * mdss_dsi_dfps_config_8996() - program dynamic-refresh shadow registers
+ * @ctrl: DSI controller to program
+ *
+ * Fills the DSI_DYNAMIC_REFRESH_PLL_CTRL0..31 registers with packed PHY
+ * address/data pairs (via MDSS_DYN_REF_REG_W) that the hardware replays
+ * on a dynamic fps switch, so PHY rev 2.0 (8996-class) lane timings are
+ * reloaded without a full panel re-init. The clock-slave controller only
+ * reloads lane timing; the non-slave path additionally sequences the
+ * PLL/bandgap controls around the timing updates.
+ */
+void mdss_dsi_dfps_config_8996(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	struct mdss_dsi_phy_ctrl *pd;
+	/* NOTE(review): ctrl is dereferenced here before any validity check */
+	int glbl_tst_cntrl =
+		MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_GLBL_TEST_CTRL);
+
+	pdata = &ctrl->panel_data;
+	/*
+	 * NOTE(review): pdata holds the address of an embedded member and
+	 * can never be NULL; this check is dead code.
+	 */
+	if (!pdata) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		return;
+	}
+	/* NOTE(review): pinfo is assigned but not used in this function */
+	pinfo = &pdata->panel_info;
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl)) {
+		/* slave: reload per-lane CFG1 and TIMING_CTRL_4..8 only */
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+			DSIPHY_DLN0_CFG1, DSIPHY_DLN1_CFG1,
+			0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			DSIPHY_DLN2_CFG1, DSIPHY_DLN3_CFG1,
+			0x0, 0x0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			DSIPHY_CKLN_CFG1, DSIPHY_DLN0_TIMING_CTRL_4,
+			0x0, pd->timing_8996[0]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			DSIPHY_DLN1_TIMING_CTRL_4,
+			DSIPHY_DLN2_TIMING_CTRL_4,
+			pd->timing_8996[8],
+			pd->timing_8996[16]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			DSIPHY_DLN3_TIMING_CTRL_4,
+			DSIPHY_CKLN_TIMING_CTRL_4,
+			pd->timing_8996[24],
+			pd->timing_8996[32]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			DSIPHY_DLN0_TIMING_CTRL_5,
+			DSIPHY_DLN1_TIMING_CTRL_5,
+			pd->timing_8996[1],
+			pd->timing_8996[9]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			DSIPHY_DLN2_TIMING_CTRL_5,
+			DSIPHY_DLN3_TIMING_CTRL_5,
+			pd->timing_8996[17],
+			pd->timing_8996[25]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			DSIPHY_CKLN_TIMING_CTRL_5,
+			DSIPHY_DLN0_TIMING_CTRL_6,
+			pd->timing_8996[33],
+			pd->timing_8996[2]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			DSIPHY_DLN1_TIMING_CTRL_6,
+			DSIPHY_DLN2_TIMING_CTRL_6,
+			pd->timing_8996[10],
+			pd->timing_8996[18]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+			DSIPHY_DLN3_TIMING_CTRL_6,
+			DSIPHY_CKLN_TIMING_CTRL_6,
+			pd->timing_8996[26],
+			pd->timing_8996[34]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+			DSIPHY_DLN0_TIMING_CTRL_7,
+			DSIPHY_DLN1_TIMING_CTRL_7,
+			pd->timing_8996[3],
+			pd->timing_8996[11]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+			DSIPHY_DLN2_TIMING_CTRL_7,
+			DSIPHY_DLN3_TIMING_CTRL_7,
+			pd->timing_8996[19],
+			pd->timing_8996[27]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+			DSIPHY_CKLN_TIMING_CTRL_7,
+			DSIPHY_DLN0_TIMING_CTRL_8,
+			pd->timing_8996[35],
+			pd->timing_8996[4]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+			DSIPHY_DLN1_TIMING_CTRL_8,
+			DSIPHY_DLN2_TIMING_CTRL_8,
+			pd->timing_8996[12],
+			pd->timing_8996[20]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+			DSIPHY_DLN3_TIMING_CTRL_8,
+			DSIPHY_CKLN_TIMING_CTRL_8,
+			pd->timing_8996[28],
+			pd->timing_8996[36]);
+		/*
+		 * Remaining slots are padded with writes to offset 0x110
+		 * (no-op filler) so the whole shadow table is initialized.
+		 */
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+			0x0110, 0x0110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL16,
+			0x0110, 0x0110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			0x0110, 0x0110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			0x0110, 0x0110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			0x0110, 0x0110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			0x110, 0x110, 0, 0);
+		MIPI_OUTP(ctrl->ctrl_base +
+			DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0);
+		MIPI_OUTP(ctrl->ctrl_base +
+			DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x0);
+	} else {
+		/* master/standalone: sequence PLL controls around timings */
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+			DSIPHY_CMN_GLBL_TEST_CTRL,
+			DSIPHY_PLL_PLL_BANDGAP,
+			glbl_tst_cntrl | BIT(1), 0x1);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			DSIPHY_PLL_RESETSM_CNTRL5,
+			DSIPHY_PLL_PLL_BANDGAP,
+			0x0D, 0x03);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			DSIPHY_PLL_RESETSM_CNTRL5,
+			DSIPHY_CMN_PLL_CNTRL,
+			0x1D, 0x00);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			DSIPHY_CMN_CTRL_1, DSIPHY_DLN0_CFG1,
+			0x20, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			DSIPHY_DLN1_CFG1, DSIPHY_DLN2_CFG1,
+			0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			DSIPHY_DLN3_CFG1, DSIPHY_CKLN_CFG1,
+			0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			DSIPHY_DLN0_TIMING_CTRL_4,
+			DSIPHY_DLN1_TIMING_CTRL_4,
+			pd->timing_8996[0],
+			pd->timing_8996[8]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			DSIPHY_DLN2_TIMING_CTRL_4,
+			DSIPHY_DLN3_TIMING_CTRL_4,
+			pd->timing_8996[16],
+			pd->timing_8996[24]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			DSIPHY_CKLN_TIMING_CTRL_4,
+			DSIPHY_DLN0_TIMING_CTRL_5,
+			pd->timing_8996[32],
+			pd->timing_8996[1]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+			DSIPHY_DLN1_TIMING_CTRL_5,
+			DSIPHY_DLN2_TIMING_CTRL_5,
+			pd->timing_8996[9],
+			pd->timing_8996[17]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+			DSIPHY_DLN3_TIMING_CTRL_5,
+			DSIPHY_CKLN_TIMING_CTRL_5,
+			pd->timing_8996[25],
+			pd->timing_8996[33]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+			DSIPHY_DLN0_TIMING_CTRL_6,
+			DSIPHY_DLN1_TIMING_CTRL_6,
+			pd->timing_8996[2],
+			pd->timing_8996[10]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+			DSIPHY_DLN2_TIMING_CTRL_6,
+			DSIPHY_DLN3_TIMING_CTRL_6,
+			pd->timing_8996[18],
+			pd->timing_8996[26]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+			DSIPHY_CKLN_TIMING_CTRL_6,
+			DSIPHY_DLN0_TIMING_CTRL_7,
+			pd->timing_8996[34],
+			pd->timing_8996[3]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+			DSIPHY_DLN1_TIMING_CTRL_7,
+			DSIPHY_DLN2_TIMING_CTRL_7,
+			pd->timing_8996[11],
+			pd->timing_8996[19]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+			DSIPHY_DLN3_TIMING_CTRL_7,
+			DSIPHY_CKLN_TIMING_CTRL_7,
+			pd->timing_8996[27],
+			pd->timing_8996[35]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL16,
+			DSIPHY_DLN0_TIMING_CTRL_8,
+			DSIPHY_DLN1_TIMING_CTRL_8,
+			pd->timing_8996[4],
+			pd->timing_8996[12]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			DSIPHY_DLN2_TIMING_CTRL_8,
+			DSIPHY_DLN3_TIMING_CTRL_8,
+			pd->timing_8996[20],
+			pd->timing_8996[28]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			DSIPHY_CKLN_TIMING_CTRL_8,
+			DSIPHY_CMN_CTRL_1,
+			pd->timing_8996[36], 0);
+		/* clear the PLL-source override bit in the last two slots */
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			DSIPHY_CMN_GLBL_TEST_CTRL,
+			DSIPHY_CMN_GLBL_TEST_CTRL,
+			((glbl_tst_cntrl) & (~BIT(2))),
+			((glbl_tst_cntrl) & (~BIT(2))));
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			DSIPHY_CMN_GLBL_TEST_CTRL,
+			DSIPHY_CMN_GLBL_TEST_CTRL,
+			((glbl_tst_cntrl) & (~BIT(2))),
+			((glbl_tst_cntrl) & (~BIT(2))));
+	}
+
+	wmb(); /* make sure phy timings are updated*/
+}
+
+/* Assert then de-assert the DSI PHY software reset bit at offset 0x12c. */
+static void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/* start phy sw reset */
+	MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0001);
+	udelay(1000);
+	wmb(); /* make sure reset */
+	/* end phy sw reset */
+	MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0000);
+	udelay(100);
+	wmb(); /* make sure reset cleared */
+}
+
+/*
+ * Poll the PHY RESETSM_READY_STATUS register (offset 0x4cc) until the
+ * CORE_READY bit (bit 0) is set. Returns 0 on success or the
+ * readl_poll_timeout() error code (-ETIMEDOUT) on timeout.
+ */
+int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	u32 const poll_interval_us = 10, poll_timeout_us = 100;
+
+	pr_debug("%s: polling for RESETSM_READY_STATUS.CORE_READY\n",
+		__func__);
+
+	return readl_poll_timeout(ctrl->phy_io.base + 0x4cc, status,
+			(status & 0x1), poll_interval_us, poll_timeout_us);
+}
+
+/*
+ * Per-controller worker for mdss_dsi_phy_sw_reset(). Chooses between a
+ * lane/HW-only reset (when the shared PHY regulator must not be
+ * disturbed because the other controller is still active) and a full
+ * PHY sw reset via mdss_dsi_ctrl_phy_reset().
+ */
+static void mdss_dsi_phy_sw_reset_sub(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+	struct dsi_shared_data *sdata;
+	struct mdss_dsi_ctrl_pdata *octrl;
+	u32 reg_val = 0;
+
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	sdata = ctrl->shared_data;
+	octrl = mdss_dsi_get_other_ctrl(ctrl);
+
+	/*
+	 * On PHY rev 2.0 only the clock master performs the reset; it also
+	 * resets its clock slave (sctrl) in the fallback branch below.
+	 */
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
+		if (mdss_dsi_is_ctrl_clk_master(ctrl))
+			sctrl = mdss_dsi_get_ctrl_clk_slave();
+		else
+			return;
+	}
+
+	/*
+	 * For dual dsi case if we do DSI PHY sw reset,
+	 * this will reset DSI PHY regulators also.
+	 * Since DSI PHY regulator is shared among both
+	 * the DSI controllers, we should not do DSI PHY
+	 * sw reset when the other DSI controller is still
+	 * active.
+	 */
+	mutex_lock(&sdata->phy_reg_lock);
+	if ((mdss_dsi_is_hw_config_dual(sdata) &&
+		(octrl && octrl->is_phyreg_enabled))) {
+		/* start phy lane and HW reset */
+		reg_val = MIPI_INP(ctrl->ctrl_base + 0x12c);
+		reg_val |= (BIT(16) | BIT(8));
+		MIPI_OUTP(ctrl->ctrl_base + 0x12c, reg_val);
+		/* wait for 1ms as per HW design */
+		usleep_range(1000, 2000);
+		/* ensure phy lane and HW reset starts */
+		wmb();
+		/* end phy lane and HW reset */
+		reg_val = MIPI_INP(ctrl->ctrl_base + 0x12c);
+		reg_val &= ~(BIT(16) | BIT(8));
+		MIPI_OUTP(ctrl->ctrl_base + 0x12c, reg_val);
+		/* wait for 100us as per HW design */
+		usleep_range(100, 200);
+		/* ensure phy lane and HW reset ends */
+		wmb();
+	} else {
+		/* start phy sw reset */
+		mdss_dsi_ctrl_phy_reset(ctrl);
+		if (sctrl)
+			mdss_dsi_ctrl_phy_reset(sctrl);
+
+	}
+	mutex_unlock(&sdata->phy_reg_lock);
+}
+
+/*
+ * mdss_dsi_phy_sw_reset() - software reset of the DSI PHY
+ * @ctrl: controller requesting the reset
+ *
+ * In split display mode the reset is only initiated from the clock
+ * master (which then also resets the slave); calls from the clock
+ * slave are silently ignored. Also applies an HW-rev-1.0.3 quirk that
+ * powers down unused PLL1 registers after the reset.
+ */
+void mdss_dsi_phy_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+	struct dsi_shared_data *sdata;
+
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	sdata = ctrl->shared_data;
+
+	/*
+	 * When operating in split display mode, make sure that the PHY reset
+	 * is only done from the clock master. This will ensure that the PLL is
+	 * off when PHY reset is called.
+	 */
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl))
+		return;
+
+	mdss_dsi_phy_sw_reset_sub(ctrl);
+
+	if (mdss_dsi_is_ctrl_clk_master(ctrl)) {
+		sctrl = mdss_dsi_get_ctrl_clk_slave();
+		if (sctrl)
+			mdss_dsi_phy_sw_reset_sub(sctrl);
+		else
+			pr_warn("%s: unable to get slave ctrl\n", __func__);
+	}
+
+	/* All other quirks go here */
+	if ((sdata->hw_rev == MDSS_DSI_HW_REV_103) &&
+		!mdss_dsi_is_hw_config_dual(sdata) &&
+		mdss_dsi_is_right_ctrl(ctrl)) {
+
+		/*
+		 * phy sw reset will wipe out the pll settings for PLL.
+		 * Need to explicitly turn off PLL1 if unused to avoid
+		 * current leakage issues.
+		 */
+		if ((mdss_dsi_is_hw_config_split(sdata) ||
+			mdss_dsi_is_pll_src_pll0(sdata)) &&
+			ctrl->vco_dummy_clk) {
+			pr_debug("Turn off unused PLL1 registers\n");
+			clk_set_rate(ctrl->vco_dummy_clk, 1);
+		}
+	}
+}
+
+/*
+ * Power down the DSI PHY regulator block. No-op for PHY rev 2.0,
+ * which has no separate regulator region handled here.
+ */
+static void mdss_dsi_phy_regulator_disable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20)
+		return;
+
+	MIPI_OUTP(ctrl->phy_regulator_io.base + 0x018, 0x000);
+}
+
+/*
+ * Shut down the DSI PHY. For PHY rev 2.0 the PLL output buffers,
+ * global test control and common control are cleared; older PHYs only
+ * need CTRL_0 zeroed.
+ */
+static void mdss_dsi_phy_shutdown(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	void __iomem *phy_base;
+
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	phy_base = ctrl->phy_io.base;
+
+	if (ctrl->shared_data->phy_rev != DSI_PHY_REV_20) {
+		MIPI_OUTP(phy_base + MDSS_DSI_DSIPHY_CTRL_0, 0x000);
+		return;
+	}
+
+	MIPI_OUTP(phy_base + DSIPHY_PLL_CLKBUFLR_EN, 0);
+	MIPI_OUTP(phy_base + DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+	MIPI_OUTP(phy_base + DSIPHY_CMN_CTRL_0, 0);
+}
+
+/**
+ * mdss_dsi_lp_cd_rx() -- enable LP and CD at receiving
+ * @ctrl: pointer to DSI controller structure
+ *
+ * LP: low power
+ * CD: contention detection
+ *
+ * Programs strength ctrl 1 from the panel's phy db so the PHY can
+ * receive LP traffic and detect lane contention. Skipped for PHY
+ * rev 2.0 (its configuration is done elsewhere).
+ */
+void mdss_dsi_lp_cd_rx(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20)
+		return;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	/* Strength ctrl 1: LP Rx + CD Rx contention detection */
+	MIPI_OUTP((ctrl->phy_io.base) + 0x0188, pd->strength[1]);
+	wmb(); /* ensure strength ctrl is written */
+}
+
+/*
+ * Enable the shared 28nm PHY regulator. In LDO mode only the LDO
+ * control register is programmed (value depends on HW revision);
+ * otherwise the regulator ctrl 0-4 / TEST / CAL_PWR_CFG registers are
+ * written from the panel's phy db in the HW recommended order.
+ */
+static void mdss_dsi_28nm_phy_regulator_enable(
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+
+	if (pd->reg_ldo_mode) {
+		/* Regulator ctrl 0 */
+		MIPI_OUTP(ctrl_pdata->phy_regulator_io.base, 0x0);
+		/* Regulator ctrl - CAL_PWR_CFG */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x18, pd->regulator[6]);
+		/* Add H/w recommended delay */
+		udelay(1000);
+		/* Regulator ctrl - TEST */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x14, pd->regulator[5]);
+		/* Regulator ctrl 3 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0xc, pd->regulator[3]);
+		/* Regulator ctrl 2 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x8, pd->regulator[2]);
+		/* Regulator ctrl 1 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x4, pd->regulator[1]);
+		/* Regulator ctrl 4 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x10, pd->regulator[4]);
+		/* LDO ctrl: value differs by DSI HW revision */
+		if ((ctrl_pdata->shared_data->hw_rev ==
+			MDSS_DSI_HW_REV_103_1)
+			|| (ctrl_pdata->shared_data->hw_rev ==
+			MDSS_DSI_HW_REV_104_2))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x05);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x0d);
+	} else {
+		/* Regulator ctrl 0 */
+		MIPI_OUTP(ctrl_pdata->phy_regulator_io.base,
+				0x0);
+		/* Regulator ctrl - CAL_PWR_CFG */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x18, pd->regulator[6]);
+		/* Add H/w recommended delay */
+		udelay(1000);
+		/* Regulator ctrl 1 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x4, pd->regulator[1]);
+		/* Regulator ctrl 2 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x8, pd->regulator[2]);
+		/* Regulator ctrl 3 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0xc, pd->regulator[3]);
+		/* Regulator ctrl 4 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x10, pd->regulator[4]);
+		/* LDO ctrl */
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x00);
+		/* Regulator ctrl 0: written last with the configured value */
+		MIPI_OUTP(ctrl_pdata->phy_regulator_io.base,
+				pd->regulator[0]);
+	}
+}
+
+/*
+ * Configure the 28nm DSI PHY from the panel's phy db: strength ctrl,
+ * timing ctrl 0-11, per-lane config for 4 data lanes + clock lane,
+ * GLBL_TEST_CTRL (split-display aware), lane power-on and BIST ctrl.
+ */
+static void mdss_dsi_28nm_phy_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int i, off, ln, offset;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+
+	/* Strength ctrl 0 for 28nm PHY*/
+	if ((ctrl_pdata->shared_data->hw_rev <= MDSS_DSI_HW_REV_104_2) &&
+		(ctrl_pdata->shared_data->hw_rev != MDSS_DSI_HW_REV_103)) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0170, 0x5b);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0184, pd->strength[0]);
+		/* make sure PHY strength ctrl is set */
+		wmb();
+	}
+
+	off = 0x0140;	/* phy timing ctrl 0 - 11 */
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + off, pd->timing[i]);
+		/* make sure phy timing register is programed */
+		wmb();
+		off += 4;
+	}
+
+	/* 4 lanes + clk lane configuration */
+	/* lane config n * (0 - 4) & DataPath setup */
+	for (ln = 0; ln < 5; ln++) {
+		off = (ln * 0x40);
+		for (i = 0; i < 9; i++) {
+			offset = i + (ln * 9);
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + off,
+					pd->lanecfg[offset]);
+			/* make sure lane config register is programed */
+			wmb();
+			off += 4;
+		}
+	}
+
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0180, 0x0a);
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_4 */
+	wmb();
+
+	/* DSI_0_PHY_DSIPHY_GLBL_TEST_CTRL */
+	if (!mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x01);
+	} else {
+		/* split: only DISPLAY_1 (or rev 1.3.1) sets the test bit */
+		if (((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1) ||
+		(ctrl_pdata->shared_data->hw_rev == MDSS_DSI_HW_REV_103_1))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x01);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x00);
+	}
+	/* ensure DSIPHY_GLBL_TEST_CTRL is set */
+	wmb();
+
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0170, 0x5f);
+	/* make sure PHY lanes are powered on */
+	wmb();
+
+	off = 0x01b4;	/* phy BIST ctrl 0 - 5 */
+	for (i = 0; i < 6; i++) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + off, pd->bistctrl[i]);
+		wmb(); /* make sure PHY bit control is configured */
+		off += 4;
+	}
+
+}
+
+/*
+ * Enable the 20nm PHY regulator. Expects exactly 7 regulator values in
+ * the panel's phy db; in LDO mode only LDO_CNTRL is written, otherwise
+ * regulator ctrl 1-4, CAL_PWR_CFG and finally ctrl 0 are programmed.
+ */
+static void mdss_dsi_20nm_phy_regulator_enable(struct mdss_dsi_ctrl_pdata
+	*ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	void __iomem *phy_io_base;
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+	phy_io_base = ctrl_pdata->phy_regulator_io.base;
+
+	if (pd->regulator_len != 7) {
+		pr_err("%s: wrong regulator settings\n", __func__);
+		return;
+	}
+
+	if (pd->reg_ldo_mode) {
+		MIPI_OUTP(ctrl_pdata->phy_io.base + MDSS_DSI_DSIPHY_LDO_CNTRL,
+			0x1d);
+	} else {
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_1,
+			pd->regulator[1]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_2,
+			pd->regulator[2]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_3,
+			pd->regulator[3]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_4,
+			pd->regulator[4]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CAL_PWR_CFG,
+			pd->regulator[6]);
+		MIPI_OUTP(ctrl_pdata->phy_io.base + MDSS_DSI_DSIPHY_LDO_CNTRL,
+			0x00);
+		/* ctrl 0 last, after the rest of the block is configured */
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_0,
+			pd->regulator[0]);
+	}
+}
+
+/*
+ * Configure the 20nm DSI PHY: strength ctrl (expects 2 entries),
+ * GLBL_TEST_CTRL depending on dual/split topology and PLL source,
+ * per-lane config (expects 45 entries), timing ctrl 0-11, then enable
+ * the PHY via CTRL_0.
+ */
+static void mdss_dsi_20nm_phy_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int i, off, ln, offset;
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+
+	if (pd->strength_len != 2) {
+		pr_err("%s: wrong strength ctrl\n", __func__);
+		return;
+	}
+
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + MDSS_DSI_DSIPHY_STRENGTH_CTRL_0,
+		pd->strength[0]);
+
+
+	if (!mdss_dsi_is_hw_config_dual(ctrl_pdata->shared_data)) {
+		/* 0x00 selects the local PLL; 0x01 the remote one */
+		if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) ||
+			mdss_dsi_is_left_ctrl(ctrl_pdata) ||
+			(mdss_dsi_is_right_ctrl(ctrl_pdata) &&
+			mdss_dsi_is_pll_src_pll0(ctrl_pdata->shared_data)))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x00);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x01);
+	} else {
+		if (mdss_dsi_is_left_ctrl(ctrl_pdata))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x00);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x01);
+	}
+
+	if (pd->lanecfg_len != 45) {
+		pr_err("%s: wrong lane cfg\n", __func__);
+		return;
+	}
+
+	/* 4 lanes + clk lane configuration */
+	/* lane config n * (0 - 4) & DataPath setup */
+	for (ln = 0; ln < 5; ln++) {
+		off = (ln * 0x40);
+		for (i = 0; i < 9; i++) {
+			offset = i + (ln * 9);
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + off,
+				pd->lanecfg[offset]);
+			/* make sure lane config register is programed */
+			wmb();
+			off += 4;
+		}
+	}
+
+	off = 0;	/* phy timing ctrl 0 - 11 */
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) +
+			MDSS_DSI_DSIPHY_TIMING_CTRL_0 + off, pd->timing[i]);
+		wmb(); /* make sure phy timing register is programed */
+		off += 4;
+	}
+
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + MDSS_DSI_DSIPHY_CTRL_1, 0);
+	/* make sure everything is written before enable */
+	wmb();
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + MDSS_DSI_DSIPHY_CTRL_0, 0x7f);
+}
+
+/*
+ * Standalone PLL setup: pll right output enabled, bit clk selected
+ * from left.
+ */
+static void mdss_dsi_8996_pll_source_standalone(
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	void __iomem *base = ctrl->phy_io.base;
+	u32 glbl_test;
+
+	MIPI_OUTP(base + DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+
+	glbl_test = MIPI_INP(base + DSIPHY_CMN_GLBL_TEST_CTRL);
+	glbl_test &= ~BIT(2);
+	MIPI_OUTP(base + DSIPHY_CMN_GLBL_TEST_CTRL, glbl_test);
+}
+
+/*
+ * Sourcing from the right PLL: pll left + right outputs disabled,
+ * bit clk selected from right; keep the pll1 bandgap biased for the
+ * split display case.
+ */
+static void mdss_dsi_8996_pll_source_from_right(
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	void __iomem *base = ctrl->phy_io.base;
+	u32 glbl_test;
+
+	MIPI_OUTP(base + DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+
+	glbl_test = MIPI_INP(base + DSIPHY_CMN_GLBL_TEST_CTRL);
+	glbl_test |= BIT(2);
+	MIPI_OUTP(base + DSIPHY_CMN_GLBL_TEST_CTRL, glbl_test);
+
+	/* enable bias current for pll1 during split display case */
+	MIPI_OUTP(base + DSIPHY_PLL_PLL_BANDGAP, 0x3);
+}
+
+/*
+ * Sourcing from the left PLL: pll left + right outputs enabled,
+ * bit clk selected from left.
+ */
+static void mdss_dsi_8996_pll_source_from_left(
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	void __iomem *base = ctrl->phy_io.base;
+	u32 glbl_test;
+
+	MIPI_OUTP(base + DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+
+	glbl_test = MIPI_INP(base + DSIPHY_CMN_GLBL_TEST_CTRL);
+	glbl_test &= ~BIT(2);
+	MIPI_OUTP(base + DSIPHY_CMN_GLBL_TEST_CTRL, glbl_test);
+}
+
+/*
+ * Enable the 8996 PHY per-lane regulators: writes one vreg ctrl value
+ * (offset 0x64 in each lane block) for each of the 4 data lanes and
+ * the clock lane.
+ */
+static void mdss_dsi_8996_phy_regulator_enable(
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int j, off, ln, cnt, ln_off;
+	char *ip;
+	void __iomem *base;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		/*
+		 * data lane offset from base: 0x100
+		 * data lane size: 0x80
+		 */
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* vreg ctrl, 1 * 5 */
+		cnt = 1;
+		ln_off = cnt * ln;
+		ip = &pd->regulator[ln_off];
+		off = 0x64;
+		for (j = 0; j < cnt; j++, off += 4)
+			MIPI_OUTP(base + off, *ip++);
+	}
+
+	wmb(); /* make sure registers committed */
+
+}
+
+/*
+ * Power off the 8996 PHY: clear the PLL power bit (BIT(7) of
+ * CMN_CTRL_0), put each lane's LDO into its low-power value (0x1c)
+ * and zero each lane's STRENGTH_CTRL_1.
+ */
+static void mdss_dsi_8996_phy_power_off(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ln;
+	void __iomem *base;
+	u32 data;
+
+	/* Turn off PLL power */
+	data = MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0);
+	MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, data & ~BIT(7));
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* turn off phy ldo */
+		MIPI_OUTP(base + DSIPHY_LANE_VREG_CNTRL, 0x1c);
+	}
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c);
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		MIPI_OUTP(base + DSIPHY_LANE_STRENGTH_CTRL_1, 0x0);
+	}
+
+	wmb(); /* make sure registers committed */
+}
+
+/*
+ * Power off the DSI PHY when idle, if supported. Only a rev 2.0 PHY
+ * with a panel that explicitly allows PHY power off takes this path;
+ * in every other case the PHY is left powered and a debug message is
+ * logged. Idempotent: returns immediately if already powered off.
+ */
+static void mdss_dsi_phy_power_off(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	if (ctrl->phy_power_off)
+		return;
+
+	if (!pinfo->allow_phy_power_off ||
+		(ctrl->shared_data->phy_rev != DSI_PHY_REV_20)) {
+		pr_debug("%s: ctrl%d phy rev:%d panel support for phy off:%d\n",
+			__func__, ctrl->ndx, ctrl->shared_data->phy_rev,
+			pinfo->allow_phy_power_off);
+		return;
+	}
+
+	/* supported for phy rev 2.0 and if panel allows it*/
+	mdss_dsi_8996_phy_power_off(ctrl);
+
+	ctrl->phy_power_off = true;
+}
+
+/*
+ * Power the 8996 PHY back on after mdss_dsi_8996_phy_power_off():
+ * restore each lane's strength values, re-enable the per-lane
+ * regulators and set the PLL power bit (BIT(7) of CMN_CTRL_0).
+ */
+static void mdss_dsi_8996_phy_power_on(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int j, off, ln, cnt, ln_off;
+	void __iomem *base;
+	struct mdss_dsi_phy_ctrl *pd;
+	char *ip;
+	u32 data;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* strength, 2 * 5 */
+		cnt = 2;
+		ln_off = cnt * ln;
+		ip = &pd->strength[ln_off];
+		off = 0x38;
+		for (j = 0; j < cnt; j++, off += 4)
+			MIPI_OUTP(base + off, *ip++);
+	}
+
+	mdss_dsi_8996_phy_regulator_enable(ctrl);
+
+	/* Turn on PLL power */
+	data = MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0);
+	MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, data | BIT(7));
+}
+
+/*
+ * Restore PHY state on resume. If MMSS clamps were engaged while the
+ * PHY stayed powered, a full PHY init is required; if a rev 2.0 PHY
+ * was powered off, only the power-on sequence is replayed. Always
+ * clears the phy_power_off flag.
+ */
+static void mdss_dsi_phy_power_on(
+	struct mdss_dsi_ctrl_pdata *ctrl, bool mmss_clamp)
+{
+	if (mmss_clamp && !ctrl->phy_power_off) {
+		mdss_dsi_phy_init(ctrl);
+	} else if (ctrl->phy_power_off &&
+		(ctrl->shared_data->phy_rev == DSI_PHY_REV_20)) {
+		mdss_dsi_8996_phy_power_on(ctrl);
+	}
+
+	ctrl->phy_power_off = false;
+}
+
+/*
+ * Full configuration of the 8996 (rev 2.0) DSI PHY from the panel's
+ * phy db: LDO ctrl, clk_en, per-lane cfg/timing/strength (validated
+ * lengths: 20/10/5 wrt lanecfg/strength/regulator), digital block
+ * reset, PLL source selection by topology, then lane enable via
+ * CMN_CTRL_0.
+ */
+static void mdss_dsi_8996_phy_config(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int j, off, ln, cnt, ln_off;
+	char *ip;
+	void __iomem *base;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c);
+
+	/* clk_en */
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
+
+	if (pd->lanecfg_len != 20) {
+		pr_err("%s: wrong lane cfg\n", __func__);
+		return;
+	}
+
+	if (pd->strength_len != 10) {
+		pr_err("%s: wrong strength ctrl\n", __func__);
+		return;
+	}
+
+	if (pd->regulator_len != 5) {
+		pr_err("%s: wrong regulator setting\n", __func__);
+		return;
+	}
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		/*
+		 * data lane offset from base: 0x100
+		 * data lane size: 0x80
+		 */
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* lane cfg, 4 * 5 */
+		cnt = 4;
+		ln_off = cnt * ln;
+		ip = &pd->lanecfg[ln_off];
+		off = 0x0;
+		for (j = 0; j < cnt; j++) {
+			MIPI_OUTP(base + off, *ip++);
+			off += 4;
+		}
+
+		/* test str */
+		MIPI_OUTP(base + 0x14, 0x0088);	/* fixed */
+
+		/* phy timing, 8 * 5 */
+		cnt = 8;
+		ln_off = cnt * ln;
+		ip = &pd->timing_8996[ln_off];
+		off = 0x18;
+		for (j = 0; j < cnt; j++, off += 4)
+			MIPI_OUTP(base + off, *ip++);
+
+		/* strength, 2 * 5 */
+		cnt = 2;
+		ln_off = cnt * ln;
+		ip = &pd->strength[ln_off];
+		off = 0x38;
+		for (j = 0; j < cnt; j++, off += 4)
+			MIPI_OUTP(base + off, *ip++);
+	}
+
+	wmb(); /* make sure registers committed */
+
+	/* reset digital block */
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_CTRL_1, 0x80);
+	udelay(100);
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_CTRL_1, 0x00);
+
+	/* choose PLL source according to split/dual topology */
+	if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
+		if (mdss_dsi_is_left_ctrl(ctrl))
+			mdss_dsi_8996_pll_source_from_left(ctrl);
+		else
+			mdss_dsi_8996_pll_source_from_right(ctrl);
+	} else {
+		if (mdss_dsi_is_right_ctrl(ctrl) &&
+			mdss_dsi_is_pll_src_pll0(ctrl->shared_data))
+			mdss_dsi_8996_pll_source_from_left(ctrl);
+		else
+			mdss_dsi_8996_pll_source_standalone(ctrl);
+	}
+
+	MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, 0x7f);
+	wmb(); /* make sure registers committed */
+}
+
+/*
+ * mdss_dsi_phy_regulator_ctrl() - enable/disable the DSI PHY regulator
+ * @ctrl:   DSI controller data
+ * @enable: true to enable the regulator, false to disable
+ *
+ * The PHY regulator is shared between the two DSI controllers in
+ * split/dual configurations, so all changes are serialized with
+ * phy_reg_lock and the other controller's is_phyreg_enabled /
+ * mmss_clamp state is consulted before reprogramming or shutting the
+ * regulator down.  The enable path dispatches on PHY/HW revision.
+ */
+static void mdss_dsi_phy_regulator_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
+	bool enable)
+{
+	struct mdss_dsi_ctrl_pdata *other_ctrl;
+	struct dsi_shared_data *sdata;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	sdata = ctrl->shared_data;
+	other_ctrl = mdss_dsi_get_other_ctrl(ctrl);
+
+	mutex_lock(&sdata->phy_reg_lock);
+	if (enable) {
+		if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
+			mdss_dsi_8996_phy_regulator_enable(ctrl);
+		} else {
+			switch (ctrl->shared_data->hw_rev) {
+			case MDSS_DSI_HW_REV_103:
+				mdss_dsi_20nm_phy_regulator_enable(ctrl);
+				break;
+			default:
+				/*
+				 * For dual dsi case, do not reconfigure dsi phy
+				 * regulator if the other dsi controller is still
+				 * active.
+				 */
+				if (!mdss_dsi_is_hw_config_dual(sdata) ||
+					(other_ctrl && (!other_ctrl->is_phyreg_enabled
+						|| other_ctrl->mmss_clamp)))
+					mdss_dsi_28nm_phy_regulator_enable(ctrl);
+				break;
+			}
+		}
+		ctrl->is_phyreg_enabled = 1;
+	} else {
+		/*
+		 * In split-dsi/dual-dsi configuration, the dsi phy regulator
+		 * should be turned off only when both the DSI devices are
+		 * going to be turned off since it is shared.
+		 */
+		if (mdss_dsi_is_hw_config_split(ctrl->shared_data) ||
+			mdss_dsi_is_hw_config_dual(ctrl->shared_data)) {
+			if (other_ctrl && !other_ctrl->is_phyreg_enabled)
+				mdss_dsi_phy_regulator_disable(ctrl);
+		} else {
+			mdss_dsi_phy_regulator_disable(ctrl);
+		}
+		ctrl->is_phyreg_enabled = 0;
+	}
+	mutex_unlock(&sdata->phy_reg_lock);
+}
+
+/*
+ * mdss_dsi_phy_ctrl() - configure or shut down the DSI PHY
+ * @ctrl:   DSI controller data
+ * @enable: true to program the PHY, false to shut it down
+ *
+ * The enable path dispatches on PHY/HW revision to the matching config
+ * routine.  The disable path honours the split-dsi constraint described
+ * in the inline comment: the right controller drives shutdown for both
+ * PHYs so the left one is never disabled while the right is active.
+ */
+static void mdss_dsi_phy_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
+{
+	struct mdss_dsi_ctrl_pdata *other_ctrl;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (enable) {
+
+		if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
+			mdss_dsi_8996_phy_config(ctrl);
+		} else {
+			switch (ctrl->shared_data->hw_rev) {
+			case MDSS_DSI_HW_REV_103:
+				mdss_dsi_20nm_phy_config(ctrl);
+				break;
+			default:
+				mdss_dsi_28nm_phy_config(ctrl);
+				break;
+			}
+		}
+	} else {
+		/*
+		 * In split-dsi configuration, the phy should be disabled for
+		 * the first controller only when the second controller is
+		 * disabled. This is true regardless of whether broadcast
+		 * mode is enabled.
+		 */
+		if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
+			other_ctrl = mdss_dsi_get_other_ctrl(ctrl);
+			if (mdss_dsi_is_right_ctrl(ctrl) && other_ctrl) {
+				mdss_dsi_phy_shutdown(other_ctrl);
+				mdss_dsi_phy_shutdown(ctrl);
+			}
+		} else {
+			mdss_dsi_phy_shutdown(ctrl);
+		}
+	}
+}
+
+/*
+ * mdss_dsi_phy_disable() - fully power down the DSI PHY
+ * @ctrl: DSI controller data
+ *
+ * Shuts the PHY down first and then drops this controller's vote on the
+ * shared PHY regulator, flushing the register writes before returning.
+ */
+void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	mdss_dsi_phy_ctrl(ctrl, false);
+	mdss_dsi_phy_regulator_ctrl(ctrl, false);
+
+	/*
+	 * Wait for the registers writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
+/*
+ * mdss_dsi_phy_init_sub() - bring up one controller's PHY
+ * @ctrl: DSI controller data
+ *
+ * Regulator must be enabled before the PHY itself is programmed;
+ * keep this call order.
+ */
+static void mdss_dsi_phy_init_sub(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	mdss_dsi_phy_regulator_ctrl(ctrl, true);
+	mdss_dsi_phy_ctrl(ctrl, true);
+}
+
+/*
+ * mdss_dsi_phy_init() - initialize the DSI PHY(s) for a controller
+ * @ctrl: DSI controller data
+ *
+ * In split display mode the clock master initializes its own PHY and
+ * then the clock slave's; a call made directly on the slave is a no-op
+ * so both PHYs are guaranteed to be up before the PLL is enabled.
+ */
+void mdss_dsi_phy_init(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+
+	/*
+	 * When operating in split display mode, make sure that both the PHY
+	 * blocks are initialized together prior to the PLL being enabled. This
+	 * is achieved by calling the phy_init function for the clk_slave from
+	 * the clock_master.
+	 */
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl))
+		return;
+
+	mdss_dsi_phy_init_sub(ctrl);
+
+	if (mdss_dsi_is_ctrl_clk_master(ctrl)) {
+		sctrl = mdss_dsi_get_ctrl_clk_slave();
+		if (sctrl)
+			mdss_dsi_phy_init_sub(sctrl);
+		else
+			pr_warn("%s: unable to get slave ctrl\n", __func__);
+	}
+}
+
+/*
+ * mdss_dsi_core_clk_deinit() - release DSI core clock handles
+ * @dev:   device the devm references were obtained against
+ * @sdata: shared DSI data holding the clock handles
+ *
+ * Releases handles in reverse order of acquisition in
+ * mdss_dsi_core_clk_init().  NULL handles (optional clocks that were
+ * never found) are skipped.
+ */
+void mdss_dsi_core_clk_deinit(struct device *dev, struct dsi_shared_data *sdata)
+{
+	if (sdata->mmss_misc_ahb_clk)
+		devm_clk_put(dev, sdata->mmss_misc_ahb_clk);
+	if (sdata->ext_pixel1_clk)
+		devm_clk_put(dev, sdata->ext_pixel1_clk);
+	if (sdata->ext_byte1_clk)
+		devm_clk_put(dev, sdata->ext_byte1_clk);
+	if (sdata->ext_pixel0_clk)
+		devm_clk_put(dev, sdata->ext_pixel0_clk);
+	if (sdata->ext_byte0_clk)
+		devm_clk_put(dev, sdata->ext_byte0_clk);
+	if (sdata->axi_clk)
+		devm_clk_put(dev, sdata->axi_clk);
+	if (sdata->ahb_clk)
+		devm_clk_put(dev, sdata->ahb_clk);
+	if (sdata->mdp_core_clk)
+		devm_clk_put(dev, sdata->mdp_core_clk);
+}
+
+/*
+ * mdss_dsi_clk_refresh() - recompute and apply DSI link clock rates
+ * @pdata:      panel data (embedded in the controller's pdata)
+ * @update_phy: also recalculate frame rate and PHY timing parameters
+ *
+ * Re-derives the clock dividers from the current panel timing, pushes
+ * the new byte and pixel clock rates to the clock manager (applied at
+ * next clock-ON), and optionally recomputes PHY timing for the new
+ * frame rate.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	int rc = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+							panel_data);
+	pinfo = &pdata->panel_info;
+
+	/*
+	 * NOTE(review): container_of() of a non-NULL pdata and the address
+	 * of an embedded member can never be NULL, so this check looks
+	 * purely defensive/dead — kept as-is.
+	 */
+	if (!ctrl_pdata || !pinfo) {
+		pr_err("%s: invalid ctrl data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (update_phy) {
+		pinfo->mipi.frame_rate = mdss_panel_calc_frame_rate(pinfo);
+		pr_debug("%s: new frame rate %d\n",
+				__func__, pinfo->mipi.frame_rate);
+	}
+
+	rc = mdss_dsi_clk_div_config(&pdata->panel_info,
+			pdata->panel_info.mipi.frame_rate);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n",
+								__func__);
+		return rc;
+	}
+	ctrl_pdata->refresh_clk_rate = false;
+	/* byte clock is the serial bit clock / 8 (one byte per lane-clock) */
+	ctrl_pdata->pclk_rate = pdata->panel_info.mipi.dsi_pclk_rate;
+	ctrl_pdata->byte_clk_rate = pdata->panel_info.clk_rate / 8;
+	pr_debug("%s ctrl_pdata->byte_clk_rate=%d ctrl_pdata->pclk_rate=%d\n",
+		__func__, ctrl_pdata->byte_clk_rate, ctrl_pdata->pclk_rate);
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+		MDSS_DSI_LINK_BYTE_CLK, ctrl_pdata->byte_clk_rate,
+		MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
+				__func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+		MDSS_DSI_LINK_PIX_CLK, ctrl_pdata->pclk_rate,
+		MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
+			__func__);
+		return rc;
+	}
+
+	if (update_phy) {
+		/* phy panel timing calaculation */
+		rc = mdss_dsi_phy_calc_timing_param(pinfo,
+				ctrl_pdata->shared_data->phy_rev,
+				pinfo->mipi.frame_rate);
+		if (rc) {
+			pr_err("Error in calculating phy timings\n");
+			return rc;
+		}
+		ctrl_pdata->update_phy_timing = false;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_core_clk_init() - acquire DSI core clock handles
+ * @pdev:  platform device whose dev owns the devm clock references
+ * @sdata: shared DSI data the clock handles are stored in
+ *
+ * The mdp_core, iface (ahb) and bus (axi) clocks are mandatory: failing
+ * to get any of them releases whatever was already acquired and returns
+ * an error.  The ext byte/pixel clocks and the mmss misc ahb clock are
+ * optional; a missing one is recorded as NULL and silently skipped.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_core_clk_init(struct platform_device *pdev,
+	struct dsi_shared_data *sdata)
+{
+	struct device *dev = NULL;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid pdev\n", __func__);
+		/*
+		 * Fix: previously this jumped to the error label with
+		 * rc still 0, so the function reported success for an
+		 * invalid pdev (and dev would have been NULL on the
+		 * cleanup path).  Fail explicitly instead, matching
+		 * mdss_dsi_shadow_clk_init().
+		 */
+		return -EINVAL;
+	}
+
+	dev = &pdev->dev;
+
+	/* Mandatory Clocks */
+	sdata->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+	if (IS_ERR(sdata->mdp_core_clk)) {
+		rc = PTR_ERR(sdata->mdp_core_clk);
+		pr_err("%s: Unable to get mdp core clk. rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	sdata->ahb_clk = devm_clk_get(dev, "iface_clk");
+	if (IS_ERR(sdata->ahb_clk)) {
+		rc = PTR_ERR(sdata->ahb_clk);
+		pr_err("%s: Unable to get mdss ahb clk. rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	sdata->axi_clk = devm_clk_get(dev, "bus_clk");
+	if (IS_ERR(sdata->axi_clk)) {
+		rc = PTR_ERR(sdata->axi_clk);
+		pr_err("%s: Unable to get axi bus clk. rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	/* Optional Clocks */
+	sdata->ext_byte0_clk = devm_clk_get(dev, "ext_byte0_clk");
+	if (IS_ERR(sdata->ext_byte0_clk)) {
+		pr_debug("%s: unable to get byte0 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_byte0_clk = NULL;
+	}
+
+	sdata->ext_pixel0_clk = devm_clk_get(dev, "ext_pixel0_clk");
+	if (IS_ERR(sdata->ext_pixel0_clk)) {
+		pr_debug("%s: unable to get pixel0 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_pixel0_clk = NULL;
+	}
+
+	sdata->ext_byte1_clk = devm_clk_get(dev, "ext_byte1_clk");
+	if (IS_ERR(sdata->ext_byte1_clk)) {
+		pr_debug("%s: unable to get byte1 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_byte1_clk = NULL;
+	}
+
+	sdata->ext_pixel1_clk = devm_clk_get(dev, "ext_pixel1_clk");
+	if (IS_ERR(sdata->ext_pixel1_clk)) {
+		pr_debug("%s: unable to get pixel1 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_pixel1_clk = NULL;
+	}
+
+	sdata->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
+	if (IS_ERR(sdata->mmss_misc_ahb_clk)) {
+		sdata->mmss_misc_ahb_clk = NULL;
+		pr_debug("%s: Unable to get mmss misc ahb clk\n",
+			__func__);
+	}
+
+error:
+	if (rc)
+		mdss_dsi_core_clk_deinit(dev, sdata);
+	return rc;
+}
+
+/*
+ * mdss_dsi_link_clk_deinit() - release per-controller DSI link clock handles
+ * @dev:  device the devm references were obtained against
+ * @ctrl: DSI controller holding the clock handles
+ *
+ * Counterpart of mdss_dsi_link_clk_init(); NULL handles (optional
+ * clocks that were never found) are skipped.
+ */
+void mdss_dsi_link_clk_deinit(struct device *dev,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->vco_dummy_clk)
+		devm_clk_put(dev, ctrl->vco_dummy_clk);
+	if (ctrl->pixel_clk_rcg)
+		devm_clk_put(dev, ctrl->pixel_clk_rcg);
+	if (ctrl->byte_clk_rcg)
+		devm_clk_put(dev, ctrl->byte_clk_rcg);
+	if (ctrl->byte_clk)
+		devm_clk_put(dev, ctrl->byte_clk);
+	if (ctrl->esc_clk)
+		devm_clk_put(dev, ctrl->esc_clk);
+	if (ctrl->pixel_clk)
+		devm_clk_put(dev, ctrl->pixel_clk);
+}
+
+/*
+ * mdss_dsi_link_clk_init() - acquire per-controller DSI link clock handles
+ * @pdev: platform device whose dev owns the devm clock references
+ * @ctrl: DSI controller the clock handles are stored in
+ *
+ * byte, pixel and esc (core) clocks are mandatory; the rcg and vco
+ * dummy clocks are optional and recorded as NULL when absent.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_link_clk_init(struct platform_device *pdev,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct device *dev = NULL;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid pdev\n", __func__);
+		/*
+		 * Fix: previously this jumped to the error label with
+		 * rc still 0, so the function reported success for an
+		 * invalid pdev (and dev would have been NULL on the
+		 * cleanup path).  Fail explicitly instead, matching
+		 * mdss_dsi_shadow_clk_init().
+		 */
+		return -EINVAL;
+	}
+
+	dev = &pdev->dev;
+
+	/* Mandatory Clocks */
+	ctrl->byte_clk = devm_clk_get(dev, "byte_clk");
+	if (IS_ERR(ctrl->byte_clk)) {
+		rc = PTR_ERR(ctrl->byte_clk);
+		pr_err("%s: can't find dsi_byte_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->byte_clk = NULL;
+		goto error;
+	}
+
+	ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk");
+	if (IS_ERR(ctrl->pixel_clk)) {
+		rc = PTR_ERR(ctrl->pixel_clk);
+		pr_err("%s: can't find dsi_pixel_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->pixel_clk = NULL;
+		goto error;
+	}
+
+	ctrl->esc_clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(ctrl->esc_clk)) {
+		rc = PTR_ERR(ctrl->esc_clk);
+		pr_err("%s: can't find dsi_esc_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->esc_clk = NULL;
+		goto error;
+	}
+
+	/* Optional Clocks */
+	ctrl->byte_clk_rcg = devm_clk_get(dev, "byte_clk_rcg");
+	if (IS_ERR(ctrl->byte_clk_rcg)) {
+		pr_debug("%s: can't find byte clk rcg. rc=%d\n", __func__, rc);
+		ctrl->byte_clk_rcg = NULL;
+	}
+
+	ctrl->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg");
+	if (IS_ERR(ctrl->pixel_clk_rcg)) {
+		pr_debug("%s: can't find pixel clk rcg. rc=%d\n", __func__, rc);
+		ctrl->pixel_clk_rcg = NULL;
+	}
+
+	ctrl->vco_dummy_clk = devm_clk_get(dev, "pll_vco_dummy_clk");
+	if (IS_ERR(ctrl->vco_dummy_clk)) {
+		pr_debug("%s: can't find vco dummy clk. rc=%d\n", __func__, rc);
+		ctrl->vco_dummy_clk = NULL;
+	}
+
+error:
+	if (rc)
+		mdss_dsi_link_clk_deinit(dev, ctrl);
+	return rc;
+}
+
+/*
+ * mdss_dsi_shadow_clk_deinit() - release dynamic-refresh (shadow) clock handles
+ * @dev:  device the devm references were obtained against
+ * @ctrl: DSI controller holding the clock handles
+ *
+ * Counterpart of mdss_dsi_shadow_clk_init(); NULL handles are skipped.
+ */
+void mdss_dsi_shadow_clk_deinit(struct device *dev,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->mux_byte_clk)
+		devm_clk_put(dev, ctrl->mux_byte_clk);
+	if (ctrl->mux_pixel_clk)
+		devm_clk_put(dev, ctrl->mux_pixel_clk);
+	if (ctrl->pll_byte_clk)
+		devm_clk_put(dev, ctrl->pll_byte_clk);
+	if (ctrl->pll_pixel_clk)
+		devm_clk_put(dev, ctrl->pll_pixel_clk);
+	if (ctrl->shadow_byte_clk)
+		devm_clk_put(dev, ctrl->shadow_byte_clk);
+	if (ctrl->shadow_pixel_clk)
+		devm_clk_put(dev, ctrl->shadow_pixel_clk);
+}
+
+/*
+ * mdss_dsi_shadow_clk_init() - acquire shadow/mux clock handles used for
+ * dynamic refresh rate switching
+ * @pdev: platform device whose dev owns the devm clock references
+ * @ctrl: DSI controller the clock handles are stored in
+ *
+ * All six handles (byte/pixel mux, pll source, shadow source) are
+ * mandatory here; any failure releases what was acquired and returns
+ * the error.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_shadow_clk_init(struct platform_device *pdev,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct device *dev = NULL;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid pdev\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = &pdev->dev;
+	ctrl->mux_byte_clk = devm_clk_get(dev, "pll_byte_clk_mux");
+	if (IS_ERR(ctrl->mux_byte_clk)) {
+		rc = PTR_ERR(ctrl->mux_byte_clk);
+		pr_err("%s: can't find mux_byte_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->mux_byte_clk = NULL;
+		goto error;
+	}
+
+	ctrl->mux_pixel_clk = devm_clk_get(dev, "pll_pixel_clk_mux");
+	if (IS_ERR(ctrl->mux_pixel_clk)) {
+		rc = PTR_ERR(ctrl->mux_pixel_clk);
+		pr_err("%s: can't find mdss_mux_pixel_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->mux_pixel_clk = NULL;
+		goto error;
+	}
+
+	ctrl->pll_byte_clk = devm_clk_get(dev, "pll_byte_clk_src");
+	if (IS_ERR(ctrl->pll_byte_clk)) {
+		rc = PTR_ERR(ctrl->pll_byte_clk);
+		pr_err("%s: can't find pll_byte_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->pll_byte_clk = NULL;
+		goto error;
+	}
+
+	ctrl->pll_pixel_clk = devm_clk_get(dev, "pll_pixel_clk_src");
+	if (IS_ERR(ctrl->pll_pixel_clk)) {
+		rc = PTR_ERR(ctrl->pll_pixel_clk);
+		pr_err("%s: can't find pll_pixel_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->pll_pixel_clk = NULL;
+		goto error;
+	}
+
+	ctrl->shadow_byte_clk = devm_clk_get(dev, "pll_shadow_byte_clk_src");
+	if (IS_ERR(ctrl->shadow_byte_clk)) {
+		rc = PTR_ERR(ctrl->shadow_byte_clk);
+		pr_err("%s: can't find shadow_byte_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->shadow_byte_clk = NULL;
+		goto error;
+	}
+
+	ctrl->shadow_pixel_clk = devm_clk_get(dev, "pll_shadow_pixel_clk_src");
+	if (IS_ERR(ctrl->shadow_pixel_clk)) {
+		rc = PTR_ERR(ctrl->shadow_pixel_clk);
+		pr_err("%s: can't find shadow_pixel_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->shadow_pixel_clk = NULL;
+		goto error;
+	}
+
+error:
+	if (rc)
+		mdss_dsi_shadow_clk_deinit(dev, ctrl);
+	return rc;
+}
+
+/*
+ * is_diff_frame_rate() - does @frame_rate differ from the rate in effect?
+ * @panel_info: panel timing information
+ * @frame_rate: candidate frame rate to compare against
+ *
+ * When dynamic fps is enabled and a current fps has been recorded,
+ * compare against that; otherwise compare against the static MIPI
+ * frame rate from the panel configuration.
+ */
+bool is_diff_frame_rate(struct mdss_panel_info *panel_info,
+		u32 frame_rate)
+{
+	u32 active_rate;
+
+	active_rate = (panel_info->dynamic_fps && panel_info->current_fps) ?
+			panel_info->current_fps : panel_info->mipi.frame_rate;
+
+	return frame_rate != active_rate;
+}
+
+/*
+ * mdss_dsi_clk_div_config() - derive DSI bit clock and pixel clock rates
+ * @panel_info: panel timing information (clk_rate and dsi_pclk_rate are
+ *              updated in place)
+ * @frame_rate: target frame rate in Hz
+ *
+ * Computes the serial link clock from htotal * vtotal * fps * bpp * 8
+ * divided across the active data lanes (recomputed only when a refresh
+ * was requested, the frame rate changed, or no rate is set yet), then
+ * derives the pixel clock from it.  Out-of-range results fall back to
+ * fixed defaults (454 MHz link, 35 MHz pclk — presumably safe panel
+ * defaults; TODO confirm their origin).
+ *
+ * Return: always 0.
+ */
+int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
+			    int frame_rate)
+{
+	struct mdss_panel_data *pdata  = container_of(panel_info,
+			struct mdss_panel_data, panel_info);
+	struct  mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(pdata,
+			struct mdss_dsi_ctrl_pdata, panel_data);
+	u64 h_period, v_period, clk_rate;
+	u32 dsi_pclk_rate;
+	u8 lanes = 0, bpp;
+
+	/* count the active data lanes */
+	if (panel_info->mipi.data_lane3)
+		lanes += 1;
+	if (panel_info->mipi.data_lane2)
+		lanes += 1;
+	if (panel_info->mipi.data_lane1)
+		lanes += 1;
+	if (panel_info->mipi.data_lane0)
+		lanes += 1;
+
+	/* bytes per pixel for the destination format */
+	switch (panel_info->mipi.dst_format) {
+	case DSI_CMD_DST_FORMAT_RGB888:
+	case DSI_VIDEO_DST_FORMAT_RGB888:
+	case DSI_VIDEO_DST_FORMAT_RGB666_LOOSE:
+		bpp = 3;
+		break;
+	case DSI_CMD_DST_FORMAT_RGB565:
+	case DSI_VIDEO_DST_FORMAT_RGB565:
+		bpp = 2;
+		break;
+	default:
+		bpp = 3;	/* Default format set to RGB888 */
+		break;
+	}
+
+	h_period = mdss_panel_get_htotal(panel_info, true);
+	v_period = mdss_panel_get_vtotal(panel_info);
+
+	if (ctrl_pdata->refresh_clk_rate || is_diff_frame_rate(panel_info,
+			frame_rate) || (!panel_info->clk_rate)) {
+		if (lanes > 0) {
+			panel_info->clk_rate = h_period * v_period * frame_rate
+				* bpp * 8;
+			do_div(panel_info->clk_rate, lanes);
+		} else {
+			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+			panel_info->clk_rate =
+				h_period * v_period * frame_rate * bpp * 8;
+		}
+	}
+
+	if (panel_info->clk_rate == 0)
+		panel_info->clk_rate = 454000000;
+
+	/* pclk = link bit clock / (8 * bpp) * lanes */
+	clk_rate = panel_info->clk_rate;
+	do_div(clk_rate, 8 * bpp);
+	dsi_pclk_rate = (u32) clk_rate * lanes;
+
+	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000))
+		dsi_pclk_rate = 35000000;
+	panel_info->mipi.dsi_pclk_rate = dsi_pclk_rate;
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_is_ulps_req_valid() - decide whether a ULPS entry/exit request
+ * should be honoured for this controller.
+ * @ctrl:   DSI controller data
+ * @enable: 1 for a ULPS entry request, 0 for exit
+ *
+ * Rejects the request when the ULPS feature is disabled, during the
+ * continuous-splash handoff, or (with the exceptions documented inline)
+ * before the panel — and in split-DSI, both panels — are initialized.
+ *
+ * Return: true if the request should proceed, false to skip it.
+ */
+static bool mdss_dsi_is_ulps_req_valid(struct mdss_dsi_ctrl_pdata *ctrl,
+		int enable)
+{
+	struct mdss_dsi_ctrl_pdata *octrl = NULL;
+	struct mdss_panel_data *pdata = &ctrl->panel_data;
+	struct mdss_panel_info *pinfo = &pdata->panel_info;
+
+	pr_debug("%s: checking ulps req validity for ctrl%d\n",
+		__func__, ctrl->ndx);
+
+	if (!mdss_dsi_ulps_feature_enabled(pdata) &&
+		!pinfo->ulps_suspend_enabled) {
+		pr_debug("%s: ULPS feature is not enabled\n", __func__);
+		return false;
+	}
+
+	/*
+	 * No need to enter ULPS when transitioning from splash screen to
+	 * boot animation since it is expected that the clocks would be turned
+	 * right back on.
+	 */
+	if (enable && pinfo->cont_splash_enabled) {
+		pr_debug("%s: skip ULPS config with splash screen enabled\n",
+			__func__);
+		return false;
+	}
+
+	/*
+	 * No need to enable ULPS if panel is not yet initialized.
+	 * However, this should be allowed in following usecases:
+	 *   1. If ULPS during suspend feature is enabled, where we
+	 *      configure the lanes in ULPS after turning off the panel.
+	 *   2. When coming out of idle PC with clamps enabled, where we
+	 *      transition the controller HW state back to ULPS prior to
+	 *      disabling ULPS.
+	 */
+	if (enable && !ctrl->mmss_clamp &&
+		!(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT) &&
+		!pdata->panel_info.ulps_suspend_enabled) {
+		pr_debug("%s: panel not yet initialized\n", __func__);
+		return false;
+	}
+
+	/*
+	 * For split-DSI usecase, wait till both controllers are initialized.
+	 * The same exceptions as above are applicable here too.
+	 */
+	if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
+		octrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (enable && !ctrl->mmss_clamp && octrl &&
+			!(octrl->ctrl_state & CTRL_STATE_PANEL_INIT) &&
+			!pdata->panel_info.ulps_suspend_enabled) {
+			pr_debug("%s: split-DSI, other ctrl not ready yet\n",
+				__func__);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * mdss_dsi_ulps_config() - Program DSI lanes to enter/exit ULPS mode
+ * @ctrl: pointer to DSI controller structure
+ * @enable: 1 to enter ULPS, 0 to exit ULPS
+ *
+ * This function executes the necessary programming sequence to enter/exit
+ * DSI Ultra-Low Power State (ULPS). This function assumes that the link and
+ * core clocks are already on.
+ *
+ * Return: 0 on success (including a skipped/no-op request), negative
+ * errno on failure.
+ */
+static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
+	int enable)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 lane_status = 0;
+	u32 active_lanes = 0;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl->panel_data;
+	if (!pdata) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+	mipi = &pinfo->mipi;
+
+	if (!mdss_dsi_is_ulps_req_valid(ctrl, enable)) {
+		/* fix: corrected "skiping" typo in the debug message */
+		pr_debug("%s: skipping ULPS config for ctrl%d, enable=%d\n",
+			__func__, ctrl->ndx, enable);
+		return 0;
+	}
+
+	/* clock lane will always be programmed for ulps */
+	active_lanes = BIT(4);
+	/*
+	 * make a note of all active data lanes for which ulps entry/exit
+	 * is needed
+	 */
+	if (mipi->data_lane0)
+		active_lanes |= BIT(0);
+	if (mipi->data_lane1)
+		active_lanes |= BIT(1);
+	if (mipi->data_lane2)
+		active_lanes |= BIT(2);
+	if (mipi->data_lane3)
+		active_lanes |= BIT(3);
+
+	pr_debug("%s: configuring ulps (%s) for ctrl%d, active lanes=0x%08x,clamps=%s\n",
+		__func__, (enable ? "on" : "off"), ctrl->ndx,
+		active_lanes, ctrl->mmss_clamp ? "enabled" : "disabled");
+
+	if (enable && !ctrl->ulps) {
+		/*
+		 * Ensure that the lanes are idle prior to placing a ULPS entry
+		 * request. This is needed to ensure that there is no overlap
+		 * between any HS or LP commands being sent out on the lane and
+		 * a potential ULPS entry request.
+		 *
+		 * This check needs to be avoided when we are resuming from idle
+		 * power collapse and just restoring the controller state to
+		 * ULPS with the clamps still in place.
+		 */
+		if (!ctrl->mmss_clamp) {
+			ret = mdss_dsi_wait_for_lane_idle(ctrl);
+			if (ret) {
+				pr_warn("%s: lanes not idle, skip ulps\n",
+					__func__);
+				ret = 0;
+				goto error;
+			}
+		}
+
+		/*
+		 * ULPS Entry Request.
+		 * Wait for a short duration to ensure that the lanes
+		 * enter ULP state.
+		 */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes);
+		usleep_range(100, 110);
+
+		/* Check to make sure that all active data lanes are in ULPS */
+		lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
+		if (lane_status & (active_lanes << 8)) {
+			pr_err("%s: ULPS entry req failed for ctrl%d. Lane status=0x%08x\n",
+				__func__, ctrl->ndx, lane_status);
+			ret = -EINVAL;
+			goto error;
+		}
+
+		ctrl->ulps = true;
+	} else if (!enable && ctrl->ulps) {
+		/*
+		 * Clear out any phy errors prior to exiting ULPS
+		 * This fixes certain instances where phy does not exit
+		 * ULPS cleanly. Also, do not print error during such cases.
+		 */
+		mdss_dsi_dln0_phy_err(ctrl, false);
+
+		/*
+		 * ULPS Exit Request
+		 * Hardware requirement is to wait for at least 1ms
+		 */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 8);
+		usleep_range(1000, 1100);
+
+		/*
+		 * Sometimes when exiting ULPS, it is possible that some DSI
+		 * lanes are not in the stop state which could lead to DSI
+		 * commands not going through. To avoid this, force the lanes
+		 * to be in stop state.
+		 */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 16);
+		wmb(); /* ensure lanes are put to stop state */
+
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, 0x0);
+		wmb(); /* ensure lanes are in proper state */
+
+		/*
+		 * Wait for a short duration before enabling
+		 * data transmission
+		 */
+		usleep_range(100, 110);
+
+		lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
+		ctrl->ulps = false;
+	} else {
+		pr_debug("%s: No change requested: %s -> %s\n", __func__,
+			ctrl->ulps ? "enabled" : "disabled",
+			enable ? "enabled" : "disabled");
+	}
+
+	pr_debug("%s: DSI lane status = 0x%08x. Ulps %s\n", __func__,
+		lane_status, enable ? "enabled" : "disabled");
+
+error:
+	return ret;
+}
+
+/**
+ * mdss_dsi_clamp_ctrl() - Program DSI clamps for supporting power collapse
+ * @ctrl: pointer to DSI controller structure
+ * @enable: 1 to enable clamps, 0 to disable clamps
+ *
+ * For idle-screen usecases with command mode panels, MDSS can be power
+ * collapsed. However, DSI phy needs to remain on. To avoid any mismatch
+ * between the DSI controller state, DSI phy needs to be clamped before
+ * power collapsing. This function executes the required programming
+ * sequence to configure these DSI clamps. This function should only be called
+ * when the DSI link clocks are disabled.
+ *
+ * Return: 0 on success (including a no-op request), negative errno on
+ * invalid input.
+ */
+static int mdss_dsi_clamp_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+{
+	struct mipi_panel_info *mipi = NULL;
+	u32 clamp_reg, regval = 0;
+	u32 clamp_reg_off, phyrst_reg_off;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctrl->mmss_misc_io.base) {
+		pr_err("%s: mmss_misc_io not mapped\n", __func__);
+		return -EINVAL;
+	}
+
+	clamp_reg_off = ctrl->shared_data->ulps_clamp_ctrl_off;
+	phyrst_reg_off = ctrl->shared_data->ulps_phyrst_ctrl_off;
+	mipi = &ctrl->panel_data.panel_info.mipi;
+
+	/*
+	 * Build the clamp bit mask: each lane has a pair of bits, the
+	 * higher one is the clamp enable and the lower one the ULPS
+	 * request value to clamp (set only when the lanes are in ULPS).
+	 */
+	/* clock lane will always be clamped */
+	clamp_reg = BIT(9);
+	if (ctrl->ulps)
+		clamp_reg |= BIT(8);
+	/* make a note of all active data lanes which need to be clamped */
+	if (mipi->data_lane0) {
+		clamp_reg |= BIT(7);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(6);
+	}
+	if (mipi->data_lane1) {
+		clamp_reg |= BIT(5);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(4);
+	}
+	if (mipi->data_lane2) {
+		clamp_reg |= BIT(3);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(2);
+	}
+	if (mipi->data_lane3) {
+		clamp_reg |= BIT(1);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(0);
+	}
+	pr_debug("%s: called for ctrl%d, enable=%d, clamp_reg=0x%08x\n",
+		__func__, ctrl->ndx, enable, clamp_reg);
+	if (enable && !ctrl->mmss_clamp) {
+		regval = MIPI_INP(ctrl->mmss_misc_io.base + clamp_reg_off);
+		/* Enable MMSS DSI Clamps */
+		if (ctrl->ndx == DSI_CTRL_0) {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | clamp_reg);
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | (clamp_reg | BIT(15)));
+		} else if (ctrl->ndx == DSI_CTRL_1) {
+			/* DSI1's clamp bits sit in the upper half word */
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | (clamp_reg << 16));
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | ((clamp_reg << 16) | BIT(31)));
+		}
+		/* update clamp ctrl before setting phy reset disable */
+		wmb();
+
+		/*
+		 * This register write ensures that DSI PHY will not be
+		 * reset when mdss ahb clock reset is asserted while coming
+		 * out of power collapse
+		 */
+		if (IS_MDSS_MAJOR_MINOR_SAME(ctrl->shared_data->hw_rev,
+			MDSS_DSI_HW_REV_104) &&
+			(MDSS_GET_STEP(ctrl->shared_data->hw_rev) !=
+			MDSS_DSI_HW_REV_STEP_2)) {
+
+			regval = MIPI_INP(ctrl->mmss_misc_io.base +
+				clamp_reg_off);
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | BIT(30));
+		} else {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + phyrst_reg_off,
+				0x1);
+		}
+		/* make sure that clamp ctrl is updated before disable call */
+		wmb();
+		ctrl->mmss_clamp = true;
+	} else if (!enable && ctrl->mmss_clamp) {
+		if (IS_MDSS_MAJOR_MINOR_SAME(ctrl->shared_data->hw_rev,
+			MDSS_DSI_HW_REV_104) &&
+			(MDSS_GET_STEP(ctrl->shared_data->hw_rev) !=
+			MDSS_DSI_HW_REV_STEP_2)) {
+
+			regval = MIPI_INP(ctrl->mmss_misc_io.base +
+				clamp_reg_off);
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval & ~BIT(30));
+		} else {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + phyrst_reg_off,
+				0x0);
+		}
+		/* update clamp ctrl before unsetting phy reset disable */
+		wmb();
+
+		regval = MIPI_INP(ctrl->mmss_misc_io.base + clamp_reg_off);
+		/* Disable MMSS DSI Clamps */
+		if (ctrl->ndx == DSI_CTRL_0)
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval & ~(clamp_reg | BIT(15)));
+		else if (ctrl->ndx == DSI_CTRL_1)
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval & ~((clamp_reg << 16) | BIT(31)));
+		/* make sure that clamp ctrl is updated before enable call */
+		wmb();
+		ctrl->mmss_clamp = false;
+	} else {
+		pr_debug("%s: No change requested: %s -> %s\n", __func__,
+			ctrl->mmss_clamp ? "enabled" : "disabled",
+			enable ? "enabled" : "disabled");
+	}
+
+	return 0;
+}
+
+/* Serializes all DSI clock state transitions across both controllers. */
+DEFINE_MUTEX(dsi_clk_mutex);
+
+/*
+ * mdss_dsi_clk_ctrl() - request a DSI clock state change
+ * @ctrl:       controller the request is for
+ * @clk_handle: DSI or MDP clock-manager handle the vote is cast against
+ * @clk_type:   which clocks (core/link/all) the request covers
+ * @clk_state:  requested state (ON / EARLY_GATE / OFF)
+ *
+ * In sync-wait-broadcast or split (master/slave) configurations, link
+ * clock changes on one controller also require the other controller's
+ * clocks to be on; see the vote-count protocol described in the inline
+ * comments.  All of this runs under dsi_clk_mutex.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
+	enum mdss_dsi_clk_type clk_type, enum mdss_dsi_clk_state clk_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+	int i, *vote_cnt;
+
+	void *m_clk_handle;
+	bool is_ecg = false;
+	int state = MDSS_DSI_CLK_OFF;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid arg\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_clk_mutex);
+	/*
+	 * In sync_wait_broadcast mode, we need to enable clocks
+	 * for the other controller as well when enabling clocks
+	 * for the trigger controller.
+	 *
+	 * If sync wait_broadcase mode is not enabled, but if split display
+	 * mode is enabled where both DSI controller's branch clocks are
+	 * sourced out of a single PLL, then we need to ensure that the
+	 * controller associated with that PLL also has it's clocks turned
+	 * on. This is required to make sure that if that controller's PLL/PHY
+	 * are clamped then they can be removed.
+	 */
+	if (mdss_dsi_sync_wait_trigger(ctrl)) {
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (!mctrl)
+			pr_warn("%s: Unable to get other control\n", __func__);
+	} else if (mdss_dsi_is_ctrl_clk_slave(ctrl)) {
+		mctrl = mdss_dsi_get_ctrl_clk_master();
+		if (!mctrl)
+			pr_warn("%s: Unable to get clk master control\n",
+				__func__);
+	}
+
+	/*
+	 * it should add and remove extra votes based on voting clients to avoid
+	 * removal of legitimate vote from DSI client.
+	 */
+	if (mctrl && (clk_handle == ctrl->dsi_clk_handle)) {
+		m_clk_handle = mctrl->dsi_clk_handle;
+		vote_cnt = &mctrl->m_dsi_vote_cnt;
+	} else if (mctrl) {
+		m_clk_handle = mctrl->mdp_clk_handle;
+		vote_cnt = &mctrl->m_mdp_vote_cnt;
+	}
+
+	/*
+	 * When DSI is used in split mode, the link clock for master controller
+	 * has to be turned on first before the link clock for slave can be
+	 * turned on. In case the current controller is a slave, an ON vote is
+	 * cast for master before changing the state of the slave clock. After
+	 * the state change for slave, the ON votes will be removed depending on
+	 * the new state.
+	 */
+	pr_debug("%s: DSI_%d: clk = %d, state = %d, caller = %pS, mctrl=%d\n",
+		 __func__, ctrl->ndx, clk_type, clk_state,
+		 __builtin_return_address(0), mctrl ? 1 : 0);
+	if (mctrl && (clk_type & MDSS_DSI_LINK_CLK)) {
+		if (clk_state != MDSS_DSI_CLK_ON) {
+			/* preserve clk state; do not turn off forcefully */
+			is_ecg = is_dsi_clk_in_ecg_state(m_clk_handle);
+			if (is_ecg)
+				state = MDSS_DSI_CLK_EARLY_GATE;
+		}
+
+		rc = mdss_dsi_clk_req_state(m_clk_handle,
+		      MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON, mctrl->ndx);
+		if (rc) {
+			pr_err("%s: failed to turn on mctrl clocks, rc=%d\n",
+			       __func__, rc);
+			goto error;
+		}
+		(*vote_cnt)++;
+	}
+
+	rc = mdss_dsi_clk_req_state(clk_handle, clk_type, clk_state, ctrl->ndx);
+	if (rc) {
+		pr_err("%s: failed set clk state, rc = %d\n", __func__, rc);
+		goto error;
+	}
+
+	if (mctrl && (clk_type & MDSS_DSI_LINK_CLK) &&
+	    clk_state != MDSS_DSI_CLK_ON) {
+
+		/*
+		 * In case of split dsi, an ON vote is cast for all state change
+		 * requests. If the current state is ON, then the vote would not
+		 * be released.
+		 *
+		 * If the current state is ECG, there is one possible way to
+		 * transition in to this state, which is ON -> ECG. In this case
+		 * two votes will be removed because one was cast at ON and
+		 * other when entering ECG.
+		 *
+		 * If the current state is OFF, it could have been due to two
+		 * possible transitions in to OFF state.
+		 * 1. ON -> OFF: In this case two votes were cast by the
+		 *    slave controller, one during ON (which is not
+		 *    removed) and one during OFF. So we need to remove two
+		 *    votes.
+		 * 2. ECG -> OFF: In this case there is only one vote
+		 *    for ON, since the previous ECG state must have
+		 *    removed two votes to let clocks turn off.
+		 *
+		 * To satisfy the above requirement, vote_cnt keeps track of
+		 * the number of ON votes for master requested by slave. For
+		 * every OFF/ECG state request, Either 2 or vote_cnt number of
+		 * votes are removed depending on which is lower.
+		 */
+		for (i = 0; (i < *vote_cnt && i < 2); i++) {
+			rc = mdss_dsi_clk_req_state(m_clk_handle,
+				MDSS_DSI_ALL_CLKS, state, mctrl->ndx);
+			if (rc) {
+				pr_err("%s: failed to set mctrl clk state, rc = %d\n",
+				       __func__, rc);
+				goto error;
+			}
+		}
+		(*vote_cnt) -= i;
+		pr_debug("%s: ctrl=%d, vote_cnt=%d dsi_vote_cnt=%d mdp_vote_cnt:%d\n",
+			 __func__, ctrl->ndx, *vote_cnt, mctrl->m_dsi_vote_cnt,
+			 mctrl->m_mdp_vote_cnt);
+	}
+
+error:
+	mutex_unlock(&dsi_clk_mutex);
+	return rc;
+}
+
+/*
+ * mdss_dsi_pre_clkoff_cb() - clock-manager callback run before clocks
+ * are turned off
+ * @priv:      DSI controller (mdss_dsi_ctrl_pdata)
+ * @clk:       which clocks are about to be turned off
+ * @new_state: state the clocks are transitioning to
+ *
+ * Before the link clocks go off, the lanes are placed in ULPS when the
+ * feature configuration calls for it.  Before the core clocks go off,
+ * the PHY is powered down and the MMSS clamps engaged for idle power
+ * collapse / ULPS-during-suspend, otherwise ULPS is explicitly exited.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_pre_clkoff_cb(void *priv,
+			   enum mdss_dsi_clk_type clk,
+			   enum mdss_dsi_clk_state new_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	struct mdss_panel_data *pdata = NULL;
+
+	pdata = &ctrl->panel_data;
+
+	if ((clk & MDSS_DSI_LINK_CLK) && (new_state == MDSS_DSI_CLK_OFF)) {
+		/*
+		 * If ULPS feature is enabled, enter ULPS first.
+		 * However, when blanking the panel, we should enter ULPS
+		 * only if ULPS during suspend feature is enabled.
+		 */
+		if (!(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT)) {
+			/*
+			 * Fix: capture the return value here; it was
+			 * previously discarded, so the error report
+			 * below could never fire for this path.
+			 */
+			if (pdata->panel_info.ulps_suspend_enabled)
+				rc = mdss_dsi_ulps_config(ctrl, 1);
+		} else if (mdss_dsi_ulps_feature_enabled(pdata)) {
+			rc = mdss_dsi_ulps_config(ctrl, 1);
+		}
+		if (rc) {
+			pr_err("%s: failed enable ulps, rc = %d\n",
+			       __func__, rc);
+		}
+	}
+
+	if ((clk & MDSS_DSI_CORE_CLK) && (new_state == MDSS_DSI_CLK_OFF)) {
+		/*
+		 * Enable DSI clamps only if entering idle power collapse or
+		 * when ULPS during suspend is enabled.
+		 */
+		if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) ||
+			pdata->panel_info.ulps_suspend_enabled) {
+			mdss_dsi_phy_power_off(ctrl);
+			rc = mdss_dsi_clamp_ctrl(ctrl, 1);
+			if (rc)
+				pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
+					__func__, rc);
+		} else {
+			/*
+			 * Make sure that controller is not in ULPS state when
+			 * the DSI link is not active.
+			 */
+			rc = mdss_dsi_ulps_config(ctrl, 0);
+			if (rc)
+				pr_err("%s: failed to disable ulps. rc=%d\n",
+					__func__, rc);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_post_clkon_cb() - clock-manager callback run after clocks
+ * are turned on
+ * @priv:       DSI controller (mdss_dsi_ctrl_pdata)
+ * @clk:        which clocks were just turned on
+ * @curr_state: state the clocks transitioned to
+ *
+ * After core clocks come on, the controller HW state is restored when
+ * resuming from power collapse: re-run controller setup if clamps were
+ * engaged, resynchronize the controller's ULPS state with the PHY,
+ * release the clamps, and power the PHY back on.  After link clocks
+ * come on, exit ULPS if the lanes were left in it.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_dsi_post_clkon_cb(void *priv,
+			   enum mdss_dsi_clk_type clk,
+			   enum mdss_dsi_clk_state curr_state)
+{
+	int rc = 0;
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	bool mmss_clamp;
+
+	pdata = &ctrl->panel_data;
+
+	if (clk & MDSS_DSI_CORE_CLK) {
+		mmss_clamp = ctrl->mmss_clamp;
+		/*
+		 * controller setup is needed if coming out of idle
+		 * power collapse with clamps enabled.
+		 */
+		if (mmss_clamp)
+			mdss_dsi_ctrl_setup(ctrl);
+
+		if (ctrl->ulps) {
+			/*
+			 * ULPS Entry Request. This is needed if the lanes were
+			 * in ULPS prior to power collapse, since after
+			 * power collapse and reset, the DSI controller resets
+			 * back to idle state and not ULPS. This ulps entry
+			 * request will transition the state of the DSI
+			 * controller to ULPS which will match the state of the
+			 * DSI phy. This needs to be done prior to disabling
+			 * the DSI clamps.
+			 *
+			 * Also, reset the ulps flag so that ulps_config
+			 * function would reconfigure the controller state to
+			 * ULPS.
+			 */
+			ctrl->ulps = false;
+			rc = mdss_dsi_ulps_config(ctrl, 1);
+			if (rc) {
+				pr_err("%s: Failed to enter ULPS. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+		}
+
+		rc = mdss_dsi_clamp_ctrl(ctrl, 0);
+		if (rc) {
+			pr_err("%s: Failed to disable dsi clamps. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+
+		/*
+		 * Phy setup is needed if coming out of idle
+		 * power collapse with clamps enabled.
+		 */
+		if (ctrl->phy_power_off || mmss_clamp)
+			mdss_dsi_phy_power_on(ctrl, mmss_clamp);
+	}
+	if (clk & MDSS_DSI_LINK_CLK) {
+		if (ctrl->ulps) {
+			rc = mdss_dsi_ulps_config(ctrl, 0);
+			if (rc) {
+				pr_err("%s: failed to disable ulps, rc= %d\n",
+				       __func__, rc);
+				goto error;
+			}
+		}
+	}
+error:
+	return rc;
+}
+
+/*
+ * mdss_dsi_post_clkoff_cb() - DSI clock manager callback, runs after the
+ * requested clocks have been turned off.
+ * @priv: opaque handle, actually a struct mdss_dsi_ctrl_pdata pointer.
+ * @clk_type: bitmask of clock types that were switched off.
+ * @curr_state: resulting clock state; only MDSS_DSI_CLK_OFF is acted upon.
+ *
+ * After the core clock goes off, disable the shared DSI supply rails in
+ * reverse order (highest PM index down to DSI_CORE_PM).  While the
+ * controller is still active, only the core rail is released; the others
+ * are skipped.  Regulator failures are logged as warnings and treated as
+ * best-effort (rc is reset to 0).
+ *
+ * Return: 0 on success or best-effort failure, -EINVAL if @priv is NULL.
+ */
+int mdss_dsi_post_clkoff_cb(void *priv,
+			    enum mdss_dsi_clk_type clk_type,
+			    enum mdss_dsi_clk_state curr_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	struct mdss_panel_data *pdata = NULL;
+	struct dsi_shared_data *sdata;
+	int i;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid arg\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl->panel_data;
+	if ((clk_type & MDSS_DSI_CORE_CLK) &&
+	    (curr_state == MDSS_DSI_CLK_OFF)) {
+		sdata = ctrl->shared_data;
+
+		/* walk rails in reverse of the enable order in pre_clkon */
+		for (i = DSI_MAX_PM - 1; i >= DSI_CORE_PM; i--) {
+			/* keep non-core rails up while controller is active */
+			if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
+				(i != DSI_CORE_PM))
+				continue;
+			rc = msm_mdss_enable_vreg(
+				sdata->power_data[i].vreg_config,
+				sdata->power_data[i].num_vreg, 0);
+			if (rc) {
+				pr_warn("%s: failed to disable vregs for %s\n",
+					__func__,
+					__mdss_dsi_pm_name(i));
+				/* best-effort: do not propagate the failure */
+				rc = 0;
+			} else {
+				ctrl->core_power = false;
+			}
+		}
+
+		/*
+		 * temp workaround until framework issues pertaining to LP2
+		 * power state transitions are fixed. For now, we internally
+		 * transition to LP2 state whenever core power is turned off
+		 * in LP1 state
+		 */
+		if (mdss_dsi_is_panel_on_lp(pdata))
+			mdss_dsi_panel_power_ctrl(pdata,
+				MDSS_PANEL_POWER_LP2);
+	}
+	return rc;
+}
+
+/*
+ * mdss_dsi_pre_clkon_cb() - DSI clock manager callback, runs just before
+ * the requested clocks are turned on.
+ * @priv: opaque handle, actually a struct mdss_dsi_ctrl_pdata pointer.
+ * @clk_type: bitmask of clock types about to be enabled.
+ * @new_state: target clock state; only MDSS_DSI_CLK_ON is acted upon.
+ *
+ * Before the core clock comes on (and only if core power is currently
+ * off), enable the shared DSI supply rails from DSI_CORE_PM upward,
+ * skipping non-core rails while the controller is active outside of
+ * continuous splash.  Also exits the temporary LP2 state and disables
+ * MDSS dynamic clock gating before any clocks start.
+ *
+ * Return: 0 on success, -EINVAL if @priv is NULL, or the last regulator
+ * enable error.
+ */
+int mdss_dsi_pre_clkon_cb(void *priv,
+			  enum mdss_dsi_clk_type clk_type,
+			  enum mdss_dsi_clk_state new_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	struct mdss_panel_data *pdata = NULL;
+	struct dsi_shared_data *sdata;
+	int i;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl->panel_data;
+	if ((clk_type & MDSS_DSI_CORE_CLK) && (new_state == MDSS_DSI_CLK_ON) &&
+	    (ctrl->core_power == false)) {
+		sdata = ctrl->shared_data;
+		/*
+		 * Enable DSI core power
+		 * 1.> PANEL_PM are controlled as part of
+		 * panel_power_ctrl. Needed not be handled here.
+		 * 2.> CORE_PM are controlled by dsi clk manager.
+		 * 3.> CTRL_PM need to be enabled/disabled
+		 * only during unblank/blank. Their state should
+		 * not be changed during static screen.
+		 */
+		pr_debug("%s: Enable DSI core power\n", __func__);
+		for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
+			if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
+				(!pdata->panel_info.cont_splash_enabled) &&
+				(i != DSI_CORE_PM))
+				continue;
+			rc = msm_mdss_enable_vreg(
+				sdata->power_data[i].vreg_config,
+				sdata->power_data[i].num_vreg, 1);
+			if (rc) {
+				/* NOTE(review): loop continues after a
+				 * failure; only the last rc is returned.
+				 */
+				pr_err("%s: failed to enable vregs for %s\n",
+					__func__,
+					__mdss_dsi_pm_name(i));
+			} else {
+				ctrl->core_power = true;
+			}
+
+		}
+		/*
+		 * temp workaround until framework issues pertaining to LP2
+		 * power state transitions are fixed. For now, if we intend to
+		 * send a frame update when in LP1, we have to explicitly exit
+		 * LP2 state here
+		 */
+		if (mdss_dsi_is_panel_on_ulp(pdata))
+			mdss_dsi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_LP1);
+	}
+	/* Disable dynamic clock gating*/
+	if (ctrl->mdss_util->dyn_clk_gating_ctrl)
+		ctrl->mdss_util->dyn_clk_gating_ctrl(0);
+
+	return rc;
+}
+
+/*
+ * mdss_edp_clk_deinit() - release all eDP clock handles obtained in
+ * mdss_edp_clk_init().  Safe to call with partially-initialized handles;
+ * NULL entries are skipped.
+ */
+void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->aux_clk)
+		clk_put(edp_drv->aux_clk);
+	if (edp_drv->pixel_clk)
+		clk_put(edp_drv->pixel_clk);
+	if (edp_drv->ahb_clk)
+		clk_put(edp_drv->ahb_clk);
+	if (edp_drv->link_clk)
+		clk_put(edp_drv->link_clk);
+	if (edp_drv->mdp_core_clk)
+		clk_put(edp_drv->mdp_core_clk);
+}
+
+/*
+ * mdss_edp_clk_init() - look up all clock handles needed by the eDP
+ * driver (aux, pixel, ahb/iface, link, mdp core).
+ * @edp_drv: eDP driver data; clock handle fields are filled in here.
+ *
+ * On any lookup failure the failed handle is reset to NULL and every
+ * handle acquired so far is released via mdss_edp_clk_deinit().
+ *
+ * Return: 0 on success, -EPERM if any clock could not be found.
+ */
+int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct device *dev = &(edp_drv->pdev->dev);
+
+	edp_drv->aux_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(edp_drv->aux_clk)) {
+		pr_err("%s: Can't find aux_clk", __func__);
+		/* NULL so clk_deinit skips this handle */
+		edp_drv->aux_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->pixel_clk = clk_get(dev, "pixel_clk");
+	if (IS_ERR(edp_drv->pixel_clk)) {
+		pr_err("%s: Can't find pixel_clk", __func__);
+		edp_drv->pixel_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->ahb_clk = clk_get(dev, "iface_clk");
+	if (IS_ERR(edp_drv->ahb_clk)) {
+		pr_err("%s: Can't find ahb_clk", __func__);
+		edp_drv->ahb_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->link_clk = clk_get(dev, "link_clk");
+	if (IS_ERR(edp_drv->link_clk)) {
+		pr_err("%s: Can't find link_clk", __func__);
+		edp_drv->link_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	/* need mdss clock to receive irq */
+	edp_drv->mdp_core_clk = clk_get(dev, "mdp_core_clk");
+	if (IS_ERR(edp_drv->mdp_core_clk)) {
+		pr_err("%s: Can't find mdp_core_clk", __func__);
+		edp_drv->mdp_core_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	return 0;
+
+mdss_edp_clk_err:
+	mdss_edp_clk_deinit(edp_drv);
+	return -EPERM;
+}
+
+/*
+ * mdss_edp_aux_clk_enable() - enable the minimal clock set needed for
+ * AUX-channel traffic: aux (rated at 19.2 MHz), ahb, and mdp core.
+ * Clocks must already be prepared (see mdss_edp_prepare_aux_clocks()).
+ *
+ * Return: 0 on success; on failure, previously-enabled clocks in this
+ * call are disabled again and the clk_enable() error is returned.
+ */
+int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	/* rate failure is logged but not fatal */
+	if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
+		pr_err("%s: aux_clk - clk_set_rate failed\n",
+					__func__);
+
+	ret = clk_enable(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable aux clk\n", __func__);
+		goto c2;
+	}
+
+	ret = clk_enable(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable ahb clk\n", __func__);
+		goto c1;
+	}
+
+	/* need mdss clock to receive irq */
+	ret = clk_enable(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable mdp_core_clk\n", __func__);
+		goto c0;
+	}
+
+	return 0;
+/* unwind in reverse order of enabling */
+c0:
+	clk_disable(edp_drv->ahb_clk);
+c1:
+	clk_disable(edp_drv->aux_clk);
+c2:
+	return ret;
+
+}
+
+/*
+ * mdss_edp_aux_clk_disable() - disable the AUX-channel clock set enabled
+ * by mdss_edp_aux_clk_enable() (aux, ahb, mdp core).
+ */
+void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_disable(edp_drv->aux_clk);
+	clk_disable(edp_drv->ahb_clk);
+	clk_disable(edp_drv->mdp_core_clk);
+}
+
+/*
+ * mdss_edp_clk_set_rate() - program link and pixel clock rates from the
+ * negotiated link settings.  The link clock runs at link_rate multiples
+ * of 27 MHz.  Rate failures are logged only.
+ */
+static void mdss_edp_clk_set_rate(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (clk_set_rate(edp_drv->link_clk, edp_drv->link_rate * 27000000) < 0)
+		pr_err("%s: link_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->pixel_clk, edp_drv->pixel_rate) < 0)
+		pr_err("%s: pixel_clk - clk_set_rate failed\n",
+					__func__);
+}
+
+/*
+ * mdss_edp_clk_enable() - set rates and enable the full eDP clock set
+ * (aux, pixel, ahb, link, mdp core).  No-op if clocks are already on,
+ * tracked via edp_drv->clk_on.
+ *
+ * Return: 0 on success (or already on); on failure, clocks enabled so
+ * far in this call are disabled again and the clk_enable() error is
+ * returned.
+ */
+int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	if (edp_drv->clk_on) {
+		pr_info("%s: edp clks are already ON\n", __func__);
+		return 0;
+	}
+
+	/* rate failures are logged but not fatal */
+	if (clk_set_rate(edp_drv->link_clk, edp_drv->link_rate * 27000000) < 0)
+		pr_err("%s: link_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->aux_clk, edp_drv->aux_rate) < 0)
+		pr_err("%s: aux_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->pixel_clk, edp_drv->pixel_rate) < 0)
+		pr_err("%s: pixel_clk - clk_set_rate failed\n",
+					__func__);
+
+	ret = clk_enable(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable aux clk\n", __func__);
+		goto c4;
+	}
+	ret = clk_enable(edp_drv->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable pixel clk\n", __func__);
+		goto c3;
+	}
+	ret = clk_enable(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable ahb clk\n", __func__);
+		goto c2;
+	}
+	ret = clk_enable(edp_drv->link_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable link clk\n", __func__);
+		goto c1;
+	}
+	ret = clk_enable(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable mdp_core_clk\n", __func__);
+		goto c0;
+	}
+
+	edp_drv->clk_on = 1;
+
+	return 0;
+
+/* unwind in reverse order of enabling */
+c0:
+	clk_disable(edp_drv->link_clk);
+c1:
+	clk_disable(edp_drv->ahb_clk);
+c2:
+	clk_disable(edp_drv->pixel_clk);
+c3:
+	clk_disable(edp_drv->aux_clk);
+c4:
+	return ret;
+}
+
+/*
+ * mdss_edp_clk_disable() - disable the full eDP clock set enabled by
+ * mdss_edp_clk_enable().  No-op if clocks are already off, tracked via
+ * edp_drv->clk_on.
+ */
+void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->clk_on == 0) {
+		pr_info("%s: edp clks are already OFF\n", __func__);
+		return;
+	}
+
+	clk_disable(edp_drv->aux_clk);
+	clk_disable(edp_drv->pixel_clk);
+	clk_disable(edp_drv->ahb_clk);
+	clk_disable(edp_drv->link_clk);
+	clk_disable(edp_drv->mdp_core_clk);
+
+	edp_drv->clk_on = 0;
+}
+
+/*
+ * mdss_edp_prepare_aux_clocks() - clk_prepare() the AUX-channel clock set
+ * (ahb first, then aux, then mdp core) ahead of clk_enable() calls in
+ * mdss_edp_aux_clk_enable().
+ *
+ * Return: 0 on success; on failure, previously-prepared clocks in this
+ * call are unprepared again and the clk_prepare() error is returned.
+ */
+int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	/* ahb clock should be prepared first */
+	ret = clk_prepare(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare ahb clk\n", __func__);
+		goto c3;
+	}
+	ret = clk_prepare(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare aux clk\n", __func__);
+		goto c2;
+	}
+
+	/* need mdss clock to receive irq */
+	ret = clk_prepare(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare mdp_core clk\n", __func__);
+		goto c1;
+	}
+
+	return 0;
+/* unwind in reverse order of preparing */
+c1:
+	clk_unprepare(edp_drv->aux_clk);
+c2:
+	clk_unprepare(edp_drv->ahb_clk);
+c3:
+	return ret;
+
+}
+
+/*
+ * mdss_edp_unprepare_aux_clocks() - clk_unprepare() the AUX-channel clock
+ * set from mdss_edp_prepare_aux_clocks(); ahb is released last.
+ */
+void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_unprepare(edp_drv->mdp_core_clk);
+	clk_unprepare(edp_drv->aux_clk);
+	clk_unprepare(edp_drv->ahb_clk);
+}
+
+/*
+ * mdss_edp_prepare_clocks() - set link/pixel rates and clk_prepare() the
+ * full eDP clock set (ahb first, then aux, pixel, link, mdp core).
+ *
+ * Return: 0 on success; on failure, previously-prepared clocks in this
+ * call are unprepared again and the clk_prepare() error is returned.
+ */
+int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	mdss_edp_clk_set_rate(edp_drv);
+
+	/* ahb clock should be prepared first */
+	ret = clk_prepare(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare ahb clk\n", __func__);
+		goto c4;
+	}
+	ret = clk_prepare(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare aux clk\n", __func__);
+		goto c3;
+	}
+	ret = clk_prepare(edp_drv->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare pixel clk\n", __func__);
+		goto c2;
+	}
+	ret = clk_prepare(edp_drv->link_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare link clk\n", __func__);
+		goto c1;
+	}
+	ret = clk_prepare(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare mdp_core clk\n", __func__);
+		goto c0;
+	}
+
+	return 0;
+/* unwind in reverse order of preparing */
+c0:
+	clk_unprepare(edp_drv->link_clk);
+c1:
+	clk_unprepare(edp_drv->pixel_clk);
+c2:
+	clk_unprepare(edp_drv->aux_clk);
+c3:
+	clk_unprepare(edp_drv->ahb_clk);
+c4:
+	return ret;
+}
+
+/*
+ * mdss_edp_unprepare_clocks() - clk_unprepare() the full eDP clock set
+ * from mdss_edp_prepare_clocks().
+ */
+void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_unprepare(edp_drv->mdp_core_clk);
+	clk_unprepare(edp_drv->aux_clk);
+	clk_unprepare(edp_drv->pixel_clk);
+	clk_unprepare(edp_drv->link_clk);
+	/* ahb clock should be last one to disable */
+	clk_unprepare(edp_drv->ahb_clk);
+}
+
+/*
+ * mdss_edp_clk_debug() - dump selected MMSS clock-controller registers
+ * (pixel clock at 0x0a0/0x0a4/0x32c, main link clock at 0x0c0/0x0c4/0x330)
+ * to the kernel log for debugging.  @edp_base is unused in this body.
+ */
+void mdss_edp_clk_debug(unsigned char *edp_base, unsigned char *mmss_cc_base)
+{
+	u32 da4, da0, d32c;
+	u32 dc4, dc0, d330;
+
+	/* pixel clk */
+	da0 = edp_read(mmss_cc_base + 0x0a0);
+	da4 = edp_read(mmss_cc_base + 0x0a4);
+	d32c = edp_read(mmss_cc_base + 0x32c);
+
+	/* main link clk */
+	dc0 = edp_read(mmss_cc_base + 0x0c0);
+	dc4 = edp_read(mmss_cc_base + 0x0c4);
+	d330 = edp_read(mmss_cc_base + 0x330);
+
+	pr_err("%s: da0=%x da4=%x d32c=%x dc0=%x dc4=%x d330=%x\n", __func__,
+		(int)da0, (int)da4, (int)d32c, (int)dc0, (int)dc4, (int)d330);
+
+}
diff --git a/drivers/video/fbdev/msm/qpic_panel_ili_qvga.c b/drivers/video/fbdev/msm/qpic_panel_ili_qvga.c
new file mode 100644
index 0000000..d644f4f
--- /dev/null
+++ b/drivers/video/fbdev/msm/qpic_panel_ili_qvga.c
@@ -0,0 +1,235 @@
+/* Copyright (c) 2013-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/memory.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+
+#include "mdss.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static int mdss_qpic_pinctrl_set_state(struct qpic_panel_io_desc *qpic_panel_io,
+ bool active);
+/*
+ * panel_io_init() - one-time regulator voltage setup for the QVGA panel.
+ * @panel_io: panel I/O descriptor holding optional vdd/avdd regulators.
+ *
+ * Programs vdd to 1.8 V and avdd to 2.704 V when the corresponding
+ * regulator handle is present.  Regulators are only configured here;
+ * they are enabled later in panel_io_on().
+ *
+ * Return: 0 on success, -EINVAL if a voltage could not be set.
+ */
+static int panel_io_init(struct qpic_panel_io_desc *panel_io)
+{
+	int rc;
+
+	if (panel_io->vdd_vreg) {
+		rc = regulator_set_voltage(panel_io->vdd_vreg,
+			1800000, 1800000);
+		if (rc) {
+			pr_err("vdd_vreg->set_voltage failed, rc=%d\n", rc);
+			return -EINVAL;
+		}
+	}
+	if (panel_io->avdd_vreg) {
+		rc = regulator_set_voltage(panel_io->avdd_vreg,
+			2704000, 2704000);
+		if (rc) {
+			/* was a copy-paste of the vdd message; this is avdd */
+			pr_err("avdd_vreg->set_voltage failed, rc=%d\n", rc);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * panel_io_off() - release all panel I/O resources: move pinctrl to the
+ * suspend state, free every claimed GPIO, and disable the vdd/avdd
+ * regulators.  Safe against partially-acquired resources (zero/NULL
+ * fields are skipped), so it doubles as the error-unwind path for
+ * panel_io_on().
+ */
+static void panel_io_off(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	if (mdss_qpic_pinctrl_set_state(qpic_panel_io, false))
+		pr_warn("%s panel on: pinctrl not enabled\n", __func__);
+
+	if (qpic_panel_io->ad8_gpio)
+		gpio_free(qpic_panel_io->ad8_gpio);
+	if (qpic_panel_io->cs_gpio)
+		gpio_free(qpic_panel_io->cs_gpio);
+	if (qpic_panel_io->rst_gpio)
+		gpio_free(qpic_panel_io->rst_gpio);
+	if (qpic_panel_io->te_gpio)
+		gpio_free(qpic_panel_io->te_gpio);
+	if (qpic_panel_io->bl_gpio)
+		gpio_free(qpic_panel_io->bl_gpio);
+	if (qpic_panel_io->vdd_vreg)
+		regulator_disable(qpic_panel_io->vdd_vreg);
+	if (qpic_panel_io->avdd_vreg)
+		regulator_disable(qpic_panel_io->avdd_vreg);
+}
+
+/*
+ * ili9341_off() - power down the ILI9341 panel: send display-off over
+ * QPIC, wait for the panel to settle, then release I/O resources.
+ */
+void ili9341_off(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	qpic_send_pkt(OP_SET_DISPLAY_OFF, NULL, 0);
+	/* wait for 20 ms after display off */
+	msleep(20);
+	panel_io_off(qpic_panel_io);
+}
+
+/*
+ * panel_io_on() - power up the panel I/O: enable vdd/avdd regulators and
+ * configure panel GPIOs, preferring pinctrl and falling back to explicit
+ * gpio_request() of reset/cs/ad8/te/backlight when pinctrl is unavailable.
+ * On any failure everything acquired so far is released via
+ * panel_io_off().
+ *
+ * Return: 0 on success, -ENODEV if vdd cannot be enabled, -EINVAL on any
+ * later failure.
+ */
+static int panel_io_on(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	int rc;
+
+	if (qpic_panel_io->vdd_vreg) {
+		rc = regulator_enable(qpic_panel_io->vdd_vreg);
+		if (rc) {
+			pr_err("enable vdd failed, rc=%d\n", rc);
+			return -ENODEV;
+		}
+	}
+
+	if (qpic_panel_io->avdd_vreg) {
+		rc = regulator_enable(qpic_panel_io->avdd_vreg);
+		if (rc) {
+			pr_err("enable avdd failed, rc=%d\n", rc);
+			goto power_on_error;
+		}
+	}
+
+	/* GPIO settings using pinctrl */
+	if (mdss_qpic_pinctrl_set_state(qpic_panel_io, true)) {
+		/* pinctrl unavailable: request each GPIO individually */
+		pr_warn("%s panel on: pinctrl not enabled\n", __func__);
+
+		if ((qpic_panel_io->rst_gpio) &&
+			(gpio_request(qpic_panel_io->rst_gpio, "disp_rst_n"))) {
+			pr_err("%s request reset gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->cs_gpio) &&
+			(gpio_request(qpic_panel_io->cs_gpio, "disp_cs_n"))) {
+			pr_err("%s request cs gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->ad8_gpio) &&
+			(gpio_request(qpic_panel_io->ad8_gpio, "disp_ad8_n"))) {
+			pr_err("%s request ad8 gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->te_gpio) &&
+			(gpio_request(qpic_panel_io->te_gpio, "disp_te_n"))) {
+			pr_err("%s request te gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->bl_gpio) &&
+			(gpio_request(qpic_panel_io->bl_gpio, "disp_bl_n"))) {
+			pr_err("%s request bl gpio failed\n", __func__);
+			goto power_on_error;
+		}
+	}
+
+	/* wait for 20 ms after enable gpio as suggested by hw */
+	msleep(20);
+	return 0;
+power_on_error:
+	panel_io_off(qpic_panel_io);
+	return -EINVAL;
+}
+
+/*
+ * ili9341_on() - full power-on and init sequence for the ILI9341 QVGA
+ * panel over QPIC: lazy one-time regulator init, I/O power-up, soft
+ * reset, then the command sequence (address mode 0x48, pixel format
+ * 0x66, interface control, sleep exit, normal mode, display on, tearing
+ * effect line).  Delays follow the panel spec.
+ *
+ * Return: 0 on success, or the error from panel_io_on().
+ */
+int ili9341_on(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	u8 param[4];
+	int ret;
+
+	if (!qpic_panel_io->init) {
+		/*
+		 * NOTE(review): panel_io_init() return value is ignored and
+		 * init is set regardless — a voltage-set failure is never
+		 * retried.  Confirm this is intended.
+		 */
+		panel_io_init(qpic_panel_io);
+		qpic_panel_io->init = true;
+	}
+	ret = panel_io_on(qpic_panel_io);
+	if (ret)
+		return ret;
+	qpic_send_pkt(OP_SOFT_RESET, NULL, 0);
+	/* wait for 120 ms after reset as panel spec suggests */
+	msleep(120);
+	qpic_send_pkt(OP_SET_DISPLAY_OFF, NULL, 0);
+	/* wait for 20 ms after disply off */
+	msleep(20);
+
+	/* set memory access control */
+	param[0] = 0x48;
+	qpic_send_pkt(OP_SET_ADDRESS_MODE, param, 1);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	param[0] = 0x66;
+	qpic_send_pkt(OP_SET_PIXEL_FORMAT, param, 1);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* set interface */
+	param[0] = 1;
+	param[1] = 0;
+	param[2] = 0;
+	qpic_send_pkt(OP_ILI9341_INTERFACE_CONTROL, param, 3);
+	/* wait for 20 ms after command sent */
+	msleep(20);
+
+	/* exit sleep mode */
+	qpic_send_pkt(OP_EXIT_SLEEP_MODE, NULL, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* normal mode */
+	qpic_send_pkt(OP_ENTER_NORMAL_MODE, NULL, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* display on */
+	qpic_send_pkt(OP_SET_DISPLAY_ON, NULL, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	param[0] = 0;
+	qpic_send_pkt(OP_ILI9341_TEARING_EFFECT_LINE_ON, param, 1);
+
+	/* test */
+	param[0] = qpic_read_data(OP_GET_PIXEL_FORMAT, 1);
+	pr_debug("Pixel format =%x", param[0]);
+
+	return 0;
+}
+
+/*
+ * mdss_qpic_pinctrl_set_state() - move the panel pins to the active or
+ * suspend pinctrl state.
+ * @qpic_panel_io: panel I/O descriptor holding the pinctrl resources.
+ * @active: true selects gpio_state_active, false gpio_state_suspend.
+ *
+ * Return: 0 on success, -EFAULT if the selected state handle is invalid,
+ * or the pinctrl_select_state() error.
+ *
+ * NOTE(review): when pin_res.pinctrl is NULL, IS_ERR_OR_NULL is true but
+ * PTR_ERR(NULL) is 0, so a missing pinctrl handle returns "success" here.
+ * Callers treat non-zero as failure — confirm a NULL handle should not
+ * instead report an error.
+ */
+static int mdss_qpic_pinctrl_set_state(struct qpic_panel_io_desc *qpic_panel_io,
+	bool active)
+{
+	struct pinctrl_state *pin_state;
+	int rc = -EFAULT;
+
+	if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.pinctrl))
+		return PTR_ERR(qpic_panel_io->pin_res.pinctrl);
+
+	pin_state = active ? qpic_panel_io->pin_res.gpio_state_active
+				: qpic_panel_io->pin_res.gpio_state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(qpic_panel_io->pin_res.pinctrl,
+				pin_state);
+		if (rc)
+			pr_err("%s: can not set %s pins\n", __func__,
+				active ? MDSS_PINCTRL_STATE_DEFAULT
+				: MDSS_PINCTRL_STATE_SLEEP);
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+			active ? MDSS_PINCTRL_STATE_DEFAULT
+			: MDSS_PINCTRL_STATE_SLEEP);
+	}
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/splash.h b/drivers/video/fbdev/msm/splash.h
new file mode 100644
index 0000000..1cb7aa1
--- /dev/null
+++ b/drivers/video/fbdev/msm/splash.h
@@ -0,0 +1,5279 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __SPLASH_H_
+#define __SPLASH_H_
+
+#define SPLASH_IMAGE_WIDTH 113
+#define SPLASH_IMAGE_HEIGHT 124
+#define SPLASH_IMAGE_FORMAT MDP_BGR_888
+#define SPLASH_IMAGE_BPP 3
+
+char splash_bgr888_image[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x10,
+ 0x29, 0x19, 0x31, 0x31,
+ 0x29, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x31, 0x31,
+ 0x29, 0x4a, 0x52, 0x4a, 0x6b, 0x5a, 0x73, 0x4a, 0x52, 0x4a, 0x10, 0x29,
+ 0x19, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x31, 0x31, 0x29, 0x6b, 0x5a, 0x73, 0x6b, 0x7b, 0x73, 0x6b, 0x5a,
+ 0x4a, 0x31, 0x31, 0x29,
+ 0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x6b, 0x5a, 0x4a, 0x6b, 0x5a,
+ 0x73, 0x3a, 0x31, 0x4a,
+ 0x31, 0x31, 0x29, 0x10, 0x29, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x3a, 0x31,
+ 0x4a, 0x31, 0x31, 0x29,
+ 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x21, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08,
+ 0x10, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x08, 0x08, 0x10,
+ 0x08, 0x08, 0x10, 0x10,
+ 0x29, 0x19, 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10,
+ 0x4a, 0x52, 0x4a, 0x08,
+ 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x08,
+ 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x29, 0x19, 0x4a, 0x52, 0x4a, 0x3a, 0x31, 0x4a,
+ 0x08, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x4a, 0x52, 0x4a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x4a,
+ 0x52, 0x4a, 0x9c, 0xa5, 0x94, 0x9c, 0x7b, 0x94, 0x08, 0x08, 0x10, 0x08,
+ 0x08, 0x10, 0x10, 0x29,
+ 0x19, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x10, 0x21, 0x00,
+ 0x08, 0x08, 0x10, 0x6b, 0x7b, 0x73, 0x9c, 0x7b, 0x94, 0x9c, 0xa5, 0x94,
+ 0xce, 0xad, 0xad, 0xa5,
+ 0xb5, 0xb5, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10, 0x10, 0x29, 0x19, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x4a,
+ 0x52, 0x4a, 0xa5, 0x9c, 0xad, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0x9c,
+ 0x7b, 0x94, 0x10, 0x29,
+ 0x19, 0x3a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x9c, 0x7b, 0x94, 0x9c, 0xa5, 0x94, 0xa5, 0xb5, 0xb5,
+ 0xa5, 0xb5, 0xb5, 0xce,
+ 0xde, 0xce, 0xc5, 0xad, 0xd6, 0x9c, 0xa5, 0x94, 0x3a, 0x10, 0x21, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x9c, 0x7b, 0x94, 0xce, 0xad, 0xad, 0xce, 0xe6, 0xef, 0xce,
+ 0xe6, 0xef, 0xe6, 0xde,
+ 0xde, 0xa5, 0x9c, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xce, 0xde, 0xce, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xde, 0xce, 0x6b,
+ 0x7b, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x29, 0x19, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0x9c,
+ 0xa5, 0x94, 0xce, 0xde,
+ 0xce, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0xce, 0xde, 0xce, 0xff, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0x9c,
+ 0xa5, 0x94, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29, 0xef, 0xf7, 0xe6, 0xff,
+ 0xf7, 0xff, 0xef, 0xde,
+ 0xef, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xf7, 0xff, 0x10,
+ 0x29, 0x19, 0x08, 0x08,
+ 0x10, 0x4a, 0x52, 0x4a, 0xce, 0xad, 0xad, 0xff, 0xff, 0xff, 0x4a, 0x52,
+ 0x4a, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0xce, 0xad, 0xad,
+ 0xef, 0xf7, 0xff, 0xce,
+ 0xde, 0xce, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x9c, 0x7b, 0x94, 0x31,
+ 0x31, 0x29, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xef,
+ 0xf7, 0xe6, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x6b, 0x7b, 0x73, 0x08, 0x08, 0x10, 0xff, 0xff,
+ 0xff, 0x6b, 0x7b, 0x73,
+ 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x4a, 0x52, 0x4a, 0x08, 0x08, 0x10,
+ 0xce, 0xde, 0xce, 0xff,
+ 0xff, 0xff, 0x10, 0x29, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31,
+ 0x31, 0x29, 0x4a, 0x52,
+ 0x4a, 0xa5, 0xb5, 0xb5, 0xff, 0xff, 0xff, 0x9c, 0x7b, 0x94, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a,
+ 0x52, 0x4a, 0xe6, 0xde,
+ 0xde, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29, 0x08, 0x08,
+ 0x10, 0xff, 0xff, 0xff,
+ 0x4a, 0x52, 0x4a, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x9c,
+ 0x7b, 0x94, 0xff, 0xff, 0xff, 0x3a, 0x10, 0x21, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x08, 0x08, 0x10, 0x6b, 0x5a, 0x73, 0xff, 0xff, 0xff, 0xa5, 0xb5,
+ 0xb5, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4a, 0x52,
+ 0x4a, 0xef, 0xf7, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xe6, 0xde, 0xde, 0x08, 0x31, 0x5a, 0x10, 0x7b, 0x9c, 0x10, 0x7b, 0x9c,
+ 0x10, 0x7b, 0x9c, 0x10,
+ 0x52, 0x7b, 0x31, 0x31, 0x29, 0xef, 0xf7, 0xff, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xa5, 0x94, 0xff, 0xff,
+ 0xff, 0xa5, 0x9c, 0xad,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x21, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x08, 0x08, 0x10, 0xff, 0xf7, 0xff, 0x4a, 0x52, 0x4a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x31, 0x5a, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef,
+ 0x00, 0xbd, 0xef, 0x00,
+ 0x9c, 0xd6, 0x08, 0xa5, 0xad, 0x08, 0xad, 0xd6, 0x10, 0xce, 0xce, 0x6b,
+ 0x7b, 0x9c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x6b, 0x5a, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0xce, 0xde, 0xce, 0xff, 0xf7,
+ 0xff, 0x10, 0x29, 0x19,
+ 0x10, 0x5a, 0x9c, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x08,
+ 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd,
+ 0xef, 0x08, 0xa5, 0xad, 0x08, 0x31, 0x5a, 0x10, 0x29, 0x19, 0xff, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xe6, 0xde, 0xde, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a,
+ 0x73, 0xef, 0xf7, 0xe6,
+ 0x19, 0x7b, 0xbd, 0x19, 0x7b, 0xbd, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef,
+ 0x10, 0xc5, 0xef, 0x00,
+ 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x3a, 0xde, 0xef, 0x19,
+ 0xbd, 0xf7, 0x3a, 0xde,
+ 0xef, 0x3a, 0xde, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad,
+ 0xd6, 0x19, 0x94, 0xce,
+ 0xa5, 0xb5, 0xb5, 0x4a, 0x5a, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x08, 0x10,
+ 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6,
+ 0x08, 0xad, 0xef, 0x10,
+ 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x19,
+ 0xbd, 0xf7, 0x3a, 0xde,
+ 0xef, 0x3a, 0xde, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xe6,
+ 0xef, 0x10, 0xc5, 0xef,
+ 0x10, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xd6, 0x08, 0x31, 0x3a,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x08, 0x10,
+ 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6,
+ 0x10, 0xc5, 0xef, 0x08,
+ 0xad, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10,
+ 0xc5, 0xef, 0x6b, 0xe6,
+ 0xef, 0x3a, 0xde, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xc5,
+ 0xef, 0x10, 0xe6, 0xef,
+ 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7,
+ 0x19, 0x7b, 0xbd, 0x00,
+ 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x31, 0x5a, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+ 0x08, 0xad, 0xef, 0x08,
+ 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x10,
+ 0xc5, 0xef, 0x10, 0xe6,
+ 0xef, 0x3a, 0xde, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xe6,
+ 0xef, 0x10, 0xc5, 0xef,
+ 0x10, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0x84, 0xbd, 0x10, 0x7b, 0x9c,
+ 0x00, 0xbd, 0xef, 0x10,
+ 0x73, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x52, 0x7b, 0x19, 0x7b, 0xbd, 0x00, 0x9c, 0xd6,
+ 0x19, 0x94, 0xce, 0x08,
+ 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10,
+ 0xe6, 0xef, 0x19, 0xbd,
+ 0xf7, 0x6b, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xe6, 0xef, 0x10, 0xe6,
+ 0xef, 0x10, 0xe6, 0xef,
+ 0x10, 0xc5, 0xef, 0x00, 0x9c, 0xd6, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+ 0x19, 0x94, 0xce, 0x00,
+ 0x9c, 0xd6, 0x10, 0x52, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x31, 0x5a,
+ 0x08, 0xa5, 0xad, 0x08,
+ 0xad, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10,
+ 0xc5, 0xef, 0x10, 0xc5,
+ 0xef, 0x10, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xe6, 0xef, 0x10, 0xc5,
+ 0xef, 0x10, 0xc5, 0xef,
+ 0x19, 0xbd, 0xf7, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce,
+ 0x08, 0xad, 0xd6, 0x00,
+ 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x31, 0x3a, 0x10,
+ 0x52, 0x7b, 0x10, 0x7b, 0x9c, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10,
+ 0xc5, 0xef, 0x10, 0xc5,
+ 0xef, 0x3a, 0xde, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xe6,
+ 0xef, 0x08, 0xad, 0xd6,
+ 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce,
+ 0x00, 0x9c, 0xd6, 0x00,
+ 0x9c, 0xd6, 0x19, 0x7b, 0xbd, 0x19, 0x7b, 0xbd, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x08, 0x10, 0x9c, 0x7b, 0x73, 0x6b, 0x5a, 0x73, 0x10, 0x29,
+ 0x19, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x73, 0xa5, 0xad, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x00,
+ 0x84, 0xbd, 0x08, 0xa5,
+ 0xad, 0x08, 0xad, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x10, 0x73,
+ 0x7b, 0x10, 0x5a, 0x9c,
+ 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x00, 0x9c, 0xd6, 0x00, 0x9c, 0xd6,
+ 0x00, 0x9c, 0xd6, 0x00,
+ 0x84, 0xbd, 0x3a, 0xa5, 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0x6b,
+ 0x5a, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0x6b, 0x7b,
+ 0x9c, 0x9c, 0x7b, 0x73,
+ 0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x31, 0x31, 0x29, 0xc5, 0xad, 0xd6, 0x52, 0xa5, 0xa5, 0x10,
+ 0x5a, 0x9c, 0x10, 0x7b,
+ 0x9c, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x19, 0x94,
+ 0xce, 0x00, 0x9c, 0xd6,
+ 0x00, 0x9c, 0xd6, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce,
+ 0x00, 0x84, 0xbd, 0x19,
+ 0x7b, 0xbd, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce, 0xa5,
+ 0xb5, 0xb5, 0xa5, 0xb5,
+ 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21,
+ 0x00, 0x6b, 0x7b, 0x73,
+ 0x9c, 0x7b, 0x94, 0x6b, 0x7b, 0x73, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x4a, 0x52, 0x4a, 0xa5, 0xd6, 0xad, 0xc5,
+ 0xad, 0xd6, 0x4a, 0x7b,
+ 0x9c, 0x10, 0x5a, 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x00, 0x84,
+ 0xbd, 0x00, 0x9c, 0xd6,
+ 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd,
+ 0x19, 0x7b, 0xbd, 0x73,
+ 0xa5, 0xad, 0xa5, 0xb5, 0xb5, 0xc5, 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xe6,
+ 0xde, 0xde, 0xce, 0xde,
+ 0xce, 0xce, 0xad, 0xad, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x4a, 0x52, 0x4a, 0x6b, 0x7b, 0x73, 0x31, 0x31, 0x29, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x31, 0x4a, 0xce,
+ 0xad, 0xad, 0xce, 0xde,
+ 0xce, 0xa5, 0xb5, 0xb5, 0x73, 0xa5, 0xad, 0x10, 0x73, 0x7b, 0x10, 0x5a,
+ 0x9c, 0x10, 0x7b, 0x9c,
+ 0x10, 0x5a, 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c,
+ 0x19, 0x7b, 0xbd, 0xa5,
+ 0xb5, 0xb5, 0xa5, 0xb5, 0xb5, 0xc5, 0xad, 0xd6, 0xa5, 0xd6, 0xad, 0xef,
+ 0xde, 0xef, 0xef, 0xf7,
+ 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xa5, 0x9c, 0xad, 0x08, 0x08,
+ 0x10, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a,
+ 0x52, 0x4a, 0xe6, 0xde,
+ 0xde, 0xc5, 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xce, 0xad, 0xad, 0x9c, 0xad,
+ 0xce, 0x52, 0xa5, 0xa5,
+ 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x52, 0xa5, 0xa5,
+ 0x73, 0xa5, 0xad, 0xc5,
+ 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce, 0xef,
+ 0xde, 0xef, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7,
+ 0xe6, 0x08, 0x08, 0x10,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xef, 0xf7,
+ 0xe6, 0xef, 0xf7, 0xe6, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce, 0xa5, 0xb5,
+ 0xb5, 0xce, 0xde, 0xce,
+ 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce,
+ 0xce, 0xad, 0xad, 0xc5,
+ 0xad, 0xd6, 0xa5, 0xd6, 0xad, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xe6,
+ 0xde, 0xde, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xb5, 0xb5, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x21, 0x00, 0x00,
+ 0x00, 0x00, 0x9c, 0xa5,
+ 0x94, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xc5, 0xad,
+ 0xd6, 0xa5, 0xb5, 0xb5,
+ 0xce, 0xad, 0xad, 0xc5, 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xce, 0xad, 0xad,
+ 0xc5, 0xad, 0xd6, 0xa5,
+ 0xd6, 0xad, 0xc5, 0xad, 0xd6, 0xce, 0xde, 0xce, 0xef, 0xde, 0xef, 0xef,
+ 0xf7, 0xe6, 0xff, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x08,
+ 0x08, 0x10, 0x10, 0x29,
+ 0x19, 0xef, 0xde, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xc5, 0xad, 0xd6, 0xa5, 0xd6, 0xad, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce,
+ 0xa5, 0xb5, 0xb5, 0xce,
+ 0xde, 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xef, 0xf7, 0xe6, 0xef,
+ 0xf7, 0xff, 0xff, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde,
+ 0x08, 0x08, 0x10, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0xce, 0xad, 0xad, 0xc5, 0xad, 0xd6,
+ 0xa5, 0xb5, 0xb5, 0xce,
+ 0xad, 0xad, 0xce, 0xde, 0xce, 0xce, 0xe6, 0xef, 0xff, 0xf7, 0xff, 0xff,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+ 0xe6, 0xde, 0xde, 0xef,
+ 0xf7, 0xe6, 0xce, 0xe6, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xa5, 0x9c, 0xad, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08,
+ 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0xce, 0xde, 0xce, 0xef, 0xde,
+ 0xef, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef,
+ 0xf7, 0xff, 0x31, 0x31,
+ 0x29, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0xce, 0xad,
+ 0xad, 0xef, 0xf7, 0xe6,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xef,
+ 0xde, 0xef, 0xef, 0xf7,
+ 0xe6, 0x9c, 0xa5, 0x94, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31,
+ 0x29, 0xa5, 0xb5, 0xb5,
+ 0xce, 0xde, 0xce, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xef, 0xde, 0xef, 0xef,
+ 0xf7, 0xe6, 0xe6, 0xde,
+ 0xde, 0xce, 0xe6, 0xef, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+ 0xef, 0xf7, 0xe6, 0xef,
+ 0xde, 0xef, 0xce, 0xe6, 0xef, 0xe6, 0xde, 0xde, 0xe6, 0xde, 0xde, 0xce,
+ 0xde, 0xce, 0xce, 0xde,
+ 0xce, 0xe6, 0xde, 0xde, 0xce, 0xad, 0xad, 0x08, 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x10, 0x21, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x6b, 0x5a, 0x4a,
+ 0x73, 0xa5, 0xad, 0xce, 0xad, 0xad, 0xc5, 0xad, 0xd6, 0xe6, 0xde, 0xde,
+ 0xe6, 0xde, 0xde, 0xff,
+ 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef,
+ 0xde, 0xef, 0xce, 0xde,
+ 0xce, 0xef, 0xde, 0xef, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xde, 0xef,
+ 0xce, 0xde, 0xce, 0xc5,
+ 0xad, 0xd6, 0xce, 0xde, 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xa5,
+ 0xb5, 0xb5, 0xc5, 0xad,
+ 0xd6, 0xa5, 0xb5, 0xb5, 0xc5, 0xad, 0xd6, 0xef, 0xf7, 0xff, 0x4a, 0x52,
+ 0x4a, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x10, 0x29, 0x19,
+ 0x6b, 0x5a, 0x73, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce,
+ 0xef, 0xf7, 0xe6, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xef, 0xde, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0xce, 0xde, 0xce, 0xc5,
+ 0xad, 0xd6, 0xce, 0xde,
+ 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xa5, 0xb5, 0xb5, 0xce, 0xde,
+ 0xce, 0xff, 0xf7, 0xff,
+ 0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a,
+ 0x31, 0x31, 0x29, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0xa5, 0x9c, 0xad, 0xce, 0xde, 0xce, 0xce, 0xe6, 0xef,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xce, 0xe6, 0xef, 0xe6, 0xde, 0xde, 0xce, 0xde, 0xce, 0xc5, 0xad,
+ 0xd6, 0xce, 0xad, 0xad,
+ 0xe6, 0xde, 0xde, 0xff, 0xf7, 0xff, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x31, 0x31, 0x29, 0x3a, 0x10, 0x21, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31,
+ 0x29, 0x00, 0x00, 0x00,
+ 0x10, 0x21, 0x00, 0x6b, 0x5a, 0x73, 0xce, 0xde, 0xce, 0xff, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xce, 0xde, 0xce,
+ 0xce, 0xde, 0xce, 0x9c, 0xad, 0xce, 0xef, 0xf7, 0xe6, 0xa5, 0x9c, 0xad,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x3a, 0x31, 0x4a, 0x10,
+ 0x29, 0x19, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52,
+ 0x4a, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0xe6, 0xde, 0xde, 0xef, 0xf7, 0xe6,
+ 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xf7, 0xff,
+ 0xef, 0xf7, 0xff, 0xef, 0xde, 0xef, 0xce, 0xe6, 0xad, 0xc5, 0xad, 0xd6,
+ 0xff, 0xff, 0xff, 0x10,
+ 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x31, 0x31,
+ 0x29, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x29,
+ 0x19, 0x08, 0x08, 0x10,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xde, 0xef,
+ 0xce, 0xde, 0xce, 0xce,
+ 0xde, 0xce, 0xce, 0xad, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+ 0x21, 0x00, 0x08, 0x08,
+ 0x10, 0x08, 0x08, 0x10, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x4a, 0x52, 0x4a,
+ 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xf7, 0xe6,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xde, 0xef, 0xce, 0xde, 0xce, 0xef, 0xf7, 0xff, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x3a, 0x10,
+ 0x21, 0x31, 0x31, 0x29, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10, 0x10, 0x29,
+ 0x19, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x29, 0x19,
+ 0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x7b, 0x94,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x4a,
+ 0x52, 0x4a, 0x00, 0x00,
+ 0x00, 0x10, 0x29, 0x19, 0x4a, 0x52, 0x4a, 0x31, 0x31, 0x29, 0x08, 0x08,
+ 0x10, 0x3a, 0x31, 0x4a,
+ 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xce, 0xde,
+ 0xce, 0x3a, 0x10, 0x21, 0x10, 0x29, 0x19, 0x3a, 0x31, 0x4a, 0x3a, 0x10,
+ 0x21, 0x10, 0x21, 0x00,
+ 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x9c, 0x7b, 0x94, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0x3a, 0x10, 0x21, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29,
+ 0x08, 0x08, 0x10, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x4a,
+ 0x52, 0x4a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xe6, 0xde, 0xde,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x4a, 0x52,
+ 0x4a, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x31,
+ 0x31, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+ 0xe6, 0xde, 0xde, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0x9c, 0x7b, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x29, 0x19, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xce,
+ 0xe6, 0xef, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xb5, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x31, 0x31, 0x29, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xf7, 0xff, 0xef,
+ 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xce, 0xde, 0xce, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x31,
+ 0x31, 0x29, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0xce, 0xad, 0xad, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xde, 0xef, 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0x08,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10,
+ 0x21, 0x00, 0x3a, 0x31, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xce,
+ 0xe6, 0xef, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xf7, 0xff, 0xce, 0xe6, 0xef, 0xe6, 0xde, 0xde, 0xef,
+ 0xf7, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x3a,
+ 0x31, 0x4a, 0x08, 0x08,
+ 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xe6,
+ 0xde, 0xde, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x4a, 0x52,
+ 0x4a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x10, 0x21, 0x4a, 0x52, 0x4a, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce,
+ 0xe6, 0xef, 0xe6, 0xde,
+ 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x10, 0x29, 0x19, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x3a, 0x31, 0x4a, 0x6b,
+ 0x7b, 0x73, 0x08, 0x00,
+ 0x00, 0x10, 0x29, 0x19, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xe6, 0xde,
+ 0xde, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x10,
+ 0x29, 0x19, 0x6b, 0x5a,
+ 0x73, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0xff, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xe6, 0xde, 0xde, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x31, 0x31, 0x29, 0x08, 0x00, 0x00, 0x31, 0x31, 0x29, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xff, 0xe6, 0xde, 0xde, 0xef, 0xf7, 0xe6, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x29, 0x19, 0x08, 0x08,
+ 0x10, 0x08, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x31, 0x4a, 0x31, 0x31,
+ 0x29, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xce, 0xe6,
+ 0xef, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa5,
+ 0xb5, 0xb5, 0x00, 0x00,
+ 0x00, 0x10, 0x21, 0x00, 0x3a, 0x31, 0x4a, 0x10, 0x29, 0x19, 0x3a, 0x08,
+ 0x00, 0x08, 0x08, 0x10,
+ 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x31, 0x31, 0x29, 0x10, 0x29, 0x19,
+ 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10,
+ 0x7b, 0x9c, 0x10, 0xc5,
+ 0xef, 0x10, 0xc5, 0xef, 0x10, 0x7b, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x4a, 0x52, 0x4a,
+ 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde,
+ 0xde, 0xef, 0xf7, 0xe6,
+ 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x6b, 0x5a,
+ 0x4a, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x3a, 0x10, 0x21, 0x6b,
+ 0x5a, 0x73, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x94, 0xce, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+ 0xd6, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x4a, 0x5a, 0x73, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xe6, 0xde, 0xde,
+ 0xef, 0xde, 0xef, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0xce,
+ 0xe6, 0xef, 0xce, 0xe6,
+ 0xef, 0x08, 0x10, 0x42, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x31, 0x31, 0x29, 0x4a, 0x52, 0x4a, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x7b, 0xbd, 0x00, 0x9c,
+ 0xd6, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0x9c, 0xd6, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a,
+ 0xef, 0xde, 0xef, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff,
+ 0xe6, 0xde, 0xde, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0x08, 0xad, 0xd6, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x08, 0x08, 0x10, 0x08, 0x08,
+ 0x10, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x10, 0x29, 0x19, 0x3a,
+ 0x10, 0x21, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x84,
+ 0xbd, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0x7b, 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x31,
+ 0x31, 0x29, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0x08,
+ 0xad, 0xef, 0x00, 0xbd,
+ 0xf7, 0x10, 0xe6, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x08, 0x08,
+ 0x10, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0x10, 0x29, 0x19, 0x00,
+ 0x00, 0x00, 0x00, 0x84,
+ 0xbd, 0x10, 0xe6, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x19, 0x7b,
+ 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef,
+ 0x10, 0x5a, 0x9c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x29, 0x19, 0xce, 0xde, 0xce, 0xff,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xef, 0xde, 0xef,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xce,
+ 0xe6, 0xef, 0x00, 0x9c,
+ 0xd6, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad,
+ 0xd6, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x08, 0x31,
+ 0x3a, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x84,
+ 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x00, 0xbd, 0xef,
+ 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x00,
+ 0xbd, 0xef, 0x08, 0x31, 0x3a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x9c, 0xa5,
+ 0x94, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xce, 0xe6,
+ 0xef, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0x94, 0xce,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x19, 0x94, 0xce, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+ 0xef, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x84, 0xbd, 0x00,
+ 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x00, 0x84, 0xbd, 0x19,
+ 0x94, 0xce, 0x00, 0x84,
+ 0xbd, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xef, 0xde,
+ 0xef, 0x9c, 0xde, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0x84, 0xbd, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x10, 0x5a, 0x9c, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+ 0xef, 0x10, 0xc5, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x19,
+ 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08,
+ 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+ 0x7b, 0xbd, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31,
+ 0x29, 0xce, 0xe6, 0xef,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xce, 0xe6,
+ 0xef, 0xce, 0xad, 0xad, 0x9c, 0xad, 0xce, 0x19, 0x94, 0xce, 0x08, 0xad,
+ 0xef, 0x00, 0xbd, 0xef,
+ 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x10, 0x5a, 0x9c, 0x08, 0x08, 0x10,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x08,
+ 0x10, 0x10, 0x73, 0x7b, 0x00, 0x9c, 0xd6, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0xbd, 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08,
+ 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xef, 0x10, 0xce,
+ 0xce, 0x08, 0x31, 0x3a, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x08, 0x10, 0xce, 0xde, 0xce, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xc5, 0xad, 0xd6, 0xce, 0xde, 0xce, 0x9c, 0xad, 0xce, 0x08, 0xad,
+ 0xd6, 0x08, 0xad, 0xef,
+ 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x9c, 0xd6, 0x19, 0x7b, 0xbd,
+ 0x10, 0x7b, 0x9c, 0x08,
+ 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x08, 0x08, 0x10, 0x08, 0x31, 0x3a, 0x08,
+ 0x31, 0x3a, 0x10, 0x5a,
+ 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0xbd,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x19, 0x7b, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08,
+ 0xad, 0xef, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7, 0x19, 0x94, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x7b, 0x94,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce, 0x9c, 0xad,
+ 0xce, 0x19, 0x94, 0xce,
+ 0x08, 0xad, 0xd6, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6,
+ 0x00, 0x9c, 0xd6, 0x00,
+ 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19,
+ 0x7b, 0xbd, 0x00, 0x84,
+ 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x10, 0xc5, 0xef,
+ 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x08,
+ 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xef, 0x10, 0x52,
+ 0x7b, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x6b,
+ 0x7b, 0x73, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7,
+ 0xe6, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xce, 0xad, 0xad, 0xce, 0xde,
+ 0xce, 0xa5, 0xb5, 0xb5,
+ 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef,
+ 0x19, 0x94, 0xce, 0x00,
+ 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00,
+ 0x84, 0xbd, 0x00, 0x84,
+ 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x10, 0xc5, 0xef, 0x19,
+ 0x94, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x7b, 0xbd, 0x19,
+ 0x94, 0xce, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x08,
+ 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xd6,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xce, 0xde, 0xce, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xce, 0xde,
+ 0xce, 0xc5, 0xad, 0xd6,
+ 0xa5, 0xb5, 0xb5, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xd6,
+ 0x08, 0xad, 0xef, 0x08,
+ 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x9c, 0xd6, 0x00, 0x9c, 0xd6, 0x00,
+ 0x9c, 0xd6, 0x00, 0x9c,
+ 0xd6, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x08,
+ 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x84, 0xbd, 0x00, 0x9c,
+ 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xef,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0x7b, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xce, 0xde, 0xce,
+ 0xa5, 0xb5, 0xb5, 0x9c, 0xad, 0xce, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+ 0x08, 0xad, 0xef, 0x10,
+ 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x19,
+ 0x94, 0xce, 0x08, 0xad,
+ 0xef, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad,
+ 0xef, 0x00, 0xbd, 0xef,
+ 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x7b, 0xe6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x00, 0xbd,
+ 0xef, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x08,
+ 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08,
+ 0xad, 0xef, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x00, 0xbd, 0xef, 0x08, 0x31, 0x3a,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a,
+ 0x52, 0x4a, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf7, 0xff,
+ 0xe6, 0xde, 0xde, 0xc5, 0xad, 0xd6, 0x29, 0x5a, 0x4a, 0x00, 0x84, 0xbd,
+ 0x19, 0x94, 0xce, 0x08,
+ 0xad, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x08, 0xad, 0xd6, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x9c, 0xa5,
+ 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7,
+ 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xff, 0xf7, 0xff,
+ 0xef, 0xf7, 0xff, 0xce, 0xde, 0xce, 0x4a, 0x52, 0x4a, 0x08, 0x31, 0x5a,
+ 0x00, 0x84, 0xbd, 0x00,
+ 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x08, 0xad,
+ 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xef, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xd6, 0x19, 0x94,
+ 0xce, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x10,
+ 0xc5, 0xef, 0x10, 0x5a, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x9c, 0x7b,
+ 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x4a, 0x52, 0x4a, 0x08, 0x00, 0x00,
+ 0x08, 0x31, 0x5a, 0x00,
+ 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08,
+ 0xad, 0xef, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x94,
+ 0xce, 0x00, 0x9c, 0xd6,
+ 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10, 0x73, 0x7b, 0xe6,
+ 0xde, 0xde, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10,
+ 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08,
+ 0xad, 0xef, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x84, 0xbd,
+ 0x00, 0x9c, 0xd6, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10, 0xc5, 0xef, 0x19,
+ 0x94, 0xce, 0x9c, 0xad,
+ 0xce, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xef, 0xf7, 0xff, 0xef, 0xde, 0xef, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08,
+ 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08,
+ 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x00, 0xbd, 0xf7,
+ 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x84, 0xbd,
+ 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x08, 0xad,
+ 0xd6, 0x10, 0x7b, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+ 0xff, 0xef, 0xf7, 0xff,
+ 0xff, 0xff, 0xff, 0x9c, 0xa5, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x08,
+ 0xad, 0xd6, 0x00, 0xbd,
+ 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0x94, 0xce, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00,
+ 0xbd, 0xef, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xd6, 0x00, 0x84, 0xbd, 0x10, 0x52, 0x7b, 0xef, 0xf7,
+ 0xe6, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf7, 0xff,
+ 0xce, 0xde, 0xce, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x52, 0x7b, 0x00,
+ 0x84, 0xbd, 0x19, 0x94,
+ 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6, 0x19, 0x7b, 0xbd,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x19, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x10, 0x52,
+ 0x7b, 0x3a, 0x31, 0x4a,
+ 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+ 0xff, 0xff, 0xff, 0xef,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xce, 0xde, 0xce,
+ 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x10,
+ 0x52, 0x7b, 0x00, 0x84,
+ 0xbd, 0x00, 0x9c, 0xd6, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7,
+ 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x08, 0xad, 0xef, 0x19, 0x94, 0xce, 0x19, 0x94, 0xce, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef,
+ 0x10, 0xc5, 0xef, 0x00,
+ 0xbd, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd,
+ 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x00, 0x9c,
+ 0xd6, 0x10, 0x7b, 0x9c,
+ 0x08, 0x31, 0x5a, 0x00, 0x00, 0x00, 0x4a, 0x5a, 0x73, 0xce, 0xde, 0xce,
+ 0xef, 0xf7, 0xe6, 0xff,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xef, 0xf7,
+ 0xe6, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xa5, 0xb5, 0xb5, 0x6b, 0x5a,
+ 0x73, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x31, 0x3a, 0x10, 0x52,
+ 0x7b, 0x19, 0x7b, 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5,
+ 0xef, 0x19, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x08, 0xad, 0xef, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x19, 0x7b, 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+ 0x19, 0x94, 0xce, 0x08,
+ 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xf7, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd,
+ 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19,
+ 0xbd, 0xf7, 0x08, 0xad,
+ 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x08, 0xad,
+ 0xd6, 0x19, 0x94, 0xce,
+ 0x00, 0x84, 0xbd, 0x10, 0x52, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x29, 0x19, 0x31, 0x31, 0x29, 0x31,
+ 0x31, 0x29, 0x08, 0x08,
+ 0x10, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x31,
+ 0x3a, 0x10, 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad,
+ 0xef, 0x10, 0xc5, 0xef,
+ 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x08, 0xad,
+ 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad,
+ 0xef, 0x00, 0x84, 0xbd,
+ 0x19, 0x94, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd,
+ 0x19, 0x94, 0xce, 0x00,
+ 0x9c, 0xd6, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08,
+ 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x00, 0x9c, 0xd6, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b, 0x08, 0x31, 0x3a,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x31, 0x5a, 0x10, 0x5a, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x94,
+ 0xce, 0x08, 0xad, 0xef,
+ 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+ 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10,
+ 0xc5, 0xef, 0x00, 0xbd,
+ 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0x94, 0xce, 0x00, 0x9c,
+ 0xd6, 0x00, 0x84, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x7b, 0x9c, 0x00,
+ 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x19,
+ 0x94, 0xce, 0x00, 0x9c,
+ 0xd6, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xd6,
+ 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef,
+ 0x10, 0xc5, 0xef, 0x08,
+ 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd,
+ 0xf7, 0x08, 0xad, 0xd6,
+ 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x52, 0x7b,
+ 0x08, 0x31, 0x5a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x3a, 0x10, 0x52, 0x7b, 0x00, 0x84,
+ 0xbd, 0x00, 0x84, 0xbd,
+ 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+ 0x00, 0xbd, 0xf7, 0x10,
+ 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xf7, 0x08, 0xad,
+ 0xef, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6, 0x00, 0x9c,
+ 0xd6, 0x10, 0x5a, 0x9c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10,
+ 0x5a, 0x9c, 0x00, 0x84,
+ 0xbd, 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94,
+ 0xce, 0x00, 0x84, 0xbd,
+ 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef,
+ 0x19, 0x94, 0xce, 0x08,
+ 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x19,
+ 0xbd, 0xf7, 0x00, 0xbd,
+ 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x08, 0xad, 0xef, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+ 0x10, 0x52, 0x7b, 0x08,
+ 0x31, 0x3a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x10, 0x52,
+ 0x7b, 0x00, 0x84, 0xbd,
+ 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef,
+ 0x19, 0xbd, 0xf7, 0x08,
+ 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+ 0xbd, 0xef, 0x08, 0xad,
+ 0xef, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x00, 0x84, 0xbd, 0x00, 0x84,
+ 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x73, 0x7b, 0x10, 0x5a, 0x9c, 0x10, 0x7b,
+ 0x9c, 0x10, 0x5a, 0xbd,
+ 0x00, 0x84, 0xbd, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd,
+ 0x00, 0x84, 0xbd, 0x00,
+ 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08,
+ 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xef,
+ 0x08, 0xad, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+ 0x10, 0x52, 0x7b, 0x10,
+ 0x52, 0x7b, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x21, 0x00, 0x08,
+ 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31,
+ 0x3a, 0x10, 0x52, 0x7b,
+ 0x10, 0x5a, 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef,
+ 0x08, 0xad, 0xd6, 0x08,
+ 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08,
+ 0xad, 0xef, 0x19, 0x94,
+ 0xce, 0x00, 0x9c, 0xd6, 0x00, 0x84, 0xbd, 0x19, 0x7b, 0xbd, 0x10, 0x5a,
+ 0x9c, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b, 0x10, 0x5a, 0x9c,
+ 0x10, 0x5a, 0x9c, 0x10,
+ 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x00,
+ 0x84, 0xbd, 0x19, 0x94,
+ 0xce, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08, 0xad,
+ 0xef, 0x08, 0xad, 0xd6,
+ 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+ 0x10, 0x52, 0x7b, 0x10,
+ 0x52, 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x31, 0x3a,
+ 0x08, 0x31, 0x5a, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x19, 0x7b, 0xbd,
+ 0x00, 0x84, 0xbd, 0x00,
+ 0x9c, 0xd6, 0x08, 0xad, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x19,
+ 0x94, 0xce, 0x00, 0x9c,
+ 0xd6, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a,
+ 0x9c, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x10, 0x5a, 0x9c, 0x10,
+ 0x5a, 0x9c, 0x10, 0x5a,
+ 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x7b,
+ 0xbd, 0x00, 0x84, 0xbd,
+ 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+ 0x10, 0x52, 0x7b, 0x10,
+ 0x52, 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c,
+ 0x00, 0x84, 0xbd, 0x00,
+ 0x84, 0xbd, 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00,
+ 0x84, 0xbd, 0x00, 0x84,
+ 0xbd, 0x19, 0x7b, 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x52,
+ 0x7b, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x31, 0x5a, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x10, 0x5a,
+ 0x9c, 0x10, 0x73, 0x7b,
+ 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b,
+ 0x10, 0x52, 0x7b, 0x10,
+ 0x52, 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a,
+ 0x10, 0x52, 0x7b, 0x10,
+ 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x00,
+ 0x84, 0xbd, 0x00, 0x84,
+ 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x08, 0x31,
+ 0x5a, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31,
+ 0x5a, 0x10, 0x52, 0x7b,
+ 0x08, 0x31, 0x5a, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x08, 0x31, 0x5a,
+ 0x10, 0x52, 0x7b, 0x08,
+ 0x31, 0x5a, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x31, 0x5a, 0x08,
+ 0x31, 0x5a, 0x10, 0x52, 0x7b, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10,
+ 0x5a, 0x9c, 0x10, 0x5a,
+ 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x08, 0x31,
+ 0x5a, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a,
+ 0x08, 0x31, 0x5a, 0x08,
+ 0x31, 0x5a, 0x08, 0x31, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x3a, 0x08, 0x31, 0x5a, 0x08,
+ 0x31, 0x5a, 0x10, 0x52,
+ 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+#endif
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 8718af8..80fdfad 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -90,6 +90,7 @@
return ERR_PTR(-ENOMEM);
}
d_instantiate(dentry, inode);
+ dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_fsdata = (void *)ns->ops;
d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
if (d) {
diff --git a/include/dt-bindings/arm/arm-smmu.h b/include/dt-bindings/arm/arm-smmu.h
new file mode 100644
index 0000000..3a1dbd3
--- /dev/null
+++ b/include/dt-bindings/arm/arm-smmu.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_ARM_SMMU_H__
+#define __DT_ARM_SMMU_H__
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
+#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
+#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
+#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
+#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
+#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 6)
+#define ARM_SMMU_OPT_MMU500_ERRATA1 (1 << 7)
+#define ARM_SMMU_OPT_STATIC_CB (1 << 8)
+#define ARM_SMMU_OPT_HALT (1 << 9)
+
+#endif
diff --git a/include/dt-bindings/clock/msm-clocks-8952.h b/include/dt-bindings/clock/msm-clocks-8952.h
new file mode 100644
index 0000000..80a95d9
--- /dev/null
+++ b/include/dt-bindings/clock/msm-clocks-8952.h
@@ -0,0 +1,344 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8952_H
+#define __MSM_CLOCKS_8952_H
+
+/* clock_gcc controlled clocks */
+
+/* GPLLs */
+#define clk_gpll0_clk_src_8952 0x1617c790
+#define clk_gpll0_ao_clk_src_8952 0x9b4db4e8
+#define clk_gpll0_clk_src_8937 0x94350fc4
+#define clk_gpll0_ao_clk_src_8937 0x923c7546
+#define clk_gpll0_clk_src 0x5933b69f
+#define clk_gpll0_ao_clk_src 0x6b2fb034
+#define clk_gpll0_sleep_clk_src 0x4f89fcf0
+#define clk_gpll0_out_main 0x850fecec
+#define clk_gpll0_out_aux 0x64e55d63
+#define clk_gpll0_misc 0xe06ee816
+#define clk_gpll3_clk_src 0x5b1eccd5
+#define clk_gpll3_out_main 0xf5fc71ab
+#define clk_gpll3_out_aux 0xe72bea1a
+#define clk_gpll4_clk_src 0x10525d57
+#define clk_gpll4_out_main 0xdca8db2a
+#define clk_gpll6_clk_src 0x17dceaad
+#define clk_gpll6_out_main 0x27b8b7be
+#define clk_a53ss_c0_pll 0xf761da94
+#define clk_a53ss_c1_pll 0xfbc57bbd
+#define clk_a53ss_cci_pll 0x17d32f1e
+
+/* SRCs */
+#define clk_apss_ahb_clk_src 0x36f8495f
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_gcc_blsp2_ahb_clk 0x8f283c1d
+#define clk_gcc_blsp2_sleep_clk 0x429ca5d2
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_gcc_blsp2_uart1_apps_clk 0x8c3512ff
+#define clk_gcc_blsp2_uart1_sim_clk 0x2ea81633
+#define clk_blsp2_uart1_apps_clk_src 0x562c66dc
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_gcc_blsp2_uart2_apps_clk 0x1e1965a3
+#define clk_gcc_blsp2_uart2_sim_clk 0xca05dfe2
+#define clk_blsp2_uart2_apps_clk_src 0xdd448080
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_uart1_apps_clk_src 0xf8146114
+#define clk_blsp1_uart2_apps_clk_src 0xfc9c2f73
+#define clk_byte0_clk_src 0x75cc885b
+#define clk_cci_clk_src 0x822f3d97
+#define clk_camss_top_ahb_clk_src 0xf92304fb
+#define clk_camss_gp0_clk_src 0x43b063e9
+#define clk_camss_gp1_clk_src 0xa3315f1b
+#define clk_crypto_clk_src 0x37a21414
+#define clk_csi0_clk_src 0x227e65bc
+#define clk_csi1_clk_src 0x6a2a6c36
+#define clk_csi2_clk_src 0x4113589f
+#define clk_csi0phytimer_clk_src 0xc8a309be
+#define clk_csi1phytimer_clk_src 0x7c0fe23a
+#define clk_esc0_clk_src 0xb41d7c38
+#define clk_gfx3d_clk_src 0x917f76ef
+#define clk_gp1_clk_src 0xad85b97a
+#define clk_gp2_clk_src 0xfb1f0065
+#define clk_gp3_clk_src 0x63b693d6
+#define clk_jpeg0_clk_src 0x9a0a0ac3
+#define clk_mdp_clk_src 0x6dc1f8f1
+#define clk_mclk0_clk_src 0x266b3853
+#define clk_mclk1_clk_src 0xa73cad0c
+#define clk_mclk2_clk_src 0x42545468
+#define clk_pclk0_clk_src 0xccac1f35
+#define clk_pdm2_clk_src 0x31e494fd
+#define clk_sdcc1_apps_clk_src 0xd4975db2
+#define clk_sdcc1_ice_core_clk_src 0xfd6a4301
+#define clk_sdcc2_apps_clk_src 0xfc46c821
+#define clk_usb_hs_system_clk_src 0x28385546
+#define clk_usb_fs_system_clk_src 0x06ee1762
+#define clk_usb_fs_ic_clk_src 0x25d4acc8
+#define clk_usb_fs_ic_clk_src 0x25d4acc8
+#define clk_gcc_qusb2_phy_clk 0x996884d5
+#define clk_gcc_usb2_hs_phy_only_clk 0x0047179d
+#define clk_vsync_clk_src 0xecb43940
+#define clk_vfe0_clk_src 0xa0c2bd8f
+#define clk_vcodec0_clk_src 0xbc193019
+#define clk_gcc_blsp1_ahb_clk 0x8caa5b4f
+#define clk_gcc_boot_rom_ahb_clk 0xde2adeb1
+#define clk_gcc_crypto_ahb_clk 0x94de4919
+#define clk_gcc_crypto_axi_clk 0xd4415c9b
+#define clk_gcc_crypto_clk 0x00d390d2
+#define clk_gcc_prng_ahb_clk 0x397e7eaa
+#define clk_gcc_qdss_dap_clk 0x7fa9aa73
+#define clk_gcc_apss_tcu_clk 0xaf56a329
+#define clk_gcc_ipa_tbu_clk 0x75bbfb5c
+#define clk_gcc_gfx_tbu_clk 0x18bb9a90
+#define clk_gcc_gtcu_ahb_clk 0xb432168e
+#define clk_gcc_jpeg_tbu_clk 0xcf8fd944
+#define clk_gcc_mdp_tbu_clk 0x82287f76
+#define clk_gcc_smmu_cfg_clk 0x75eaefa5
+#define clk_gcc_venus_tbu_clk 0x7e0b97ce
+#define clk_gcc_vfe_tbu_clk 0x061f2f95
+#define clk_gcc_vfe1_tbu_clk 0x4888e70f
+#define clk_gcc_cpp_tbu_clk 0xab6f19ab
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_uart1_apps_clk 0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk 0xf8a61c96
+#define clk_gcc_camss_cci_ahb_clk 0xa81c11ba
+#define clk_gcc_camss_cci_clk 0xb7dd8824
+#define clk_gcc_camss_csi0_ahb_clk 0x175d672a
+#define clk_gcc_camss_csi0_clk 0x6b01b3e1
+#define clk_gcc_camss_csi0phy_clk 0x06a41ff7
+#define clk_gcc_camss_csi0pix_clk 0x61a8a930
+#define clk_gcc_camss_csi0rdi_clk 0x7053c7ae
+#define clk_gcc_camss_csi1_ahb_clk 0x2c2dc261
+#define clk_gcc_camss_csi1_clk 0x1aba4a8c
+#define clk_gcc_camss_csi1phy_clk 0x0fd1d1fa
+#define clk_gcc_camss_csi1pix_clk 0x87fc98d8
+#define clk_gcc_camss_csi1rdi_clk 0x6ac996fe
+#define clk_gcc_camss_csi2_ahb_clk 0xf3f25940
+#define clk_gcc_camss_csi2_clk 0xb6857fa2
+#define clk_gcc_camss_csi2phy_clk 0xbeeffbcd
+#define clk_gcc_camss_csi2pix_clk 0xa619561a
+#define clk_gcc_camss_csi2rdi_clk 0x019fd3f1
+#define clk_vfe1_clk_src 0x4e357366
+#define clk_gcc_camss_vfe1_clk 0xcaf20d99
+#define clk_gcc_camss_vfe1_ahb_clk 0x634a738a
+#define clk_gcc_camss_vfe1_axi_clk 0xaf7463b3
+#define clk_gcc_vfe1_qdss_at_clk 0xfff1e0be
+#define clk_cpp_clk_src 0x8382f56d
+#define clk_gcc_camss_cpp_clk 0x7118a0de
+#define clk_gcc_camss_cpp_ahb_clk 0x4ac95e14
+#define clk_gcc_camss_cpp_axi_clk 0xbbf73861
+#define clk_gcc_cpp_qdss_at_clk 0x05805d0d
+#define clk_gcc_cpp_qdss_tsctr_div8_clk 0xebd2c356
+#define clk_gcc_camss_csi_vfe0_clk 0xcc73453c
+#define clk_gcc_camss_csi_vfe1_clk 0xb1ef6e8b
+#define clk_gcc_camss_gp0_clk 0xd2bc3892
+#define clk_gcc_camss_gp1_clk 0xe4c013e1
+#define clk_gcc_camss_ispif_ahb_clk 0x3c0a858f
+#define clk_gcc_camss_jpeg0_clk 0x1ed3f032
+#define clk_gcc_camss_jpeg_ahb_clk 0x3bfa7603
+#define clk_gcc_camss_jpeg_axi_clk 0x3e278896
+#define clk_gcc_camss_mclk0_clk 0x80902deb
+#define clk_gcc_camss_mclk1_clk 0x5002d85f
+#define clk_gcc_camss_mclk2_clk 0x222f8fff
+#define clk_gcc_camss_micro_ahb_clk 0xfbbee8cf
+#define clk_gcc_camss_csi0phytimer_clk 0xf8897589
+#define clk_gcc_camss_csi1phytimer_clk 0x4d26438f
+#define clk_gcc_camss_ahb_clk 0x9894b414
+#define clk_gcc_camss_top_ahb_clk 0x4e814a78
+#define clk_gcc_camss_vfe0_clk 0xaaa3cd97
+#define clk_gcc_camss_vfe_ahb_clk 0x4050f47a
+#define clk_gcc_camss_vfe_axi_clk 0x77fe2384
+#define clk_gcc_sys_mm_noc_axi_clk 0xb75a7187
+#define clk_gcc_oxili_gmem_clk 0x5620913a
+#define clk_gcc_gp1_clk 0x057f7b69
+#define clk_gcc_gp2_clk 0x9bf83ffd
+#define clk_gcc_gp3_clk 0xec6539ee
+#define clk_gcc_mdss_ahb_clk 0xbfb92ed3
+#define clk_gcc_mdss_axi_clk 0x668f51de
+#define clk_gcc_mdss_byte0_clk 0x35da7862
+#define clk_gcc_mdss_esc0_clk 0xaec5cb25
+#define clk_gcc_mdss_mdp_clk 0x22f3521f
+#define clk_gcc_mdss_pclk0_clk 0xcc5c5c77
+#define clk_gcc_mdss_vsync_clk 0x32a09f1f
+#define clk_gcc_mss_cfg_ahb_clk 0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk 0x67544d62
+#define clk_gcc_oxili_ahb_clk 0xd15c8a00
+#define clk_gcc_oxili_gfx3d_clk 0x49a51fd9
+#define clk_gcc_oxili_timer_clk 0x1180db06
+#define clk_gcc_oxili_aon_clk 0xae18e54d
+#define clk_gcc_pdm2_clk 0x99d55711
+#define clk_gcc_pdm_ahb_clk 0x365664f6
+#define clk_gcc_sdcc1_ahb_clk 0x691e0caa
+#define clk_gcc_sdcc1_apps_clk 0x9ad6fb96
+#define clk_gcc_sdcc1_ice_core_clk 0x0fd5680a
+#define clk_gcc_sdcc2_ahb_clk 0x23d5727f
+#define clk_gcc_sdcc2_apps_clk 0x861b20ac
+#define clk_gcc_usb2a_phy_sleep_clk 0x6caa736f
+#define clk_gcc_usb_hs_phy_cfg_ahb_clk 0xe13808fd
+#define clk_gcc_usb_hs_ahb_clk 0x72ce8032
+#define clk_gcc_usb_fs_ahb_clk 0x00e31116
+#define clk_gcc_usb_fs_ic_clk 0xbd533d37
+#define clk_gcc_usb_hs_system_clk 0xa11972e5
+#define clk_gcc_usb_fs_system_clk 0xea3b114c
+#define clk_gcc_venus0_ahb_clk 0x08d778c6
+#define clk_gcc_venus0_axi_clk 0xcdf4c8f6
+#define clk_gcc_venus0_vcodec0_clk 0xf76a02bb
+#define clk_gcc_venus0_core0_vcodec0_clk 0x83a7f549
+#define clk_gcc_venus0_core1_vcodec0_clk 0xa0813de6
+#define clk_gcc_gfx_tcu_clk 0x59505e55
+#define clk_gcc_gtcu_ahb_bridge_clk 0x19d2c5fe
+#define clk_gcc_bimc_gpu_clk 0x19922503
+#define clk_gcc_bimc_gfx_clk 0x3edd69ad
+#define clk_ipa_clk 0xfa685cda
+#define clk_ipa_a_clk 0xeeec2919
+#define clk_mdss_mdp_vote_clk 0x588460a4
+#define clk_mdss_rotator_vote_clk 0x5b1f675e
+
+#define clk_pixel_clk_src 0x8b6f83d8
+#define clk_byte_clk_src 0x3a911c53
+#define clk_ext_pclk0_clk_src 0x087c1612
+#define clk_ext_byte0_clk_src 0xfb32f31e
+
+#define clk_dsi_pll0_byte_clk_src 0x44539836
+#define clk_dsi_pll0_pixel_clk_src 0x5767c287
+#define clk_dsi_pll1_byte_clk_src 0x73e88d02
+#define clk_dsi_pll1_pixel_clk_src 0xce233fcf
+#define clk_ext_pclk1_clk_src 0x8067c5a3
+#define clk_ext_byte1_clk_src 0x585ef6d4
+#define clk_byte1_clk_src 0x63c2c955
+#define clk_esc1_clk_src 0x3b0afa42
+#define clk_pclk1_clk_src 0x090f68ac
+#define clk_gcc_mdss_pclk1_clk 0x9a9c430d
+#define clk_gcc_mdss_byte1_clk 0x41f97fd8
+#define clk_gcc_mdss_esc1_clk 0x34653cc7
+#define clk_gcc_dcc_clk 0xd1000c50
+#define clk_gcc_debug_mux_8937 0x917968c2
+
+/* clock_rpm controlled clocks */
+#define clk_pnoc_clk 0xc1296d0f
+#define clk_pnoc_a_clk 0x9bcffee4
+#define clk_pnoc_msmbus_clk 0x2b53b688
+#define clk_pnoc_msmbus_a_clk 0x9753a54f
+#define clk_pnoc_keepalive_a_clk 0x9464f720
+#define clk_pnoc_sps_clk 0x23d3f584
+#define clk_pnoc_usb_a_clk 0x11d6a74e
+#define clk_pnoc_usb_clk 0x266d8376
+#define clk_snoc_clk 0x2c341aa0
+#define clk_snoc_a_clk 0x8fcef2af
+#define clk_snoc_usb_a_clk 0x34b7821b
+#define clk_snoc_wcnss_a_clk 0xd3949ebc
+#define clk_snoc_usb_clk 0x29f9d73d
+#define clk_snoc_msmbus_clk 0xe6900bb6
+#define clk_snoc_msmbus_a_clk 0x5d4683bd
+#define clk_snoc_mmnoc_axi_clk 0xfedd4bd5
+#define clk_snoc_mmnoc_ahb_clk 0xd2149dbb
+#define clk_sysmmnoc_clk 0xebb1df78
+#define clk_sysmmnoc_a_clk 0x6ca682a2
+#define clk_sysmmnoc_msmbus_clk 0xd61e5721
+#define clk_sysmmnoc_msmbus_a_clk 0x50600f1b
+#define clk_bimc_clk 0x4b80bf00
+#define clk_bimc_a_clk 0x4b25668a
+#define clk_bimc_acpu_a_clk 0x4446311b
+#define clk_bimc_msmbus_clk 0xd212feea
+#define clk_bimc_msmbus_a_clk 0x71d1a499
+#define clk_bimc_usb_a_clk 0xea410834
+#define clk_bimc_wcnss_a_clk 0x5a6df715
+#define clk_bimc_usb_clk 0x9bd2b2bf
+#define clk_bimc_gpu_clk 0xd3e0a327
+#define clk_bimc_gpu_a_clk 0x67f0e9a5
+#define clk_qdss_clk 0x1492202a
+#define clk_qdss_a_clk 0xdd121669
+#define clk_xo_clk_src 0x23f5649f
+#define clk_xo_a_clk_src 0x2fdd2c7c
+#define clk_xo_otg_clk 0x79bca5cc
+#define clk_xo_a2 0xeba5a83d
+#define clk_xo_dwc3_clk 0xfad488ce
+#define clk_xo_ehci_host_clk 0xc7c340b1
+#define clk_xo_lpm_clk 0x2be48257
+#define clk_xo_pil_mss_clk 0xe97a8354
+#define clk_xo_pil_pronto_clk 0x89dae6d0
+#define clk_xo_wlan_clk 0x0116b76f
+#define clk_xo_pil_lpass_clk 0xb72aa4c9
+#define clk_bb_clk1 0xf5304268
+#define clk_bb_clk1_a 0xfa113810
+#define clk_bb_clk1_pin 0x6dd0a779
+#define clk_bb_clk1_a_pin 0x9b637772
+#define clk_bb_clk2 0xfe15cb87
+#define clk_bb_clk2_a 0x59682706
+#define clk_bb_clk2_pin 0x498938e5
+#define clk_bb_clk2_a_pin 0x52513787
+#define clk_rf_clk1 0xaabeea5a
+#define clk_rf_clk1_a 0x72a10cb8
+#define clk_rf_clk1_pin 0x8f463562
+#define clk_rf_clk1_a_pin 0x62549ff6
+#define clk_rf_clk2 0x24a30992
+#define clk_rf_clk2_a 0x944d8bbd
+#define clk_rf_clk2_pin 0xa7c5602a
+#define clk_rf_clk2_a_pin 0x2d75eb4d
+#define clk_div_clk1 0xaa1157a6
+#define clk_div_clk1_a 0x6b943d68
+#define clk_div_clk2 0xd454019f
+#define clk_div_clk2_a 0x4bd7bfa8
+#define clk_ln_bb_clk 0x3ab0b36d
+#define clk_ln_bb_a_clk 0xc7257ea8
+
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux 0x8121ac15
+#define clk_rpm_debug_mux 0x25cd1f3a
+#define clk_wcnss_m_clk 0x709f430b
+#define clk_apss_debug_pri_mux 0xc691ff55
+#define clk_apss_debug_sec_mux 0xc0b680f9
+#define clk_apss_debug_ter_mux 0x32041c48
+#define clk_apc0_m_clk 0xce1e9473
+#define clk_apc1_m_clk 0x990fbaf7
+#define clk_cci_m_clk 0xec7e8afc
+
+#define clk_a53ssmux_lc 0x71a9377b
+#define clk_a53ssmux_bc 0xb5983c42
+#define clk_a53ssmux_cci 0x15560bd5
+
+#define clk_a53_lc_clk 0xc69f0878
+#define clk_a53_bc_clk 0xcf28e63a
+#define clk_cci_clk 0x96854074
+
+#define clk_audio_ap_clk 0x312ac429
+#define clk_audio_pmi_clk 0xb7ba2274
+#define clk_audio_lpass_mclk 0x575ec22b
+
+#endif
diff --git a/include/dt-bindings/clock/msm-clocks-8996.h b/include/dt-bindings/clock/msm-clocks-8996.h
new file mode 100644
index 0000000..1f515f2
--- /dev/null
+++ b/include/dt-bindings/clock/msm-clocks-8996.h
@@ -0,0 +1,548 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8996_H
+#define __MSM_CLOCKS_8996_H
+
+/* clock_gcc controlled clocks */
+#define clk_cxo_clk_src 0x79e95308
+#define clk_pnoc_clk 0x4325d220
+#define clk_pnoc_a_clk 0x2808c12b
+#define clk_bimc_clk 0x4b80bf00
+#define clk_bimc_a_clk 0x4b25668a
+#define clk_cnoc_clk 0xd5ccb7f4
+#define clk_cnoc_a_clk 0xd8fe2ccc
+#define clk_snoc_clk 0x2c341aa0
+#define clk_snoc_a_clk 0x8fcef2af
+#define clk_bb_clk1 0xf5304268
+#define clk_bb_clk1_ao 0xfa113810
+#define clk_bb_clk1_pin 0x6dd0a779
+#define clk_bb_clk1_pin_ao 0x9b637772
+#define clk_bb_clk2 0xfe15cb87
+#define clk_bb_clk2_ao 0x59682706
+#define clk_bb_clk2_pin 0x498938e5
+#define clk_bb_clk2_pin_ao 0x52513787
+#define clk_bimc_msmbus_clk 0xd212feea
+#define clk_bimc_msmbus_a_clk 0x71d1a499
+#define clk_ce1_a_clk 0x44a833fe
+#define clk_cnoc_msmbus_clk 0x62228b5d
+#define clk_cnoc_msmbus_a_clk 0x67442955
+#define clk_cxo_clk_src_ao 0x64eb6004
+#define clk_cxo_dwc3_clk 0xf79c19f6
+#define clk_cxo_lpm_clk 0x94adbf3d
+#define clk_cxo_otg_clk 0x4eec0bb9
+#define clk_cxo_pil_lpass_clk 0xe17f0ff6
+#define clk_cxo_pil_ssc_clk 0x81832015
+#define clk_div_clk1 0xaa1157a6
+#define clk_div_clk1_ao 0x6b943d68
+#define clk_div_clk2 0xd454019f
+#define clk_div_clk2_ao 0x53f9e788
+#define clk_div_clk3 0xa9a55a68
+#define clk_div_clk3_ao 0x3d6725a8
+#define clk_ipa_a_clk 0xeeec2919
+#define clk_ipa_clk 0xfa685cda
+#define clk_ln_bb_clk 0x3ab0b36d
+#define clk_ln_bb_a_clk 0xc7257ea8
+#define clk_ln_bb_clk_pin 0x1b1c476a
+#define clk_ln_bb_a_clk_pin 0x9cbb5411
+#define clk_mcd_ce1_clk 0xbb615d26
+#define clk_pnoc_keepalive_a_clk 0xf8f91f0b
+#define clk_pnoc_msmbus_clk 0x38b95c77
+#define clk_pnoc_msmbus_a_clk 0x8c9b4e93
+#define clk_pnoc_pm_clk 0xd6f7dfb9
+#define clk_pnoc_sps_clk 0xd482ecc7
+#define clk_qdss_a_clk 0xdd121669
+#define clk_qdss_clk 0x1492202a
+#define clk_rf_clk1 0xaabeea5a
+#define clk_rf_clk1_ao 0x72a10cb8
+#define clk_rf_clk1_pin 0x8f463562
+#define clk_rf_clk1_pin_ao 0x62549ff6
+#define clk_rf_clk2 0x24a30992
+#define clk_rf_clk2_ao 0x944d8bbd
+#define clk_rf_clk2_pin 0xa7c5602a
+#define clk_rf_clk2_pin_ao 0x2d75eb4d
+#define clk_snoc_msmbus_clk 0xe6900bb6
+#define clk_snoc_msmbus_a_clk 0x5d4683bd
+#define clk_mcd_ce1_clk 0xbb615d26
+#define clk_qcedev_ce1_clk 0x293f97b0
+#define clk_qcrypto_ce1_clk 0xa6ac14df
+#define clk_qseecom_ce1_clk 0xaa858373
+#define clk_scm_ce1_clk 0xd8ebcc62
+#define clk_ce1_clk 0x42229c55
+#define clk_gcc_ce1_ahb_m_clk 0x2eb28c01
+#define clk_gcc_ce1_axi_m_clk 0xc174dfba
+#define clk_measure_only_bimc_hmss_axi_clk 0xc1cc4f11
+#define clk_aggre1_noc_clk 0x049abba8
+#define clk_aggre1_noc_a_clk 0xc12e4220
+#define clk_aggre2_noc_clk 0xaa681404
+#define clk_aggre2_noc_a_clk 0xcab67089
+#define clk_mmssnoc_axi_rpm_clk 0x4d7f8cdc
+#define clk_mmssnoc_axi_rpm_a_clk 0xfbea899b
+#define clk_mmssnoc_axi_clk 0xdb4b31e6
+#define clk_mmssnoc_axi_a_clk 0xd4970614
+#define clk_mmssnoc_gds_clk 0x06a22afa
+
+#define clk_gpll0 0x1ebe3bc4
+#define clk_gpll0_ao 0xa1368304
+#define clk_gpll0_out_main 0xe9374de7
+#define clk_gpll4 0xb3b5d85b
+#define clk_gpll4_out_main 0xa9a0ab9d
+#define clk_ufs_axi_clk_src 0x297ca380
+#define clk_pcie_aux_clk_src 0xebc50566
+#define clk_usb30_master_clk_src 0xc6262f89
+#define clk_usb20_master_clk_src 0x5680ac83
+#define clk_ufs_ice_core_clk_src 0xda8e7119
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src 0xf8146114
+#define clk_blsp1_uart2_apps_clk_src 0xfc9c2f73
+#define clk_blsp1_uart3_apps_clk_src 0x600497f2
+#define clk_blsp1_uart4_apps_clk_src 0x56bff15c
+#define clk_blsp1_uart5_apps_clk_src 0x218ef697
+#define clk_blsp1_uart6_apps_clk_src 0x8fbdbe4c
+#define clk_blsp2_qup1_i2c_apps_clk_src 0xd6d1e95d
+#define clk_blsp2_qup1_spi_apps_clk_src 0xcc1b8365
+#define clk_blsp2_qup2_i2c_apps_clk_src 0x603b5c51
+#define clk_blsp2_qup2_spi_apps_clk_src 0xd577dc44
+#define clk_blsp2_qup3_i2c_apps_clk_src 0xea82959c
+#define clk_blsp2_qup3_spi_apps_clk_src 0xd04b1e92
+#define clk_blsp2_qup4_i2c_apps_clk_src 0x73dc968c
+#define clk_blsp2_qup4_spi_apps_clk_src 0x25d4a2b1
+#define clk_blsp2_qup5_i2c_apps_clk_src 0xcc3698bd
+#define clk_blsp2_qup5_spi_apps_clk_src 0xfa0cf45e
+#define clk_blsp2_qup6_i2c_apps_clk_src 0x2fa53151
+#define clk_blsp2_qup6_spi_apps_clk_src 0x5ca86755
+#define clk_blsp2_uart1_apps_clk_src 0x562c66dc
+#define clk_blsp2_uart2_apps_clk_src 0xdd448080
+#define clk_blsp2_uart3_apps_clk_src 0x46b2e90f
+#define clk_blsp2_uart4_apps_clk_src 0x23a093d2
+#define clk_blsp2_uart5_apps_clk_src 0xe067616a
+#define clk_blsp2_uart6_apps_clk_src 0xe02d2829
+#define clk_gp1_clk_src 0xad85b97a
+#define clk_gp2_clk_src 0xfb1f0065
+#define clk_gp3_clk_src 0x63b693d6
+#define clk_hmss_rbcpr_clk_src 0xedd9a474
+#define clk_pdm2_clk_src 0x31e494fd
+#define clk_sdcc1_apps_clk_src 0xd4975db2
+#define clk_sdcc2_apps_clk_src 0xfc46c821
+#define clk_sdcc3_apps_clk_src 0xea34c7f4
+#define clk_sdcc4_apps_clk_src 0x7aaaaa0c
+#define clk_tsif_ref_clk_src 0x4e9042d1
+#define clk_usb20_mock_utmi_clk_src 0xc3aaeecb
+#define clk_usb30_mock_utmi_clk_src 0xa024a976
+#define clk_usb3_phy_aux_clk_src 0x15eec63c
+#define clk_gcc_qusb2phy_prim_reset 0x07550fa1
+#define clk_gcc_qusb2phy_sec_reset 0x3f3a87d0
+#define clk_gcc_periph_noc_usb20_ahb_clk 0xfb9f26e9
+#define clk_gcc_mmss_gcc_dbg_clk 0xe89d461c
+#define clk_cpu_dbg_clk 0x6550dfa9
+#define clk_gcc_blsp1_ahb_clk 0x8caa5b4f
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk 0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk 0xf8a61c96
+#define clk_gcc_blsp1_uart3_apps_clk 0xc3298bd7
+#define clk_gcc_blsp1_uart4_apps_clk 0x26be16c0
+#define clk_gcc_blsp1_uart5_apps_clk 0x28a6bc74
+#define clk_gcc_blsp1_uart6_apps_clk 0x28fd3466
+#define clk_gcc_blsp2_ahb_clk 0x8f283c1d
+#define clk_gcc_blsp2_qup1_i2c_apps_clk 0x9ace11dd
+#define clk_gcc_blsp2_qup1_spi_apps_clk 0xa32604cc
+#define clk_gcc_blsp2_qup2_i2c_apps_clk 0x1bf9a57e
+#define clk_gcc_blsp2_qup2_spi_apps_clk 0xbf54ca6d
+#define clk_gcc_blsp2_qup3_i2c_apps_clk 0x336d4170
+#define clk_gcc_blsp2_qup3_spi_apps_clk 0xc68509d6
+#define clk_gcc_blsp2_qup4_i2c_apps_clk 0xbd22539d
+#define clk_gcc_blsp2_qup4_spi_apps_clk 0x01a72b93
+#define clk_gcc_blsp2_qup5_i2c_apps_clk 0xe2b2ce1d
+#define clk_gcc_blsp2_qup5_spi_apps_clk 0xf40999cd
+#define clk_gcc_blsp2_qup6_i2c_apps_clk 0x894bcea4
+#define clk_gcc_blsp2_qup6_spi_apps_clk 0xfe1bd34a
+#define clk_gcc_blsp2_uart1_apps_clk 0x8c3512ff
+#define clk_gcc_blsp2_uart2_apps_clk 0x1e1965a3
+#define clk_gcc_blsp2_uart3_apps_clk 0x382415ab
+#define clk_gcc_blsp2_uart4_apps_clk 0x87a44b42
+#define clk_gcc_blsp2_uart5_apps_clk 0x5cd30649
+#define clk_gcc_blsp2_uart6_apps_clk 0x8feee5ab
+#define clk_gcc_boot_rom_ahb_clk 0xde2adeb1
+#define clk_gcc_gp1_clk 0x057f7b69
+#define clk_gcc_gp2_clk 0x9bf83ffd
+#define clk_gcc_gp3_clk 0xec6539ee
+#define clk_gcc_hmss_rbcpr_clk 0x699183be
+#define clk_gcc_mmss_noc_cfg_ahb_clk 0xb41a9d99
+#define clk_gcc_pcie_0_aux_clk 0x3d2e3ece
+#define clk_gcc_pcie_0_cfg_ahb_clk 0x4dd325c3
+#define clk_gcc_pcie_0_mstr_axi_clk 0x3f85285b
+#define clk_gcc_pcie_0_slv_axi_clk 0xd69638a1
+#define clk_gcc_pcie_0_pipe_clk 0x4f37621e
+#define clk_gcc_pcie_0_phy_reset 0xdc3201c1
+#define clk_gcc_pcie_1_aux_clk 0xc9bb962c
+#define clk_gcc_pcie_1_cfg_ahb_clk 0xb6338658
+#define clk_gcc_pcie_1_mstr_axi_clk 0xc20f6269
+#define clk_gcc_pcie_1_slv_axi_clk 0xd54e40d6
+#define clk_gcc_pcie_1_pipe_clk 0xc1627422
+#define clk_gcc_pcie_1_phy_reset 0x674481bb
+#define clk_gcc_pcie_2_aux_clk 0xa4dc7ae8
+#define clk_gcc_pcie_2_cfg_ahb_clk 0x4f1d3121
+#define clk_gcc_pcie_2_mstr_axi_clk 0x9e81724a
+#define clk_gcc_pcie_2_slv_axi_clk 0x7990d8b2
+#define clk_gcc_pcie_2_pipe_clk 0xa757a834
+#define clk_gcc_pcie_2_phy_reset 0x82634880
+#define clk_gcc_pcie_phy_reset 0x9bc3c959
+#define clk_gcc_pcie_phy_com_reset 0x8bf513e6
+#define clk_gcc_pcie_phy_nocsr_com_phy_reset 0x0c16a2da
+#define clk_gcc_pcie_phy_aux_clk 0x4746e74f
+#define clk_gcc_pcie_phy_cfg_ahb_clk 0x8533671a
+#define clk_gcc_pdm2_clk 0x99d55711
+#define clk_gcc_pdm_ahb_clk 0x365664f6
+#define clk_gcc_prng_ahb_clk 0x397e7eaa
+#define clk_gcc_sdcc1_ahb_clk 0x691e0caa
+#define clk_gcc_sdcc1_apps_clk 0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk 0x23d5727f
+#define clk_gcc_sdcc2_apps_clk 0x861b20ac
+#define clk_gcc_sdcc3_ahb_clk 0x565b2c03
+#define clk_gcc_sdcc3_apps_clk 0x0b27aeac
+#define clk_gcc_sdcc4_ahb_clk 0x64f3e6a8
+#define clk_gcc_sdcc4_apps_clk 0xbf7c4dc8
+#define clk_gcc_tsif_ahb_clk 0x88d2822c
+#define clk_gcc_tsif_ref_clk 0x8f1ed2c2
+#define clk_gcc_ufs_ahb_clk 0x1914bb84
+#define clk_gcc_ufs_axi_clk 0x47c743a7
+#define clk_gcc_ufs_ice_core_clk 0x310b0710
+#define clk_gcc_ufs_rx_cfg_clk 0xa6747786
+#define clk_gcc_ufs_rx_symbol_0_clk 0x7f43251c
+#define clk_gcc_ufs_rx_symbol_1_clk 0x03182fde
+#define clk_gcc_ufs_tx_cfg_clk 0xba2cf8b5
+#define clk_gcc_ufs_tx_symbol_0_clk 0x6a9f747a
+#define clk_gcc_ufs_unipro_core_clk 0x2daf7fd2
+#define clk_gcc_ufs_sys_clk_core_clk 0x360e5ac8
+#define clk_gcc_ufs_tx_symbol_clk_core_clk 0xf6fb0df7
+#define clk_gcc_usb20_master_clk 0x24c3b66a
+#define clk_gcc_usb20_mock_utmi_clk 0xe8db8203
+#define clk_gcc_usb20_sleep_clk 0x6e8cb4b2
+#define clk_gcc_usb30_master_clk 0xb3b4e2cb
+#define clk_gcc_usb30_mock_utmi_clk 0xa800b65a
+#define clk_gcc_usb30_sleep_clk 0xd0b65c92
+#define clk_gcc_usb3_phy_aux_clk 0x0d9a36e0
+#define clk_gcc_usb3_phy_pipe_clk 0xf279aff2
+#define clk_gcc_usb_phy_cfg_ahb2phy_clk 0xd1231a0e
+#define clk_gcc_aggre0_cnoc_ahb_clk 0x53a35559
+#define clk_gcc_aggre0_snoc_axi_clk 0x3c446400
+#define clk_gcc_aggre0_noc_qosgen_extref_clk 0x8c4356ba
+#define clk_hlos1_vote_lpass_core_smmu_clk 0x3aaa1743
+#define clk_hlos1_vote_lpass_adsp_smmu_clk 0xc76f702f
+#define clk_gcc_usb3_phy_reset 0x03d559f1
+#define clk_gcc_usb3phy_phy_reset 0xb1a4f885
+#define clk_gcc_usb3_clkref_clk 0xb6cc8f01
+#define clk_gcc_hdmi_clkref_clk 0x4d4eec04
+#define clk_gcc_edp_clkref_clk 0xa8685c3f
+#define clk_gcc_ufs_clkref_clk 0x92aa126f
+#define clk_gcc_pcie_clkref_clk 0xa2e247fa
+#define clk_gcc_rx2_usb2_clkref_clk 0x27ec24ba
+#define clk_gcc_rx1_usb2_clkref_clk 0x53351d25
+#define clk_gcc_smmu_aggre0_ahb_clk 0x47a06ce4
+#define clk_gcc_smmu_aggre0_axi_clk 0x3cac4a6c
+#define clk_gcc_sys_noc_usb3_axi_clk 0x94d26800
+#define clk_gcc_sys_noc_ufs_axi_clk 0x19d38312
+#define clk_gcc_aggre2_usb3_axi_clk 0xd5822a8e
+#define clk_gcc_aggre2_ufs_axi_clk 0xb31e5191
+#define clk_gcc_mmss_gpll0_div_clk 0xdd06848d
+#define clk_gcc_mmss_bimc_gfx_clk 0xe4f28754
+#define clk_gcc_bimc_gfx_clk 0x3edd69ad
+#define clk_gcc_qspi_ahb_clk 0x96969dc8
+#define clk_gcc_qspi_ser_clk 0xfaf1e266
+#define clk_qspi_ser_clk_src 0x426676ee
+#define clk_sdcc1_ice_core_clk_src 0xfd6a4301
+#define clk_gcc_sdcc1_ice_core_clk 0x0fd5680a
+#define clk_gcc_mss_cfg_ahb_clk 0x111cde81
+#define clk_gcc_mss_snoc_axi_clk 0x0e71de85
+#define clk_gcc_mss_q6_bimc_axi_clk 0x67544d62
+#define clk_gcc_mss_mnoc_bimc_axi_clk 0xf665d03f
+#define clk_gpll0_out_msscc 0x7d794829
+#define clk_gcc_debug_mux_v2 0xf7e749f0
+#define clk_gcc_dcc_ahb_clk 0xfa14a88c
+#define clk_gcc_aggre0_noc_mpu_cfg_ahb_clk 0x5c1bb8e2
+
+/* clock_mmss controlled clocks */
+#define clk_mmsscc_xo 0x05e63704
+#define clk_mmsscc_gpll0 0xe900c515
+#define clk_mmsscc_gpll0_div 0x73892e05
+#define clk_mmsscc_mmssnoc_ahb 0x7b4bd6f7
+#define clk_mmpll0 0xdd83b751
+#define clk_mmpll0_out_main 0x2f996a31
+#define clk_mmpll1 0x6da7fb90
+#define clk_mmpll1_out_main 0xa0d3a7da
+#define clk_mmpll4 0x22c063c1
+#define clk_mmpll4_out_main 0xfb21c2fd
+#define clk_mmpll3 0x18c76899
+#define clk_mmpll3_out_main 0x6eb6328f
+#define clk_ahb_clk_src 0x86f49203
+#define clk_mmpll2 0x1190e4d8
+#define clk_mmpll2_out_main 0x1e9e24a8
+#define clk_mmpll8 0xd06ad45e
+#define clk_mmpll8_out_main 0x75b1f386
+#define clk_mmpll9 0x1c50684c
+#define clk_mmpll9_out_main 0x16b74937
+#define clk_mmpll5 0xa41e1936
+#define clk_mmpll5_out_main 0xcc1897bf
+#define clk_csi0_clk_src 0x227e65bc
+#define clk_vfe0_clk_src 0xa0c2bd8f
+#define clk_vfe1_clk_src 0x4e357366
+#define clk_csi1_clk_src 0x6a2a6c36
+#define clk_csi2_clk_src 0x4113589f
+#define clk_csi3_clk_src 0xfd934012
+#define clk_maxi_clk_src 0x52c09777
+#define clk_cpp_clk_src 0x8382f56d
+#define clk_jpeg0_clk_src 0x9a0a0ac3
+#define clk_jpeg2_clk_src 0x5ad927f3
+#define clk_jpeg_dma_clk_src 0xb68afcea
+#define clk_mdp_clk_src 0x6dc1f8f1
+#define clk_video_core_clk_src 0x8be4c944
+#define clk_fd_core_clk_src 0xe4799ab7
+#define clk_cci_clk_src 0x822f3d97
+#define clk_csiphy0_3p_clk_src 0xd2474b12
+#define clk_csiphy1_3p_clk_src 0x46a02aff
+#define clk_csiphy2_3p_clk_src 0x1447813f
+#define clk_camss_gp0_clk_src 0x6b57cfe6
+#define clk_camss_gp1_clk_src 0xf735368a
+#define clk_jpeg_dma_clk_src 0xb68afcea
+#define clk_mclk0_clk_src 0x266b3853
+#define clk_mclk1_clk_src 0xa73cad0c
+#define clk_mclk2_clk_src 0x42545468
+#define clk_mclk3_clk_src 0x2bfbb714
+#define clk_csi0phytimer_clk_src 0xc8a309be
+#define clk_csi1phytimer_clk_src 0x7c0fe23a
+#define clk_csi2phytimer_clk_src 0x62ffea9c
+#define clk_rbbmtimer_clk_src 0x17649ecc
+#define clk_esc0_clk_src 0xb41d7c38
+#define clk_esc1_clk_src 0x3b0afa42
+#define clk_hdmi_clk_src 0xb40aeea9
+#define clk_vsync_clk_src 0xecb43940
+#define clk_rbcpr_clk_src 0x2c2e9af2
+#define clk_video_subcore0_clk_src 0x88d79636
+#define clk_video_subcore1_clk_src 0x4966930c
+#define clk_mmss_bto_ahb_clk 0xfdf8c361
+#define clk_camss_ahb_clk 0xc4ff91d4
+#define clk_camss_cci_ahb_clk 0x04c4441a
+#define clk_camss_cci_clk 0xd6cb5eb9
+#define clk_camss_cpp_ahb_clk 0x12e9a87b
+#define clk_camss_cpp_clk 0xb82f366b
+#define clk_camss_cpp_axi_clk 0x5598c804
+#define clk_camss_cpp_vbif_ahb_clk 0xb5f31be4
+#define clk_camss_csi0_ahb_clk 0x6e29c972
+#define clk_camss_csi0_clk 0x30862ddb
+#define clk_camss_csi0phy_clk 0x2cecfb84
+#define clk_camss_csi0pix_clk 0x6946f77b
+#define clk_camss_csi0rdi_clk 0x83645ef5
+#define clk_camss_csi1_ahb_clk 0xccc15f06
+#define clk_camss_csi1_clk 0xb150f052
+#define clk_camss_csi1phy_clk 0xb989f06d
+#define clk_camss_csi1pix_clk 0x58d19bf3
+#define clk_camss_csi1rdi_clk 0x4d2f3352
+#define clk_camss_csi2_ahb_clk 0x92d02d75
+#define clk_camss_csi2_clk 0x74fc92e8
+#define clk_camss_csi2phy_clk 0xda05d9d8
+#define clk_camss_csi2pix_clk 0xf8ed0731
+#define clk_camss_csi2rdi_clk 0xdc1b2081
+#define clk_camss_csi3_ahb_clk 0xee5e459c
+#define clk_camss_csi3_clk 0x39488fdd
+#define clk_camss_csi3phy_clk 0x8b6063b9
+#define clk_camss_csi3pix_clk 0xd82bd467
+#define clk_camss_csi3rdi_clk 0xb6750046
+#define clk_camss_csi_vfe0_clk 0x3023937a
+#define clk_camss_csi_vfe1_clk 0xe66fa522
+#define clk_camss_csiphy0_3p_clk 0xf2a54f5a
+#define clk_camss_csiphy1_3p_clk 0x8bf70cb2
+#define clk_camss_csiphy2_3p_clk 0x1c14c939
+#define clk_camss_gp0_clk 0xcee7e51d
+#define clk_camss_gp1_clk 0x41f1c2e3
+#define clk_camss_ispif_ahb_clk 0x9a212c6d
+#define clk_camss_jpeg0_clk 0x0b0e2db7
+#define clk_camss_jpeg2_clk 0xd7291c8d
+#define clk_camss_jpeg_ahb_clk 0x1f47fd28
+#define clk_camss_jpeg_axi_clk 0x9e5545c8
+#define clk_camss_jpeg_dma_clk 0x2336e65d
+#define clk_camss_mclk0_clk 0xcf0c61e0
+#define clk_camss_mclk1_clk 0xd1410ed4
+#define clk_camss_mclk2_clk 0x851286f2
+#define clk_camss_mclk3_clk 0x4db11c45
+#define clk_camss_micro_ahb_clk 0x33a23277
+#define clk_camss_csi0phytimer_clk 0xff93b3c8
+#define clk_camss_csi1phytimer_clk 0x6c399ab6
+#define clk_camss_csi2phytimer_clk 0x24f47f49
+#define clk_camss_top_ahb_clk 0x8f8b2d33
+#define clk_camss_vfe_ahb_clk 0x595197bc
+#define clk_camss_vfe_axi_clk 0x273d4c31
+#define clk_camss_vfe0_ahb_clk 0x4652833c
+#define clk_camss_vfe0_clk 0x1e9bb8c4
+#define clk_camss_vfe0_stream_clk 0x22835fa4
+#define clk_camss_vfe1_ahb_clk 0x6a56abd3
+#define clk_camss_vfe1_clk 0x5bffa69b
+#define clk_camss_vfe1_stream_clk 0x92f849b9
+#define clk_fd_ahb_clk 0x868a2c5c
+#define clk_fd_core_clk 0x3badcae4
+#define clk_fd_core_uar_clk 0x7e624e15
+#define clk_gpu_ahb_clk 0xf97f1d43
+#define clk_gpu_aon_isense_clk 0xa9e9b297
+#define clk_gpu_gx_gfx3d_clk 0xb7ece823
+#define clk_gpu_mx_clk 0xb80ccedf
+#define clk_gpu_gx_rbbmtimer_clk 0xdeba634e
+#define clk_mdss_ahb_clk 0x684ccb41
+#define clk_mdss_axi_clk 0xcc07d687
+#define clk_mdss_esc0_clk 0x28cafbe6
+#define clk_mdss_esc1_clk 0xc22c6883
+#define clk_mdss_hdmi_ahb_clk 0x01cef516
+#define clk_mdss_hdmi_clk 0x097a6de9
+#define clk_mdss_mdp_clk 0x618336ac
+#define clk_mdss_vsync_clk 0x42a022d3
+#define clk_mmss_misc_ahb_clk 0xea30b0e7
+#define clk_mmss_misc_cxo_clk 0xe620cd80
+#define clk_mmagic_bimc_noc_cfg_ahb_clk 0x12d5ba72
+#define clk_mmagic_camss_axi_clk 0xa8b1c16b
+#define clk_mmagic_camss_noc_cfg_ahb_clk 0x5182c819
+#define clk_mmss_mmagic_cfg_ahb_clk 0x5e94a822
+#define clk_mmagic_mdss_axi_clk 0xa0359d10
+#define clk_mmagic_mdss_noc_cfg_ahb_clk 0x9c6d5482
+#define clk_mmagic_video_axi_clk 0x7b9219c3
+#define clk_mmagic_video_noc_cfg_ahb_clk 0x5124d256
+#define clk_mmss_mmagic_ahb_clk 0x3d15f2b0
+#define clk_mmss_mmagic_maxi_clk 0xbdaf5af7
+#define clk_mmss_rbcpr_ahb_clk 0x623ba55f
+#define clk_mmss_rbcpr_clk 0x69a23a6f
+#define clk_mmss_spdm_cpp_clk 0xefe35cd2
+#define clk_mmss_spdm_jpeg_dma_clk 0xcb7bd5a0
+#define clk_smmu_cpp_ahb_clk 0x3ad82d84
+#define clk_smmu_cpp_axi_clk 0xa6bb2f4a
+#define clk_smmu_jpeg_ahb_clk 0x10c436ec
+#define clk_smmu_jpeg_axi_clk 0x41112f37
+#define clk_smmu_mdp_ahb_clk 0x04994cb2
+#define clk_smmu_mdp_axi_clk 0x7fd71687
+#define clk_smmu_rot_ahb_clk 0xa30772c9
+#define clk_smmu_rot_axi_clk 0xfed7c078
+#define clk_smmu_vfe_ahb_clk 0x4dabebe7
+#define clk_smmu_vfe_axi_clk 0xde483725
+#define clk_smmu_video_ahb_clk 0x2d738e2c
+#define clk_smmu_video_axi_clk 0xe2b5b887
+#define clk_video_ahb_clk 0x90775cfb
+#define clk_video_axi_clk 0xe6c16dba
+#define clk_video_core_clk 0x7e876ec3
+#define clk_video_maxi_clk 0x97749db6
+#define clk_video_subcore0_clk 0xb6f63e6c
+#define clk_video_subcore1_clk 0x26c29cb4
+#define clk_vmem_ahb_clk 0xab6223ff
+#define clk_vmem_maxi_clk 0x15ef32db
+#define clk_mmss_debug_mux 0xe646ffda
+#define clk_mmss_gcc_dbg_clk 0xafa4d48a
+#define clk_gfx3d_clk_src 0x917f76ef
+#define clk_extpclk_clk_src 0xb2c31abd
+#define clk_mdss_byte0_clk 0xf5a03f64
+#define clk_mdss_byte1_clk 0xb8c7067d
+#define clk_mdss_extpclk_clk 0xfa5aadb0
+#define clk_mdss_pclk0_clk 0x3487234a
+#define clk_mdss_pclk1_clk 0xd5804246
+#define clk_gpu_gcc_dbg_clk 0x0ccc42cd
+#define clk_mdss_mdp_vote_clk 0x588460a4
+#define clk_mdss_rotator_vote_clk 0x5b1f675e
+#define clk_mmpll2_postdiv_clk 0x4fdeaaba
+#define clk_mmpll8_postdiv_clk 0xedf57882
+#define clk_mmpll9_postdiv_clk 0x3064b618
+#define clk_gfx3d_clk_src_v2 0x4210acb7
+#define clk_byte0_clk_src 0x75cc885b
+#define clk_byte1_clk_src 0x63c2c955
+#define clk_pclk0_clk_src 0xccac1f35
+#define clk_pclk1_clk_src 0x090f68ac
+#define clk_ext_byte0_clk_src 0xfb32f31e
+#define clk_ext_byte1_clk_src 0x585ef6d4
+#define clk_ext_pclk0_clk_src 0x087c1612
+#define clk_ext_pclk1_clk_src 0x8067c5a3
+
+/* clock_debug controlled clocks */
+#define clk_gcc_debug_mux 0x8121ac15
+
+/* external multimedia clocks */
+#define clk_dsi0pll_pixel_clk_mux 0x792379e1
+#define clk_dsi0pll_byte_clk_mux 0x60e83f06
+#define clk_dsi0pll_byte_clk_src 0xbbaa30be
+#define clk_dsi0pll_pixel_clk_src 0x45b3260f
+#define clk_dsi0pll_n2_div_clk 0x1474c213
+#define clk_dsi0pll_post_n1_div_clk 0xdab8c389
+#define clk_dsi0pll_vco_clk 0x15940d40
+#define clk_dsi1pll_pixel_clk_mux 0x36458019
+#define clk_dsi1pll_byte_clk_mux 0xb5a42b7b
+#define clk_dsi1pll_byte_clk_src 0x63930a8f
+#define clk_dsi1pll_pixel_clk_src 0x0e4c9b56
+#define clk_dsi1pll_n2_div_clk 0x2c9d4007
+#define clk_dsi1pll_post_n1_div_clk 0x03020041
+#define clk_dsi1pll_vco_clk 0x99797b50
+#define clk_mdss_dsi1_vco_clk_src 0xfcd15658
+#define clk_hdmi_vco_clk 0x66003284
+
+#define clk_dsi0pll_shadow_byte_clk_src 0x177c029c
+#define clk_dsi0pll_shadow_pixel_clk_src 0x98ae3c92
+#define clk_dsi0pll_shadow_n2_div_clk 0xd5f0dad9
+#define clk_dsi0pll_shadow_post_n1_div_clk 0x1f7c8cf8
+#define clk_dsi0pll_shadow_vco_clk 0xb100ca83
+#define clk_dsi1pll_shadow_byte_clk_src 0xfc021ce5
+#define clk_dsi1pll_shadow_pixel_clk_src 0xdcca3ffc
+#define clk_dsi1pll_shadow_n2_div_clk 0x189541bf
+#define clk_dsi1pll_shadow_post_n1_div_clk 0x1637020e
+#define clk_dsi1pll_shadow_vco_clk 0x68d8b6f7
+
+/* CPU clocks */
+#define clk_pwrcl_clk 0xc554130e
+#define clk_pwrcl_pll 0x25454ca1
+#define clk_pwrcl_alt_pll 0xc445471b
+#define clk_pwrcl_pll_main 0x28948e22
+#define clk_pwrcl_alt_pll_main 0x25c8270e
+#define clk_pwrcl_hf_mux 0x77706ae6
+#define clk_pwrcl_lf_mux 0xd99e334d
+#define clk_perfcl_clk 0x58869997
+#define clk_perfcl_pll 0x97dcec1c
+#define clk_perfcl_alt_pll 0xfe2eaea1
+#define clk_perfcl_pll_main 0x0dbf0c0b
+#define clk_perfcl_alt_pll_main 0x0b892aab
+#define clk_perfcl_hf_mux 0x9e8bbe59
+#define clk_perfcl_lf_mux 0x2f9c278d
+#define clk_cbf_pll 0xfe2e96a3
+#define clk_cbf_pll_main 0x2b05cf95
+#define clk_cbf_hf_mux 0x71244f73
+#define clk_cbf_clk 0x48e9e16b
+#define clk_xo_ao 0x428c856d
+#define clk_sys_apcsaux_clk 0x0b0dd513
+#define clk_cpu_debug_mux 0xc7acaa31
+
+/* Audio External Clocks */
+#define clk_audio_ap_clk 0x312ac429
+#define clk_audio_pmi_clk 0xb7ba2274
+#define clk_audio_ap_clk2 0xf0fbaf5b
+#define clk_audio_lpass_mclk2 0x0122abee
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index 7e1394c..36d34b1 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,47 +62,44 @@
#define GCC_GP2_CLK_SRC 44
#define GCC_GP3_CLK 45
#define GCC_GP3_CLK_SRC 46
-#define GCC_MSS_CFG_AHB_CLK 47
-#define GCC_MSS_GPLL0_DIV_CLK_SRC 48
-#define GCC_MSS_SNOC_AXI_CLK 49
-#define GCC_PCIE_0_CLKREF_CLK 50
-#define GCC_PCIE_AUX_CLK 51
-#define GCC_PCIE_AUX_PHY_CLK_SRC 52
-#define GCC_PCIE_CFG_AHB_CLK 53
-#define GCC_PCIE_MSTR_AXI_CLK 54
-#define GCC_PCIE_PHY_REFGEN_CLK 55
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 56
-#define GCC_PCIE_PIPE_CLK 57
-#define GCC_PCIE_SLEEP_CLK 58
-#define GCC_PCIE_SLV_AXI_CLK 59
-#define GCC_PCIE_SLV_Q2A_AXI_CLK 60
-#define GCC_PDM2_CLK 61
-#define GCC_PDM2_CLK_SRC 62
-#define GCC_PDM_AHB_CLK 63
-#define GCC_PDM_XO4_CLK 64
-#define GCC_PRNG_AHB_CLK 65
-#define GCC_SDCC1_AHB_CLK 66
-#define GCC_SDCC1_APPS_CLK 67
-#define GCC_SDCC1_APPS_CLK_SRC 68
-#define GCC_SPMI_FETCHER_AHB_CLK 69
-#define GCC_SPMI_FETCHER_CLK 70
-#define GCC_SPMI_FETCHER_CLK_SRC 71
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 72
-#define GCC_SYS_NOC_USB3_CLK 73
-#define GCC_USB30_MASTER_CLK 74
-#define GCC_USB30_MASTER_CLK_SRC 75
-#define GCC_USB30_MOCK_UTMI_CLK 76
-#define GCC_USB30_MOCK_UTMI_CLK_SRC 77
-#define GCC_USB30_SLEEP_CLK 78
-#define GCC_USB3_PHY_AUX_CLK 79
-#define GCC_USB3_PHY_AUX_CLK_SRC 80
-#define GCC_USB3_PHY_PIPE_CLK 81
-#define GCC_USB3_PRIM_CLKREF_CLK 82
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 83
-#define GPLL0 84
-#define GPLL0_OUT_EVEN 85
-#define GPLL4 86
-#define GPLL4_OUT_EVEN 87
+#define GCC_PCIE_0_CLKREF_CLK 47
+#define GCC_PCIE_AUX_CLK 48
+#define GCC_PCIE_AUX_PHY_CLK_SRC 49
+#define GCC_PCIE_CFG_AHB_CLK 50
+#define GCC_PCIE_MSTR_AXI_CLK 51
+#define GCC_PCIE_PHY_REFGEN_CLK 52
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 53
+#define GCC_PCIE_PIPE_CLK 54
+#define GCC_PCIE_SLEEP_CLK 55
+#define GCC_PCIE_SLV_AXI_CLK 56
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_PRNG_AHB_CLK 62
+#define GCC_SDCC1_AHB_CLK 63
+#define GCC_SDCC1_APPS_CLK 64
+#define GCC_SDCC1_APPS_CLK_SRC 65
+#define GCC_SPMI_FETCHER_AHB_CLK 66
+#define GCC_SPMI_FETCHER_CLK 67
+#define GCC_SPMI_FETCHER_CLK_SRC 68
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 69
+#define GCC_SYS_NOC_USB3_CLK 70
+#define GCC_USB30_MASTER_CLK 71
+#define GCC_USB30_MASTER_CLK_SRC 72
+#define GCC_USB30_MOCK_UTMI_CLK 73
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 74
+#define GCC_USB30_SLEEP_CLK 75
+#define GCC_USB3_PHY_AUX_CLK 76
+#define GCC_USB3_PHY_AUX_CLK_SRC 77
+#define GCC_USB3_PHY_PIPE_CLK 78
+#define GCC_USB3_PRIM_CLKREF_CLK 79
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 80
+#define GPLL0 81
+#define GPLL0_OUT_EVEN 82
+#define GPLL4 83
+#define GPLL4_OUT_EVEN 84
/* CPU clocks */
#define CLOCK_A7SS 0
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0d442e3..47f5ba6 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -199,7 +199,7 @@
extern void clockevents_resume(void);
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-# ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+# if defined(CONFIG_ARCH_HAS_TICK_BROADCAST) && defined(CONFIG_SMP)
extern void tick_broadcast(const struct cpumask *mask);
# else
# define tick_broadcast NULL
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 32c3d42..fabfc0b 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -272,11 +272,16 @@
struct device *dev, struct device_node *node);
extern struct coresight_cti_data *of_get_coresight_cti_data(
struct device *dev, struct device_node *node);
+extern int of_get_coresight_csr_name(struct device_node *node,
+ const char **csr_name);
+
#else
static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
static inline struct coresight_cti_data *of_get_coresight_cti_data(
struct device *dev, struct device_node *node) { return NULL; }
+static inline int of_get_coresight_csr_name(struct device_node *node,
+ const char **csr_name){ return -EINVAL; }
#endif
#ifdef CONFIG_PID_NS
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a964d07..2b9ece8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -303,10 +303,18 @@
int (*fb_ioctl)(struct fb_info *info, unsigned int cmd,
unsigned long arg);
+ /* perform fb specific ioctl v2 (optional) - provides file param */
+ int (*fb_ioctl_v2)(struct fb_info *info, unsigned int cmd,
+ unsigned long arg, struct file *file);
+
/* Handle 32bit compat ioctl (optional) */
- int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd,
+ int (*fb_compat_ioctl)(struct fb_info *info, unsigned int cmd,
unsigned long arg);
+ /* Handle 32bit compat ioctl (optional) */
+ int (*fb_compat_ioctl_v2)(struct fb_info *info, unsigned int cmd,
+ unsigned long arg, struct file *file);
+
/* perform fb specific mmap */
int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);
@@ -475,6 +483,7 @@
struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
+ struct file *file; /* current file node */
#ifdef CONFIG_FB_BACKLIGHT
/* assigned backlight device */
diff --git a/include/linux/input/synaptics_dsx_v2_6.h b/include/linux/input/synaptics_dsx_v2_6.h
new file mode 100644
index 0000000..52241e5
--- /dev/null
+++ b/include/linux/input/synaptics_dsx_v2_6.h
@@ -0,0 +1,114 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+#define PLATFORM_DRIVER_NAME "synaptics_dsxv26"
+#define STYLUS_DRIVER_NAME "synaptics_dsxv26_stylus"
+#define ACTIVE_PEN_DRIVER_NAME "synaptics_dsxv26_active_pen"
+#define PROXIMITY_DRIVER_NAME "synaptics_dsxv26_proximity"
+#define GESTURE_DRIVER_NAME "synaptics_dsxv26_gesture"
+#define I2C_DRIVER_NAME "synaptics_dsxv26"
+#define SPI_DRIVER_NAME "synaptics_dsxv26"
+
+/*
+ * struct synaptics_dsx_button_map - button map
+ * @nbuttons: number of buttons
+ * @map: pointer to array of button codes
+ */
+struct synaptics_dsx_button_map {
+ unsigned char nbuttons;
+ unsigned int *map;
+};
+
+/*
+ * struct synaptics_dsx_board_data - DSX board data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @swap_axes: swap axes flag
+ * @resume_in_workqueue: defer resume function to workqueue
+ * @irq_gpio: attention interrupt GPIO
+ * @irq_on_state: attention interrupt active state
+ * @power_gpio: power switch GPIO
+ * @power_on_state: power switch active state
+ * @reset_gpio: reset GPIO
+ * @reset_on_state: reset active state
+ * @max_y_for_2d: maximum y value for 2D area when virtual buttons are present
+ * @irq_flags: IRQ flags
+ * @i2c_addr: I2C slave address
+ * @ub_i2c_addr: microbootloader mode I2C slave address
+ * @device_descriptor_addr: HID device descriptor address
+ * @panel_x: x-axis resolution of display panel
+ * @panel_y: y-axis resolution of display panel
+ * @power_delay_ms: delay time to wait after powering up device
+ * @reset_delay_ms: delay time to wait after resetting device
+ * @reset_active_ms: reset active time
+ * @byte_delay_us: delay time between two bytes of SPI data
+ * @block_delay_us: delay time between two SPI transfers
+ * @pwr_reg_name: pointer to name of regulator for power control
+ * @bus_reg_name: pointer to name of regulator for bus pullup control
+ * @cap_button_map: pointer to 0D button map
+ * @vir_button_map: pointer to virtual button map
+ */
+struct synaptics_dsx_board_data {
+ bool x_flip;
+ bool y_flip;
+ bool swap_axes;
+ bool resume_in_workqueue;
+ int irq_gpio;
+ int irq_on_state;
+ int power_gpio;
+ int power_on_state;
+ int reset_gpio;
+ int reset_on_state;
+ int max_y_for_2d;
+ unsigned long irq_flags;
+ unsigned short i2c_addr;
+ unsigned short ub_i2c_addr;
+ unsigned short device_descriptor_addr;
+ unsigned int panel_x;
+ unsigned int panel_y;
+ unsigned int power_delay_ms;
+ unsigned int reset_delay_ms;
+ unsigned int reset_active_ms;
+ unsigned int byte_delay_us;
+ unsigned int block_delay_us;
+ const char *pwr_reg_name;
+ const char *bus_reg_name;
+ struct synaptics_dsx_button_map *cap_button_map;
+ struct synaptics_dsx_button_map *vir_button_map;
+};
+
+#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index bbc65ef..3562047 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,7 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
@@ -498,6 +499,13 @@
return this_cpu_read(ksoftirqd);
}
+static inline bool ksoftirqd_running_on(int cpu)
+{
+ struct task_struct *tsk = per_cpu(ksoftirqd, cpu);
+
+ return tsk && (tsk->state == TASK_RUNNING);
+}
+
/* Tasklets --- multithreaded analogue of BHs.
Main feature differing them of generic softirqs: tasklet
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
index de11633..cceae83 100644
--- a/include/linux/ipa_usb.h
+++ b/include/linux/ipa_usb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -121,14 +121,16 @@
* @dir: channel direction
* @xfer_ring_len: length of transfer ring in bytes (must be integral
* multiple of transfer element size - 16B for xDCI)
- * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
- * aligned to xfer_ring_len rounded to power of two
* @xfer_scratch: parameters for xDCI channel scratch
- * @xfer_ring_base_addr_iova: IO virtual address mapped to xfer_ring_base_addr
+ * @xfer_ring_base_addr_iova: IO virtual address mapped to physical base address
* @data_buff_base_len: length of data buffer allocated by USB driver
- * @data_buff_base_addr: physical base address for the data buffer (where TRBs
- * points)
- * @data_buff_base_addr_iova: IO virtual address mapped to data_buff_base_addr
+ * @data_buff_base_addr_iova: IO virtual address mapped to physical base address
+ * @sgt_xfer_rings: Scatter table for Xfer rings; contains a valid non-NULL
+ * value
+ * when USB S1-SMMU is enabled, else NULL.
+ * @sgt_data_buff: Scatter table for data buffs; contains a valid non-NULL
+ * value
+ * when USB S1-SMMU is enabled, else NULL.
*
*/
struct ipa_usb_xdci_chan_params {
@@ -143,12 +145,12 @@
/* transfer ring params */
enum gsi_chan_dir dir;
u16 xfer_ring_len;
- u64 xfer_ring_base_addr;
struct ipa_usb_xdci_chan_scratch xfer_scratch;
u64 xfer_ring_base_addr_iova;
u32 data_buff_base_len;
- u64 data_buff_base_addr;
u64 data_buff_base_addr_iova;
+ struct sg_table *sgt_xfer_rings;
+ struct sg_table *sgt_data_buff;
};
/**
diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h
index 1fe6e17..e3b9cf1 100644
--- a/include/linux/leds-qpnp-flash.h
+++ b/include/linux/leds-qpnp-flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,7 +21,14 @@
#define FLASH_LED_PREPARE_OPTIONS_MASK GENMASK(3, 0)
-int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+#if (defined CONFIG_LEDS_QPNP_FLASH || defined CONFIG_LEDS_QPNP_FLASH_V2)
+extern int (*qpnp_flash_led_prepare)(struct led_trigger *trig, int options,
int *max_current);
-
+#else
+static inline int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+ int *max_current)
+{
+ return -ENODEV;
+}
+#endif
#endif
diff --git a/include/linux/mdss_io_util.h b/include/linux/mdss_io_util.h
new file mode 100644
index 0000000..dd0b17c
--- /dev/null
+++ b/include/linux/mdss_io_util.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_IO_UTIL_H__
+#define __MDSS_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct mdss_io_data {
+ u32 len;
+ void __iomem *base;
+};
+
+void mdss_reg_w(struct mdss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 mdss_reg_r(struct mdss_io_data *io, u32 offset, u32 debug);
+void mdss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val) mdss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val) mdss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset) mdss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset) mdss_reg_r(io, offset, true)
+
+enum mdss_vreg_type {
+ DSS_REG_LDO,
+ DSS_REG_VS,
+};
+
+enum mdss_vreg_mode {
+ DSS_REG_MODE_ENABLE,
+ DSS_REG_MODE_DISABLE,
+ DSS_REG_MODE_LP,
+ DSS_REG_MODE_ULP,
+ DSS_REG_MODE_MAX,
+};
+
+struct mdss_vreg {
+ struct regulator *vreg; /* vreg handle */
+ char vreg_name[32];
+ int min_voltage;
+ int max_voltage;
+ u32 load[DSS_REG_MODE_MAX];
+ int pre_on_sleep;
+ int post_on_sleep;
+ int pre_off_sleep;
+ int post_off_sleep;
+};
+
+struct mdss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum mdss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+ DSS_CLK_OTHER,
+};
+
+struct mdss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum mdss_clk_type type;
+ unsigned long rate;
+};
+
+struct mdss_module_power {
+ unsigned int num_vreg;
+ struct mdss_vreg *vreg_config;
+ unsigned int num_gpio;
+ struct mdss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct mdss_clk *clk_config;
+};
+
+int msm_mdss_ioremap_byname(struct platform_device *pdev,
+ struct mdss_io_data *io_data, const char *name);
+void msm_mdss_iounmap(struct mdss_io_data *io_data);
+
+int msm_mdss_enable_gpio(struct mdss_gpio *in_gpio, int num_gpio, int enable);
+int msm_mdss_gpio_enable(struct mdss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_mdss_config_vreg(struct device *dev, struct mdss_vreg *in_vreg,
+ int num_vreg, int config);
+int msm_mdss_enable_vreg(struct mdss_vreg *in_vreg, int num_vreg, int enable);
+int msm_mdss_config_vreg_opt_mode(struct mdss_vreg *in_vreg, int num_vreg,
+ enum mdss_vreg_mode mode);
+
+int msm_mdss_get_clk(struct device *dev, struct mdss_clk *clk_arry,
+ int num_clk);
+void msm_mdss_put_clk(struct mdss_clk *clk_arry, int num_clk);
+int msm_mdss_clk_set_rate(struct mdss_clk *clk_arry, int num_clk);
+int msm_mdss_enable_clk(struct mdss_clk *clk_arry, int num_clk, int enable);
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value);
+
+#endif /* __MDSS_IO_UTIL_H__ */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8b35bdb..fd77f83 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -490,9 +490,21 @@
extern int do_swap_account;
#endif
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
+static inline void __mem_cgroup_update_page_stat(struct page *page,
+ struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx,
+ int val)
+{
+ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
+
+ if (memcg && memcg->stat)
+ this_cpu_add(memcg->stat->count[idx], val);
+}
+
/**
* mem_cgroup_update_page_stat - update page state statistics
* @page: the page
@@ -508,13 +520,12 @@
* mem_cgroup_update_page_stat(page, state, -1);
* unlock_page(page) or unlock_page_memcg(page)
*/
+
static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
- VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
- if (page->mem_cgroup)
- this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+ __mem_cgroup_update_page_stat(page, page->mem_cgroup, idx, val);
}
static inline void mem_cgroup_inc_page_stat(struct page *page,
@@ -709,7 +720,12 @@
{
}
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}
@@ -745,6 +761,13 @@
{
}
+static inline void __mem_cgroup_update_page_stat(struct page *page,
+ struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx,
+ int nr)
+{
+}
+
static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 16155d0..932e99c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1901,6 +1901,7 @@
/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_scale_factor;
+extern int extra_free_kbytes;
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
diff --git a/include/linux/msm_hdmi.h b/include/linux/msm_hdmi.h
new file mode 100644
index 0000000..afaa08a20
--- /dev/null
+++ b/include/linux/msm_hdmi.h
@@ -0,0 +1,93 @@
+/* include/linux/msm_hdmi.h
+ *
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_HDMI_H_
+#define _MSM_HDMI_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+/*
+ * HDMI cable notify handler structure.
+ * link A link for the linked list
+ * status Current status of HDMI cable connection
+ * hpd_notify Callback function to provide cable status
+ */
+struct hdmi_cable_notify {
+ struct list_head link;
+ int status;
+ void (*hpd_notify)(struct hdmi_cable_notify *h);
+};
+
+struct msm_hdmi_audio_edid_blk {
+ u8 *audio_data_blk;
+ unsigned int audio_data_blk_size; /* in bytes */
+ u8 *spk_alloc_data_blk;
+ unsigned int spk_alloc_data_blk_size; /* in bytes */
+};
+
+struct msm_hdmi_audio_setup_params {
+ u32 sample_rate_hz;
+ u32 num_of_channels;
+ u32 channel_allocation;
+ u32 level_shift;
+ bool down_mix;
+ u32 sample_present;
+};
+
+struct msm_hdmi_audio_codec_ops {
+ int (*audio_info_setup)(struct platform_device *pdev,
+ struct msm_hdmi_audio_setup_params *params);
+ int (*get_audio_edid_blk)(struct platform_device *pdev,
+ struct msm_hdmi_audio_edid_blk *blk);
+ int (*hdmi_cable_status)(struct platform_device *pdev, u32 vote);
+};
+
+#ifdef CONFIG_FB_MSM_MDSS_HDMI_PANEL
+/*
+ * Register for HDMI cable connect or disconnect notification.
+ * @param handler callback handler for notification
+ * @return negative value as error otherwise current status of cable
+ */
+int register_hdmi_cable_notification(
+ struct hdmi_cable_notify *handler);
+
+/*
+ * Un-register for HDMI cable connect or disconnect notification.
+ * @param handler callback handler for notification
+ * @return negative value as error
+ */
+int unregister_hdmi_cable_notification(
+ struct hdmi_cable_notify *handler);
+
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+ struct msm_hdmi_audio_codec_ops *ops);
+
+#else
+static inline int register_hdmi_cable_notification(
+ struct hdmi_cable_notify *handler) {
+ return 0;
+}
+
+static inline int unregister_hdmi_cable_notification(
+ struct hdmi_cable_notify *handler) {
+ return 0;
+}
+
+static inline int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+ struct msm_hdmi_audio_codec_ops *ops) {
+ return 0;
+}
+#endif /* CONFIG_FB_MSM_MDSS_HDMI_PANEL */
+
+#endif /*_MSM_HDMI_H_*/
diff --git a/include/linux/platform_data/qcom_wcnss_device.h b/include/linux/platform_data/qcom_wcnss_device.h
new file mode 100644
index 0000000..f9156ef
--- /dev/null
+++ b/include/linux/platform_data/qcom_wcnss_device.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2011, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_WCNSS_DEVICE__H
+#define __QCOM_WCNSS_DEVICE__H
+
+struct qcom_wcnss_opts {
+ bool has_48mhz_xo;
+};
+
+#endif /* __QCOM_WCNSS_DEVICE__H */
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 3e060d9..9a079a6 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -338,6 +338,7 @@
* %CHAN_PATH_SCALING5: ratio of {1, 8}
* %CHAN_PATH_SCALING6: ratio of {10, 81} The actual ratio is (1/8.1).
* %CHAN_PATH_SCALING7: ratio of {1, 10}
+ * %CHAN_PATH_SCALING8: ratio of {1, 16}
* %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
*
* The pre-scaling is applied for signals to be within the voltage range
@@ -352,6 +353,7 @@
PATH_SCALING5,
PATH_SCALING6,
PATH_SCALING7,
+ PATH_SCALING8,
PATH_SCALING_NONE,
};
@@ -380,6 +382,12 @@
* %SCALE_QRD_SKUT1_BATT_THERM: Conversion to temperature(decidegC) based on
* btm parameters for SKUT1
* %SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * %SCALE_BATT_THERM_TEMP: Conversion to temperature(decidegC) based on btm
+ * parameters.
+ * %SCALE_CHRG_TEMP: Conversion for charger temp.
+ * %SCALE_DIE_TEMP: Conversion for die temp.
+ * %SCALE_I_DEFAULT: Default scaling to convert raw adc code to current (uA).
+ * %SCALE_USBIN_I: Conversion for USB input current.
* %SCALE_NONE: Do not use this scaling type.
*/
enum qpnp_adc_scale_fn_type {
@@ -397,6 +405,11 @@
SCALE_NCP_03WF683_THERM,
SCALE_QRD_SKUT1_BATT_THERM,
SCALE_PMI_CHG_TEMP = 16,
+ SCALE_BATT_THERM_TEMP,
+ SCALE_CHRG_TEMP,
+ SCALE_DIE_TEMP,
+ SCALE_I_DEFAULT,
+ SCALE_USBIN_I,
SCALE_NONE,
};
@@ -1125,7 +1138,8 @@
{1, 20},
{1, 8},
{10, 81},
- {1, 10}
+ {1, 10},
+ {1, 16}
};
/**
@@ -1347,6 +1361,23 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt);
/**
+ * qpnp_iadc_scale_default() - Scales the pre-calibrated digital output
+ * of current ADC to the ADC reference and compensates for the
+ * gain and offset.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_iadc_scale_default(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
* qpnp_adc_scale_pmic_therm() - Scales the pre-calibrated digital output
* of an ADC to the ADC reference and compensates for the
* gain and offset. Performs the AMUX out as 2mV/K and returns
@@ -1384,6 +1415,23 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt);
/**
+ * qpnp_adc_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
* qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
* of an ADC to the ADC reference and compensates for the
* gain and offset. Returns the temperature in decidegC.
@@ -1401,6 +1449,61 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt);
/**
+ * qpnp_adc_scale_chrg_temp() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. The voltage measured by HKADC is related to
+ * the junction temperature according to
+ * Tj = 377.5 degC - (V_adc / 0.004)
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_chrg_temp(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_die_temp() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. The voltage measured by HKADC is related to
+ * the junction temperature according to
+ * Tj = -273.15 degC + (V_adc / 0.002)
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_die_temp(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_usbin_curr() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_usbin_curr(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
* qpnp_adc_scale_qrd_batt_therm() - Scales the pre-calibrated digital output
* of an ADC to the ADC reference and compensates for the
* gain and offset. Returns the temperature in decidegC.
@@ -1906,6 +2009,12 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
+static inline int32_t qpnp_iadc_scale_default(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
static inline int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
@@ -1918,12 +2027,36 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
+static inline int32_t qpnp_adc_batt_therm(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_chrg_temp(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_die_temp(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_usbin_curr(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
static inline int32_t qpnp_adc_scale_qrd_batt_therm(
struct qpnp_vadc_chip *vadc, int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 972dabc..3fda92f 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -84,6 +84,8 @@
* @db_reg_phs_addr_lsb: IPA channel doorbell register's physical address LSB
* @mapped_db_reg_phs_addr_lsb: doorbell LSB IOVA address mapped with IOMMU
* @db_reg_phs_addr_msb: IPA channel doorbell register's physical address MSB
+ * @sgt_trb_xfer_ring: USB TRB ring related sgtable entries
+ * @sgt_data_buff: Data buffer related sgtable entries
*/
struct usb_gsi_request {
void *buf_base_addr;
@@ -93,6 +95,8 @@
u32 db_reg_phs_addr_lsb;
dma_addr_t mapped_db_reg_phs_addr_lsb;
u32 db_reg_phs_addr_msb;
+ struct sg_table sgt_trb_xfer_ring;
+ struct sg_table sgt_data_buff;
};
/*
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
new file mode 100644
index 0000000..c3e980e
--- /dev/null
+++ b/include/linux/usb/msm_hsusb.h
@@ -0,0 +1,357 @@
+/* include/linux/usb/msm_hsusb.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_HSUSB_H
+#define __ASM_ARCH_MSM_HSUSB_H
+
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/clk.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/power_supply.h>
+#include <linux/cdev.h>
+#include <linux/usb_bam.h>
+#include <linux/extcon.h>
+#include <linux/regulator/driver.h>
+/**
+ * Requested USB votes for NOC frequency
+ *
+ * USB_NOC_NOM_VOTE Vote for NOM set of NOC frequencies
+ * USB_NOC_SVS_VOTE Vote for SVS set of NOC frequencies
+ *
+ */
+enum usb_noc_mode {
+ USB_NOC_NOM_VOTE = 0,
+ USB_NOC_SVS_VOTE,
+ USB_NOC_NUM_VOTE,
+};
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_IN_PROGRESS Charger detection in progress
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_IN_PROGRESS,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ * USB_NONCOMPLIANT_CHARGER A non-compliant charger pulls DP and DM to specific
+ * voltages between 2.0-3.3v for identification.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+ USB_NONCOMPLIANT_CHARGER,
+ USB_FLOATED_CHARGER,
+};
+
+/**
+ * Maintain state for hvdcp external charger status
+ * DEFAULT This is used when DCP is detected
+ * ACTIVE This is used when ioctl is called to block LPM
+ * INACTIVE This is used when ioctl is called to unblock LPM
+ */
+
+enum usb_ext_chg_status {
+ DEFAULT = 1,
+ ACTIVE,
+ INACTIVE,
+};
+
+/**
+ * USB ID state
+ */
+enum usb_id_state {
+ USB_ID_GROUND = 0,
+ USB_ID_FLOAT,
+};
+
+#define USB_NUM_BUS_CLOCKS 3
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @async_irq: IRQ number used by some controllers during low power state
+ * @phy_irq: IRQ number assigned for PHY to notify events like id and line
+ state changes.
+ * @pclk: clock struct of iface_clk.
+ * @core_clk: clock struct of core_bus_clk.
+ * @sleep_clk: clock struct of sleep_clk for USB PHY.
+ * @phy_reset_clk: clock struct of phy_reset_clk for USB PHY. This clock is
+ a reset only clock and resets the PHY, ULPI bridge and
+ CSR wrapper.
+ * @phy_por_clk: clock struct of phy_por_clk for USB PHY. This clock is
+ a reset only clock and resets only the PHY (POR).
+ * @phy_csr_clk: clock struct of phy_csr_clk for USB PHY. This clock is
+ required to access PHY CSR registers via AHB2PHY interface.
+ * @bus_clks: bimc/snoc/pcnoc clock struct.
+ * @core_reset: Reset control for core_clk
+ * @phy_reset: Reset control for phy_reset_clk
+ * @phy_por_reset: Reset control for phy_por_clk
+ * @default_noc_mode: default frequency for NOC clocks - SVS or NOM
+ * @core_clk_rate: core clk max frequency
+ * @regs: ioremapped register base address.
+ * @usb_phy_ctrl_reg: relevant PHY_CTRL_REG register base address.
+ * @inputs: OTG state machine inputs(Id, SessValid etc).
+ * @sm_work: OTG state machine work.
+ * @sm_work_pending: OTG state machine work is pending, queued post pm_resume
+ * @resume_pending: USB h/w lpm_exit pending. Done on next sm_work run
+ * @pm_suspended: OTG device is system(PM) suspended.
+ * @pm_notify: Notifier to receive system wide PM transition events.
+ It is used to defer wakeup events processing until
+ system is RESUMED.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
+ * @cur_power: The amount of mA available from downstream port.
+ * @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @bus_perf_client: Bus performance client handle to request BUS bandwidth
+ * @host_bus_suspend: indicates host bus suspend or not.
+ * @device_bus_suspend: indicates device bus suspend or not.
+ * @bus_clks_enabled: indicates pcnoc/snoc/bimc clocks are on or not.
+ * @chg_check_timer: The timer used to implement the workaround to detect
+ * very slow plug in of wall charger.
+ * @bc1p2_current_max: Max charging current allowed as per bc1.2 chg detection
+ * @typec_current_max: Max charging current allowed as per type-c chg detection
+ * @is_ext_chg_dcp: To indicate whether charger detected by external entity
+ SMB hardware is DCP charger or not.
+ * @ext_id_irq: IRQ for ID interrupt.
+ * @phy_irq_pending: Gets set when PHY IRQ arrives in LPM.
+ * @id_state: Indicates USBID line status.
+ * @rm_pulldown: Indicates pulldown status on D+ and D- data lines.
+ * @extcon_vbus: Used for VBUS notification registration.
+ * @extcon_id: Used for ID notification registration.
+ * @vbus_nb: Notification callback for VBUS event.
+ * @id_nb: Notification callback for ID event.
+ * @dpdm_desc: Regulator descriptor for D+ and D- voting.
+ * @dpdm_rdev: Regulator class device for dpdm regulator.
+ * @dbg_idx: Dynamic debug buffer Index.
+ * @dbg_lock: Dynamic debug buffer Lock.
+ * @buf: Dynamic Debug Buffer.
+ * @max_nominal_system_clk_rate: max freq at which system clock can run in
+ nominal mode.
+ */
+struct msm_otg {
+ struct usb_phy phy;
+ struct msm_otg_platform_data *pdata;
+ struct platform_device *pdev;
+ int irq;
+ int async_irq;
+ int phy_irq;
+ struct clk *xo_clk;
+ struct clk *pclk;
+ struct clk *core_clk;
+ struct clk *sleep_clk;
+ struct clk *phy_reset_clk;
+ struct clk *phy_por_clk;
+ struct clk *phy_csr_clk;
+ struct clk *bus_clks[USB_NUM_BUS_CLOCKS];
+ struct clk *phy_ref_clk;
+ struct reset_control *core_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *phy_por_reset;
+ long core_clk_rate;
+ long core_clk_svs_rate;
+ long core_clk_nominal_rate;
+ enum usb_noc_mode default_noc_mode;
+ struct resource *io_res;
+ void __iomem *regs;
+ void __iomem *phy_csr_regs;
+ void __iomem *usb_phy_ctrl_reg;
+#define ID 0
+#define B_SESS_VLD 1
+#define A_BUS_SUSPEND 14
+#define B_FALSE_SDP 18
+ unsigned long inputs;
+ struct work_struct sm_work;
+ bool sm_work_pending;
+ bool resume_pending;
+ atomic_t pm_suspended;
+ struct notifier_block pm_notify;
+ atomic_t in_lpm;
+ bool err_event_seen;
+ int async_int;
+ unsigned int cur_power;
+ struct workqueue_struct *otg_wq;
+ struct delayed_work chg_work;
+ struct delayed_work id_status_work;
+ enum usb_chg_state chg_state;
+ enum usb_chg_type chg_type;
+ unsigned int dcd_time;
+ unsigned long caps;
+ uint32_t bus_perf_client;
+ bool host_bus_suspend;
+ bool device_bus_suspend;
+ bool bus_clks_enabled;
+ struct timer_list chg_check_timer;
+ /*
+ * Allowing PHY power collapse turns off the HSUSB 3.3v and 1.8v
+ * analog regulators while going to low power mode.
+ * Currently only the 28nm PHY supports allowing PHY
+ * power collapse since it doesn't have leakage currents while
+ * turning off the power rails.
+ */
+#define ALLOW_PHY_POWER_COLLAPSE BIT(0)
+ /*
+ * Allow PHY RETENTION mode before turning off the digital
+ * voltage regulator(VDDCX).
+ */
+#define ALLOW_PHY_RETENTION BIT(1)
+ /*
+ * Allow putting the core in Low Power mode, when
+ * USB bus is suspended but cable is connected.
+ */
+#define ALLOW_LPM_ON_DEV_SUSPEND BIT(2)
+ /*
+ * Allowing PHY regulators LPM puts the HSUSB 3.3v and 1.8v
+ * analog regulators into LPM while going to USB low power mode.
+ */
+#define ALLOW_PHY_REGULATORS_LPM BIT(3)
+ /*
+ * Allow PHY RETENTION mode before turning off the digital
+ * voltage regulator(VDDCX) during host mode.
+ */
+#define ALLOW_HOST_PHY_RETENTION BIT(4)
+ /*
+ * Allow VDD minimization without putting PHY into retention
+ * for fixing PHY current leakage issue when LDOs are turned off.
+ */
+#define ALLOW_VDD_MIN_WITH_RETENTION_DISABLED BIT(5)
+
+ /*
+ * PHY can keep D+ pull-up during peripheral bus suspend and
+ * D+/D- pull-down during host bus suspend without any
+ * re-work. This is possible only when PHY DVDD is supplied
+ * by a PMIC LDO (unlike VDDCX/VDDMX).
+ */
+#define ALLOW_BUS_SUSPEND_WITHOUT_REWORK BIT(6)
+ unsigned long lpm_flags;
+#define PHY_PWR_COLLAPSED BIT(0)
+#define PHY_RETENTIONED BIT(1)
+#define XO_SHUTDOWN BIT(2)
+#define CLOCKS_DOWN BIT(3)
+#define PHY_REGULATORS_LPM BIT(4)
+ int reset_counter;
+ unsigned int online;
+ unsigned int host_mode;
+ unsigned int bc1p2_current_max;
+ unsigned int typec_current_max;
+
+ dev_t ext_chg_dev;
+ struct cdev ext_chg_cdev;
+ struct class *ext_chg_class;
+ struct device *ext_chg_device;
+ bool ext_chg_opened;
+ enum usb_ext_chg_status ext_chg_active;
+ struct completion ext_chg_wait;
+ struct pinctrl *phy_pinctrl;
+ bool is_ext_chg_dcp;
+ struct qpnp_vadc_chip *vadc_dev;
+ int ext_id_irq;
+ bool phy_irq_pending;
+ enum usb_id_state id_state;
+ bool rm_pulldown;
+ struct extcon_dev *extcon_vbus;
+ struct extcon_dev *extcon_id;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+ struct regulator_desc dpdm_rdesc;
+ struct regulator_dev *dpdm_rdev;
+/* Maximum debug message length */
+#define DEBUG_MSG_LEN 128UL
+/* Maximum number of messages */
+#define DEBUG_MAX_MSG 256UL
+ unsigned int dbg_idx;
+ rwlock_t dbg_lock;
+
+ char (buf[DEBUG_MAX_MSG])[DEBUG_MSG_LEN]; /* buffer */
+ unsigned int vbus_state;
+ unsigned int usb_irq_count;
+ int pm_qos_latency;
+ struct pm_qos_request pm_qos_req_dma;
+ struct delayed_work perf_vote_work;
+};
+
+struct ci13xxx_platform_data {
+ u8 usb_core_id;
+ /*
+ * value of 2^(log2_itc-1) will be used as the interrupt threshold
+ * (ITC), when log2_itc is between 1 to 7.
+ */
+ int log2_itc;
+ bool l1_supported;
+ bool enable_ahb2ahb_bypass;
+ bool enable_streaming;
+ bool enable_axi_prefetch;
+};
+
+#ifdef CONFIG_USB_BAM
+void msm_bam_set_usb_host_dev(struct device *dev);
+bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable);
+int msm_do_bam_disable_enable(enum usb_ctrl ctrl);
+#else
+static inline void msm_bam_set_usb_host_dev(struct device *dev) {}
+static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable)
+{
+	return true;
+}
+/* static inline: a plain definition in a header would break the link
+ * (multiple definitions) when included from more than one translation unit.
+ */
+static inline int msm_do_bam_disable_enable(enum usb_ctrl ctrl) { return true; }
+#endif
+#ifdef CONFIG_USB_CI13XXX_MSM
+void msm_hw_soft_reset(void);
+#else
+static inline void msm_hw_soft_reset(void)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index 974c379..daa245d 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -25,6 +25,9 @@
#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
+#define USB_HS_APF_CTRL (MSM_USB_BASE + 0x0380)
+
+#define APF_CTRL_EN BIT(0)
#define USB_USBCMD (MSM_USB_BASE + 0x0140)
#define USB_PORTSC (MSM_USB_BASE + 0x0184)
@@ -34,15 +37,24 @@
#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
+#define GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN BIT(12)
+#define GENCONFIG_2_DPSE_DMSE_HV_INTR_EN BIT(15)
#define USBCMD_SESS_VLD_CTRL BIT(25)
#define USBCMD_RESET 2
#define USB_USBINTR (MSM_USB_BASE + 0x0148)
+#define AHB2AHB_BYPASS BIT(31)
+#define AHB2AHB_BYPASS_BIT_MASK BIT(31)
+#define AHB2AHB_BYPASS_CLEAR (0 << 31)
+
#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
#define PORTSC_PTS_MASK (3 << 30)
#define PORTSC_PTS_ULPI (2 << 30)
#define PORTSC_PTS_SERIAL (3 << 30)
+#define PORTSC_LS (3 << 10)
+#define PORTSC_LS_DM (1 << 10)
+#define PORTSC_CCS (1 << 0)
#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170)
#define ULPI_RUN (1 << 30)
@@ -63,10 +75,16 @@
#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
+#define PHY_IDHV_INTEN (1 << 8) /* PHY ID HV interrupt */
+#define PHY_OTGSESSVLDHV_INTEN (1 << 9) /* PHY Session Valid HV int. */
+#define PHY_CLAMP_DPDMSE_EN (1 << 21) /* PHY mpm DP DM clamp enable */
+#define PHY_POR_BIT_MASK BIT(0)
#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */
+#define PHY_POR_DEASSERT (0 << 0) /* USB2 28nm PHY POR DEASSERT */
/* OTG definitions */
#define OTGSC_INTSTS_MASK (0x7f << 16)
+#define OTGSC_IDPU (1 << 5)
#define OTGSC_ID (1 << 8)
#define OTGSC_BSV (1 << 11)
#define OTGSC_IDIS (1 << 16)
@@ -74,4 +92,29 @@
#define OTGSC_IDIE (1 << 24)
#define OTGSC_BSVIE (1 << 27)
+/* USB PHY CSR registers and bit definitions */
+
+#define USB_PHY_CSR_PHY_CTRL_COMMON0 (MSM_USB_PHY_CSR_BASE + 0x078)
+#define SIDDQ BIT(2)
+
+#define USB_PHY_CSR_PHY_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x08C)
+#define ID_HV_CLAMP_EN_N BIT(1)
+
+#define USB_PHY_CSR_PHY_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x094)
+#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(2)
+
+#define USB2_PHY_USB_PHY_IRQ_CMD (MSM_USB_PHY_CSR_BASE + 0x0D0)
+#define USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS (MSM_USB_PHY_CSR_BASE + 0x05C)
+
+#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR0 (MSM_USB_PHY_CSR_BASE + 0x0DC)
+#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR1 (MSM_USB_PHY_CSR_BASE + 0x0E0)
+
+#define USB2_PHY_USB_PHY_INTERRUPT_MASK1 (MSM_USB_PHY_CSR_BASE + 0x0D8)
+
+#define USB_PHY_IDDIG_1_0 BIT(7)
+
+#define USB_PHY_IDDIG_RISE_MASK BIT(0)
+#define USB_PHY_IDDIG_FALL_MASK BIT(1)
+#define USB_PHY_ID_MASK (USB_PHY_IDDIG_RISE_MASK | USB_PHY_IDDIG_FALL_MASK)
+
#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 64aa52e..d999b3c 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -58,6 +58,7 @@
OTG_STATE_B_SRP_INIT,
OTG_STATE_B_PERIPHERAL,
OTG_STATE_B_SUSPEND,
+ OTG_STATE_B_CHARGER,
/* extra dual-role default-b states */
OTG_STATE_B_WAIT_ACON,
@@ -141,6 +142,10 @@
/* reset the PHY clocks */
int (*reset)(struct usb_phy *x);
+
+ /* for notification of usb_phy_dbg_events */
+ void (*dbg_event)(struct usb_phy *x,
+ char *event, int msg1, int msg2);
int (*disable_chirp)(struct usb_phy *x, bool disable);
};
diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h
index b58d8ee..fe626c18 100644
--- a/include/linux/usb/usb_qdss.h
+++ b/include/linux/usb/usb_qdss.h
@@ -15,6 +15,9 @@
#include <linux/kernel.h>
+#define USB_QDSS_CH_MDM "qdss_mdm"
+#define USB_QDSS_CH_MSM "qdss"
+
struct qdss_request {
char *buf;
int length;
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
new file mode 100644
index 0000000..b37f8df
--- /dev/null
+++ b/include/linux/wcnss_wlan.h
@@ -0,0 +1,167 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _WCNSS_WLAN_H_
+#define _WCNSS_WLAN_H_
+
+#include <linux/device.h>
+#include <linux/sched.h>
+
+#define IRIS_REGULATORS 4
+#define PRONTO_REGULATORS 3
+
+enum wcnss_opcode {
+ WCNSS_WLAN_SWITCH_OFF = 0,
+ WCNSS_WLAN_SWITCH_ON,
+};
+
+enum wcnss_hw_type {
+ WCNSS_RIVA_HW = 0,
+ WCNSS_PRONTO_HW,
+};
+
+struct vregs_level {
+ int nominal_min;
+ int low_power_min;
+ int max_voltage;
+ int uA_load;
+};
+
+struct wcnss_wlan_config {
+ int use_48mhz_xo;
+ int is_pronto_vadc;
+ int is_pronto_v3;
+ void __iomem *msm_wcnss_base;
+ int iris_id;
+ int vbatt;
+ struct vregs_level pronto_vlevel[PRONTO_REGULATORS];
+ struct vregs_level iris_vlevel[IRIS_REGULATORS];
+};
+
+enum {
+ WCNSS_XO_48MHZ = 1,
+ WCNSS_XO_19MHZ,
+ WCNSS_XO_INVALID,
+};
+
+enum {
+ WCNSS_WLAN_DATA2,
+ WCNSS_WLAN_DATA1,
+ WCNSS_WLAN_DATA0,
+ WCNSS_WLAN_SET,
+ WCNSS_WLAN_CLK,
+ WCNSS_WLAN_MAX_GPIO,
+};
+
+#define WCNSS_VBATT_THRESHOLD 3500000
+#define WCNSS_VBATT_GUARD 20000
+#define WCNSS_VBATT_HIGH 3700000
+#define WCNSS_VBATT_LOW 3300000
+#define WCNSS_VBATT_INITIAL 3000000
+#define WCNSS_WLAN_IRQ_INVALID -1
+#define HAVE_WCNSS_SUSPEND_RESUME_NOTIFY 1
+#define HAVE_WCNSS_RESET_INTR 1
+#define HAVE_WCNSS_CAL_DOWNLOAD 1
+#define HAVE_CBC_DONE 1
+#define HAVE_WCNSS_RX_BUFF_COUNT 1
+#define HAVE_WCNSS_SNOC_HIGH_FREQ_VOTING 1
+#define HAVE_WCNSS_5G_DISABLE 1
+#define WLAN_MAC_ADDR_SIZE (6)
+#define WLAN_RF_REG_ADDR_START_OFFSET 0x3
+#define WLAN_RF_REG_DATA_START_OFFSET 0xf
+#define WLAN_RF_READ_REG_CMD 0x3
+#define WLAN_RF_WRITE_REG_CMD 0x2
+#define WLAN_RF_READ_CMD_MASK 0x3fff
+#define WLAN_RF_CLK_WAIT_CYCLE 2
+#define WLAN_RF_PREPARE_CMD_DATA 5
+#define WLAN_RF_READ_DATA 6
+#define WLAN_RF_DATA_LEN 3
+#define WLAN_RF_DATA0_SHIFT 0
+#define WLAN_RF_DATA1_SHIFT 1
+#define WLAN_RF_DATA2_SHIFT 2
+#define PRONTO_PMU_OFFSET 0x1004
+#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP BIT(5)
+
+struct device *wcnss_wlan_get_device(void);
+void wcnss_get_monotonic_boottime(struct timespec *ts);
+struct resource *wcnss_wlan_get_memory_map(struct device *dev);
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev);
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev);
+void wcnss_wlan_register_pm_ops(struct device *dev,
+ const struct dev_pm_ops *pm_ops);
+void wcnss_wlan_unregister_pm_ops(struct device *dev,
+ const struct dev_pm_ops *pm_ops);
+void wcnss_register_thermal_mitigation(struct device *dev,
+ void (*tm_notify)(struct device *dev,
+ int));
+void wcnss_unregister_thermal_mitigation(void (*tm_notify)(struct device *dev,
+ int));
+struct platform_device *wcnss_get_platform_device(void);
+struct wcnss_wlan_config *wcnss_get_wlan_config(void);
+void wcnss_set_iris_xo_mode(int iris_xo_mode_set);
+int wcnss_wlan_power(struct device *dev,
+ struct wcnss_wlan_config *cfg,
+ enum wcnss_opcode opcode,
+ int *iris_xo_mode_set);
+int wcnss_req_power_on_lock(char *driver_name);
+int wcnss_free_power_on_lock(char *driver_name);
+unsigned int wcnss_get_serial_number(void);
+int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE]);
+void wcnss_allow_suspend(void);
+void wcnss_prevent_suspend(void);
+int wcnss_hardware_type(void);
+void *wcnss_prealloc_get(unsigned int size);
+int wcnss_prealloc_put(void *ptr);
+void wcnss_reset_fiq(bool clk_chk_en);
+void wcnss_suspend_notify(void);
+void wcnss_resume_notify(void);
+void wcnss_riva_log_debug_regs(void);
+void wcnss_pronto_log_debug_regs(void);
+int wcnss_is_hw_pronto_ver3(void);
+int wcnss_device_ready(void);
+bool wcnss_cbc_complete(void);
+int wcnss_device_is_shutdown(void);
+void wcnss_riva_dump_pmic_regs(void);
+int wcnss_xo_auto_detect_enabled(void);
+u32 wcnss_get_wlan_rx_buff_count(void);
+int wcnss_wlan_iris_xo_mode(void);
+int wcnss_wlan_dual_band_disabled(void);
+void wcnss_flush_work(struct work_struct *work);
+void wcnss_flush_delayed_work(struct delayed_work *dwork);
+void wcnss_init_work(struct work_struct *work, void *callbackptr);
+void wcnss_init_delayed_work(struct delayed_work *dwork, void *callbackptr);
+int wcnss_get_iris_name(char *iris_version);
+void wcnss_dump_stack(struct task_struct *task);
+void wcnss_snoc_vote(bool clk_chk_en);
+int wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+ struct device *dev);
+
+#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
+void wcnss_log_debug_regs_on_bite(void);
+#else
+static inline void wcnss_log_debug_regs_on_bite(void)
+{
+}
+#endif
+int wcnss_set_wlan_unsafe_channel(
+ u16 *unsafe_ch_list, u16 ch_count);
+int wcnss_get_wlan_unsafe_channel(
+ u16 *unsafe_ch_list, u16 buffer_size,
+ u16 *ch_count);
+#define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
+#define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
+/* WLAN driver uses these names */
+#define req_riva_power_on_lock(name) wcnss_req_power_on_lock(name)
+#define free_riva_power_on_lock(name) wcnss_free_power_on_lock(name)
+
+#endif /* _WCNSS_WLAN_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 63ce902..d5e79f1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -55,6 +55,12 @@
*/
#define CFG80211_ROAMED_API_UNIFIED 1
+/* Indicate backport support for DBS scan control */
+#define CFG80211_SCAN_DBS_CONTROL_SUPPORT 1
+
+/* Indicate backport support for per chain rssi scan */
+#define CFG80211_SCAN_PER_CHAIN_RSSI_SUPPORT 1
+
/**
* DOC: Introduction
*
@@ -1742,6 +1748,8 @@
* by %parent_bssid.
* @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
* the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
*/
struct cfg80211_inform_bss {
struct ieee80211_channel *chan;
@@ -1750,6 +1758,8 @@
u64 boottime_ns;
u64 parent_tsf;
u8 parent_bssid[ETH_ALEN] __aligned(2);
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
};
/**
@@ -1793,6 +1803,8 @@
* that holds the beacon data. @beacon_ies is still valid, of course, and
* points to the same data as hidden_beacon_bss->beacon_ies in that case.
* @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
@@ -1811,6 +1823,8 @@
u16 capability;
u8 bssid[ETH_ALEN];
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 priv[0] __aligned(sizeof(void *));
};
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index ad38816..31b4cce 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,6 @@
#endif
enum icnss_uevent {
- ICNSS_UEVENT_FW_READY,
ICNSS_UEVENT_FW_CRASHED,
ICNSS_UEVENT_FW_DOWN,
};
diff --git a/include/soc/qcom/msm_tz_smmu.h b/include/soc/qcom/msm_tz_smmu.h
index 1d47f1f..43a3069 100644
--- a/include/soc/qcom/msm_tz_smmu.h
+++ b/include/soc/qcom/msm_tz_smmu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,11 +60,16 @@
/* Donot write to smmu global space with CONFIG_MSM_TZ_SMMU */
#undef writel_relaxed
+#undef writeq_relaxed
#define writel_relaxed(v, c) do { \
if (!arm_smmu_skip_write(c)) \
((void)__raw_writel((__force u32)cpu_to_le32(v), (c))); \
} while (0)
+#define writeq_relaxed(v, c) do { \
+ if (!arm_smmu_skip_write(c)) \
+ ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c))); \
+ } while (0)
#else
static inline int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 505e82b..cbfe7e4 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -108,6 +108,8 @@
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda670")
#define early_machine_is_msm8953() \
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
+#define early_machine_is_msm8937() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8937")
#define early_machine_is_sdm450() \
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm450")
#define early_machine_is_sdm632() \
@@ -155,6 +157,7 @@
#define early_machine_is_qcs605() 0
#define early_machine_is_sda670() 0
#define early_machine_is_msm8953() 0
+#define early_machine_is_msm8937() 0
#define early_machine_is_sdm450() 0
#define early_machine_is_sdm632() 0
#endif
@@ -225,6 +228,7 @@
MSM_CPU_SDM450,
MSM_CPU_SDM632,
MSM_CPU_SDA632,
+	MSM_CPU_8937,
};
struct msm_soc_info {
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index 7ba1817..52b52c4 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -80,6 +80,7 @@
/* Software generated signal indicating debug dumps to be collected */
WDSP_DEBUG_DUMP,
+ WDSP_DEBUG_DUMP_INTERNAL,
};
/*
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 204e81c..6016e9e 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -40,7 +40,7 @@
#define EPOLLRDHUP 0x00002000
/* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1 << 28)
+#define EPOLLEXCLUSIVE (1U << 28)
/*
* Request the handling of system wakeup events so as to prevent system suspends
@@ -52,13 +52,13 @@
*
* Requires CAP_BLOCK_SUSPEND
*/
-#define EPOLLWAKEUP (1 << 29)
+#define EPOLLWAKEUP (1U << 29)
/* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1 << 30)
+#define EPOLLONESHOT (1U << 30)
/* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1 << 31)
+#define EPOLLET (1U << 31)
/*
* On x86-64 make the 64bit structure have the same alignment as the
diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h
index 73f4938..8a0e4cf 100644
--- a/include/uapi/linux/msm_mdp.h
+++ b/include/uapi/linux/msm_mdp.h
@@ -1410,6 +1410,11 @@
MDP_CSC_ITU_R_709,
};
+/*
+ * These definitions are a continuation of the mdp_color_space enum above
+ */
+#define MDP_CSC_ITU_R_2020 (MDP_CSC_ITU_R_709 + 1)
+#define MDP_CSC_ITU_R_2020_FR (MDP_CSC_ITU_R_2020 + 1)
enum {
mdp_igc_v1_7 = 1,
mdp_igc_vmax,
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index 05a105b..1a2a7e2c 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -34,9 +34,9 @@
* To allow proper structure padding for 64bit/32bit target
*/
#ifdef __LP64
-#define MDP_LAYER_COMMIT_V1_PAD 3
+#define MDP_LAYER_COMMIT_V1_PAD 2
#else
-#define MDP_LAYER_COMMIT_V1_PAD 4
+#define MDP_LAYER_COMMIT_V1_PAD 3
#endif
/*
@@ -350,8 +350,11 @@
/* Buffer attached with output layer. Device uses it for commit call */
struct mdp_layer_buffer buffer;
+ /* color space of the destination */
+ enum mdp_color_space color_space;
+
/* 32bits reserved value for future usage. */
- uint32_t reserved[6];
+ uint32_t reserved[5];
};
/*
@@ -389,6 +392,18 @@
uint64_t __user scale;
};
+/* Enable Deterministic Frame Rate Control (FRC) */
+#define MDP_VIDEO_FRC_ENABLE (1 << 0)
+
+struct mdp_frc_info {
+ /* flags to control FRC feature */
+ uint32_t flags;
+ /* video frame count per frame */
+ uint32_t frame_cnt;
+ /* video timestamp per frame in millisecond unit */
+ int64_t timestamp;
+};
+
/*
* Commit structure holds layer stack send by client for validate and commit
* call. If layers are different between validate and commit call then commit
@@ -467,6 +482,9 @@
*/
uint32_t dest_scaler_cnt;
+ /* FRC info per device which contains frame count and timestamp */
+ struct mdp_frc_info __user *frc_info;
+
/* 32-bits reserved value for future usage. */
uint32_t reserved[MDP_LAYER_COMMIT_V1_PAD];
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 9fbdc11..3092188 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3782,6 +3782,9 @@
* @NL80211_BSS_PARENT_BSSID. (u64).
* @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
* is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ * Contains a nested array of signal strength attributes (u8, dBm),
+ * using the nesting index as the antenna number.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3805,6 +3808,7 @@
NL80211_BSS_PAD,
NL80211_BSS_PARENT_TSF,
NL80211_BSS_PARENT_BSSID,
+ NL80211_BSS_CHAIN_SIGNAL,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -4835,6 +4839,27 @@
* RSSI threshold values to monitor rather than exactly one threshold.
* @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
* authentication with %NL80211_CMD_CONNECT.
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK: Device wants to do 4-way
+ * handshake with PSK in station mode (PSK is passed as part of the connect
+ * and associate commands), doing it in the host might not be supported.
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X: Device wants to do 4-way
+ * handshake with 802.1X in station mode (will pass EAP frames to the host
+ * and accept the set_pmk/del_pmk commands), doing it in the host might not
+ * be supported.
+ * @NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME: Driver is capable of overriding
+ * the max channel attribute in the FILS request params IE with the
+ * actual dwell time.
+ * @NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP: Driver accepts broadcast probe
+ * response
+ * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE: Driver supports sending
+ * the first probe request in each channel at rate of at least 5.5Mbps.
+ * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: Driver supports
+ * probe request tx deferral and suppression
+ * @NL80211_EXT_FEATURE_MFP_OPTIONAL: Driver supports the %NL80211_MFP_OPTIONAL
+ * value in %NL80211_ATTR_USE_MFP.
+ * @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan.
+ * @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan.
+ * @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4855,6 +4880,16 @@
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
NL80211_EXT_FEATURE_CQM_RSSI_LIST,
NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X,
+ NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME,
+ NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION,
+ NL80211_EXT_FEATURE_MFP_OPTIONAL,
+ NL80211_EXT_FEATURE_LOW_SPAN_SCAN,
+ NL80211_EXT_FEATURE_LOW_POWER_SCAN,
+ NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4915,6 +4950,10 @@
* of NL80211_CMD_TRIGGER_SCAN and NL80211_CMD_START_SCHED_SCAN
* requests.
*
+ * NL80211_SCAN_FLAG_LOW_SPAN, NL80211_SCAN_FLAG_LOW_POWER, and
+ * NL80211_SCAN_FLAG_HIGH_ACCURACY flags are exclusive of each other, i.e., only
+ * one of them can be used in the request.
+ *
* @NL80211_SCAN_FLAG_LOW_PRIORITY: scan request has low priority
* @NL80211_SCAN_FLAG_FLUSH: flush cache before scanning
* @NL80211_SCAN_FLAG_AP: force a scan even if the interface is configured
@@ -4931,12 +4970,29 @@
* locally administered 1, multicast 0) is assumed.
* This flag must not be requested when the feature isn't supported, check
* the nl80211 feature flags for the device.
+ *
+ * @NL80211_SCAN_FLAG_LOW_SPAN: Span corresponds to the total time taken to
+ * accomplish the scan. Thus, this flag intends the driver to perform the
+ * scan request with lesser span/duration. It is specific to the driver
+ * implementations on how this is accomplished. Scan accuracy may get
+ * impacted with this flag.
+ * @NL80211_SCAN_FLAG_LOW_POWER: This flag intends the scan attempts to consume
+ * optimal possible power. Drivers can resort to their specific means to
+ * optimize the power. Scan accuracy may get impacted with this flag.
+ * @NL80211_SCAN_FLAG_HIGH_ACCURACY: Accuracy here intends to the extent of scan
+ * results obtained. Thus HIGH_ACCURACY scan flag aims to get maximum
+ * possible scan results. This flag hints the driver to use the best
+ * possible scan configuration to improve the accuracy in scanning.
+ * Latency and power use may get impacted with this flag.
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
NL80211_SCAN_FLAG_FLUSH = 1<<1,
NL80211_SCAN_FLAG_AP = 1<<2,
NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3,
+ NL80211_SCAN_FLAG_LOW_SPAN = 1<<8,
+ NL80211_SCAN_FLAG_LOW_POWER = 1<<9,
+ NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
};
/**
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 87f25b0..f5af604 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -119,20 +119,20 @@
*
* @slave_addr : OIS i2c slave address
* @i2c_freq_mode : i2c frequency mode
+ * @cmd_type : Explains type of command
* @ois_fw_flag : indicates if fw is present or not
* @is_ois_calib : indicates the calibration data is available
* @ois_name : OIS name
* @opcode : opcode
- * @cmd_type : Explains type of command
*/
struct cam_cmd_ois_info {
uint16_t slave_addr;
uint8_t i2c_freq_mode;
+ uint8_t cmd_type;
uint8_t ois_fw_flag;
uint8_t is_ois_calib;
char ois_name[MAX_OIS_NAME_SIZE];
struct cam_ois_opcode opcode;
- uint8_t cmd_type;
} __attribute__((packed));
/**
diff --git a/include/video/msm_dba.h b/include/video/msm_dba.h
new file mode 100644
index 0000000..f251048
--- /dev/null
+++ b/include/video/msm_dba.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DBA_H
+#define _MSM_DBA_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define MSM_DBA_CHIP_NAME_MAX_LEN 20
+#define MSM_DBA_CLIENT_NAME_LEN 20
+
+#define MSM_DBA_DEFER_PROPERTY_FLAG 0x1
+#define MSM_DBA_ASYNC_FLAG 0x2
+
+/**
+ * enum msm_dba_callback_event - event types for callback notification
+ * @MSM_DBA_CB_REMOTE_INT: Event associated with remote devices on an interface
+ * that supports a bi-directional control channel.
+ * @MSM_DBA_CB_HDCP_LINK_AUTHENTICATED: Authentication session is successful.
+ * The link is authenticated and encryption
+ * can be enabled if not enabled already.
+ * @MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED: A previously authenticated link has
+ * failed. The content on the interface
+ * is no longer secure.
+ * @MSM_DBA_CB_HPD_CONNECT: Detected a cable connect event.
+ * @MSM_DBA_CB_HPD_DISCONNECT: Detected a cable disconnect event.
+ * @MSM_DBA_CB_VIDEO_FAILURE: Detected a failure with respect to video data on
+ * the interface. This is a generic failure and
+ * client should request a debug dump to debug the
+ * issue. Client can also attempt a reset to recover
+ * the device.
+ * @MSM_DBA_CB_AUDIO_FAILURE: Detected a failure with respect to audio data on
+ * the interface. This is a generic failure and
+ * client should request a debug dump. Client can
+ * also attempt a reset to recover the device.
+ * @MSM_DBA_CB_CEC_WRITE_SUCCESS: The asynchronous CEC write request is
+ * successful.
+ * @MSM_DBA_CB_CEC_WRITE_FAIL: The asynchronous CEC write request failed.
+ * @MSM_DBA_CB_CEC_READ_PENDING: There is a pending CEC read message.
+ * @MSM_DBA_CB_PRE_RESET: This callback is called just before the device is
+ * being reset.
+ * @MSM_DBA_CB_POST_RESET: This callback is called after device reset is
+ * complete and the driver has applied back all the
+ * properties.
+ *
+ * Clients for this driver can register for receiving callbacks for specific
+ * events. This enum defines the type of events supported by the driver. An
+ * event mask is typically used to denote multiple events.
+ */
+enum msm_dba_callback_event {
+ MSM_DBA_CB_REMOTE_INT = BIT(0),
+ MSM_DBA_CB_HDCP_LINK_AUTHENTICATED = BIT(1),
+ MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED = BIT(2),
+ MSM_DBA_CB_HPD_CONNECT = BIT(3),
+ MSM_DBA_CB_HPD_DISCONNECT = BIT(4),
+ MSM_DBA_CB_VIDEO_FAILURE = BIT(5),
+ MSM_DBA_CB_AUDIO_FAILURE = BIT(6),
+ MSM_DBA_CB_CEC_WRITE_SUCCESS = BIT(7),
+ MSM_DBA_CB_CEC_WRITE_FAIL = BIT(8),
+ MSM_DBA_CB_CEC_READ_PENDING = BIT(9),
+ MSM_DBA_CB_PRE_RESET = BIT(10),
+ MSM_DBA_CB_POST_RESET = BIT(11),
+};
+
+/**
+ * enum msm_dba_audio_interface_type - audio interface type
+ * @MSM_DBA_AUDIO_I2S_INTERFACE: I2S interface for audio
+ * @MSM_DBA_AUDIO_SPDIF_INTERFACE: SPDIF interface for audio
+ */
+enum msm_dba_audio_interface_type {
+ MSM_DBA_AUDIO_I2S_INTERFACE = BIT(0),
+ MSM_DBA_AUDIO_SPDIF_INTERFACE = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_format_type - audio format type
+ * @MSM_DBA_AUDIO_FMT_UNCOMPRESSED_LPCM: uncompressed format
+ * @MSM_DBA_AUDIO_FMT_COMPRESSED: compressed formats
+ */
+enum msm_dba_audio_format_type {
+ MSM_DBA_AUDIO_FMT_UNCOMPRESSED_LPCM = BIT(0),
+ MSM_DBA_AUDIO_FMT_COMPRESSED = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_copyright_type - audio copyright
+ * @MSM_DBA_AUDIO_COPYRIGHT_PROTECTED: copy right protected
+ * @MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED: not copy right protected
+ */
+enum msm_dba_audio_copyright_type {
+ MSM_DBA_AUDIO_COPYRIGHT_PROTECTED = BIT(0),
+ MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_pre_emphasis_type - pre-emphasis
+ * @MSM_DBA_AUDIO_NO_PRE_EMPHASIS: 2 audio channels w/o pre-emphasis
+ * @MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us: 2 audio channels with 50/15uS
+ */
+enum msm_dba_audio_pre_emphasis_type {
+ MSM_DBA_AUDIO_NO_PRE_EMPHASIS = BIT(0),
+ MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_clock_accuracy - Audio Clock Accuracy
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1: normal accuracy +/-1000 x 10^-6
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL2: high accuracy +/- 50 x 10^-6
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3: variable pitch shifted clock
+ */
+enum msm_dba_audio_clock_accuracy {
+ MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1 = BIT(1),
+ MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL2 = BIT(0),
+ MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3 = BIT(2),
+};
+
+/**
+ * enum msm_dba_channel_status_source - CS override
+ * @MSM_DBA_AUDIO_CS_SOURCE_I2S_STREAM: use channel status bits from I2S stream
+ * @MSM_DBA_AUDIO_CS_SOURCE_REGISTERS: use channel status bits from registers
+ */
+enum msm_dba_channel_status_source {
+ MSM_DBA_AUDIO_CS_SOURCE_I2S_STREAM,
+ MSM_DBA_AUDIO_CS_SOURCE_REGISTERS
+};
+
+/**
+ * enum msm_dba_audio_sampling_rates_type - audio sampling rates
+ * @MSM_DBA_AUDIO_32KHZ: 32KHz sampling rate
+ * @MSM_DBA_AUDIO_44P1KHZ: 44.1KHz sampling rate
+ * @MSM_DBA_AUDIO_48KHZ: 48KHz sampling rate
+ * @MSM_DBA_AUDIO_96KHZ: 96KHz sampling rate
+ * @MSM_DBA_AUDIO_192KHZ: 192KHz sampling rate
+ */
+enum msm_dba_audio_sampling_rates_type {
+ MSM_DBA_AUDIO_32KHZ = BIT(0),
+ MSM_DBA_AUDIO_44P1KHZ = BIT(1),
+ MSM_DBA_AUDIO_48KHZ = BIT(2),
+ MSM_DBA_AUDIO_88P2KHZ = BIT(1),
+ MSM_DBA_AUDIO_96KHZ = BIT(3),
+ MSM_DBA_AUDIO_176P4KHZ = BIT(1),
+ MSM_DBA_AUDIO_192KHZ = BIT(4),
+};
+
+/**
+ * enum msm_dba_audio_word_bit_depth - audio word size
+ * @MSM_DBA_AUDIO_WORD_16BIT: 16 bits per word
+ * @MSM_DBA_AUDIO_WORD_24BIT: 24 bits per word
+ * @MSM_DBA_AUDIO_WORD_32BIT: 32 bits per word
+ */
+enum msm_dba_audio_word_bit_depth {
+ MSM_DBA_AUDIO_WORD_16BIT = BIT(1),
+ MSM_DBA_AUDIO_WORD_24BIT = BIT(2),
+ MSM_DBA_AUDIO_WORD_32BIT = BIT(3),
+};
+
+/**
+ * enum msm_dba_audio_channel_count - audio channel count
+ * @MSM_DBA_AUDIO_CHANNEL_2: 2 channel audio
+ * @MSM_DBA_AUDIO_CHANNEL_4: 4 channel audio
+ * @MSM_DBA_AUDIO_CHANNEL_8: 8 channel audio
+ */
+enum msm_dba_audio_channel_count {
+ MSM_DBA_AUDIO_CHANNEL_2 = BIT(0),
+ MSM_DBA_AUDIO_CHANNEL_4 = BIT(1),
+ MSM_DBA_AUDIO_CHANNEL_8 = BIT(2),
+};
+
+/**
+ * enum msm_dba_audio_i2s_format - i2s audio data format
+ * @MSM_DBA_AUDIO_I2S_FMT_STANDARD: Standard format
+ * @MSM_DBA_AUDIO_I2S_FMT_RIGHT_JUSTIFIED: i2s data is right justified
+ * @MSM_DBA_AUDIO_I2S_FMT_LEFT_JUSTIFIED: i2s data is left justified
+ * @MSM_DBA_AUDIO_I2S_FMT_AES3_DIRECT: AES signal format
+ */
+enum msm_dba_audio_i2s_format {
+ MSM_DBA_AUDIO_I2S_FMT_STANDARD = 0,
+ MSM_DBA_AUDIO_I2S_FMT_RIGHT_JUSTIFIED,
+ MSM_DBA_AUDIO_I2S_FMT_LEFT_JUSTIFIED,
+ MSM_DBA_AUDIO_I2S_FMT_AES3_DIRECT,
+ MSM_DBA_AUDIO_I2S_FMT_MAX,
+};
+
+enum msm_dba_video_aspect_ratio {
+ MSM_DBA_AR_UNKNOWN = 0,
+ MSM_DBA_AR_4_3,
+ MSM_DBA_AR_5_4,
+ MSM_DBA_AR_16_9,
+ MSM_DBA_AR_16_10,
+ MSM_DBA_AR_64_27,
+ MSM_DBA_AR_256_135,
+ MSM_DBA_AR_MAX
+};
+
+enum msm_dba_audio_word_endian_type {
+ MSM_DBA_AUDIO_WORD_LITTLE_ENDIAN = 0,
+ MSM_DBA_AUDIO_WORD_BIG_ENDIAN,
+ MSM_DBA_AUDIO_WORD_ENDIAN_MAX
+};
+
+/**
+ * msm_dba_audio_op_mode - i2s audio operation mode
+ * @MSM_DBA_AUDIO_MODE_MANUAL: Manual mode
+ * @MSM_DBA_AUDIO_MODE_AUTOMATIC: Automatic mode
+ */
+enum msm_dba_audio_op_mode {
+ MSM_DBA_AUDIO_MODE_MANUAL,
+ MSM_DBA_AUDIO_MODE_AUTOMATIC,
+};
+
+/**
+ * typedef *msm_dba_cb() - Prototype for callback function
+ * @data: Pointer to user data provided with register API
+ * @event: Event type associated with callback. This can be a bitmask.
+ */
+typedef void (*msm_dba_cb)(void *data, enum msm_dba_callback_event event);
+
+/**
+ * struct msm_dba_reg_info - Client information used with register API
+ * @client_name: Name of the client for debug purposes
+ * @chip_name: Bridge chip ID
+ * @instance_id: Instance ID of the bridge chip in case of multiple instances
+ * @cb: callback function called in case of events.
+ * @cb_data: pointer to a data structure that will be returned with callback
+ *
+ * msm_dba_reg_info structure will be used to provide information during
+ * registering with driver. This structure will contain the information required
+ * to identify the specific bridge chip the client wants to use.
+ *
+ * Client should also specify the callback function which needs to be called in
+ * case of events. There is an optional data field which is a pointer that will
+ * be returned as one of arguments in the callback function. This data field can
+ * be NULL if client does not wish to use it.
+ */
+struct msm_dba_reg_info {
+ char client_name[MSM_DBA_CLIENT_NAME_LEN];
+ char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+ u32 instance_id;
+ msm_dba_cb cb;
+ void *cb_data;
+};
+
+/**
+ * struct msm_dba_video_caps_info - video capabilities of the bridge chip
+ * @hdcp_support: if hdcp is supported
+ * @edid_support: if reading edid from sink is supported
+ * @data_lanes_lp_support: if low power mode is supported on data lanes
+ * @clock_lanes_lp_support: If low power mode is supported on clock lanes
+ * @max_pclk_khz: maximum pixel clock supported
+ * @num_of_input_lanes: Number of input data lanes supported by the bridge chip
+ */
+struct msm_dba_video_caps_info {
+ bool hdcp_support;
+ bool edid_support;
+ bool data_lanes_lp_support;
+ bool clock_lanes_lp_support;
+ u32 max_pclk_khz;
+ u32 num_of_input_lanes;
+};
+
+/**
+ * struct msm_dba_audio_caps_info - audio capabilities of the bridge chip
+ * @audio_support: if audio is supported
+ * @audio_rates: audio sampling rates supported
+ * @audio_fmts: audio formats supported
+ */
+struct msm_dba_audio_caps_info {
+ u32 audio_support;
+ u32 audio_rates;
+ u32 audio_fmts;
+};
+
+/**
+ * struct msm_dba_capabilities - general capabilities of the bridge chip
+ * @vid_caps: video capabilities
+ * @aud_caps: audio capabilities
+ * @av_mute_support: av mute support in bridge chip
+ * @deferred_commit_support: support for deferred commit
+ */
+struct msm_dba_capabilities {
+ struct msm_dba_video_caps_info vid_caps;
+ struct msm_dba_audio_caps_info aud_caps;
+ bool av_mute_support;
+ bool deferred_commit_support;
+};
+
+/**
+ * struct msm_dba_audio_cfg - Structure for audio configuration
+ * @interface: Specifies audio interface type. Client should check the
+ * capabilities for the interfaces supported by the bridge.
+ * @format: Compressed vs Uncompressed formats.
+ * @channels: Number of channels.
+ * @i2s_fmt: I2S data packing format. This is valid only if interface is I2S.
+ * @sampling_rate: sampling rate of audio data
+ * @word_size: word size
+ * @word_endianness: little or big endian words
+ */
+struct msm_dba_audio_cfg {
+ enum msm_dba_audio_interface_type interface;
+ enum msm_dba_audio_format_type format;
+ enum msm_dba_audio_channel_count channels;
+ enum msm_dba_audio_i2s_format i2s_fmt;
+ enum msm_dba_audio_sampling_rates_type sampling_rate;
+ enum msm_dba_audio_word_bit_depth word_size;
+ enum msm_dba_audio_word_endian_type word_endianness;
+ enum msm_dba_audio_copyright_type copyright;
+ enum msm_dba_audio_pre_emphasis_type pre_emphasis;
+ enum msm_dba_audio_clock_accuracy clock_accuracy;
+ enum msm_dba_channel_status_source channel_status_source;
+ enum msm_dba_audio_op_mode mode;
+
+ u32 channel_status_category_code;
+ u32 channel_status_source_number;
+ u32 channel_status_v_bit;
+ u32 channel_allocation;
+ u32 channel_status_word_length;
+
+ u32 n;
+ u32 cts;
+};
+
+/**
+ * struct msm_dba_video_cfg - video configuration data
+ * @h_active: active width of the video signal
+ * @h_front_porch: horizontal front porch in pixels
+ * @h_pulse_width: pulse width of hsync in pixels
+ * @h_back_porch: horizontal back porch in pixels
+ * @h_polarity: polarity of hsync signal
+ * @v_active: active height of the video signal
+ * @v_front_porch: vertical front porch in lines
+ * @v_pulse_width: pulse width of vsync in lines
+ * @v_back_porch: vertical back porch in lines
+ * @v_polarity: polarity of vsync signal
+ * @pclk_khz: pixel clock in KHz
+ * @interlaced: if video is interlaced
+ * @vic: video identification code
+ * @hdmi_mode: hdmi or dvi mode for the sink
+ * @ar: aspect ratio of the signal
+ * @num_of_input_lanes: number of input lanes in case of DSI/LVDS
+ */
+struct msm_dba_video_cfg {
+ u32 h_active;
+ u32 h_front_porch;
+ u32 h_pulse_width;
+ u32 h_back_porch;
+ bool h_polarity;
+ u32 v_active;
+ u32 v_front_porch;
+ u32 v_pulse_width;
+ u32 v_back_porch;
+ bool v_polarity;
+ u32 pclk_khz;
+ bool interlaced;
+ u32 vic;
+ bool hdmi_mode;
+ enum msm_dba_video_aspect_ratio ar;
+ u32 num_of_input_lanes;
+ u8 scaninfo;
+};
+
+struct mdss_dba_timing_info {
+ u16 xres;
+ u16 yres;
+ u8 bpp;
+ u8 fps;
+ u8 lanes;
+};
+
+/**
+ * struct msm_dba_ops- operation supported by bridge chip
+ * @get_caps: returns the bridge chip capabilities
+ * DEFER and ASYNC flags are not supported.
+ * @power_on: powers on/off the bridge chip. This usually involves turning on
+ * the power regulators and bringing the chip out of reset. Chip
+ * should be capable of raising interrupts at this point.
+ * DEFER and ASYNC flags are supported.
+ * @video_on: turn on/off video stream. This function also requires the video
+ * timing information that might be needed for programming the bridge
+ * chip.
+ * DEFER flag is supported.
+ * ASYNC flag is not supported.
+ * @audio_on: turn on/off audio stream.
+ * DEFER flag is supported.
+ * ASYNC flag is not supported.
+ * @configure_audio: setup audio configuration
+ * DEFER flag is supported.
+ * ASYNC flag is not supported.
+ * @av_mute: controls av mute functionalities if supported. AV mute is different
+ * from audio_on and video_on where in even though the actual data is
+ * sent, mute is specified through control packets.
+ * DEFER flag is supported.
+ * ASYNC flag is not supported.
+ * @interrupts_enable: enables interrupts to get event callbacks. Clients need
+ * to specify an event mask of the events they are
+ * interested in. If a client provides an event as part of
+ * the mask, it will receive the interrupt regardless of the
+ * client modifying the property.
+ * DEFER flag is supported.
+ * ASYNC flag is not supported.
+ * @hdcp_enable: enable/disable hdcp. If HDCP is enabled, this function will
+ * start a new authentication session. There is a separate
+ * argument for enabling encryption. Encryption can be enabled any
+ * time after HDCP has been fully authenticated. This function
+ * will support an asynchronous mode where calling this function
+ * will kick off HDCP and return to the caller. Caller has to wait
+ * for MSM_DBA_CB_HDCP_SUCCESS callback to ensure link is
+ * authenticated.
+ * DEFER flag is not supported.
+ * ASYNC flag is supported.
+ * @hdcp_get_ksv_list_size: returns the KSV list size. In case of a simple sink
+ * the size will be 1. In case of a repeater, this can
+ * be more than one.
+ * DEFER and ASYNC flags are not supported.
+ * @hdcp_get_ksv_list: return the KSV list. Client can query the KSV information
+ * from the bridge. Client should call
+ * hdcp_get_ksv_list_size first and then allocate 40*size
+ * bytes to hold all the KSVs.
+ * DEFER and ASYNC flags are not supported.
+ * @hdmi_cec_on: enable or disable cec module. Clients need to enable CEC
+ * feature before they do read or write CEC messages.
+ * @hdmi_cec_write: perform a CEC write. For bridges with HDMI as output
+ * interface, this function allows clients to send a CEC
+ * message. Client should pack the data according to the CEC
+ * specification and provide the final buffer. Since CEC writes
+ * can take longer time to ascertaining if they are successful,
+ * this function supports the ASYNC flag. Driver will return
+ * either MSM_DBA_CB_CEC_WRITE_SUCCESS or
+ * MSM_DBA_CB_CEC_WRITE_FAIL callbacks.
+ * DEFER is not supported.
+ * ASYNC flag is supported.
+ * @hdmi_cec_read: get a pending CEC read message. In case of an incoming CEC
+ * message, driver will return MSM_DBA_CB_CEC_READ_PENDING
+ * callback. On getting this event callback, client should call
+ * hdmi_cec_read to get the message. The buffer should at least
+ * be 15 bytes or more. Client should read the CEC message from
+ * a thread different from the callback.
+ * DEFER and ASYNC flags are not supported.
+ * @get_edid_size: returns size of the edid.
+ * DEFER and ASYNC flags are not supported.
+ * @get_raw_edid: returns raw edid data.
+ * DEFER and ASYNC flags are not supported.
+ * @enable_remote_comm: enable/disable remote communication. Some interfaces
+ * like FPDLINK III support a bi-directional control
+ * channel that could be used to send control data using an
+ * I2C or SPI protocol. This Function will enable this
+ * control channel if supported.
+ * DEFER and ASYNC flags are not supported.
+ * @add_remote_device: add slaves on remote side for enabling communication. For
+ * interfaces that support bi directional control channel,
+ * this function allows clients to specify slave IDs of
+ * devices on remote bus. Messages addressed to these IDs
+ * will be trapped by the bridge chip and put on the remote
+ * bus.
+ * DEFER and ASYNC flags are not supported.
+ * @commit_deferred_props: commits deferred properties
+ * DEFER and ASYNC flags are not supported.
+ * @force_reset: reset the device forcefully. In case the device goes into a bad
+ * state, a client can force reset to try and recover the device.
+ * The reset will be applied in spite of different configurations
+ * from other clients. Driver will apply all the properties that
+ * have been applied so far after the reset is complete. In case
+ * of multiple clients, driver will issue a reset callback.
+ * @dump_debug_info: dumps debug information to dmesg.
+ * @check_hpd: Check if cable is connected or not. if cable is connected we
+ * send notification to display framework.
+ * @set_audio_block: This function will populate the raw audio speaker block
+ * data along with size of each block in bridgechip buffer.
+ * @get_audio_block: This function will return the raw audio speaker block
+ * along with size of each block.
+ *
+ * The msm_dba_ops structure represents a set of operations that can be
+ * supported by each bridge chip. Depending on the functionality supported by a
+ * specific bridge chip, some of the operations need not be supported. For
+ * example if a bridge chip does not support reading EDID from a sink device,
+ * get_edid_size and get_raw_edid can be NULL.
+ *
+ * Deferring properties: The deferred flag allows us to address any quirks with
+ * respect to specific bridge chips. If there is a need for some properties to
+ * be committed together, turning on video and audio at the same time, the
+ * deferred flag can be used. Properties that are set using a DEFER flag will
+ * not be committed to hardware until commit_deferred_props() function is
+ * called.
+ *
+ */
+struct msm_dba_ops {
+ int (*get_caps)(void *client,
+ struct msm_dba_capabilities *caps);
+
+ int (*power_on)(void *client,
+ bool on,
+ u32 flags);
+
+ int (*video_on)(void *client,
+ bool on,
+ struct msm_dba_video_cfg *cfg,
+ u32 flags);
+
+ int (*audio_on)(void *client,
+ bool on,
+ u32 flags);
+
+ int (*configure_audio)(void *client,
+ struct msm_dba_audio_cfg *cfg,
+ u32 flags);
+
+ int (*av_mute)(void *client,
+ bool video_mute,
+ bool audio_mute,
+ u32 flags);
+
+ int (*interrupts_enable)(void *client,
+ bool on,
+ u32 event_mask,
+ u32 flags);
+
+ int (*hdcp_enable)(void *client,
+ bool hdcp_on,
+ bool enc_on,
+ u32 flags);
+
+ int (*hdcp_get_ksv_list_size)(void *client,
+ u32 *count,
+ u32 flags);
+
+ int (*hdcp_get_ksv_list)(void *client,
+ u32 count,
+ char *buf,
+ u32 flags);
+
+ int (*hdmi_cec_on)(void *client,
+ bool enable,
+ u32 flags);
+
+ int (*hdmi_cec_write)(void *client,
+ u32 size,
+ char *buf,
+ u32 flags);
+
+ int (*hdmi_cec_read)(void *client,
+ u32 *size,
+ char *buf,
+ u32 flags);
+
+ int (*get_edid_size)(void *client,
+ u32 *size,
+ u32 flags);
+
+ int (*get_raw_edid)(void *client,
+ u32 size,
+ char *buf,
+ u32 flags);
+
+ int (*enable_remote_comm)(void *client,
+ bool on,
+ u32 flags);
+
+ int (*add_remote_device)(void *client,
+ u32 *slave_ids,
+ u32 count,
+ u32 flags);
+
+ int (*commit_deferred_props)(void *client,
+ u32 flags);
+
+ int (*force_reset)(void *client, u32 flags);
+ int (*dump_debug_info)(void *client, u32 flags);
+ int (*check_hpd)(void *client, u32 flags);
+ void (*set_audio_block)(void *client, u32 size, void *buf);
+ void (*get_audio_block)(void *client, u32 size, void *buf);
+ void* (*get_supp_timing_info)(void);
+};
+
+/**
+ * msm_dba_register_client() - Allows a client to register with the driver.
+ * @info: Client information along with the bridge chip id the client wishes to
+ * program.
+ * @ops: Function pointers to bridge chip operations. Some function pointers can
+ * be NULL depending on the functionalities supported by bridge chip.
+ *
+ * The register API supports multiple clients to register for the same bridge
+ * chip. If Successful, this will return a pointer that should be used as a
+ * handle for all subsequent function calls.
+ */
+void *msm_dba_register_client(struct msm_dba_reg_info *info,
+ struct msm_dba_ops *ops);
+
+/**
+ * msm_dba_deregister_client() - Allows client to de-register with the driver.
+ * @client: client handle returned by register API.
+ *
+ * This function will release all the resources used by a particular client. If
+ * it is the only client using the bridge chip, the bridge chip will be powered
+ * down and put into reset.
+ */
+int msm_dba_deregister_client(void *client);
+
+#endif /* _MSM_DBA_H */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 26c624e..a83771f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4407,7 +4407,11 @@
*/
do {
css_task_iter_start(&from->self, &it);
- task = css_task_iter_next(&it);
+
+ do {
+ task = css_task_iter_next(&it);
+ } while (task && (task->flags & PF_EXITING));
+
if (task)
get_task_struct(task);
css_task_iter_end(&it);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 59fcdbe..7b02ae6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1051,8 +1051,13 @@
*/
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
- if (unlikely(!cpu_active(dest_cpu)))
- return rq;
+ if (p->flags & PF_KTHREAD) {
+ if (unlikely(!cpu_online(dest_cpu)))
+ return rq;
+ } else {
+ if (unlikely(!cpu_active(dest_cpu)))
+ return rq;
+ }
/* Affinity changed (again). */
if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 645b472..73f11c4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2178,9 +2178,8 @@
* the rt_loop_next will cause the iterator to perform another scan.
*
*/
-static int rto_next_cpu(struct rq *rq)
+static int rto_next_cpu(struct root_domain *rd)
{
- struct root_domain *rd = rq->rd;
int next;
int cpu;
@@ -2256,7 +2255,7 @@
* Otherwise it is finishing up and an ipi needs to be sent.
*/
if (rq->rd->rto_cpu < 0)
- cpu = rto_next_cpu(rq);
+ cpu = rto_next_cpu(rq->rd);
raw_spin_unlock(&rq->rd->rto_lock);
@@ -2269,6 +2268,8 @@
/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
+ struct root_domain *rd =
+ container_of(work, struct root_domain, rto_push_work);
struct rq *rq;
int cpu;
@@ -2284,18 +2285,18 @@
raw_spin_unlock(&rq->lock);
}
- raw_spin_lock(&rq->rd->rto_lock);
+ raw_spin_lock(&rd->rto_lock);
/* Pass the IPI to the next rt overloaded queue */
- cpu = rto_next_cpu(rq);
+ cpu = rto_next_cpu(rd);
- raw_spin_unlock(&rq->rd->rto_lock);
+ raw_spin_unlock(&rd->rto_lock);
if (cpu < 0)
return;
/* Try the next RT overloaded CPU */
- irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+ irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index dea7e55..a01c821 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -127,7 +127,9 @@
static int __maybe_unused four = 4;
static unsigned long one_ul = 1;
static int one_hundred = 100;
+#ifdef CONFIG_PERF_EVENTS
static int one_thousand = 1000;
+#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -1572,8 +1574,16 @@
.maxlen = sizeof(watermark_scale_factor),
.mode = 0644,
.proc_handler = watermark_scale_factor_sysctl_handler,
- .extra1 = &one,
- .extra2 = &one_thousand,
+ .extra1 = &zero,
+ .extra2 = &zero,
+ },
+ {
+ .procname = "extra_free_kbytes",
+ .data = &extra_free_kbytes,
+ .maxlen = sizeof(extra_free_kbytes),
+ .mode = 0644,
+ .proc_handler = min_free_kbytes_sysctl_handler,
+ .extra1 = &zero,
},
{
.procname = "percpu_pagelist_fraction",
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 41481dc..a6f8a44 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -208,7 +208,6 @@
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
struct timer_base timer_base_deferrable;
-static atomic_t deferrable_pending;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;
@@ -1489,6 +1488,8 @@
#ifdef CONFIG_SMP
+static atomic_t deferrable_pending;
+
/*
* check_pending_deferrable_timers - Check for unbound deferrable timer expiry
* @cpu - Current CPU
@@ -1669,6 +1670,27 @@
spin_unlock_irq(&base->lock);
}
+#ifdef CONFIG_SMP
+static inline bool should_this_cpu_run_deferrable_timers(void)
+{
+ int tick_cpu = READ_ONCE(tick_do_timer_cpu);
+
+ if (atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+ tick_cpu == TICK_DO_TIMER_NONE)
+ return true;
+
+ if (tick_cpu == smp_processor_id())
+ return true;
+
+ return (tick_cpu >= 0 && ksoftirqd_running_on(tick_cpu));
+}
+#else
+static inline bool should_this_cpu_run_deferrable_timers(void)
+{
+ return true;
+}
+#endif
+
/*
* This function runs timers and the timer-tq in bottom half context.
*/
@@ -1693,9 +1715,7 @@
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
- if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
- tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
- tick_do_timer_cpu == smp_processor_id())
+ if (should_this_cpu_run_deferrable_timers())
__run_timers(&timer_base_deferrable);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b031d0b..5ef61bd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1642,7 +1642,7 @@
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
/*
- * Sanity check nr_running. Because wq_unbind_fn() releases
+ * Sanity check nr_running. Because unbind_workers() releases
* pool->lock between setting %WORKER_UNBOUND and zapping
* nr_running, the warning may trigger spuriously. Check iff
* unbind is not in progress.
@@ -4491,9 +4491,8 @@
* cpu comes back online.
*/
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
{
- int cpu = smp_processor_id();
struct worker_pool *pool;
struct worker *worker;
@@ -4690,12 +4689,13 @@
int workqueue_offline_cpu(unsigned int cpu)
{
- struct work_struct unbind_work;
struct workqueue_struct *wq;
/* unbinding per-cpu workers should happen on the local CPU */
- INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
- queue_work_on(cpu, system_highpri_wq, &unbind_work);
+ if (WARN_ON(cpu != smp_processor_id()))
+ return -1;
+
+ unbind_workers(cpu);
/* update NUMA affinity of unbound workqueues */
mutex_lock(&wq_pool_mutex);
@@ -4703,9 +4703,6 @@
wq_update_unbound_numa(wq, cpu, false);
mutex_unlock(&wq_pool_mutex);
- /* wait for per-cpu unbinding to finish */
- flush_work(&unbind_work);
- destroy_work_on_stack(&unbind_work);
return 0;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a29f774..34eec18 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1619,9 +1619,13 @@
* @page: the page
*
* This function protects unlocked LRU pages from being moved to
- * another cgroup and stabilizes their page->mem_cgroup binding.
+ * another cgroup.
+ *
+ * It ensures lifetime of the returned memcg. Caller is responsible
+ * for the lifetime of the page; __unlock_page_memcg() is available
+ * when @page might get freed inside the locked section.
*/
-void lock_page_memcg(struct page *page)
+struct mem_cgroup *lock_page_memcg(struct page *page)
{
struct mem_cgroup *memcg;
unsigned long flags;
@@ -1630,18 +1634,24 @@
* The RCU lock is held throughout the transaction. The fast
* path can get away without acquiring the memcg->move_lock
* because page moving starts with an RCU grace period.
- */
+ *
+ * The RCU lock also protects the memcg from being freed when
+ * the page state that is going to change is the only thing
+ * preventing the page itself from being freed. E.g. writeback
+ * doesn't hold a page reference and relies on PG_writeback to
+ * keep off truncation, migration and so forth.
+ */
rcu_read_lock();
if (mem_cgroup_disabled())
- return;
+ return NULL;
again:
memcg = page->mem_cgroup;
if (unlikely(!memcg))
- return;
+ return NULL;
if (atomic_read(&memcg->moving_account) <= 0)
- return;
+ return memcg;
spin_lock_irqsave(&memcg->move_lock, flags);
if (memcg != page->mem_cgroup) {
@@ -1657,18 +1667,18 @@
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
- return;
+ return memcg;
}
EXPORT_SYMBOL(lock_page_memcg);
/**
- * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @page: the page
+ * __unlock_page_memcg - unlock and unpin a memcg
+ * @memcg: the memcg
+ *
+ * Unlock and unpin a memcg returned by lock_page_memcg().
*/
-void unlock_page_memcg(struct page *page)
+void __unlock_page_memcg(struct mem_cgroup *memcg)
{
- struct mem_cgroup *memcg = page->mem_cgroup;
-
if (memcg && memcg->move_lock_task == current) {
unsigned long flags = memcg->move_lock_flags;
@@ -1680,6 +1690,15 @@
rcu_read_unlock();
}
+
+/**
+ * unlock_page_memcg - unlock a page->mem_cgroup binding
+ * @page: the page
+ */
+void unlock_page_memcg(struct page *page)
+{
+ __unlock_page_memcg(page->mem_cgroup);
+}
EXPORT_SYMBOL(unlock_page_memcg);
/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 439cc63..dd7817cd3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2713,9 +2713,10 @@
int test_clear_page_writeback(struct page *page)
{
struct address_space *mapping = page_mapping(page);
+ struct mem_cgroup *memcg;
int ret;
- lock_page_memcg(page);
+ memcg = lock_page_memcg(page);
if (mapping && mapping_use_writeback_tags(mapping)) {
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2743,13 +2744,20 @@
} else {
ret = TestClearPageWriteback(page);
}
+ /*
+ * NOTE: Page might be free now! Writeback doesn't hold a page
+ * reference on its own, it relies on truncation to wait for
+ * the clearing of PG_writeback. The below can only access
+ * page state that is static across allocation cycles.
+ */
if (ret) {
- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+ __mem_cgroup_update_page_stat(page, memcg,
+ MEM_CGROUP_STAT_WRITEBACK, -1);
dec_node_page_state(page, NR_WRITEBACK);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
inc_node_page_state(page, NR_WRITTEN);
}
- unlock_page_memcg(page);
+ __unlock_page_memcg(memcg);
return ret;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b1d4386..4a60459 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -254,9 +254,21 @@
#endif
};
+/*
+ * Try to keep at least this much lowmem free. Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
-int watermark_scale_factor = 10;
+int watermark_scale_factor;
+
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
@@ -6742,6 +6754,7 @@
static void __setup_per_zone_wmarks(void)
{
unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+ unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
struct zone *zone;
unsigned long flags;
@@ -6753,11 +6766,14 @@
}
for_each_zone(zone) {
- u64 tmp;
+ u64 min, low;
spin_lock_irqsave(&zone->lock, flags);
- tmp = (u64)pages_min * zone->managed_pages;
- do_div(tmp, lowmem_pages);
+ min = (u64)pages_min * zone->managed_pages;
+ do_div(min, lowmem_pages);
+ low = (u64)pages_low * zone->managed_pages;
+ do_div(low, vm_total_pages);
+
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -6778,7 +6794,7 @@
* If it's a lowmem zone, reserve a number of pages
* proportionate to the zone's size.
*/
- zone->watermark[WMARK_MIN] = tmp;
+ zone->watermark[WMARK_MIN] = min;
}
/*
@@ -6786,12 +6802,13 @@
* scale factor in proportion to available memory, but
* ensure a minimum size on small systems.
*/
- tmp = max_t(u64, tmp >> 2,
+ min = max_t(u64, min >> 2,
mult_frac(zone->managed_pages,
watermark_scale_factor, 10000));
- zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
- zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
+ zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + low + min;
+ zone->watermark[WMARK_HIGH] =
+ min_wmark_pages(zone) + low + min * 2;
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -6872,7 +6889,7 @@
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
* that we can call two helper functions whenever min_free_kbytes
- * changes.
+ * or extra_free_kbytes changes.
*/
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 5edfe66..64ec233 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -263,6 +263,7 @@
* this case. -DaveM
*/
pr_debug("end of fragment not rounded to 8 bytes.\n");
+ inet_frag_kill(&fq->q, &nf_frags);
return -EPROTO;
}
if (end > fq->q.len) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index ec87467..313a6dd 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -161,10 +161,13 @@
#endif
if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
- struct udphdr _hdr, *hp;
+ struct udphdr *hp;
+ struct tcphdr _hdr;
hp = skb_header_pointer(skb, ip_hdrlen(skb),
- sizeof(_hdr), &_hdr);
+ iph->protocol == IPPROTO_UDP ?
+ sizeof(*hp) : sizeof(_hdr),
+ &_hdr);
if (hp == NULL)
return NULL;
@@ -370,9 +373,11 @@
}
if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
- struct udphdr _hdr, *hp;
+ struct udphdr *hp;
+ struct tcphdr _hdr;
- hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
+ hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ?
+ sizeof(*hp) : sizeof(_hdr), &_hdr);
if (hp == NULL)
return NULL;
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 8faf7a7..50dd516 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -453,6 +453,13 @@
}
ep = &config->muxed_ep[mux_id];
+ if (!ep->refcount) {
+ LOGD("Packet on %s:%d; has no logical endpoint config",
+ skb->dev->name, mux_id);
+
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
skb->dev = ep->egress_dev;
diff --git a/net/rmnet_data/rmnet_data_stats.h b/net/rmnet_data/rmnet_data_stats.h
index 366e486..75ed434 100644
--- a/net/rmnet_data/rmnet_data_stats.h
+++ b/net/rmnet_data/rmnet_data_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@
RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0,
RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM,
RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED,
+ RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
RMNET_STATS_SKBFREE_MAX
};
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index ff9887f..7fe91b1 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -54,7 +54,8 @@
country AR:
(2402 - 2482 @ 40), (36)
- (5170 - 5330 @ 160), (23)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (36), AUTO-BW
(5490 - 5590 @ 80), (36)
(5650 - 5730 @ 80), (36)
(5735 - 5835 @ 80), (36)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d8387b1..8f9bd38 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6701,8 +6701,17 @@
if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
request->flags = nla_get_u32(
info->attrs[NL80211_ATTR_SCAN_FLAGS]);
- if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
- !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) {
+ if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+ !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
+ ((request->flags & NL80211_SCAN_FLAG_LOW_SPAN) &&
+ !wiphy_ext_feature_isset(wiphy,
+ NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) ||
+ ((request->flags & NL80211_SCAN_FLAG_LOW_POWER) &&
+ !wiphy_ext_feature_isset(wiphy,
+ NL80211_EXT_FEATURE_LOW_POWER_SCAN)) ||
+ ((request->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) &&
+ !wiphy_ext_feature_isset(wiphy,
+ NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN))) {
err = -EOPNOTSUPP;
goto out_free;
}
@@ -7585,6 +7594,11 @@
intbss->ts_boottime, NL80211_BSS_PAD))
goto nla_put_failure;
+ if (!nl80211_put_signal(msg, intbss->pub.chains,
+ intbss->pub.chain_signal,
+ NL80211_BSS_CHAIN_SIGNAL))
+ goto nla_put_failure;
+
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 35ad69f..ad8611b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -904,6 +904,9 @@
found->ts = tmp->ts;
found->ts_boottime = tmp->ts_boottime;
found->parent_tsf = tmp->parent_tsf;
+ found->pub.chains = tmp->pub.chains;
+ memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
+ IEEE80211_MAX_CHAINS);
ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
} else {
struct cfg80211_internal_bss *new;
@@ -1156,6 +1159,8 @@
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
tmp.ts_boottime = data->boottime_ns;
tmp.parent_tsf = data->parent_tsf;
+ tmp.pub.chains = data->chains;
+ memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e60c79d..52f3c55 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,27 +348,26 @@
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;
- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
if (!xpd_node)
return NULL;
xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ GFP_NOWAIT);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ GFP_NOWAIT);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ GFP_NOWAIT);
if (!xpd->dontaudit)
goto error;
}
@@ -396,8 +395,7 @@
{
struct avc_xperms_node *xp_node;
- xp_node = kmem_cache_zalloc(avc_xperms_cachep,
- GFP_ATOMIC|__GFP_NOMEMALLOC);
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -550,7 +548,7 @@
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
if (!node)
goto out;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 082b20c..051ee18 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -78,8 +78,7 @@
static struct sidtab sidtab;
struct policydb policydb;
-int ss_initialized;
-
+int ss_initialized __aligned(0x1000) __attribute__((section(".bss_rtic")));
/*
* The largest sequence number that has been used when
* providing an access decision to the access vector cache.
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 7712e2b..4783648 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -122,7 +122,7 @@
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
if (get_endpoint(alts, 0)->bInterval >= 1 &&
- get_endpoint(alts, 0)->bInterval <= 4)
+ get_endpoint(alts, 0)->bInterval <= 16)
return get_endpoint(alts, 0)->bInterval - 1;
break;
default: