Merge "msm: mdss: Define number of concurrent Assertive Display HW"
diff --git a/Documentation/arm/msm/msm_sharedmem.txt b/Documentation/arm/msm/msm_sharedmem.txt
new file mode 100644
index 0000000..d9c939e
--- /dev/null
+++ b/Documentation/arm/msm/msm_sharedmem.txt
@@ -0,0 +1,115 @@
+Introduction
+============
+
+This is a new platform driver for newly introduced UIO devices
+to facilitate clients in Userspace.
+
+Hardware description
+====================
+This driver does not implement any specific hardware driver.
+
+Software description
+====================
+
+Design
+======
+
+The goal of this driver is to ensure there is no security lapse in the
+Userspace clients' functionality. This new driver uses the existing
+UIO framework to facilitate the clients to be able to memory map their
+respective allotted shared memory address in the client's address space.
+
+ |
+ Userspace | Kernel space
+ +--------------+ +---------------+ +---------------+
+ | Client | | Shared | | shrdmem_uio |
+ | <-------> Memory <-------> driver |
+ +--------------+ +---------------+ +---------------+
+ |
+ |
+
+The shared memory (a transport buffer) address is unique for each
+individual client and is made available to the driver via device tree.
+
+For a given client the probe would be called once in the shrdmem_uio driver.
+This driver would parse the device tree and register a new UIO device with kernel
+available under /dev/uioX (where X would start from zero, being serially
+incremented for the next UIO device probed)
+
+The client in Userspace would be able to access the respective UIO device
+under the sysfs entry (/sys/class/uio/uioX) upon verifying the name and version
+of the device under this sysfs node. Once verified it could access the physical
+address under /sys/class/uio/uioX/maps/map0/addr
+
+The client would request for memory mapping which would be taken care of in the
+kernel space by the UIO framework. No explicit mmap() implementation required by
+the shrdmem_uio driver.
+
+Power Management
+================
+Does not implement any power management.
+
+SMP/multi-core
+==============
+
+The platform driver would be loaded/probed once per client.
+DTS files will be looked up for shared memory addresses and sizes for all the clients.
+The UIO char device will be created under /dev/uioX.
+
+This being one time activity for a given client it does not require SMP/multi-core safety.
+
+Security
+========
+
+The devices (/dev/uioX) would have permission checks for restricted access
+
+Performance
+===========
+
+None.
+
+Interface
+=========
+
+This driver does not export any APIs for kernel.
+Android user space can access the shared memory by mmaping it.
+
+Driver parameters
+=================
+
+None.
+
+Config options
+==============
+
+None.
+
+Dependencies
+============
+
+The only dependency is the kernel device tree files for the
+Userspace client details.
+
+User space utilities
+====================
+This driver communicates with the following user space clients/utilities:
+
+Remote File System:
+ - Based on Qualcomm Messaging Interface (QMI)
+ - This service enables the modules on the MSM modem processor to
+ read data from and write data to the embedded multimedia card (eMMC),
+ which is solely controlled by the applications processor.
+
+Remote File System Access (QMI_RFSA):
+ - Based on Qualcomm Messaging Interface (QMI)
+ - This service provides access from the Hexagon processor to a High-Level
+   Operating System (HLOS) file system.
+Other
+=====
+
+None.
+
+Known issues
+============
+
+None.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index b4ae5e6..92bbd16 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -234,7 +234,17 @@
above_hispeed_delay: When speed is at or above hispeed_freq, wait for
this long before raising speed in response to continued high load.
-Default is 20000 uS.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds. Colons can
+be used between the speeds and associated delays for readability. For
+example:
+
+ 80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used. If speeds are specified these must appear in
+ascending order. Default is 20000 uS.
timer_rate: Sample rate for reevaluating CPU load when the CPU is not
idle. A deferrable timer is used, such that the CPU will not be woken
diff --git a/Documentation/cpuidle/driver.txt b/Documentation/cpuidle/driver.txt
index 7a9e09e..1b0d81d 100644
--- a/Documentation/cpuidle/driver.txt
+++ b/Documentation/cpuidle/driver.txt
@@ -15,11 +15,17 @@
cpuidle driver initializes the cpuidle_device structure for each CPU device
and registers with cpuidle using cpuidle_register_device.
+If all the idle states are the same, the wrapper function cpuidle_register
+could be used instead.
+
It can also support the dynamic changes (like battery <-> AC), by using
cpuidle_pause_and_lock, cpuidle_disable_device and cpuidle_enable_device,
cpuidle_resume_and_unlock.
Interfaces:
+extern int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus);
+extern int cpuidle_unregister(struct cpuidle_driver *drv);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
diff --git a/Documentation/devicetree/bindings/arm/msm/acpuclock/clock-a7.txt b/Documentation/devicetree/bindings/arm/msm/acpuclock/clock-a7.txt
new file mode 100644
index 0000000..10eb0fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/acpuclock/clock-a7.txt
@@ -0,0 +1,37 @@
+* Qualcomm Application CPU clock driver
+
+clock-a7 is the driver for the Root Clock Generator (rcg) hw which controls
+the cpu rate. RCGs support selecting one of several clock inputs, as well as
+a configurable divider. This hw is different than normal rcgs in that it may
+optionally have a register which encodes the maximum rate supported by hw.
+
+Required properties:
+- compatible: "qcom,clock-a7-8226"
+- reg: pairs of physical address and region size
+- reg-names: "rcg-base" is expected
+- clock-names: list of names of clock inputs
+- qcom,speedX-bin-vZ:
+ A table of CPU frequency (Hz) to regulator voltage (uV) mapping.
+ Format: <freq uV>
+ This represents the max frequency possible for each possible
+ power configuration for a CPU that's binned as speed bin X,
+ speed bin revision Z. Speed bin values can be between [0-7]
+ and the version can be between [0-3].
+
+- cpu-vdd-supply: regulator phandle for cpu power domain.
+
+Optional properties:
+- reg-names: "efuse"
+
+Example:
+ qcom,acpuclk@f9011050 {
+ compatible = "qcom,clock-a7-8226";
+ reg = <0xf9011050 0x8>;
+		reg-names = "rcg-base";
+ cpu-vdd-supply = <&apc_vreg_corner>;
+
+ clock-names = "clk-4", "clk-5";
+ qcom,speed0-bin-v0 =
+ <384000000 1150000>,
+ <600000000 1200000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
index 917ea75..0696730 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
@@ -1,52 +1,106 @@
* Low Power Management Levels
The application processor in MSM can do a variety of C-States for low power
-management. These C-States are invoked by the CPUIdle framework when the core
-becomes idle. But based on the time available until the next scheduled wakeup,
-the system can do several low power modes. The combination is captured in the
-device tree as lpm-level.
+management. The LPM module performs the CPU and System low power modes based
+on the latency and residency information of the individual CPU/System low power
+levels.
-The required nodes for lpm-levels are:
+The first level node represents the properties of the system and includes
+second level node to represent the low power modes of cpu and system.
+
+[First Level Nodes]
+Required properties:
- compatible: "qcom,lpm-levels"
-- reg: The numeric level id
+
+The optional nodes for the First level nodes are:
+- qcom,no-l2-saw: Indicates if this target has an L2 SAW (SPM and AVS wrapper).
+- qcom,default-l2-state: Indicates what the default low power state of the L2
+ SAW should be. This property should be mentioned when there is
+ a L2 saw.
+- qcom,allow-synced-levels: Indicates if certain low power modes should be
+ synchronized across all cores so as to configure a system
+ low power mode.
+
+[Second Level Nodes]
+Required properties to define CPU low power modes :
+- compatible = "qcom,cpu-modes";
- qcom,mode: The sleep mode of the processor, values for the property are:
"wfi" - Wait for Interrupt
- "ramp_down_and_wfi" - Ramp down and wait for interrupt
+ "retention" - Retention
"standalone_pc" - Standalone power collapse
"pc" - Power Collapse
- "retention" - Retention
- "pc_suspend" - Suspended Power Collapse
- "pc_no_xo_shutdown" - Power Collapse with no XO shutdown
+- qcom,latency-us: The latency in handling the interrupt if this level was
+ chosen, in uSec
+- qcom,ss-power: The steady state power expelled when the processor is in this
+ level in mWatts
+- qcom,energy-overhead: The energy used up in entering and exiting this level
+ in mWatts.uSec
+- qcom,time-overhead: The time spent in entering and exiting this level in uS
+
+Required properties to define System low power mode :
+- compatible: "qcom,system-modes"
- qcom,l2: The state of L2 cache. Values are:
"l2_cache_pc" - L2 cache in power collapse
- "l2_cache_pc_no_rpm" - L2 cache in power collapse. This mode wouldn't inform the RPM
+ "l2_cache_pc_no_rpm" - L2 cache in power collapse. This mode
+ wouldn't inform the RPM
			"l2_cache_retention" - L2 cache in retention
"l2_cache_gdhs" - L2 cache in GDHS
"l2_cache_active" - L2 cache in active mode
-- qcom,latency-us: The latency in handling the interrupt if this level was
- chosen, in uSec
-- qcom,ss-power: The steady state power expelled when the processor is in this
- level in mWatts
-- qcom,energy-overhead: The energy used up in entering and exiting this level
- in mWatts.uSec
-- qcom,time-overhead: The time spent in entering and exiting this level in uS
-The optional nodes for lpm-levels are :
-- qcom,no-l2-saw: Indicates if this target has an L2 SAW (SPM and AVS wrapper).
-- qcom,default-l2-state: Indicates what the default low power state of the L2 SAW should be. This property is used only when there is an L2 SAW.
+- qcom,latency-us: The latency in handling the interrupt if this level was
+ chosen, in uSec
+- qcom,ss-power: The steady state power expelled when the processor is in this
+ level in mWatts
+- qcom,energy-overhead: The energy used up in entering and exiting this level
+ in mWatts.uSec
+- qcom,time-overhead: The time spent in entering and exiting this level in uS
+- qcom,min-cpu-mode: The min cpu sleep mode at which the given system level is
+ valid. All cpus should have entered this low power mode before
+ this system level can be chosen.
Example:
-
qcom,lpm-levels {
- qcom,no-l2-saw;
- qcom,lpm-level@0 {
- reg = <0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <100>;
- qcom,ss-power = <650>;
- qcom,energy-overhead = <801>;
- qcom,time-overhead = <200>;
- };
+ compatible = "qcom,lpm-levels";
+ qcom,default-l2-state = "l2_cache_retention";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+ qcom,cpu-mode@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <715>;
+ qcom,energy-overhead = <17700>;
+ qcom,time-overhead = <2>;
+ };
+
+ qcom,cpu-mode@1 {
+ qcom,mode = "retention";
+ qcom,latency-us = <35>;
+ qcom,ss-power = <542>;
+ qcom,energy-overhead = <34920>;
+ qcom,time-overhead = <40>;
+ };
+ };
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
+
+ qcom,system-mode@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <20000>;
+ qcom,ss-power = <163>;
+ qcom,energy-overhead = <1577736>;
+ qcom,time-overhead = <5067>;
+ };
+
+ qcom,system-mode@1 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <30000>;
+ qcom,ss-power = <83>;
+ qcom,energy-overhead = <2274420>;
+ qcom,time-overhead = <6605>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
index fbf1a1f..6283a82 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -101,7 +101,7 @@
other parameters used in Limiter and Regular mode
for static BKE configuration. It is defined in KBps.
qcom,bimc,gp: Grant Period for configuring a master in limiter
- mode. This is an integer value in micro-seconds.
+ mode. This is an integer value in nano-seconds.
qcom,bimc,thmp: Medium threshold percentage for BIMC masters.
This percentage is used to calculate medium threshold
value for BIMC Masters in Limiter mode for static
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
index 2d83614..f3cf8f3 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -17,6 +17,16 @@
Required properties for Ion heaps
- reg: The ID of the ION heap.
+- qcom,ion-heap-type: The heap type to use for this heap. Should be one of
+ the following:
+ - "SYSTEM"
+ - "SYSTEM_CONTIG"
+ - "CARVEOUT"
+ - "CHUNK"
+ - "CP"
+ - "DMA"
+ - "SECURE_DMA"
+ - "REMOVED"
Optional properties for Ion heaps
@@ -34,16 +44,17 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
+ qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;
qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x7800000>;
+ linux,contiguous-region = <&secure_mem>;
+ qcom,ion-heap-type = "SECURE_DMA";
};
qcom,ion-heap@29 { /* FIRMWARE HEAP */
@@ -53,5 +64,6 @@
qcom,heap-adjacent = <8>;
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0xA00000>;
-
+ qcom,ion-heap-type = "CARVEOUT";
+ };
};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 1a44f5a..0e236f2 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -178,34 +178,33 @@
- qcom,mdss-dsi-bllp-power-mode: Boolean to determine DSI lane state during
blanking low power period (BLLP) mode.
- qcom,mdss-dsi-traffic-mode: Specifies the panel traffic mode.
- 0 = non burst with sync pulses (default mode).
- 1 = non burst with sync start event.
- 2 = burst mode.
+ "non_burst_sync_pulse" = non burst with sync pulses (default).
+ "non_burst_sync_event" = non burst with sync start event.
+ "burst_mode" = burst mode.
- qcom,mdss-dsi-pixel-packing: Specifies if pixel packing is used (in case of RGB666).
- 0 = Tight packing (default value).
- 1 = Loose packing.
+ "tight" = Tight packing (default value).
+ "loose" = Loose packing.
- qcom,mdss-dsi-virtual-channel-id:	Specifies the virtual channel identifier.
0 = default value.
- qcom,mdss-dsi-color-order: Specifies the R, G and B channel ordering.
- 0 = DSI_RGB_SWAP_RGB (default value)
- 1 = DSI_RGB_SWAP_RBG
- 2 = DSI_RGB_SWAP_BGR
- 3 = DSI_RGB_SWAP_BRG
- 4 = DSI_RGB_SWAP_GRB
- 5 = DSI_RGB_SWAP_GBR
+ "rgb_swap_rgb" = DSI_RGB_SWAP_RGB (default value)
+					"rgb_swap_rbg" = DSI_RGB_SWAP_RBG
+					"rgb_swap_bgr" = DSI_RGB_SWAP_BGR
+ "rgb_swap_brg" = DSI_RGB_SWAP_BRG
+ "rgb_swap_grb" = DSI_RGB_SWAP_GRB
+ "rgb_swap_gbr" = DSI_RGB_SWAP_GBR
- qcom,mdss-dsi-lane-0-state: Boolean that specifies whether data lane 0 is enabled.
- qcom,mdss-dsi-lane-1-state: Boolean that specifies whether data lane 1 is enabled.
- qcom,mdss-dsi-lane-2-state: Boolean that specifies whether data lane 2 is enabled.
- qcom,mdss-dsi-lane-3-state: Boolean that specifies whether data lane 3 is enabled.
- qcom,mdss-dsi-lane-map: Specifies the data lane swap configuration.
- 0 = <0 1 2 3> (default value)
- 1 = <3 0 1 2>
- 2 = <2 3 0 1>
- 3 = <1 2 3 0>
- 4 = <0 3 2 1>
- 5 = <1 0 3 2>
- 6 = <2 1 0 3>
- 7 = <3 2 1 0>
+ "lane_map_0123" = <0 1 2 3> (default value)
+ "lane_map_3012" = <3 0 1 2>
+ "lane_map_2301" = <2 3 0 1>
+ "lane_map_1230" = <1 2 3 0>
+ "lane_map_0321" = <0 3 2 1>
+ "lane_map_1032" = <1 0 3 2>
+ "lane_map_2103" = <2 1 0 3>
+ "lane_map_3210" = <3 2 1 0>
- qcom,mdss-dsi-t-clk-post: Specifies the byte clock cycles after mode switch.
0x03 = default value.
- qcom,mdss-dsi-t-clk-pre: Specifies the byte clock cycles before mode switch.
@@ -214,16 +213,16 @@
0 = stream 0 (default)
1 = stream 1
- qcom,mdss-dsi-mdp-trigger: Specifies the trigger mechanism to be used for MDP path.
- 0 = no trigger
- 2 = Tear check signal line used for trigger
- 4 = Triggered by software (default)
- 6 = Software trigger and TE
+ "none" = no trigger
+ "trigger_te" = Tear check signal line used for trigger
+ "trigger_sw" = Triggered by software (default)
+ "trigger_sw_te" = Software trigger and TE
- qcom,mdss-dsi-dma-trigger: Specifies the trigger mechanism to be used for DMA path.
- 0 = no trigger
- 2 = Tear check signal line used for trigger
- 4 = Triggered by software (default)
- 5 = Software trigger and start/end of frame trigger.
- 6 = Software trigger and TE
+ "none" = no trigger
+ "trigger_te" = Tear check signal line used for trigger
+ "trigger_sw" = Triggered by software (default)
+ "trigger_sw_seof" = Software trigger and start/end of frame trigger.
+ "trigger_sw_te" = Software trigger and TE
- qcom,mdss-dsi-panel-framerate: Specifies the frame rate for the panel.
60 = 60 frames per second (default)
- qcom,mdss-dsi-panel-clockrate: Specifies the panel clock speed in Hz.
@@ -250,7 +249,12 @@
as below:
--> Reset GPIO value
--> Sleep value (in ms)
-
+- qcom,mdss-dsi-lp11-init: Boolean used to enable the DSI clocks and data lanes (low power 11)
+ before issuing hardware reset line.
+- qcom,mdss-dsi-init-delay-us: Delay in microseconds(us) before performing any DSI activity in lp11
+ mode. This master delay (t_init_delay as per DSI spec) should be sum
+				of DSI internal delay to reach functional after power up and minimum
+ delay required by panel to reach functional.
Note, if a given optional qcom,* binding is not present, then the driver will configure
the default values specified.
@@ -343,5 +347,7 @@
qcom,mdss-dsi-panel-mode-gpio-state = "low";
qcom,partial-update-enabled;
qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
+ qcom,mdss-dsi-lp11-init;
+ qcom,mdss-dsi-init-delay-us = <100>;
};
};
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index aa0aa8c..656f3a4 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -64,7 +64,7 @@
Optional Properties:
- qcom,initial-powerlevel: This value indicates which qcom,gpu-pwrlevel should be used at start time
and when coming back out of resume
-- qcom,step-pwrlevel: How many qcom,gpu-pwrlevel should be decremented at once
+- qcom,bus-control: Boolean. Enables an independent bus vote from the gpu frequency
- qcom,idle-timeout: This property represents the time in microseconds for idle timeout.
- qcom,chipid: If it exists this property is used to replace
the chip identification read from the GPU hardware.
diff --git a/Documentation/devicetree/bindings/uio/msm_sharedmem.txt b/Documentation/devicetree/bindings/uio/msm_sharedmem.txt
new file mode 100644
index 0000000..5af50da
--- /dev/null
+++ b/Documentation/devicetree/bindings/uio/msm_sharedmem.txt
@@ -0,0 +1,13 @@
+msm_sharedmem provides the shared memory addresses for various clients in user-space
+
+Required properties:
+- compatible: Must be "qcom,sharedmem-uio"
+- reg : The address and size of the shared memory. The address/sizes may vary.
+- reg-names : indicates various client-names.
+
+Example:
+ msm_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0dc80000 0x00180000>,
+ reg-names = "rmtfs";
+ };
diff --git a/arch/arm/boot/dts/apq8074-v1-ion.dtsi b/arch/arm/boot/dts/apq8074-v1-ion.dtsi
index 49d7ee1..3611132 100644
--- a/arch/arm/boot/dts/apq8074-v1-ion.dtsi
+++ b/arch/arm/boot/dts/apq8074-v1-ion.dtsi
@@ -17,6 +17,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x0dc00000 0x1e00000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
};
diff --git a/arch/arm/boot/dts/apq8074-v2.0-1-ion.dtsi b/arch/arm/boot/dts/apq8074-v2.0-1-ion.dtsi
index 49d7ee1..3611132 100644
--- a/arch/arm/boot/dts/apq8074-v2.0-1-ion.dtsi
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-ion.dtsi
@@ -17,6 +17,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x0dc00000 0x1e00000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
};
diff --git a/arch/arm/boot/dts/apq8074-v2.2-ion.dtsi b/arch/arm/boot/dts/apq8074-v2.2-ion.dtsi
index 49d7ee1..3611132 100644
--- a/arch/arm/boot/dts/apq8074-v2.2-ion.dtsi
+++ b/arch/arm/boot/dts/apq8074-v2.2-ion.dtsi
@@ -17,6 +17,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x0dc00000 0x1e00000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
};
diff --git a/arch/arm/boot/dts/apq8084-ion.dtsi b/arch/arm/boot/dts/apq8084-ion.dtsi
index ea954b8..167b8b7 100644
--- a/arch/arm/boot/dts/apq8084-ion.dtsi
+++ b/arch/arm/boot/dts/apq8084-ion.dtsi
@@ -16,16 +16,14 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
-
- qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
- reg = <21>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
+ qcom,ion-heap@25 {
reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
};
};
};
diff --git a/arch/arm/boot/dts/dsi-panel-generic-720p-cmd.dtsi b/arch/arm/boot/dts/dsi-panel-generic-720p-cmd.dtsi
index 2963d15..17e6e94 100644
--- a/arch/arm/boot/dts/dsi-panel-generic-720p-cmd.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-generic-720p-cmd.dtsi
@@ -37,7 +37,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [05 01 00 00 78 00 01 11
@@ -52,8 +51,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
- qcom,mdss-dsi-traffic-mode = <1>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -73,8 +71,8 @@
qcom,mdss-dsi-bl-max-level = <255>;
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <7>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
};
diff --git a/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
index 92e6fc1..88ccd08 100644
--- a/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [
@@ -137,8 +136,8 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <1>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-lane-map = "lane_map_3012";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -148,8 +147,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <255>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi
index 8f94502..8c79bb9 100755
--- a/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [
@@ -115,8 +114,8 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <1>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-lane-map = "lane_map_3012";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -126,8 +125,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <26>;
qcom,mdss-dsi-bl-max-level = <255>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi
index 83351ca..5302d8ae 100644
--- a/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8394a-720p-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [39 01 00 00 00 00 04 b9 ff 83 94
@@ -62,8 +61,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -75,8 +73,8 @@
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-jdi-1080p-video.dtsi b/arch/arm/boot/dts/dsi-panel-jdi-1080p-video.dtsi
index 1b64cf7..be42509 100644
--- a/arch/arm/boot/dts/dsi-panel-jdi-1080p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-jdi-1080p-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 55 00
@@ -51,8 +50,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -64,8 +62,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <0x04>;
- qcom,mdss-dsi-mdp-trigger = <0x0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
};
diff --git a/arch/arm/boot/dts/dsi-panel-nt35521-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-nt35521-720p-video.dtsi
index 9bb11da..8be4e34 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35521-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35521-720p-video.dtsi
@@ -38,10 +38,9 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-pixel-packing = <0>;
+ qcom,mdss-dsi-pixel-packing = "tight";
qcom,mdss-dsi-on-command = [29 01 00 00 00 00 06 F0 55 AA 52 08 00
29 01 00 00 00 00 03 B1 68 21
23 01 00 00 00 00 02 B5 C8
@@ -252,8 +251,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -265,8 +263,8 @@
qcom,mdss-dsi-t-clk-pre = <0x2D>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <255>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-nt35590-720p-cmd.dtsi b/arch/arm/boot/dts/dsi-panel-nt35590-720p-cmd.dtsi
index a24cb58..d3547d8 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35590-720p-cmd.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35590-720p-cmd.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 FF EE
@@ -510,8 +509,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -528,8 +526,8 @@
qcom,mdss-dsi-t-clk-pre = <0x2c>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
qcom,mdss-pan-physical-width-dimension = <59>;
diff --git a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
index 79618b9..8d28996 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35590-720p-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 FF EE
@@ -511,8 +510,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -524,8 +522,8 @@
qcom,mdss-dsi-t-clk-pre = <0x2c>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
qcom,mdss-pan-physical-width-dimension = <59>;
diff --git a/arch/arm/boot/dts/dsi-panel-nt35596-1080p-video.dtsi b/arch/arm/boot/dts/dsi-panel-nt35596-1080p-video.dtsi
index 2312b37..770bac4 100644
--- a/arch/arm/boot/dts/dsi-panel-nt35596-1080p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-nt35596-1080p-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 FF EE
@@ -566,8 +565,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -579,8 +577,8 @@
qcom,mdss-dsi-t-clk-pre = <0x38>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi b/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi
index 393419b..8d6e703 100644
--- a/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [
@@ -247,8 +246,8 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
- qcom,mdss-dsi-traffic-mode = <1>;
- qcom,mdss-dsi-lane-map = <1>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-lane-map = "lane_map_3012";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -258,8 +257,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <26>;
qcom,mdss-dsi-bl-max-level = <255>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi b/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
index 285d8fc..d23e3de 100644
--- a/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-sharp-qhd-video.dtsi
@@ -38,7 +38,7 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <2>;
+ qcom,mdss-dsi-color-order = "rgb_swap_bgr";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [05 01 00 00 32 00 02 01 00
@@ -53,8 +53,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <0>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -64,8 +63,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1c>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <4>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "trigger_sw";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
diff --git a/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi
index b510e6b..60bba5d 100644
--- a/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-ssd2080m-720p-video.dtsi
@@ -31,16 +31,17 @@
qcom,mdss-dsi-h-pulse-width = <14>;
qcom,mdss-dsi-h-sync-skew = <0>;
qcom,mdss-dsi-v-back-porch = <14>;
- qcom,mdss-dsi-v-front-porch = <11>;
+ qcom,mdss-dsi-v-front-porch = <30>;
qcom,mdss-dsi-v-pulse-width = <2>;
qcom,mdss-dsi-h-left-border = <0>;
qcom,mdss-dsi-h-right-border = <0>;
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-pixel-packing = "tight";
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 FF 01
29 01 00 00 00 00 05 C6 63 00 81 31
29 01 00 00 00 00 05 CB E7 80 73 33
@@ -81,40 +82,38 @@
29 01 00 00 00 00 05 F6 00 00 00 00
29 01 00 00 00 00 05 F7 00 00 00 00
29 01 00 00 00 00 03 E1 90 00
- 29 01 00 00 00 00 03 DE 95 CF
+ 29 01 00 00 00 00 07 DE 95 CF E2 CE 11 15
29 01 00 00 00 00 02 CF 46
29 01 00 00 00 00 03 C5 77 47
29 01 00 00 00 00 03 ED 00 20
05 01 00 00 B4 00 02 11 00
05 01 00 00 82 00 02 29 00
15 01 00 00 00 00 02 53 2c];
-
- qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 10 00
- 05 01 00 00 32 00 02 53 00
- 05 01 00 00 00 00 02 c2 00
+ qcom,mdss-dsi-off-command = [15 01 00 00 32 00 02 10 00
+ 15 01 00 00 32 00 02 53 00
+ 15 01 00 00 00 00 02 c2 00
39 01 00 00 00 00 02 cf 40
- 05 01 00 00 50 00 03 de 84 00
+ 15 01 00 00 50 00 03 de 84 00
39 01 00 00 00 00 02 cb 22
- 05 01 00 00 00 00 02 c3 00];
-
+ 15 01 00 00 00 00 02 c3 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <1>;
- qcom,mdss-dsi-traffic-mode = <2>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-lane-0-state;
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
- qcom,mdss-dsi-panel-timings = [a8 1f 17 00 2f 2d 1c 21 29 03 04 00];
+ qcom,mdss-dsi-panel-timings = [68 1d 15 00 2e 2d 19 1f 24 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x20>;
qcom,mdss-dsi-t-clk-pre = <0x2f>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
};
};
diff --git a/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi b/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
index f7de416..10f53b9 100644
--- a/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-toshiba-720p-video.dtsi
@@ -38,7 +38,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [23 01 00 00 0a 00 02 b0 00
@@ -74,8 +73,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
- qcom,mdss-dsi-traffic-mode = <1>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -87,8 +85,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-dma-trigger = <0x04>;
- qcom,mdss-dsi-mdp-trigger = <0x0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
diff --git a/arch/arm/boot/dts/dsi-panel-truly-wvga-cmd.dtsi b/arch/arm/boot/dts/dsi-panel-truly-wvga-cmd.dtsi
index d170833..168dda4 100644
--- a/arch/arm/boot/dts/dsi-panel-truly-wvga-cmd.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-truly-wvga-cmd.dtsi
@@ -39,7 +39,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [
@@ -146,8 +145,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
- qcom,mdss-dsi-traffic-mode = <1>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -163,8 +161,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <255>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <2>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "trigger_te";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
};
diff --git a/arch/arm/boot/dts/dsi-panel-truly-wvga-video.dtsi b/arch/arm/boot/dts/dsi-panel-truly-wvga-video.dtsi
index 546a90f..121e54d 100644
--- a/arch/arm/boot/dts/dsi-panel-truly-wvga-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-truly-wvga-video.dtsi
@@ -39,7 +39,6 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-color-order = <0>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [
@@ -150,8 +149,7 @@
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
- qcom,mdss-dsi-traffic-mode = <1>;
- qcom,mdss-dsi-lane-map = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -161,8 +159,8 @@
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <255>;
- qcom,mdss-dsi-dma-trigger = <4>;
- qcom,mdss-dsi-mdp-trigger = <0>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 2>, <1 20>;
};
diff --git a/arch/arm/boot/dts/fsm9900.dtsi b/arch/arm/boot/dts/fsm9900.dtsi
index 1c48bf0..705a512 100644
--- a/arch/arm/boot/dts/fsm9900.dtsi
+++ b/arch/arm/boot/dts/fsm9900.dtsi
@@ -85,8 +85,9 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
+ qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
};
};
diff --git a/arch/arm/boot/dts/mpq8092-ion.dtsi b/arch/arm/boot/dts/mpq8092-ion.dtsi
index f9f5985..903610d 100644
--- a/arch/arm/boot/dts/mpq8092-ion.dtsi
+++ b/arch/arm/boot/dts/mpq8092-ion.dtsi
@@ -16,16 +16,14 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
-
- qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
- reg = <21>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
+ qcom,ion-heap@25 {
reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
};
};
diff --git a/arch/arm/boot/dts/msm8226-ion.dtsi b/arch/arm/boot/dts/msm8226-ion.dtsi
index 30c3209..06e2779 100644
--- a/arch/arm/boot/dts/msm8226-ion.dtsi
+++ b/arch/arm/boot/dts/msm8226-ion.dtsi
@@ -16,12 +16,14 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
+ qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
};
- qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
+ qcom,ion-heap@21 {
reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
@@ -29,10 +31,7 @@
reg = <8>;
qcom,heap-align = <0x1000>;
linux,contiguous-region = <&secure_mem>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
- reg = <25>;
+ qcom,ion-heap-type = "SECURE_DMA";
};
qcom,ion-heap@22 { /* adsp heap */
@@ -40,12 +39,14 @@
reg = <22>;
qcom,heap-align = <0x1000>;
linux,contiguous-region = <&adsp_mem>;
+ qcom,ion-heap-type = "DMA";
};
qcom,ion-heap@27 { /* QSECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;
linux,contiguous-region = <&qsecom_mem>;
+ qcom,ion-heap-type = "DMA";
};
qcom,ion-heap@28 { /* AUDIO HEAP */
@@ -54,6 +55,7 @@
qcom,heap-align = <0x1000>;
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0x314000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
qcom,ion-heap@23 { /* OTHER PIL HEAP */
@@ -61,6 +63,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x0dc00000 0x1900000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
diff --git a/arch/arm/boot/dts/msm8226-v1-pm.dtsi b/arch/arm/boot/dts/msm8226-v1-pm.dtsi
index 02feec8..38ca03b 100644
--- a/arch/arm/boot/dts/msm8226-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-v1-pm.dtsi
@@ -111,44 +111,68 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <1>;
- qcom,ss-power = <530>;
- qcom,energy-overhead = <52800>;
- qcom,time-overhead = <100>;
+		qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+
+ qcom,cpu-modes@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <530>;
+ qcom,energy-overhead = <52800>;
+ qcom,time-overhead = <100>;
+ };
+
+ qcom,cpu-modes@1 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <500>;
+ qcom,ss-power = <410>;
+ qcom,energy-overhead = <603400>;
+ qcom,time-overhead = <1200>;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,cpu-modes@2 {
+ qcom,mode = "pc";
+ qcom,latency-us = <550>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <700000>;
+ qcom,time-overhead = <1410>;
+ qcom,use-broadcast-timer;
+ };
};
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <500>;
- qcom,ss-power = <410>;
- qcom,energy-overhead = <603400>;
- qcom,time-overhead = <1200>;
- };
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc_no_rpm";
- qcom,latency-us = <1000>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
- };
+ qcom,system-modes@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <10700>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <738750>;
+ qcom,time-overhead = <1410>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc";
- qcom,latency-us = <11000>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
+ qcom,system-modes@1 {
+ qcom,l2 = "l2_cache_pc_no_rpm";
+ qcom,latency-us = <1000>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "spc";
+ qcom,sync-cpus;
+ };
+
+ qcom,system-modes@2 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <12700>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
};
};
@@ -280,6 +304,8 @@
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
qcom,pc-resets-timer;
+ qcom,cpus-as-clocks;
+ qcom,synced-clocks;
};
qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8226-v2-pm.dtsi b/arch/arm/boot/dts/msm8226-v2-pm.dtsi
index 31d5a8f..a0da9cc 100644
--- a/arch/arm/boot/dts/msm8226-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2-pm.dtsi
@@ -113,54 +113,68 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <1>;
- qcom,ss-power = <530>;
- qcom,energy-overhead = <52800>;
- qcom,time-overhead = <100>;
+		qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+
+ qcom,cpu-modes@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <530>;
+ qcom,energy-overhead = <52800>;
+ qcom,time-overhead = <100>;
+ };
+
+ qcom,cpu-modes@1 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <500>;
+ qcom,ss-power = <410>;
+ qcom,energy-overhead = <603400>;
+ qcom,time-overhead = <1200>;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,cpu-modes@2 {
+ qcom,mode = "pc";
+ qcom,latency-us = <550>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <700000>;
+ qcom,time-overhead = <1410>;
+ qcom,use-broadcast-timer;
+ };
};
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <500>;
- qcom,ss-power = <410>;
- qcom,energy-overhead = <603400>;
- qcom,time-overhead = <1200>;
- };
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_gdhs";
- qcom,latency-us = <10700>;
- qcom,ss-power = <372>;
- qcom,energy-overhead = <738750>;
- qcom,time-overhead = <1410>;
- };
+ qcom,system-modes@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <10700>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <738750>;
+ qcom,time-overhead = <1410>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc_no_rpm";
- qcom,latency-us = <1000>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
- };
+ qcom,system-modes@1 {
+ qcom,l2 = "l2_cache_pc_no_rpm";
+ qcom,latency-us = <1000>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "spc";
+ qcom,sync-cpus;
+ };
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc";
- qcom,latency-us = <11000>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
+ qcom,system-modes@2 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <12700>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
};
};
@@ -292,6 +306,8 @@
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
qcom,pc-resets-timer;
+ qcom,cpus-as-clocks;
+ qcom,synced-clocks;
};
qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index a57adcd..d74554f 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -74,10 +74,45 @@
};
&soc {
- qcom,acpuclk@f9011050 {
+ qcom,clock-a7@f9011050 {
reg = <0xf9011050 0x8>,
<0xfc4b80b0 0x8>;
- reg-names = "rcg_base", "pte_efuse";
+ reg-names = "rcg-base", "efuse";
+ qcom,speed0-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1190400000 3>;
+ qcom,speed6-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1190400000 3>;
+ qcom,speed2-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1401600000 3>;
+ qcom,speed5-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1401600000 3>;
+ qcom,speed4-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+		<1497600000 3>;
+ qcom,speed7-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1497600000 3>;
+ qcom,speed1-bin-v2 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1593600000 3>;
};
};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 3ef0d6d..c86d3f3 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -726,6 +726,24 @@
compatible = "qcom,bcl";
};
+ rmtfs_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd80000 0x00180000>;
+ reg-names = "rmtfs";
+ };
+
+ dsp_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd60000 0x00020000>;
+ reg-names = "rfsa_dsp";
+ };
+
+ mdm_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd60000 0x00020000>;
+ reg-names = "rfsa_mdm";
+ };
+
sdcc1: qcom,sdcc@f9824000 {
cell-index = <1>; /* SDC1 eMMC slot */
compatible = "qcom,msm-sdcc";
@@ -982,11 +1000,37 @@
qcom,scl-gpio = <&msmgpio 19 0>;
};
- qcom,acpuclk@f9011050 {
- compatible = "qcom,acpuclk-a7";
+ qcom,clock-a7@f9011050 {
+ compatible = "qcom,clock-a7-8226";
reg = <0xf9011050 0x8>;
- reg-names = "rcg_base";
- a7_cpu-supply = <&apc_vreg_corner>;
+ reg-names = "rcg-base";
+ clock-names = "clk-4", "clk-5";
+ qcom,speed0-bin-v0 =
+ < 0 0>,
+ < 384000000 1>,
+ < 787200000 2>,
+ <1190400000 3>;
+
+ cpu-vdd-supply = <&apc_vreg_corner>;
+ };
+
+ qcom,msm-cpufreq@0 {
+ reg = <0 4>;
+ compatible = "qcom,msm-cpufreq";
+ qcom,cpu-mem-ports = <1 512>;
+ qcom,cpufreq-table =
+ < 300000 1600 /* 200 MHz */ >,
+ < 384000 1600 /* 200 MHz */ >,
+ < 600000 3200 /* 320 MHz */ >,
+ < 787200 4264 /* 533 MHz */ >,
+ < 998400 4264 /* 533 MHz */ >,
+ < 1094400 4264 /* 533 MHz */ >,
+ < 1190400 4264 /* 533 MHz */ >,
+ < 1305600 4264 /* 533 MHz */ >,
+ < 1344000 4264 /* 533 MHz */ >,
+ < 1401600 4264 /* 533 MHz */ >,
+ < 1497600 4264 /* 533 MHz */ >,
+ < 1593600 4264 /* 533 MHz */ >;
};
qcom,ocmem@fdd00000 {
diff --git a/arch/arm/boot/dts/msm8610-bus.dtsi b/arch/arm/boot/dts/msm8610-bus.dtsi
index c6e81d8..54c698c 100644
--- a/arch/arm/boot/dts/msm8610-bus.dtsi
+++ b/arch/arm/boot/dts/msm8610-bus.dtsi
@@ -941,7 +941,7 @@
qcom,thresh = <800000>;
qcom,dual-conf;
qcom,bimc,bw = <300000>;
- qcom,bimc,gp = <5>;
+ qcom,bimc,gp = <5000>;
qcom,bimc,thmp = <50>;
};
diff --git a/arch/arm/boot/dts/msm8610-ion.dtsi b/arch/arm/boot/dts/msm8610-ion.dtsi
index 77cd582..601f8ed 100644
--- a/arch/arm/boot/dts/msm8610-ion.dtsi
+++ b/arch/arm/boot/dts/msm8610-ion.dtsi
@@ -16,22 +16,21 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
-
- qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
- reg = <21>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
+ qcom,ion-heap@25 {
reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@27 { /* QSECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;
linux,contiguous-region = <&qsecom_mem>;
+ qcom,ion-heap-type = "DMA";
};
qcom,ion-heap@23 { /* OTHER PIL HEAP */
@@ -39,6 +38,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x0c500000 0x1300000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
qcom,ion-heap@26 { /* MODEM HEAP */
@@ -46,6 +46,7 @@
reg = <26>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x08800000 0x3d00000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
diff --git a/arch/arm/boot/dts/msm8610-mtp.dtsi b/arch/arm/boot/dts/msm8610-mtp.dtsi
index 6ce0109..3b0f2a2 100644
--- a/arch/arm/boot/dts/msm8610-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8610-mtp.dtsi
@@ -142,7 +142,7 @@
0x2800>;
capella,ps_close_thd_set = <0xa>;
capella,ps_away_thd_set = <0x5>;
- capella,ls_cmd = <0x44>; /* PS_IT=160ms, INT_PERS=2*/
+ capella,ls_cmd = <0x04>; /* ALS_IT=80ms, INT_PERS=2*/
capella,ps_conf1_val = <0x0006>;
capella,ps_conf3_val = <0x3010>;
};
diff --git a/arch/arm/boot/dts/msm8610-v1-pm.dtsi b/arch/arm/boot/dts/msm8610-v1-pm.dtsi
index 62aa0f4..e5aa53c 100644
--- a/arch/arm/boot/dts/msm8610-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-v1-pm.dtsi
@@ -111,46 +111,69 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <1>;
- qcom,ss-power = <530>;
- qcom,energy-overhead = <52800>;
- qcom,time-overhead = <100>;
+		qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+
+ qcom,cpu-modes@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <530>;
+ qcom,energy-overhead = <52800>;
+ qcom,time-overhead = <100>;
+ };
+
+ qcom,cpu-modes@1 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <500>;
+ qcom,ss-power = <410>;
+ qcom,energy-overhead = <603400>;
+ qcom,time-overhead = <1200>;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,cpu-modes@2 {
+ qcom,mode = "pc";
+ qcom,latency-us = <550>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <700000>;
+ qcom,time-overhead = <1410>;
+ qcom,use-broadcast-timer;
+ };
};
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <500>;
- qcom,ss-power = <410>;
- qcom,energy-overhead = <603400>;
- qcom,time-overhead = <1410>;
- };
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc_no_rpm";
- qcom,latency-us = <1000>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
- };
+ qcom,system-modes@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <10700>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <738750>;
+ qcom,time-overhead = <1410>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc";
- qcom,latency-us = <12700>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
- };
+ qcom,system-modes@1 {
+ qcom,l2 = "l2_cache_pc_no_rpm";
+ qcom,latency-us = <1000>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "spc";
+ qcom,sync-cpus;
+ };
+ qcom,system-modes@2 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <12700>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
+ };
};
qcom,pm-boot {
diff --git a/arch/arm/boot/dts/msm8610-v2-pm.dtsi b/arch/arm/boot/dts/msm8610-v2-pm.dtsi
index e401f7a..c819c49 100644
--- a/arch/arm/boot/dts/msm8610-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-v2-pm.dtsi
@@ -113,54 +113,68 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <1>;
- qcom,ss-power = <530>;
- qcom,energy-overhead = <52800>;
- qcom,time-overhead = <100>;
+		qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+
+ qcom,cpu-modes@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <530>;
+ qcom,energy-overhead = <52800>;
+ qcom,time-overhead = <100>;
+ };
+
+ qcom,cpu-modes@1 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <500>;
+ qcom,ss-power = <410>;
+ qcom,energy-overhead = <603400>;
+ qcom,time-overhead = <1200>;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,cpu-modes@2 {
+ qcom,mode = "pc";
+ qcom,latency-us = <550>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <700000>;
+ qcom,time-overhead = <1410>;
+ qcom,use-broadcast-timer;
+ };
};
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_active";
- qcom,latency-us = <500>;
- qcom,ss-power = <410>;
- qcom,energy-overhead = <603400>;
- qcom,time-overhead = <1200>;
- };
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_gdhs";
- qcom,latency-us = <11700>;
- qcom,ss-power = <372>;
- qcom,energy-overhead = <738750>;
- qcom,time-overhead = <1410>;
- };
+ qcom,system-modes@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <10700>;
+ qcom,ss-power = <372>;
+ qcom,energy-overhead = <738750>;
+ qcom,time-overhead = <1410>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc_no_rpm";
- qcom,latency-us = <1000>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
- };
+ qcom,system-modes@1 {
+ qcom,l2 = "l2_cache_pc_no_rpm";
+ qcom,latency-us = <1000>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "spc";
+ qcom,sync-cpus;
+ };
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc";
- qcom,latency-us = <12700>;
- qcom,ss-power = <315>;
- qcom,energy-overhead = <1027150>;
- qcom,time-overhead = <2400>;
+ qcom,system-modes@2 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <12700>;
+ qcom,ss-power = <315>;
+ qcom,energy-overhead = <1027150>;
+ qcom,time-overhead = <2400>;
+ qcom,min-cpu-mode= "pc";
+ qcom,sync-cpus;
+ };
};
};
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 42b7887..26efa78 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -257,6 +257,24 @@
qcom,streaming-func = "rndis";
};
+ rmtfs_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0dc80000 0x00180000>;
+ reg-names = "rmtfs";
+ };
+
+ dsp_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0dc60000 0x00020000>;
+ reg-names = "rfsa_dsp";
+ };
+
+ mdm_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0dc60000 0x00020000>;
+ reg-names = "rfsa_mdm";
+ };
+
sdcc1: qcom,sdcc@f9824000 {
cell-index = <1>; /* SDC1 eMMC slot */
compatible = "qcom,msm-sdcc";
diff --git a/arch/arm/boot/dts/msm8974-bus.dtsi b/arch/arm/boot/dts/msm8974-bus.dtsi
index 609a1b3..af51327 100644
--- a/arch/arm/boot/dts/msm8974-bus.dtsi
+++ b/arch/arm/boot/dts/msm8974-bus.dtsi
@@ -1168,18 +1168,12 @@
qcom,masterp = <0>;
qcom,tier = <2>;
qcom,hw-sel = "BIMC";
- qcom,mode = "Limiter";
+ qcom,mode = "Fixed";
qcom,qport = <0>;
qcom,ws = <10000>;
qcom,mas-hw-id = <0>;
qcom,prio-rd = <0>;
qcom,prio-wr = <0>;
- qcom,mode-thresh = "Fixed";
- qcom,thresh = <2000000>;
- qcom,dual-conf;
- qcom,bimc,bw = <300000>;
- qcom,bimc,gp = <5>;
- qcom,bimc,thmp = <50>;
};
mas-ampss-m1 {
@@ -1188,18 +1182,12 @@
qcom,masterp = <1>;
qcom,tier = <2>;
qcom,hw-sel = "BIMC";
- qcom,mode = "Limiter";
+ qcom,mode = "Fixed";
qcom,qport = <1>;
qcom,ws = <10000>;
qcom,mas-hw-id = <0>;
qcom,prio-rd = <0>;
qcom,prio-wr = <0>;
- qcom,mode-thresh = "Fixed";
- qcom,thresh = <2000000>;
- qcom,dual-conf;
- qcom,bimc,bw = <300000>;
- qcom,bimc,gp = <5>;
- qcom,bimc,thmp = <50>;
};
mas-mss-proc {
diff --git a/arch/arm/boot/dts/msm8974-gpu.dtsi b/arch/arm/boot/dts/msm8974-gpu.dtsi
index 06b9c18..695e452 100644
--- a/arch/arm/boot/dts/msm8974-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8974-gpu.dtsi
@@ -22,8 +22,7 @@
qcom,chipid = <0x03030000>;
- qcom,initial-pwrlevel = <2>;
- qcom,step-pwrlevel = <2>;
+ qcom,initial-pwrlevel = <1>;
qcom,idle-timeout = <8>; //<HZ/12>
qcom,strtstp-sleepwake;
@@ -31,14 +30,17 @@
/* Bus Scale Settings */
qcom,msm-bus,name = "grp3d";
- qcom,msm-bus,num-cases = <6>;
+ qcom,msm-bus,num-cases = <9>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
+ <26 512 0 1600000>, <89 604 0 3000000>,
<26 512 0 2200000>, <89 604 0 3000000>,
<26 512 0 4000000>, <89 604 0 3000000>,
+ <26 512 0 2200000>, <89 604 0 4500000>,
<26 512 0 4000000>, <89 604 0 4500000>,
<26 512 0 6400000>, <89 604 0 4500000>,
+ <26 512 0 4000000>, <89 604 0 7600000>,
<26 512 0 6400000>, <89 604 0 7600000>;
/* GDSC oxili regulators */
@@ -67,40 +69,26 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
qcom,gpu-freq = <450000000>;
- qcom,bus-freq = <5>;
+ qcom,bus-freq = <8>;
qcom,io-fraction = <33>;
};
qcom,gpu-pwrlevel@1 {
reg = <1>;
qcom,gpu-freq = <320000000>;
- qcom,bus-freq = <4>;
+ qcom,bus-freq = <5>;
qcom,io-fraction = <66>;
};
qcom,gpu-pwrlevel@2 {
reg = <2>;
- qcom,gpu-freq = <320000000>;
- qcom,bus-freq = <3>;
- qcom,io-fraction = <66>;
- };
-
- qcom,gpu-pwrlevel@3 {
- reg = <3>;
qcom,gpu-freq = <200000000>;
qcom,bus-freq = <2>;
qcom,io-fraction = <100>;
};
- qcom,gpu-pwrlevel@4 {
- reg = <4>;
- qcom,gpu-freq = <200000000>;
- qcom,bus-freq = <1>;
- qcom,io-fraction = <100>;
- };
-
- qcom,gpu-pwrlevel@5 {
- reg = <5>;
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
qcom,gpu-freq = <27000000>;
qcom,bus-freq = <0>;
qcom,io-fraction = <0>;
diff --git a/arch/arm/boot/dts/msm8974-ion.dtsi b/arch/arm/boot/dts/msm8974-ion.dtsi
index 455ed2d..5829f05 100644
--- a/arch/arm/boot/dts/msm8974-ion.dtsi
+++ b/arch/arm/boot/dts/msm8974-ion.dtsi
@@ -16,12 +16,14 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
+ qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
};
- qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
+ qcom,ion-heap@21 {
reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
@@ -29,6 +31,7 @@
reg = <8>;
qcom,heap-align = <0x1000>;
linux,contiguous-region = <&secure_mem>;
+ qcom,ion-heap-type = "SECURE_DMA";
};
qcom,ion-heap@22 { /* adsp heap */
@@ -36,16 +39,14 @@
reg = <22>;
qcom,heap-align = <0x1000>;
linux,contiguous-region = <&adsp_mem>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
- reg = <25>;
+ qcom,ion-heap-type = "DMA";
};
qcom,ion-heap@27 { /* QSECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;
linux,contiguous-region = <&qsecom_mem>;
+ qcom,ion-heap-type = "DMA";
};
qcom,ion-heap@28 { /* AUDIO HEAP */
@@ -54,6 +55,7 @@
qcom,heap-align = <0x1000>;
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0x614000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
qcom,ion-heap@23 { /* OTHER PIL HEAP */
@@ -61,6 +63,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x05d00000 0x1e00000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
};
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
index eba053f..e2d40f7 100644
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
@@ -130,68 +130,65 @@
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_retention";
#address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <1>;
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_retention";
- qcom,latency-us = <1>;
- qcom,ss-power = <715>;
- qcom,energy-overhead = <17700>;
- qcom,time-overhead = <2>;
+ qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+ qcom,cpu-mode@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <715>;
+ qcom,energy-overhead = <17700>;
+ qcom,time-overhead = <2>;
+ };
+
+ qcom,cpu-mode@1 {
+ qcom,mode = "retention";
+ qcom,latency-us = <35>;
+ qcom,ss-power = <542>;
+ qcom,energy-overhead = <34920>;
+ qcom,time-overhead = <40>;
+ };
+
+ qcom,cpu-mode@2 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <300>;
+ qcom,ss-power = <476>;
+ qcom,energy-overhead = <225300>;
+ qcom,time-overhead = <350>;
+ };
+
+ qcom,cpu-mode@3 {
+ qcom,mode = "pc";
+ qcom,latency-us = <500>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <280000>;
+ qcom,time-overhead = <500>;
+ };
+
};
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = "retention";
- qcom,l2 = "l2_cache_retention";
- qcom,latency-us = <35>;
- qcom,ss-power = <542>;
- qcom,energy-overhead = <34920>;
- qcom,time-overhead = <40>;
- };
+ qcom,system-mode@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <20000>;
+ qcom,ss-power = <163>;
+ qcom,energy-overhead = <1577736>;
+ qcom,time-overhead = <5067>;
+ qcom,min-cpu-mode = "pc";
+ qcom,sync-cpus;
+ };
-
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_retention";
- qcom,latency-us = <300>;
- qcom,ss-power = <476>;
- qcom,energy-overhead = <225300>;
- qcom,time-overhead = <350>;
- };
-
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_gdhs";
- qcom,latency-us = <320>;
- qcom,ss-power = <476>;
- qcom,energy-overhead = <225300>;
- qcom,time-overhead = <375>;
- };
-
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_gdhs";
- qcom,gpio-detectable;
- qcom,latency-us = <20000>;
- qcom,ss-power = <163>;
- qcom,energy-overhead = <1577736>;
- qcom,time-overhead = <5067>;
- };
-
- qcom,lpm-level@5 {
- reg = <0x5>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc";
- qcom,latency-us = <30000>;
- qcom,ss-power = <83>;
- qcom,energy-overhead = <2274420>;
- qcom,time-overhead = <6605>;
+ qcom,system-mode@1 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <30000>;
+ qcom,ss-power = <83>;
+ qcom,energy-overhead = <2274420>;
+ qcom,time-overhead = <6605>;
+ qcom,min-cpu-mode = "pc";
+ qcom,sync-cpus;
+ };
};
};
diff --git a/arch/arm/boot/dts/msm8974-v2.2.dtsi b/arch/arm/boot/dts/msm8974-v2.2.dtsi
index 0ca021b..3ed5720 100644
--- a/arch/arm/boot/dts/msm8974-v2.2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.2.dtsi
@@ -23,20 +23,26 @@
/* Updated chip ID */
qcom,chipid = <0x03030001>;
- qcom,initial-pwrlevel = <4>;
+ qcom,initial-pwrlevel = <2>;
/* Updated bus bandwidth requirements */
qcom,msm-bus,vectors-KBps =
/* Off */
<26 512 0 0>, <89 604 0 0>,
+ /* Sub-SVS / SVS */
+ <26 512 0 1600000>, <89 604 0 3000000>,
/* SVS */
<26 512 0 2400000>, <89 604 0 3000000>,
/* Nominal / SVS */
<26 512 0 4656000>, <89 604 0 3000000>,
+ /* SVS / Nominal */
+ <26 512 0 2400000>, <89 604 0 5120000>,
/* Nominal */
<26 512 0 4656000>, <89 604 0 5120000>,
/* Turbo / Nominal */
<26 512 0 7464000>, <89 604 0 5120000>,
+ /* Nominal / Turbo */
+ <26 512 0 4656000>, <89 604 0 6400000>,
/* Turbo */
<26 512 0 7464000>, <89 604 0 6400000>;
@@ -49,54 +55,33 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
qcom,gpu-freq = <450000000>;
- qcom,bus-freq = <5>;
+ qcom,bus-freq = <8>;
qcom,io-fraction = <33>;
};
qcom,gpu-pwrlevel@1 {
reg = <1>;
qcom,gpu-freq = <389000000>;
- qcom,bus-freq = <4>;
+ qcom,bus-freq = <5>;
qcom,io-fraction = <33>;
};
qcom,gpu-pwrlevel@2 {
reg = <2>;
- qcom,gpu-freq = <389000000>;
- qcom,bus-freq = <3>;
+ qcom,gpu-freq = <320000000>;
+ qcom,bus-freq = <5>;
qcom,io-fraction = <66>;
};
qcom,gpu-pwrlevel@3 {
reg = <3>;
- qcom,gpu-freq = <320000000>;
- qcom,bus-freq = <4>;
- qcom,io-fraction = <66>;
- };
-
- qcom,gpu-pwrlevel@4 {
- reg = <4>;
- qcom,gpu-freq = <320000000>;
- qcom,bus-freq = <3>;
- qcom,io-fraction = <66>;
- };
-
- qcom,gpu-pwrlevel@5 {
- reg = <5>;
qcom,gpu-freq = <200000000>;
qcom,bus-freq = <2>;
qcom,io-fraction = <100>;
};
- qcom,gpu-pwrlevel@6 {
- reg = <6>;
- qcom,gpu-freq = <200000000>;
- qcom,bus-freq = <1>;
- qcom,io-fraction = <100>;
- };
-
- qcom,gpu-pwrlevel@7 {
- reg = <7>;
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
qcom,gpu-freq = <27000000>;
qcom,bus-freq = <0>;
qcom,io-fraction = <0>;
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 0da5658..6784068 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -58,14 +58,20 @@
qcom,msm-bus,vectors-KBps =
/* Off */
<26 512 0 0>, <89 604 0 0>,
+ /* Sub-SVS / SVS */
+ <26 512 0 1600000>, <89 604 0 3000000>,
/* SVS */
<26 512 0 2400000>, <89 604 0 3000000>,
/* Nominal / SVS */
- <26 512 0 4656000>, <89 604 0 3000000>,
+ <26 512 0 4912000>, <89 604 0 3000000>,
+ /* SVS / Nominal */
+ <26 512 0 2400000>, <89 604 0 5120000>,
/* Nominal */
- <26 512 0 4656000>, <89 604 0 5120000>,
+ <26 512 0 4912000>, <89 604 0 5120000>,
/* Turbo / Nominal */
<26 512 0 7464000>, <89 604 0 5120000>,
+ /* Nominal / Turbo */
+ <26 512 0 4912000>, <89 604 0 6400000>,
/* Turbo */
<26 512 0 7464000>, <89 604 0 6400000>;
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index de49851..0412c73 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -283,6 +283,24 @@
<87 512 60000 960000>;
};
+ rmtfs_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd80000 0x00180000>;
+ reg-names = "rmtfs";
+ };
+
+ dsp_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd60000 0x00020000>;
+ reg-names = "rfsa_dsp";
+ };
+
+ mdm_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd60000 0x00020000>;
+ reg-names = "rfsa_mdm";
+ };
+
sdcc1: qcom,sdcc@f9824000 {
cell-index = <1>; /* SDC1 eMMC slot */
compatible = "qcom,msm-sdcc";
diff --git a/arch/arm/boot/dts/msm8974pro-ion.dtsi b/arch/arm/boot/dts/msm8974pro-ion.dtsi
index 4c427bf..3bb885a 100644
--- a/arch/arm/boot/dts/msm8974pro-ion.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-ion.dtsi
@@ -18,6 +18,7 @@
reg = <23>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x05a00000 0x2100000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
qcom,ion-heap@26 { /* MODEM HEAP */
@@ -25,6 +26,7 @@
reg = <26>;
qcom,heap-align = <0x1000>;
qcom,memory-fixed = <0x08000000 0x5000000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
};
diff --git a/arch/arm/boot/dts/msm8974pro-pm.dtsi b/arch/arm/boot/dts/msm8974pro-pm.dtsi
index 5769446..366faef 100644
--- a/arch/arm/boot/dts/msm8974pro-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm.dtsi
@@ -128,60 +128,69 @@
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
+ qcom,allow-synced-levels;
qcom,default-l2-state = "l2_cache_retention";
#address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <1>;
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = "wfi";
- qcom,l2 = "l2_cache_retention";
- qcom,latency-us = <1>;
- qcom,ss-power = <715>;
- qcom,energy-overhead = <17700>;
- qcom,time-overhead = <2>;
+ qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+ qcom,cpu-mode@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <715>;
+ qcom,energy-overhead = <17700>;
+ qcom,time-overhead = <2>;
+ };
+
+ qcom,cpu-mode@1 {
+ qcom,mode = "retention";
+ qcom,latency-us = <35>;
+ qcom,ss-power = <542>;
+ qcom,energy-overhead = <34920>;
+ qcom,time-overhead = <40>;
+ };
+
+ qcom,cpu-mode@2 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <300>;
+ qcom,ss-power = <476>;
+ qcom,energy-overhead = <225300>;
+ qcom,time-overhead = <350>;
+ };
+
+ qcom,cpu-mode@3 {
+ qcom,mode = "pc";
+ qcom,latency-us = <500>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <280000>;
+ qcom,time-overhead = <500>;
+ qcom,use-broadcast-timer;
+ };
+
};
+ qcom,system-modes {
+ compatible = "qcom,system-modes";
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = "retention";
- qcom,l2 = "l2_cache_retention";
- qcom,latency-us = <35>;
- qcom,ss-power = <542>;
- qcom,energy-overhead = <34920>;
- qcom,time-overhead = <40>;
- };
+ qcom,system-mode@0 {
+ qcom,l2 = "l2_cache_gdhs";
+ qcom,latency-us = <20000>;
+ qcom,ss-power = <163>;
+ qcom,energy-overhead = <1577736>;
+ qcom,time-overhead = <5067>;
+ qcom,min-cpu-mode = "pc";
+ qcom,sync-cpus;
+ };
-
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = "standalone_pc";
- qcom,l2 = "l2_cache_retention";
- qcom,latency-us = <300>;
- qcom,ss-power = <476>;
- qcom,energy-overhead = <225300>;
- qcom,time-overhead = <350>;
- };
-
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_gdhs";
- qcom,gpio-detectable;
- qcom,latency-us = <20000>;
- qcom,ss-power = <163>;
- qcom,energy-overhead = <1577736>;
- qcom,time-overhead = <5067>;
- };
-
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = "pc";
- qcom,l2 = "l2_cache_pc";
- qcom,latency-us = <30000>;
- qcom,ss-power = <83>;
- qcom,energy-overhead = <2274420>;
- qcom,time-overhead = <6605>;
+ qcom,system-mode@1 {
+ qcom,l2 = "l2_cache_pc";
+ qcom,latency-us = <30000>;
+ qcom,ss-power = <83>;
+ qcom,energy-overhead = <2274420>;
+ qcom,time-overhead = <6605>;
+ qcom,min-cpu-mode = "pc";
+ qcom,sync-cpus;
+ };
};
};
@@ -313,7 +322,6 @@
ranges;
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_int";
- qcom,use-sync-timer;
qcom,cpus-as-clocks;
qcom,pm-snoc-client {
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index 85c2fe3..ac5ccb9 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -1550,21 +1550,21 @@
qcom,msm-cpufreq@0 {
qcom,cpufreq-table =
- < 300000 300000 600 /* 75 MHz */ >,
- < 422400 422400 1200 /* 150 MHz */ >,
- < 652800 499200 1600 /* 200 MHz */ >,
- < 729600 576000 2456 /* 307 MHz */ >,
- < 883200 576000 2456 /* 307 MHz */ >,
- < 960000 960000 3680 /* 460 MHz */ >,
- < 1036800 1036800 3680 /* 460 MHz */ >,
- < 1190400 1036800 3680 /* 460 MHz */ >,
- < 1267200 1267200 4912 /* 614 MHz */ >,
- < 1497600 1497600 4912 /* 614 MHz */ >,
- < 1574400 1574400 6400 /* 800 MHz */ >,
- < 1728000 1651200 6400 /* 800 MHz */ >,
- < 1958400 1728000 7448 /* 931 MHz */ >,
- < 2265600 1728000 7448 /* 931 MHz */ >,
- < 2457600 1728000 7448 /* 931 MHz */ >;
+ < 300000 300000 300 /* 37.5 MHz */ >,
+ < 422400 422400 300 /* 37.5 MHz */ >,
+ < 652800 499200 300 /* 37.5 MHz */ >,
+ < 729600 576000 300 /* 37.5 MHz */ >,
+ < 883200 576000 300 /* 37.5 MHz */ >,
+ < 960000 960000 300 /* 37.5 MHz */ >,
+ < 1036800 1036800 300 /* 37.5 MHz */ >,
+ < 1190400 1036800 300 /* 37.5 MHz */ >,
+ < 1267200 1267200 300 /* 37.5 MHz */ >,
+ < 1497600 1497600 300 /* 37.5 MHz */ >,
+ < 1574400 1574400 300 /* 37.5 MHz */ >,
+ < 1728000 1651200 300 /* 37.5 MHz */ >,
+ < 1958400 1728000 300 /* 37.5 MHz */ >,
+ < 2265600 1728000 300 /* 37.5 MHz */ >,
+ < 2496000 1728000 300 /* 37.5 MHz */ >;
};
};
@@ -1572,31 +1572,41 @@
&msm_gpu {
/* Updated chip ID */
qcom,chipid = <0x03030002>;
+ qcom,msm-bus,num-cases = <15>;
+ qcom,bus-control;
+ qcom,initial-pwrlevel = <3>;
- qcom,initial-pwrlevel = <6>;
-
- qcom,msm-bus,num-cases = <10>;
/* Updated bus bandwidth requirements */
qcom,msm-bus,vectors-KBps =
/* Off */
<26 512 0 0>, <89 604 0 0>,
+ /* Sub-SVS / SVS */
+ <26 512 0 1600000>, <89 604 0 3200000>,
/* SVS */
- <26 512 0 2400000>, <89 604 0 3200000>,
- /* Nominal / SVS */
+ <26 512 0 2456000>, <89 604 0 3200000>,
+ /* low Nominal / SVS */
<26 512 0 3680000>, <89 604 0 3200000>,
- /* Nominal / Nominal */
+ /* SVS / low Nominal */
+ <26 512 0 2456000>, <89 604 0 5280000>,
+ /* low Nominal / low Nominal */
<26 512 0 3680000>, <89 604 0 5280000>,
- /* Nominal / Nominal */
+ /* Nominal / low Nominal */
<26 512 0 4912000>, <89 604 0 5280000>,
- /* Nominal / Turbo */
+ /* low Nominal / Nominal */
+ <26 512 0 3680000>, <89 604 0 6224000>,
+ /* Nominal / Nominal */
<26 512 0 4912000>, <89 604 0 6224000>,
- /* Turbo / Turbo */
- <26 512 0 7464000>, <89 604 0 6224000>,
+ /* low Turbo / Nominal */
+ <26 512 0 6400000>, <89 604 0 6224000>,
+ /* Nominal / low Turbo */
+ <26 512 0 4912000>, <89 604 0 7398000>,
+ /* low Turbo / low Turbo */
+ <26 512 0 6400000>, <89 604 0 7398000>,
+ /* Turbo / low Turbo */
+ <26 512 0 7464000>, <89 604 0 7398000>,
/* Nominal / Turbo */
- <26 512 0 4912000>, <89 604 0 7400000>,
- /* Turbo */
- <26 512 0 7464000>, <89 604 0 7400000>,
- /* Turbo */
+ <26 512 0 4912000>, <89 604 0 9248000>,
+ /* Turbo / Turbo */
<26 512 0 7464000>, <89 604 0 9248000>;
qcom,gpu-pwrlevels {
@@ -1608,68 +1618,40 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
qcom,gpu-freq = <578000000>;
- qcom,bus-freq = <9>;
+ qcom,bus-freq = <14>;
qcom,io-fraction = <33>;
};
- qcom,gpu-pwrlevel@1 {
- reg = <1>;
- qcom,gpu-freq = <462400000>;
- qcom,bus-freq = <8>;
- qcom,io-fraction = <33>;
- };
-
- qcom,gpu-pwrlevel@2 {
- reg = <2>;
- qcom,gpu-freq = <462400000>;
- qcom,bus-freq = <7>;
- qcom,io-fraction = <66>;
- };
-
- qcom,gpu-pwrlevel@3 {
- reg = <3>;
- qcom,gpu-freq = <389000000>;
- qcom,bus-freq = <6>;
- qcom,io-fraction = <66>;
- };
-
- qcom,gpu-pwrlevel@4 {
- reg = <4>;
- qcom,gpu-freq = <389000000>;
- qcom,bus-freq = <5>;
- qcom,io-fraction = <66>;
- };
-
- qcom,gpu-pwrlevel@5 {
- reg = <5>;
- qcom,gpu-freq = <330000000>;
- qcom,bus-freq = <4>;
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <462400000>;
+ qcom,bus-freq = <11>;
qcom,io-fraction = <66>;
};
- qcom,gpu-pwrlevel@6 {
- reg = <6>;
- qcom,gpu-freq = <330000000>;
- qcom,bus-freq = <3>;
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <389000000>;
+ qcom,bus-freq = <8>;
qcom,io-fraction = <66>;
};
- qcom,gpu-pwrlevel@7 {
- reg = <7>;
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <320000000>;
+ qcom,bus-freq = <5>;
+ qcom,io-fraction = <100>;
+ };
+
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
qcom,gpu-freq = <200000000>;
qcom,bus-freq = <2>;
qcom,io-fraction = <100>;
};
- qcom,gpu-pwrlevel@8 {
- reg = <8>;
- qcom,gpu-freq = <200000000>;
- qcom,bus-freq = <1>;
- qcom,io-fraction = <100>;
- };
-
- qcom,gpu-pwrlevel@9 {
- reg = <9>;
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
qcom,gpu-freq = <27000000>;
qcom,bus-freq = <0>;
qcom,io-fraction = <0>;
diff --git a/arch/arm/boot/dts/msm9625-ion.dtsi b/arch/arm/boot/dts/msm9625-ion.dtsi
index 2a3e4b5..3ef0b3f 100644
--- a/arch/arm/boot/dts/msm9625-ion.dtsi
+++ b/arch/arm/boot/dts/msm9625-ion.dtsi
@@ -16,12 +16,9 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
+ qcom,ion-heap@25 {
reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@28 { /* AUDIO HEAP */
@@ -30,6 +27,7 @@
qcom,heap-align = <0x1000>;
qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
qcom,memory-reservation-size = <0xAF000>;
+ qcom,ion-heap-type = "CARVEOUT";
};
};
};
diff --git a/arch/arm/boot/dts/msmsamarium-ion.dtsi b/arch/arm/boot/dts/msmsamarium-ion.dtsi
index ea954b8..167b8b7 100644
--- a/arch/arm/boot/dts/msmsamarium-ion.dtsi
+++ b/arch/arm/boot/dts/msmsamarium-ion.dtsi
@@ -16,16 +16,14 @@
#address-cells = <1>;
#size-cells = <0>;
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
-
- qcom,ion-heap@21 { /* SYSTEM CONTIG HEAP */
- reg = <21>;
- };
-
- qcom,ion-heap@25 { /* IOMMU HEAP */
+ qcom,ion-heap@25 {
reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
};
};
};
diff --git a/arch/arm/boot/dts/msmsamarium.dtsi b/arch/arm/boot/dts/msmsamarium.dtsi
index a492561..6c55566 100644
--- a/arch/arm/boot/dts/msmsamarium.dtsi
+++ b/arch/arm/boot/dts/msmsamarium.dtsi
@@ -65,6 +65,24 @@
reg = <0xfe805000 0x1000>; /* Address and size of IMEM */
};
+ rmtfs_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd80000 0x00180000>;
+ reg-names = "rmtfs";
+ };
+
+ dsp_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd60000 0x00020000>;
+ reg-names = "rfsa_dsp";
+ };
+
+ mdm_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0fd60000 0x00020000>;
+ reg-names = "rfsa_mdm";
+ };
+
sdcc1: qcom,sdcc@f9824000 {
cell-index = <1>; /* SDC1 eMMC slot */
compatible = "qcom,msm-sdcc";
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index 818e052..dac2286 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -396,6 +396,8 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index c1f2ca2..9d4d37b 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -421,6 +421,8 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index efdd8de..380dde78 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -79,9 +79,9 @@
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
CONFIG_COMPACTION=y
CONFIG_CC_STACKPROTECTOR=y
+CONFIG_KSM=y
CONFIG_ENABLE_VMALLOC_SAVING=y
CONFIG_CP_ACCESS=y
CONFIG_USE_OF=y
@@ -355,7 +355,11 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ASHMEM=y
@@ -371,6 +375,7 @@
CONFIG_QPNP_VIBRATOR=y
CONFIG_QPNP_REVID=y
CONFIG_MSM_IOMMU_V0=y
+CONFIG_SENSORS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index c140a46..a04a17d 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -80,9 +80,9 @@
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
CONFIG_COMPACTION=y
CONFIG_CC_STACKPROTECTOR=y
+CONFIG_KSM=y
CONFIG_ENABLE_VMALLOC_SAVING=y
CONFIG_CP_ACCESS=y
CONFIG_USE_OF=y
@@ -379,7 +379,11 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ASHMEM=y
@@ -409,6 +413,7 @@
CONFIG_CORESIGHT_WCN_ETM=y
CONFIG_CORESIGHT_RPM_ETM=y
CONFIG_CORESIGHT_EVENT=m
+CONFIG_SENSORS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index fb05a08..731538f 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -104,6 +104,7 @@
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
@@ -435,6 +436,8 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 8f6f52f..67ceaed 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -110,6 +110,7 @@
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
@@ -443,6 +444,8 @@
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_MSM is not set
CONFIG_RTC_DRV_QPNP=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 9ba1436..0e8f4916 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -49,6 +49,13 @@
*
* Unconditionally clean and invalidate the entire cache.
*
+ * flush_kern_louis()
+ *
+ * Flush data cache levels up to the level of unification
+ * inner shareable and invalidate the I-cache.
+ * Only needed from v7 onwards, falls back to flush_cache_all()
+ * for all other processor versions.
+ *
* flush_user_all()
*
* Clean and invalidate all user space cache entries
@@ -112,6 +119,7 @@
struct cpu_cache_fns {
void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
+ void (*flush_kern_louis)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -136,6 +144,7 @@
#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
+#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
@@ -158,6 +167,7 @@
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -225,6 +235,11 @@
__flush_icache_preferred();
}
+/*
+ * Flush caches up to Level of Unification Inner Shareable
+ */
+#define flush_cache_louis() __cpuc_flush_kern_louis()
+
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 7e30874..2d6a7de 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,7 @@
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 92e4f18..8c0a923 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -480,7 +480,7 @@
if (!clk)
return -ENOMEM;
- clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
clk->set_mode = arch_timer_set_mode_mem;
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index a341c23..4b8d443 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -266,7 +266,7 @@
select MSM_L2_SPM
select MSM_NATIVE_RESTART
select MSM_RESTART_V2
- select MSM_PM8X60 if PM
+ select MSM_PM if PM
select MAY_HAVE_SPARSE_IRQ
select SPARSE_IRQ
select MSM_RPM_SMD
@@ -297,7 +297,7 @@
select MULTI_IRQ_HANDLER
select MSM_NATIVE_RESTART
select MSM_RESTART_V2
- select MSM_PM8X60 if PM
+ select MSM_PM if PM
select MAY_HAVE_SPARSE_IRQ
select SPARSE_IRQ
select REGULATOR
@@ -308,7 +308,6 @@
select QMI_ENCDEC
select MSM_SPM_V2
select MSM_L2_SPM
- select MSM_PM8X60 if PM
select MSM_RPM_SMD
select ENABLE_VMALLOC_SAVINGS
@@ -428,13 +427,14 @@
select MSM_NATIVE_RESTART
select MSM_RESTART_V2
select MSM_SPM_V2
- select MSM_PM8X60 if PM
+ select MSM_PM if PM
select MULTI_IRQ_HANDLER
select GPIO_MSM_V3
select MAY_HAVE_SPARSE_IRQ
select SPARSE_IRQ
select MEMORY_HOLE_CARVEOUT
select QMI_ENCDEC
+ select MSM_CORTEX_A7
config ARCH_MSM8610
bool "MSM8610"
@@ -458,9 +458,10 @@
select MSM_RPM_SMD
select MSM_SPM_V2
select MSM_L2_SPM
- select MSM_PM8X60 if PM
+ select MSM_PM if PM
select MEMORY_HOLE_CARVEOUT
select MSM_BUS_SCALING
+ select MSM_CORTEX_A7
select CPU_FREQ_MSM
select CPU_FREQ
select MSM_PIL
@@ -498,9 +499,10 @@
select MSM_RPM_SMD
select MSM_SPM_V2
select MSM_L2_SPM
- select MSM_PM8X60 if PM
+ select MSM_PM if PM
select MEMORY_HOLE_CARVEOUT
select MSM_BUS_SCALING
+ select MSM_CORTEX_A7
select CPU_FREQ_MSM
select CPU_FREQ
select MSM_PIL
@@ -530,7 +532,7 @@
select MSM_L2_SPM
select MSM_NATIVE_RESTART
select MSM_RESTART_V2
- select MSM_PM8X60 if PM
+ select MSM_PM if PM
select MAY_HAVE_SPARSE_IRQ
select SPARSE_IRQ
select ARM_HAS_SG_CHAIN
@@ -573,6 +575,9 @@
bool
select ARM_L1_CACHE_SHIFT_6
+config MSM_CORTEX_A7
+ bool
+
config MSM_SMP
select HAVE_SMP
bool
@@ -655,7 +660,7 @@
config MSM_LPM_TEST
bool "Low Power Mode test framework"
depends on MSM_RPM || MSM_RPM_SMD
- depends on MSM_PM8X60
+ depends on MSM_PM
help
LPM_TEST is a test framework that assists in exercising the low
power mode algorithm on MSM targets. This test framework tracks
@@ -2431,7 +2436,7 @@
depends on PM
bool
-config MSM_PM8X60
+config MSM_PM
depends on PM
bool
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index eacdcdf..4fc1590 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -23,6 +23,7 @@
endif
obj-y += acpuclock.o
+obj-$(CONFIG_MSM_CORTEX_A7) += clock-a7.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_trace_counters.o
obj-$(CONFIG_ARCH_MSM_KRAIT) += acpuclock-krait.o clock-krait.o
ifdef CONFIG_ARCH_MSM_KRAIT
@@ -206,7 +207,7 @@
endif
obj-$(CONFIG_MSM_SYSMON_COMM) += sysmon.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+#obj-$(CONFIG_CPU_IDLE) += cpuidle.o
ifdef CONFIG_MSM_CAMERA_V4L2
obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-camera.o
@@ -310,7 +311,7 @@
obj-$(CONFIG_ARCH_MSM8610) += clock-dsi-8610.o
obj-$(CONFIG_ARCH_MSMKRYPTON) += clock-local2.o clock-pll.o clock-krypton.o clock-rpm.o clock-voter.o
-obj-$(CONFIG_MSM_PM8X60) += pm-8x60.o pm-data.o
+obj-$(CONFIG_MSM_PM) += msm-pm.o pm-data.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
@@ -431,3 +432,5 @@
obj-$(CONFIG_WALL_CLK) += wallclk.o
obj-$(CONFIG_WALL_CLK_SYSFS) += wallclk_sysfs.o
obj-$(CONFIG_ARCH_RANDOM) += early_random.o
+obj-$(CONFIG_PERFMAP) += perfmap.o
+obj-$(CONFIG_ARCH_MSM8974) += cpubw-krait.o
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index baa1c7b..f2818af 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -340,7 +340,7 @@
/* Construct the freq_table tables from priv->freq_tbl. */
for (i = 0; priv->freq_tbl[i].khz != 0
- && freq_cnt < ARRAY_SIZE(freq_table); i++) {
+ && freq_cnt < ARRAY_SIZE(freq_table) - 1; i++) {
if (!priv->freq_tbl[i].use_for_scaling)
continue;
freq_table[freq_cnt].index = freq_cnt;
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index cf3fac0..c5b1deb 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -761,15 +761,22 @@
}
/*
- * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
- * requires a corresponding target L2 frequency that needs the L2 to
- * run off of an HFPLL.
+ * Vote for the L2 HFPLL regulators if _this_ CPU's frequency requires
+ * a corresponding target L2 frequency that needs the L2 to run off an HFPLL.
*/
- if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
- l2_vreg_count++;
+ if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL) {
+ ret = enable_l2_regulators();
+ if (ret) {
+ dev_err(drv.dev, "enable_l2_regulators() failed (%d)\n",
+ ret);
+ goto err_l2_regs;
+ }
+ }
return 0;
+err_l2_regs:
+ regulator_disable(sc->vreg[VREG_CORE].reg);
err_core_conf:
regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
@@ -901,7 +908,7 @@
ret = -ENODEV;
goto err_table;
}
- dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
+ dev_warn(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
cpu, acpu_level->speed.khz);
} else {
dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
@@ -1208,7 +1215,7 @@
l2_level = find_cur_l2_level();
if (!l2_level) {
l2_level = drv.l2_freq_tbl;
- dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
+ dev_warn(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
l2_level->speed.khz);
} else {
dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index a883e39..53cea4e 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -3163,6 +3163,8 @@
CLK_LOOKUP("xo", xo_a_clk.c, "f9011050.qcom,acpuclk"),
CLK_LOOKUP("gpll0", gpll0_ao.c, "f9011050.qcom,acpuclk"),
CLK_LOOKUP("a7sspll", a7sspll.c, "f9011050.qcom,acpuclk"),
+ CLK_LOOKUP("clk-4", gpll0_ao.c, "f9011050.qcom,clock-a7"),
+ CLK_LOOKUP("clk-5", a7sspll.c, "f9011050.qcom,clock-a7"),
CLK_LOOKUP("kpss_ahb", kpss_ahb_clk_src.c, ""),
/* WCNSS CLOCKS */
diff --git a/arch/arm/mach-msm/clock-a7.c b/arch/arm/mach-msm/clock-a7.c
new file mode 100644
index 0000000..5b8dc4e
--- /dev/null
+++ b/arch/arm/mach-msm/clock-a7.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+
+#include <mach/clock-generic.h>
+#include "clock-local2.h"
+
+#define UPDATE_CHECK_MAX_LOOPS 200
+
+struct cortex_reg_data {
+ u32 cmd_offset;
+ u32 update_mask;
+ u32 poll_mask;
+};
+
+#define DIV_REG(x) ((x)->base + (x)->div_offset)
+#define SRC_REG(x) ((x)->base + (x)->src_offset)
+#define CMD_REG(x) ((x)->base + \
+ ((struct cortex_reg_data *)(x)->priv)->cmd_offset)
+
+static int update_config(struct mux_div_clk *md)
+{
+ u32 regval, count;
+ struct cortex_reg_data *r = md->priv;
+
+ /* Update the configuration */
+ regval = readl_relaxed(CMD_REG(md));
+ regval |= r->update_mask;
+ writel_relaxed(regval, CMD_REG(md));
+
+ /* Wait for update to take effect */
+ for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+ if (!(readl_relaxed(CMD_REG(md)) &
+ r->poll_mask))
+ return 0;
+ udelay(1);
+ }
+
+ CLK_WARN(&md->c, true, "didn't update its configuration.");
+
+ return -EINVAL;
+}
+
+static void cortex_get_config(struct mux_div_clk *md, u32 *src_sel, u32 *div)
+{
+ u32 regval;
+
+ regval = readl_relaxed(DIV_REG(md));
+ regval &= (md->div_mask << md->div_shift);
+ *div = regval >> md->div_shift;
+ *div = max((u32)1, (*div + 1) / 2);
+
+ regval = readl_relaxed(SRC_REG(md));
+ regval &= (md->src_mask << md->src_shift);
+ *src_sel = regval >> md->src_shift;
+}
+
+static int cortex_set_config(struct mux_div_clk *md, u32 src_sel, u32 div)
+{
+ u32 regval;
+
+ div = div ? ((2 * div) - 1) : 0;
+ regval = readl_relaxed(DIV_REG(md));
+ regval &= ~(md->div_mask << md->div_shift);
+ regval |= div << md->div_shift;
+ writel_relaxed(regval, DIV_REG(md));
+
+ regval = readl_relaxed(SRC_REG(md));
+ regval &= ~(md->src_mask << md->src_shift);
+ regval |= src_sel << md->src_shift;
+ writel_relaxed(regval, SRC_REG(md));
+
+ return update_config(md);
+}
+
+static int cortex_enable(struct mux_div_clk *md)
+{
+ u32 src_sel = parent_to_src_sel(md->parents, md->num_parents,
+ md->c.parent);
+ return cortex_set_config(md, src_sel, md->data.div);
+}
+
+static void cortex_disable(struct mux_div_clk *md)
+{
+ u32 src_sel = parent_to_src_sel(md->parents, md->num_parents,
+ md->safe_parent);
+ cortex_set_config(md, src_sel, md->safe_div);
+}
+
+static bool cortex_is_enabled(struct mux_div_clk *md)
+{
+ return true;
+}
+
+struct mux_div_ops cortex_mux_div_ops = {
+ .set_src_div = cortex_set_config,
+ .get_src_div = cortex_get_config,
+ .is_enabled = cortex_is_enabled,
+ .enable = cortex_enable,
+ .disable = cortex_disable,
+};
+
+static struct cortex_reg_data a7ssmux_priv = {
+ .cmd_offset = 0x0,
+ .update_mask = BIT(0),
+ .poll_mask = BIT(0),
+};
+
+DEFINE_VDD_REGS_INIT(vdd_cpu, 1);
+
+static struct mux_div_clk a7ssmux = {
+ .ops = &cortex_mux_div_ops,
+ .safe_freq = 300000000,
+ .data = {
+ .max_div = 8,
+ .min_div = 1,
+ },
+ .c = {
+ .dbg_name = "a7ssmux",
+ .ops = &clk_ops_mux_div_clk,
+ .vdd_class = &vdd_cpu,
+ CLK_INIT(a7ssmux.c),
+ },
+ .parents = (struct clk_src[8]) {},
+ .priv = &a7ssmux_priv,
+ .div_offset = 0x4,
+ .div_mask = BM(4, 0),
+ .div_shift = 0,
+ .src_offset = 0x4,
+ .src_mask = BM(10, 8) >> 8,
+ .src_shift = 8,
+};
+
+static struct clk_lookup clock_tbl_a7[] = {
+ CLK_LOOKUP("cpu0_clk", a7ssmux.c, "0.qcom,msm-cpufreq"),
+ CLK_LOOKUP("cpu0_clk", a7ssmux.c, "fe805664.qcom,pm-8x60"),
+};
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+ char *prop_name)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int prop_len, i;
+ struct clk_vdd_class *vdd = c->vdd_class;
+ u32 *array;
+
+ if (!of_find_property(of, prop_name, &prop_len)) {
+ dev_err(&pdev->dev, "missing %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % 2) {
+ dev_err(&pdev->dev, "bad length %d\n", prop_len);
+ return -EINVAL;
+ }
+
+ prop_len /= 2;
+ vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!c->fmax)
+ return -ENOMEM;
+
+ array = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32), GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ of_property_read_u32_array(of, prop_name, array, prop_len * 2);
+ for (i = 0; i < prop_len; i++) {
+ c->fmax[i] = array[2 * i];
+ vdd->vdd_uv[i] = array[2 * i + 1];
+ }
+
+ devm_kfree(&pdev->dev, array);
+ vdd->num_levels = prop_len;
+ vdd->cur_level = prop_len;
+ c->num_fmax = prop_len;
+ return 0;
+}
+
+static void get_speed_bin(struct platform_device *pdev, int *bin, int *version)
+{
+ struct resource *res;
+ void __iomem *base;
+ u32 pte_efuse, redundant_sel, valid;
+
+ *bin = 0;
+ *version = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+ if (!res) {
+ dev_info(&pdev->dev,
+ "No speed/PVS binning available. Defaulting to 0!\n");
+ return;
+ }
+
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base) {
+ dev_warn(&pdev->dev,
+ "Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ devm_iounmap(&pdev->dev, base);
+
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+ *bin = pte_efuse & 0x7;
+ valid = (pte_efuse >> 3) & 0x1;
+ *version = (pte_efuse >> 4) & 0x3;
+
+ if (redundant_sel == 1)
+ *bin = (pte_efuse >> 27) & 0x7;
+
+ if (!valid) {
+ dev_info(&pdev->dev, "Speed bin not set. Defaulting to 0!\n");
+ *bin = 0;
+ } else {
+ dev_info(&pdev->dev, "Speed bin: %d\n", *bin);
+ }
+
+ dev_info(&pdev->dev, "PVS version: %d\n", *version);
+
+ return;
+}
+
+static int of_get_clk_src(struct platform_device *pdev, struct clk_src *parents)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int num_parents, i, j, index;
+ struct clk *c;
+ char clk_name[] = "clk-x";
+
+ num_parents = of_property_count_strings(of, "clock-names");
+ if (num_parents <= 0 || num_parents > 8) {
+ dev_err(&pdev->dev, "missing clock-names\n");
+ return -EINVAL;
+ }
+
+ j = 0;
+ for (i = 0; i < 8; i++) {
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "clk-%d", i);
+ index = of_property_match_string(of, "clock-names", clk_name);
+ if (IS_ERR_VALUE(index))
+ continue;
+
+ parents[j].sel = i;
+ parents[j].src = c = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(c)) {
+ if (c != ERR_PTR(-EPROBE_DEFER))
+ dev_err(&pdev->dev, "clk_get: %s\n fail",
+ clk_name);
+ return PTR_ERR(c);
+ }
+ j++;
+ }
+
+ return num_parents;
+}
+
+static int clock_a7_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int speed_bin = 0, version = 0, rc;
+ unsigned long rate, aux_rate;
+ struct clk *aux_clk, *main_pll;
+ char prop_name[] = "qcom,speedX-bin-vX";
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg-base");
+ if (!res) {
+ dev_err(&pdev->dev, "missing rcg-base\n");
+ return -EINVAL;
+ }
+ a7ssmux.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!a7ssmux.base) {
+ dev_err(&pdev->dev, "ioremap failed for rcg-base\n");
+ return -ENOMEM;
+ }
+
+ vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd");
+ if (IS_ERR(vdd_cpu.regulator[0])) {
+ if (PTR_ERR(vdd_cpu.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "unable to get regulator\n");
+ return PTR_ERR(vdd_cpu.regulator[0]);
+ }
+
+ a7ssmux.num_parents = of_get_clk_src(pdev, a7ssmux.parents);
+ if (IS_ERR_VALUE(a7ssmux.num_parents))
+ return a7ssmux.num_parents;
+
+ get_speed_bin(pdev, &speed_bin, &version);
+
+ snprintf(prop_name, ARRAY_SIZE(prop_name),
+ "qcom,speed%d-bin-v%d", speed_bin, version);
+ rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c, prop_name);
+ if (rc) {
+ /* Fall back to most conservative PVS table */
+ dev_err(&pdev->dev, "Unable to load voltage plan %s!\n",
+ prop_name);
+ rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c,
+ "qcom,speed0-bin-v0");
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to load safe voltage plan\n");
+ return rc;
+ }
+ dev_info(&pdev->dev, "Safe voltage plan loaded.\n");
+ }
+
+ rc = msm_clock_register(clock_tbl_a7, ARRAY_SIZE(clock_tbl_a7));
+ if (rc) {
+ dev_err(&pdev->dev, "msm_clock_register failed\n");
+ return rc;
+ }
+
+ /* Force a PLL reconfiguration */
+ aux_clk = a7ssmux.parents[0].src;
+ main_pll = a7ssmux.parents[1].src;
+
+ aux_rate = clk_get_rate(aux_clk);
+ rate = clk_get_rate(&a7ssmux.c);
+ clk_set_rate(&a7ssmux.c, aux_rate);
+ clk_set_rate(main_pll, clk_round_rate(main_pll, 1));
+ clk_set_rate(&a7ssmux.c, rate);
+
+ /*
+ * We don't want the CPU clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ WARN(clk_prepare_enable(&a7ssmux.c),
+ "Unable to turn on CPU clock");
+ return 0;
+}
+
+static struct of_device_id clock_a7_match_table[] = {
+ {.compatible = "qcom,clock-a7-8226"},
+ {}
+};
+
+static struct platform_driver clock_a7_driver = {
+ .driver = {
+ .name = "clock-a7",
+ .of_match_table = clock_a7_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init clock_a7_init(void)
+{
+ return platform_driver_probe(&clock_a7_driver, clock_a7_probe);
+}
+device_initcall(clock_a7_init);
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index 35917c3..c3b7229 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -23,6 +23,7 @@
#include <linux/clkdev.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
+#include <linux/io.h>
#include <mach/clk-provider.h>
@@ -412,6 +413,56 @@
.write = clock_parent_write,
};
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f)
+{
+ void __iomem *base;
+ struct clk_register_data *regs;
+ u32 i, j, size;
+
+ if (IS_ERR_OR_NULL(clk))
+ return;
+
+ clk_debug_print_hw(clk->parent, f);
+
+ clock_debug_output(f, false, "%s\n", clk->dbg_name);
+
+ if (!clk->ops->list_registers)
+ return;
+
+ j = 0;
+ base = clk->ops->list_registers(clk, j, ®s, &size);
+ while (!IS_ERR(base)) {
+ for (i = 0; i < size; i++) {
+ u32 val = readl_relaxed(base + regs[i].offset);
+ clock_debug_output(f, false, "%20s: 0x%.8x\n",
+ regs[i].name, val);
+ }
+ j++;
+ base = clk->ops->list_registers(clk, j, ®s, &size);
+ }
+}
+
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+ struct clk *c = m->private;
+ clk_debug_print_hw(c, m);
+
+ return 0;
+}
+
+static int print_hw_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+ .open = print_hw_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+
static int clock_debug_add(struct clk *clock)
{
char temp[50], *ptr;
@@ -463,6 +514,10 @@
&clock_parent_fops))
goto error;
+ if (!debugfs_create_file("print", S_IRUGO, clk_dir, clock,
+ &clock_print_hw_fops))
+ goto error;
+
return 0;
error:
debugfs_remove_recursive(clk_dir);
diff --git a/arch/arm/mach-msm/clock-krait-8974.c b/arch/arm/mach-msm/clock-krait-8974.c
index 6ada01f..24fe303 100644
--- a/arch/arm/mach-msm/clock-krait-8974.c
+++ b/arch/arm/mach-msm/clock-krait-8974.c
@@ -732,8 +732,8 @@
* that the clocks have already been prepared and enabled by the time
* they take over.
*/
- clk_prepare_enable(&l2_clk.c);
for_each_online_cpu(cpu) {
+ clk_prepare_enable(&l2_clk.c);
WARN(clk_prepare_enable(cpu_clk[cpu]),
"Unable to turn on CPU%d clock", cpu);
}
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index a251784..c08df46 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -304,6 +304,22 @@
return HANDOFF_ENABLED_CLK;
}
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct pll_freq_tbl *nf;
+ struct pll_clk *pll = to_pll_clk(c);
+
+ if (!pll->freq_tbl)
+ return -EINVAL;
+
+ for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++)
+ if (nf->freq_hz >= rate)
+ return nf->freq_hz;
+
+ nf--;
+ return nf->freq_hz;
+}
+
static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
{
struct pll_freq_tbl *nf;
@@ -426,6 +442,7 @@
.enable = sr2_pll_clk_enable,
.disable = local_pll_clk_disable,
.set_rate = local_pll_clk_set_rate,
+ .round_rate = local_pll_clk_round_rate,
.handoff = local_pll_clk_handoff,
};
diff --git a/arch/arm/mach-msm/cpubw-krait.c b/arch/arm/mach-msm/cpubw-krait.c
new file mode 100644
index 0000000..4108754
--- /dev/null
+++ b/arch/arm/mach-msm/cpubw-krait.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpubw-krait: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <trace/events/power.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+
+#include <mach/msm-krait-l2-accessors.h>
+
+#define L2PMRESR2 0x412
+#define L2PMCR 0x400
+#define L2PMCNTENCLR 0x402
+#define L2PMCNTENSET 0x403
+#define L2PMINTENCLR 0x404
+#define L2PMINTENSET 0x405
+#define L2PMOVSR 0x406
+#define L2PMOVSSET 0x407
+#define L2PMnEVCNTCR(n) (0x420 + n * 0x10)
+#define L2PMnEVCNTR(n) (0x421 + n * 0x10)
+#define L2PMnEVCNTSR(n) (0x422 + n * 0x10)
+#define L2PMnEVFILTER(n) (0x423 + n * 0x10)
+#define L2PMnEVTYPER(n) (0x424 + n * 0x10)
+#define MON_INT 33
+
+#define MBYTE (1 << 20)
+
+#define BW(_bw) \
+ { \
+ .vectors = (struct msm_bus_vectors[]){ \
+ {\
+ .src = MSM_BUS_MASTER_AMPSS_M0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ }, \
+ { \
+ .src = MSM_BUS_MASTER_AMPSS_M1, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ }, \
+ }, \
+ .num_paths = 2, \
+ }
+
+/* Has to be a power of 2 to work correctly */
+static unsigned int bytes_per_beat = 8;
+module_param(bytes_per_beat, uint, 0644);
+
+static unsigned int sample_ms = 50;
+module_param(sample_ms, uint, 0644);
+
+static unsigned int tolerance_percent = 10;
+module_param(tolerance_percent, uint, 0644);
+
+static unsigned int guard_band_mbps = 100;
+module_param(guard_band_mbps, uint, 0644);
+
+static unsigned int decay_rate = 90;
+module_param(decay_rate, uint, 0644);
+
+static unsigned int io_percent = 15;
+module_param(io_percent, uint, 0644);
+
+static unsigned int bw_step = 200;
+module_param(bw_step, uint, 0644);
+
+static struct kernel_param_ops enable_ops;
+static bool enable;
+module_param_cb(enable, &enable_ops, &enable, S_IRUGO | S_IWUSR);
+
+static void mon_init(void)
+{
+ /* Set up counters 0/1 to count write/read beats */
+ set_l2_indirect_reg(L2PMRESR2, 0x8B0B0000);
+ set_l2_indirect_reg(L2PMnEVCNTCR(0), 0x0);
+ set_l2_indirect_reg(L2PMnEVCNTCR(1), 0x0);
+ set_l2_indirect_reg(L2PMnEVCNTR(0), 0xFFFFFFFF);
+ set_l2_indirect_reg(L2PMnEVCNTR(1), 0xFFFFFFFF);
+ set_l2_indirect_reg(L2PMnEVFILTER(0), 0xF003F);
+ set_l2_indirect_reg(L2PMnEVFILTER(1), 0xF003F);
+ set_l2_indirect_reg(L2PMnEVTYPER(0), 0xA);
+ set_l2_indirect_reg(L2PMnEVTYPER(1), 0xB);
+}
+
+static void global_mon_enable(bool en)
+{
+ u32 regval;
+
+ /* Global counter enable */
+ regval = get_l2_indirect_reg(L2PMCR);
+ if (en)
+ regval |= BIT(0);
+ else
+ regval &= ~BIT(0);
+ set_l2_indirect_reg(L2PMCR, regval);
+}
+
+static void mon_enable(int n)
+{
+ /* Clear previous overflow state for event counter n */
+ set_l2_indirect_reg(L2PMOVSR, BIT(n));
+
+ /* Enable event counter n */
+ set_l2_indirect_reg(L2PMCNTENSET, BIT(n));
+}
+
+static void mon_disable(int n)
+{
+ /* Disable event counter n */
+ set_l2_indirect_reg(L2PMCNTENCLR, BIT(n));
+}
+
+/* Returns start counter value to be used with mon_get_mbps() */
+static u32 mon_set_limit_mbyte(int n, unsigned int mbytes)
+{
+ u32 regval, beats;
+
+ beats = mult_frac(mbytes, MBYTE, bytes_per_beat);
+ regval = 0xFFFFFFFF - beats;
+ set_l2_indirect_reg(L2PMnEVCNTR(n), regval);
+ pr_debug("EV%d MB: %d, start val: %x\n", n, mbytes, regval);
+
+ return regval;
+}
+
+/* Returns MBps of read/writes for the sampling window. */
+static int mon_get_mbps(int n, u32 start_val, unsigned int us)
+{
+ u32 overflow, count;
+ long long beats;
+
+ count = get_l2_indirect_reg(L2PMnEVCNTR(n));
+ overflow = get_l2_indirect_reg(L2PMOVSR);
+
+ if (overflow & BIT(n))
+ beats = 0xFFFFFFFF - start_val + count;
+ else
+ beats = count - start_val;
+
+ beats *= USEC_PER_SEC;
+ beats *= bytes_per_beat;
+ do_div(beats, us);
+ beats = DIV_ROUND_UP_ULL(beats, MBYTE);
+
+ pr_debug("EV%d ov: %x, cnt: %x\n", n, overflow, count);
+
+ return beats;
+}
+
+static void do_bw_sample(struct work_struct *work);
+static DECLARE_DEFERRED_WORK(bw_sample, do_bw_sample);
+static struct workqueue_struct *bw_sample_wq;
+
+static DEFINE_MUTEX(bw_lock);
+static ktime_t prev_ts;
+static u32 prev_r_start_val;
+static u32 prev_w_start_val;
+
+static struct msm_bus_paths bw_levels[] = {
+ BW(0), BW(200),
+};
+static struct msm_bus_scale_pdata bw_data = {
+ .usecase = bw_levels,
+ .num_usecases = ARRAY_SIZE(bw_levels),
+ .name = "cpubw-krait",
+ .active_only = 1,
+};
+static u32 bus_client;
+static void compute_bw(int mbps);
+static irqreturn_t mon_intr_handler(int irq, void *dev_id);
+
+#define START_LIMIT 100 /* MBps */
+static int start_monitoring(void)
+{
+ int mb_limit;
+ int ret;
+
+ ret = request_threaded_irq(MON_INT, NULL, mon_intr_handler,
+ IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_RISING,
+ "cpubw_krait", mon_intr_handler);
+ if (ret) {
+ pr_err("Unable to register interrupt handler\n");
+ return ret;
+ }
+
+ bw_sample_wq = alloc_workqueue("cpubw-krait", WQ_HIGHPRI, 0);
+ if (!bw_sample_wq) {
+ pr_err("Unable to alloc workqueue\n");
+ ret = -ENOMEM;
+ goto alloc_wq_fail;
+ }
+
+ bus_client = msm_bus_scale_register_client(&bw_data);
+ if (!bus_client) {
+ pr_err("Unable to register bus client\n");
+ ret = -ENODEV;
+ goto bus_reg_fail;
+ }
+
+ compute_bw(START_LIMIT);
+
+ mon_init();
+ mon_disable(0);
+ mon_disable(1);
+
+ mb_limit = mult_frac(START_LIMIT, sample_ms, MSEC_PER_SEC);
+ mb_limit /= 2;
+
+ prev_r_start_val = mon_set_limit_mbyte(0, mb_limit);
+ prev_w_start_val = mon_set_limit_mbyte(1, mb_limit);
+
+ prev_ts = ktime_get();
+
+ set_l2_indirect_reg(L2PMINTENSET, BIT(0));
+ set_l2_indirect_reg(L2PMINTENSET, BIT(1));
+ mon_enable(0);
+ mon_enable(1);
+ global_mon_enable(true);
+
+ queue_delayed_work(bw_sample_wq, &bw_sample,
+ msecs_to_jiffies(sample_ms));
+
+ return 0;
+
+bus_reg_fail:
+ destroy_workqueue(bw_sample_wq);
+alloc_wq_fail:
+ disable_irq(MON_INT);
+ free_irq(MON_INT, mon_intr_handler);
+ return ret;
+}
+
+static void stop_monitoring(void)
+{
+ global_mon_enable(false);
+ mon_disable(0);
+ mon_disable(1);
+ set_l2_indirect_reg(L2PMINTENCLR, BIT(0));
+ set_l2_indirect_reg(L2PMINTENCLR, BIT(1));
+
+ disable_irq(MON_INT);
+ free_irq(MON_INT, mon_intr_handler);
+
+ cancel_delayed_work_sync(&bw_sample);
+ destroy_workqueue(bw_sample_wq);
+
+ bw_levels[0].vectors[0].ib = 0;
+ bw_levels[0].vectors[0].ab = 0;
+ bw_levels[0].vectors[1].ib = 0;
+ bw_levels[0].vectors[1].ab = 0;
+
+ bw_levels[1].vectors[0].ib = 0;
+ bw_levels[1].vectors[0].ab = 0;
+ bw_levels[1].vectors[1].ib = 0;
+ bw_levels[1].vectors[1].ab = 0;
+ msm_bus_scale_unregister_client(bus_client);
+}
+
+static void set_bw(int mbps)
+{
+ static int cur_idx, cur_ab, cur_ib;
+ int new_ab, new_ib;
+ int i, ret;
+
+ if (!io_percent)
+ io_percent = 1;
+ new_ab = roundup(mbps, bw_step);
+ new_ib = mbps * 100 / io_percent;
+ new_ib = roundup(new_ib, bw_step);
+
+ if (cur_ib == new_ib && cur_ab == new_ab)
+ return;
+
+ i = (cur_idx + 1) % ARRAY_SIZE(bw_levels);
+
+ bw_levels[i].vectors[0].ib = new_ib * 1000000ULL;
+ bw_levels[i].vectors[0].ab = new_ab * 1000000ULL;
+ bw_levels[i].vectors[1].ib = new_ib * 1000000ULL;
+ bw_levels[i].vectors[1].ab = new_ab * 1000000ULL;
+
+ pr_debug("BW MBps: Req: %d AB: %d IB: %d\n", mbps, new_ab, new_ib);
+
+ ret = msm_bus_scale_client_update_request(bus_client, i);
+ if (ret)
+ pr_err("bandwidth request failed (%d)\n", ret);
+ else {
+ cur_idx = i;
+ cur_ib = new_ib;
+ cur_ab = new_ab;
+ }
+}
+
+static void compute_bw(int mbps)
+{
+ static int cur_bw;
+ int new_bw;
+
+ mbps += guard_band_mbps;
+
+ if (mbps > cur_bw) {
+ new_bw = mbps;
+ } else {
+ new_bw = mbps * decay_rate + cur_bw * (100 - decay_rate);
+ new_bw /= 100;
+ }
+
+ if (new_bw == cur_bw)
+ return;
+
+ set_bw(new_bw);
+ cur_bw = new_bw;
+}
+
+static int to_limit(int mbps)
+{
+ mbps *= (100 + tolerance_percent) * sample_ms;
+ mbps /= 100;
+ mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+ return mbps;
+}
+
+static void measure_bw(void)
+{
+ int r_mbps, w_mbps, mbps;
+ ktime_t ts;
+ unsigned int us;
+
+ mutex_lock(&bw_lock);
+
+ /*
+ * Since we are stopping the counters, we don't want this short work
+ * to be interrupted by other tasks and cause the measurements to be
+ * wrong. Not blocking interrupts to avoid affecting interrupt
+ * latency and since they should be short anyway because they run in
+ * atomic context.
+ */
+ preempt_disable();
+
+ ts = ktime_get();
+ us = ktime_to_us(ktime_sub(ts, prev_ts));
+ if (!us)
+ us = 1;
+
+ mon_disable(0);
+ mon_disable(1);
+
+ r_mbps = mon_get_mbps(0, prev_r_start_val, us);
+ w_mbps = mon_get_mbps(1, prev_w_start_val, us);
+
+ prev_r_start_val = mon_set_limit_mbyte(0, to_limit(r_mbps));
+ prev_w_start_val = mon_set_limit_mbyte(1, to_limit(w_mbps));
+
+ mon_enable(0);
+ mon_enable(1);
+
+ preempt_enable();
+
+ mbps = r_mbps + w_mbps;
+ pr_debug("R/W/BW/us = %d/%d/%d/%d\n", r_mbps, w_mbps, mbps, us);
+ compute_bw(mbps);
+
+ prev_ts = ts;
+ mutex_unlock(&bw_lock);
+}
+
+static void do_bw_sample(struct work_struct *work)
+{
+ measure_bw();
+ queue_delayed_work(bw_sample_wq, &bw_sample,
+ msecs_to_jiffies(sample_ms));
+}
+
+static irqreturn_t mon_intr_handler(int irq, void *dev_id)
+{
+ bool pending;
+ u32 regval;
+
+ regval = get_l2_indirect_reg(L2PMOVSR);
+ pr_debug("Got interrupt: %x\n", regval);
+
+ pending = cancel_delayed_work_sync(&bw_sample);
+
+ /*
+ * Don't recalc bandwidth if the interrupt came just after the end
+ * of the sample period (!pending). This is done for two reasons:
+ *
+ * 1. Sampling the BW during a very short duration can result in a
+ * very inaccurate measurement due to very short bursts.
+ * 2. If the limit was hit very close to the sample period, then the
+ * current BW estimate is not very off and can stay as such.
+ */
+ if (pending)
+ measure_bw();
+
+ queue_delayed_work(bw_sample_wq, &bw_sample,
+ msecs_to_jiffies(sample_ms));
+
+ return IRQ_HANDLED;
+}
+
+static int set_enable(const char *arg, const struct kernel_param *kp)
+{
+ int ret;
+ bool old_val = *((bool *) kp->arg);
+ bool new_val;
+
+ if (!arg)
+ arg = "1";
+ ret = strtobool(arg, &new_val);
+ if (ret)
+ return ret;
+
+ if (!old_val && new_val) {
+ if (start_monitoring()) {
+ pr_err("L2PM counters already in use.\n");
+ return ret;
+ } else {
+ pr_info("Enabling CPU BW monitoring\n");
+ }
+ } else if (old_val && !new_val) {
+ pr_info("Disabling CPU BW monitoring\n");
+ stop_monitoring();
+ }
+
+ *(bool *) kp->arg = new_val;
+ return 0;
+}
+
+static struct kernel_param_ops enable_ops = {
+ .set = set_enable,
+ .get = param_get_bool,
+};
+
+static int cpubw_krait_init(void)
+{
+ bw_sample_wq = alloc_workqueue("cpubw-krait", WQ_HIGHPRI, 0);
+ if (!bw_sample_wq)
+ return -ENOMEM;
+
+ bus_client = msm_bus_scale_register_client(&bw_data);
+ if (!bus_client) {
+ pr_err("Unable to register bus client\n");
+ destroy_workqueue(bw_sample_wq);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+late_initcall(cpubw_krait_init);
+
+MODULE_DESCRIPTION("CPU DDR bandwidth voting driver for Krait CPUs");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/cpufreq.c b/arch/arm/mach-msm/cpufreq.c
index 85a1468..60856c2 100644
--- a/arch/arm/mach-msm/cpufreq.c
+++ b/arch/arm/mach-msm/cpufreq.c
@@ -324,11 +324,15 @@
case CPU_UP_CANCELED:
if (is_clk) {
clk_disable_unprepare(cpu_clk[cpu]);
+ clk_disable_unprepare(l2_clk);
update_l2_bw(NULL);
}
break;
case CPU_UP_PREPARE:
if (is_clk) {
+ rc = clk_prepare_enable(l2_clk);
+ if (rc < 0)
+ return NOTIFY_BAD;
rc = clk_prepare_enable(cpu_clk[cpu]);
if (rc < 0)
return NOTIFY_BAD;
diff --git a/arch/arm/mach-msm/idle-macros.S b/arch/arm/mach-msm/idle-macros.S
deleted file mode 100644
index 3d0c937..0000000
--- a/arch/arm/mach-msm/idle-macros.S
+++ /dev/null
@@ -1,153 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <asm/hardware/cache-l2x0.h>
-
-/* Add 300 NOPs after 'wfi' for 8x25 target */
-.macro DELAY_8x25, rept
-#ifdef CONFIG_ARCH_MSM8625
- .rept \rept
- nop
- .endr
-#endif
-.endm
-
-/* Switch between smp_to_amp/amp_to_smp configuration */
-.macro SET_SMP_COHERENCY, on = 0
- ldr r0, =target_type
- ldr r0, [r0]
- mov r1, #TARGET_IS_8625
- cmp r0, r1
- bne skip\@
- mrc p15, 0, r0, c1, c0, 1 /* read ACTLR register */
- .if \on
- orr r0, r0, #(1 << 6) /* Set the SMP bit in ACTLR */
- .else
- bic r0, r0, #(1 << 6) /* Clear the SMP bit */
- .endif
- mcr p15, 0, r0, c1, c0, 1 /* write ACTLR register */
- isb
-skip\@:
-.endm
-
-/*
- * Enable the "L2" cache, not require to restore the controller registers
- */
-.macro ENABLE_8x25_L2
- ldr r0, =target_type
- ldr r0, [r0]
- mov r1, #TARGET_IS_8625
- cmp r0, r1
- bne skip_enable\@
- ldr r0, =apps_power_collapse
- ldr r0, [r0]
- cmp r0, #POWER_COLLAPSED
- bne skip_enable\@
- ldr r0, =l2x0_base_addr
- ldr r0, [r0]
- mov r1, #0x1
- str r1, [r0, #L2X0_CTRL]
- dmb
-skip_enable\@:
-.endm
-
-/*
- * Perform the required operation
- * operation: type of operation on l2 cache (e.g: clean&inv or inv)
- * l2_enable: enable or disable
- */
-.macro DO_CACHE_OPERATION, operation, l2_enable
- ldr r2, =l2x0_base_addr
- ldr r2, [r2]
- ldr r0, =0xffff
- str r0, [r2, #\operation]
-wait\@:
- ldr r0, [r2, #\operation]
- ldr r1, =0xffff
- ands r0, r0, r1
- bne wait\@
-l2x_sync\@:
- mov r0, #0x0
- str r0, [r2, #L2X0_CACHE_SYNC]
-sync\@:
- ldr r0, [r2, #L2X0_CACHE_SYNC]
- ands r0, r0, #0x1
- bne sync\@
- mov r1, #\l2_enable
- str r1, [r2, #L2X0_CTRL]
-.endm
-
-/*
- * Clean and invalidate the L2 cache.
- * 1. Check the target type
- * 2. Check whether we are coming from PC are not
- * 3. Save 'aux', 'data latency', & 'prefetch ctlr' registers
- * 4. Start L2 clean & invalidation operation
- * 5. Disable the L2 cache
- */
-.macro SUSPEND_8x25_L2
- ldr r0, =target_type
- ldr r0, [r0]
- mov r1, #TARGET_IS_8625
- cmp r0, r1
- bne skip_suspend\@
- ldr r0, =apps_power_collapse
- ldr r0, [r0]
- cmp r0, #POWER_COLLAPSED
- bne skip_suspend\@
- ldr r0, =l2x0_saved_ctrl_reg_val
- ldr r1, =l2x0_base_addr
- ldr r1, [r1]
- ldr r2, [r1, #L2X0_AUX_CTRL]
- str r2, [r0, #0x0] /* store aux_ctlr reg value */
- ldr r2, [r1, #L2X0_DATA_LATENCY_CTRL]
- str r2, [r0, #0x4] /* store data latency reg value */
- ldr r2, [r1, #L2X0_PREFETCH_CTRL]
- str r2, [r0, #0x8] /* store prefetch_ctlr reg value */
- DO_CACHE_OPERATION L2X0_CLEAN_INV_WAY OFF
- dmb
-skip_suspend\@:
-.endm
-
-/*
- * Coming back from a successful PC
- * 1. Check the target type
- * 2. Check whether we are going to PC are not
- * 3. Disable the L2 cache
- * 4. Restore 'aux', 'data latency', & 'prefetch ctlr' reg
- * 5. Invalidate the cache
- * 6. Enable the L2 cache
- */
-.macro RESUME_8x25_L2
- ldr r0, =target_type
- ldr r0, [r0]
- mov r1, #TARGET_IS_8625
- cmp r0, r1
- bne skip_resume\@
- ldr r0, =apps_power_collapse
- ldr r0, [r0]
- cmp r0, #POWER_COLLAPSED
- bne skip_resume\@
- ldr r1, =l2x0_base_addr
- ldr r1, [r1]
- mov r0, #0x0
- str r0, [r1, #L2X0_CTRL]
- ldr r0, =l2x0_saved_ctrl_reg_val
- ldr r2, [r0, #0x0]
- str r2, [r1, #L2X0_AUX_CTRL] /* restore aux_ctlr reg value */
- ldr r2, [r0, #0x4]
- str r2, [r1, #L2X0_DATA_LATENCY_CTRL]
- ldr r2, [r0, #0x8]
- str r2, [r1, #L2X0_PREFETCH_CTRL]
- DO_CACHE_OPERATION L2X0_INV_WAY ON
-skip_resume\@:
-.endm
diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S
index 2956bd6..a133470 100644
--- a/arch/arm/mach-msm/idle-v7.S
+++ b/arch/arm/mach-msm/idle-v7.S
@@ -19,357 +19,7 @@
#include <linux/threads.h>
#include <asm/assembler.h>
-#include "idle.h"
-#include "idle-macros.S"
-
-#ifdef CONFIG_MSM_SCM
-#define SCM_SVC_BOOT 0x1
-#define SCM_CMD_TERMINATE_PC 0x2
-#define SCM_CMD_CORE_HOTPLUGGED 0x10
-#endif
-
-ENTRY(msm_arch_idle)
-#ifdef CONFIG_ARCH_MSM_KRAIT
- mrc p15, 0, r0, c0, c0, 0
- bic r1, r0, #0xff
- movw r2, #0x0400
- movt r2, #0x511F
- movw r3, #0x0600
- movt r3, #0x510F
- cmp r2, r1
- cmpne r3, r1
- bne go_wfi
-
- mrs r0, cpsr
- cpsid if
-
- mrc p15, 7, r1, c15, c0, 5
- bic r2, r1, #0x20000
- mcr p15, 7, r2, c15, c0, 5
- isb
-
-go_wfi:
- wfi
- bne wfi_done
- mcr p15, 7, r1, c15, c0, 5
- isb
- msr cpsr_c, r0
-
-wfi_done:
- bx lr
-#else
- wfi
-#ifdef CONFIG_ARCH_MSM8X60
- mrc p14, 1, r1, c1, c5, 4 /* read ETM PDSR to clear sticky bit */
- mrc p14, 0, r1, c1, c5, 4 /* read DBG PRSR to clear sticky bit */
- isb
-#endif
- bx lr
-#endif
-ENTRY(msm_pm_pc_hotplug)
- stmfd sp!, {lr}
-#if defined(CONFIG_MSM_FIQ_SUPPORT)
- cpsid f
-#endif
-
-#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
- bl msm_jtag_save_state
-#endif
- mov r1, #0
- mcr p15, 2, r1, c0, c0, 0 /*CCSELR*/
- isb
- mrc p15, 1, r1, c0, c0, 0 /*CCSIDR*/
- mov r2, #1
- and r1, r2, r1, ASR #30 /* Check if the cache is write back */
- cmp r1, #1
- bleq v7_flush_kern_cache_all
-
- mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
- and r0, r0, #15 /* what CPU am I */
-
- ldr r1, =msm_pc_debug_counters /*load the IMEM debug location */
- ldr r1, [r1]
- cmp r1, #0
- beq skip_hp_debug1
- add r1, r1, r0, LSL #4 /* debug location for this CPU */
- ldr r2, [r1]
- add r2, #1
- str r2, [r1]
-skip_hp_debug1:
-
-#ifdef CONFIG_MSM_SCM
- ldr r0, =SCM_SVC_BOOT
- ldr r1, =SCM_CMD_TERMINATE_PC
- ldr r2, =SCM_CMD_CORE_HOTPLUGGED
- bl scm_call_atomic1
-#else
- mrc p15, 0, r3, c1, c0, 0 /* read current CR */
- bic r0, r3, #(1 << 2) /* clear dcache bit */
- bic r0, r0, #(1 << 12) /* clear icache bit */
- mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
- isb
- wfi
- mcr p15, 0, r3, c1, c0, 0 /* restore d/i cache */
- isb
-#endif
-
-#if defined(CONFIG_MSM_FIQ_SUPPORT)
- cpsie f
-#endif
- mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
- and r0, r0, #15 /* what CPU am I */
-
- ldr r1, =msm_pc_debug_counters /*load the IMEM debug location */
- ldr r1, [r1]
- cmp r1, #0
- beq skip_hp_debug2
- add r1, r1, r0, LSL #4 /* debug location for this CPU */
- add r1, #8
- ldr r2, [r1]
- add r2, #1
- str r2, [r1]
-skip_hp_debug2:
-#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
- bl msm_jtag_restore_state
-#endif
- mov r0, #0 /* return power collapse failed */
- ldmfd sp!, {lr}
- bx lr
-
-ENTRY(msm_pm_collapse)
-#if defined(CONFIG_MSM_FIQ_SUPPORT)
- cpsid f
-#endif
-
- ldr r0, =msm_saved_state /* address of msm_saved_state ptr */
- ldr r0, [r0] /* load ptr */
-#if (NR_CPUS >= 2)
- mrc p15, 0, r1, c0, c0, 5 /* MPIDR */
- ands r1, r1, #15 /* What CPU am I */
- mov r2, #CPU_SAVED_STATE_SIZE
- mul r1, r1, r2
- add r0, r0, r1
-#endif
-
- stmia r0!, {r4-r14}
- mrc p15, 0, r1, c1, c0, 0 /* MMU control */
- mrc p15, 0, r2, c2, c0, 0 /* TTBR0 */
- mrc p15, 0, r3, c3, c0, 0 /* dacr */
-#ifdef CONFIG_ARCH_MSM_SCORPION
- /* This instruction is not valid for non scorpion processors */
- mrc p15, 3, r4, c15, c0, 3 /* L2CR1 is the L2 cache control reg 1 */
-#endif
- mrc p15, 0, r5, c10, c2, 0 /* PRRR */
- mrc p15, 0, r6, c10, c2, 1 /* NMRR */
- mrc p15, 0, r7, c1, c0, 1 /* ACTLR */
- mrc p15, 0, r8, c2, c0, 1 /* TTBR1 */
- mrc p15, 0, r9, c13, c0, 3 /* TPIDRURO */
- mrc p15, 0, ip, c13, c0, 1 /* context ID */
- stmia r0!, {r1-r9, ip}
-
-#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
- bl msm_jtag_save_state
-#endif
-
- ldr r0, =msm_pm_flush_l2_flag
- ldr r0, [r0]
- mov r1, #0
- mcr p15, 2, r1, c0, c0, 0 /*CCSELR*/
- isb
- mrc p15, 1, r1, c0, c0, 0 /*CCSIDR*/
- mov r2, #1
- and r1, r2, r1, ASR #30 /* Check if the cache is write back */
- orr r1, r0, r1
- and r1, r1, #1
- cmp r1, #1
- bne skip
- bl v7_flush_dcache_all
- ldr r1, =msm_pm_flush_l2_fn
- ldr r1, [r1]
- cmp r1, #0
- blxne r1
-
-skip:
- ldr r1, =msm_pm_disable_l2_fn
- ldr r1, [r1]
- cmp r1, #0
- blxne r1
- dmb
-
- mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
- and r0, r0, #15 /* what CPU am I */
-
- ldr r1, =msm_pc_debug_counters /*load the IMEM debug location */
- ldr r1, [r1]
- cmp r1, #0
- beq skip_pc_debug1
- add r1, r1, r0, LSL #4 /* debug location for this CPU */
- ldr r2, [r1]
- add r2, #1
- str r2, [r1]
-skip_pc_debug1:
-
-#ifdef CONFIG_MSM_SCM
- ldr r0, =SCM_SVC_BOOT
- ldr r1, =SCM_CMD_TERMINATE_PC
- ldr r2, =msm_pm_flush_l2_flag
- ldr r2, [r2]
- bl scm_call_atomic1
-#else
- mrc p15, 0, r4, c1, c0, 0 /* read current CR */
- bic r0, r4, #(1 << 2) /* clear dcache bit */
- bic r0, r0, #(1 << 12) /* clear icache bit */
- mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
- isb
-
- SUSPEND_8x25_L2
- SET_SMP_COHERENCY OFF
- wfi
- DELAY_8x25 300
-
- mcr p15, 0, r4, c1, c0, 0 /* restore d/i cache */
- isb
- ENABLE_8x25_L2 /* enable only l2, no need to restore the reg back */
- SET_SMP_COHERENCY ON
-#endif
-
-#if defined(CONFIG_MSM_FIQ_SUPPORT)
- cpsie f
-#endif
- mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
- and r0, r0, #15 /* what CPU am I */
-
- ldr r1, =msm_pc_debug_counters /*load the IMEM debug location */
- ldr r1, [r1]
- cmp r1, #0
- beq skip_pc_debug2
- add r1, r1, r0, LSL #4 /* debug location for this CPU */
- add r1, #8
- ldr r2, [r1]
- add r2, #1
- str r2, [r1]
-
-skip_pc_debug2:
- ldr r1, =msm_pm_enable_l2_fn
- ldr r1, [r1]
- cmp r1, #0
- blxne r1
- dmb
-
-#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
- bl msm_jtag_restore_state
-#endif
- ldr r0, =msm_saved_state /* address of msm_saved_state ptr */
- ldr r0, [r0] /* load ptr */
-#if (NR_CPUS >= 2)
- mrc p15, 0, r1, c0, c0, 5 /* MPIDR */
- ands r1, r1, #15 /* What CPU am I */
- mov r2, #CPU_SAVED_STATE_SIZE
- mul r2, r2, r1
- add r0, r0, r2
-#endif
- ldmfd r0, {r4-r14} /* restore registers */
- mov r0, #0 /* return power collapse failed */
- bx lr
-
-ENTRY(msm_pm_collapse_exit)
-#if 0 /* serial debug */
- mov r0, #0x80000016
- mcr p15, 0, r0, c15, c2, 4
- mov r0, #0xA9000000
- add r0, r0, #0x00A00000 /* UART1 */
- /*add r0, r0, #0x00C00000*/ /* UART3 */
- mov r1, #'A'
- str r1, [r0, #0x00C]
-#endif
- ldr r1, =msm_saved_state_phys
- ldr r2, =msm_pm_collapse_exit
- adr r3, msm_pm_collapse_exit
- add r1, r1, r3
- sub r1, r1, r2
- ldr r1, [r1]
- add r1, r1, #CPU_SAVED_STATE_SIZE
-#if (NR_CPUS >= 2)
- mrc p15, 0, r2, c0, c0, 5 /* MPIDR */
- ands r2, r2, #15 /* What CPU am I */
- mov r3, #CPU_SAVED_STATE_SIZE
- mul r2, r2, r3
- add r1, r1, r2
-#endif
-
- ldmdb r1!, {r2-r11}
- mcr p15, 0, r4, c3, c0, 0 /* dacr */
- mcr p15, 0, r3, c2, c0, 0 /* TTBR0 */
-#ifdef CONFIG_ARCH_MSM_SCORPION
- /* This instruction is not valid for non scorpion processors */
- mcr p15, 3, r5, c15, c0, 3 /* L2CR1 */
-#endif
- mcr p15, 0, r6, c10, c2, 0 /* PRRR */
- mcr p15, 0, r7, c10, c2, 1 /* NMRR */
- mcr p15, 0, r8, c1, c0, 1 /* ACTLR */
- mcr p15, 0, r9, c2, c0, 1 /* TTBR1 */
- mcr p15, 0, r10, c13, c0, 3 /* TPIDRURO */
- mcr p15, 0, r11, c13, c0, 1 /* context ID */
- isb
- ldmdb r1!, {r4-r14}
- ldr r0, =msm_pm_pc_pgd
- ldr r1, =msm_pm_collapse_exit
- adr r3, msm_pm_collapse_exit
- add r0, r0, r3
- sub r0, r0, r1
- ldr r0, [r0]
- mrc p15, 0, r1, c2, c0, 0 /* save current TTBR0 */
- and r3, r1, #0x7f /* mask to get TTB flags */
- orr r0, r0, r3 /* add TTB flags to switch TTBR value */
- mcr p15, 0, r0, c2, c0, 0 /* temporary switch TTBR0 */
- isb
- mcr p15, 0, r2, c1, c0, 0 /* MMU control */
- isb
-msm_pm_mapped_pa:
- /* Switch to virtual */
- ldr r0, =msm_pm_pa_to_va
- mov pc, r0
-msm_pm_pa_to_va:
- mcr p15, 0, r1, c2, c0, 0 /* restore TTBR0 */
- isb
- mcr p15, 0, r3, c8, c7, 0 /* UTLBIALL */
- mcr p15, 0, r3, c7, c5, 6 /* BPIALL */
- dsb
- isb
-
-#ifdef CONFIG_ARCH_MSM_KRAIT
- mrc p15, 0, r1, c0, c0, 0
- ldr r3, =0xff00fc00
- and r3, r1, r3
- ldr r1, =0x51000400
- cmp r3, r1
- mrceq p15, 7, r3, c15, c0, 2
- biceq r3, r3, #0x400
- mcreq p15, 7, r3, c15, c0, 2
-#else
- RESUME_8x25_L2
- SET_SMP_COHERENCY ON
-#endif
-
- ldr r1, =msm_pm_enable_l2_fn
- ldr r1, [r1]
- cmp r1, #0
- stmfd sp!, {lr}
- blxne r1
- dmb
-#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM)
- bl msm_jtag_restore_state
-#endif
- ldmfd sp!, {lr}
- mov r0, #1
- bx lr
- nop
- nop
- nop
- nop
- nop
-1: b 1b
-
+ .arm
ENTRY(msm_pm_boot_entry)
mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
and r0, r0, #15 /* what CPU am I */
@@ -399,80 +49,14 @@
add r1, r1, r0, LSL #2 /* locate boot vector for our cpu */
ldr pc, [r1] /* jump */
-ENTRY(msm_pm_set_l2_flush_flag)
- ldr r1, =msm_pm_flush_l2_flag
- str r0, [r1]
- bx lr
-
-ENTRY(msm_pm_get_l2_flush_flag)
- ldr r1, =msm_pm_flush_l2_flag
- ldr r0, [r1]
- bx lr
+3: .long .
.data
- .globl msm_pm_pc_pgd
-msm_pm_pc_pgd:
- .long 0x0
-
- .globl msm_saved_state
-msm_saved_state:
- .long 0x0
-
- .globl msm_saved_state_phys
-msm_saved_state_phys:
- .long 0x0
-
.globl msm_pm_boot_vector
msm_pm_boot_vector:
.space 4 * NR_CPUS
- .globl target_type
-target_type:
- .long 0x0
-
- .globl apps_power_collapse
-apps_power_collapse:
- .long 0x0
-
- .globl l2x0_base_addr
-l2x0_base_addr:
- .long 0x0
-
.globl msm_pc_debug_counters_phys
msm_pc_debug_counters_phys:
.long 0x0
-
- .globl msm_pc_debug_counters
-msm_pc_debug_counters:
- .long 0x0
-
- .globl msm_pm_enable_l2_fn
-msm_pm_enable_l2_fn:
- .long 0x0
-
- .globl msm_pm_disable_l2_fn
-msm_pm_disable_l2_fn:
- .long 0x0
-
- .globl msm_pm_flush_l2_fn
-msm_pm_flush_l2_fn:
- .long 0x0
-
-/*
- * Default the l2 flush flag to 1 so that caches are flushed during power
- * collapse unless the L2 driver decides to flush them only during L2
- * Power collapse.
- */
-msm_pm_flush_l2_flag:
- .long 0x1
-
-/*
- * Save & restore l2x0 registers while system is entering and resuming
- * from Power Collapse.
- * 1. aux_ctrl_save (0x0)
- * 2. data_latency_ctrl (0x4)
- * 3. prefetch control (0x8)
- */
-l2x0_saved_ctrl_reg_val:
- .space 4 * 3
diff --git a/arch/arm/mach-msm/idle.h b/arch/arm/mach-msm/idle.h
index 72f1a03..0fb96c3 100644
--- a/arch/arm/mach-msm/idle.h
+++ b/arch/arm/mach-msm/idle.h
@@ -14,49 +14,13 @@
#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
#define _ARCH_ARM_MACH_MSM_IDLE_H_
-/* 11 general purpose registers (r4-r14), 10 cp15 registers */
-#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10)
-
-#define ON 1
-#define OFF 0
-#define TARGET_IS_8625 1
-#define POWER_COLLAPSED 1
-
-#ifndef __ASSEMBLY__
-
-int msm_arch_idle(void);
-int msm_pm_collapse(void);
-int msm_pm_pc_hotplug(void);
-void msm_pm_collapse_exit(void);
-extern void *msm_saved_state;
-extern void (*msm_pm_disable_l2_fn)(void);
-extern void (*msm_pm_enable_l2_fn)(void);
-extern void (*msm_pm_flush_l2_fn)(void);
-extern unsigned long msm_saved_state_phys;
-
#ifdef CONFIG_CPU_V7
-void msm_pm_boot_entry(void);
-void msm_pm_set_l2_flush_flag(unsigned int flag);
-int msm_pm_get_l2_flush_flag(void);
-extern unsigned long msm_pm_pc_pgd;
extern unsigned long msm_pm_boot_vector[NR_CPUS];
-extern uint32_t target_type;
-extern uint32_t apps_power_collapse;
-extern uint32_t *l2x0_base_addr;
+void msm_pm_boot_entry(void);
#else
-static inline void msm_pm_set_l2_flush_flag(unsigned int flag)
-{
- /* empty */
-}
static inline void msm_pm_boot_entry(void)
{
/* empty */
}
-static inline void msm_pm_write_boot_vector(unsigned int cpu,
- unsigned long address)
-{
- /* empty */
-}
-#endif
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/clk-provider.h b/arch/arm/mach-msm/include/mach/clk-provider.h
index 027606e..4529a81 100644
--- a/arch/arm/mach-msm/include/mach/clk-provider.h
+++ b/arch/arm/mach-msm/include/mach/clk-provider.h
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
#include <mach/clk.h>
/*
@@ -41,6 +42,21 @@
#define ENABLE_VOTED 4 /* Bit pol: 1 = running; delay on disable */
#define DELAY 5 /* No bit to check, just delay */
+struct clk_register_data {
+ char *name;
+ u32 offset;
+};
+#ifdef CONFIG_DEBUG_FS
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f);
+#else
+static inline void clk_debug_print_hw(struct clk *clk, struct seq_file *f) {}
+#endif
+
+#define CLK_WARN(clk, cond, fmt, ...) do { \
+ clk_debug_print_hw(clk, NULL); \
+ WARN(cond, "%s: " fmt, (clk)->dbg_name, ##__VA_ARGS__); \
+} while (0)
+
/**
* struct clk_vdd_class - Voltage scaling class
* @class_name: name of the class
@@ -129,6 +145,8 @@
int (*set_parent)(struct clk *clk, struct clk *parent);
struct clk *(*get_parent)(struct clk *clk);
bool (*is_local)(struct clk *clk);
+ void __iomem *(*list_registers)(struct clk *clk, int n,
+ struct clk_register_data **regs, u32 *size);
};
/**
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index 2d7e8df..f398652 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -81,9 +81,9 @@
int (*set_grp_async)(void);
unsigned int idle_timeout;
bool strtstp_sleepwake;
+ bool bus_control;
unsigned int clk_map;
unsigned int idle_needed;
- unsigned int step_mul;
struct msm_bus_scale_pdata *bus_scale_table;
struct kgsl_device_iommu_data *iommu_data;
int iommu_count;
diff --git a/arch/arm/mach-msm/include/mach/mpm.h b/arch/arm/mach-msm/include/mach/mpm.h
index e76a6a9..abfac48 100644
--- a/arch/arm/mach-msm/include/mach/mpm.h
+++ b/arch/arm/mach-msm/include/mach/mpm.h
@@ -112,11 +112,13 @@
* @sclk_count: wakeup time in sclk counts for programmed RPM wakeup
* @from_idle: indicates if the sytem is entering low power mode as a part of
* suspend/idle task.
+ * @cpumask: the next cpu to wakeup.
*
* Low power management code calls into this API to configure the MPM to
* monitor the active irqs before going to sleep.
*/
-void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle);
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle,
+ const struct cpumask *cpumask);
/**
* msm_mpm_exit_sleep() -Called from PM code after resuming from low power mode
*
@@ -159,7 +161,8 @@
{ return false; }
static inline bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{ return false; }
-static inline void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle) {}
+static inline void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle,
+ const struct cpumask *cpumask) {}
static inline void msm_mpm_exit_sleep(bool from_idle) {}
static inline void __init of_mpm_init(struct device_node *node) {}
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index cd07662..e3bd488 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -24,6 +24,7 @@
#include <mach/msm_smem.h>
typedef struct smd_channel smd_channel_t;
+struct cpumask;
#define SMD_MAX_CH_NAME_LEN 20 /* includes null char at end */
@@ -217,13 +218,15 @@
* particular channel.
* @ch: open channel handle to use for the edge
* @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @cpumask: cpumask for the next cpu scheduled to be woken up
* @returns: 0 for success; < 0 for failure
*
* Note that this enables/disables all interrupts from the remote subsystem for
* all channels. As such, it should be used with care and only for specific
* use cases such as power-collapse sequencing.
*/
-int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask);
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask);
/* Starts a packet transaction. The size of the packet may exceed the total
* size of the smd ring buffer.
@@ -411,7 +414,8 @@
{
}
-static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask)
{
return -ENODEV;
}
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index bdda546..5aa6c93 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -589,17 +589,24 @@
};
static const struct proto_ops msm_ipc_proto_ops = {
- .owner = THIS_MODULE,
.family = AF_MSM_IPC,
+ .owner = THIS_MODULE,
+ .release = msm_ipc_router_close,
.bind = msm_ipc_router_bind,
.connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = msm_ipc_router_poll,
+ .ioctl = msm_ipc_router_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
.sendmsg = msm_ipc_router_sendmsg,
.recvmsg = msm_ipc_router_recvmsg,
- .ioctl = msm_ipc_router_ioctl,
- .poll = msm_ipc_router_poll,
- .setsockopt = sock_no_setsockopt,
- .getsockopt = sock_no_getsockopt,
- .release = msm_ipc_router_close,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
};
static struct proto msm_ipc_proto = {
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 249a334..b707df1 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -19,73 +19,115 @@
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
#include <mach/mpm.h>
+#include <mach/cpuidle.h>
+#include <mach/event_timer.h>
#include "pm.h"
#include "rpm-notifier.h"
#include "spm.h"
#include "idle.h"
+#define SCLK_HZ (32768)
+
enum {
MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};
-enum {
- MSM_SCM_L2_ON = 0,
- MSM_SCM_L2_OFF = 1,
- MSM_SCM_L2_GDHS = 3,
-};
-
-struct msm_rpmrs_level {
- enum msm_pm_sleep_mode sleep_mode;
- uint32_t l2_cache;
- bool available;
+struct power_params {
uint32_t latency_us;
- uint32_t steady_state_power;
+ uint32_t ss_power;
uint32_t energy_overhead;
uint32_t time_overhead_us;
+ uint32_t target_residency_us;
};
+struct lpm_cpu_level {
+ const char *name;
+ enum msm_pm_sleep_mode mode;
+ struct power_params pwr;
+ bool use_bc_timer;
+ bool sync;
+};
+
+struct lpm_system_level {
+ const char *name;
+ uint32_t l2_mode;
+ struct power_params pwr;
+ enum msm_pm_sleep_mode min_cpu_mode;
+ int num_cpu_votes;
+ bool notify_rpm;
+ bool available;
+ bool sync;
+};
+
+struct lpm_system_state {
+ struct lpm_cpu_level *cpu_level;
+ int num_cpu_levels;
+ struct lpm_system_level *system_level;
+ int num_system_levels;
+ enum msm_pm_sleep_mode sync_cpu_mode;
+ int last_entered_cluster_index;
+ bool allow_synched_levels;
+ bool no_l2_saw;
+ struct spinlock sync_lock;
+ int num_cores_in_sync;
+};
+
+static struct lpm_system_state sys_state;
+static bool suspend_in_progress;
+
struct lpm_lookup_table {
uint32_t modes;
const char *mode_name;
};
-static void msm_lpm_level_update(void);
-
-static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
+static void lpm_system_level_update(void);
+static void setup_broadcast_timer(void *arg);
+static int lpm_cpu_callback(struct notifier_block *cpu_nb,
unsigned long action, void *hcpu);
-static struct notifier_block __refdata msm_lpm_cpu_nblk = {
- .notifier_call = msm_lpm_cpu_callback,
+static struct notifier_block __refdata lpm_cpu_nblk = {
+ .notifier_call = lpm_cpu_callback,
};
static uint32_t allowed_l2_mode;
static uint32_t sysfs_dbg_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
static uint32_t default_l2_mode;
-static bool no_l2_saw;
-static ssize_t msm_lpm_levels_attr_show(
+static ssize_t lpm_levels_attr_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf);
-static ssize_t msm_lpm_levels_attr_store(struct kobject *kobj,
+static ssize_t lpm_levels_attr_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count);
-#define ADJUST_LATENCY(x) \
- ((x == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) ?\
- (num_online_cpus()) / 2 : 0)
-static int msm_lpm_lvl_dbg_msk;
+static int lpm_lvl_dbg_msk;
module_param_named(
- debug_mask, msm_lpm_lvl_dbg_msk, int, S_IRUGO | S_IWUSR | S_IWGRP
+ debug_mask, lpm_lvl_dbg_msk, int, S_IRUGO | S_IWUSR | S_IWGRP
);
-static struct msm_rpmrs_level *msm_lpm_levels;
-static int msm_lpm_level_count;
+static bool menu_select;
+module_param_named(
+ menu_select, menu_select, bool, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static int msm_pm_sleep_time_override;
+module_param_named(sleep_time_override,
+ msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int num_powered_cores;
+static struct hrtimer lpm_hrtimer;
static struct kobj_attribute lpm_l2_kattr = __ATTR(l2, S_IRUGO|S_IWUSR,\
- msm_lpm_levels_attr_show, msm_lpm_levels_attr_store);
+ lpm_levels_attr_show, lpm_levels_attr_store);
static struct attribute *lpm_levels_attr[] = {
&lpm_l2_kattr.attr,
@@ -97,7 +139,7 @@
};
/* SYSFS */
-static ssize_t msm_lpm_levels_attr_show(
+static ssize_t lpm_levels_attr_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct kernel_param kp;
@@ -115,7 +157,7 @@
return rc;
}
-static ssize_t msm_lpm_levels_attr_store(struct kobject *kobj,
+static ssize_t lpm_levels_attr_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
struct kernel_param kp;
@@ -128,64 +170,49 @@
return rc;
sysfs_dbg_l2_mode = temp;
- msm_lpm_level_update();
+ lpm_system_level_update();
return count;
}
-static int msm_pm_get_sleep_mode_value(struct device_node *node,
- const char *key, uint32_t *sleep_mode_val)
+static int msm_pm_get_sleep_mode_value(const char *mode_name)
{
- int i;
- struct lpm_lookup_table {
- uint32_t modes;
- const char *mode_name;
- };
struct lpm_lookup_table pm_sm_lookup[] = {
{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
"wfi"},
- {MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT,
- "ramp_down_and_wfi"},
{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
"standalone_pc"},
{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
"pc"},
{MSM_PM_SLEEP_MODE_RETENTION,
"retention"},
- {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
- "pc_suspend"},
- {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN,
- "pc_no_xo_shutdown"}
};
- int ret;
- const char *mode_name;
+ int i;
+ int ret = -EINVAL;
- ret = of_property_read_string(node, key, &mode_name);
- if (!ret) {
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
- if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
- *sleep_mode_val = pm_sm_lookup[i].modes;
- ret = 0;
- break;
- }
+ for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
+ if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
+ ret = pm_sm_lookup[i].modes;
+ break;
}
}
return ret;
}
-static int msm_lpm_set_l2_mode(int sleep_mode)
+static int lpm_set_l2_mode(struct lpm_system_state *system_state,
+ int sleep_mode)
{
int lpm = sleep_mode;
int rc = 0;
- if (no_l2_saw)
+ if (system_state->no_l2_saw)
goto bail_set_l2_mode;
msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON);
switch (sleep_mode) {
case MSM_SPM_L2_MODE_POWER_COLLAPSE:
+ pr_info("Configuring for L2 power collapse\n");
msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
break;
case MSM_SPM_L2_MODE_GDHS:
@@ -209,244 +236,341 @@
WARN_ON_ONCE(1);
else
pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
- __func__, lpm, rc);
+ __func__, lpm, rc);
}
-
bail_set_l2_mode:
return rc;
}
-static void msm_lpm_level_update(void)
+static void lpm_system_level_update(void)
{
- int lpm_level;
- struct msm_rpmrs_level *level = NULL;
+ int i;
+ struct lpm_system_level *l = NULL;
uint32_t max_l2_mode;
static DEFINE_MUTEX(lpm_lock);
mutex_lock(&lpm_lock);
+ if (num_powered_cores == 1)
+ allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+ else if (sys_state.allow_synched_levels)
+ allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+ else
+ allowed_l2_mode = default_l2_mode;
max_l2_mode = min(allowed_l2_mode, sysfs_dbg_l2_mode);
- for (lpm_level = 0; lpm_level < msm_lpm_level_count; lpm_level++) {
- level = &msm_lpm_levels[lpm_level];
- level->available = !(level->l2_cache > max_l2_mode);
+ for (i = 0; i < sys_state.num_system_levels; i++) {
+ l = &sys_state.system_level[i];
+ l->available = !(l->l2_mode > max_l2_mode);
}
mutex_unlock(&lpm_lock);
}
-int msm_lpm_enter_sleep(uint32_t sclk_count, void *limits,
- bool from_idle, bool notify_rpm)
+static int lpm_system_mode_select(
+ struct lpm_system_state *system_state,
+ uint32_t sleep_us, bool from_idle)
{
- int ret = 0;
- int debug_mask;
- uint32_t l2 = *(uint32_t *)limits;
+ int best_level = -1;
+ int i;
+ uint32_t best_level_pwr = ~0UL;
+ uint32_t pwr;
+ uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- if (from_idle)
- debug_mask = msm_lpm_lvl_dbg_msk &
- MSM_LPM_LVL_DBG_IDLE_LIMITS;
- else
- debug_mask = msm_lpm_lvl_dbg_msk &
- MSM_LPM_LVL_DBG_SUSPEND_LIMITS;
+ if (!system_state->system_level)
+ return -EINVAL;
- if (debug_mask)
- pr_info("%s(): l2:%d", __func__, l2);
+ for (i = 0; i < system_state->num_system_levels; i++) {
+ struct lpm_system_level *system_level =
+ &system_state->system_level[i];
+ struct power_params *pwr_param = &system_level->pwr;
- ret = msm_lpm_set_l2_mode(l2);
+ if (!system_level->available)
+ continue;
- if (ret) {
- if (ret == -ENXIO)
- ret = 0;
- else {
- pr_warn("%s(): Failed to set L2 SPM Mode %d",
- __func__, l2);
- goto bail;
+ if (system_level->sync &&
+ system_level->num_cpu_votes != num_powered_cores)
+ continue;
+
+ if (latency_us < pwr_param->latency_us)
+ continue;
+
+ if (sleep_us < pwr_param->time_overhead_us)
+ continue;
+
+ /*
+ * After the suspend prepare notifications it's possible
+ * for the CPU to enter a system sleep mode. But MPM would have
+ * already requested a XO clock based on the wakeup irqs. To
+ * prevent suspend votes from being overridden by idle irqs, MPM
+ * doesn't send an updated MPM vote after suspend_prepare
+ * callback.
+ * To ensure that XO sleep vote isn't used if and when the
+ * device enters idle PC after suspend prepare callback,
+ * disallow any low power modes that notifies RPM after suspend
+ * prepare function is called
+ */
+ if (suspend_in_progress && system_level->notify_rpm &&
+ from_idle)
+ continue;
+
+ if ((sleep_us >> 10) > pwr_param->time_overhead_us) {
+ pwr = pwr_param->ss_power;
+ } else {
+ pwr = pwr_param->ss_power;
+ pwr -= (pwr_param->time_overhead_us
+ * pwr_param->ss_power) / sleep_us;
+ pwr += pwr_param->energy_overhead / sleep_us;
+ }
+
+ if (best_level_pwr >= pwr) {
+ best_level = i;
+ best_level_pwr = pwr;
}
}
+ return best_level;
+}
- if (notify_rpm) {
- ret = msm_rpm_enter_sleep(debug_mask);
+static void lpm_system_prepare(struct lpm_system_state *system_state,
+ int index, bool from_idle)
+{
+ struct lpm_system_level *lvl;
+ struct clock_event_device *bc = tick_get_broadcast_device()->evtdev;
+ uint32_t sclk;
+ int64_t us = (~0ULL);
+ int dbg_mask;
+ int ret;
+ const struct cpumask *nextcpu;
+
+ spin_lock(&system_state->sync_lock);
+ if (num_powered_cores != system_state->num_cores_in_sync) {
+ spin_unlock(&system_state->sync_lock);
+ return;
+ }
+
+ if (from_idle) {
+ dbg_mask = lpm_lvl_dbg_msk & MSM_LPM_LVL_DBG_IDLE_LIMITS;
+ us = ktime_to_us(ktime_sub(bc->next_event, ktime_get()));
+ nextcpu = bc->cpumask;
+ } else {
+ dbg_mask = lpm_lvl_dbg_msk & MSM_LPM_LVL_DBG_SUSPEND_LIMITS;
+ nextcpu = cpumask_of(smp_processor_id());
+ }
+
+ lvl = &system_state->system_level[index];
+
+ ret = lpm_set_l2_mode(system_state, lvl->l2_mode);
+
+ if (ret && ret != -ENXIO) {
+ pr_warn("%s(): Cannot set L2 Mode %d, ret:%d\n",
+ __func__, lvl->l2_mode, ret);
+ goto bail_system_sleep;
+ }
+
+ if (lvl->notify_rpm) {
+ ret = msm_rpm_enter_sleep(dbg_mask, nextcpu);
if (ret) {
- pr_warn("%s(): RPM failed to enter sleep err:%d\n",
- __func__, ret);
- goto bail;
+ pr_err("rpm_enter_sleep() failed with rc = %d\n", ret);
+ goto bail_system_sleep;
}
- msm_mpm_enter_sleep(sclk_count, from_idle);
+
+ if (!from_idle)
+ us = USEC_PER_SEC * msm_pm_sleep_time_override;
+
+ do_div(us, USEC_PER_SEC/SCLK_HZ);
+ sclk = (uint32_t)us;
+ msm_mpm_enter_sleep(sclk, from_idle, nextcpu);
}
-bail:
- return ret;
-}
-
-static void msm_lpm_exit_sleep(void *limits, bool from_idle,
- bool notify_rpm, bool collapsed)
-{
-
- msm_lpm_set_l2_mode(default_l2_mode);
-
- if (notify_rpm) {
- msm_mpm_exit_sleep(from_idle);
- msm_rpm_exit_sleep();
- }
-}
-
-void msm_lpm_show_resources(void)
-{
- /* TODO */
+ system_state->last_entered_cluster_index = index;
+ spin_unlock(&system_state->sync_lock);
return;
+
+bail_system_sleep:
+ if (default_l2_mode != system_state->system_level[index].l2_mode)
+ lpm_set_l2_mode(system_state, default_l2_mode);
+ spin_unlock(&system_state->sync_lock);
+}
+
+static void lpm_system_unprepare(struct lpm_system_state *system_state,
+ int cpu_index, bool from_idle)
+{
+ int index, i;
+ struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
+ bool first_core_up;
+
+ if (cpu_level->mode < system_state->sync_cpu_mode)
+ return;
+
+ spin_lock(&system_state->sync_lock);
+
+ first_core_up = (system_state->num_cores_in_sync == num_powered_cores);
+
+ system_state->num_cores_in_sync--;
+
+ if (!system_state->system_level)
+ goto unlock_and_return;
+
+ index = system_state->last_entered_cluster_index;
+
+ for (i = 0; i < system_state->num_system_levels; i++) {
+ struct lpm_system_level *system_lvl
+ = &system_state->system_level[i];
+ if (cpu_level->mode >= system_lvl->min_cpu_mode)
+ system_lvl->num_cpu_votes--;
+ }
+
+ if (!first_core_up)
+ goto unlock_and_return;
+
+ if (default_l2_mode != system_state->system_level[index].l2_mode)
+ lpm_set_l2_mode(system_state, default_l2_mode);
+
+ if (system_state->system_level[index].notify_rpm) {
+ msm_rpm_exit_sleep();
+ msm_mpm_exit_sleep(from_idle);
+ }
+unlock_and_return:
+ spin_unlock(&system_state->sync_lock);
}
s32 msm_cpuidle_get_deep_idle_latency(void)
{
int i;
- struct msm_rpmrs_level *level = msm_lpm_levels, *best = level;
+ struct lpm_cpu_level *level = sys_state.cpu_level;
if (!level)
return 0;
- for (i = 0; i < msm_lpm_level_count; i++, level++) {
- if (!level->available)
- continue;
- if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
- continue;
- /* Pick the first power collapse mode by default */
- if (best->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
- best = level;
- /* Find the lowest latency for power collapse */
- if (level->latency_us < best->latency_us)
- best = level;
+ for (i = 0; i < sys_state.num_cpu_levels; i++, level++) {
+ if (level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ break;
}
- return best->latency_us - 1;
+
+ if (i == sys_state.num_cpu_levels)
+ return 0;
+ else
+ return level->pwr.latency_us;
}
-static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
+static int lpm_cpu_callback(struct notifier_block *cpu_nb,
unsigned long action, void *hcpu)
{
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- allowed_l2_mode = default_l2_mode;
- msm_lpm_level_update();
+ ++num_powered_cores;
+ lpm_system_level_update();
break;
- case CPU_DEAD_FROZEN:
case CPU_DEAD:
case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- if (num_online_cpus() == 1)
- allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
- msm_lpm_level_update();
+ num_powered_cores = num_online_cpus();
+ lpm_system_level_update();
+ break;
+ case CPU_ONLINE:
+ smp_call_function_single((unsigned long)hcpu,
+ setup_broadcast_timer, (void *)true, 1);
+ break;
+ default:
break;
}
return NOTIFY_OK;
}
-static void *msm_lpm_lowest_limits(bool from_idle,
- enum msm_pm_sleep_mode sleep_mode,
- struct msm_pm_time_params *time_param, uint32_t *power)
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
{
- unsigned int cpu = smp_processor_id();
- struct msm_rpmrs_level *best_level = NULL;
- uint32_t best_level_pwr = 0;
- uint32_t pwr;
+ return HRTIMER_NORESTART;
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+ u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+ ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+ lpm_hrtimer.function = lpm_hrtimer_cb;
+ hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+static noinline int lpm_cpu_power_select(struct cpuidle_device *dev, int *index)
+{
+ int best_level = -1;
+ uint32_t best_level_pwr = ~0UL;
+ uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ uint32_t sleep_us =
+ (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
+ uint32_t modified_time_us = 0;
+ uint32_t next_event_us = 0;
+ uint32_t power;
int i;
- bool modify_event_timer;
- uint32_t next_wakeup_us = time_param->sleep_us;
- uint32_t lvl_latency_us = 0;
- uint32_t lvl_overhead_us = 0;
- uint32_t lvl_overhead_energy = 0;
- if (!msm_lpm_levels)
- return NULL;
+ if (!sys_state.cpu_level)
+ return -EINVAL;
- for (i = 0; i < msm_lpm_level_count; i++) {
- struct msm_rpmrs_level *level = &msm_lpm_levels[i];
+ if (!dev->cpu)
+ next_event_us = (uint32_t)(ktime_to_us(get_next_event_time()));
- modify_event_timer = false;
+ for (i = 0; i < sys_state.num_cpu_levels; i++) {
+ struct lpm_cpu_level *level = &sys_state.cpu_level[i];
+ struct power_params *pwr = &level->pwr;
+ uint32_t next_wakeup_us = sleep_us;
+ enum msm_pm_sleep_mode mode = level->mode;
+ bool allow;
- if (!level->available)
+ if (level->sync && num_online_cpus() > 1
+ && !sys_state.allow_synched_levels)
continue;
- if (sleep_mode != level->sleep_mode)
+ allow = msm_cpu_pm_check_mode(dev->cpu, mode, true);
+
+ if (!allow)
continue;
- lvl_latency_us =
- level->latency_us + (level->latency_us *
- ADJUST_LATENCY(sleep_mode));
-
- lvl_overhead_us =
- level->time_overhead_us + (level->time_overhead_us *
- ADJUST_LATENCY(sleep_mode));
-
- lvl_overhead_energy =
- level->energy_overhead + level->energy_overhead *
- ADJUST_LATENCY(sleep_mode);
-
- if (time_param->latency_us < lvl_latency_us)
+ if (latency_us < pwr->latency_us)
continue;
- if (time_param->next_event_us &&
- time_param->next_event_us < lvl_latency_us)
- continue;
+ if (next_event_us)
+ if (next_event_us < pwr->latency_us)
+ continue;
- if (time_param->next_event_us) {
- if ((time_param->next_event_us < time_param->sleep_us)
- || ((time_param->next_event_us - lvl_latency_us) <
- time_param->sleep_us)) {
- modify_event_timer = true;
- next_wakeup_us = time_param->next_event_us -
- lvl_latency_us;
+ if (((next_event_us - pwr->latency_us) < sleep_us)
+ || (next_event_us < sleep_us)) {
+ next_wakeup_us = next_event_us
+ - pwr->latency_us;
}
- }
- if (next_wakeup_us <= lvl_overhead_us)
+ if (next_wakeup_us <= pwr->time_overhead_us)
continue;
- if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == sleep_mode)
- || (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == sleep_mode))
- if (!cpu && msm_rpm_waiting_for_ack())
+ if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == mode)
+ || (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode))
+ if (!dev->cpu && msm_rpm_waiting_for_ack())
break;
- if (next_wakeup_us <= 1) {
- pwr = lvl_overhead_energy;
- } else if (next_wakeup_us <= lvl_overhead_us) {
- pwr = lvl_overhead_energy / next_wakeup_us;
- } else if ((next_wakeup_us >> 10)
- > lvl_overhead_us) {
- pwr = level->steady_state_power;
+ if ((next_wakeup_us >> 10) > pwr->latency_us) {
+ power = pwr->ss_power;
} else {
- pwr = level->steady_state_power;
- pwr -= (lvl_overhead_us *
- level->steady_state_power) /
- next_wakeup_us;
- pwr += lvl_overhead_energy / next_wakeup_us;
+ power = pwr->ss_power;
+ power -= (pwr->latency_us * pwr->ss_power)
+ / next_wakeup_us;
+ power += pwr->energy_overhead / next_wakeup_us;
}
- if (!best_level || (best_level_pwr >= pwr)) {
- best_level = level;
- best_level_pwr = pwr;
- if (power)
- *power = pwr;
- if (modify_event_timer &&
- (sleep_mode !=
- MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
- time_param->modified_time_us =
- time_param->next_event_us -
- lvl_latency_us;
+ if (best_level_pwr >= power) {
+ best_level = i;
+ best_level_pwr = power;
+ if (next_event_us < sleep_us &&
+ (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+ modified_time_us = next_event_us
+ - pwr->latency_us;
else
- time_param->modified_time_us = 0;
+ modified_time_us = 0;
}
}
- return best_level ? &best_level->l2_cache : NULL;
+ if (modified_time_us && !dev->cpu)
+ msm_pm_set_timer(modified_time_us);
+
+ return best_level;
}
-static struct msm_pm_sleep_ops msm_lpm_ops = {
- .lowest_limits = msm_lpm_lowest_limits,
- .enter_sleep = msm_lpm_enter_sleep,
- .exit_sleep = msm_lpm_exit_sleep,
-};
-
-static int msm_lpm_get_l2_cache_value(struct device_node *node,
- char *key, uint32_t *l2_val)
+static int lpm_get_l2_cache_value(const char *l2_str)
{
int i;
struct lpm_lookup_table l2_mode_lookup[] = {
@@ -456,24 +580,14 @@
{MSM_SPM_L2_MODE_RETENTION, "l2_cache_retention"},
{MSM_SPM_L2_MODE_DISABLED, "l2_cache_active"}
};
- const char *l2_str;
- int ret;
- ret = of_property_read_string(node, key, &l2_str);
- if (!ret) {
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(l2_mode_lookup); i++) {
- if (!strcmp(l2_str, l2_mode_lookup[i].mode_name)) {
- *l2_val = l2_mode_lookup[i].modes;
- ret = 0;
- break;
- }
- }
- }
- return ret;
+ for (i = 0; i < ARRAY_SIZE(l2_mode_lookup); i++)
+ if (!strcmp(l2_str, l2_mode_lookup[i].mode_name))
+ return l2_mode_lookup[i].modes;
+ return -EINVAL;
}
-static int __devinit msm_lpm_levels_sysfs_add(void)
+static int lpm_levels_sysfs_add(void)
{
struct kobject *module_kobj = NULL;
struct kobject *low_power_kobj = NULL;
@@ -500,126 +614,548 @@
if (rc) {
if (low_power_kobj) {
sysfs_remove_group(low_power_kobj,
- &lpm_levels_attr_grp);
+ &lpm_levels_attr_grp);
kobject_del(low_power_kobj);
}
}
return rc;
}
-
-static int __devinit msm_lpm_levels_probe(struct platform_device *pdev)
+static int lpm_cpu_menu_select(struct cpuidle_device *dev, int *index)
{
-	struct msm_rpmrs_level *levels = NULL;
-	struct msm_rpmrs_level *level = NULL;
+	int j;
+
+	for (; *index >= 0; (*index)--) {
+		/* NOTE(review): 'mode' is never derived from *index in this
+		 * loop, so every iteration checks mode 0 — confirm intended
+		 * index-to-mode mapping.
+		 */
+		int mode = 0;
+		bool allow = false;
+
+		allow = msm_cpu_pm_check_mode(dev->cpu, mode, true);
+
+		if (!allow)
+			continue;
+
+		/* Last valid entry is num_cpu_levels - 1; starting at
+		 * num_cpu_levels read one element past the end of
+		 * cpu_level[].
+		 */
+		for (j = sys_state.num_cpu_levels - 1; j >= 0; j--) {
+			struct lpm_cpu_level *l = &sys_state.cpu_level[j];
+			if (mode == l->mode)
+				return j;
+		}
+	}
+	return -EPERM;
+}
+
+static inline void lpm_cpu_prepare(struct lpm_system_state *system_state,
+		int cpu_index, bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
+	unsigned int cpu = smp_processor_id();
+
+	/* Use broadcast timer for aggregating sleep mode within a cluster.
+	 * A broadcast timer could be used because of a hardware restriction
+	 * or to ensure that the broadcast timer is used in case a cpu mode
+	 * could trigger a cluster level sleep
+	 */
+	if (from_idle && (cpu_level->use_bc_timer ||
+			(cpu_level->mode >= system_state->sync_cpu_mode)))
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+}
+
+static inline void lpm_cpu_unprepare(struct lpm_system_state *system_state,
+ int cpu_index, bool from_idle)
+{
+ struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
+ unsigned int cpu = smp_processor_id();
+
+ if (from_idle && (cpu_level->use_bc_timer ||
+ (cpu_level->mode >= system_state->sync_cpu_mode)))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+}
+
+static int lpm_system_select(struct lpm_system_state *system_state,
+ int cpu_index, bool from_idle)
+{
+ uint64_t us = (~0ULL);
+ struct clock_event_device *ed;
+ struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
+ int i;
+ bool last_core_down;
+
+ if (cpu_level->mode < system_state->sync_cpu_mode)
+ return -EINVAL;
+
+ spin_lock(&system_state->sync_lock);
+
+ last_core_down =
+ (++system_state->num_cores_in_sync == num_powered_cores);
+
+ if (!system_state->system_level) {
+ spin_unlock(&system_state->sync_lock);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < system_state->num_system_levels; i++) {
+ struct lpm_system_level *system_lvl =
+ &system_state->system_level[i];
+ if (cpu_level->mode >= system_lvl->min_cpu_mode)
+ system_lvl->num_cpu_votes++;
+ }
+ spin_unlock(&system_state->sync_lock);
+
+ if (!last_core_down)
+ return -EBUSY;
+
+ ed = tick_get_broadcast_device()->evtdev;
+ if (!ed)
+ return -EINVAL;
+
+ if (from_idle)
+ us = ktime_to_us(ktime_sub(ed->next_event, ktime_get()));
+ else
+ us = (~0ULL);
+
+ return lpm_system_mode_select(system_state, (uint32_t)(us), from_idle);
+}
+
+static void lpm_enter_low_power(struct lpm_system_state *system_state,
+		int cpu_index, bool from_idle)
+{
+	int idx;
+	/* The initializer already selects the cpu level; the previous
+	 * duplicate re-assignment of cpu_level was redundant.
+	 */
+	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
+
+	lpm_cpu_prepare(system_state, cpu_index, from_idle);
+
+	idx = lpm_system_select(system_state, cpu_index, from_idle);
+
+	if (idx >= 0)
+		lpm_system_prepare(system_state, idx, from_idle);
+
+	msm_cpu_pm_enter_sleep(cpu_level->mode, from_idle);
+
+	lpm_system_unprepare(system_state, cpu_index, from_idle);
+
+	lpm_cpu_unprepare(system_state, cpu_index, from_idle);
+}
+
+static int lpm_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ int64_t time = ktime_to_ns(ktime_get());
+ int idx;
+
+ idx = menu_select ? lpm_cpu_menu_select(dev, &index) :
+ lpm_cpu_power_select(dev, &index);
+ if (idx < 0) {
+ local_irq_enable();
+ return -EPERM;
+ }
+
+ lpm_enter_low_power(&sys_state, idx, true);
+
+ time = ktime_to_ns(ktime_get()) - time;
+ do_div(time, 1000);
+ dev->last_residency = (int)time;
+ local_irq_enable();
+ return index;
+}
+
+static int lpm_suspend_enter(suspend_state_t state)
+{
+ int i;
+
+ for (i = sys_state.num_cpu_levels - 1; i >= 0; i--) {
+ bool allow = msm_cpu_pm_check_mode(smp_processor_id(),
+ sys_state.cpu_level[i].mode, false);
+ if (allow)
+ break;
+ }
+
+ if (i < 0)
+ return -EINVAL;
+
+ lpm_enter_low_power(&sys_state, i, false);
+
+ return 0;
+}
+
+static int lpm_suspend_prepare(void)
+{
+ suspend_in_progress = true;
+ msm_mpm_suspend_prepare();
+ return 0;
+}
+
+static void lpm_suspend_wake(void)
+{
+ msm_mpm_suspend_wake();
+ suspend_in_progress = false;
+}
+
+static struct platform_device lpm_dev = {
+ .name = "msm_pm",
+ .id = -1,
+};
+
+static const struct platform_suspend_ops lpm_suspend_ops = {
+ .enter = lpm_suspend_enter,
+ .valid = suspend_valid_only_mem,
+ .prepare_late = lpm_suspend_prepare,
+ .wake = lpm_suspend_wake,
+};
+
+static void setup_broadcast_timer(void *arg)
+{
+ unsigned long reason = (unsigned long)arg;
+ int cpu = smp_processor_id();
+
+ reason = reason ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &cpu);
+}
+
+static struct cpuidle_driver msm_cpuidle_driver = {
+ .name = "msm_idle",
+ .owner = THIS_MODULE,
+};
+
+static void lpm_cpuidle_init(void)
+{
+	int i = 0;
+	int state_count = 0;
+
+	if (!sys_state.cpu_level)
+		return;
+	BUG_ON(sys_state.num_cpu_levels > CPUIDLE_STATE_MAX);
+
+	for (i = 0; i < sys_state.num_cpu_levels; i++) {
+		struct cpuidle_state *st = &msm_cpuidle_driver.states[i];
+		struct lpm_cpu_level *cpu_level = &sys_state.cpu_level[i];
+		/* NOTE(review): the embedded '\n' in the state name shows up
+		 * in sysfs output — confirm it is intentional.
+		 */
+		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+		/* Never pass DT-provided text as a format string; a '%' in
+		 * the node name would be interpreted as a conversion.
+		 */
+		snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
+		st->flags = 0;
+		st->exit_latency = cpu_level->pwr.latency_us;
+		st->power_usage = cpu_level->pwr.ss_power;
+		st->target_residency = 0;
+		st->enter = lpm_cpuidle_enter;
+		state_count++;
+	}
+	msm_cpuidle_driver.state_count = state_count;
+	msm_cpuidle_driver.safe_state_index = 0;
+
+	if (cpuidle_register(&msm_cpuidle_driver, NULL))
+		pr_err("%s(): Failed to register CPUIDLE device\n", __func__);
+}
+
+static int lpm_parse_power_params(struct device_node *node,
+ struct power_params *pwr)
+{
+ char *key;
+ int ret;
+
+ key = "qcom,latency-us";
+ ret = of_property_read_u32(node, key, &pwr->latency_us);
+ if (ret)
+ goto fail;
+
+ key = "qcom,ss-power";
+ ret = of_property_read_u32(node, key, &pwr->ss_power);
+ if (ret)
+ goto fail;
+
+ key = "qcom,energy-overhead";
+ ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+ if (ret)
+ goto fail;
+
+ key = "qcom,time-overhead";
+ ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+ if (ret)
+ goto fail;
+fail:
+ if (ret)
+ pr_err("%s(): Error reading %s\n", __func__, key);
+ return ret;
+}
+
+static int lpm_cpu_probe(struct platform_device *pdev)
+{
+ struct lpm_cpu_level *level = NULL, *l;
struct device_node *node = NULL;
- char *key = NULL;
- uint32_t val = 0;
- int ret = 0;
- uint32_t num_levels = 0;
- int idx = 0;
+ int num_levels = 0;
+ char *key;
+ int ret;
for_each_child_of_node(pdev->dev.of_node, node)
num_levels++;
- levels = kzalloc(num_levels * sizeof(struct msm_rpmrs_level),
+ level = kzalloc(num_levels * sizeof(struct lpm_cpu_level),
GFP_KERNEL);
- if (!levels)
+
+ if (!level)
return -ENOMEM;
+ l = &level[0];
for_each_child_of_node(pdev->dev.of_node, node) {
- level = &levels[idx++];
- level->available = false;
key = "qcom,mode";
- ret = msm_pm_get_sleep_mode_value(node, key, &val);
- if (ret)
+ ret = of_property_read_string(node, key, &l->name);
+
+		if (ret) {
+			pr_err("%s(): Cannot read cpu mode %s\n", __func__,
+				key);
 			goto fail;
-		level->sleep_mode = val;
+		}
+
+		l->mode = msm_pm_get_sleep_mode_value(l->name);
+
+		if (l->mode < 0) {
+			pr_err("%s():Cannot parse cpu mode:%s\n", __func__,
+				l->name);
+			/* ret is 0 from the successful string read above;
+			 * without this the probe frees the levels yet
+			 * reports success to the driver core.
+			 */
+			ret = -EINVAL;
+			goto fail;
+		}
+
+ if (l->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ l->sync = true;
+
+ key = "qcom,use-broadcast-timer";
+ l->use_bc_timer = of_property_read_bool(node, key);
+
+ ret = lpm_parse_power_params(node, &l->pwr);
+ if (ret) {
+ pr_err("%s(): cannot Parse power params\n", __func__);
+ goto fail;
+ }
+ l++;
+ }
+ sys_state.cpu_level = level;
+ sys_state.num_cpu_levels = num_levels;
+ return ret;
+fail:
+ kfree(level);
+ return ret;
+}
+
+static int lpm_system_probe(struct platform_device *pdev)
+{
+ struct lpm_system_level *level = NULL, *l;
+ int num_levels = 0;
+ struct device_node *node;
+ char *key;
+ int ret;
+
+ for_each_child_of_node(pdev->dev.of_node, node)
+ num_levels++;
+
+ level = kzalloc(num_levels * sizeof(struct lpm_system_level),
+ GFP_KERNEL);
+
+ if (!level)
+ return -ENOMEM;
+
+ l = &level[0];
+ for_each_child_of_node(pdev->dev.of_node, node) {
key = "qcom,l2";
- ret = msm_lpm_get_l2_cache_value(node, key, &val);
- if (ret)
+ ret = of_property_read_string(node, key, &l->name);
+ if (ret) {
+ pr_err("%s(): Failed to read L2 mode\n", __func__);
goto fail;
- level->l2_cache = val;
+ }
- key = "qcom,latency-us";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
+		l->l2_mode = lpm_get_l2_cache_value(l->name);
+
+		if (l->l2_mode < 0) {
+			pr_err("%s(): Failed to read l2 cache mode\n",
+					__func__);
+			/* ret is still 0 here; surface a real error so the
+			 * probe does not report success on a bad DT entry.
+			 */
+			ret = -EINVAL;
 			goto fail;
-		level->latency_us = val;
+		}
- key = "qcom,ss-power";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
+ if (l->l2_mode == MSM_SPM_L2_MODE_GDHS ||
+ l->l2_mode == MSM_SPM_L2_MODE_POWER_COLLAPSE)
+ l->notify_rpm = true;
+
+ if (l->l2_mode >= MSM_SPM_L2_MODE_GDHS)
+ l->sync = true;
+
+ ret = lpm_parse_power_params(node, &l->pwr);
+ if (ret) {
+ pr_err("%s(): Failed to parse power params\n",
+ __func__);
goto fail;
- level->steady_state_power = val;
+ }
- key = "qcom,energy-overhead";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
- goto fail;
- level->energy_overhead = val;
+ key = "qcom,sync-cpus";
+ l->sync = of_property_read_bool(node, key);
- key = "qcom,time-overhead";
- ret = of_property_read_u32(node, key, &val);
- if (ret)
- goto fail;
- level->time_overhead_us = val;
+		if (l->sync) {
+			const char *name;
-		level->available = true;
+			key = "qcom,min-cpu-mode";
+			ret = of_property_read_string(node, key, &name);
+			if (ret) {
+				/* 'name' is uninitialized when the read
+				 * fails; log the missing key instead.
+				 */
+				pr_err("%s(): Required key %s not found\n",
+					__func__, key);
+				goto fail;
+			}
+
+			l->min_cpu_mode = msm_pm_get_sleep_mode_value(name);
+
+			if (l->min_cpu_mode < 0) {
+				pr_err("%s(): Cannot parse cpu mode:%s\n",
+					__func__, name);
+				/* ret is 0 here; report the parse failure */
+				ret = -EINVAL;
+				goto fail;
+			}
+
+			if (l->min_cpu_mode < sys_state.sync_cpu_mode)
+				sys_state.sync_cpu_mode = l->min_cpu_mode;
+		}
+
+ l++;
}
+ sys_state.system_level = level;
+ sys_state.num_system_levels = num_levels;
+ return ret;
+fail:
+ kfree(level);
+ return ret;
+}
+
+static int lpm_probe(struct platform_device *pdev)
+{
+ struct device_node *node = NULL;
+ char *key = NULL;
+ int ret;
node = pdev->dev.of_node;
+
+ key = "qcom,allow-synced-levels";
+ sys_state.allow_synched_levels = of_property_read_bool(node, key);
+
key = "qcom,no-l2-saw";
- no_l2_saw = of_property_read_bool(node, key);
+ sys_state.no_l2_saw = of_property_read_bool(node, key);
- msm_lpm_levels = levels;
- msm_lpm_level_count = idx;
+ sys_state.sync_cpu_mode = MSM_PM_SLEEP_MODE_NR;
+ spin_lock_init(&sys_state.sync_lock);
+ sys_state.num_cores_in_sync = 0;
- if (num_online_cpus() == 1)
- allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ if (ret)
+ goto fail;
/* Do the following two steps only if L2 SAW is present */
- if (!no_l2_saw) {
- key = "qcom,default-l2-state";
- if (msm_lpm_get_l2_cache_value(node, key, &default_l2_mode))
- goto fail;
+ num_powered_cores = num_online_cpus();
- if (msm_lpm_levels_sysfs_add())
+ if (!sys_state.no_l2_saw) {
+ int ret;
+ const char *l2;
+ key = "qcom,default-l2-state";
+ ret = of_property_read_string(node, key, &l2);
+ if (ret) {
+ pr_err("%s(): Failed to read default L2 mode\n",
+ __func__);
goto fail;
- register_hotcpu_notifier(&msm_lpm_cpu_nblk);
- msm_pm_set_l2_flush_flag(0);
+ }
+
+ default_l2_mode = lpm_get_l2_cache_value(l2);
+ if (default_l2_mode < 0) {
+ pr_err("%s(): Unable to parse default L2 mode\n",
+ __func__);
+ goto fail;
+ }
+
+ if (lpm_levels_sysfs_add())
+ goto fail;
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON);
} else {
- msm_pm_set_l2_flush_flag(1);
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
default_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
}
- msm_lpm_level_update();
- msm_pm_set_sleep_ops(&msm_lpm_ops);
+ get_cpu();
+ on_each_cpu(setup_broadcast_timer, (void *)true, 1);
+ put_cpu();
+
+ register_hotcpu_notifier(&lpm_cpu_nblk);
+
+ lpm_system_level_update();
+ platform_device_register(&lpm_dev);
+ suspend_set_ops(&lpm_suspend_ops);
+ hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lpm_cpuidle_init();
return 0;
fail:
pr_err("%s: Error in name %s key %s\n", __func__, node->full_name, key);
- kfree(levels);
return -EFAULT;
}
-static struct of_device_id msm_lpm_levels_match_table[] = {
+static struct of_device_id cpu_modes_mtch_tbl[] = {
+ {.compatible = "qcom,cpu-modes"},
+ {},
+};
+
+static struct platform_driver cpu_modes_driver = {
+ .probe = lpm_cpu_probe,
+ .driver = {
+ .name = "cpu-modes",
+ .owner = THIS_MODULE,
+ .of_match_table = cpu_modes_mtch_tbl,
+ },
+};
+
+static struct of_device_id system_modes_mtch_tbl[] = {
+ {.compatible = "qcom,system-modes"},
+ {},
+};
+
+static struct platform_driver system_modes_driver = {
+ .probe = lpm_system_probe,
+ .driver = {
+ .name = "system-modes",
+ .owner = THIS_MODULE,
+ .of_match_table = system_modes_mtch_tbl,
+ },
+};
+
+static struct of_device_id lpm_levels_match_table[] = {
{.compatible = "qcom,lpm-levels"},
{},
};
-static struct platform_driver msm_lpm_levels_driver = {
- .probe = msm_lpm_levels_probe,
+static struct platform_driver lpm_levels_driver = {
+ .probe = lpm_probe,
.driver = {
.name = "lpm-levels",
.owner = THIS_MODULE,
- .of_match_table = msm_lpm_levels_match_table,
+ .of_match_table = lpm_levels_match_table,
},
};
-static int __init msm_lpm_levels_module_init(void)
+static int __init lpm_levels_module_init(void)
{
- return platform_driver_register(&msm_lpm_levels_driver);
+ int rc;
+ rc = platform_driver_register(&cpu_modes_driver);
+ if (rc) {
+ pr_err("Error registering %s\n", cpu_modes_driver.driver.name);
+ goto fail;
+ }
+
+ rc = platform_driver_register(&system_modes_driver);
+ if (rc) {
+ platform_driver_unregister(&cpu_modes_driver);
+ pr_err("Error registering %s\n",
+ system_modes_driver.driver.name);
+ goto fail;
+ }
+
+ rc = platform_driver_register(&lpm_levels_driver);
+ if (rc) {
+ platform_driver_unregister(&cpu_modes_driver);
+ platform_driver_unregister(&system_modes_driver);
+ pr_err("Error registering %s\n",
+ lpm_levels_driver.driver.name);
+ }
+fail:
+ return rc;
}
-late_initcall(msm_lpm_levels_module_init);
+late_initcall(lpm_levels_module_init);
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index e364393..5b351b4 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -505,7 +505,9 @@
return msm_mpm_interrupts_detectable(MSM_MPM_GIC_IRQ_DOMAIN,
from_idle);
}
-void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
+
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle,
+ const struct cpumask *cpumask)
{
cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;
@@ -522,6 +524,7 @@
}
msm_mpm_set(wakeup, !from_idle);
+ irq_set_affinity(msm_mpm_dev_data.mpm_ipc_irq, cpumask);
}
void msm_mpm_exit_sleep(bool from_idle)
diff --git a/arch/arm/mach-msm/msm-pm.c b/arch/arm/mach-msm/msm-pm.c
new file mode 100644
index 0000000..5f8657e
--- /dev/null
+++ b/arch/arm/mach-msm/msm-pm.c
@@ -0,0 +1,1230 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/smp.h>
+#include <linux/tick.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/cpu_pm.h>
+#include <asm/uaccess.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <mach/scm.h>
+#include <mach/msm_bus.h>
+#include <mach/jtag.h>
+#include "acpuclock.h"
+#include "avs.h"
+#include "idle.h"
+#include "pm.h"
+#include "scm-boot.h"
+#include "spm.h"
+#include "pm-boot.h"
+
+#define CREATE_TRACE_POINTS
+#include <mach/trace_msm_low_power.h>
+
+#define SCM_CMD_TERMINATE_PC (0x2)
+#define SCM_CMD_CORE_HOTPLUGGED (0x10)
+
+#define GET_CPU_OF_ATTR(attr) \
+ (container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
+
+#define SCLK_HZ (32768)
+
+#define MAX_BUF_SIZE 512
+
+static int msm_pm_debug_mask = 1;
+module_param_named(
+ debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static bool use_acpuclk_apis;
+
+enum {
+ MSM_PM_DEBUG_SUSPEND = BIT(0),
+ MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
+ MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
+ MSM_PM_DEBUG_CLOCK = BIT(3),
+ MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
+ MSM_PM_DEBUG_IDLE_CLK = BIT(5),
+ MSM_PM_DEBUG_IDLE = BIT(6),
+ MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
+ MSM_PM_DEBUG_HOTPLUG = BIT(8),
+};
+
+enum msm_pc_count_offsets {
+ MSM_PC_ENTRY_COUNTER,
+ MSM_PC_EXIT_COUNTER,
+ MSM_PC_FALLTHRU_COUNTER,
+ MSM_PC_NUM_COUNTERS,
+};
+
+enum {
+ MSM_PM_MODE_ATTR_SUSPEND,
+ MSM_PM_MODE_ATTR_IDLE,
+ MSM_PM_MODE_ATTR_NR,
+};
+
+static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
+ [MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
+ [MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
+};
+
+struct msm_pm_kobj_attribute {
+ unsigned int cpu;
+ struct kobj_attribute ka;
+};
+
+struct msm_pm_sysfs_sleep_mode {
+ struct kobject *kobj;
+ struct attribute_group attr_group;
+ struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
+ struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
+};
+
+static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
+ [MSM_PM_SLEEP_MODE_RETENTION] = "retention",
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+ "standalone_power_collapse",
+};
+
+static bool msm_pm_ldo_retention_enabled = true;
+static bool msm_no_ramp_down_pc;
+static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+DEFINE_PER_CPU(struct clk *, cpu_clks);
+static struct clk *l2_clk;
+
+static void (*msm_pm_disable_l2_fn)(void);
+static void (*msm_pm_enable_l2_fn)(void);
+static void (*msm_pm_flush_l2_fn)(void);
+static void __iomem *msm_pc_debug_counters;
+
+/*
+ * Default the l2 flush flag to OFF so the caches are flushed during power
+ * collapse unless the explicitly voted by lpm driver.
+ */
+static enum msm_pm_l2_scm_flag msm_pm_flush_l2_flag = MSM_SCM_L2_OFF;
+
+void msm_pm_set_l2_flush_flag(enum msm_pm_l2_scm_flag flag)
+{
+ msm_pm_flush_l2_flag = flag;
+}
+EXPORT_SYMBOL(msm_pm_set_l2_flush_flag);
+
+static enum msm_pm_l2_scm_flag msm_pm_get_l2_flush_flag(void)
+{
+ return msm_pm_flush_l2_flag;
+}
+
+static cpumask_t retention_cpus;
+static DEFINE_SPINLOCK(retention_lock);
+
+static int msm_pm_get_pc_mode(struct device_node *node,
+ const char *key, uint32_t *pc_mode_val)
+{
+ struct pc_mode_of {
+ uint32_t mode;
+ char *mode_name;
+ };
+ int i;
+ struct pc_mode_of pc_modes[] = {
+ {MSM_PM_PC_TZ_L2_INT, "tz_l2_int"},
+ {MSM_PM_PC_NOTZ_L2_EXT, "no_tz_l2_ext"},
+ {MSM_PM_PC_TZ_L2_EXT , "tz_l2_ext"} };
+ int ret;
+ const char *pc_mode_str;
+ *pc_mode_val = MSM_PM_PC_TZ_L2_INT;
+
+ ret = of_property_read_string(node, key, &pc_mode_str);
+ if (!ret) {
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(pc_modes); i++) {
+ if (!strncmp(pc_mode_str, pc_modes[i].mode_name,
+ strlen(pc_modes[i].mode_name))) {
+ *pc_mode_val = pc_modes[i].mode;
+ ret = 0;
+ break;
+ }
+ }
+ } else {
+ pr_debug("%s: Cannot read %s,defaulting to 0", __func__, key);
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Write out the attribute.
+ */
+static ssize_t msm_pm_mode_attr_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+ struct kernel_param kp;
+ unsigned int cpu;
+ struct msm_pm_platform_data *mode;
+
+ if (msm_pm_sleep_mode_labels[i] == NULL)
+ continue;
+
+ if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
+ continue;
+
+ cpu = GET_CPU_OF_ATTR(attr);
+ mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
+
+ if (!strcmp(attr->attr.name,
+ msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
+ u32 arg = mode->suspend_enabled;
+ kp.arg = &arg;
+ ret = param_get_ulong(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
+ u32 arg = mode->idle_enabled;
+ kp.arg = &arg;
+ ret = param_get_ulong(buf, &kp);
+ }
+
+ break;
+ }
+
+ if (ret > 0) {
+ strlcat(buf, "\n", PAGE_SIZE);
+ ret++;
+ }
+
+ return ret;
+}
+
+static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+ struct kernel_param kp;
+ unsigned int cpu;
+ struct msm_pm_platform_data *mode;
+
+ if (msm_pm_sleep_mode_labels[i] == NULL)
+ continue;
+
+ if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
+ continue;
+
+ cpu = GET_CPU_OF_ATTR(attr);
+ mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
+
+ if (!strcmp(attr->attr.name,
+ msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
+ kp.arg = &mode->suspend_enabled;
+ ret = param_set_byte(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
+ kp.arg = &mode->idle_enabled;
+ ret = param_set_byte(buf, &kp);
+ }
+
+ break;
+ }
+
+ return ret ? ret : count;
+}
+
+static int msm_pm_mode_sysfs_add_cpu(
+ unsigned int cpu, struct kobject *modes_kobj)
+{
+ char cpu_name[8];
+ struct kobject *cpu_kobj;
+ struct msm_pm_sysfs_sleep_mode *mode = NULL;
+ int i, j, k;
+ int ret;
+
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
+ cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
+ if (!cpu_kobj) {
+ pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
+ ret = -ENOMEM;
+ goto mode_sysfs_add_cpu_exit;
+ }
+
+ for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+ int idx = MSM_PM_MODE(cpu, i);
+
+ if ((!msm_pm_sleep_modes[idx].suspend_supported)
+ && (!msm_pm_sleep_modes[idx].idle_supported))
+ continue;
+
+ if (!msm_pm_sleep_mode_labels[i] ||
+ !msm_pm_sleep_mode_labels[i][0])
+ continue;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ pr_err("%s: cannot allocate memory for attributes\n",
+ __func__);
+ ret = -ENOMEM;
+ goto mode_sysfs_add_cpu_exit;
+ }
+
+ mode->kobj = kobject_create_and_add(
+ msm_pm_sleep_mode_labels[i], cpu_kobj);
+ if (!mode->kobj) {
+ pr_err("%s: cannot create kobject\n", __func__);
+ ret = -ENOMEM;
+ goto mode_sysfs_add_cpu_exit;
+ }
+
+ for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
+ if ((k == MSM_PM_MODE_ATTR_IDLE) &&
+ !msm_pm_sleep_modes[idx].idle_supported)
+ continue;
+ if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
+ !msm_pm_sleep_modes[idx].suspend_supported)
+ continue;
+ sysfs_attr_init(&mode->kas[j].ka.attr);
+ mode->kas[j].cpu = cpu;
+ mode->kas[j].ka.attr.mode = 0644;
+ mode->kas[j].ka.show = msm_pm_mode_attr_show;
+ mode->kas[j].ka.store = msm_pm_mode_attr_store;
+ mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
+ mode->attrs[j] = &mode->kas[j].ka.attr;
+ j++;
+ }
+ mode->attrs[j] = NULL;
+
+ mode->attr_group.attrs = mode->attrs;
+ ret = sysfs_create_group(mode->kobj, &mode->attr_group);
+ if (ret) {
+ pr_err("%s: cannot create kobject attribute group\n",
+ __func__);
+ goto mode_sysfs_add_cpu_exit;
+ }
+ }
+
+ ret = 0;
+
+mode_sysfs_add_cpu_exit:
+ if (ret) {
+ if (mode && mode->kobj)
+ kobject_del(mode->kobj);
+ kfree(mode);
+ }
+
+ return ret;
+}
+
+int msm_pm_mode_sysfs_add(void)
+{
+ struct kobject *module_kobj;
+ struct kobject *modes_kobj;
+ unsigned int cpu;
+ int ret;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ ret = -ENOENT;
+ goto mode_sysfs_add_exit;
+ }
+
+ modes_kobj = kobject_create_and_add("modes", module_kobj);
+ if (!modes_kobj) {
+ pr_err("%s: cannot create modes kobject\n", __func__);
+ ret = -ENOMEM;
+ goto mode_sysfs_add_exit;
+ }
+
+ for_each_possible_cpu(cpu) {
+ ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
+ if (ret)
+ goto mode_sysfs_add_exit;
+ }
+
+ ret = 0;
+
+mode_sysfs_add_exit:
+ return ret;
+}
+
+static inline void msm_arch_idle(void)
+{
+ mb();
+ wfi();
+}
+
+static bool msm_pm_is_L1_writeback(void)
+{
+ u32 sel = 0, cache_id;
+
+ asm volatile ("mcr p15, 2, %[ccselr], c0, c0, 0\n\t"
+ "isb\n\t"
+ "mrc p15, 1, %[ccsidr], c0, c0, 0\n\t"
+ :[ccsidr]"=r" (cache_id)
+ :[ccselr]"r" (sel)
+ );
+ return cache_id & BIT(31);
+}
+
+static enum msm_pm_time_stats_id msm_pm_swfi(bool from_idle)
+{
+ msm_arch_idle();
+ return MSM_PM_STAT_IDLE_WFI;
+}
+
+static enum msm_pm_time_stats_id msm_pm_retention(bool from_idle)
+{
+ int ret = 0;
+ int cpu = smp_processor_id();
+
+ spin_lock(&retention_lock);
+
+ if (!msm_pm_ldo_retention_enabled)
+ goto bailout;
+
+ cpumask_set_cpu(cpu, &retention_cpus);
+ spin_unlock(&retention_lock);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_RETENTION, false);
+ WARN_ON(ret);
+
+ msm_arch_idle();
+ spin_lock(&retention_lock);
+ cpumask_clear_cpu(cpu, &retention_cpus);
+bailout:
+ spin_unlock(&retention_lock);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+
+ return MSM_PM_STAT_RETENTION;
+}
+
+static inline void msm_pc_inc_debug_count(uint32_t cpu,
+ enum msm_pc_count_offsets offset)
+{
+ uint32_t cnt;
+
+ if (!msm_pc_debug_counters)
+ return;
+
+ cnt = readl_relaxed(msm_pc_debug_counters + cpu * 4 + offset * 4);
+ writel_relaxed(++cnt, msm_pc_debug_counters + cpu * 4 + offset * 4);
+ mb();
+}
+
+static bool msm_pm_pc_hotplug(void)
+{
+ uint32_t cpu = smp_processor_id();
+
+ if (msm_pm_is_L1_writeback())
+ flush_cache_louis();
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
+ SCM_CMD_CORE_HOTPLUGGED);
+
+ /* Should not return here */
+ msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+ return 0;
+}
+
+/*
+ * cpu_suspend() finisher: runs with the MMU still on, after the CPU
+ * context has been saved.  Flushes the appropriate cache level (all
+ * levels plus an optional outer-cache flush when L2 is going down,
+ * otherwise L1 only if writeback), disables L2 if required, then calls
+ * into TZ to terminate power collapse.  If the SCM call falls through,
+ * L2 is re-enabled and we return so cpu_suspend() reports failure.
+ */
+static int msm_pm_collapse(unsigned long unused)
+{
+ uint32_t cpu = smp_processor_id();
+
+ if (msm_pm_get_l2_flush_flag() == MSM_SCM_L2_OFF) {
+ flush_cache_all();
+ if (msm_pm_flush_l2_fn)
+ msm_pm_flush_l2_fn();
+ } else if (msm_pm_is_L1_writeback())
+ flush_cache_louis();
+
+ if (msm_pm_disable_l2_fn)
+ msm_pm_disable_l2_fn();
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
+ msm_pm_get_l2_flush_flag());
+
+ /* Only reached if the power collapse did not happen. */
+ msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+
+ if (msm_pm_enable_l2_fn)
+ msm_pm_enable_l2_fn();
+
+ return 0;
+}
+
+/*
+ * Core power-collapse sequence shared by idle PC, standalone PC and
+ * hotplug.  Programs the SPM, points the boot vector at the proper
+ * resume entry, saves JTAG state, and either suspends with full
+ * register save (cpu_suspend) or does the hotplug variant.
+ *
+ * @cpu:        this CPU's id (also used for boot vector bookkeeping)
+ * @from_idle:  true when called from cpuidle (adds cpu_pm notifiers)
+ * @notify_rpm: whether the SPM should notify the RPM of this collapse
+ *
+ * Returns true if the CPU actually power collapsed and came back
+ * through the warm-boot path (in which case cpu_init()/local_fiq_enable()
+ * re-initialize per-CPU state).
+ */
+static bool __ref msm_pm_spm_power_collapse(
+ unsigned int cpu, bool from_idle, bool notify_rpm)
+{
+ void *entry;
+ bool collapsed = 0;
+ int ret;
+ /* CPU0, or any CPU entering from idle, must restore its full
+ * register context on resume; a hotplugged secondary restarts
+ * through the secondary boot path instead. */
+ bool save_cpu_regs = !cpu || from_idle;
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: notify_rpm %d\n",
+ cpu, __func__, (int) notify_rpm);
+
+ if (from_idle)
+ cpu_pm_enter();
+
+ ret = msm_spm_set_low_power_mode(
+ MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
+ WARN_ON(ret);
+
+ entry = save_cpu_regs ? cpu_resume : msm_secondary_startup;
+
+ msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));
+
+ if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: program vector to %p\n",
+ cpu, __func__, entry);
+
+ msm_jtag_save_state();
+
+ collapsed = save_cpu_regs ?
+ !cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
+
+ msm_jtag_restore_state();
+
+ if (collapsed) {
+ cpu_init();
+ local_fiq_enable();
+ }
+
+ msm_pm_boot_config_after_pc(cpu);
+
+ if (from_idle)
+ cpu_pm_exit();
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
+ cpu, __func__, collapsed);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+ return collapsed;
+}
+
+/*
+ * Standalone power collapse: collapse this CPU without notifying the
+ * RPM (notify_rpm = false), so system resources stay up.  AVS is
+ * disabled around the collapse and its registers restored afterwards.
+ */
+static enum msm_pm_time_stats_id msm_pm_power_collapse_standalone(
+ bool from_idle)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int avsdscr;
+ unsigned int avscsr;
+ bool collapsed;
+
+ avsdscr = avs_get_avsdscr();
+ avscsr = avs_get_avscsr();
+ avs_set_avscsr(0); /* Disable AVS */
+
+ collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);
+
+ avs_set_avsdscr(avsdscr);
+ avs_set_avscsr(avscsr);
+ return collapsed ? MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE :
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE;
+}
+
+/*
+ * Drop the CPU (and L2) clocks before power collapse.
+ *
+ * Returns the previous clock rate when the acpuclk API path is used
+ * (acpuclk_power_collapse() reports it); returns 0 on the clk-API path,
+ * where no rate needs to be saved because clk_enable() restores state.
+ */
+static int ramp_down_last_cpu(int cpu)
+{
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+ int ret = 0;
+
+ if (use_acpuclk_apis) {
+ ret = acpuclk_power_collapse();
+ if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: change clk rate(old rate = %d)\n",
+ cpu, __func__, ret);
+ } else {
+ clk_disable(cpu_clk);
+ clk_disable(l2_clk);
+ }
+ return ret;
+}
+
+/*
+ * Restore CPU (and L2) clocks after power collapse — the inverse of
+ * ramp_down_last_cpu().
+ *
+ * @saved_rate: rate returned by ramp_down_last_cpu(); only meaningful
+ *              on the acpuclk path.
+ *
+ * Returns 0 on success or a negative errno from the failing clock call.
+ * Note: an L2 clk_enable() failure is logged but does not abort the
+ * CPU clock restore.
+ */
+static int ramp_up_first_cpu(int cpu, int saved_rate)
+{
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+ int rc = 0;
+
+ if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: restore clock rate\n",
+ cpu, __func__);
+
+ if (use_acpuclk_apis) {
+ rc = acpuclk_set_rate(cpu, saved_rate, SETRATE_PC);
+ if (rc)
+ pr_err("CPU:%u: Error restoring cpu clk\n", cpu);
+ } else {
+ if (l2_clk) {
+ rc = clk_enable(l2_clk);
+ if (rc)
+ pr_err("%s(): Error restoring l2 clk\n",
+ __func__);
+ }
+
+ if (cpu_clk) {
+ int ret = clk_enable(cpu_clk);
+
+ if (ret) {
+ pr_err("%s(): Error restoring cpu clk\n",
+ __func__);
+ return ret;
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Full power collapse with RPM notification (notify_rpm = true): the
+ * deepest CPU sleep mode.  Disables AVS and (unless msm_no_ramp_down_pc)
+ * ramps the CPU clock down across the collapse, restoring both after.
+ *
+ * NOTE(review): saved_acpuclk_rate is unsigned long but
+ * ramp_down_last_cpu() returns int — confirm the implicit conversion is
+ * intended on the acpuclk path.
+ */
+static enum msm_pm_time_stats_id msm_pm_power_collapse(bool from_idle)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long saved_acpuclk_rate = 0;
+ unsigned int avsdscr;
+ unsigned int avscsr;
+ bool collapsed;
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: idle %d\n",
+ cpu, __func__, (int)from_idle);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: pre power down\n", cpu, __func__);
+
+ avsdscr = avs_get_avsdscr();
+ avscsr = avs_get_avscsr();
+ avs_set_avscsr(0); /* Disable AVS */
+
+ /* Hotplugged CPUs are ramped down elsewhere; only online CPUs
+ * manage their clock across the collapse. */
+ if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+ saved_acpuclk_rate = ramp_down_last_cpu(cpu);
+
+ collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);
+
+ if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+ ramp_up_first_cpu(cpu, saved_acpuclk_rate);
+
+ avs_set_avsdscr(avsdscr);
+ avs_set_avscsr(avscsr);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: post power up\n", cpu, __func__);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: return\n", cpu, __func__);
+ return collapsed ? MSM_PM_STAT_IDLE_POWER_COLLAPSE :
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
+}
+/******************************************************************************
+ * External Idle/Suspend Functions
+ *****************************************************************************/
+
+/* Intentionally empty: idle entry is driven through msm_cpu_pm_enter_sleep(). */
+void arch_idle(void)
+{
+ return;
+}
+
+/*
+ * Emit the per-mode ftrace event on low-power-mode entry.  Unknown
+ * modes are silently ignored.
+ */
+static inline void msm_pm_ftrace_lpm_enter(unsigned int cpu,
+ uint32_t latency, uint32_t sleep_us,
+ uint32_t wake_up,
+ enum msm_pm_sleep_mode mode)
+{
+ switch (mode) {
+ case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
+ trace_msm_pm_enter_wfi(cpu, latency, sleep_us, wake_up);
+ break;
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
+ trace_msm_pm_enter_spc(cpu, latency, sleep_us, wake_up);
+ break;
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+ trace_msm_pm_enter_pc(cpu, latency, sleep_us, wake_up);
+ break;
+ case MSM_PM_SLEEP_MODE_RETENTION:
+ trace_msm_pm_enter_ret(cpu, latency, sleep_us, wake_up);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Emit the per-mode ftrace event on low-power-mode exit; @success
+ * reports whether the mode was actually entered.
+ */
+static inline void msm_pm_ftrace_lpm_exit(unsigned int cpu,
+ enum msm_pm_sleep_mode mode,
+ bool success)
+{
+ switch (mode) {
+ case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
+ trace_msm_pm_exit_wfi(cpu, success);
+ break;
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
+ trace_msm_pm_exit_spc(cpu, success);
+ break;
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+ trace_msm_pm_exit_pc(cpu, success);
+ break;
+ case MSM_PM_SLEEP_MODE_RETENTION:
+ trace_msm_pm_exit_ret(cpu, success);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Sleep-mode dispatch table.  Each handler performs the mode and returns
+ * the time-stats bucket the sleep duration should be accounted against.
+ * Modes without an entry are NULL and skipped by msm_cpu_pm_enter_sleep().
+ */
+static enum msm_pm_time_stats_id (*execute[MSM_PM_SLEEP_MODE_NR])(bool idle) = {
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = msm_pm_swfi,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+ msm_pm_power_collapse_standalone,
+ [MSM_PM_SLEEP_MODE_RETENTION] = msm_pm_retention,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = msm_pm_power_collapse,
+};
+
+/*
+ * Report whether @mode is usable on @cpu for the given context
+ * (idle vs suspend), based on the per-cpu/per-mode platform data.
+ * Retention is additionally gated by the global runtime enable flag.
+ */
+bool msm_cpu_pm_check_mode(unsigned int cpu, enum msm_pm_sleep_mode mode,
+ bool from_idle)
+{
+ int idx = MSM_PM_MODE(cpu, mode);
+ struct msm_pm_platform_data *d = &msm_pm_sleep_modes[idx];
+
+ if ((mode == MSM_PM_SLEEP_MODE_RETENTION)
+ && !msm_pm_ldo_retention_enabled)
+ return false;
+
+ if (from_idle)
+ return d->idle_enabled && d->idle_supported;
+ else
+ return d->suspend_enabled && d->suspend_supported;
+}
+
+/*
+ * Entry point used by cpuidle/suspend to execute a sleep mode on the
+ * current CPU, timing it and feeding the per-mode statistics.
+ *
+ * NOTE(review): 'collapsed' is hard-coded to 1, so the return value and
+ * the ftrace exit event always report success regardless of what the
+ * handler actually achieved — confirm callers do not rely on it.
+ * NOTE(review): the do_div() result is discarded; 'time' is already
+ * consumed by msm_pm_add_stat() above, so that line looks dead.
+ */
+int msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle)
+{
+ int64_t time;
+ bool collapsed = 1;
+ int exit_stat = -1;
+
+ if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: mode %d\n",
+ smp_processor_id(), __func__, mode);
+ if (!from_idle)
+ pr_info("CPU%u: %s mode:%d\n",
+ smp_processor_id(), __func__, mode);
+
+ time = sched_clock();
+ if (execute[mode])
+ exit_stat = execute[mode](from_idle);
+ time = sched_clock() - time;
+ if (from_idle)
+ msm_pm_ftrace_lpm_exit(smp_processor_id(), mode, collapsed);
+ else
+ exit_stat = MSM_PM_STAT_SUSPEND;
+ if (exit_stat >= 0)
+ msm_pm_add_stat(exit_stat, time);
+ do_div(time, 1000);
+ return collapsed;
+}
+
+/*
+ * Poll the hotplugged CPU's SPM sleep-status register until it reports
+ * the core has power collapsed, up to 10 x 100us.
+ *
+ * Returns 0 when the CPU is confirmed down (or when no status registers
+ * are configured, in which case there is nothing to check); -EBUSY on
+ * timeout.
+ */
+int msm_pm_wait_cpu_shutdown(unsigned int cpu)
+{
+ int timeout = 10;
+
+ if (!msm_pm_slp_sts)
+ return 0;
+ if (!msm_pm_slp_sts[cpu].base_addr)
+ return 0;
+ while (timeout--) {
+ /*
+ * Check for the SPM of the core being hotplugged to set
+ * its sleep state. The SPM sleep state indicates that the
+ * core has been power collapsed.
+ */
+ int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);
+
+ if (acc_sts & msm_pm_slp_sts[cpu].mask)
+ return 0;
+ udelay(100);
+ }
+
+ pr_info("%s(): Timed out waiting for CPU %u SPM to enter sleep state",
+ __func__, cpu);
+ return -EBUSY;
+}
+
+/*
+ * Put a CPU that is going offline into the deepest sleep mode its
+ * suspend configuration allows, falling back through standalone PC,
+ * retention and finally plain WFI.
+ */
+void msm_pm_cpu_enter_lowpower(unsigned int cpu)
+{
+ int i;
+ bool allow[MSM_PM_SLEEP_MODE_NR];
+
+ for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+ struct msm_pm_platform_data *mode;
+
+ mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
+ allow[i] = mode->suspend_supported && mode->suspend_enabled;
+ }
+
+ if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
+ pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);
+
+ if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
+ msm_pm_power_collapse(false);
+ else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
+ msm_pm_power_collapse_standalone(false);
+ else if (allow[MSM_PM_SLEEP_MODE_RETENTION])
+ msm_pm_retention(false);
+ else
+ msm_pm_swfi(false);
+}
+
+/* IPI callback used purely as a wakeup; by the time a core runs this it
+ * has left retention, so no body is needed. */
+static void msm_pm_ack_retention_disable(void *data)
+{
+ /*
+ * This is a NULL function to ensure that the core has woken up
+ * and is safe to disable retention.
+ */
+}
+/**
+ * msm_pm_enable_retention() - Disable/Enable retention on all cores
+ * @enable: Enable/Disable retention
+ *
+ * When disabling, every core currently sitting in retention (tracked in
+ * retention_cpus) is sent a wakeup IPI so none remains in the mode after
+ * this call returns.
+ */
+void msm_pm_enable_retention(bool enable)
+{
+ if (enable == msm_pm_ldo_retention_enabled)
+ return;
+
+ msm_pm_ldo_retention_enabled = enable;
+
+ /*
+ * If retention is being disabled, wake up all online cores to ensure
+ * that none is executing retention. Offlined cores need not be woken
+ * up as they enter the deepest sleep mode, namely RPM assisted power
+ * collapse
+ */
+ if (!enable) {
+ preempt_disable();
+ smp_call_function_many(&retention_cpus,
+ msm_pm_ack_retention_disable,
+ NULL, true);
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(msm_pm_enable_retention);
+
+/*
+ * Probe for the PM system-NOC bus client: register a bus-scale client
+ * from DT-provided pdata and immediately vote for bandwidth (index 1).
+ * Absent pdata is not an error — the device simply has no bus vote.
+ */
+static int msm_pm_snoc_client_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
+ static uint32_t msm_pm_bus_client;
+
+ msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);
+
+ if (msm_pm_bus_pdata) {
+ msm_pm_bus_client =
+ msm_bus_scale_register_client(msm_pm_bus_pdata);
+
+ if (!msm_pm_bus_client) {
+ pr_err("%s: Failed to register SNOC client", __func__);
+ rc = -ENXIO;
+ goto snoc_cl_probe_done;
+ }
+
+ rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);
+
+ if (rc)
+ pr_err("%s: Error setting bus rate", __func__);
+ }
+
+snoc_cl_probe_done:
+ return rc;
+}
+
+/*
+ * Probe for the CPU sleep-status device.  Records each CPU's SPM
+ * sleep-status register (iomem address) and mask in msm_pm_slp_sts[],
+ * which msm_pm_wait_cpu_shutdown() polls to confirm a hotplugged core
+ * has actually power collapsed.  Configuration comes from DT when a
+ * node is present ("qcom,cpu-alias-addr" gives the per-CPU stride from
+ * the base resource), otherwise from legacy platform data.
+ */
+static int msm_cpu_status_probe(struct platform_device *pdev)
+{
+ struct msm_pm_sleep_status_data *pdata;
+ char *key;
+ u32 cpu;
+
+ if (!pdev)
+ return -EFAULT;
+
+ msm_pm_slp_sts = devm_kzalloc(&pdev->dev,
+ sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
+ GFP_KERNEL);
+
+ if (!msm_pm_slp_sts)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ struct resource *res;
+ u32 offset;
+ int rc;
+ u32 mask;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ key = "qcom,cpu-alias-addr";
+ rc = of_property_read_u32(pdev->dev.of_node, key, &offset);
+
+ if (rc)
+ return -ENODEV;
+
+ key = "qcom,sleep-status-mask";
+ rc = of_property_read_u32(pdev->dev.of_node, key, &mask);
+
+ if (rc)
+ return -ENODEV;
+
+ for_each_possible_cpu(cpu) {
+ /* Each CPU's register sits at a fixed alias offset. */
+ phys_addr_t base_c = res->start + cpu * offset;
+ msm_pm_slp_sts[cpu].base_addr =
+ devm_ioremap(&pdev->dev, base_c,
+ resource_size(res));
+ msm_pm_slp_sts[cpu].mask = mask;
+
+ if (!msm_pm_slp_sts[cpu].base_addr)
+ return -ENOMEM;
+ }
+ } else {
+ pdata = pdev->dev.platform_data;
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ msm_pm_slp_sts[cpu].base_addr =
+ pdata->base_addr + cpu * pdata->cpu_offset;
+ msm_pm_slp_sts[cpu].mask = pdata->mask;
+ }
+ }
+
+ return 0;
+}
+
+/* DT match table and platform driver for the CPU sleep-status device. */
+static struct of_device_id msm_slp_sts_match_tbl[] = {
+ {.compatible = "qcom,cpu-sleep-status"},
+ {},
+};
+
+static struct platform_driver msm_cpu_status_driver = {
+ .probe = msm_cpu_status_probe,
+ .driver = {
+ .name = "cpu_slp_status",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_slp_sts_match_tbl,
+ },
+};
+
+/* DT match table and platform driver for the PM SNOC bus client. */
+static struct of_device_id msm_snoc_clnt_match_tbl[] = {
+ {.compatible = "qcom,pm-snoc-client"},
+ {},
+};
+
+static struct platform_driver msm_cpu_pm_snoc_client_driver = {
+ .probe = msm_pm_snoc_client_probe,
+ .driver = {
+ .name = "pm_snoc_client",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_snoc_clnt_match_tbl,
+ },
+};
+
+/*
+ * Common PM setup: create the per-mode sysfs nodes and enable the set
+ * of time-statistics buckets tracked by this driver.
+ *
+ * NOTE(review): the return value of msm_pm_mode_sysfs_add() is ignored;
+ * a sysfs failure is treated as non-fatal here.
+ */
+static int msm_pm_init(void)
+{
+ enum msm_pm_time_stats_id enable_stats[] = {
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ };
+ msm_pm_mode_sysfs_add();
+ msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
+
+ return 0;
+}
+
+/*
+ * Select the L2 flush/disable/resume callbacks used during power
+ * collapse.  The outer-cache flush is always installed; disable/resume
+ * hooks are only needed when the kernel (not TZ) manages the external
+ * L2 (MSM_PM_PC_NOTZ_L2_EXT).
+ */
+static void msm_pm_set_flush_fn(uint32_t pc_mode)
+{
+ msm_pm_disable_l2_fn = NULL;
+ msm_pm_enable_l2_fn = NULL;
+ msm_pm_flush_l2_fn = outer_flush_all;
+
+ if (pc_mode == MSM_PM_PC_NOTZ_L2_EXT) {
+ msm_pm_disable_l2_fn = outer_disable;
+ msm_pm_enable_l2_fn = outer_resume;
+ }
+}
+
+/* Per-open state for the pc_debug_counter debugfs file: the counter
+ * register base plus a formatted text snapshot of all counters. */
+struct msm_pc_debug_counters_buffer {
+ void __iomem *reg; /* counter register base (from inode->i_private) */
+ u32 len; /* bytes of formatted text currently in buf */
+ char buf[MAX_BUF_SIZE];
+};
+
+/*
+ * Read one 32-bit debug counter.  The layout assumed here is four
+ * words per CPU: @index selects the CPU, @offset the counter within it.
+ */
+static inline u32 msm_pc_debug_counters_read_register(
+ void __iomem *reg, int index, int offset)
+{
+ return readl_relaxed(reg + (index * 4 + offset) * 4);
+}
+
+/* Display names for the debug counters, indexed by counter offset
+ * (entry / warm-boot / fall-through); must track MSM_PC_NUM_COUNTERS. */
+static char *counter_name[] = {
+ "PC Entry Counter",
+ "Warmboot Entry Counter",
+ "PC Bailout Counter"
+};
+
+/*
+ * Format every CPU's debug counters into data->buf, appending to any
+ * existing content via data->len.  scnprintf() bounds each write so the
+ * buffer cannot overflow; output is silently truncated if it fills.
+ * Returns the new total length.
+ */
+static int msm_pc_debug_counters_copy(
+ struct msm_pc_debug_counters_buffer *data)
+{
+ int j;
+ u32 stat;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ data->len += scnprintf(data->buf + data->len,
+ sizeof(data->buf)-data->len,
+ "CPU%d\n", cpu);
+
+ for (j = 0; j < MSM_PC_NUM_COUNTERS; j++) {
+ stat = msm_pc_debug_counters_read_register(
+ data->reg, cpu, j);
+ data->len += scnprintf(data->buf + data->len,
+ sizeof(data->buf)-data->len,
+ "\t%s : %d\n", counter_name[j],
+ stat);
+ }
+
+ }
+
+ return data->len;
+}
+
+/*
+ * debugfs read handler: on the first read of an open file (len still 0)
+ * snapshot all per-CPU counters into the private buffer, then serve the
+ * text through simple_read_from_buffer().
+ *
+ * Return type is ssize_t to match file_operations.read.  The original
+ * 'count < 0' check was dropped: count is size_t and can never be
+ * negative.
+ */
+static ssize_t msm_pc_debug_counters_file_read(struct file *file,
+ char __user *bufu, size_t count, loff_t *ppos)
+{
+ struct msm_pc_debug_counters_buffer *data;
+
+ data = file->private_data;
+
+ if (!data)
+ return -EINVAL;
+
+ if (!bufu)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, bufu, count))
+ return -EFAULT;
+
+ /* Fill the snapshot once per open. */
+ if (*ppos >= data->len && data->len == 0)
+ data->len = msm_pc_debug_counters_copy(data);
+
+ return simple_read_from_buffer(bufu, count, ppos,
+ data->buf, data->len);
+}
+
+/*
+ * debugfs open handler: allocate a zeroed per-open counters buffer and
+ * stash the counter register base (passed via inode->i_private) in it.
+ */
+static int msm_pc_debug_counters_file_open(struct inode *inode,
+ struct file *file)
+{
+ struct msm_pc_debug_counters_buffer *buf;
+ void __iomem *msm_pc_debug_counters_reg;
+
+ msm_pc_debug_counters_reg = inode->i_private;
+
+ if (!msm_pc_debug_counters_reg)
+ return -EINVAL;
+
+ file->private_data = kzalloc(
+ sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);
+
+ if (!file->private_data) {
+ /* sizeof yields size_t: print with %zu, not %d. */
+ pr_err("%s: ERROR kzalloc failed to allocate %zu bytes\n",
+ __func__, sizeof(struct msm_pc_debug_counters_buffer));
+
+ return -ENOMEM;
+ }
+
+ buf = file->private_data;
+ buf->reg = msm_pc_debug_counters_reg;
+
+ return 0;
+}
+
+/* debugfs release handler: free the per-open buffer (kfree(NULL) is safe). */
+static int msm_pc_debug_counters_file_close(struct inode *inode,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+/* File operations for the pc_debug_counter debugfs node. */
+static const struct file_operations msm_pc_debug_counters_fops = {
+ .open = msm_pc_debug_counters_file_open,
+ .read = msm_pc_debug_counters_file_read,
+ .release = msm_pc_debug_counters_file_close,
+ .llseek = no_llseek,
+};
+
+/*
+ * Acquire the per-CPU and L2 clock handles used to ramp clocks across
+ * power collapse.  If DT lacks "qcom,cpus-as-clocks", fall back to the
+ * legacy acpuclk APIs instead.  With "qcom,synced-clocks", only CPU0's
+ * clock handle is required and no L2 handle is taken.
+ *
+ * NOTE(review): clk_name has room for a two-digit CPU number
+ * ("cpu??_clk"); cpu >= 100 would be truncated by snprintf — presumably
+ * out of range for these targets, but worth confirming.
+ */
+static int msm_pm_clk_init(struct platform_device *pdev)
+{
+ bool synced_clocks;
+ u32 cpu;
+ char clk_name[] = "cpu??_clk";
+ bool cpu_as_clocks;
+ char *key;
+
+ key = "qcom,cpus-as-clocks";
+ cpu_as_clocks = of_property_read_bool(pdev->dev.of_node, key);
+
+ if (!cpu_as_clocks) {
+ use_acpuclk_apis = true;
+ return 0;
+ }
+
+ key = "qcom,synced-clocks";
+ synced_clocks = of_property_read_bool(pdev->dev.of_node, key);
+
+ for_each_possible_cpu(cpu) {
+ struct clk *clk;
+ snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+ clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(clk)) {
+ /* With synced clocks only CPU0's handle matters. */
+ if (cpu && synced_clocks)
+ return 0;
+ else
+ return PTR_ERR(clk);
+ }
+ per_cpu(cpu_clks, cpu) = clk;
+ }
+
+ if (synced_clocks)
+ return 0;
+
+ l2_clk = devm_clk_get(&pdev->dev, "l2_clk");
+
+ return PTR_RET(l2_clk);
+}
+
+/*
+ * Main pm-8x60 probe.  Maps and zeroes the power-collapse debug counter
+ * region (when a MEM resource exists) and exposes it through debugfs;
+ * then, for DT devices, sets up the CPU clocks and the L2 flush mode
+ * before running common PM init and populating child devices.
+ *
+ * A missing MEM resource is not an error: the probe returns 0 without
+ * doing any setup.
+ */
+static int msm_cpu_pm_probe(struct platform_device *pdev)
+{
+ char *key = NULL;
+ struct dentry *dent = NULL;
+ struct resource *res = NULL;
+ int i;
+ struct msm_pm_init_data_type pdata_local;
+ int ret = 0;
+
+ memset(&pdata_local, 0, sizeof(struct msm_pm_init_data_type));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return 0;
+ msm_pc_debug_counters_phys = res->start;
+ /* Counter region is expected to hold 4 words for each of 4 CPUs. */
+ WARN_ON(resource_size(res) < SZ_64);
+ msm_pc_debug_counters = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (msm_pc_debug_counters) {
+ /* Start all counters from zero. */
+ for (i = 0; i < resource_size(res)/4; i++)
+ __raw_writel(0, msm_pc_debug_counters + i * 4);
+
+ dent = debugfs_create_file("pc_debug_counter", S_IRUGO, NULL,
+ msm_pc_debug_counters,
+ &msm_pc_debug_counters_fops);
+ if (!dent)
+ pr_err("%s: ERROR debugfs_create_file failed\n",
+ __func__);
+ } else {
+ msm_pc_debug_counters = 0;
+ msm_pc_debug_counters_phys = 0;
+ }
+
+ if (pdev->dev.of_node) {
+ enum msm_pm_pc_mode_type pc_mode;
+
+ ret = msm_pm_clk_init(pdev);
+ if (ret) {
+ pr_info("msm_pm_clk_init returned error\n");
+ return ret;
+ }
+
+ key = "qcom,pc-mode";
+ ret = msm_pm_get_pc_mode(pdev->dev.of_node, key, &pc_mode);
+ if (ret) {
+ pr_debug("%s: Error reading key %s", __func__, key);
+ return -EINVAL;
+ }
+ msm_pm_set_flush_fn(pc_mode);
+ }
+
+ msm_pm_init();
+ if (pdev->dev.of_node)
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ return ret;
+}
+
+/* DT match table and platform driver for the main pm-8x60 device. */
+static struct of_device_id msm_cpu_pm_table[] = {
+ {.compatible = "qcom,pm-8x60"},
+ {},
+};
+
+static struct platform_driver msm_cpu_pm_driver = {
+ .probe = msm_cpu_pm_probe,
+ .driver = {
+ .name = "pm-8x60",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cpu_pm_table,
+ },
+};
+
+/*
+ * Driver entry: clear the retention-tracking mask and register the SNOC
+ * client driver followed by the main pm-8x60 driver.
+ *
+ * NOTE(review): if the second registration fails, the first driver is
+ * not unregistered — acceptable for a device_initcall, but asymmetric.
+ */
+static int __init msm_cpu_pm_init(void)
+{
+ int rc;
+
+ cpumask_clear(&retention_cpus);
+
+ rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);
+
+ if (rc) {
+ pr_err("%s(): failed to register driver %s\n", __func__,
+ msm_cpu_pm_snoc_client_driver.driver.name);
+ return rc;
+ }
+
+ return platform_driver_register(&msm_cpu_pm_driver);
+}
+device_initcall(msm_cpu_pm_init);
+
+/* Board-code hook to register the CPU sleep-status driver early;
+ * registration failure is ignored (polling then degrades to a no-op). */
+void __init msm_pm_sleep_status_init(void)
+{
+ platform_driver_register(&msm_cpu_status_driver);
+}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 8b64653..c745f92 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -1578,15 +1578,16 @@
static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
int32_t th, int32_t tm, int32_t tl, uint32_t gp,
- uint32_t gc, bool bke_en)
+ uint32_t gc)
{
int32_t reg_val, val;
+ int32_t bke_reg_val;
int16_t val2;
/* Disable BKE before writing to registers as per spec */
- reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)) &
+ bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)) &
M_BKE_EN_RMSK;
- writel_relaxed((reg_val & ~(M_BKE_EN_EN_BMSK)),
+ writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
M_BKE_EN_ADDR(baddr, mas_index));
/* Write values of registers calculated */
@@ -1624,8 +1625,7 @@
/* Set BKE enable to the value it was */
reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)) &
M_BKE_EN_RMSK;
- val = bke_en << M_BKE_EN_EN_SHFT;
- writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (val &
+ writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (bke_reg_val &
M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(baddr, mas_index));
/* Ensure that all bandwidth register writes have completed
* before returning
@@ -1651,7 +1651,7 @@
/* Only calculate if there's a requested bandwidth and window */
if (qbw->bw && qbw->ws) {
int64_t th, tm, tl;
- uint32_t gp, gc, data_width;
+ uint32_t gp, gc;
int64_t gp_nominal, gp_required, gp_calc, data, temp;
int64_t win = qbw->ws * binfo->qos_freq;
temp = win;
@@ -1666,16 +1666,7 @@
* Calculate max window size, defined by bw request.
* Units: (KHz, MB/s)
*/
- data_width = (readl_relaxed(M_CONFIG_INFO_2_ADDR(
- binfo->base, mas_index)) &
- M_CONFIG_INFO_2_M_DATA_WIDTH_BMSK) >>
- M_CONFIG_INFO_2_M_DATA_WIDTH_SHFT;
-
- /* If unspecified, use data-width 8 by default */
- if (!data_width)
- data_width = 8;
-
- gp_calc = MAX_GC * data_width * binfo->qos_freq * 1000;
+ gp_calc = MAX_GC * binfo->qos_freq * 1000;
gp_required = gp_calc;
bimc_div(&gp_required, qbw->bw);
@@ -1684,7 +1675,7 @@
/* Calculate bandwith in grants and ceil. */
temp = qbw->bw * gp;
- data = data_width * binfo->qos_freq * 1000;
+ data = binfo->qos_freq * 1000;
bimc_div(&temp, data);
gc = min_t(int64_t, MAX_GC, temp);
@@ -1704,12 +1695,10 @@
mas_index, th, tm);
MSM_BUS_DBG("BIMC: tl: %llu gp:%u gc: %u bke_en: %u\n",
tl, gp, gc, bke_en);
- set_qos_bw_regs(binfo->base, mas_index, th, tm, tl, gp,
- gc, bke_en);
+ set_qos_bw_regs(binfo->base, mas_index, th, tm, tl, gp, gc);
} else
/* Clear bandwidth registers */
- set_qos_bw_regs(binfo->base, mas_index, 0, 0, 0, 0, 0,
- bke_en);
+ set_qos_bw_regs(binfo->base, mas_index, 0, 0, 0, 0, 0);
}
static int msm_bus_bimc_allocate_commit_data(struct msm_bus_fabric_registration
@@ -1816,16 +1805,27 @@
kfree(cd);
}
-static void bke_switch(void __iomem *baddr, uint32_t mas_index, bool req)
+static void bke_switch(
+ void __iomem *baddr, uint32_t mas_index, bool req, int mode)
{
uint32_t reg_val, val;
val = req << M_BKE_EN_EN_SHFT;
reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)) &
M_BKE_EN_RMSK;
+ if (val == reg_val)
+ return;
+
+ if (!req && mode == BIMC_QOS_MODE_FIXED)
+ set_qos_mode(baddr, mas_index, 1, 1, 1);
+
writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (val &
M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(baddr, mas_index));
+ /* Make sure BKE on/off goes through before changing priorities */
wmb();
+
+ if (req)
+ set_qos_mode(baddr, mas_index, 0, 0, 0);
}
static void msm_bus_bimc_config_master(
@@ -1854,13 +1854,13 @@
case BIMC_QOS_MODE_FIXED:
for (i = 0; i < ports; i++)
bke_switch(binfo->base, info->node_info->qport[i],
- BKE_OFF);
+ BKE_OFF, mode);
break;
case BIMC_QOS_MODE_REGULATOR:
case BIMC_QOS_MODE_LIMITER:
for (i = 0; i < ports; i++)
bke_switch(binfo->base, info->node_info->qport[i],
- BKE_ON);
+ BKE_ON, mode);
break;
default:
break;
@@ -1969,8 +1969,8 @@
static void bimc_set_static_qos_bw(struct msm_bus_bimc_info *binfo,
int mport, struct msm_bus_bimc_qos_bw *qbw)
{
- int32_t bw_MBps, thh = 0, thm, thl, gc;
- int16_t gp;
+ int32_t bw_mbps, thh = 0, thm, thl, gc;
+ int32_t gp;
u64 temp;
if (binfo->qos_freq == 0) {
@@ -1986,17 +1986,17 @@
/* Convert bandwidth to MBPS */
temp = qbw->bw;
bimc_div(&temp, 1000000);
- bw_MBps = temp;
+ bw_mbps = temp;
/* Grant period in clock cycles
* Grant period from bandwidth structure
- * is in micro seconds, QoS freq is in KHz.
+ * is in nano seconds, QoS freq is in KHz.
* Divide by 1000 to get clock cycles */
- gp = (binfo->qos_freq * qbw->gp) / 1000;
+ gp = (binfo->qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
/* Grant count = BW in MBps * Grant period
* in micro seconds */
- gc = bw_MBps * qbw->gp;
+ gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
/* Medium threshold = -((Medium Threshold percentage *
* Grant count) / 100) */
@@ -2007,8 +2007,10 @@
thl = -gc;
qbw->thl = thl;
- set_qos_bw_regs(binfo->base, mport, thh, thm, thl, gp,
- gc, 1);
+ MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+ __func__, gp, gc, thm, thl, thh);
+
+ set_qos_bw_regs(binfo->base, mport, thh, thm, thl, gp, gc);
}
static void bimc_init_mas_reg(struct msm_bus_bimc_info *binfo,
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
deleted file mode 100644
index c8a6496..0000000
--- a/arch/arm/mach-msm/pm-8x60.c
+++ /dev/null
@@ -1,1845 +0,0 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/completion.h>
-#include <linux/cpuidle.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/ktime.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/smp.h>
-#include <linux/suspend.h>
-#include <linux/tick.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/of_platform.h>
-#include <linux/regulator/krait-regulator.h>
-#include <linux/cpu.h>
-#include <mach/msm_iomap.h>
-#include <mach/socinfo.h>
-#include <mach/system.h>
-#include <mach/scm.h>
-#include <mach/socinfo.h>
-#define CREATE_TRACE_POINTS
-#include <mach/trace_msm_low_power.h>
-#include <mach/msm-krait-l2-accessors.h>
-#include <mach/msm_bus.h>
-#include <mach/mpm.h>
-#include <asm/cacheflush.h>
-#include <asm/hardware/gic.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/outercache.h>
-#ifdef CONFIG_VFP
-#include <asm/vfp.h>
-#endif
-#include "acpuclock.h"
-#include "clock.h"
-#include "avs.h"
-#include <mach/cpuidle.h>
-#include "idle.h"
-#include "pm.h"
-#include "scm-boot.h"
-#include "spm.h"
-#include "timer.h"
-#include "pm-boot.h"
-#include <mach/event_timer.h>
-#include <linux/cpu_pm.h>
-
-#define SCM_L2_RETENTION (0x2)
-#define SCM_CMD_TERMINATE_PC (0x2)
-
-#define GET_CPU_OF_ATTR(attr) \
- (container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
-
-#define SCLK_HZ (32768)
-
-#define NUM_OF_COUNTERS 3
-#define MAX_BUF_SIZE 512
-
-static int msm_pm_debug_mask = 1;
-module_param_named(
- debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
-);
-
-static int msm_pm_sleep_time_override;
-module_param_named(sleep_time_override,
- msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-static bool use_acpuclk_apis;
-
-enum {
- MSM_PM_DEBUG_SUSPEND = BIT(0),
- MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
- MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
- MSM_PM_DEBUG_CLOCK = BIT(3),
- MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
- MSM_PM_DEBUG_IDLE_CLK = BIT(5),
- MSM_PM_DEBUG_IDLE = BIT(6),
- MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
- MSM_PM_DEBUG_HOTPLUG = BIT(8),
-};
-
-enum {
- MSM_PM_MODE_ATTR_SUSPEND,
- MSM_PM_MODE_ATTR_IDLE,
- MSM_PM_MODE_ATTR_NR,
-};
-
-static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
- [MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
- [MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
-};
-
-struct msm_pm_kobj_attribute {
- unsigned int cpu;
- struct kobj_attribute ka;
-};
-
-struct msm_pm_sysfs_sleep_mode {
- struct kobject *kobj;
- struct attribute_group attr_group;
- struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
- struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
-};
-
-static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
- [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
- [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
- [MSM_PM_SLEEP_MODE_RETENTION] = "retention",
- [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
- "standalone_power_collapse",
-};
-
-static struct hrtimer pm_hrtimer;
-static struct msm_pm_sleep_ops pm_sleep_ops;
-static bool msm_pm_ldo_retention_enabled = true;
-static bool msm_pm_use_sync_timer;
-static struct msm_pm_cp15_save_data cp15_data;
-static bool msm_pm_retention_calls_tz;
-static bool msm_no_ramp_down_pc;
-static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
-static bool msm_pm_pc_reset_timer;
-
-DEFINE_PER_CPU(struct clk *, cpu_clks);
-static struct clk *l2_clk;
-
-static int msm_pm_get_pc_mode(struct device_node *node,
- const char *key, uint32_t *pc_mode_val)
-{
- struct pc_mode_of {
- uint32_t mode;
- char *mode_name;
- };
- int i;
- struct pc_mode_of pc_modes[] = {
- {MSM_PM_PC_TZ_L2_INT, "tz_l2_int"},
- {MSM_PM_PC_NOTZ_L2_EXT, "no_tz_l2_ext"},
- {MSM_PM_PC_TZ_L2_EXT , "tz_l2_ext"} };
- int ret;
- const char *pc_mode_str;
-
- ret = of_property_read_string(node, key, &pc_mode_str);
- if (ret) {
- pr_debug("%s: Cannot read %s,defaulting to 0", __func__, key);
- pc_mode_val = MSM_PM_PC_TZ_L2_INT;
- ret = 0;
- } else {
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(pc_modes); i++) {
- if (!strncmp(pc_mode_str, pc_modes[i].mode_name,
- strlen(pc_modes[i].mode_name))) {
- *pc_mode_val = pc_modes[i].mode;
- ret = 0;
- break;
- }
- }
- }
- return ret;
-}
-
-/*
- * Write out the attribute.
- */
-static ssize_t msm_pm_mode_attr_show(
- struct kobject *kobj, struct kobj_attribute *attr, char *buf)
-{
- int ret = -EINVAL;
- int i;
-
- for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
- struct kernel_param kp;
- unsigned int cpu;
- struct msm_pm_platform_data *mode;
-
- if (msm_pm_sleep_mode_labels[i] == NULL)
- continue;
-
- if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
- continue;
-
- cpu = GET_CPU_OF_ATTR(attr);
- mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
-
- if (!strcmp(attr->attr.name,
- msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
- u32 arg = mode->suspend_enabled;
- kp.arg = &arg;
- ret = param_get_ulong(buf, &kp);
- } else if (!strcmp(attr->attr.name,
- msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
- u32 arg = mode->idle_enabled;
- kp.arg = &arg;
- ret = param_get_ulong(buf, &kp);
- }
-
- break;
- }
-
- if (ret > 0) {
- strlcat(buf, "\n", PAGE_SIZE);
- ret++;
- }
-
- return ret;
-}
-
-static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- int ret = -EINVAL;
- int i;
-
- for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
- struct kernel_param kp;
- unsigned int cpu;
- struct msm_pm_platform_data *mode;
-
- if (msm_pm_sleep_mode_labels[i] == NULL)
- continue;
-
- if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
- continue;
-
- cpu = GET_CPU_OF_ATTR(attr);
- mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
-
- if (!strcmp(attr->attr.name,
- msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
- kp.arg = &mode->suspend_enabled;
- ret = param_set_byte(buf, &kp);
- } else if (!strcmp(attr->attr.name,
- msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
- kp.arg = &mode->idle_enabled;
- ret = param_set_byte(buf, &kp);
- }
-
- break;
- }
-
- return ret ? ret : count;
-}
-
-static int __devinit msm_pm_mode_sysfs_add_cpu(
- unsigned int cpu, struct kobject *modes_kobj)
-{
- char cpu_name[8];
- struct kobject *cpu_kobj;
- struct msm_pm_sysfs_sleep_mode *mode = NULL;
- int i, j, k;
- int ret;
-
- snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
- cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
- if (!cpu_kobj) {
- pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
- ret = -ENOMEM;
- goto mode_sysfs_add_cpu_exit;
- }
-
- for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
- int idx = MSM_PM_MODE(cpu, i);
-
- if ((!msm_pm_sleep_modes[idx].suspend_supported)
- && (!msm_pm_sleep_modes[idx].idle_supported))
- continue;
-
- if (!msm_pm_sleep_mode_labels[i] ||
- !msm_pm_sleep_mode_labels[i][0])
- continue;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode) {
- pr_err("%s: cannot allocate memory for attributes\n",
- __func__);
- ret = -ENOMEM;
- goto mode_sysfs_add_cpu_exit;
- }
-
- mode->kobj = kobject_create_and_add(
- msm_pm_sleep_mode_labels[i], cpu_kobj);
- if (!mode->kobj) {
- pr_err("%s: cannot create kobject\n", __func__);
- ret = -ENOMEM;
- goto mode_sysfs_add_cpu_exit;
- }
-
- for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
- if ((k == MSM_PM_MODE_ATTR_IDLE) &&
- !msm_pm_sleep_modes[idx].idle_supported)
- continue;
- if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
- !msm_pm_sleep_modes[idx].suspend_supported)
- continue;
- sysfs_attr_init(&mode->kas[j].ka.attr);
- mode->kas[j].cpu = cpu;
- mode->kas[j].ka.attr.mode = 0644;
- mode->kas[j].ka.show = msm_pm_mode_attr_show;
- mode->kas[j].ka.store = msm_pm_mode_attr_store;
- mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
- mode->attrs[j] = &mode->kas[j].ka.attr;
- j++;
- }
- mode->attrs[j] = NULL;
-
- mode->attr_group.attrs = mode->attrs;
- ret = sysfs_create_group(mode->kobj, &mode->attr_group);
- if (ret) {
- pr_err("%s: cannot create kobject attribute group\n",
- __func__);
- goto mode_sysfs_add_cpu_exit;
- }
- }
-
- ret = 0;
-
-mode_sysfs_add_cpu_exit:
- if (ret) {
- if (mode && mode->kobj)
- kobject_del(mode->kobj);
- kfree(mode);
- }
-
- return ret;
-}
-
-int __devinit msm_pm_mode_sysfs_add(void)
-{
- struct kobject *module_kobj;
- struct kobject *modes_kobj;
- unsigned int cpu;
- int ret;
-
- module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
- if (!module_kobj) {
- pr_err("%s: cannot find kobject for module %s\n",
- __func__, KBUILD_MODNAME);
- ret = -ENOENT;
- goto mode_sysfs_add_exit;
- }
-
- modes_kobj = kobject_create_and_add("modes", module_kobj);
- if (!modes_kobj) {
- pr_err("%s: cannot create modes kobject\n", __func__);
- ret = -ENOMEM;
- goto mode_sysfs_add_exit;
- }
-
- for_each_possible_cpu(cpu) {
- ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
- if (ret)
- goto mode_sysfs_add_exit;
- }
-
- ret = 0;
-
-mode_sysfs_add_exit:
- return ret;
-}
-
-/*
- * Configure hardware registers in preparation for Apps power down.
- */
-static void msm_pm_config_hw_before_power_down(void)
-{
- return;
-}
-
-/*
- * Clear hardware registers after Apps powers up.
- */
-static void msm_pm_config_hw_after_power_up(void)
-{
-}
-
-/*
- * Configure hardware registers in preparation for SWFI.
- */
-static void msm_pm_config_hw_before_swfi(void)
-{
- return;
-}
-
-/*
- * Configure/Restore hardware registers in preparation for Retention.
- */
-
-static void msm_pm_config_hw_after_retention(void)
-{
- int ret;
-
- ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
- WARN_ON(ret);
-}
-
-static void msm_pm_config_hw_before_retention(void)
-{
- return;
-}
-
-static void msm_pm_save_cpu_reg(void)
-{
- int i;
-
- /* Only on core0 */
- if (smp_processor_id())
- return;
-
- /**
- * On some targets, L2 PC will turn off may reset the core
- * configuration for the mux and the default may not make the core
- * happy when it resumes.
- * Save the active vdd, and set the core vdd to QSB max vdd, so that
- * when the core resumes, it is capable of supporting the current QSB
- * rate. Then restore the active vdd before switching the acpuclk rate.
- */
- if (msm_pm_get_l2_flush_flag() == 1) {
- cp15_data.active_vdd = msm_spm_get_vdd(0);
- for (i = 0; i < cp15_data.reg_saved_state_size; i++)
- cp15_data.reg_val[i] =
- get_l2_indirect_reg(
- cp15_data.reg_data[i]);
- msm_spm_set_vdd(0, cp15_data.qsb_pc_vdd);
- }
-}
-
-static void msm_pm_restore_cpu_reg(void)
-{
- int i;
-
- /* Only on core0 */
- if (smp_processor_id())
- return;
-
- if (msm_pm_get_l2_flush_flag() == 1) {
- for (i = 0; i < cp15_data.reg_saved_state_size; i++)
- set_l2_indirect_reg(
- cp15_data.reg_data[i],
- cp15_data.reg_val[i]);
- msm_spm_set_vdd(0, cp15_data.active_vdd);
- }
-}
-
-static void msm_pm_swfi(void)
-{
- msm_pm_config_hw_before_swfi();
- msm_arch_idle();
-}
-
-static void msm_pm_retention(void)
-{
- int ret = 0;
-
- msm_pm_config_hw_before_retention();
- ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_RETENTION, false);
- WARN_ON(ret);
-
- if (msm_pm_retention_calls_tz)
- scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
- SCM_L2_RETENTION);
- else
- msm_arch_idle();
-
- msm_pm_config_hw_after_retention();
-}
-
-static bool __ref msm_pm_spm_power_collapse(
- unsigned int cpu, bool from_idle, bool notify_rpm)
-{
- void *entry;
- bool collapsed = 0;
- int ret;
- bool save_cpu_regs = !cpu || from_idle;
- unsigned int saved_gic_cpu_ctrl;
-
- saved_gic_cpu_ctrl = readl_relaxed(MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
- mb();
-
- if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: notify_rpm %d\n",
- cpu, __func__, (int) notify_rpm);
-
- if (from_idle == true)
- cpu_pm_enter();
-
- ret = msm_spm_set_low_power_mode(
- MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
- WARN_ON(ret);
-
- entry = save_cpu_regs ? msm_pm_collapse_exit : msm_secondary_startup;
-
- msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));
-
- if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
- pr_info("CPU%u: %s: program vector to %p\n",
- cpu, __func__, entry);
- if (from_idle && msm_pm_pc_reset_timer)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
-
-#ifdef CONFIG_VFP
- vfp_pm_suspend();
-#endif
- collapsed = save_cpu_regs ? msm_pm_collapse() : msm_pm_pc_hotplug();
-
- if (from_idle && msm_pm_pc_reset_timer)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
-
- msm_pm_boot_config_after_pc(cpu);
-
- if (collapsed) {
-#ifdef CONFIG_VFP
- vfp_pm_resume();
-#endif
- cpu_init();
- writel(0xF0, MSM_QGIC_CPU_BASE + GIC_CPU_PRIMASK);
- writel_relaxed(saved_gic_cpu_ctrl,
- MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
- mb();
- local_fiq_enable();
- }
-
- if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
- cpu, __func__, collapsed);
-
- ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
- WARN_ON(ret);
-
- if (from_idle == true)
- cpu_pm_exit();
-
- return collapsed;
-}
-
-static bool msm_pm_power_collapse_standalone(bool from_idle)
-{
- unsigned int cpu = smp_processor_id();
- unsigned int avsdscr;
- unsigned int avscsr;
- bool collapsed;
-
- avsdscr = avs_get_avsdscr();
- avscsr = avs_get_avscsr();
- avs_set_avscsr(0); /* Disable AVS */
-
- collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);
-
- avs_set_avsdscr(avsdscr);
- avs_set_avscsr(avscsr);
- return collapsed;
-}
-
-static int ramp_down_last_cpu(int cpu)
-{
- struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
- int ret = 0;
-
- if (use_acpuclk_apis) {
- ret = acpuclk_power_collapse();
- if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
- pr_info("CPU%u: %s: change clk rate(old rate = %d)\n",
- cpu, __func__, ret);
- } else {
- clk_disable(cpu_clk);
- clk_disable(l2_clk);
- }
- return ret;
-}
-
-static int ramp_up_first_cpu(int cpu, int saved_rate)
-{
- struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
- int rc = 0;
-
- if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
- pr_info("CPU%u: %s: restore clock rate\n",
- cpu, __func__);
-
- if (use_acpuclk_apis) {
- rc = acpuclk_set_rate(cpu, saved_rate, SETRATE_PC);
- if (rc)
- pr_err("CPU:%u: Error restoring cpu clk\n", cpu);
- } else {
- if (l2_clk) {
- rc = clk_enable(l2_clk);
- if (rc)
- pr_err("%s(): Error restoring l2 clk\n",
- __func__);
- }
-
- if (cpu_clk) {
- int ret = clk_enable(cpu_clk);
-
- if (ret) {
- pr_err("%s(): Error restoring cpu clk\n",
- __func__);
- return ret;
- }
- }
- }
-
- return rc;
-}
-
-static bool msm_pm_power_collapse(bool from_idle)
-{
- unsigned int cpu = smp_processor_id();
- unsigned long saved_acpuclk_rate = 0;
- unsigned int avsdscr;
- unsigned int avscsr;
- bool collapsed;
-
- if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: idle %d\n",
- cpu, __func__, (int)from_idle);
-
- msm_pm_config_hw_before_power_down();
- if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: pre power down\n", cpu, __func__);
-
- avsdscr = avs_get_avsdscr();
- avscsr = avs_get_avscsr();
- avs_set_avscsr(0); /* Disable AVS */
-
- if (cpu_online(cpu) && !msm_no_ramp_down_pc)
- saved_acpuclk_rate = ramp_down_last_cpu(cpu);
-
- if (cp15_data.save_cp15)
- msm_pm_save_cpu_reg();
-
- collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);
-
- if (cp15_data.save_cp15)
- msm_pm_restore_cpu_reg();
-
- if (cpu_online(cpu) && !msm_no_ramp_down_pc) {
- ramp_up_first_cpu(cpu, saved_acpuclk_rate);
- } else {
- unsigned int gic_dist_enabled;
- unsigned int gic_dist_pending;
- gic_dist_enabled = readl_relaxed(
- MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_CLEAR);
- gic_dist_pending = readl_relaxed(
- MSM_QGIC_DIST_BASE + GIC_DIST_PENDING_SET);
- mb();
- gic_dist_pending &= gic_dist_enabled;
-
- if (gic_dist_pending)
- pr_err("CPU %d interrupted during hotplug.Pending int 0x%x\n",
- cpu, gic_dist_pending);
- }
-
-
- avs_set_avsdscr(avsdscr);
- avs_set_avscsr(avscsr);
- msm_pm_config_hw_after_power_up();
- if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: post power up\n", cpu, __func__);
-
- if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: return\n", cpu, __func__);
- return collapsed;
-}
-
-static int64_t msm_pm_timer_enter_idle(void)
-{
- if (msm_pm_use_sync_timer)
- return ktime_to_ns(tick_nohz_get_sleep_length());
-
- return msm_timer_enter_idle();
-}
-
-static void msm_pm_timer_exit_idle(bool timer_halted)
-{
- if (msm_pm_use_sync_timer)
- return;
-
- msm_timer_exit_idle((int) timer_halted);
-}
-
-static int64_t msm_pm_timer_enter_suspend(int64_t *period)
-{
- int64_t time = 0;
-
- if (msm_pm_use_sync_timer) {
- struct timespec ts;
- getnstimeofday(&ts);
- return timespec_to_ns(&ts);
- }
-
- time = msm_timer_get_sclk_time(period);
- if (!time)
- pr_err("%s: Unable to read sclk.\n", __func__);
-
- return time;
-}
-
-static int64_t msm_pm_timer_exit_suspend(int64_t time, int64_t period)
-{
- if (msm_pm_use_sync_timer) {
- struct timespec ts;
- getnstimeofday(&ts);
-
- return timespec_to_ns(&ts) - time;
- }
-
- if (time != 0) {
- int64_t end_time = msm_timer_get_sclk_time(NULL);
- if (end_time != 0) {
- time = end_time - time;
- if (time < 0)
- time += period;
- } else
- time = 0;
- }
-
- return time;
-}
-
-/**
- * pm_hrtimer_cb() : Callback function for hrtimer created if the
- * core needs to be awake to handle an event.
- * @hrtimer : Pointer to hrtimer
- */
-static enum hrtimer_restart pm_hrtimer_cb(struct hrtimer *hrtimer)
-{
- return HRTIMER_NORESTART;
-}
-
-/**
- * msm_pm_set_timer() : Set an hrtimer to wakeup the core in time
- * to handle an event.
- */
-static void msm_pm_set_timer(uint32_t modified_time_us)
-{
- u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
- ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
- pm_hrtimer.function = pm_hrtimer_cb;
- hrtimer_start(&pm_hrtimer, modified_ktime, HRTIMER_MODE_REL);
-}
-
-/******************************************************************************
- * External Idle/Suspend Functions
- *****************************************************************************/
-
-void arch_idle(void)
-{
- return;
-}
-
-static inline void msm_pm_ftrace_lpm_enter(unsigned int cpu,
- uint32_t latency, uint32_t sleep_us,
- uint32_t wake_up,
- enum msm_pm_sleep_mode mode)
-{
- switch (mode) {
- case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
- trace_msm_pm_enter_wfi(cpu, latency, sleep_us, wake_up);
- break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
- trace_msm_pm_enter_spc(cpu, latency, sleep_us, wake_up);
- break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
- trace_msm_pm_enter_pc(cpu, latency, sleep_us, wake_up);
- break;
- case MSM_PM_SLEEP_MODE_RETENTION:
- trace_msm_pm_enter_ret(cpu, latency, sleep_us, wake_up);
- break;
- default:
- break;
- }
-}
-
-static inline void msm_pm_ftrace_lpm_exit(unsigned int cpu,
- enum msm_pm_sleep_mode mode,
- bool success)
-{
- switch (mode) {
- case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
- trace_msm_pm_exit_wfi(cpu, success);
- break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
- trace_msm_pm_exit_spc(cpu, success);
- break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
- trace_msm_pm_exit_pc(cpu, success);
- break;
- case MSM_PM_SLEEP_MODE_RETENTION:
- trace_msm_pm_exit_ret(cpu, success);
- break;
- default:
- break;
- }
-}
-
-static int msm_pm_idle_prepare(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index,
- void **msm_pm_idle_rs_limits)
-{
- int i;
- unsigned int power_usage = -1;
- int ret = MSM_PM_SLEEP_MODE_NOT_SELECTED;
- uint32_t modified_time_us = 0;
- struct msm_pm_time_params time_param;
-
- time_param.latency_us =
- (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- time_param.sleep_us =
- (uint32_t) (ktime_to_us(tick_nohz_get_sleep_length())
- & UINT_MAX);
- time_param.modified_time_us = 0;
-
- if (!dev->cpu)
- time_param.next_event_us =
- (uint32_t) (ktime_to_us(get_next_event_time())
- & UINT_MAX);
- else
- time_param.next_event_us = 0;
-
- for (i = 0; i < dev->state_count; i++) {
- struct cpuidle_state *state = &drv->states[i];
- struct cpuidle_state_usage *st_usage = &dev->states_usage[i];
- enum msm_pm_sleep_mode mode;
- bool allow;
- uint32_t power;
- int idx;
- void *rs_limits = NULL;
-
- mode = (enum msm_pm_sleep_mode) cpuidle_get_statedata(st_usage);
- idx = MSM_PM_MODE(dev->cpu, mode);
-
- allow = msm_pm_sleep_modes[idx].idle_enabled &&
- msm_pm_sleep_modes[idx].idle_supported;
-
- switch (mode) {
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
- if (num_online_cpus() > 1)
- allow = false;
- break;
- case MSM_PM_SLEEP_MODE_RETENTION:
- /*
- * The Krait BHS regulator doesn't have enough head
- * room to drive the retention voltage on LDO and so
- * has disabled retention
- */
- if (!msm_pm_ldo_retention_enabled)
- allow = false;
-
- if (msm_pm_retention_calls_tz && num_online_cpus() > 1)
- allow = false;
- break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
- case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
- break;
- default:
- allow = false;
- break;
- }
-
- if (!allow)
- continue;
-
- if (pm_sleep_ops.lowest_limits)
- rs_limits = pm_sleep_ops.lowest_limits(true,
- mode, &time_param, &power);
-
- if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
- pr_info("CPU%u:%s:%s, latency %uus, slp %uus, lim %p\n",
- dev->cpu, __func__, state->desc,
- time_param.latency_us,
- time_param.sleep_us, rs_limits);
- if (!rs_limits)
- continue;
-
- if (power < power_usage) {
- power_usage = power;
- modified_time_us = time_param.modified_time_us;
- ret = mode;
- *msm_pm_idle_rs_limits = rs_limits;
- }
-
- }
-
- if (modified_time_us && !dev->cpu)
- msm_pm_set_timer(modified_time_us);
-
- msm_pm_ftrace_lpm_enter(dev->cpu, time_param.latency_us,
- time_param.sleep_us, time_param.next_event_us,
- ret);
-
- return ret;
-}
-
-enum msm_pm_sleep_mode msm_pm_idle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- int64_t time;
- bool collapsed = 1;
- int exit_stat = -1;
- enum msm_pm_sleep_mode sleep_mode;
- void *msm_pm_idle_rs_limits = NULL;
- uint32_t sleep_delay = 1;
- int ret = -ENODEV;
- int notify_rpm = false;
- bool timer_halted = false;
-
- sleep_mode = msm_pm_idle_prepare(dev, drv, index,
- &msm_pm_idle_rs_limits);
-
- if (!msm_pm_idle_rs_limits) {
- sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
- goto cpuidle_enter_bail;
- }
-
- if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
- pr_info("CPU%u: %s: mode %d\n",
- smp_processor_id(), __func__, sleep_mode);
-
- time = ktime_to_ns(ktime_get());
-
- if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
- int64_t ns = msm_pm_timer_enter_idle();
- notify_rpm = true;
- do_div(ns, NSEC_PER_SEC / SCLK_HZ);
- sleep_delay = (uint32_t)ns;
-
- if (sleep_delay == 0) /* 0 would mean infinite time */
- sleep_delay = 1;
- }
-
- if (pm_sleep_ops.enter_sleep)
- ret = pm_sleep_ops.enter_sleep(sleep_delay,
- msm_pm_idle_rs_limits, true, notify_rpm);
- if (ret)
- goto cpuidle_enter_bail;
-
- switch (sleep_mode) {
- case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
- msm_pm_swfi();
- exit_stat = MSM_PM_STAT_IDLE_WFI;
- break;
-
- case MSM_PM_SLEEP_MODE_RETENTION:
- msm_pm_retention();
- exit_stat = MSM_PM_STAT_RETENTION;
- break;
-
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
- collapsed = msm_pm_power_collapse_standalone(true);
- if (collapsed)
- exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
- else
- exit_stat
- = MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE;
- break;
-
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
- if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
- clock_debug_print_enabled();
-
- collapsed = msm_pm_power_collapse(true);
- timer_halted = true;
-
- if (collapsed)
- exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
- else
- exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
-
- msm_pm_timer_exit_idle(timer_halted);
- break;
-
- case MSM_PM_SLEEP_MODE_NOT_SELECTED:
- goto cpuidle_enter_bail;
- break;
-
- default:
- __WARN();
- goto cpuidle_enter_bail;
- break;
- }
-
- if (pm_sleep_ops.exit_sleep)
- pm_sleep_ops.exit_sleep(msm_pm_idle_rs_limits, true,
- notify_rpm, collapsed);
-
- time = ktime_to_ns(ktime_get()) - time;
- msm_pm_ftrace_lpm_exit(smp_processor_id(), sleep_mode, collapsed);
- if (exit_stat >= 0)
- msm_pm_add_stat(exit_stat, time);
- do_div(time, 1000);
- dev->last_residency = (int) time;
- return sleep_mode;
-
-cpuidle_enter_bail:
- dev->last_residency = 0;
- if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
- msm_pm_timer_exit_idle(timer_halted);
- sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
- return sleep_mode;
-}
-
-int msm_pm_wait_cpu_shutdown(unsigned int cpu)
-{
- int timeout = 0;
-
- if (!msm_pm_slp_sts)
- return 0;
- if (!msm_pm_slp_sts[cpu].base_addr)
- return 0;
- while (1) {
- /*
- * Check for the SPM of the core being hotplugged to set
- * its sleep state.The SPM sleep state indicates that the
- * core has been power collapsed.
- */
- int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);
-
- if (acc_sts & msm_pm_slp_sts[cpu].mask)
- return 0;
- udelay(100);
- WARN(++timeout == 20, "CPU%u didn't collape within 2ms\n",
- cpu);
- }
-
- return -EBUSY;
-}
-
-void msm_pm_cpu_enter_lowpower(unsigned int cpu)
-{
- int i;
- bool allow[MSM_PM_SLEEP_MODE_NR];
-
- for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
- struct msm_pm_platform_data *mode;
-
- mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
- allow[i] = mode->suspend_supported && mode->suspend_enabled;
- }
-
- if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
- pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);
-
- if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
- msm_pm_power_collapse(false);
- else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
- msm_pm_power_collapse_standalone(false);
- else if (allow[MSM_PM_SLEEP_MODE_RETENTION])
- msm_pm_retention();
- else
- msm_pm_swfi();
-}
-
-static void msm_pm_ack_retention_disable(void *data)
-{
- /*
- * This is a NULL function to ensure that the core has woken up
- * and is safe to disable retention.
- */
-}
-/**
- * msm_pm_enable_retention() - Disable/Enable retention on all cores
- * @enable: Enable/Disable retention
- *
- */
-void msm_pm_enable_retention(bool enable)
-{
- if (enable == msm_pm_ldo_retention_enabled)
- return;
-
- msm_pm_ldo_retention_enabled = enable;
- /*
- * If retention is being disabled, wakeup all online core to ensure
- * that it isn't executing retention. Offlined cores need not be woken
- * up as they enter the deepest sleep mode, namely RPM assited power
- * collapse
- */
- if (!enable) {
- preempt_disable();
- smp_call_function_many(cpu_online_mask,
- msm_pm_ack_retention_disable,
- NULL, true);
- preempt_enable();
-
-
- }
-}
-EXPORT_SYMBOL(msm_pm_enable_retention);
-
-static int64_t suspend_time, suspend_period;
-static int collapsed;
-static int suspend_power_collapsed;
-
-static int msm_pm_enter(suspend_state_t state)
-{
- bool allow[MSM_PM_SLEEP_MODE_NR];
- int i;
- struct msm_pm_time_params time_param;
-
- time_param.latency_us = -1;
- time_param.sleep_us = -1;
- time_param.next_event_us = 0;
-
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s\n", __func__);
-
- if (smp_processor_id()) {
- __WARN();
- goto enter_exit;
- }
-
-
- for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
- struct msm_pm_platform_data *mode;
-
- mode = &msm_pm_sleep_modes[MSM_PM_MODE(0, i)];
- allow[i] = mode->suspend_supported && mode->suspend_enabled;
- }
-
- if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
- void *rs_limits = NULL;
- int ret = -ENODEV;
- uint32_t power;
- uint32_t msm_pm_max_sleep_time = 0;
-
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s: power collapse\n", __func__);
-
- clock_debug_print_enabled();
-
- if (msm_pm_sleep_time_override > 0) {
- int64_t ns = NSEC_PER_SEC *
- (int64_t) msm_pm_sleep_time_override;
- do_div(ns, NSEC_PER_SEC / SCLK_HZ);
- msm_pm_max_sleep_time = (uint32_t) ns;
- }
-
- if (pm_sleep_ops.lowest_limits)
- rs_limits = pm_sleep_ops.lowest_limits(false,
- MSM_PM_SLEEP_MODE_POWER_COLLAPSE, &time_param, &power);
-
- if (rs_limits) {
- if (pm_sleep_ops.enter_sleep)
- ret = pm_sleep_ops.enter_sleep(
- msm_pm_max_sleep_time,
- rs_limits, false, true);
- if (!ret) {
- collapsed = msm_pm_power_collapse(false);
- if (pm_sleep_ops.exit_sleep) {
- pm_sleep_ops.exit_sleep(rs_limits,
- false, true, collapsed);
- }
- }
- } else {
- pr_err("%s: cannot find the lowest power limit\n",
- __func__);
- }
- suspend_power_collapsed = true;
- } else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s: standalone power collapse\n", __func__);
- msm_pm_power_collapse_standalone(false);
- } else if (allow[MSM_PM_SLEEP_MODE_RETENTION]) {
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s: retention\n", __func__);
- msm_pm_retention();
- } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s: swfi\n", __func__);
- msm_pm_swfi();
- }
-
-enter_exit:
- if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
- pr_info("%s: return\n", __func__);
-
- return 0;
-}
-
-void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops)
-{
- if (ops)
- pm_sleep_ops = *ops;
-}
-
-static int msm_suspend_prepare(void)
-{
- suspend_time = msm_pm_timer_enter_suspend(&suspend_period);
- msm_mpm_suspend_prepare();
- return 0;
-}
-
-static void msm_suspend_wake(void)
-{
- msm_mpm_suspend_wake();
- if (suspend_power_collapsed) {
- suspend_time = msm_pm_timer_exit_suspend(suspend_time,
- suspend_period);
- if (collapsed)
- msm_pm_add_stat(MSM_PM_STAT_SUSPEND, suspend_time);
- else
- msm_pm_add_stat(MSM_PM_STAT_FAILED_SUSPEND,
- suspend_time);
- suspend_power_collapsed = false;
- }
-}
-
-static const struct platform_suspend_ops msm_pm_ops = {
- .enter = msm_pm_enter,
- .valid = suspend_valid_only_mem,
- .prepare_late = msm_suspend_prepare,
- .wake = msm_suspend_wake,
-};
-
-static int __devinit msm_pm_snoc_client_probe(struct platform_device *pdev)
-{
- int rc = 0;
- static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
- static uint32_t msm_pm_bus_client;
-
- msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);
-
- if (msm_pm_bus_pdata) {
- msm_pm_bus_client =
- msm_bus_scale_register_client(msm_pm_bus_pdata);
-
- if (!msm_pm_bus_client) {
- pr_err("%s: Failed to register SNOC client",
- __func__);
- rc = -ENXIO;
- goto snoc_cl_probe_done;
- }
-
- rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);
-
- if (rc)
- pr_err("%s: Error setting bus rate", __func__);
- }
-
-snoc_cl_probe_done:
- return rc;
-}
-
-static int __devinit msm_cpu_status_probe(struct platform_device *pdev)
-{
- struct msm_pm_sleep_status_data *pdata;
- char *key;
- u32 cpu;
-
- if (!pdev)
- return -EFAULT;
-
- msm_pm_slp_sts =
- kzalloc(sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
- GFP_KERNEL);
-
- if (!msm_pm_slp_sts)
- return -ENOMEM;
-
- if (pdev->dev.of_node) {
- struct resource *res;
- u32 offset;
- int rc;
- u32 mask;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto fail_free_mem;
-
- key = "qcom,cpu-alias-addr";
- rc = of_property_read_u32(pdev->dev.of_node, key, &offset);
-
- if (rc)
- goto fail_free_mem;
-
- key = "qcom,sleep-status-mask";
- rc = of_property_read_u32(pdev->dev.of_node, key,
- &mask);
- if (rc)
- goto fail_free_mem;
-
- for_each_possible_cpu(cpu) {
- msm_pm_slp_sts[cpu].base_addr =
- ioremap(res->start + cpu * offset,
- resource_size(res));
- msm_pm_slp_sts[cpu].mask = mask;
-
- if (!msm_pm_slp_sts[cpu].base_addr)
- goto failed_of_node;
- }
-
- } else {
- pdata = pdev->dev.platform_data;
- if (!pdev->dev.platform_data)
- goto fail_free_mem;
-
- for_each_possible_cpu(cpu) {
- msm_pm_slp_sts[cpu].base_addr =
- pdata->base_addr + cpu * pdata->cpu_offset;
- msm_pm_slp_sts[cpu].mask = pdata->mask;
- }
- }
-
- return 0;
-
-failed_of_node:
- pr_info("%s(): Failed to key=%s\n", __func__, key);
- for_each_possible_cpu(cpu) {
- if (msm_pm_slp_sts[cpu].base_addr)
- iounmap(msm_pm_slp_sts[cpu].base_addr);
- }
-fail_free_mem:
- kfree(msm_pm_slp_sts);
- return -EINVAL;
-
-};
-
-static struct of_device_id msm_slp_sts_match_tbl[] = {
- {.compatible = "qcom,cpu-sleep-status"},
- {},
-};
-
-static struct platform_driver msm_cpu_status_driver = {
- .probe = msm_cpu_status_probe,
- .driver = {
- .name = "cpu_slp_status",
- .owner = THIS_MODULE,
- .of_match_table = msm_slp_sts_match_tbl,
- },
-};
-
-static struct of_device_id msm_snoc_clnt_match_tbl[] = {
- {.compatible = "qcom,pm-snoc-client"},
- {},
-};
-
-static struct platform_driver msm_cpu_pm_snoc_client_driver = {
- .probe = msm_pm_snoc_client_probe,
- .driver = {
- .name = "pm_snoc_client",
- .owner = THIS_MODULE,
- .of_match_table = msm_snoc_clnt_match_tbl,
- },
-};
-
-#ifdef CONFIG_ARM_LPAE
-static int msm_pm_idmap_add_pmd(pud_t *pud, unsigned long addr,
- unsigned long end, unsigned long prot)
-{
- pmd_t *pmd;
- unsigned long next;
-
- if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
- pmd = pmd_alloc_one(&init_mm, addr);
- if (!pmd)
- return -ENOMEM;
-
- pud_populate(&init_mm, pud, pmd);
- pmd += pmd_index(addr);
- } else {
- pmd = pmd_offset(pud, addr);
- }
-
- do {
- next = pmd_addr_end(addr, end);
- *pmd = __pmd((addr & PMD_MASK) | prot);
- flush_pmd_entry(pmd);
- } while (pmd++, addr = next, addr != end);
-
- return 0;
-}
-#else /* !CONFIG_ARM_LPAE */
-static int msm_pm_idmap_add_pmd(pud_t *pud, unsigned long addr,
- unsigned long end, unsigned long prot)
-{
- pmd_t *pmd = pmd_offset(pud, addr);
-
- addr = (addr & PMD_MASK) | prot;
- pmd[0] = __pmd(addr);
- addr += SECTION_SIZE;
- pmd[1] = __pmd(addr);
- flush_pmd_entry(pmd);
-
- return 0;
-}
-#endif /* CONFIG_ARM_LPAE */
-
-static int msm_pm_idmap_add_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end,
- unsigned long prot)
-{
- pud_t *pud = pud_offset(pgd, addr);
- unsigned long next;
- int ret;
-
- do {
- next = pud_addr_end(addr, end);
- ret = msm_pm_idmap_add_pmd(pud, addr, next, prot);
- if (ret)
- return ret;
- } while (pud++, addr = next, addr != end);
-
- return 0;
-}
-
-static int msm_pm_add_idmap(pgd_t *pgd, unsigned long addr,
- unsigned long end,
- unsigned long prot)
-{
- unsigned long next;
- int ret;
-
- pgd += pgd_index(addr);
- do {
- next = pgd_addr_end(addr, end);
- ret = msm_pm_idmap_add_pud(pgd, addr, next, prot);
- if (ret)
- return ret;
- } while (pgd++, addr = next, addr != end);
-
- return 0;
-}
-
-static int msm_pm_setup_pagetable(void)
-{
- pgd_t *pc_pgd;
- unsigned long exit_phys;
- unsigned long end;
- int ret;
-
- /* Page table for cores to come back up safely. */
- pc_pgd = pgd_alloc(&init_mm);
- if (!pc_pgd)
- return -ENOMEM;
-
- exit_phys = virt_to_phys(msm_pm_collapse_exit);
-
- /*
- * Make the (hopefully) reasonable assumption that the code size of
- * msm_pm_collapse_exit won't be more than a section in size
- */
- end = exit_phys + SECTION_SIZE;
-
- ret = msm_pm_add_idmap(pc_pgd, exit_phys, end,
- PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF);
-
- if (ret)
- return ret;
-
- msm_pm_pc_pgd = virt_to_phys(pc_pgd);
- clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
- virt_to_phys(&msm_pm_pc_pgd));
-
- return 0;
-}
-
-static int __init msm_pm_setup_saved_state(void)
-{
- int ret;
- dma_addr_t temp_phys;
-
- ret = msm_pm_setup_pagetable();
- if (ret)
- return ret;
-
- msm_saved_state = dma_zalloc_coherent(NULL, CPU_SAVED_STATE_SIZE *
- num_possible_cpus(),
- &temp_phys, 0);
-
- if (!msm_saved_state)
- return -ENOMEM;
-
- /*
- * Explicitly cast here since msm_saved_state_phys is defined
- * in assembly and we want to avoid any kind of truncation
- * or endian problems.
- */
- msm_saved_state_phys = (unsigned long)temp_phys;
-
- return 0;
-}
-arch_initcall(msm_pm_setup_saved_state);
-
-static void setup_broadcast_timer(void *arg)
-{
- int cpu = smp_processor_id();
-
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
-static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- smp_call_function_single(cpu, setup_broadcast_timer, NULL, 1);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block setup_broadcast_notifier = {
- .notifier_call = setup_broadcast_cpuhp_notify,
-};
-
-static int __init msm_pm_init(void)
-{
- enum msm_pm_time_stats_id enable_stats[] = {
- MSM_PM_STAT_IDLE_WFI,
- MSM_PM_STAT_RETENTION,
- MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_POWER_COLLAPSE,
- MSM_PM_STAT_SUSPEND,
- };
- msm_pm_mode_sysfs_add();
- msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
- suspend_set_ops(&msm_pm_ops);
- hrtimer_init(&pm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- msm_cpuidle_init();
-
- if (msm_pm_pc_reset_timer) {
- on_each_cpu(setup_broadcast_timer, NULL, 1);
- register_cpu_notifier(&setup_broadcast_notifier);
- }
-
- return 0;
-}
-
-static void __devinit msm_pm_set_flush_fn(uint32_t pc_mode)
-{
- msm_pm_disable_l2_fn = NULL;
- msm_pm_enable_l2_fn = NULL;
- msm_pm_flush_l2_fn = outer_flush_all;
-
- if (pc_mode == MSM_PM_PC_NOTZ_L2_EXT) {
- msm_pm_disable_l2_fn = outer_disable;
- msm_pm_enable_l2_fn = outer_resume;
- }
-}
-
-struct msm_pc_debug_counters_buffer {
- void __iomem *reg;
- u32 len;
- char buf[MAX_BUF_SIZE];
-};
-
-static inline u32 msm_pc_debug_counters_read_register(
- void __iomem *reg, int index , int offset)
-{
- return readl_relaxed(reg + (index * 4 + offset) * 4);
-}
-
-static char *counter_name[] = {
- "PC Entry Counter",
- "Warmboot Entry Counter",
- "PC Bailout Counter"
-};
-
-static int msm_pc_debug_counters_copy(
- struct msm_pc_debug_counters_buffer *data)
-{
- int j;
- u32 stat;
- unsigned int cpu;
-
- for_each_possible_cpu(cpu) {
- data->len += scnprintf(data->buf + data->len,
- sizeof(data->buf)-data->len,
- "CPU%d\n", cpu);
-
- for (j = 0; j < NUM_OF_COUNTERS; j++) {
- stat = msm_pc_debug_counters_read_register(
- data->reg, cpu, j);
- data->len += scnprintf(data->buf + data->len,
- sizeof(data->buf)-data->len,
- "\t%s : %d\n", counter_name[j],
- stat);
- }
-
- }
-
- return data->len;
-}
-
-static int msm_pc_debug_counters_file_read(struct file *file,
- char __user *bufu, size_t count, loff_t *ppos)
-{
- struct msm_pc_debug_counters_buffer *data;
-
- data = file->private_data;
-
- if (!data)
- return -EINVAL;
-
- if (!bufu)
- return -EINVAL;
-
- if (!access_ok(VERIFY_WRITE, bufu, count))
- return -EFAULT;
-
- if (*ppos >= data->len && data->len == 0)
- data->len = msm_pc_debug_counters_copy(data);
-
- return simple_read_from_buffer(bufu, count, ppos,
- data->buf, data->len);
-}
-
-static int msm_pc_debug_counters_file_open(struct inode *inode,
- struct file *file)
-{
- struct msm_pc_debug_counters_buffer *buf;
- void __iomem *msm_pc_debug_counters_reg;
-
- msm_pc_debug_counters_reg = inode->i_private;
-
- if (!msm_pc_debug_counters_reg)
- return -EINVAL;
-
- file->private_data = kzalloc(
- sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);
-
- if (!file->private_data) {
- pr_err("%s: ERROR kmalloc failed to allocate %d bytes\n",
- __func__, sizeof(struct msm_pc_debug_counters_buffer));
-
- return -ENOMEM;
- }
-
- buf = file->private_data;
- buf->reg = msm_pc_debug_counters_reg;
-
- return 0;
-}
-
-static int msm_pc_debug_counters_file_close(struct inode *inode,
- struct file *file)
-{
- kfree(file->private_data);
- return 0;
-}
-
-static const struct file_operations msm_pc_debug_counters_fops = {
- .open = msm_pc_debug_counters_file_open,
- .read = msm_pc_debug_counters_file_read,
- .release = msm_pc_debug_counters_file_close,
- .llseek = no_llseek,
-};
-
-static int msm_pm_clk_init(struct platform_device *pdev)
-{
- bool synced_clocks;
- u32 cpu;
- char clk_name[] = "cpu??_clk";
- bool cpu_as_clocks;
- char *key;
-
- key = "qcom,cpus-as-clocks";
- cpu_as_clocks = of_property_read_bool(pdev->dev.of_node, key);
-
- if (!cpu_as_clocks) {
- use_acpuclk_apis = true;
- return 0;
- }
-
- key = "qcom,synced-clocks";
- synced_clocks = of_property_read_bool(pdev->dev.of_node, key);
-
- for_each_possible_cpu(cpu) {
- struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
- clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(clk)) {
- if (cpu && synced_clocks)
- return 0;
- else
- return PTR_ERR(clk);
- }
- per_cpu(cpu_clks, cpu) = clk;
- }
-
- if (synced_clocks)
- return 0;
-
- l2_clk = devm_clk_get(&pdev->dev, "l2_clk");
-
- return PTR_RET(l2_clk);
-}
-
-static int __devinit msm_pm_8x60_probe(struct platform_device *pdev)
-{
- char *key = NULL;
- struct dentry *dent = NULL;
- struct resource *res = NULL;
- int i ;
- struct msm_pm_init_data_type pdata_local;
- int ret = 0;
-
- memset(&pdata_local, 0, sizeof(struct msm_pm_init_data_type));
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
- msm_pc_debug_counters_phys = res->start;
- WARN_ON(resource_size(res) < SZ_64);
- msm_pc_debug_counters = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (msm_pc_debug_counters)
- for (i = 0; i < resource_size(res)/4; i++)
- __raw_writel(0, msm_pc_debug_counters + i * 4);
-
- }
-
- if (!msm_pc_debug_counters) {
- msm_pc_debug_counters = 0;
- msm_pc_debug_counters_phys = 0;
- } else {
- dent = debugfs_create_file("pc_debug_counter", S_IRUGO, NULL,
- msm_pc_debug_counters,
- &msm_pc_debug_counters_fops);
- if (!dent)
- pr_err("%s: ERROR debugfs_create_file failed\n",
- __func__);
- }
-
- if (!pdev->dev.of_node) {
- struct msm_pm_init_data_type *d = pdev->dev.platform_data;
-
- if (!d)
- goto pm_8x60_probe_done;
-
- memcpy(&pdata_local, d, sizeof(struct msm_pm_init_data_type));
-
- } else {
- ret = msm_pm_clk_init(pdev);
- if (ret) {
- pr_info("msm_pm_clk_init returned error\n");
- return ret;
- }
-
- key = "qcom,pc-mode";
- ret = msm_pm_get_pc_mode(pdev->dev.of_node,
- key,
- &pdata_local.pc_mode);
- if (ret) {
- pr_debug("%s: Error reading key %s",
- __func__, key);
- return -EINVAL;
- }
-
- key = "qcom,use-sync-timer";
- pdata_local.use_sync_timer =
- of_property_read_bool(pdev->dev.of_node, key);
-
- key = "qcom,saw-turns-off-pll";
- msm_no_ramp_down_pc = of_property_read_bool(pdev->dev.of_node,
- key);
-
- key = "qcom,pc-resets-timer";
- msm_pm_pc_reset_timer = of_property_read_bool(
- pdev->dev.of_node, key);
- }
-
- if (pdata_local.cp15_data.reg_data &&
- pdata_local.cp15_data.reg_saved_state_size > 0) {
- cp15_data.reg_data = kzalloc(sizeof(uint32_t) *
- pdata_local.cp15_data.reg_saved_state_size,
- GFP_KERNEL);
- if (!cp15_data.reg_data)
- return -ENOMEM;
-
- cp15_data.reg_val = kzalloc(sizeof(uint32_t) *
- pdata_local.cp15_data.reg_saved_state_size,
- GFP_KERNEL);
- if (cp15_data.reg_val)
- return -ENOMEM;
-
- memcpy(cp15_data.reg_data, pdata_local.cp15_data.reg_data,
- pdata_local.cp15_data.reg_saved_state_size *
- sizeof(uint32_t));
- }
-
- msm_pm_set_flush_fn(pdata_local.pc_mode);
- msm_pm_use_sync_timer = pdata_local.use_sync_timer;
- msm_pm_retention_calls_tz = pdata_local.retention_calls_tz;
-
-pm_8x60_probe_done:
- msm_pm_init();
- if (pdev->dev.of_node)
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- return ret;
-}
-
-static struct of_device_id msm_pm_8x60_table[] = {
- {.compatible = "qcom,pm-8x60"},
- {},
-};
-
-static struct platform_driver msm_pm_8x60_driver = {
- .probe = msm_pm_8x60_probe,
- .driver = {
- .name = "pm-8x60",
- .owner = THIS_MODULE,
- .of_match_table = msm_pm_8x60_table,
- },
-};
-
-static int __init msm_pm_8x60_init(void)
-{
- int rc;
-
- rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);
-
- if (rc) {
- pr_err("%s(): failed to register driver %s\n", __func__,
- msm_cpu_pm_snoc_client_driver.driver.name);
- return rc;
- }
-
- return platform_driver_register(&msm_pm_8x60_driver);
-}
-device_initcall(msm_pm_8x60_init);
-
-void __init msm_pm_sleep_status_init(void)
-{
- platform_driver_register(&msm_cpu_status_driver);
-}
diff --git a/arch/arm/mach-msm/pm-data.c b/arch/arm/mach-msm/pm-data.c
index f41c569..04f4237 100644
--- a/arch/arm/mach-msm/pm-data.c
+++ b/arch/arm/mach-msm/pm-data.c
@@ -43,7 +43,7 @@
},
[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
- .idle_supported = 0,
+ .idle_supported = 1,
.suspend_supported = 1,
.idle_enabled = 0,
.suspend_enabled = 1,
@@ -71,7 +71,7 @@
},
[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
- .idle_supported = 0,
+ .idle_supported = 1,
.suspend_supported = 1,
.idle_enabled = 0,
.suspend_enabled = 1,
@@ -99,7 +99,7 @@
},
[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
- .idle_supported = 0,
+ .idle_supported = 1,
.suspend_supported = 1,
.idle_enabled = 0,
.suspend_enabled = 1,
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index f2fc80b..a20b36e 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -27,33 +27,20 @@
#define msm_secondary_startup NULL
#endif
-DECLARE_PER_CPU(int, power_collapsed);
-
-struct msm_pm_irq_calls {
- unsigned int (*irq_pending)(void);
- int (*idle_sleep_allowed)(void);
- void (*enter_sleep1)(bool modem_wake, int from_idle, uint32_t
- *irq_mask);
- int (*enter_sleep2)(bool modem_wake, int from_idle);
- void (*exit_sleep1)(uint32_t irq_mask, uint32_t wakeup_reason,
- uint32_t pending_irqs);
- void (*exit_sleep2)(uint32_t irq_mask, uint32_t wakeup_reason,
- uint32_t pending_irqs);
- void (*exit_sleep3)(uint32_t irq_mask, uint32_t wakeup_reason,
- uint32_t pending_irqs);
+enum msm_pm_sleep_mode {
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ MSM_PM_SLEEP_MODE_RETENTION,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ MSM_PM_SLEEP_MODE_NR,
+ MSM_PM_SLEEP_MODE_NOT_SELECTED,
};
-enum msm_pm_sleep_mode {
- MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT = 0,
- MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT = 1,
- MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE = 2,
- MSM_PM_SLEEP_MODE_POWER_COLLAPSE = 3,
- MSM_PM_SLEEP_MODE_APPS_SLEEP = 4,
- MSM_PM_SLEEP_MODE_RETENTION = MSM_PM_SLEEP_MODE_APPS_SLEEP,
- MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND = 5,
- MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN = 6,
- MSM_PM_SLEEP_MODE_NR = 7,
- MSM_PM_SLEEP_MODE_NOT_SELECTED,
+enum msm_pm_l2_scm_flag {
+ MSM_SCM_L2_ON = 0,
+ MSM_SCM_L2_OFF = 1,
+ MSM_SCM_L2_RET = 2,
+ MSM_SCM_L2_GDHS = 3,
};
#define MSM_PM_MODE(cpu, mode_nr) ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
@@ -84,16 +71,6 @@
extern struct msm_pm_platform_data msm_pm_sleep_modes[];
-struct msm_pm_sleep_ops {
- void *(*lowest_limits)(bool from_idle,
- enum msm_pm_sleep_mode sleep_mode,
- struct msm_pm_time_params *time_param, uint32_t *power);
- int (*enter_sleep)(uint32_t sclk_count, void *limits,
- bool from_idle, bool notify_rpm);
- void (*exit_sleep)(void *limits, bool from_idle,
- bool notify_rpm, bool collapsed);
-};
-
enum msm_pm_pc_mode_type {
MSM_PM_PC_TZ_L2_INT, /*Power collapse terminates in TZ;
integrated L2 cache controller */
@@ -103,20 +80,8 @@
external L2 cache controller */
};
-struct msm_pm_cp15_save_data {
- bool save_cp15;
- uint32_t active_vdd;
- uint32_t qsb_pc_vdd;
- uint32_t reg_saved_state_size;
- uint32_t *reg_data;
- uint32_t *reg_val;
-};
-
struct msm_pm_init_data_type {
enum msm_pm_pc_mode_type pc_mode;
- bool retention_calls_tz;
- struct msm_pm_cp15_save_data cp15_data;
- bool use_sync_timer;
};
struct msm_pm_cpr_ops {
@@ -127,21 +92,37 @@
void msm_pm_set_platform_data(struct msm_pm_platform_data *data, int count);
enum msm_pm_sleep_mode msm_pm_idle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
-void msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls);
void msm_pm_cpu_enter_lowpower(unsigned int cpu);
void __init msm_pm_set_tz_retention_flag(unsigned int flag);
void msm_pm_enable_retention(bool enable);
-#ifdef CONFIG_MSM_PM8X60
+#if defined(CONFIG_MSM_PM)
void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
-void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops);
int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
void __init msm_pm_sleep_status_init(void);
+void msm_pm_set_l2_flush_flag(enum msm_pm_l2_scm_flag flag);
+bool msm_cpu_pm_check_mode(unsigned int cpu, enum msm_pm_sleep_mode mode,
+ bool from_idle);
+int msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
#else
static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
-static inline void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops) {}
static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode,
+ bool from_idle)
+{
+ return -ENODEV;
+}
static inline void msm_pm_sleep_status_init(void) {};
+static inline void msm_pm_set_l2_flush_flag(unsigned int flag)
+{
+ /* empty */
+}
+bool msm_cpu_pm_check_mode(unsigned int cpu, enum msm_pm_sleep_mode mode,
+ bool from_idle)
+{
+ return false;
+}
#endif
#ifdef CONFIG_HOTPLUG_CPU
int msm_platform_secondary_init(unsigned int cpu);
@@ -174,6 +155,5 @@
#endif
void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
-extern void *msm_pc_debug_counters;
extern unsigned long msm_pc_debug_counters_phys;
#endif /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
index 4681437..21040b1 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
@@ -108,9 +108,7 @@
break;
case RESET_EVENTS:
pr_debug("%s: Received opcode:0x%x\n", __func__, opcode);
- audio->event_abort = 1;
audio->stopped = 1;
- audio->enabled = 0;
wake_up(&audio->event_wait);
break;
default:
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
index 16de77e..1f4fdab 100644
--- a/arch/arm/mach-msm/rpm-notifier.h
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -42,10 +42,11 @@
* msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
*
* @bool - flag to enable print contents of sleep buffer.
+ * @cpumask - cpumask of next wakeup cpu
*
* return 0 on success errno on failure.
*/
-int msm_rpm_enter_sleep(bool print);
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
/**
* msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index ccab6e2..54576a9 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -1287,14 +1287,14 @@
* During power collapse, the rpm driver disables the SMD interrupts to make
* sure that the interrupt doesn't wakes us from sleep.
*/
-int msm_rpm_enter_sleep(bool print)
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
{
if (standalone)
return 0;
msm_rpm_flush_requests(print);
- return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true);
+ return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true, cpumask);
}
EXPORT_SYMBOL(msm_rpm_enter_sleep);
@@ -1307,7 +1307,7 @@
if (standalone)
return;
- smd_mask_receive_interrupt(msm_rpm_data.ch_info, false);
+ smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
}
EXPORT_SYMBOL(msm_rpm_exit_sleep);
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index cb9697d..09b3113 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -2247,13 +2247,15 @@
* particular channel.
* @ch: open channel handle to use for the edge
* @mask: 1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask: cpumask for the next cpu scheduled to be woken up
* @returns: 0 for success; < 0 for failure
*
* Note that this enables/disables all interrupts from the remote subsystem for
* all channels. As such, it should be used with care and only for specific
* use cases such as power-collapse sequencing.
*/
-int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask)
{
struct irq_chip *irq_chip;
struct irq_data *irq_data;
@@ -2282,6 +2284,8 @@
SMD_POWER_INFO("SMD Masking interrupts from %s\n",
edge_to_pids[ch->type].subsys_name);
irq_chip->irq_mask(irq_data);
+ if (cpumask)
+ irq_chip->irq_set_affinity(irq_data, cpumask, true);
} else {
SMD_POWER_INFO("SMD Unmasking interrupts from %s\n",
edge_to_pids[ch->type].subsys_name);
diff --git a/arch/arm/mach-msm/spm.h b/arch/arm/mach-msm/spm.h
index 1769402..2946689 100644
--- a/arch/arm/mach-msm/spm.h
+++ b/arch/arm/mach-msm/spm.h
@@ -28,43 +28,6 @@
MSM_SPM_L2_MODE_POWER_COLLAPSE,
};
-#if defined(CONFIG_MSM_SPM_V1)
-
-enum {
- MSM_SPM_REG_SAW_AVS_CTL,
- MSM_SPM_REG_SAW_CFG,
- MSM_SPM_REG_SAW_SPM_CTL,
- MSM_SPM_REG_SAW_SPM_SLP_TMR_DLY,
- MSM_SPM_REG_SAW_SPM_WAKE_TMR_DLY,
- MSM_SPM_REG_SAW_SLP_CLK_EN,
- MSM_SPM_REG_SAW_SLP_HSFS_PRECLMP_EN,
- MSM_SPM_REG_SAW_SLP_HSFS_POSTCLMP_EN,
- MSM_SPM_REG_SAW_SLP_CLMP_EN,
- MSM_SPM_REG_SAW_SLP_RST_EN,
- MSM_SPM_REG_SAW_SPM_MPM_CFG,
- MSM_SPM_REG_NR_INITIALIZE,
-
- MSM_SPM_REG_SAW_VCTL = MSM_SPM_REG_NR_INITIALIZE,
- MSM_SPM_REG_SAW_STS,
- MSM_SPM_REG_SAW_SPM_PMIC_CTL,
- MSM_SPM_REG_NR
-};
-
-struct msm_spm_platform_data {
- void __iomem *reg_base_addr;
- uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE];
-
- uint8_t awake_vlevel;
- uint8_t retention_vlevel;
- uint8_t collapse_vlevel;
- uint8_t retention_mid_vlevel;
- uint8_t collapse_mid_vlevel;
-
- uint32_t vctl_timeout_us;
-};
-
-#elif defined(CONFIG_MSM_SPM_V2)
-
enum {
MSM_SPM_REG_SAW2_CFG,
MSM_SPM_REG_SAW2_AVS_CTL,
@@ -122,9 +85,8 @@
uint32_t num_modes;
struct msm_spm_seq_entry *modes;
};
-#endif
-#if defined(CONFIG_MSM_SPM_V1) || defined(CONFIG_MSM_SPM_V2)
+#if defined(CONFIG_MSM_SPM_V2)
/* Public functions */
@@ -187,7 +149,7 @@
return -ENOSYS;
}
#endif /* defined(CONFIG_MSM_L2_SPM) */
-#else /* defined(CONFIG_MSM_SPM_V1) || defined(CONFIG_MSM_SPM_V2) */
+#else /* defined(CONFIG_MSM_SPM_V2) */
static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
return -ENOSYS;
@@ -218,5 +180,5 @@
return -ENOSYS;
}
-#endif /*defined(CONFIG_MSM_SPM_V1) || defined (CONFIG_MSM_SPM_V2) */
+#endif /* defined (CONFIG_MSM_SPM_V2) */
#endif /* __ARCH_ARM_MACH_MSM_SPM_H */
diff --git a/arch/arm/mach-msm/test-lpm.c b/arch/arm/mach-msm/test-lpm.c
index 031b2dc..790c909 100644
--- a/arch/arm/mach-msm/test-lpm.c
+++ b/arch/arm/mach-msm/test-lpm.c
@@ -121,9 +121,6 @@
case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
strlcat(nm, "WFI ", BUF_SIZE);
break;
- case MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT:
- strlcat(nm, "WFI voltage Rampdown ", BUF_SIZE);
- break;
case MSM_PM_SLEEP_MODE_RETENTION:
strlcat(nm, "Retention ", BUF_SIZE);
break;
@@ -133,9 +130,6 @@
case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
strlcat(nm, "Idle Power collapse ", BUF_SIZE);
break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
- strlcat(nm, "Suspend Power collapse ", BUF_SIZE);
- break;
default:
strlcat(nm, "Invalid Mode ", BUF_SIZE);
break;
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 0720163..e505bef 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -240,6 +240,9 @@
mov pc, lr
ENDPROC(fa_dma_unmap_area)
+ .globl fa_flush_kern_cache_louis
+ .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2301f2..d99c00c 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -127,6 +127,9 @@
ENDPROC(v3_dma_unmap_area)
ENDPROC(v3_dma_map_area)
+ .globl v3_flush_kern_cache_louis
+ .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index fd9bb7a..548b892a 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -139,6 +139,9 @@
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)
+ .globl v4_flush_kern_cache_louis
+ .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 4f2c141..63b7e49 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -251,6 +251,9 @@
mov pc, lr
ENDPROC(v4wb_dma_unmap_area)
+ .globl v4wb_flush_kern_cache_louis
+ .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 4d7b467..198d424 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -195,6 +195,9 @@
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
+ .globl v4wt_flush_kern_cache_louis
+ .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 2edb6f6..6a5674d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -345,6 +345,9 @@
mov pc, lr
ENDPROC(v6_dma_unmap_area)
+ .globl v6_flush_kern_cache_louis
+ .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a655d3d..df79627 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -32,6 +32,24 @@
mov pc, lr
ENDPROC(v7_flush_icache_all)
+ /*
+ * v7_flush_dcache_louis()
+ *
+ * Flush the D-cache up to the Level of Unification Inner Shareable
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+
+ENTRY(v7_flush_dcache_louis)
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
+ ands r3, r0, #0xe00000 @ extract LoUIS from clidr
+ mov r3, r3, lsr #20 @ r3 = LoUIS * 2
+ moveq pc, lr @ return if level == 0
+ mov r10, #0 @ r10 (starting level) = 0
+ b loop1 @ start flushing cache levels
+ENDPROC(v7_flush_dcache_louis)
+
/*
* v7_flush_dcache_all()
*
@@ -119,6 +137,24 @@
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
+ /*
+ * v7_flush_kern_cache_louis(void)
+ *
+ * Flush the data cache up to Level of Unification Inner Shareable.
+ * Invalidate the I-cache to the point of unification.
+ */
+ENTRY(v7_flush_kern_cache_louis)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ bl v7_flush_dcache_louis
+ mov r0, #0
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(v7_flush_kern_cache_louis)
+
/*
* v7_flush_cache_all()
*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 34ae4e6..ac6ccf3b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -885,8 +885,9 @@
MLM(VMALLOC_START, VMALLOC_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory));
#endif
-#ifdef CONFIG_HIGHMEM
+
printk(KERN_NOTICE
+#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2349513..c11e32e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -367,6 +367,9 @@
mov pc, lr
ENDPROC(arm1020_dma_unmap_area)
+ .globl arm1020_flush_kern_cache_louis
+ .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index c244b06..9624a35 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -353,6 +353,9 @@
mov pc, lr
ENDPROC(arm1020e_dma_unmap_area)
+ .globl arm1020e_flush_kern_cache_louis
+ .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020e
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 38fe22e..f2b45ee 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -342,6 +342,9 @@
mov pc, lr
ENDPROC(arm1022_dma_unmap_area)
+ .globl arm1022_flush_kern_cache_louis
+ .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1022
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3eb9c3c..95934d2 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -336,6 +336,9 @@
mov pc, lr
ENDPROC(arm1026_dma_unmap_area)
+ .globl arm1026_flush_kern_cache_louis
+ .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1026
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index cb941ae..ed3acd4 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -318,6 +318,9 @@
mov pc, lr
ENDPROC(arm920_dma_unmap_area)
+ .globl arm920_flush_kern_cache_louis
+ .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm920
#endif
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4ec0e07..142bace 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -320,6 +320,9 @@
mov pc, lr
ENDPROC(arm922_dma_unmap_area)
+ .globl arm922_flush_kern_cache_louis
+ .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm922
#endif
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 9dccd9a..3028390 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -375,6 +375,9 @@
mov pc, lr
ENDPROC(arm925_dma_unmap_area)
+ .globl arm925_flush_kern_cache_louis
+ .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm925
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 820259b..1f99b46 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -338,6 +338,9 @@
mov pc, lr
ENDPROC(arm926_dma_unmap_area)
+ .globl arm926_flush_kern_cache_louis
+ .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm926
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 9fdc0a1..e5af959 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -267,6 +267,9 @@
mov pc, lr
ENDPROC(arm940_dma_unmap_area)
+ .globl arm940_flush_kern_cache_louis
+ .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm940
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f684cfe..3599b37 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -309,6 +309,9 @@
mov pc, lr
ENDPROC(arm946_dma_unmap_area)
+ .globl arm946_flush_kern_cache_louis
+ .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm946
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index ba3c500..26a9984 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -414,6 +414,9 @@
mov pc, lr
ENDPROC(feroceon_dma_unmap_area)
+ .globl feroceon_flush_kern_cache_louis
+ .equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions feroceon
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 5829bb3..2ea177a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -299,6 +299,7 @@
ENTRY(\name\()_cache_fns)
.long \name\()_flush_icache_all
.long \name\()_flush_kern_cache_all
+ .long \name\()_flush_kern_cache_louis
.long \name\()_flush_user_cache_all
.long \name\()_flush_user_cache_range
.long \name\()_coherent_kern_range
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index cdfedc5..224d0f5 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -302,6 +302,9 @@
mov pc, lr
ENDPROC(mohawk_dma_unmap_area)
+ .globl mohawk_flush_kern_cache_louis
+ .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions mohawk
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index b0d5786..eb93d64 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -337,6 +337,9 @@
mov pc, lr
ENDPROC(xsc3_dma_unmap_area)
+ .globl xsc3_flush_kern_cache_louis
+ .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xsc3
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 4ffebaa..b5ea31d 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -410,6 +410,9 @@
mov pc, lr
ENDPROC(xscale_dma_unmap_area)
+ .globl xscale_flush_kern_cache_louis
+ .equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
+
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xscale
diff --git a/drivers/Kconfig b/drivers/Kconfig
index adead10..72440c9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -152,4 +152,6 @@
source "drivers/bif/Kconfig"
+source "drivers/sensors/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index d55b035..867be8a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -144,3 +144,4 @@
obj-$(CONFIG_CORESIGHT) += coresight/
obj-$(CONFIG_BIF) += bif/
+obj-$(CONFIG_SENSORS) += sensors/
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 4507f80..3aa86463 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -198,7 +198,7 @@
me->smmu.domain_id, 0);
buf->phys = 0;
}
- if (buf->virt) {
+ if (!IS_ERR_OR_NULL(buf->virt)) {
ion_unmap_kernel(me->iclient, buf->handle);
buf->virt = 0;
}
@@ -211,7 +211,7 @@
{
struct fastrpc_apps *me = &gfa;
if (!IS_ERR_OR_NULL(map->handle)) {
- if (map->virt) {
+ if (!IS_ERR_OR_NULL(map->virt)) {
ion_unmap_kernel(me->iclient, map->handle);
map->virt = 0;
}
@@ -230,13 +230,15 @@
unsigned long len;
buf->handle = 0;
buf->virt = 0;
+ buf->phys = 0;
heap = me->smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) :
ION_HEAP(ION_ADSP_HEAP_ID) | ION_HEAP(ION_AUDIO_HEAP_ID);
buf->handle = ion_alloc(clnt, buf->size, SZ_4K, heap, ION_FLAG_CACHED);
VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
if (err)
goto bail;
- VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
+ buf->virt = ion_map_kernel(clnt, buf->handle);
+ VERIFY(err, 0 == IS_ERR_OR_NULL(buf->virt));
if (err)
goto bail;
if (me->smmu.enabled) {
@@ -355,6 +357,9 @@
list[i].num = 0;
list[i].pgidx = 0;
len = pra[i].buf.len;
+ VERIFY(err, len >= 0);
+ if (err)
+ goto bail;
if (!len)
continue;
buf = pra[i].buf.pv;
@@ -852,7 +857,7 @@
context_free(ctx);
if (me->smmu.enabled) {
- bufs = REMOTE_SCALARS_LENGTH(sc);
+ bufs = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc);
if (fds) {
handles = (struct ion_handle **)(fds + bufs);
for (i = 0; i < bufs; i++)
@@ -1028,7 +1033,8 @@
VERIFY(err, 0 == IS_ERR_OR_NULL(map->handle));
if (err)
goto bail;
- VERIFY(err, 0 != (map->virt = ion_map_kernel(clnt, map->handle)));
+ map->virt = ion_map_kernel(clnt, map->handle);
+ VERIFY(err, 0 == IS_ERR_OR_NULL(map->virt));
if (err)
goto bail;
buf = (void *)mmap->vaddrin;
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 46756c5..83ab92b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@
# CPUfreq core
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o cpu-boost.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
new file mode 100644
index 0000000..f789425
--- /dev/null
+++ b/drivers/cpufreq/cpu-boost.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpu-boost: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/moduleparam.h>
+
+struct cpu_sync {
+ struct task_struct *thread;
+ wait_queue_head_t sync_wq;
+ struct delayed_work boost_rem;
+ int cpu;
+ spinlock_t lock;
+ bool pending;
+ int src_cpu;
+ unsigned int boost_min;
+};
+
+static DEFINE_PER_CPU(struct cpu_sync, sync_info);
+static struct workqueue_struct *boost_rem_wq;
+
+static unsigned int boost_ms = 50;
+module_param(boost_ms, uint, 0644);
+
+static unsigned int sync_threshold;
+module_param(sync_threshold, uint, 0644);
+/*
+ * The CPUFREQ_ADJUST notifier is used to override the current policy min to
+ * make sure policy min >= boost_min. The cpufreq framework then does the job
+ * of enforcing the new policy.
+ */
+static int boost_adjust_notify(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ unsigned int cpu = policy->cpu;
+ struct cpu_sync *s = &per_cpu(sync_info, cpu);
+ unsigned int min = s->boost_min;
+
+ if (val != CPUFREQ_ADJUST)
+ return NOTIFY_OK;
+
+ if (min == 0)
+ return NOTIFY_OK;
+
+ pr_debug("CPU%u policy min before boost: %u kHz\n",
+ cpu, policy->min);
+ pr_debug("CPU%u boost min: %u kHz\n", cpu, min);
+
+ cpufreq_verify_within_limits(policy, min, UINT_MAX);
+
+ pr_debug("CPU%u policy min after boost: %u kHz\n",
+ cpu, policy->min);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block boost_adjust_nb = {
+ .notifier_call = boost_adjust_notify,
+};
+
+static void do_boost_rem(struct work_struct *work)
+{
+ struct cpu_sync *s = container_of(work, struct cpu_sync,
+ boost_rem.work);
+
+ pr_debug("Removing boost for CPU%d\n", s->cpu);
+ s->boost_min = 0;
+ /* Force policy re-evaluation to trigger adjust notifier. */
+ cpufreq_update_policy(s->cpu);
+}
+
+static int boost_mig_sync_thread(void *data)
+{
+ int dest_cpu = (int) data;
+ int src_cpu, ret;
+ struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
+ struct cpufreq_policy dest_policy;
+ struct cpufreq_policy src_policy;
+ unsigned long flags;
+
+ while(1) {
+ wait_event(s->sync_wq, s->pending || kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&s->lock, flags);
+ s->pending = false;
+ src_cpu = s->src_cpu;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ ret = cpufreq_get_policy(&src_policy, src_cpu);
+ if (ret)
+ continue;
+
+ ret = cpufreq_get_policy(&dest_policy, dest_cpu);
+ if (ret)
+ continue;
+
+ if (dest_policy.cur >= src_policy.cur ) {
+ pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
+ dest_cpu, dest_policy.cur, src_cpu, src_policy.cur);
+ continue;
+ }
+
+ if (sync_threshold && (dest_policy.cur >= sync_threshold))
+ continue;
+
+ cancel_delayed_work_sync(&s->boost_rem);
+ if (sync_threshold) {
+ if (src_policy.cur >= sync_threshold)
+ s->boost_min = sync_threshold;
+ else
+ s->boost_min = src_policy.cur;
+ } else {
+ s->boost_min = src_policy.cur;
+ }
+ /* Force policy re-evaluation to trigger adjust notifier. */
+ cpufreq_update_policy(dest_cpu);
+ queue_delayed_work_on(s->cpu, boost_rem_wq,
+ &s->boost_rem, msecs_to_jiffies(boost_ms));
+ }
+
+ return 0;
+}
+
+static int boost_migration_notify(struct notifier_block *nb,
+ unsigned long dest_cpu, void *arg)
+{
+ unsigned long flags;
+ struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
+
+ if (!boost_ms)
+ return NOTIFY_OK;
+
+ pr_debug("Migration: CPU%d --> CPU%d\n", (int) arg, (int) dest_cpu);
+ spin_lock_irqsave(&s->lock, flags);
+ s->pending = true;
+ s->src_cpu = (int) arg;
+ spin_unlock_irqrestore(&s->lock, flags);
+ wake_up(&s->sync_wq);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block boost_migration_nb = {
+ .notifier_call = boost_migration_notify,
+};
+
+static int cpu_boost_init(void)
+{
+ int cpu;
+ struct cpu_sync *s;
+
+ cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
+
+ boost_rem_wq = alloc_workqueue("cpuboost_rem_wq", WQ_HIGHPRI, 0);
+ if (!boost_rem_wq)
+ return -EFAULT;
+
+ for_each_possible_cpu(cpu) {
+ s = &per_cpu(sync_info, cpu);
+ s->cpu = cpu;
+ init_waitqueue_head(&s->sync_wq);
+ spin_lock_init(&s->lock);
+ INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
+ s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
+ "boost_sync/%d", cpu);
+ }
+ atomic_notifier_chain_register(&migration_notifier_head,
+ &boost_migration_nb);
+
+ return 0;
+}
+late_initcall(cpu_boost_init);
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 7d1952c..6c55285 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/kernel_stat.h>
#include <asm/cputime.h>
#define CREATE_TRACE_POINTS
@@ -69,6 +70,9 @@
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+/* Sampling down factor to be applied to min_sample_time at max freq */
+static unsigned int sampling_down_factor;
+
/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
@@ -93,7 +97,11 @@
* timer interval.
*/
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
-static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
+static unsigned int default_above_hispeed_delay[] = {
+ DEFAULT_ABOVE_HISPEED_DELAY };
+static spinlock_t above_hispeed_delay_lock;
+static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
+static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);
/* Non-zero means indefinite speed boost active */
static int boost_val;
@@ -109,6 +117,8 @@
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;
+static bool io_is_busy;
+
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -122,27 +132,108 @@
.owner = THIS_MODULE,
};
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+ cputime64_t *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = jiffies_to_usecs(cur_wall_time);
+
+ return jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
+ cputime64_t *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+ if (idle_time == -1ULL)
+ idle_time = get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_is_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+
static void cpufreq_interactive_timer_resched(
struct cpufreq_interactive_cpuinfo *pcpu)
{
- unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+ unsigned long expires;
unsigned long flags;
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ expires = jiffies + usecs_to_jiffies(timer_rate);
mod_timer_pinned(&pcpu->cpu_timer, expires);
+
if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
expires += usecs_to_jiffies(timer_slack_val);
mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
}
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/* The caller shall take enable_sem write semaphore to avoid any timer race.
+ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
+ * function.
+ */
+static void cpufreq_interactive_timer_start(int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+ unsigned long flags;
+
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, cpu);
+ if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, cpu);
+ }
+
spin_lock_irqsave(&pcpu->load_lock, flags);
pcpu->time_in_idle =
- get_cpu_idle_time_us(smp_processor_id(),
- &pcpu->time_in_idle_timestamp);
+ get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
pcpu->cputime_speedadj = 0;
pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
+static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&above_hispeed_delay_lock, flags);
+
+ for (i = 0; i < nabove_hispeed_delay - 1 &&
+ freq >= above_hispeed_delay[i+1]; i += 2)
+ ;
+
+ ret = above_hispeed_delay[i];
+ spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
+ return ret;
+}
+
static unsigned int freq_to_targetload(unsigned int freq)
{
int i;
@@ -185,9 +276,10 @@
* than or equal to the target load.
*/
- cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
- CPUFREQ_RELATION_L, &index);
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index))
+ break;
freq = pcpu->freq_table[index].frequency;
if (freq > prevfreq) {
@@ -199,10 +291,11 @@
* Find the highest frequency that is less
* than freqmax.
*/
- cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmax - 1, CPUFREQ_RELATION_H,
- &index);
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index))
+ break;
freq = pcpu->freq_table[index].frequency;
if (freq == freqmin) {
@@ -225,10 +318,11 @@
* Find the lowest frequency that is higher
* than freqmin.
*/
- cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmin + 1, CPUFREQ_RELATION_L,
- &index);
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index))
+ break;
freq = pcpu->freq_table[index].frequency;
/*
@@ -256,10 +350,15 @@
unsigned int delta_time;
u64 active_time;
- now_idle = get_cpu_idle_time_us(cpu, &now);
+ now_idle = get_cpu_idle_time(cpu, &now);
delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
- active_time = delta_time - delta_idle;
+
+ if (delta_time <= delta_idle)
+ active_time = 0;
+ else
+ active_time = delta_time - delta_idle;
+
pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
pcpu->time_in_idle = now_idle;
@@ -280,6 +379,7 @@
unsigned int index;
unsigned long flags;
bool boosted;
+ unsigned long mod_min_sample_time;
if (!down_read_trylock(&pcpu->enable_sem))
return;
@@ -315,7 +415,8 @@
if (pcpu->target_freq >= hispeed_freq &&
new_freq > pcpu->target_freq &&
- now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
+ now - pcpu->hispeed_validate_time <
+ freq_to_above_hispeed_delay(pcpu->target_freq)) {
trace_cpufreq_interactive_notyet(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
@@ -326,11 +427,8 @@
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_L,
- &index)) {
- pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
- (int) data);
+ &index))
goto rearm;
- }
new_freq = pcpu->freq_table[index].frequency;
@@ -338,8 +436,14 @@
* Do not scale below floor_freq unless we have been at or above the
* floor frequency for the minimum sample time since last validated.
*/
+ if (pcpu->policy->cur == pcpu->policy->max) {
+ mod_min_sample_time = sampling_down_factor;
+ } else {
+ mod_min_sample_time = min_sample_time;
+ }
+
if (new_freq < pcpu->floor_freq) {
- if (now - pcpu->floor_validate_time < min_sample_time) {
+ if (now - pcpu->floor_validate_time < mod_min_sample_time) {
trace_cpufreq_interactive_notyet(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
@@ -565,9 +669,19 @@
for_each_cpu(cpu, pcpu->policy->cpus) {
struct cpufreq_interactive_cpuinfo *pjcpu =
&per_cpu(cpuinfo, cpu);
+ if (cpu != freq->cpu) {
+ if (!down_read_trylock(&pjcpu->enable_sem))
+ continue;
+ if (!pjcpu->governor_enabled) {
+ up_read(&pjcpu->enable_sem);
+ continue;
+ }
+ }
spin_lock_irqsave(&pjcpu->load_lock, flags);
update_load(cpu);
spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ if (cpu != freq->cpu)
+ up_read(&pjcpu->enable_sem);
}
up_read(&pcpu->enable_sem);
@@ -579,6 +693,51 @@
.notifier_call = cpufreq_interactive_notifier,
};
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+ const char *cp;
+ int i;
+ int ntokens = 1;
+ unsigned int *tokenized_data;
+ int err = -EINVAL;
+
+ cp = buf;
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ if (!(ntokens & 0x1))
+ goto err;
+
+ tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+ if (!tokenized_data) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ cp = buf;
+ i = 0;
+ while (i < ntokens) {
+ if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+ goto err_kfree;
+
+ cp = strpbrk(cp, " :");
+ if (!cp)
+ break;
+ cp++;
+ }
+
+ if (i != ntokens)
+ goto err_kfree;
+
+ *num_tokens = ntokens;
+ return tokenized_data;
+
+err_kfree:
+ kfree(tokenized_data);
+err:
+ return ERR_PTR(err);
+}
+
static ssize_t show_target_loads(
struct kobject *kobj, struct attribute *attr, char *buf)
{
@@ -592,7 +751,7 @@
ret += sprintf(buf + ret, "%u%s", target_loads[i],
i & 0x1 ? ":" : " ");
- ret += sprintf(buf + ret, "\n");
+ ret += sprintf(buf + --ret, "\n");
spin_unlock_irqrestore(&target_loads_lock, flags);
return ret;
}
@@ -601,40 +760,13 @@
struct kobject *kobj, struct attribute *attr, const char *buf,
size_t count)
{
- int ret;
- const char *cp;
+ int ntokens;
unsigned int *new_target_loads = NULL;
- int ntokens = 1;
- int i;
unsigned long flags;
- cp = buf;
- while ((cp = strpbrk(cp + 1, " :")))
- ntokens++;
-
- if (!(ntokens & 0x1))
- goto err_inval;
-
- new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
- if (!new_target_loads) {
- ret = -ENOMEM;
- goto err;
- }
-
- cp = buf;
- i = 0;
- while (i < ntokens) {
- if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
- goto err_inval;
-
- cp = strpbrk(cp, " :");
- if (!cp)
- break;
- cp++;
- }
-
- if (i != ntokens)
- goto err_inval;
+ new_target_loads = get_tokenized_data(buf, &ntokens);
+ if (IS_ERR(new_target_loads))
+ return PTR_RET(new_target_loads);
spin_lock_irqsave(&target_loads_lock, flags);
if (target_loads != default_target_loads)
@@ -643,18 +775,56 @@
ntarget_loads = ntokens;
spin_unlock_irqrestore(&target_loads_lock, flags);
return count;
-
-err_inval:
- ret = -EINVAL;
-err:
- kfree(new_target_loads);
- return ret;
}
static struct global_attr target_loads_attr =
__ATTR(target_loads, S_IRUGO | S_IWUSR,
show_target_loads, store_target_loads);
+static ssize_t show_above_hispeed_delay(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&above_hispeed_delay_lock, flags);
+
+ for (i = 0; i < nabove_hispeed_delay; i++)
+ ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
+ i & 0x1 ? ":" : " ");
+
+ ret += sprintf(buf + --ret, "\n");
+ spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
+ return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ntokens;
+ unsigned int *new_above_hispeed_delay = NULL;
+ unsigned long flags;
+
+ new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+ if (IS_ERR(new_above_hispeed_delay))
+ return PTR_RET(new_above_hispeed_delay);
+
+ spin_lock_irqsave(&above_hispeed_delay_lock, flags);
+ if (above_hispeed_delay != default_above_hispeed_delay)
+ kfree(above_hispeed_delay);
+ above_hispeed_delay = new_above_hispeed_delay;
+ nabove_hispeed_delay = ntokens;
+ spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
+ return count;
+
+}
+
+static struct global_attr above_hispeed_delay_attr =
+ __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
+ show_above_hispeed_delay, store_above_hispeed_delay);
+
static ssize_t show_hispeed_freq(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -678,6 +848,29 @@
static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
show_hispeed_freq, store_hispeed_freq);
+static ssize_t show_sampling_down_factor(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", sampling_down_factor);
+}
+
+static ssize_t store_sampling_down_factor(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ long unsigned int val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ sampling_down_factor = val;
+ return count;
+}
+
+static struct global_attr sampling_down_factor_attr =
+ __ATTR(sampling_down_factor, 0644,
+ show_sampling_down_factor, store_sampling_down_factor);
static ssize_t show_go_hispeed_load(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -723,28 +916,6 @@
static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
show_min_sample_time, store_min_sample_time);
-static ssize_t show_above_hispeed_delay(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- return sprintf(buf, "%lu\n", above_hispeed_delay_val);
-}
-
-static ssize_t store_above_hispeed_delay(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t count)
-{
- int ret;
- unsigned long val;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- above_hispeed_delay_val = val;
- return count;
-}
-
-define_one_global_rw(above_hispeed_delay);
-
static ssize_t show_timer_rate(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -862,17 +1033,41 @@
define_one_global_rw(boostpulse_duration);
+static ssize_t show_io_is_busy(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ io_is_busy = val;
+ return count;
+}
+
+static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
+ show_io_is_busy, store_io_is_busy);
+
static struct attribute *interactive_attributes[] = {
&target_loads_attr.attr,
+ &above_hispeed_delay_attr.attr,
&hispeed_freq_attr.attr,
&go_hispeed_load_attr.attr,
- &above_hispeed_delay.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
&timer_slack.attr,
&boost.attr,
&boostpulse.attr,
&boostpulse_duration.attr,
+ &io_is_busy_attr.attr,
+ &sampling_down_factor_attr.attr,
NULL,
};
@@ -922,8 +1117,6 @@
hispeed_freq = policy->max;
for_each_cpu(j, policy->cpus) {
- unsigned long expires;
-
pcpu = &per_cpu(cpuinfo, j);
pcpu->policy = policy;
pcpu->target_freq = policy->cur;
@@ -934,14 +1127,7 @@
pcpu->hispeed_validate_time =
pcpu->floor_validate_time;
down_write(&pcpu->enable_sem);
- expires = jiffies + usecs_to_jiffies(timer_rate);
- pcpu->cpu_timer.expires = expires;
- add_timer_on(&pcpu->cpu_timer, j);
- if (timer_slack_val >= 0) {
- expires += usecs_to_jiffies(timer_slack_val);
- pcpu->cpu_slack_timer.expires = expires;
- add_timer_on(&pcpu->cpu_slack_timer, j);
- }
+ cpufreq_interactive_timer_start(j);
pcpu->governor_enabled = 1;
up_write(&pcpu->enable_sem);
}
@@ -1000,6 +1186,33 @@
else if (policy->min > policy->cur)
__cpufreq_driver_target(policy,
policy->min, CPUFREQ_RELATION_L);
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+
+ /* hold write semaphore to avoid race */
+ down_write(&pcpu->enable_sem);
+ if (pcpu->governor_enabled == 0) {
+ up_write(&pcpu->enable_sem);
+ continue;
+ }
+
+ /* update target_freq firstly */
+ if (policy->max < pcpu->target_freq)
+ pcpu->target_freq = policy->max;
+ else if (policy->min > pcpu->target_freq)
+ pcpu->target_freq = policy->min;
+
+ /* Reschedule timer.
+ * Delete the timers, else the timer callback may
+ * return without re-arm the timer when failed
+ * acquire the semaphore. This race may cause timer
+ * stopped unexpectedly.
+ */
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer_start(j);
+ up_write(&pcpu->enable_sem);
+ }
break;
}
return 0;
@@ -1029,6 +1242,7 @@
spin_lock_init(&target_loads_lock);
spin_lock_init(&speedchange_cpumask_lock);
+ spin_lock_init(&above_hispeed_delay_lock);
mutex_init(&gov_lock);
speedchange_task =
kthread_create(cpufreq_interactive_speedchange_task, NULL,
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e81cfda..d048a91 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -23,6 +23,7 @@
#include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
@@ -484,6 +485,77 @@
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
+/*
+ * cpuidle_unregister: unregister a driver and the devices. This function
+ * can be used only if the driver has been previously registered through
+ * the cpuidle_register function.
+ *
+ * @drv: a valid pointer to a struct cpuidle_driver
+ */
+void cpuidle_unregister(struct cpuidle_driver *drv)
+{
+ int cpu;
+ struct cpuidle_device *device;
+
+ for_each_possible_cpu(cpu) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ cpuidle_unregister_device(device);
+ }
+
+ cpuidle_unregister_driver(drv);
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister);
+
+/**
+ * cpuidle_register: registers the driver and the cpu devices with the
+ * coupled_cpus passed as parameter. This function is used for all common
+ * initialization pattern there are in the arch specific drivers. The
+ * devices is globally defined in this file.
+ *
+ * @drv : a valid pointer to a struct cpuidle_driver
+ * @coupled_cpus: a cpumask for the coupled states
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus)
+{
+ int ret, cpu;
+ struct cpuidle_device *device;
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("failed to register cpuidle driver\n");
+ return ret;
+ }
+
+ for_each_possible_cpu(cpu) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ device->cpu = cpu;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ /*
+ * On multiplatform for ARM, the coupled idle states could
+ * enabled in the kernel even if the cpuidle driver does not
+ * use it. Note, coupled_cpus is a struct copy.
+ */
+ if (coupled_cpus)
+ device->coupled_cpus = *coupled_cpus;
+#endif
+ ret = cpuidle_register_device(device);
+ if (!ret)
+ continue;
+
+ pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
+
+ cpuidle_unregister(drv);
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpuidle_register);
+
#ifdef CONFIG_SMP
static void smp_callback(void *v)
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 7778477..8037187 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -1949,6 +1949,12 @@
else
q_req->cryptlen = areq->cryptlen - authsize;
+ if ((q_req->cryptlen > ULONG_MAX - ivsize) ||
+ (q_req->cryptlen + ivsize > ULONG_MAX - areq->assoclen)) {
+ pr_err("Integer overflow on total aead req length.\n");
+ return -EINVAL;
+ }
+
totallen = q_req->cryptlen + ivsize + areq->assoclen;
pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 4c05978..a4154c1 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -43,13 +43,17 @@
#define QCE_MAX_NUM_DSCR 0x500
#define QCE_SECTOR_SIZE 0x200
-static DEFINE_MUTEX(bam_register_cnt);
+static DEFINE_MUTEX(bam_register_lock);
struct bam_registration_info {
+ struct list_head qlist;
uint32_t handle;
uint32_t cnt;
+ uint32_t bam_mem;
+ void __iomem *bam_iobase;
+ bool support_cmd_dscr;
};
-static struct bam_registration_info bam_registry;
-static bool ce_bam_registered;
+static LIST_HEAD(qce50_bam_list);
+
/*
* CE HW device structure.
* Each engine has an instance of the structure.
@@ -58,11 +62,14 @@
*/
struct qce_device {
struct device *pdev; /* Handle to platform_device structure */
+ struct bam_registration_info *pbam;
unsigned char *coh_vmem; /* Allocated coherent virtual memory */
dma_addr_t coh_pmem; /* Allocated coherent physical memory */
int memsize; /* Memory allocated */
- int is_shared; /* CE HW is shared */
+ uint32_t bam_mem; /* bam physical address, from DT */
+ uint32_t bam_mem_size; /* bam io size, from DT */
+ int is_shared; /* CE HW is shared */
bool support_cmd_dscr;
bool support_hw_key;
@@ -2162,25 +2169,93 @@
sps_connect_info->desc.phys_base);
sps_free_endpoint(sps_pipe_info);
}
-/**
- * Initialize SPS HW connected with CE core
- *
- * This function register BAM HW resources with
- * SPS driver and then initialize 2 SPS endpoints
- *
- * This function should only be called once typically
- * during driver probe.
- *
- * @pce_dev - Pointer to qce_device structure
- *
- * @return - 0 if successful else negative value.
- *
- */
-static int qce_sps_init(struct qce_device *pce_dev)
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+ struct bam_registration_info *pbam;
+
+ mutex_lock(&bam_register_lock);
+ pbam = pce_dev->pbam;
+ if (pbam == NULL)
+ goto ret;
+
+ pbam->cnt--;
+ if (pbam->cnt > 0)
+ goto ret;
+
+ if (pce_dev->ce_sps.bam_handle) {
+ sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+
+ pr_debug("deregister bam handle %x\n",
+ pce_dev->ce_sps.bam_handle);
+ pce_dev->ce_sps.bam_handle = 0;
+ }
+ iounmap(pbam->bam_iobase);
+ pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+ list_del(&pbam->qlist);
+ kfree(pbam);
+
+ pce_dev->pbam = NULL;
+ret:
+ mutex_unlock(&bam_register_lock);
+}
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
{
int rc = 0;
struct sps_bam_props bam = {0};
- bool register_bam = false;
+ struct bam_registration_info *pbam = NULL;
+ struct bam_registration_info *p;
+ uint32_t bam_cfg = 0 ;
+
+
+ mutex_lock(&bam_register_lock);
+
+ list_for_each_entry(p, &qce50_bam_list, qlist) {
+ if (p->bam_mem == pce_dev->bam_mem) {
+ pbam = p; /* found */
+ break;
+ }
+ }
+
+ if (pbam) {
+ pr_debug("found bam 0x%x\n", pbam->bam_mem);
+ pbam->cnt++;
+ pce_dev->ce_sps.bam_handle = pbam->handle;
+ pce_dev->ce_sps.bam_mem = pbam->bam_mem;
+ pce_dev->ce_sps.bam_iobase = pbam->bam_iobase;
+ pce_dev->pbam = pbam;
+ pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+ goto ret;
+ }
+
+ pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
+ if (!pbam) {
+ pr_err("qce50 Memory allocation of bam FAIL, error %ld\n",
+ PTR_ERR(pbam));
+
+ rc = -ENOMEM;
+ goto ret;
+ }
+ pbam->cnt = 1;
+ pbam->bam_mem = pce_dev->bam_mem;
+ pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
+ pce_dev->bam_mem_size);
+ if (!pbam->bam_iobase) {
+ kfree(pbam);
+ rc = -ENOMEM;
+ pr_err("Can not map BAM io memory\n");
+ goto ret;
+ }
+ pce_dev->ce_sps.bam_mem = pbam->bam_mem;
+ pce_dev->ce_sps.bam_iobase = pbam->bam_iobase;
+ pbam->handle = 0;
+ pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+ bam_cfg = readl_relaxed(pce_dev->ce_sps.bam_iobase +
+ CRYPTO_BAM_CNFG_BITS_REG);
+ pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+ true : false;
+ pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
bam.phys_addr = pce_dev->ce_sps.bam_mem;
bam.virt_addr = pce_dev->ce_sps.bam_iobase;
@@ -2212,27 +2287,46 @@
pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
- mutex_lock(&bam_register_cnt);
- if (ce_bam_registered == false) {
- bam_registry.handle = 0;
- bam_registry.cnt = 0;
+ /* Register CE Peripheral BAM device to SPS driver */
+ rc = sps_register_bam_device(&bam, &pbam->handle);
+ if (rc) {
+ pr_err("sps_register_bam_device() failed! err=%d", rc);
+ rc = -EIO;
+ iounmap(pbam->bam_iobase);
+ kfree(pbam);
+ goto ret;
}
- if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
- /* Register CE Peripheral BAM device to SPS driver */
- rc = sps_register_bam_device(&bam, &bam_registry.handle);
- if (rc) {
- mutex_unlock(&bam_register_cnt);
- pr_err("sps_register_bam_device() failed! err=%d", rc);
- return -EIO;
- }
- bam_registry.cnt++;
- register_bam = true;
- ce_bam_registered = true;
- } else {
- bam_registry.cnt++;
- }
- mutex_unlock(&bam_register_cnt);
- pce_dev->ce_sps.bam_handle = bam_registry.handle;
+
+ pce_dev->pbam = pbam;
+ list_add_tail(&pbam->qlist, &qce50_bam_list);
+ pce_dev->ce_sps.bam_handle = pbam->handle;
+
+ret:
+ mutex_unlock(&bam_register_lock);
+
+ return rc;
+}
+/**
+ * Initialize SPS HW connected with CE core
+ *
+ * This function register BAM HW resources with
+ * SPS driver and then initialize 2 SPS endpoints
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+ int rc = 0;
+
+ rc = qce_sps_get_bam(pce_dev);
+ if (rc)
+ return rc;
pr_debug("BAM device registered. bam_handle=0x%x",
pce_dev->ce_sps.bam_handle);
@@ -2253,14 +2347,7 @@
sps_connect_consumer_err:
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
sps_connect_producer_err:
- if (register_bam) {
- mutex_lock(&bam_register_cnt);
- sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
- ce_bam_registered = false;
- bam_registry.handle = 0;
- bam_registry.cnt = 0;
- mutex_unlock(&bam_register_cnt);
- }
+ qce_sps_release_bam(pce_dev);
return rc;
}
@@ -2280,17 +2367,7 @@
{
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer);
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
- mutex_lock(&bam_register_cnt);
- if ((bam_registry.handle != 0) && (bam_registry.cnt == 1)) {
- sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
- bam_registry.cnt = 0;
- bam_registry.handle = 0;
- }
- if ((bam_registry.handle != 0) && (bam_registry.cnt > 1))
- bam_registry.cnt--;
- mutex_unlock(&bam_register_cnt);
-
- iounmap(pce_dev->ce_sps.bam_iobase);
+ qce_sps_release_bam(pce_dev);
}
static void _aead_sps_producer_callback(struct sps_event_notify *notify)
@@ -4069,22 +4146,15 @@
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"crypto-bam-base");
if (resource) {
- pce_dev->ce_sps.bam_mem = resource->start;
- pce_dev->ce_sps.bam_iobase = ioremap_nocache(resource->start,
- resource_size(resource));
- if (!pce_dev->ce_sps.bam_iobase) {
- rc = -ENOMEM;
- pr_err("Can not map BAM io memory\n");
- goto err_getting_bam_info;
- }
+ pce_dev->bam_mem = resource->start;
+ pce_dev->bam_mem_size = resource_size(resource);
} else {
pr_err("CRYPTO BAM mem unavailable.\n");
rc = -ENODEV;
goto err_getting_bam_info;
}
- pr_warn("ce_bam_phy_reg_base=0x%x ", pce_dev->ce_sps.bam_mem);
- pr_warn("ce_bam_virt_reg_base=0x%x\n",
- (uint32_t)pce_dev->ce_sps.bam_iobase);
+ pr_warn("ce_bam_phy_reg_base=0x%x ", pce_dev->bam_mem);
+
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (resource) {
pce_dev->ce_sps.bam_irq = resource->start;
@@ -4250,7 +4320,6 @@
void *qce_open(struct platform_device *pdev, int *rc)
{
struct qce_device *pce_dev;
- uint32_t bam_cfg = 0 ;
pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
if (!pce_dev) {
@@ -4293,15 +4362,9 @@
}
*rc = 0;
- bam_cfg = readl_relaxed(pce_dev->ce_sps.bam_iobase +
- CRYPTO_BAM_CNFG_BITS_REG);
- pce_dev->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
- true : false;
qce_init_ce_cfg_val(pce_dev);
- qce_setup_ce_sps_data(pce_dev);
qce_sps_init(pce_dev);
-
-
+ qce_setup_ce_sps_data(pce_dev);
qce_disable_clk(pce_dev);
return pce_dev;
@@ -4313,8 +4376,6 @@
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
- if (pce_dev->ce_sps.bam_iobase)
- iounmap(pce_dev->ce_sps.bam_iobase);
if (pce_dev->iobase)
iounmap(pce_dev->iobase);
err_pce_dev:
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 81a90fe..4845f11 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1339,7 +1339,7 @@
areq->cipher_op_req.vbuf.src[0].len))
return -EFAULT;
- k_align_src += areq->cipher_op_req.vbuf.src[0].len;
+ k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
for (i = 1; i < areq->cipher_op_req.entries; i++) {
user_src =
@@ -1602,11 +1602,6 @@
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
struct qcedev_control *podev)
{
-
- if (req->encklen < 0) {
- pr_err("%s: Invalid key size: %d\n", __func__, req->encklen);
- return -EINVAL;
- }
/* if intending to use HW key make sure key fields are set
* correctly and HW key is indeed supported in target
*/
@@ -1701,6 +1696,13 @@
goto error;
}
}
+
+ if (req->data_len < req->byteoffset) {
+ pr_err("%s: req data length %u is less than byteoffset %u\n",
+ __func__, req->data_len, req->byteoffset);
+ goto error;
+ }
+
/* Ensure zer ivlen for ECB mode */
if (req->ivlen > 0) {
if ((req->mode == QCEDEV_AES_MODE_ECB) ||
@@ -1716,16 +1718,28 @@
}
}
/* Check for sum of all dst length is equal to data_len */
- for (i = 0; (i < QCEDEV_MAX_BUFFERS) && (total < req->data_len); i++)
+ for (i = 0; (i < QCEDEV_MAX_BUFFERS) && (total < req->data_len); i++) {
+ if (req->vbuf.dst[i].len > ULONG_MAX - total) {
+ pr_err("%s: Integer overflow on total req dst vbuf length\n",
+ __func__);
+ goto error;
+ }
total += req->vbuf.dst[i].len;
+ }
if (total != req->data_len) {
pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
__func__, i, total, req->data_len);
goto error;
}
/* Check for sum of all src length is equal to data_len */
- for (i = 0, total = 0; i < req->entries; i++)
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (req->vbuf.src[i].len > ULONG_MAX - total) {
+ pr_err("%s: Integer overflow on total req src vbuf length\n",
+ __func__);
+ goto error;
+ }
total += req->vbuf.src[i].len;
+ }
if (total != req->data_len) {
pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
__func__, total, req->data_len);
@@ -1781,8 +1795,15 @@
}
/* Check for sum of all src length is equal to data_len */
- for (i = 0, total = 0; i < req->entries; i++)
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (req->data[i].len > ULONG_MAX - total) {
+ pr_err("%s: Integer overflow on total req buf length\n",
+ __func__);
+ goto sha_error;
+ }
total += req->data[i].len;
+ }
+
if (total != req->data_len) {
pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
__func__, total, req->data_len);
@@ -2112,21 +2133,21 @@
int len = 0;
pstat = &_qcedev_stat;
- len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
"\nQualcomm QCE dev driver %d Statistics:\n",
id + 1);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" Encryption operation success : %d\n",
pstat->qcedev_enc_success);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" Encryption operation fail : %d\n",
pstat->qcedev_enc_fail);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" Decryption operation success : %d\n",
pstat->qcedev_dec_success);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" Encryption operation fail : %d\n",
pstat->qcedev_dec_fail);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index ae57d6c..6606706 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -409,7 +409,7 @@
{
int i;
- for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg))
+ for (i = 0; nbytes > 0 && sg != NULL; i++, sg = scatterwalk_sg_next(sg))
nbytes -= sg->length;
return i;
@@ -628,98 +628,98 @@
int len = 0;
pstat = &_qcrypto_stat;
- len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
"\nQualcomm crypto accelerator %d Statistics:\n",
id + 1);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK AES CIPHER encryption : %d\n",
pstat->ablk_cipher_aes_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK AES CIPHER decryption : %d\n",
pstat->ablk_cipher_aes_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK DES CIPHER encryption : %d\n",
pstat->ablk_cipher_des_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK DES CIPHER decryption : %d\n",
pstat->ablk_cipher_des_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK 3DES CIPHER encryption : %d\n",
pstat->ablk_cipher_3des_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK 3DES CIPHER decryption : %d\n",
pstat->ablk_cipher_3des_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK CIPHER operation success: %d\n",
pstat->ablk_cipher_op_success);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" ABLK CIPHER operation fail : %d\n",
pstat->ablk_cipher_op_fail);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD SHA1-AES encryption : %d\n",
pstat->aead_sha1_aes_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD SHA1-AES decryption : %d\n",
pstat->aead_sha1_aes_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD SHA1-DES encryption : %d\n",
pstat->aead_sha1_des_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD SHA1-DES decryption : %d\n",
pstat->aead_sha1_des_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD SHA1-3DES encryption : %d\n",
pstat->aead_sha1_3des_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD SHA1-3DES decryption : %d\n",
pstat->aead_sha1_3des_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD CCM-AES encryption : %d\n",
pstat->aead_ccm_aes_enc);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD CCM-AES decryption : %d\n",
pstat->aead_ccm_aes_dec);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD operation success : %d\n",
pstat->aead_op_success);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD operation fail : %d\n",
pstat->aead_op_fail);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD bad message : %d\n",
pstat->aead_bad_msg);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA1 digest : %d\n",
pstat->sha1_digest);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA256 digest : %d\n",
pstat->sha256_digest);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA operation fail : %d\n",
pstat->sha_op_fail);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA operation success : %d\n",
pstat->sha_op_success);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA1 HMAC digest : %d\n",
pstat->sha1_hmac_digest);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA256 HMAC digest : %d\n",
pstat->sha256_hmac_digest);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA HMAC operation fail : %d\n",
pstat->sha_hmac_op_fail);
- len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" SHA HMAC operation success : %d\n",
pstat->sha_hmac_op_success);
return len;
@@ -1423,8 +1423,20 @@
rctx->orig_src = req->src;
rctx->orig_dst = req->dst;
+
+ if ((MAX_ALIGN_SIZE*2 > ULONG_MAX - req->assoclen) ||
+ ((MAX_ALIGN_SIZE*2 + req->assoclen) >
+ ULONG_MAX - qreq.authsize) ||
+ ((MAX_ALIGN_SIZE*2 + req->assoclen +
+ qreq.authsize) >
+ ULONG_MAX - req->cryptlen)) {
+ pr_err("Integer overflow on aead req length.\n");
+ return -EINVAL;
+ }
+
rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
- qreq.authsize + 64*2), GFP_ATOMIC);
+ qreq.authsize + MAX_ALIGN_SIZE*2),
+ GFP_ATOMIC);
if (rctx->data == NULL) {
pr_err("Mem Alloc fail rctx->data, err %ld\n",
PTR_ERR(rctx->data));
@@ -1486,6 +1498,16 @@
* include assoicated data, ciphering data stream,
* generated MAC, and CCM padding.
*/
+ if ((MAX_ALIGN_SIZE * 2 > ULONG_MAX - req->assoclen) ||
+ ((MAX_ALIGN_SIZE * 2 + req->assoclen) >
+ ULONG_MAX - qreq.ivsize) ||
+ ((MAX_ALIGN_SIZE * 2 + req->assoclen
+ + qreq.ivsize)
+ > ULONG_MAX - req->cryptlen)) {
+ pr_err("Integer overflow on aead req length.\n");
+ return -EINVAL;
+ }
+
rctx->data = kzalloc(
(req->cryptlen +
req->assoclen +
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 1c63b70..17bbe19 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -63,6 +63,14 @@
Otherwise, the governor does not change the frequnecy
given at the initialization.
+config DEVFREQ_GOV_MSM_ADRENO_TZ
+ tristate "MSM Adreno Trustzone"
+ depends on MSM_KGSL && MSM_SCM
+ help
+ Trustzone based governor for the Adreno GPU.
+ Sets the frequency using an "on-demand" algorithm.
+ This governor is unlikely to be useful for other devices.
+
comment "DEVFREQ Drivers"
config ARM_EXYNOS4_BUS_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 8c46423..29b48ff 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_MSM_ADRENO_TZ) += governor_msm_adreno_tz.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index dadf87c..9e49b3e 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -187,7 +187,7 @@
return -EINVAL;
/* Reevaluate the proper frequency */
- err = devfreq->governor->get_target_freq(devfreq, &freq);
+ err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
if (err)
return err;
@@ -459,7 +459,7 @@
return NULL;
for (i = 0; i < profile->num_governor_data; i++) {
- if (!strncmp(governor_name, profile->governor_data[i].name,
+ if (strncmp(governor_name, profile->governor_data[i].name,
DEVFREQ_NAME_LEN) == 0) {
data = profile->governor_data[i].data;
break;
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
new file mode 100644
index 0000000..8c97fe9
--- /dev/null
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ftrace.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <mach/scm.h>
+#include "governor.h"
+
+static DEFINE_SPINLOCK(tz_lock);
+
+/*
+ * FLOOR is 5msec to capture up to 3 re-draws
+ * per frame for 60fps content.
+ */
+#define FLOOR 5000
+#define LONG_FLOOR 50000
+#define HIST 5
+#define TARGET 80
+#define CAP 75
+
+/*
+ * CEILING is 50msec, larger than any standard
+ * frame length, but less than the idle timer.
+ */
+#define CEILING 50000
+#define TZ_RESET_ID 0x3
+#define TZ_UPDATE_ID 0x4
+#define TZ_INIT_ID 0x6
+
+#define TAG "msm_adreno_tz: "
+
+/* Trap into the TrustZone, and call funcs there. */
+static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz*/
+ __iowmb();
+ ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+
+static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2, u32 val3)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz*/
+ __iowmb();
+ ret = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2, val3);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+
+static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
+ unsigned int norm_max)
+{
+ int i;
+
+ priv->bus.max = norm_max;
+ for (i = 0; i < priv->bus.num; i++) {
+ priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
+ priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
+ }
+}
+
+static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
+ u32 *flag)
+{
+ int result = 0;
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+ struct devfreq_dev_status stats;
+ struct xstats b;
+ int val, level = 0;
+ int act_level;
+ int norm_cycles;
+ int gpu_percent;
+
+ if (priv->bus.num)
+ stats.private_data = &b;
+ else
+ stats.private_data = NULL;
+ result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
+ if (result) {
+ pr_err(TAG "get_status failed %d\n", result);
+ return result;
+ }
+
+ *freq = stats.current_frequency;
+ *flag = 0;
+ priv->bin.total_time += stats.total_time;
+ priv->bin.busy_time += stats.busy_time;
+ if (priv->bus.num) {
+ priv->bus.total_time += stats.total_time;
+ priv->bus.gpu_time += stats.busy_time;
+ priv->bus.ram_time += b.ram_time;
+ priv->bus.ram_time += b.ram_wait;
+ }
+
+ /*
+ * Do not waste CPU cycles running this algorithm if
+ * the GPU just started, or if less than FLOOR time
+ * has passed since the last run.
+ */
+ if ((stats.total_time == 0) ||
+ (priv->bin.total_time < FLOOR)) {
+ return 1;
+ }
+
+ level = devfreq_get_freq_level(devfreq, stats.current_frequency);
+
+ if (level < 0) {
+ pr_err(TAG "bad freq %lu\n", stats.current_frequency);
+ return level;
+ }
+
+ /*
+ * If there is an extended block of busy processing,
+ * increase frequency. Otherwise run the normal algorithm.
+ */
+ if (priv->bin.busy_time > CEILING) {
+ val = -1 * level;
+ } else {
+ val = __secure_tz_entry3(TZ_UPDATE_ID,
+ level,
+ priv->bin.total_time,
+ priv->bin.busy_time);
+ }
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+
+ /*
+ * If the decision is to move to a different level, make sure the GPU
+ * frequency changes.
+ */
+ if (val) {
+ level += val;
+ level = max(level, 0);
+ level = min_t(int, level, devfreq->profile->max_state - 1);
+ goto clear;
+ }
+
+ if (priv->bus.total_time < LONG_FLOOR)
+ goto end;
+ norm_cycles = (unsigned int)priv->bus.ram_time /
+ (unsigned int) priv->bus.total_time;
+ gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
+ (unsigned int) priv->bus.total_time;
+ /*
+ * If there's a new high watermark, update the cutoffs and send the
+ * FAST hint. Otherwise check the current value against the current
+ * cutoffs.
+ */
+ if (norm_cycles > priv->bus.max) {
+ _update_cutoff(priv, norm_cycles);
+ *flag = DEVFREQ_FLAG_FAST_HINT;
+ } else {
+ /*
+ * Normalize by gpu_time unless it is a small fraction of
+ * the total time interval.
+ */
+ norm_cycles = (100 * norm_cycles) / TARGET;
+ act_level = priv->bus.index[level] + b.mod;
+ act_level = (act_level < 0) ? 0 : act_level;
+ act_level = (act_level >= priv->bus.num) ?
+ (priv->bus.num - 1) : act_level;
+ if (norm_cycles > priv->bus.up[act_level] &&
+ gpu_percent > CAP)
+ *flag = DEVFREQ_FLAG_FAST_HINT;
+ else if (norm_cycles < priv->bus.down[act_level] && level)
+ *flag = DEVFREQ_FLAG_SLOW_HINT;
+ }
+
+clear:
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+
+end:
+ *freq = devfreq->profile->freq_table[level];
+ return 0;
+}
+
+static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp)
+{
+ int result = 0;
+ struct devfreq *devfreq = devp;
+
+ switch (type) {
+ case ADRENO_DEVFREQ_NOTIFY_IDLE:
+ case ADRENO_DEVFREQ_NOTIFY_RETIRE:
+ mutex_lock(&devfreq->lock);
+ result = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ break;
+ /* ignored by this governor */
+ case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
+ default:
+ break;
+ }
+ return notifier_from_errno(result);
+}
+
+static int tz_start(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv;
+ unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
+ unsigned int t1, t2 = 2 * HIST;
+ int i, out, ret;
+
+ if (devfreq->data == NULL) {
+ pr_err(TAG "data is required for this governor\n");
+ return -EINVAL;
+ }
+
+ priv = devfreq->data;
+ priv->nb.notifier_call = tz_notify;
+
+ out = 1;
+ if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
+ for (i = 0; i < devfreq->profile->max_state; i++)
+ tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
+ tz_pwrlevels[0] = i;
+ } else {
+ pr_err(TAG "tz_pwrlevels[] is too short\n");
+ return -EINVAL;
+ }
+
+ ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
+ sizeof(tz_pwrlevels), NULL, 0);
+
+ if (ret != 0)
+ pr_err(TAG "tz_init failed\n");
+
+ /* Set up the cut-over percentages for the bus calculation. */
+ if (priv->bus.num) {
+ for (i = 0; i < priv->bus.num; i++) {
+ t1 = (u32)(100 * priv->bus.ib[i]) /
+ (u32)priv->bus.ib[priv->bus.num - 1];
+ priv->bus.p_up[i] = t1 - HIST;
+ priv->bus.p_down[i] = t2 - 2 * HIST;
+ t2 = t1;
+ }
+ /* Set the upper-most and lower-most bounds correctly. */
+ priv->bus.p_down[0] = 0;
+ priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
+ priv->bus.p_down[1] : (2 * HIST);
+ if (priv->bus.num - 1 >= 0)
+ priv->bus.p_up[priv->bus.num - 1] = 100;
+ _update_cutoff(priv, priv->bus.max);
+ }
+
+ return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
+}
+
+static int tz_stop(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ kgsl_devfreq_del_notifier(devfreq->dev.parent, &priv->nb);
+ return 0;
+}
+
+
+static int tz_resume(struct devfreq *devfreq)
+{
+ struct devfreq_dev_profile *profile = devfreq->profile;
+ unsigned long freq;
+
+ freq = profile->initial_freq;
+
+ return profile->target(devfreq->dev.parent, &freq, 0);
+}
+
+static int tz_suspend(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ __secure_tz_entry2(TZ_RESET_ID, 0, 0);
+
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ return 0;
+}
+
+static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
+{
+ int result;
+ BUG_ON(devfreq == NULL);
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ result = tz_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ result = tz_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ result = tz_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ result = tz_resume(devfreq);
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ /* ignored, this governor doesn't use polling */
+ default:
+ result = 0;
+ break;
+ }
+
+ return result;
+}
+
+static struct devfreq_governor msm_adreno_tz = {
+ .name = "msm-adreno-tz",
+ .get_target_freq = tz_get_target_freq,
+ .event_handler = tz_handler,
+};
+
+static int __init msm_adreno_tz_init(void)
+{
+ return devfreq_add_governor(&msm_adreno_tz);
+}
+subsys_initcall(msm_adreno_tz_init);
+
+static void __exit msm_adreno_tz_exit(void)
+{
+ int ret;
+ ret = devfreq_remove_governor(&msm_adreno_tz);
+ if (ret)
+ pr_err(TAG "failed to remove governor %d\n", ret);
+
+ return;
+}
+
+module_exit(msm_adreno_tz_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index bc7da1e..af2edc2 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -14,7 +14,8 @@
#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
/*
* target callback should be able to get floor value as
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 6d43685..57f3738 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -14,7 +14,8 @@
#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
/*
* target callback should be able to get ceiling value as
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 0720ba8..bb29360 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -19,7 +19,8 @@
#define DFSO_UPTHRESHOLD (90)
#define DFSO_DOWNDIFFERENCTIAL (5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
struct devfreq_dev_status stat;
int err = df->profile->get_dev_status(df->dev.parent, &stat);
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 35de6e8..4fbde04 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -22,7 +22,8 @@
bool valid;
};
-static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq,
+ u32 *flag)
{
struct userspace_data *data = df->data;
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 0e460c8..108abe6 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -2,4 +2,4 @@
ion_carveout_heap.o ion_chunk_heap.o
obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
-obj-$(CONFIG_ION_MSM) += ion_iommu_heap.o ion_cp_heap.o ion_removed_heap.o msm/
+obj-$(CONFIG_ION_MSM) += ion_cp_heap.o ion_removed_heap.o msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 6777dae..c791c49 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -24,14 +24,13 @@
#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
-#include <linux/rtmutex.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -62,6 +61,8 @@
unsigned long arg);
struct rb_root clients;
struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
};
/**
@@ -147,7 +148,6 @@
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
-static bool ion_heap_drain_freelist(struct ion_heap *heap);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
@@ -174,7 +174,7 @@
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
- ion_heap_drain_freelist(heap);
+ ion_heap_freelist_drain(heap, 0);
ret = heap->ops->allocate(heap, buffer, len, align,
flags);
if (ret)
@@ -242,7 +242,7 @@
buffer->heap->ops->unsecure_buffer(buffer, 1);
}
-static void _ion_buffer_destroy(struct ion_buffer *buffer)
+void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
@@ -255,7 +255,7 @@
kfree(buffer);
}
-static void ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct kref *kref)
{
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
@@ -265,14 +265,10 @@
rb_erase(&buffer->node, &dev->buffers);
mutex_unlock(&dev->buffer_lock);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
- rt_mutex_lock(&heap->lock);
- list_add(&buffer->list, &heap->free_list);
- rt_mutex_unlock(&heap->lock);
- wake_up(&heap->waitqueue);
- return;
- }
- _ion_buffer_destroy(buffer);
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_freelist_add(heap, buffer);
+ else
+ ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
@@ -282,7 +278,7 @@
static int ion_buffer_put(struct ion_buffer *buffer)
{
- return kref_put(&buffer->ref, ion_buffer_destroy);
+ return kref_put(&buffer->ref, _ion_buffer_destroy);
}
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
@@ -708,6 +704,35 @@
.release = single_release,
};
+static bool startswith(const char *string, const char *prefix)
+{
+ size_t l1 = strlen(string);
+ size_t l2 = strlen(prefix);
+ return strncmp(string, prefix, min(l1, l2)) == 0;
+}
+
+static int ion_get_client_serial(const struct rb_root *root,
+ const unsigned char *name)
+{
+ int serial = -1;
+ struct rb_node *node;
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ int n;
+ char *serial_string;
+ struct ion_client *client = rb_entry(node, struct ion_client,
+ node);
+ if (!startswith(client->name, name))
+ continue;
+ serial_string = strrchr(client->name, '-');
+ if (!serial_string)
+ continue;
+ serial_string++;
+ sscanf(serial_string, "%d", &n);
+ serial = max(serial, n);
+ }
+ return serial + 1;
+}
+
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name)
{
@@ -717,13 +742,16 @@
struct rb_node *parent = NULL;
struct ion_client *entry;
pid_t pid;
- unsigned int name_len;
+ int name_len;
+ int client_serial;
if (!name) {
pr_err("%s: Name cannot be null\n", __func__);
return ERR_PTR(-EINVAL);
}
name_len = strnlen(name, 64);
+ /* add some space to accommodate the serial number suffix */
+ name_len = min(64, name_len + 11);
get_task_struct(current->group_leader);
task_lock(current->group_leader);
@@ -754,14 +782,14 @@
put_task_struct(current->group_leader);
kfree(client);
return ERR_PTR(-ENOMEM);
- } else {
- strlcpy(client->name, name, name_len+1);
}
client->task = task;
client->pid = pid;
down_write(&dev->lock);
+ client_serial = ion_get_client_serial(&dev->clients, name);
+ snprintf(client->name, name_len, "%s-%d", name, client_serial);
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
@@ -776,9 +804,16 @@
rb_insert_color(&client->node, &dev->clients);
- client->debug_root = debugfs_create_file(name, 0664,
- dev->debug_root, client,
- &debug_client_fops);
+ client->debug_root = debugfs_create_file(client->name, 0664,
+ dev->clients_debug_root,
+ client, &debug_client_fops);
+ if (!client->debug_root) {
+ char buf[256], *path;
+ path = dentry_path(dev->clients_debug_root, buf, 256);
+ pr_err("Failed to create client debugfs at %s/%s\n",
+ path, client->name);
+ }
+
up_write(&dev->lock);
return client;
@@ -1397,134 +1432,89 @@
}
/**
- * Searches through a clients handles to find if the buffer is owned
- * by this client. Used for debug output.
- * @param client pointer to candidate owner of buffer
- * @param buf pointer to buffer that we are trying to find the owner of
- * @return 1 if found, 0 otherwise
- */
-static int ion_debug_find_buffer_owner(const struct ion_client *client,
- const struct ion_buffer *buf)
-{
- struct rb_node *n;
-
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- const struct ion_handle *handle = rb_entry(n,
- const struct ion_handle,
- node);
- if (handle->buffer == buf)
- return 1;
- }
- return 0;
-}
-
-/**
- * Adds mem_map_data pointer to the tree of mem_map
- * Used for debug output.
- * @param mem_map The mem_map tree
- * @param data The new data to add to the tree
- */
-static void ion_debug_mem_map_add(struct rb_root *mem_map,
- struct mem_map_data *data)
-{
- struct rb_node **p = &mem_map->rb_node;
- struct rb_node *parent = NULL;
- struct mem_map_data *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct mem_map_data, node);
-
- if (data->addr < entry->addr) {
- p = &(*p)->rb_left;
- } else if (data->addr > entry->addr) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: mem_map_data already found.", __func__);
- BUG();
- }
- }
- rb_link_node(&data->node, parent, p);
- rb_insert_color(&data->node, mem_map);
-}
-
-/**
- * Search for an owner of a buffer by iterating over all ION clients.
- * @param dev ion device containing pointers to all the clients.
- * @param buffer pointer to buffer we are trying to find the owner of.
- * @return name of owner.
- */
-const char *ion_debug_locate_owner(const struct ion_device *dev,
- const struct ion_buffer *buffer)
-{
- struct rb_node *j;
- const char *client_name = NULL;
-
- for (j = rb_first(&dev->clients); j && !client_name;
- j = rb_next(j)) {
- struct ion_client *client = rb_entry(j, struct ion_client,
- node);
- if (ion_debug_find_buffer_owner(client, buffer))
- client_name = client->name;
- }
- return client_name;
-}
-
-/**
* Create a mem_map of the heap.
* @param s seq_file to log error message to.
* @param heap The heap to create mem_map for.
* @param mem_map The mem map to be created.
*/
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
- struct rb_root *mem_map)
+ struct list_head *mem_map)
{
struct ion_device *dev = heap->dev;
- struct rb_node *n;
+ struct rb_node *cnode;
size_t size;
+ struct ion_client *client;
if (!heap->ops->phys)
return;
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer =
- rb_entry(n, struct ion_buffer, node);
- if (buffer->heap->id == heap->id) {
- struct mem_map_data *data =
- kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- seq_printf(s, "ERROR: out of memory. "
- "Part of memory map will not be logged\n");
- break;
- }
+ down_read(&dev->lock);
+ for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
+ struct rb_node *hnode;
+ client = rb_entry(cnode, struct ion_client, node);
- buffer->heap->ops->phys(buffer->heap, buffer,
- &(data->addr), &size);
- data->size = (unsigned long) size;
- data->addr_end = data->addr + data->size - 1;
- data->client_name = ion_debug_locate_owner(dev, buffer);
- ion_debug_mem_map_add(mem_map, data);
+ mutex_lock(&client->lock);
+ for (hnode = rb_first(&client->handles);
+ hnode;
+ hnode = rb_next(hnode)) {
+ struct ion_handle *handle = rb_entry(
+ hnode, struct ion_handle, node);
+ if (handle->buffer->heap == heap) {
+ struct mem_map_data *data =
+ kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto inner_error;
+ heap->ops->phys(heap, handle->buffer,
+ &(data->addr), &size);
+ data->size = (unsigned long) size;
+ data->addr_end = data->addr + data->size - 1;
+ data->client_name = kstrdup(client->name,
+ GFP_KERNEL);
+ if (!data->client_name) {
+ kfree(data);
+ goto inner_error;
+ }
+ list_add(&data->node, mem_map);
+ }
}
+ mutex_unlock(&client->lock);
}
+ up_read(&dev->lock);
+ return;
+
+inner_error:
+ seq_puts(s,
+ "ERROR: out of memory. Part of memory map will not be logged\n");
+ mutex_unlock(&client->lock);
+ up_read(&dev->lock);
}
/**
* Free the memory allocated by ion_debug_mem_map_create
* @param mem_map The mem map to free.
*/
-static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
+static void ion_debug_mem_map_destroy(struct list_head *mem_map)
{
if (mem_map) {
- struct rb_node *n;
- while ((n = rb_first(mem_map)) != 0) {
- struct mem_map_data *data =
- rb_entry(n, struct mem_map_data, node);
- rb_erase(&data->node, mem_map);
+ struct mem_map_data *data, *tmp;
+ list_for_each_entry_safe(data, tmp, mem_map, node) {
+ list_del(&data->node);
+ kfree(data->client_name);
kfree(data);
}
}
}
+static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct mem_map_data *d1, *d2;
+ d1 = list_entry(a, struct mem_map_data, node);
+ d2 = list_entry(b, struct mem_map_data, node);
+ if (d1->addr == d2->addr)
+ return d1->size - d2->size;
+ return d1->addr - d2->addr;
+}
+
/**
* Print heap debug information.
* @param s seq_file to log message to.
@@ -1533,8 +1523,9 @@
static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
{
if (heap->ops->print_debug) {
- struct rb_root mem_map = RB_ROOT;
+ struct list_head mem_map = LIST_HEAD_INIT(mem_map);
ion_debug_mem_map_create(s, heap, &mem_map);
+ list_sort(NULL, &mem_map, mem_map_cmp);
heap->ops->print_debug(heap, s, &mem_map);
ion_debug_mem_map_destroy(&mem_map);
}
@@ -1551,6 +1542,7 @@
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
seq_printf(s, "----------------------------------------------------\n");
+ down_read(&dev->lock);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1568,6 +1560,7 @@
client->pid, size);
}
}
+ up_read(&dev->lock);
seq_printf(s, "----------------------------------------------------\n");
seq_printf(s, "orphaned allocations (info is from last known client):"
"\n");
@@ -1611,87 +1604,89 @@
.release = single_release,
};
-static size_t ion_heap_free_list_is_empty(struct ion_heap *heap)
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
{
- bool is_empty;
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
- rt_mutex_lock(&heap->lock);
- is_empty = list_empty(&heap->free_list);
- rt_mutex_unlock(&heap->lock);
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
- return is_empty;
+ if (!val)
+ return 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
}
-static int ion_heap_deferred_free(void *data)
+static int debug_shrink_get(void *data, u64 *val)
{
- struct ion_heap *heap = data;
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
- while (true) {
- struct ion_buffer *buffer;
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
- wait_event_freezable(heap->waitqueue,
- !ion_heap_free_list_is_empty(heap));
-
- rt_mutex_lock(&heap->lock);
- if (list_empty(&heap->free_list)) {
- rt_mutex_unlock(&heap->lock);
- continue;
- }
- buffer = list_first_entry(&heap->free_list, struct ion_buffer,
- list);
- list_del(&buffer->list);
- rt_mutex_unlock(&heap->lock);
- _ion_buffer_destroy(buffer);
- }
-
- return 0;
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
}
-static bool ion_heap_drain_freelist(struct ion_heap *heap)
-{
- struct ion_buffer *buffer, *tmp;
-
- if (ion_heap_free_list_is_empty(heap))
- return false;
- rt_mutex_lock(&heap->lock);
- list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
- list_del(&buffer->list);
- _ion_buffer_destroy(buffer);
- }
- BUG_ON(!list_empty(&heap->free_list));
- rt_mutex_unlock(&heap->lock);
-
-
- return true;
-}
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+ debug_shrink_set, "%llu\n");
+#endif
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
- struct sched_param param = { .sched_priority = 0 };
+ struct dentry *debug_file;
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
- INIT_LIST_HEAD(&heap->free_list);
- rt_mutex_init(&heap->lock);
- init_waitqueue_head(&heap->waitqueue);
- heap->task = kthread_run(ion_heap_deferred_free, heap,
- "%s", heap->name);
-		sched_setscheduler(heap->task, SCHED_IDLE, &param);
- if (IS_ERR(heap->task))
- pr_err("%s: creating thread for deferred free failed\n",
- __func__);
- }
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_init_deferred_free(heap);
heap->dev = dev;
down_write(&dev->lock);
- plist_node_init(&heap->node, heap->id);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
- &debug_heap_fops);
+ debug_file = debugfs_create_file(heap->name, 0664,
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
+
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+		pr_err("Failed to create heap debugfs at %s/%s\n",
+ path, heap->name);
+ }
+
+#ifdef DEBUG_HEAP_SHRINKER
+ if (heap->shrinker.shrink) {
+ char debug_name[64];
+
+ snprintf(debug_name, 64, "%s_shrink", heap->name);
+ debug_file = debugfs_create_file(
+ debug_name, 0644, dev->heaps_debug_root, heap,
+ &debug_shrink_fops);
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+ path, debug_name);
+ }
+ }
+#endif
up_write(&dev->lock);
}
@@ -1839,8 +1834,21 @@
}
idev->debug_root = debugfs_create_dir("ion", NULL);
- if (IS_ERR_OR_NULL(idev->debug_root))
- pr_err("ion: failed to create debug files.\n");
+ if (!idev->debug_root) {
+ pr_err("ion: failed to create debugfs root directory.\n");
+ goto debugfs_done;
+ }
+ idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+ if (!idev->heaps_debug_root) {
+ pr_err("ion: failed to create debugfs heaps directory.\n");
+ goto debugfs_done;
+ }
+ idev->clients_debug_root = debugfs_create_dir("clients",
+ idev->debug_root);
+ if (!idev->clients_debug_root)
+ pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
@@ -1854,6 +1862,7 @@
void ion_device_destroy(struct ion_device *dev)
{
misc_deregister(&dev->dev);
+ debugfs_remove_recursive(dev->debug_root);
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 08921299..d25e928 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -162,7 +162,7 @@
}
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct rb_root *mem_map)
+ const struct list_head *mem_map)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -176,16 +176,14 @@
unsigned long size = carveout_heap->total_size;
unsigned long end = base+size;
unsigned long last_end = base;
- struct rb_node *n;
+ struct mem_map_data *data;
seq_printf(s, "\nMemory Map\n");
seq_printf(s, "%16.s %14.s %14.s %14.s\n",
"client", "start address", "end address",
"size (hex)");
- for (n = rb_first(mem_map); n; n = rb_next(n)) {
- struct mem_map_data *data =
- rb_entry(n, struct mem_map_data, node);
+ list_for_each_entry(data, mem_map, node) {
const char *client_name = "(null)";
if (last_end < data->addr) {
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index b24b2bd..d4bbab7 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -180,19 +180,17 @@
}
static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct rb_root *mem_map)
+ const struct list_head *mem_map)
{
if (mem_map) {
- struct rb_node *n;
+ struct mem_map_data *data;
seq_printf(s, "\nMemory Map\n");
seq_printf(s, "%16.s %14.s %14.s %14.s\n",
"client", "start address", "end address",
"size (hex)");
- for (n = rb_first(mem_map); n; n = rb_next(n)) {
- struct mem_map_data *data =
- rb_entry(n, struct mem_map_data, node);
+ list_for_each_entry(data, mem_map, node) {
const char *client_name = "(null)";
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index 90451ca..bdf48b3 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -234,19 +234,17 @@
}
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct rb_root *mem_map)
+ const struct list_head *mem_map)
{
if (mem_map) {
- struct rb_node *n;
+ struct mem_map_data *data;
seq_printf(s, "\nMemory Map\n");
seq_printf(s, "%16.s %14.s %14.s %14.s\n",
"client", "start address", "end address",
"size (hex)");
- for (n = rb_first(mem_map); n; n = rb_next(n)) {
- struct mem_map_data *data =
- rb_entry(n, struct mem_map_data, node);
+ list_for_each_entry(data, mem_map, node) {
const char *client_name = "(null)";
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index f2f4fad..8cb90e5 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -622,7 +622,7 @@
}
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct rb_root *mem_map)
+ const struct list_head *mem_map)
{
unsigned long total_alloc;
unsigned long total_size;
@@ -651,16 +651,14 @@
unsigned long size = cp_heap->total_size;
unsigned long end = base+size;
unsigned long last_end = base;
- struct rb_node *n;
+ struct mem_map_data *data;
seq_printf(s, "\nMemory Map\n");
seq_printf(s, "%16.s %14.s %14.s %14.s\n",
"client", "start address", "end address",
"size (hex)");
- for (n = rb_first(mem_map); n; n = rb_next(n)) {
- struct mem_map_data *data =
- rb_entry(n, struct mem_map_data, node);
+ list_for_each_entry(data, mem_map, node) {
const char *client_name = "(null)";
if (last_end < data->addr) {
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 3d37541..9d33bf4 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -16,10 +16,16 @@
*/
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/ion.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
#include "ion_priv.h"
void *ion_heap_map_kernel(struct ion_heap *heap,
@@ -94,9 +100,155 @@
return 0;
}
+#define MAX_VMAP_RETRIES 10
+
+/**
+ * An optimized page-zero'ing function. vmaps arrays of pages in large
+ * chunks to minimize the number of memsets and vmaps/vunmaps.
+ *
+ * Note that the `pages' array should be composed of all 4K pages.
+ */
+int ion_heap_pages_zero(struct page **pages, int num_pages,
+ bool should_invalidate)
+{
+ int i, j, k, npages_to_vmap;
+ void *ptr = NULL;
+ /*
+ * It's cheaper just to use writecombine memory and skip the
+ * cache vs. using a cache memory and trying to flush it afterwards
+ */
+ pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);
+
+ /*
+ * As an optimization, we manually zero out all of the pages
+ * in one fell swoop here. To safeguard against insufficient
+ * vmalloc space, we only vmap `npages_to_vmap' at a time,
+ * starting with a conservative estimate of 1/8 of the total
+ * number of vmalloc pages available.
+ */
+ npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
+ >> PAGE_SHIFT;
+ for (i = 0; i < num_pages; i += npages_to_vmap) {
+ npages_to_vmap = min(npages_to_vmap, num_pages - i);
+ for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
+ ++j) {
+ ptr = vmap(&pages[i], npages_to_vmap,
+ VM_IOREMAP, pgprot);
+ if (ptr)
+ break;
+ else
+ npages_to_vmap >>= 1;
+ }
+ if (!ptr)
+ return -ENOMEM;
+
+ memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
+ if (should_invalidate) {
+ /*
+ * invalidate the cache to pick up the zeroing
+ */
+ for (k = 0; k < npages_to_vmap; k++) {
+ void *p = kmap_atomic(pages[i + k]);
+ phys_addr_t phys = page_to_phys(
+ pages[i + k]);
+
+ dmac_inv_range(p, p + PAGE_SIZE);
+ outer_inv_range(phys, phys + PAGE_SIZE);
+ kunmap_atomic(p);
+ }
+ }
+ vunmap(ptr);
+ }
+
+ return 0;
+}
+
+static int ion_heap_alloc_pages_mem(int page_tbl_size,
+ struct pages_mem *pages_mem)
+{
+ struct page **pages;
+ pages_mem->free_fn = kfree;
+ if (page_tbl_size > SZ_8K) {
+ /*
+ * Do fallback to ensure we have a balance between
+ * performance and availability.
+ */
+ pages = kmalloc(page_tbl_size,
+ __GFP_COMP | __GFP_NORETRY |
+ __GFP_NO_KSWAPD | __GFP_NOWARN);
+ if (!pages) {
+ pages = vmalloc(page_tbl_size);
+ pages_mem->free_fn = vfree;
+ }
+ } else {
+ pages = kmalloc(page_tbl_size, GFP_KERNEL);
+ }
+
+ if (!pages)
+ return -ENOMEM;
+
+ pages_mem->pages = pages;
+ return 0;
+}
+
+static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
+{
+ pages_mem->free_fn(pages_mem->pages);
+}
+
+int ion_heap_high_order_page_zero(struct page *page,
+ int order, bool should_invalidate)
+{
+ int i, ret;
+ struct pages_mem pages_mem;
+ int npages = 1 << order;
+ int page_tbl_size = sizeof(struct page *) * npages;
+
+ if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
+ return -ENOMEM;
+
+ for (i = 0; i < (1 << order); ++i)
+ pages_mem.pages[i] = page + i;
+
+ ret = ion_heap_pages_zero(pages_mem.pages, npages,
+ should_invalidate);
+ ion_heap_free_pages_mem(&pages_mem);
+ return ret;
+}
+
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
struct sg_table *table = buffer->sg_table;
+ struct scatterlist *sg;
+ int i, j, ret = 0, npages = 0, page_tbl_size = 0;
+ struct pages_mem pages_mem;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ unsigned long len = sg_dma_len(sg);
+ int nrpages = len >> PAGE_SHIFT;
+ page_tbl_size += sizeof(struct page *) * nrpages;
+ }
+
+ if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
+ return -ENOMEM;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long len = sg_dma_len(sg);
+
+ for (j = 0; j < len / PAGE_SIZE; j++)
+ pages_mem.pages[npages++] = page + j;
+ }
+
+ ret = ion_heap_pages_zero(pages_mem.pages, npages,
+ ion_buffer_cached(buffer));
+ ion_heap_free_pages_mem(&pages_mem);
+ return ret;
+}
+
+int ion_heap_buffer_zero_old(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
pgprot_t pgprot;
struct scatterlist *sg;
struct vm_struct *vm_struct;
@@ -131,6 +283,122 @@
return ret;
}
+void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ int i;
+
+ if (!ion_buffer_fault_user_mappings(buffer)) {
+ __free_pages(page, order);
+ return;
+ }
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+{
+ rt_mutex_lock(&heap->lock);
+ list_add(&buffer->list, &heap->free_list);
+ heap->free_list_size += buffer->size;
+ rt_mutex_unlock(&heap->lock);
+ wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+ size_t size;
+
+ rt_mutex_lock(&heap->lock);
+ size = heap->free_list_size;
+ rt_mutex_unlock(&heap->lock);
+
+ return size;
+}
+
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+				bool skip_pools)
+{
+	struct ion_buffer *buffer, *tmp;
+	size_t total_drained = 0;
+
+	if (ion_heap_freelist_size(heap) == 0)
+		return 0;
+
+	rt_mutex_lock(&heap->lock);
+	if (size == 0)
+		size = heap->free_list_size;
+
+	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+		if (total_drained >= size)
+			break;
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		total_drained += buffer->size;
+		if (skip_pools)
+			buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
+		ion_buffer_destroy(buffer);	/* frees buffer: must be last */
+	}
+	rt_mutex_unlock(&heap->lock);
+
+	return total_drained;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, true);
+}
+
+int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ ion_heap_freelist_size(heap) > 0);
+
+ rt_mutex_lock(&heap->lock);
+ if (list_empty(&heap->free_list)) {
+ rt_mutex_unlock(&heap->lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ rt_mutex_unlock(&heap->lock);
+ ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	INIT_LIST_HEAD(&heap->free_list);
+	heap->free_list_size = 0;
+	rt_mutex_init(&heap->lock);
+	init_waitqueue_head(&heap->waitqueue);
+	heap->task = kthread_run(ion_heap_deferred_free, heap,
+				 "%s", heap->name);
+	if (IS_ERR(heap->task)) {
+		pr_err("%s: creating thread for deferred free failed\n",
+		       __func__);
+		return PTR_RET(heap->task);
+	}
+	sched_setscheduler(heap->task, SCHED_IDLE, &param); /* valid task only */
+	return 0;
+}
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
deleted file mode 100644
index d9e9e09..0000000
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/msm_ion.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/iommu.h>
-#include <linux/pfn.h>
-#include <linux/dma-mapping.h>
-#include "ion_priv.h"
-
-#include <asm/mach/map.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <mach/iommu_domains.h>
-#include <trace/events/kmem.h>
-
-struct ion_iommu_heap {
- struct ion_heap heap;
- struct ion_page_pool **cached_pools;
- struct ion_page_pool **uncached_pools;
-};
-
-/*
- * We will attempt to allocate high-order pages and store those in an
- * sg_list. However, some APIs expect an array of struct page * where
- * each page is of size PAGE_SIZE. We use this extra structure to
- * carry around an array of such pages (derived from the high-order
- * pages with nth_page).
- */
-struct ion_iommu_priv_data {
- struct page **pages;
- unsigned int pages_uses_vmalloc;
- int nrpages;
- unsigned long size;
-};
-
-#define MAX_VMAP_RETRIES 10
-#define BAD_ORDER -1
-
-static const unsigned int orders[] = {9, 8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
-static unsigned int low_gfp_flags = __GFP_HIGHMEM | GFP_KERNEL | __GFP_ZERO;
-static unsigned int high_gfp_flags = (__GFP_HIGHMEM | __GFP_NORETRY
- | __GFP_NO_KSWAPD | __GFP_NOWARN |
- __GFP_IO | __GFP_FS | __GFP_ZERO);
-
-struct page_info {
- struct page *page;
- unsigned int order;
- struct list_head list;
-};
-
-static int order_to_index(unsigned int order)
-{
- int i;
- for (i = 0; i < num_orders; i++)
- if (order == orders[i])
- return i;
- BUG();
- return BAD_ORDER;
-}
-
-static unsigned int order_to_size(int order)
-{
- return PAGE_SIZE << order;
-}
-
-static struct page_info *alloc_largest_available(struct ion_iommu_heap *heap,
- unsigned long size,
- unsigned int max_order,
- unsigned long flags)
-{
- struct page *page;
- struct page_info *info;
- int i;
-
- for (i = 0; i < num_orders; i++) {
- gfp_t gfp;
- int idx = order_to_index(orders[i]);
- struct ion_page_pool *pool;
-
- if (idx == BAD_ORDER)
- continue;
-
- if (ION_IS_CACHED(flags)) {
- pool = heap->cached_pools[idx];
- BUG_ON(!pool);
- } else {
- pool = heap->uncached_pools[idx];
- BUG_ON(!pool);
- }
-
- if (size < order_to_size(orders[i]))
- continue;
- if (max_order < orders[i])
- continue;
-
- if (orders[i]) {
- gfp = high_gfp_flags;
- } else {
- gfp = low_gfp_flags;
- }
- trace_alloc_pages_iommu_start(gfp, orders[i]);
- if (flags & ION_FLAG_POOL_FORCE_ALLOC)
- page = alloc_pages(gfp, orders[i]);
- else
- page = ion_page_pool_alloc(pool);
- trace_alloc_pages_iommu_end(gfp, orders[i]);
- if (!page) {
- trace_alloc_pages_iommu_fail(gfp, orders[i]);
- continue;
- }
-
- info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
- if (info) {
- info->page = page;
- info->order = orders[i];
- }
- return info;
- }
- return NULL;
-}
-
-static int ion_iommu_buffer_zero(struct ion_iommu_priv_data *data,
- bool is_cached)
-{
- int i, j, k;
- unsigned int npages_to_vmap;
- unsigned int total_pages;
- void *ptr = NULL;
- /*
- * It's cheaper just to use writecombine memory and skip the
- * cache vs. using a cache memory and trying to flush it afterwards
- */
- pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);
-
- /*
- * As an optimization, we manually zero out all of the
- * pages in one fell swoop here. To safeguard against
- * insufficient vmalloc space, we only vmap
- * `npages_to_vmap' at a time, starting with a
- * conservative estimate of 1/8 of the total number of
- * vmalloc pages available. Note that the `pages'
- * array is composed of all 4K pages, irrespective of
- * the size of the pages on the sg list.
- */
- npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
- >> PAGE_SHIFT;
- total_pages = data->nrpages;
- for (i = 0; i < total_pages; i += npages_to_vmap) {
- npages_to_vmap = min(npages_to_vmap, total_pages - i);
- for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
- ++j) {
- ptr = vmap(&data->pages[i], npages_to_vmap,
- VM_IOREMAP, pgprot);
- if (ptr)
- break;
- else
- npages_to_vmap >>= 1;
- }
- if (!ptr)
- return -ENOMEM;
-
- memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
- if (is_cached) {
- /*
- * invalidate the cache to pick up the zeroing
- */
- for (k = 0; k < npages_to_vmap; k++) {
- void *p = kmap_atomic(data->pages[i + k]);
- phys_addr_t phys = page_to_phys(
- data->pages[i + k]);
-
- dmac_inv_range(p, p + PAGE_SIZE);
- outer_inv_range(phys, phys + PAGE_SIZE);
- kunmap_atomic(p);
- }
- }
- vunmap(ptr);
- }
-
- return 0;
-}
-
-static int ion_iommu_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- int ret, i;
- struct list_head pages_list;
- struct page_info *info, *tmp_info;
- struct ion_iommu_priv_data *data = NULL;
- struct ion_iommu_heap *iommu_heap =
- container_of(heap, struct ion_iommu_heap, heap);
-
- if (msm_use_iommu()) {
- struct scatterlist *sg;
- struct sg_table *table;
- int j;
- unsigned int num_large_pages = 0;
- unsigned long size_remaining = PAGE_ALIGN(size);
- unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
- unsigned int page_tbl_size;
-
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&pages_list);
- while (size_remaining > 0) {
- info = alloc_largest_available(iommu_heap,
- size_remaining,
- max_order,
- flags);
- if (!info) {
- ret = -ENOMEM;
- goto err_free_data;
- }
- list_add_tail(&info->list, &pages_list);
- size_remaining -= order_to_size(info->order);
- max_order = info->order;
- num_large_pages++;
- }
-
- data->size = PFN_ALIGN(size);
- data->nrpages = data->size >> PAGE_SHIFT;
- data->pages_uses_vmalloc = 0;
- page_tbl_size = sizeof(struct page *) * data->nrpages;
-
- if (page_tbl_size > SZ_8K) {
- /*
- * Do fallback to ensure we have a balance between
- * performance and availability.
- */
- data->pages = kmalloc(page_tbl_size,
- __GFP_COMP | __GFP_NORETRY |
- __GFP_NO_KSWAPD | __GFP_NOWARN);
- if (!data->pages) {
- data->pages = vmalloc(page_tbl_size);
- data->pages_uses_vmalloc = 1;
- }
- } else {
- data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
- }
- if (!data->pages) {
- ret = -ENOMEM;
- goto err_free_data;
- }
-
- table = buffer->sg_table =
- kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-
- if (!table) {
- ret = -ENOMEM;
- goto err1;
- }
- ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
- if (ret)
- goto err2;
-
- i = 0;
- sg = table->sgl;
- list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
- struct page *page = info->page;
- sg_set_page(sg, page, order_to_size(info->order), 0);
- sg_dma_address(sg) = sg_phys(sg);
- sg = sg_next(sg);
- for (j = 0; j < (1 << info->order); ++j)
- data->pages[i++] = nth_page(page, j);
- list_del(&info->list);
- kfree(info);
- }
-
-
- if (flags & ION_FLAG_POOL_FORCE_ALLOC) {
- ret = ion_iommu_buffer_zero(data, ION_IS_CACHED(flags));
- if (ret) {
- pr_err("Couldn't vmap the pages for zeroing\n");
- goto err3;
- }
-
-
- if (!ION_IS_CACHED(flags))
- dma_sync_sg_for_device(NULL, table->sgl,
- table->nents,
- DMA_BIDIRECTIONAL);
-
- }
- buffer->priv_virt = data;
- return 0;
-
- } else {
- return -ENOMEM;
- }
-
-
-err3:
- sg_free_table(buffer->sg_table);
-err2:
- kfree(buffer->sg_table);
- buffer->sg_table = 0;
-err1:
- if (data->pages_uses_vmalloc)
- vfree(data->pages);
- else
- kfree(data->pages);
-err_free_data:
- kfree(data);
-
- list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
- if (info->page)
- __free_pages(info->page, info->order);
- list_del(&info->list);
- kfree(info);
- }
- return ret;
-}
-
-static void ion_iommu_heap_free(struct ion_buffer *buffer)
-{
- int i;
- struct scatterlist *sg;
- struct sg_table *table = buffer->sg_table;
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- bool cached = ion_buffer_cached(buffer);
- struct ion_iommu_heap *iommu_heap =
- container_of(buffer->heap, struct ion_iommu_heap, heap);
-
- if (!table)
- return;
- if (!data)
- return;
-
- if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC))
- ion_iommu_buffer_zero(data, ION_IS_CACHED(buffer->flags));
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int order = get_order(sg_dma_len(sg));
- int idx = order_to_index(order);
- struct ion_page_pool *pool;
-
- if (idx == BAD_ORDER) {
- WARN_ON(1);
- continue;
- }
-
- if (cached)
- pool = iommu_heap->cached_pools[idx];
- else
- pool = iommu_heap->uncached_pools[idx];
-
- if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
- __free_pages(sg_page(sg), order);
- else
- ion_page_pool_free(pool, sg_page(sg));
- }
-
- sg_free_table(table);
- kfree(table);
- table = 0;
- if (data->pages_uses_vmalloc)
- vfree(data->pages);
- else
- kfree(data->pages);
- kfree(data);
-}
-
-void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- pgprot_t page_prot = PAGE_KERNEL;
-
- if (!data)
- return NULL;
-
- if (!ION_IS_CACHED(buffer->flags))
- page_prot = pgprot_writecombine(page_prot);
-
- buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
-
- return buffer->vaddr;
-}
-
-void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- if (!buffer->vaddr)
- return;
-
- vunmap(buffer->vaddr);
- buffer->vaddr = NULL;
-}
-
-int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct sg_table *table = buffer->sg_table;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
- struct scatterlist *sg;
- int i;
-
- if (!ION_IS_CACHED(buffer->flags))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long remainder = vma->vm_end - addr;
- unsigned long len = sg_dma_len(sg);
-
- if (offset >= sg_dma_len(sg)) {
- offset -= sg_dma_len(sg);
- continue;
- } else if (offset) {
- page += offset / PAGE_SIZE;
- len = sg_dma_len(sg) - offset;
- offset = 0;
- }
- len = min(len, remainder);
- remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
- addr += len;
- if (addr >= vma->vm_end)
- return 0;
- }
- return 0;
-}
-
-static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->sg_table;
-}
-
-static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static int ion_iommu_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
- void *unused)
-{
-
- struct ion_iommu_heap *iommu_heap = container_of(heap,
- struct ion_iommu_heap,
- heap);
- int i;
- unsigned long total = 0;
-
- seq_printf(s, "Cached Pools:\n");
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = iommu_heap->cached_pools[i];
- seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
- pool->high_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
- pool->low_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->low_count);
-
- total += (1 << pool->order) * PAGE_SIZE *
- (pool->low_count + pool->high_count);
- }
-
- seq_printf(s, "Uncached Pools:\n");
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = iommu_heap->uncached_pools[i];
- seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
- pool->high_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
- pool->low_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->low_count);
-
- total += (1 << pool->order) * PAGE_SIZE *
- (pool->low_count + pool->high_count);
- }
- seq_printf(s, "Total bytes in pool: %lx\n", total);
- return 0;
-}
-
-static struct ion_heap_ops iommu_heap_ops = {
- .allocate = ion_iommu_heap_allocate,
- .free = ion_iommu_heap_free,
- .map_user = ion_iommu_heap_map_user,
- .map_kernel = ion_iommu_heap_map_kernel,
- .unmap_kernel = ion_iommu_heap_unmap_kernel,
- .map_dma = ion_iommu_heap_map_dma,
- .unmap_dma = ion_iommu_heap_unmap_dma,
-};
-
-struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_iommu_heap *iommu_heap;
- int i;
-
- iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
- if (!iommu_heap)
- return ERR_PTR(-ENOMEM);
-
- iommu_heap->heap.ops = &iommu_heap_ops;
- iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
- iommu_heap->uncached_pools = kzalloc(
- sizeof(struct ion_page_pool *) * num_orders,
- GFP_KERNEL);
- if (!iommu_heap->uncached_pools)
- goto err_alloc_uncached_pools;
-
- iommu_heap->cached_pools = kzalloc(
- sizeof(struct ion_page_pool *) * num_orders,
- GFP_KERNEL);
-
- if (!iommu_heap->cached_pools)
- goto err_alloc_cached_pools;
-
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags;
-
- if (orders[i])
- gfp_flags = high_gfp_flags | __GFP_ZERO;
- else
- gfp_flags = low_gfp_flags | __GFP_ZERO;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto err_create_cached_pool;
- iommu_heap->cached_pools[i] = pool;
- }
-
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags;
-
- if (orders[i])
- gfp_flags = high_gfp_flags | __GFP_ZERO;
- else
- gfp_flags = low_gfp_flags | __GFP_ZERO;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto err_create_uncached_pool;
- iommu_heap->uncached_pools[i] = pool;
- }
- iommu_heap->heap.debug_show = ion_iommu_heap_debug_show;
- return &iommu_heap->heap;
-
-err_create_uncached_pool:
- for (i = 0; i < num_orders; i++)
- if (iommu_heap->cached_pools[i])
- ion_page_pool_destroy(iommu_heap->uncached_pools[i]);
-
-
-err_create_cached_pool:
- for (i = 0; i < num_orders; i++)
- if (iommu_heap->uncached_pools[i])
- ion_page_pool_destroy(iommu_heap->cached_pools[i]);
-
- kfree(iommu_heap->cached_pools);
-err_alloc_cached_pools:
- kfree(iommu_heap->uncached_pools);
-err_alloc_uncached_pools:
- kfree(iommu_heap);
- return ERR_PTR(-ENOMEM);
-}
-
-void ion_iommu_heap_destroy(struct ion_heap *heap)
-{
- struct ion_iommu_heap *iommu_heap =
- container_of(heap, struct ion_iommu_heap, heap);
-
- kfree(iommu_heap);
- iommu_heap = NULL;
-}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
index 495dd24..94f9445 100644
--- a/drivers/gpu/ion/ion_page_pool.c
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -21,14 +21,9 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/shrinker.h>
+#include <linux/vmalloc.h>
#include "ion_priv.h"
-/* #define DEBUG_PAGE_POOL_SHRINKER */
-
-static struct plist_head pools = PLIST_HEAD_INIT(pools);
-static struct shrinker shrinker;
-
struct ion_page_pool_item {
struct page *page;
struct list_head list;
@@ -36,18 +31,28 @@
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
- struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+ struct page *page;
struct scatterlist sg;
+ page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
+
if (!page)
return NULL;
+ if (pool->gfp_mask & __GFP_ZERO)
+ if (ion_heap_high_order_page_zero(
+ page, pool->order, pool->should_invalidate))
+ goto error_free_pages;
+
sg_init_table(&sg, 1);
sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
sg_dma_address(&sg) = sg_phys(&sg);
dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
return page;
+error_free_pages:
+ __free_pages(page, pool->order);
+ return NULL;
}
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
@@ -128,113 +133,50 @@
ion_page_pool_free_pages(pool, page);
}
-#ifdef DEBUG_PAGE_POOL_SHRINKER
-static int debug_drop_pools_set(void *data, u64 val)
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
- struct shrink_control sc;
- int objs;
-
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
-
- if (!val)
- return 0;
-
- objs = shrinker.shrink(&shrinker, &sc);
- sc.nr_to_scan = objs;
-
- shrinker.shrink(&shrinker, &sc);
- return 0;
-}
-
-static int debug_drop_pools_get(void *data, u64 *val)
-{
- struct shrink_control sc;
- int objs;
-
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
-
- objs = shrinker.shrink(&shrinker, &sc);
- *val = objs;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_drop_pools_fops, debug_drop_pools_get,
- debug_drop_pools_set, "%llu\n");
-
-static int debug_grow_pools_set(void *data, u64 val)
-{
- struct ion_page_pool *pool;
- struct page *page;
-
- plist_for_each_entry(pool, &pools, list) {
- if (val != pool->list.prio)
- continue;
- page = ion_page_pool_alloc_pages(pool);
- if (page)
- ion_page_pool_add(pool, page);
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_grow_pools_fops, debug_drop_pools_get,
- debug_grow_pools_set, "%llu\n");
-#endif
-
-static int ion_page_pool_total(bool high)
-{
- struct ion_page_pool *pool;
int total = 0;
- plist_for_each_entry(pool, &pools, list) {
- total += high ? (pool->high_count + pool->low_count) *
- (1 << pool->order) :
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
pool->low_count * (1 << pool->order);
- }
return total;
}
-static int ion_page_pool_shrink(struct shrinker *shrinker,
- struct shrink_control *sc)
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
{
- struct ion_page_pool *pool;
int nr_freed = 0;
int i;
bool high;
- int nr_to_scan = sc->nr_to_scan;
- if (sc->gfp_mask & __GFP_HIGHMEM)
- high = true;
+ high = gfp_mask & __GFP_HIGHMEM;
if (nr_to_scan == 0)
- return ion_page_pool_total(high);
+ return ion_page_pool_total(pool, high);
- plist_for_each_entry(pool, &pools, list) {
- for (i = 0; i < nr_to_scan; i++) {
- struct page *page;
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
- mutex_lock(&pool->mutex);
- if (high && pool->high_count) {
- page = ion_page_pool_remove(pool, true);
- } else if (pool->low_count) {
- page = ion_page_pool_remove(pool, false);
- } else {
- mutex_unlock(&pool->mutex);
- break;
- }
+ mutex_lock(&pool->mutex);
+ if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else {
mutex_unlock(&pool->mutex);
- ion_page_pool_free_pages(pool, page);
- nr_freed += (1 << pool->order);
+ break;
}
- nr_to_scan -= i;
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
}
- return ion_page_pool_total(high);
+ return nr_freed;
}
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool should_invalidate)
{
struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
GFP_KERNEL);
@@ -246,37 +188,25 @@
INIT_LIST_HEAD(&pool->high_items);
pool->gfp_mask = gfp_mask;
pool->order = order;
+ pool->should_invalidate = should_invalidate;
mutex_init(&pool->mutex);
plist_node_init(&pool->list, order);
- plist_add(&pool->list, &pools);
return pool;
}
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
- plist_del(&pool->list, &pools);
kfree(pool);
}
static int __init ion_page_pool_init(void)
{
- shrinker.shrink = ion_page_pool_shrink;
- shrinker.seeks = DEFAULT_SEEKS;
- shrinker.batch = 0;
- register_shrinker(&shrinker);
-#ifdef DEBUG_PAGE_POOL_SHRINKER
- debugfs_create_file("ion_pools_shrink", 0644, NULL, NULL,
- &debug_drop_pools_fops);
- debugfs_create_file("ion_pools_grow", 0644, NULL, NULL,
- &debug_grow_pools_fops);
-#endif
return 0;
}
static void __exit ion_page_pool_exit(void)
{
- unregister_shrinker(&shrinker);
}
module_init(ion_page_pool_init);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index e3fbbda..2b00ee6 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -85,11 +85,16 @@
char task_comm[TASK_COMM_LEN];
pid_t pid;
};
+void ion_buffer_destroy(struct ion_buffer *buffer);
/**
* struct ion_heap_ops - ops to operate on a given heap
* @allocate: allocate memory
- * @free: free memory
+ * @free: free memory. Will be called with
+ * ION_FLAG_FREED_FROM_SHRINKER set in buffer flags when
+ * called from a shrinker. In that case, the pages being
+ * free'd must be truly free'd back to the system, not put
+ * in a page pool or otherwise cached.
* @phys get physical address of a buffer (only define on
* physically contiguous heaps)
* @map_dma map the memory for dma to a scatterlist
@@ -115,7 +120,7 @@
struct vm_area_struct *vma);
void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
- const struct rb_root *mem_map);
+ const struct list_head *mem_map);
int (*secure_heap)(struct ion_heap *heap, int version, void *data);
int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
int (*secure_buffer)(struct ion_buffer *buffer, int version,
@@ -139,8 +144,13 @@
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
+ * @shrinker: a shrinker for the heap, if the heap caches system
+ * memory, it must define a shrinker to return it on low
+ * memory conditions, this includes system memory cached
+ * in the deferred free lists for heaps that support it
* @priv: private heap data
* @free_list: free list head if deferred free is used
+ * @free_list_size: size of the deferred free list in bytes
* @lock: protects the free list
* @waitqueue: queue to wait on from deferred free thread
* @task: task struct of deferred free thread
@@ -160,8 +170,10 @@
unsigned long flags;
unsigned int id;
const char *name;
+ struct shrinker shrinker;
void *priv;
struct list_head free_list;
+ size_t free_list_size;
struct rt_mutex lock;
wait_queue_head_t waitqueue;
struct task_struct *task;
@@ -209,6 +221,11 @@
*/
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+struct pages_mem {
+ struct page **pages;
+ void (*free_fn) (const void *);
+};
+
/**
* some helpers for common operations on buffers using the sg_table
* and vaddr fields
@@ -217,7 +234,71 @@
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
struct vm_area_struct *);
+int ion_heap_pages_zero(struct page **pages, int num_pages,
+ bool should_invalidate);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_high_order_page_zero(struct page *page,
+ int order, bool should_invalidate);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_drain_from_shrinker - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the
+ * ION_FLAG_FREED_FROM_SHRINKER flag.
+ */
+size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
+ size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
/**
@@ -276,6 +357,8 @@
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
+ * @should_invalidate: whether or not the cache needs to be invalidated at
+ * page allocation time.
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -288,16 +371,26 @@
struct list_head high_items;
struct list_head low_items;
struct mutex mutex;
- void *(*alloc)(struct ion_page_pool *pool);
- void (*free)(struct ion_page_pool *pool, struct page *page);
gfp_t gfp_mask;
unsigned int order;
struct plist_node list;
+ bool should_invalidate;
};
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool should_invalidate);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_removed_heap.c b/drivers/gpu/ion/ion_removed_heap.c
index 84d8d37..94d4a25 100644
--- a/drivers/gpu/ion/ion_removed_heap.c
+++ b/drivers/gpu/ion/ion_removed_heap.c
@@ -233,7 +233,7 @@
}
static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
- const struct rb_root *mem_map)
+ const struct list_head *mem_map)
{
struct ion_removed_heap *removed_heap =
container_of(heap, struct ion_removed_heap, heap);
@@ -247,16 +247,14 @@
unsigned long size = removed_heap->total_size;
unsigned long end = base+size;
unsigned long last_end = base;
- struct rb_node *n;
+ struct mem_map_data *data;
seq_printf(s, "\nMemory Map\n");
seq_printf(s, "%16.s %14.s %14.s %14.s\n",
"client", "start address", "end address",
"size (hex)");
- for (n = rb_first(mem_map); n; n = rb_next(n)) {
- struct mem_map_data *data =
- rb_entry(n, struct mem_map_data, node);
+ list_for_each_entry(data, mem_map, node) {
const char *client_name = "(null)";
if (last_end < data->addr) {
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 44bb86f..8e885b2 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -53,7 +53,8 @@
struct ion_system_heap {
struct ion_heap heap;
- struct ion_page_pool **pools;
+ struct ion_page_pool **uncached_pools;
+ struct ion_page_pool **cached_pools;
};
struct page_info {
@@ -68,29 +69,14 @@
{
bool cached = ion_buffer_cached(buffer);
bool split_pages = ion_buffer_fault_user_mappings(buffer);
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
struct page *page;
+ struct ion_page_pool *pool;
- if (!cached) {
- page = ion_page_pool_alloc(pool);
- } else {
- struct scatterlist sg;
- gfp_t gfp_flags = low_order_gfp_flags;
-
- if (order > 4)
- gfp_flags = high_order_gfp_flags;
- trace_alloc_pages_sys_start(gfp_flags, order);
- page = alloc_pages(gfp_flags, order);
- trace_alloc_pages_sys_end(gfp_flags, order);
- if (!page) {
- trace_alloc_pages_sys_fail(gfp_flags, order);
- return 0;
- }
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, PAGE_SIZE << order, 0);
- sg_dma_address(&sg) = sg_phys(&sg);
- dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
- }
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
+ page = ion_page_pool_alloc(pool);
if (!page)
return 0;
@@ -107,14 +93,20 @@
bool split_pages = ion_buffer_fault_user_mappings(buffer);
int i;
- if (!cached) {
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ if ((buffer->flags & ION_FLAG_FREED_FROM_SHRINKER)) {
+ if (split_pages) {
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+ } else {
+ __free_pages(page, order);
+ }
+ } else {
+ struct ion_page_pool *pool;
+ if (cached)
+ pool = heap->cached_pools[order_to_index(order)];
+ else
+ pool = heap->uncached_pools[order_to_index(order)];
ion_page_pool_free(pool, page);
- } else if (split_pages) {
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
- } else {
- __free_pages(page, order);
}
}
@@ -212,7 +204,7 @@
err1:
kfree(table);
err:
- list_for_each_entry(info, &pages, list) {
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
free_buffer_page(sys_heap, buffer, info->page, info->order);
kfree(info);
}
@@ -226,14 +218,11 @@
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
- bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
LIST_HEAD(pages);
int i;
- /* uncached pages come from the page pools, zero them before returning
- for security purposes (other allocations are zerod at alloc time */
- if (!cached)
+ if (!(buffer->flags & ION_FLAG_FREED_FROM_SHRINKER))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -265,6 +254,56 @@
.map_user = ion_heap_map_user,
};
+static int ion_system_heap_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc) {
+
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int nr_total = 0;
+ int nr_freed = 0;
+ int i;
+
+ if (sc->nr_to_scan == 0)
+ goto end;
+
+ /* shrink the free list first, no point in zeroing the memory if
+ we're just going to reclaim it. Also, skip any possible
+ page pooling */
+ nr_freed += ion_heap_freelist_drain_from_shrinker(
+ heap, sc->nr_to_scan * PAGE_SIZE) / PAGE_SIZE;
+
+ if (nr_freed >= sc->nr_to_scan)
+ goto end;
+
+ for (i = 0; i < num_orders; i++) {
+ nr_freed += ion_page_pool_shrink(sys_heap->uncached_pools[i],
+ sc->gfp_mask, sc->nr_to_scan);
+ if (nr_freed >= sc->nr_to_scan)
+ goto end;
+
+ nr_freed += ion_page_pool_shrink(sys_heap->cached_pools[i],
+ sc->gfp_mask, sc->nr_to_scan);
+ if (nr_freed >= sc->nr_to_scan)
+ goto end;
+ }
+
+end:
+ /* total number of items is whatever the page pools are holding
+ plus whatever's in the freelist */
+ for (i = 0; i < num_orders; i++) {
+ nr_total += ion_page_pool_shrink(
+ sys_heap->uncached_pools[i], sc->gfp_mask, 0);
+ nr_total += ion_page_pool_shrink(
+ sys_heap->cached_pools[i], sc->gfp_mask, 0);
+ }
+ nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
+ return nr_total;
+
+}
+
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
void *unused)
{
@@ -274,21 +313,74 @@
heap);
int i;
for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
- seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
- pool->high_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
- pool->low_count, pool->order,
- (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ struct ion_page_pool *pool = sys_heap->uncached_pools[i];
+ seq_printf(s,
+ "%d order %u highmem pages in uncached pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s,
+ "%d order %u lowmem pages in uncached pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->cached_pools[i];
+ seq_printf(s,
+ "%d order %u highmem pages in cached pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s,
+ "%d order %u lowmem pages in cached pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+
+ return 0;
+}
+
+
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (pools[i])
+ ion_page_pool_destroy(pools[i]);
+}
+
+/**
+ * ion_system_heap_create_pools - Creates pools for all orders
+ *
+ * If this fails you don't need to destroy any pools. It's all or
+ * nothing. If it succeeds you'll eventually need to use
+ * ion_system_heap_destroy_pools to destroy the pools.
+ */
+static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+ bool should_invalidate)
+{
+ int i;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i],
+ should_invalidate);
+ if (!pool)
+ goto err_create_pool;
+ pools[i] = pool;
}
return 0;
+err_create_pool:
+ ion_system_heap_destroy_pools(pools);
+ return 1;
}
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
- int i;
+ int pools_size = sizeof(struct ion_page_pool *) * num_orders;
heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
if (!heap)
@@ -296,29 +388,35 @@
heap->heap.ops = &system_heap_ops;
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
- GFP_KERNEL);
- if (!heap->pools)
- goto err_alloc_pools;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags = low_order_gfp_flags;
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto err_create_pool;
- heap->pools[i] = pool;
- }
+ heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL);
+ if (!heap->uncached_pools)
+ goto err_alloc_uncached_pools;
+
+ heap->cached_pools = kzalloc(pools_size, GFP_KERNEL);
+ if (!heap->cached_pools)
+ goto err_alloc_cached_pools;
+
+ if (ion_system_heap_create_pools(heap->uncached_pools, false))
+ goto err_create_uncached_pools;
+
+ if (ion_system_heap_create_pools(heap->cached_pools, true))
+ goto err_create_cached_pools;
+
+ heap->heap.shrinker.shrink = ion_system_heap_shrink;
+ heap->heap.shrinker.seeks = DEFAULT_SEEKS;
+ heap->heap.shrinker.batch = 0;
+ register_shrinker(&heap->heap.shrinker);
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
-err_create_pool:
- for (i = 0; i < num_orders; i++)
- if (heap->pools[i])
- ion_page_pool_destroy(heap->pools[i]);
- kfree(heap->pools);
-err_alloc_pools:
+
+err_create_cached_pools:
+ ion_system_heap_destroy_pools(heap->uncached_pools);
+err_create_uncached_pools:
+ kfree(heap->cached_pools);
+err_alloc_cached_pools:
+ kfree(heap->uncached_pools);
+err_alloc_uncached_pools:
kfree(heap);
return ERR_PTR(-ENOMEM);
}
@@ -328,36 +426,82 @@
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
- int i;
- for (i = 0; i < num_orders; i++)
- ion_page_pool_destroy(sys_heap->pools[i]);
- kfree(sys_heap->pools);
+ ion_system_heap_destroy_pools(sys_heap->uncached_pools);
+ ion_system_heap_destroy_pools(sys_heap->cached_pools);
+ kfree(sys_heap->uncached_pools);
+ kfree(sys_heap->cached_pools);
kfree(sys_heap);
}
+struct kmalloc_buffer_info {
+ struct sg_table *table;
+ void *vaddr;
+};
+
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long len,
unsigned long align,
unsigned long flags)
{
- buffer->priv_virt = kzalloc(len, GFP_KERNEL);
- if (!buffer->priv_virt)
- return -ENOMEM;
+ int ret;
+ struct kmalloc_buffer_info *info;
+
+ info = kmalloc(sizeof(struct kmalloc_buffer_info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!info->table) {
+ ret = -ENOMEM;
+ goto kfree_info;
+ }
+
+ ret = sg_alloc_table(info->table, 1, GFP_KERNEL);
+ if (ret)
+ goto kfree_table;
+
+ info->vaddr = kzalloc(len, GFP_KERNEL);
+ if (!info->vaddr) {
+ ret = -ENOMEM;
+ goto sg_free_table;
+ }
+
+ sg_set_page(info->table->sgl, virt_to_page(info->vaddr), len,
+ 0);
+ sg_dma_address(info->table->sgl) = virt_to_phys(info->vaddr);
+ dma_sync_sg_for_device(NULL, info->table->sgl, 1, DMA_BIDIRECTIONAL);
+
+ buffer->priv_virt = info;
return 0;
+
+sg_free_table:
+ sg_free_table(info->table);
+kfree_table:
+ kfree(info->table);
+kfree_info:
+ kfree(info);
+out:
+ return ret;
}
void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- kfree(buffer->priv_virt);
+ struct kmalloc_buffer_info *info = buffer->priv_virt;
+ sg_free_table(info->table);
+ kfree(info->table);
+ kfree(info->vaddr);
}
static int ion_system_contig_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
- *addr = virt_to_phys(buffer->priv_virt);
+ struct kmalloc_buffer_info *info = buffer->priv_virt;
+ *addr = virt_to_phys(info->vaddr);
*len = buffer->size;
return 0;
}
@@ -365,27 +509,13 @@
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct sg_table *table;
- int ret;
-
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ERR_PTR(ret);
- }
- sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
- 0);
- return table;
+ struct kmalloc_buffer_info *info = buffer->priv_virt;
+ return info->table;
}
void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
}
static struct ion_heap_ops kmalloc_ops = {
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 118c39a..c77bac7 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -51,74 +51,56 @@
static struct ion_heap_desc ion_heap_meta[] = {
{
.id = ION_SYSTEM_HEAP_ID,
- .type = ION_HEAP_TYPE_SYSTEM,
- .name = ION_VMALLOC_HEAP_NAME,
+ .name = ION_SYSTEM_HEAP_NAME,
},
{
.id = ION_SYSTEM_CONTIG_HEAP_ID,
- .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
.name = ION_KMALLOC_HEAP_NAME,
},
{
.id = ION_CP_MM_HEAP_ID,
- .type = ION_HEAP_TYPE_SECURE_DMA,
.name = ION_MM_HEAP_NAME,
.permission_type = IPT_TYPE_MM_CARVEOUT,
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_MM_FIRMWARE_HEAP_NAME,
},
{
.id = ION_CP_MFC_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
.name = ION_MFC_HEAP_NAME,
.permission_type = IPT_TYPE_MFC_SHAREDMEM,
},
{
.id = ION_SF_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_SF_HEAP_NAME,
},
{
- .id = ION_IOMMU_HEAP_ID,
- .type = ION_HEAP_TYPE_IOMMU,
- .name = ION_IOMMU_HEAP_NAME,
- },
- {
.id = ION_QSECOM_HEAP_ID,
- .type = ION_HEAP_TYPE_DMA,
.name = ION_QSECOM_HEAP_NAME,
},
{
.id = ION_AUDIO_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_AUDIO_HEAP_NAME,
},
{
.id = ION_PIL1_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_PIL1_HEAP_NAME,
},
{
.id = ION_PIL2_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_PIL2_HEAP_NAME,
},
{
.id = ION_CP_WB_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
.name = ION_WB_HEAP_NAME,
},
{
.id = ION_CAMERA_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_CAMERA_HEAP_NAME,
},
{
.id = ION_ADSP_HEAP_ID,
- .type = ION_HEAP_TYPE_DMA,
.name = ION_ADSP_HEAP_NAME,
}
};
@@ -127,6 +109,16 @@
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
const char *name)
{
+ /*
+ * The assumption is that if there is a NULL device, the ion
+ * driver has not yet probed.
+ */
+ if (idev == NULL)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (IS_ERR(idev))
+ return (struct ion_client *)idev;
+
return ion_client_create(idev, name);
}
EXPORT_SYMBOL(msm_ion_client_create);
@@ -593,15 +585,58 @@
return ret;
}
-static int msm_ion_populate_heap(struct ion_platform_heap *heap)
+#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
+ .heap_type = ION_HEAP_TYPE_##h, }
+
+static struct heap_types_info {
+ const char *name;
+ int heap_type;
+} heap_types_info[] = {
+ MAKE_HEAP_TYPE_MAPPING(SYSTEM),
+ MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
+ MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
+ MAKE_HEAP_TYPE_MAPPING(CHUNK),
+ MAKE_HEAP_TYPE_MAPPING(DMA),
+ MAKE_HEAP_TYPE_MAPPING(CP),
+ MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
+ MAKE_HEAP_TYPE_MAPPING(REMOVED),
+};
+
+static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
+ int *heap_type)
+{
+ const char *name;
+ int i, ret = -EINVAL;
+ ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
+ if (ret)
+ goto out;
+ for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
+ if (!strcmp(heap_types_info[i].name, name)) {
+ *heap_type = heap_types_info[i].heap_type;
+ ret = 0;
+ goto out;
+ }
+ }
+ WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
+ name, __FILE__);
+out:
+ return ret;
+}
+
+static int msm_ion_populate_heap(struct device_node *node,
+ struct ion_platform_heap *heap)
{
unsigned int i;
- int ret = -EINVAL;
+ int ret = -EINVAL, heap_type = -1;
unsigned int len = ARRAY_SIZE(ion_heap_meta);
for (i = 0; i < len; ++i) {
if (ion_heap_meta[i].id == heap->id) {
heap->name = ion_heap_meta[i].name;
- heap->type = ion_heap_meta[i].type;
+ ret = msm_ion_get_heap_type_from_dt_node(node,
+ &heap_type);
+ if (ret)
+ break;
+ heap->type = heap_type;
ret = msm_init_extra_data(heap, &ion_heap_meta[i]);
break;
}
@@ -793,7 +828,7 @@
}
pdata->heaps[idx].id = val;
- ret = msm_ion_populate_heap(&pdata->heaps[idx]);
+ ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
if (ret)
goto free_heaps;
@@ -933,9 +968,6 @@
struct ion_heap *heap = NULL;
switch ((int)heap_data->type) {
- case ION_HEAP_TYPE_IOMMU:
- heap = ion_iommu_heap_create(heap_data);
- break;
case ION_HEAP_TYPE_CP:
heap = ion_cp_heap_create(heap_data);
break;
@@ -975,9 +1007,6 @@
return;
switch ((int)heap->type) {
- case ION_HEAP_TYPE_IOMMU:
- ion_iommu_heap_destroy(heap);
- break;
case ION_HEAP_TYPE_CP:
ion_cp_heap_destroy(heap);
break;
@@ -999,6 +1028,7 @@
static int msm_ion_probe(struct platform_device *pdev)
{
+ static struct ion_device *new_dev;
struct ion_platform_data *pdata;
unsigned int pdata_needs_to_be_freed;
int err = -1;
@@ -1024,9 +1054,14 @@
goto out;
}
- idev = ion_device_create(msm_ion_custom_ioctl);
- if (IS_ERR_OR_NULL(idev)) {
- err = PTR_ERR(idev);
+ new_dev = ion_device_create(msm_ion_custom_ioctl);
+ if (IS_ERR_OR_NULL(new_dev)) {
+ /*
+ * set this to the ERR to indicate to the clients
+ * that Ion failed to probe.
+ */
+ idev = new_dev;
+ err = PTR_ERR(new_dev);
goto freeheaps;
}
@@ -1053,13 +1088,18 @@
heap_data->name);
}
- ion_device_add_heap(idev, heaps[i]);
+ ion_device_add_heap(new_dev, heaps[i]);
}
check_for_heap_overlap(pdata->heaps, num_heaps);
if (pdata_needs_to_be_freed)
free_pdata(pdata);
- platform_set_drvdata(pdev, idev);
+ platform_set_drvdata(pdev, new_dev);
+ /*
+ * intentionally set this at the very end to allow probes to be deferred
+ * completely until Ion is setup
+ */
+ idev = new_dev;
return 0;
freeheaps:
diff --git a/drivers/gpu/ion/msm_ion_priv.h b/drivers/gpu/ion/msm_ion_priv.h
index 2de4e8a..412ead2 100644
--- a/drivers/gpu/ion/msm_ion_priv.h
+++ b/drivers/gpu/ion/msm_ion_priv.h
@@ -21,14 +21,14 @@
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
-#include <linux/rbtree.h>
+#include <linux/types.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
/**
* struct mem_map_data - represents information about the memory map for a heap
- * @node: rb node used to store in the tree of mem_map_data
+ * @node: list node used to store in the list of mem_map_data
* @addr: start address of memory region.
* @addr: end address of memory region.
* @size: size of memory region
@@ -36,7 +36,7 @@
*
*/
struct mem_map_data {
- struct rb_node node;
+ struct list_head node;
ion_phys_addr_t addr;
ion_phys_addr_t addr_end;
unsigned long size;
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index ba63fbc..2576386 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -4,6 +4,10 @@
depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
select GENERIC_ALLOCATOR
select FW_LOADER
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_MSM_ADRENO_TZ
---help---
3D graphics driver. Required to use hardware accelerated
OpenGL ES 2.0 and 1.1.
@@ -60,6 +64,17 @@
default y
depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
+config MSM_ADRENO_DEFAULT_GOVERNOR
+ string "devfreq governor for the adreno core"
+ default "msm-adreno-tz" if DEVFREQ_GOV_MSM_ADRENO_TZ
+ default "simple_ondemand"
+ depends on MSM_KGSL
+
+config MSM_Z180_DEFAULT_GOVERNOR
+ string "devfreq governor for the z180 core(s)"
+ default "performance"
+ depends on MSM_KGSL_2D
+
config MSM_KGSL_DRM
bool "Build a DRM interface for the MSM_KGSL driver"
depends on MSM_KGSL && DRM
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index aac183b..14e07e5 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -15,8 +15,6 @@
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
-msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
-msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o
msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
msm_adreno-y += \
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 676f46d..758d5c5 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -775,6 +775,9 @@
#define SP0_ICL1_MISSES 0x1A
#define SP_FS_CFLOW_INSTRUCTIONS 0x0C
+/* COUNTABLE FOR TSE PERFCOUNTER */
+#define TSE_INPUT_PRIM_NUM 0x0
+
/* VBIF PERFCOUNTER ENA/CLR values */
#define VBIF_PERF_CNT_0 BIT(0)
#define VBIF_PERF_CNT_1 BIT(1)
@@ -789,6 +792,7 @@
#define VBIF_PERF_CNT_1_SEL_MASK 0x7f00
/* VBIF countables */
+#define VBIF_AXI_TOTAL_BEATS 85
#define VBIF_DDR_TOTAL_CYCLES 110
#endif
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index a271388..675fcf3 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -19,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/of_coresight.h>
+#include <linux/input.h>
#include <mach/socinfo.h>
#include <mach/msm_bus_board.h>
@@ -77,11 +78,38 @@
#define KGSL_LOG_LEVEL_DEFAULT 3
+static void adreno_start_work(struct work_struct *work);
+static void adreno_input_work(struct work_struct *work);
+
+/*
+ * The default values for the simpleondemand governor are 90 and 5,
+ * we use different values here.
+ * They have to be tuned and compare with the tz governor anyway.
+ */
+static struct devfreq_simple_ondemand_data adreno_ondemand_data = {
+ .upthreshold = 80,
+ .downdifferential = 20,
+};
+
+static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
+ .bus = {
+ .max = 450,
+ },
+ .device_id = KGSL_DEVICE_3D0,
+};
+
+static const struct devfreq_governor_data adreno_governors[] = {
+ { .name = "simple_ondemand", .data = &adreno_ondemand_data },
+ { .name = "msm-adreno-tz", .data = &adreno_tz_data },
+};
+
static const struct kgsl_functable adreno_functable;
static struct adreno_device device_3d0 = {
.dev = {
KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
+ .pwrscale = KGSL_PWRSCALE_INIT(adreno_governors,
+ ARRAY_SIZE(adreno_governors)),
.name = DEVICE_3D0_NAME,
.id = KGSL_DEVICE_3D0,
.mh = {
@@ -123,10 +151,16 @@
.ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
.fast_hang_detect = 1,
.long_ib_detect = 1,
+ .start_work = __WORK_INITIALIZER(device_3d0.start_work,
+ adreno_start_work),
+ .input_work = __WORK_INITIALIZER(device_3d0.input_work,
+ adreno_input_work),
};
unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
+static struct workqueue_struct *adreno_wq;
+
/*
* This is the master list of all GPU cores that are supported by this
* driver.
@@ -220,6 +254,122 @@
512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
};
+/* Nice level for the higher priority GPU start thread */
+static unsigned int _wake_nice = -7;
+
+/* Number of milliseconds to stay active after a wake on touch */
+static unsigned int _wake_timeout = 100;
+
+/*
+ * A workqueue callback responsible for actually turning on the GPU after a
+ * touch event. kgsl_pwrctrl_wake() is used without any active_count protection
+ * to avoid the need to maintain state. Either somebody will start using the
+ * GPU or the idle timer will fire and put the GPU back into slumber
+ */
+static void adreno_input_work(struct work_struct *work)
+{
+ struct adreno_device *adreno_dev = container_of(work,
+ struct adreno_device, input_work);
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ mutex_lock(&device->mutex);
+
+ device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
+
+ /*
+ * Don't schedule adreno_start in a high priority workqueue, we are
+ * already in a workqueue which should be sufficient
+ */
+ kgsl_pwrctrl_wake(device, 0);
+
+ /*
+ * When waking up from a touch event we want to stay active long enough
+ * for the user to send a draw command. The default idle timer timeout
+ * is shorter than we want so go ahead and push the idle timer out
+ * further for this special case
+ */
+ mod_timer(&device->idle_timer,
+ jiffies + msecs_to_jiffies(_wake_timeout));
+ mutex_unlock(&device->mutex);
+}
+
+/*
+ * Process input events and schedule work if needed. At this point we are only
+ * interested in grokking EV_ABS touchscreen events
+ */
+static void adreno_input_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ struct kgsl_device *device = handle->handler->private;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /*
+ * Only queue the work under certain circumstances: we have to be in
+ * slumber, the event has to be EV_ABS and we had to have processed an
+ * IB since the last time we called wake on touch.
+ */
+ if ((type == EV_ABS) &&
+ !(device->flags & KGSL_FLAG_WAKE_ON_TOUCH) &&
+ (device->state == KGSL_STATE_SLUMBER))
+ schedule_work(&adreno_dev->input_work);
+}
+
+static int adreno_input_connect(struct input_handler *handler,
+ struct input_dev *dev, const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int ret;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = handler->name;
+
+ ret = input_register_handle(handle);
+ if (ret) {
+ kfree(handle);
+ return ret;
+ }
+
+ ret = input_open_device(handle);
+ if (ret) {
+ input_unregister_handle(handle);
+ kfree(handle);
+ }
+
+ return ret;
+}
+
+static void adreno_input_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * We are only interested in EV_ABS events so only register handlers for those
+ * input devices that have EV_ABS events
+ */
+static const struct input_device_id adreno_input_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
+ },
+ { },
+};
+
+static struct input_handler adreno_input_handler = {
+ .event = adreno_input_event,
+ .connect = adreno_input_connect,
+ .disconnect = adreno_input_disconnect,
+ .name = "kgsl",
+ .id_table = adreno_input_ids,
+};
+
/**
* adreno_perfcounter_init: Reserve kernel performance counters
* @device: device to configure
@@ -616,6 +766,33 @@
return -EINVAL;
}
+/**
+ * adreno_perfcounter_restore() - Restore performance counters
+ * @adreno_dev: adreno device to configure
+ *
+ * Load the physical performance counters with 64 bit value which are
+ * saved on GPU power collapse.
+ */
+static inline void adreno_perfcounter_restore(struct adreno_device *adreno_dev)
+{
+ if (adreno_dev->gpudev->perfcounter_restore)
+ adreno_dev->gpudev->perfcounter_restore(adreno_dev);
+}
+
+/**
+ * adreno_perfcounter_save() - Save performance counters
+ * @adreno_dev: adreno device to configure
+ *
+ * Save the performance counter values before GPU power collapse.
+ * The saved values are restored on restart.
+ * This ensures physical counters are coherent across power-collapse.
+ */
+static inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
+{
+ if (adreno_dev->gpudev->perfcounter_save)
+ adreno_dev->gpudev->perfcounter_save(adreno_dev);
+}
+
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -631,8 +808,6 @@
kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
- kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
-
kgsl_mmu_unmap(pagetable, &device->memstore);
kgsl_mmu_unmap(pagetable, &adreno_dev->pwron_fixup);
@@ -653,14 +828,11 @@
/*
* ALERT: Order of these mapping is important to
- * Keep the most used entries like memptrs, memstore
+ * Keep the most used entries like memstore
* and mmu setstate memory by TLB prefetcher.
*/
if (!result)
- result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc);
-
- if (!result)
result = kgsl_mmu_map_global(pagetable, &device->memstore);
if (!result)
@@ -1364,15 +1536,6 @@
&pdata->init_level))
pdata->init_level = 1;
- /*
- * qcom,step-pwrlevel isn't required so don't spam the kernel log
- * if it isn't found
- */
-
- if (of_property_read_u32(parent, "qcom,step-pwrlevel",
- &pdata->step_mul))
- pdata->step_mul = 1;
-
if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
KGSL_CORE_ERR("Initial power level out of range\n");
pdata->init_level = 1;
@@ -1506,6 +1669,9 @@
pdata->strtstp_sleepwake = of_property_read_bool(pdev->dev.of_node,
"qcom,strtstp-sleepwake");
+ pdata->bus_control = of_property_read_bool(pdev->dev.of_node,
+ "qcom,bus-control");
+
if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
&pdata->clk_map))
goto err;
@@ -1635,14 +1801,23 @@
adreno_ft_init_sysfs(device);
- kgsl_pwrscale_init(device);
- kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
+ kgsl_pwrscale_init(&pdev->dev, CONFIG_MSM_ADRENO_DEFAULT_GOVERNOR);
+
device->flags &= ~KGSL_FLAGS_SOFT_RESET;
pdata = kgsl_device_get_drvdata(device);
adreno_coresight_init(pdev);
+ adreno_input_handler.private = device;
+
+ /*
+ * It isn't fatal if we cannot register the input handler. Sad,
+ * perhaps, but not fatal
+ */
+ if (input_register_handler(&adreno_input_handler))
+ KGSL_DRV_ERR(device, "Unable to register the input handler\n");
+
return 0;
error_close_device:
@@ -1663,10 +1838,11 @@
device = (struct kgsl_device *)pdev->id_entry->driver_data;
adreno_dev = ADRENO_DEVICE(device);
+ input_unregister_handler(&adreno_input_handler);
+
adreno_coresight_remove(pdev);
adreno_profile_close(device);
- kgsl_pwrscale_detach_policy(device);
kgsl_pwrscale_close(device);
adreno_dispatcher_close(adreno_dev);
@@ -1685,6 +1861,9 @@
int i;
int ret;
+ /* Make a high priority workqueue for starting the GPU */
+ adreno_wq = alloc_workqueue("adreno", WQ_HIGHPRI | WQ_UNBOUND, 1);
+
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/*
* initialization only needs to be done once initially until
@@ -1761,10 +1940,17 @@
return ret;
}
-static int adreno_start(struct kgsl_device *device)
+/**
+ * _adreno_start - Power up the GPU and prepare to accept commands
+ * @adreno_dev: Pointer to an adreno_device structure
+ *
+ * The core function that powers up and initializes the GPU. This function is
+ * called at init and after coming out of SLUMBER
+ */
+static int _adreno_start(struct adreno_device *adreno_dev)
{
+ struct kgsl_device *device = &adreno_dev->dev;
int status = -EINVAL;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int state = device->state;
unsigned int regulator_left_on = 0;
@@ -1817,6 +2003,9 @@
adreno_dev->gpudev->soft_reset(adreno_dev);
}
+ /* Restore performance counter registers with saved values */
+ adreno_perfcounter_restore(adreno_dev);
+
/* Start the GPU */
adreno_dev->gpudev->start(adreno_dev);
@@ -1854,6 +2043,58 @@
return status;
}
+static int _status;
+
+/**
+ * adreno_start_work() - Work handler for the low latency adreno_start
+ * @work: Pointer to the work_struct for the start operation
+ *
+ * The work callback for the low latency GPU start - this executes the core
+ * _adreno_start function in the workqueue.
+ */
+static void adreno_start_work(struct work_struct *work)
+{
+ struct adreno_device *adreno_dev = container_of(work,
+ struct adreno_device, start_work);
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ /* Nice ourselves to be higher priority but not too high priority */
+ set_user_nice(current, _wake_nice);
+
+ mutex_lock(&device->mutex);
+ _status = _adreno_start(adreno_dev);
+ mutex_unlock(&device->mutex);
+}
+
+/**
+ * adreno_start() - Power up and initialize the GPU
+ * @device: Pointer to the KGSL device to power up
+ * @priority: Boolean flag to specify if the start should be scheduled in a low
+ * latency work queue
+ *
+ * Power up the GPU and initialize it. If priority is specified then queue the
+ * start function in a high priority queue for lower latency.
+ */
+static int adreno_start(struct kgsl_device *device, int priority)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* No priority (normal latency) call the core start function directly */
+ if (!priority)
+ return _adreno_start(adreno_dev);
+
+ /*
+ * If priority is specified (low latency) then queue the work in a
+ * higher priority work queue and wait for it to finish
+ */
+ queue_work(adreno_wq, &adreno_dev->start_work);
+ mutex_unlock(&device->mutex);
+ flush_work(&adreno_dev->start_work);
+ mutex_lock(&device->mutex);
+
+ return _status;
+}
+
static int adreno_stop(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1874,6 +2115,9 @@
adreno_ocmem_gmem_free(adreno_dev);
+ /* Save physical performance counter values before GPU power down*/
+ adreno_perfcounter_save(adreno_dev);
+
/* Power down the device */
kgsl_pwrctrl_disable(device);
@@ -1908,7 +2152,7 @@
/* Keep trying to start the device until it works */
for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
- ret = adreno_start(device);
+ ret = adreno_start(device, 0);
if (!ret)
break;
@@ -2090,12 +2334,29 @@
const char *buf, size_t count)
{
struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- int ret;
+ int ret, tmp;
+
if (adreno_dev == NULL)
return 0;
mutex_lock(&adreno_dev->dev.mutex);
+
+ tmp = adreno_dev->fast_hang_detect;
+
ret = _ft_sysfs_store(buf, count, &adreno_dev->fast_hang_detect);
+
+ if (tmp != adreno_dev->fast_hang_detect) {
+ if (adreno_dev->fast_hang_detect) {
+ if (adreno_dev->gpudev->fault_detect_start)
+ adreno_dev->gpudev->fault_detect_start(
+ adreno_dev);
+ } else {
+ if (adreno_dev->gpudev->fault_detect_stop)
+ adreno_dev->gpudev->fault_detect_stop(
+ adreno_dev);
+ }
+ }
+
mutex_unlock(&adreno_dev->dev.mutex);
return ret;
@@ -2166,6 +2427,36 @@
(adreno_dev->long_ib_detect ? 1 : 0));
}
+/**
+ * _wake_timeout_store() - Store the amount of time to extend idle check after
+ * wake on touch
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ *
+ */
+static ssize_t _wake_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return _ft_sysfs_store(buf, count, &_wake_timeout);
+}
+
+/**
+ * _wake_timeout_show() - Show the amount of time idle check gets extended
+ * after wake on touch
+ *
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ */
+static ssize_t _wake_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", _wake_timeout);
+}
#define FT_DEVICE_ATTR(name) \
DEVICE_ATTR(name, 0644, _ ## name ## _show, _ ## name ## _store);
@@ -2175,12 +2466,16 @@
FT_DEVICE_ATTR(ft_fast_hang_detect);
FT_DEVICE_ATTR(ft_long_ib_detect);
+static DEVICE_INT_ATTR(wake_nice, 0644, _wake_nice);
+static FT_DEVICE_ATTR(wake_timeout);
const struct device_attribute *ft_attr_list[] = {
&dev_attr_ft_policy,
&dev_attr_ft_pagefault_policy,
&dev_attr_ft_fast_hang_detect,
&dev_attr_ft_long_ib_detect,
+ &dev_attr_wake_nice.attr,
+ &dev_attr_wake_timeout,
NULL,
};
@@ -2316,11 +2611,19 @@
if (enable) {
device->pwrctrl.ctrl_flags = 0;
adreno_dev->fast_hang_detect = 1;
+
+ if (adreno_dev->gpudev->fault_detect_start)
+ adreno_dev->gpudev->fault_detect_start(
+ adreno_dev);
+
kgsl_pwrscale_enable(device);
} else {
- kgsl_pwrctrl_wake(device);
+ kgsl_pwrctrl_wake(device, 0);
device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
adreno_dev->fast_hang_detect = 0;
+ if (adreno_dev->gpudev->fault_detect_stop)
+ adreno_dev->gpudev->fault_detect_stop(
+ adreno_dev);
kgsl_pwrscale_disable(device);
}
@@ -2403,9 +2706,15 @@
/* Make sure we are totally awake */
kgsl_pwrctrl_enable(device);
+ /* save physical performance counter values before GPU soft reset */
+ adreno_perfcounter_save(adreno_dev);
+
/* Reset the GPU */
adreno_dev->gpudev->soft_reset(adreno_dev);
+ /* Restore physical performance counter values after soft reset */
+ adreno_perfcounter_restore(adreno_dev);
+
/* Reinitialize the GPU */
adreno_dev->gpudev->start(adreno_dev);
@@ -2580,9 +2889,6 @@
if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size))
return &ringbuffer->buffer_desc;
- if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr, size))
- return &ringbuffer->memptrs_desc;
-
if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
return &device->memstore;
@@ -2842,10 +3148,10 @@
}
-static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
+static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
{
- gpu_freq /= 1000000;
- return ticks / gpu_freq;
+ freq /= 1000000;
+ return ticks / freq;
}
static void adreno_power_stats(struct kgsl_device *device,
@@ -2853,32 +3159,21 @@
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int cycles = 0;
+ struct adreno_busy_data busy_data;
+ memset(stats, 0, sizeof(*stats));
/*
* Get the busy cycles counted since the counter was last reset.
* If we're not currently active, there shouldn't have been
* any cycles since the last time this function was called.
*/
if (device->state == KGSL_STATE_ACTIVE)
- cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
+ adreno_dev->gpudev->busy_cycles(adreno_dev, &busy_data);
- /*
- * In order to calculate idle you have to have run the algorithm
- * at least once to get a start time.
- */
- if (pwr->time != 0) {
- s64 tmp = ktime_to_us(ktime_get());
- stats->total_time = tmp - pwr->time;
- pwr->time = tmp;
- stats->busy_time = adreno_ticks_to_us(cycles, device->pwrctrl.
- pwrlevels[device->pwrctrl.active_pwrlevel].
- gpu_freq);
- } else {
- stats->total_time = 0;
- stats->busy_time = 0;
- pwr->time = ktime_to_us(ktime_get());
- }
+ stats->busy_time = adreno_ticks_to_us(busy_data.gpu_busy,
+ kgsl_pwrctrl_active_freq(pwr));
+ stats->ram_time = busy_data.vbif_ram_cycles;
+ stats->ram_wait = busy_data.vbif_starved_ram;
}
void adreno_irqctrl(struct kgsl_device *device, int state)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 418d230..b5938b0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -62,6 +62,7 @@
#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
#endif
+void adreno_debugfs_init(struct kgsl_device *device);
#define ADRENO_ISTORE_START 0x5000 /* Istore offset */
@@ -148,6 +149,12 @@
struct adreno_gpudev;
+struct adreno_busy_data {
+ unsigned int gpu_busy;
+ unsigned int vbif_ram_cycles;
+ unsigned int vbif_starved_ram;
+};
+
struct adreno_device {
struct kgsl_device dev; /* Must be first field in this struct */
unsigned long priv;
@@ -188,11 +195,14 @@
unsigned int gpulist_index;
struct ocmem_buf *ocmem_hdl;
unsigned int ocmem_base;
- unsigned int gpu_cycles;
struct adreno_profile profile;
struct kgsl_memdesc pwron_fixup;
unsigned int pwron_fixup_dwords;
struct adreno_dispatcher dispatcher;
+ struct adreno_busy_data busy_data;
+
+ struct work_struct start_work;
+ struct work_struct input_work;
};
/**
@@ -220,6 +230,7 @@
* @offset: register hardware offset
* @load_bit: The bit number in LOAD register which corresponds to this counter
* @select: The countable register offset
+ * @value: The 64 bit countable register value
*/
struct adreno_perfcount_register {
unsigned int countable;
@@ -228,6 +239,7 @@
unsigned int offset;
int load_bit;
unsigned int select;
+ uint64_t value;
};
/**
@@ -350,12 +362,18 @@
int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
int (*perfcounter_init)(struct adreno_device *);
void (*perfcounter_close)(struct adreno_device *);
+ void (*perfcounter_save)(struct adreno_device *);
+ void (*perfcounter_restore)(struct adreno_device *);
+ void (*fault_detect_start)(struct adreno_device *);
+ void (*fault_detect_stop)(struct adreno_device *);
void (*start)(struct adreno_device *);
- unsigned int (*busy_cycles)(struct adreno_device *);
int (*perfcounter_enable)(struct adreno_device *, unsigned int group,
unsigned int counter, unsigned int countable);
+ void (*busy_cycles)(struct adreno_device *, struct adreno_busy_data *);
uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter);
+ void (*perfcounter_write)(struct adreno_device *adreno_dev,
+ unsigned int group, unsigned int counter);
int (*coresight_enable) (struct kgsl_device *device);
void (*coresight_disable) (struct kgsl_device *device);
void (*coresight_config_debug_reg) (struct kgsl_device *device,
@@ -364,7 +382,7 @@
void (*postmortem_dump)(struct adreno_device *adreno_dev);
};
-#define FT_DETECT_REGS_COUNT 12
+#define FT_DETECT_REGS_COUNT 14
struct log_field {
bool show;
@@ -845,4 +863,19 @@
return 0;
}
+/**
+ * adreno_get_rptr() - Get the current ringbuffer read pointer
+ * @rb: Pointer the ringbuffer to query
+ *
+ * Get the current read pointer from the GPU register.
+ */
+static inline unsigned int
+adreno_get_rptr(struct adreno_ringbuffer *rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+ unsigned int result;
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &result);
+ return result;
+}
+
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 24a0933..622350d3 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1971,17 +1971,19 @@
return 0;
}
-static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
+static void a2xx_busy_cycles(struct adreno_device *adreno_dev,
+ struct adreno_busy_data *data)
{
struct kgsl_device *device = &adreno_dev->dev;
- unsigned int reg, val;
+ unsigned int reg;
+ memset(data, 0, sizeof(*data));
/* Freeze the counter */
kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
REG_PERF_MODE_CNT | REG_PERF_STATE_FREEZE);
/* Get the value */
- kgsl_regread(device, REG_RBBM_PERFCOUNTER1_LO, &val);
+ kgsl_regread(device, REG_RBBM_PERFCOUNTER1_LO, &data->gpu_busy);
/* Reset the counter */
kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
@@ -1994,7 +1996,6 @@
kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
- return val;
}
static void a2xx_gmeminit(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index df1794f..ced7fe6 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -95,6 +95,11 @@
#define _SET(_shift, _val) ((_val) << (_shift))
+/* EN/CLR mask for the VBIF counters we care about */
+#define VBIF_PERF_MASK (VBIF_PERF_CNT_0 | VBIF_PERF_PWR_CNT_0)
+#define RBBM_PERF_ENABLE_MASK (RBBM_RBBM_CTL_ENABLE_PWR_CTR1)
+#define RBBM_PERF_RESET_MASK (RBBM_RBBM_CTL_RESET_PWR_CTR1)
+
/*
****************************************************************************
*
@@ -3376,6 +3381,136 @@
return (((uint64_t) hi) << 32) | lo;
}
+/*
+ * values cannot be loaded into physical performance
+ * counters belonging to these groups.
+ */
+static inline int loadable_perfcounter_group(unsigned int groupid)
+{
+ return ((groupid == KGSL_PERFCOUNTER_GROUP_VBIF_PWR) ||
+ (groupid == KGSL_PERFCOUNTER_GROUP_VBIF) ||
+ (groupid == KGSL_PERFCOUNTER_GROUP_PWR)) ? 0 : 1;
+}
+
+/*
+ * Return true if the countable is used and not broken
+ */
+static inline int active_countable(unsigned int countable)
+{
+ return ((countable != KGSL_PERFCOUNTER_NOT_USED) &&
+ (countable != KGSL_PERFCOUNTER_BROKEN));
+}
+
+/**
+ * a3xx_perfcounter_save() - Save the physical performance counter values
+ * @adreno_dev - Adreno device whose registers need to be saved
+ *
+ * Read all the physical performance counter's values and save them
+ * before GPU power collapse.
+ */
+static void a3xx_perfcounter_save(struct adreno_device *adreno_dev)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ unsigned int regid, groupid;
+
+ for (groupid = 0; groupid < counters->group_count; groupid++) {
+ if (!loadable_perfcounter_group(groupid))
+ continue;
+
+ group = &(counters->groups[groupid]);
+
+ /* group/counter iterator */
+ for (regid = 0; regid < group->reg_count; regid++) {
+ if (!active_countable(group->regs[regid].countable))
+ continue;
+
+ group->regs[regid].value =
+ adreno_dev->gpudev->perfcounter_read(
+ adreno_dev, groupid, regid);
+ }
+ }
+}
+
+/**
+ * a3xx_perfcounter_write() - Write the physical performance counter values.
+ * @adreno_dev - Adreno device whose registers are to be written to.
+ * @group - group to which the physical counter belongs to.
+ * @counter - register id of the physical counter to which the value is
+ * written to.
+ *
+ * This function loads the 64 bit saved value into the particular physical
+ * counter by enabling the corresponding bit in A3XX_RBBM_PERFCTR_LOAD_CMD*
+ * register.
+ */
+static void a3xx_perfcounter_write(struct adreno_device *adreno_dev,
+ unsigned int group, unsigned int counter)
+{
+ struct kgsl_device *device = &(adreno_dev->dev);
+ struct adreno_perfcount_register *reg;
+ unsigned int val;
+
+ reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
+
+ /* Clear the load cmd registers */
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, 0);
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, 0);
+
+ /* Write the saved value to PERFCTR_LOAD_VALUE* registers. */
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_VALUE_LO,
+ (uint32_t)reg->value);
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_VALUE_HI,
+ (uint32_t)(reg->value >> 32));
+
+ /*
+ * Set the load bit in PERFCTR_LOAD_CMD for the physical counter
+ * we want to restore. The value in PERFCTR_LOAD_VALUE* is loaded
+ * into the corresponding physical counter.
+ */
+ if (reg->load_bit < 32) {
+ val = 1 << reg->load_bit;
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val);
+ } else {
+ val = 1 << (reg->load_bit - 32);
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val);
+ }
+}
+
+/**
+ * a3xx_perfcounter_restore() - Restore the physical performance counter values.
+ * @adreno_dev - Adreno device whose registers are to be restored.
+ *
+ * This function together with a3xx_perfcounter_save make sure that performance
+ * counters are coherent across GPU power collapse.
+ */
+static void a3xx_perfcounter_restore(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ unsigned int regid, groupid;
+
+ for (groupid = 0; groupid < counters->group_count; groupid++) {
+ if (!loadable_perfcounter_group(groupid))
+ continue;
+
+ group = &(counters->groups[groupid]);
+
+ /* group/counter iterator */
+ for (regid = 0; regid < group->reg_count; regid++) {
+ if (!active_countable(group->regs[regid].countable))
+ continue;
+
+ a3xx_perfcounter_write(adreno_dev, groupid, regid);
+ }
+ }
+
+ /* Clear the load cmd registers */
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, 0);
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, 0);
+
+}
+
#define A3XX_IRQ_CALLBACK(_c) { .func = _c }
#define A3XX_INT_MASK \
@@ -3474,32 +3609,63 @@
return (status & A3XX_INT_MASK) ? 1 : 0;
}
-static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
+static unsigned int counter_delta(struct adreno_device *adreno_dev,
+ unsigned int reg, unsigned int *counter)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int val;
unsigned int ret = 0;
/* Read the value */
- kgsl_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);
+ if (reg == ADRENO_REG_RBBM_PERFCTR_PWR_1_LO)
+ adreno_readreg(adreno_dev, reg, &val);
+ else
+ kgsl_regread(device, reg, &val);
/* Return 0 for the first read */
- if (adreno_dev->gpu_cycles != 0) {
- if (val < adreno_dev->gpu_cycles)
- ret = (0xFFFFFFFF - adreno_dev->gpu_cycles) + val;
+ if (*counter != 0) {
+ if (val < *counter)
+ ret = (0xFFFFFFFF - *counter) + val;
else
- ret = val - adreno_dev->gpu_cycles;
+ ret = val - *counter;
}
- adreno_dev->gpu_cycles = val;
+ *counter = val;
return ret;
}
+/*
+ * a3xx_busy_cycles() - Returns number of gpu cycles
+ * @adreno_dev: Pointer to device whose cycles are checked
+ *
+ * Returns number of busy cycles since the last time this function is called
+ * Function is common between a3xx and a4xx devices
+ */
+void a3xx_busy_cycles(struct adreno_device *adreno_dev,
+ struct adreno_busy_data *data)
+{
+ struct adreno_busy_data *busy = &adreno_dev->busy_data;
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ memset(data, 0, sizeof(*data));
+
+ data->gpu_busy = counter_delta(adreno_dev,
+ ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
+ &busy->gpu_busy);
+ if (device->pwrctrl.bus_control) {
+ data->vbif_ram_cycles = counter_delta(adreno_dev,
+ A3XX_VBIF_PERF_CNT0_LO,
+ &busy->vbif_ram_cycles);
+ data->vbif_starved_ram = counter_delta(adreno_dev,
+ A3XX_VBIF_PERF_PWR_CNT0_LO,
+ &busy->vbif_starved_ram);
+ }
+}
+
struct a3xx_vbif_data {
unsigned int reg;
unsigned int val;
};
-
/* VBIF registers start after 0x3000 so use 0x0 as end of list marker */
static struct a3xx_vbif_data a305_vbif[] = {
/* Set up 16 deep read/write request queues */
@@ -3789,27 +3955,123 @@
ARRAY_SIZE(a3xx_perfcounter_groups),
};
-/*
- * a3xx_perfcounter_close() - Return counters that were initialized in
+static inline int _get_counter(struct adreno_device *adreno_dev,
+ int group, int countable, unsigned int *lo,
+ unsigned int *hi)
+{
+ int ret = 0;
+
+ if (*lo == 0) {
+ *hi = 0;
+
+ ret = adreno_perfcounter_get(adreno_dev, group, countable,
+ lo, PERFCOUNTER_FLAG_KERNEL);
+
+ if (ret == 0)
+ *hi = *lo + 1;
+ else {
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ KGSL_DRV_ERR(device,
+ "Unable to allocate fault detect performance counter %d/%d\n",
+ group, countable);
+ KGSL_DRV_ERR(device,
+ "GPU fault detect will be less reliable\n");
+ }
+ }
+
+ return ret;
+}
+
+static inline void _put_counter(struct adreno_device *adreno_dev,
+ int group, int countable, unsigned int *lo,
+ unsigned int *hi)
+{
+ if (*lo != 0) {
+ adreno_perfcounter_put(adreno_dev, group, countable,
+ PERFCOUNTER_FLAG_KERNEL);
+ }
+
+ *lo = 0;
+ *hi = 0;
+}
+
+/**
+ * a3xx_fault_detect_start() - Allocate performance counters used for fast fault
+ * detection
+ * @adreno_dev: Pointer to an adreno_device structure
+ *
+ * Allocate the series of performance counters that should be periodically
+ * checked to verify that the GPU is still moving
+ */
+void a3xx_fault_detect_start(struct adreno_device *adreno_dev)
+{
+ _get_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_ALU_ACTIVE_CYCLES,
+ &ft_detect_regs[6], &ft_detect_regs[7]);
+
+ _get_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP0_ICL1_MISSES,
+ &ft_detect_regs[8], &ft_detect_regs[9]);
+
+ _get_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_CFLOW_INSTRUCTIONS,
+ &ft_detect_regs[10], &ft_detect_regs[11]);
+
+ _get_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_TSE,
+ TSE_INPUT_PRIM_NUM,
+ &ft_detect_regs[12], &ft_detect_regs[13]);
+}
+/**
+ * a3xx_fault_detect_stop() - Release performance counters used for fast fault
+ * detection
+ * @adreno_dev: Pointer to an adreno_device structure
+ *
+ * Release the counters allocated in a3xx_fault_detect_start
+ */
+void a3xx_fault_detect_stop(struct adreno_device *adreno_dev)
+{
+ _put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_ALU_ACTIVE_CYCLES,
+ &ft_detect_regs[6], &ft_detect_regs[7]);
+
+ _put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP0_ICL1_MISSES,
+ &ft_detect_regs[8], &ft_detect_regs[9]);
+
+ _put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_CFLOW_INSTRUCTIONS,
+ &ft_detect_regs[10], &ft_detect_regs[11]);
+
+ _put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_TSE,
+ TSE_INPUT_PRIM_NUM,
+ &ft_detect_regs[12], &ft_detect_regs[13]);
+}
+
+/**
+ * a3xx_perfcounter_close() - Put counters that were initialized in
* a3xx_perfcounter_init
- * @adreno_dev: The device for which counters were initialized
+ * @adreno_dev: Pointer to an adreno_device structure
*/
static void a3xx_perfcounter_close(struct adreno_device *adreno_dev)
{
- adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_FS_FULL_ALU_INSTRUCTIONS,
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
PERFCOUNTER_FLAG_KERNEL);
- adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_FS_CFLOW_INSTRUCTIONS,
+
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 0,
PERFCOUNTER_FLAG_KERNEL);
- adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP0_ICL1_MISSES,
- PERFCOUNTER_FLAG_KERNEL);
- adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_ALU_ACTIVE_CYCLES,
- PERFCOUNTER_FLAG_KERNEL);
+
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_VBIF,
+ VBIF_AXI_TOTAL_BEATS, PERFCOUNTER_FLAG_KERNEL);
+
+ if (adreno_dev->fast_hang_detect)
+ a3xx_fault_detect_stop(adreno_dev);
}
+/**
+ * a3xx_perfcounter_init() - Allocate performance counters for use in the kernel
+ * @adreno_dev: Pointer to an adreno_device structure
+ */
static int a3xx_perfcounter_init(struct adreno_device *adreno_dev)
{
int ret;
@@ -3817,57 +4079,25 @@
if (adreno_is_a330(adreno_dev))
a3xx_perfcounters_sp[3].countable = KGSL_PERFCOUNTER_BROKEN;
- /*
- * Set SP to count SP_ALU_ACTIVE_CYCLES, it includes
- * all ALU instruction execution regardless precision or shader ID.
- * Set SP to count SP0_ICL1_MISSES, It counts
- * USP L1 instruction miss request.
- * Set SP to count SP_FS_FULL_ALU_INSTRUCTIONS, it
- * counts USP flow control instruction execution.
- * we will use this to augment our hang detection
- */
- if (adreno_dev->fast_hang_detect) {
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_SP,
- SP_ALU_ACTIVE_CYCLES, &ft_detect_regs[6],
- PERFCOUNTER_FLAG_KERNEL);
- if (ret)
- goto err;
- ft_detect_regs[7] = ft_detect_regs[6] + 1;
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_SP,
- SP0_ICL1_MISSES, &ft_detect_regs[8],
- PERFCOUNTER_FLAG_KERNEL);
- if (ret)
- goto err;
- ft_detect_regs[9] = ft_detect_regs[8] + 1;
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_SP,
- SP_FS_CFLOW_INSTRUCTIONS, &ft_detect_regs[10],
- PERFCOUNTER_FLAG_KERNEL);
- if (ret)
- goto err;
- ft_detect_regs[11] = ft_detect_regs[10] + 1;
- }
-
- ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_FS_FULL_ALU_INSTRUCTIONS, NULL, PERFCOUNTER_FLAG_KERNEL);
- if (ret)
- goto err;
+ if (adreno_dev->fast_hang_detect)
+ a3xx_fault_detect_start(adreno_dev);
/* Reserve and start countable 1 in the PWR perfcounter group */
ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
NULL, PERFCOUNTER_FLAG_KERNEL);
- if (ret)
- goto err;
+
+ /* VBIF waiting for RAM */
+ ret |= adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 0,
+ NULL, PERFCOUNTER_FLAG_KERNEL);
+ /* VBIF DDR cycles */
+ ret |= adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_VBIF,
+ VBIF_AXI_TOTAL_BEATS, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
/* Default performance counter profiling to false */
adreno_dev->profile.enabled = false;
return ret;
-
-err:
- a3xx_perfcounter_close(adreno_dev);
- return ret;
}
/**
@@ -3971,14 +4201,14 @@
a3xx_protect_init(device);
/* Turn on performance counters */
- kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
-
- /* Turn on the GPU busy counter and let it run free */
-
- adreno_dev->gpu_cycles = 0;
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, RBBM_PERF_ENABLE_MASK);
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_SEL,
+ _SET(VBIF_PERF_CNT_0_SEL, VBIF_AXI_TOTAL_BEATS));
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, VBIF_PERF_MASK);
/* the CP_DEBUG register offset and value are same as A2XX */
kgsl_regwrite(device, REG_CP_DEBUG, A2XX_CP_DEBUG_DEFAULT);
+ memset(&adreno_dev->busy_data, 0, sizeof(adreno_dev->busy_data));
}
/**
@@ -4345,6 +4575,8 @@
.rb_init = a3xx_rb_init,
.perfcounter_init = a3xx_perfcounter_init,
.perfcounter_close = a3xx_perfcounter_close,
+ .perfcounter_save = a3xx_perfcounter_save,
+ .perfcounter_restore = a3xx_perfcounter_restore,
.irq_control = a3xx_irq_control,
.irq_handler = a3xx_irq_handler,
.irq_pending = a3xx_irq_pending,
@@ -4353,9 +4585,12 @@
.snapshot = a3xx_snapshot,
.perfcounter_enable = a3xx_perfcounter_enable,
.perfcounter_read = a3xx_perfcounter_read,
+ .perfcounter_write = a3xx_perfcounter_write,
.coresight_enable = a3xx_coresight_enable,
.coresight_disable = a3xx_coresight_disable,
.coresight_config_debug_reg = a3xx_coresight_config_debug_reg,
+ .fault_detect_start = a3xx_fault_detect_start,
+ .fault_detect_stop = a3xx_fault_detect_stop,
.soft_reset = a3xx_soft_reset,
.postmortem_dump = a3xx_postmortem_dump,
};
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 8d3efd6..28fd6d6 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -441,6 +441,11 @@
profile, *(ptr + buf_off++));
if (assigns_list == NULL) {
*log_ptr = (unsigned int) -1;
+
+ shared_buf_inc(profile->shared_size,
+ &profile->shared_tail,
+ SIZE_SHARED_ENTRY(cnt));
+
goto err;
} else {
*log_ptr = assigns_list->groupid << 16 |
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a43bd54..1383a20 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -368,61 +368,26 @@
*/
void _ringbuffer_setup_common(struct adreno_ringbuffer *rb)
{
- union reg_cp_rb_cntl cp_rb_cntl;
- unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- kgsl_sharedmem_set(rb->device, &rb->memptrs_desc, 0, 0,
- sizeof(struct kgsl_rbmemptrs));
-
kgsl_sharedmem_set(rb->device, &rb->buffer_desc, 0, 0xAA,
(rb->sizedwords << 2));
- if (adreno_is_a2xx(adreno_dev)) {
- kgsl_regwrite(device, REG_CP_RB_WPTR_BASE,
- (rb->memptrs_desc.gpuaddr
- + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
-
- /* setup WPTR delay */
- kgsl_regwrite(device, REG_CP_RB_WPTR_DELAY,
- 0 /*0x70000010 */);
- }
-
- /*setup REG_CP_RB_CNTL */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_CNTL, &rb_cntl);
- cp_rb_cntl.val = rb_cntl;
-
/*
* The size of the ringbuffer in the hardware is the log2
- * representation of the size in quadwords (sizedwords / 2)
+ * representation of the size in quadwords (sizedwords / 2).
+ * Also disable the host RPTR shadow register as it might be unreliable
+ * in certain circumstances.
*/
- cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
- /*
- * Specify the quadwords to read before updating mem RPTR.
- * Like above, pass the log2 representation of the blocksize
- * in quadwords.
- */
- cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
-
- if (adreno_is_a2xx(adreno_dev)) {
- /* WPTR polling */
- cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
- }
-
- /* mem RPTR writebacks */
- cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
-
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL, cp_rb_cntl.val);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
+ (ilog2(rb->sizedwords >> 1) & 0x3F) |
+ (1 << 27));
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR,
- rb->memptrs_desc.gpuaddr +
- GSL_RB_MEMPTRS_RPTR_OFFSET);
-
if (adreno_is_a2xx(adreno_dev)) {
/* explicitly clear all cp interrupts */
kgsl_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
@@ -621,20 +586,6 @@
return status;
}
- /* allocate memory for polling and timestamps */
- /* This really can be at 4 byte alignment boundry but for using MMU
- * we need to make it at page boundary */
- status = kgsl_allocate_contiguous(&rb->memptrs_desc,
- sizeof(struct kgsl_rbmemptrs));
-
- if (status != 0) {
- adreno_ringbuffer_close(rb);
- return status;
- }
-
- /* overlay structure on memptrs memory */
- rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
-
rb->global_ts = 0;
return 0;
@@ -645,7 +596,6 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
kgsl_sharedmem_free(&rb->buffer_desc);
- kgsl_sharedmem_free(&rb->memptrs_desc);
kfree(adreno_dev->pfp_fw);
kfree(adreno_dev->pm4_fw);
@@ -1156,6 +1106,13 @@
/* wait for the suspend gate */
wait_for_completion(&device->cmdbatch_gate);
+ /*
+ * Clear the wake on touch bit to indicate an IB has been submitted
+ * since the last time we set it
+ */
+
+ device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+
/* Queue the command in the ringbuffer */
ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
timestamp);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index eee4127..697e113 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -19,7 +19,6 @@
*/
#define KGSL_RB_SIZE (32 * 1024)
-#define KGSL_RB_BLKSIZE 16
/* CP timestamp register */
#define REG_CP_TIMESTAMP REG_SCRATCH_REG0
@@ -28,27 +27,12 @@
struct kgsl_device;
struct kgsl_device_private;
-#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
-struct kgsl_rbmemptrs {
- int rptr;
- int wptr_poll;
-};
-
-#define GSL_RB_MEMPTRS_RPTR_OFFSET \
- (offsetof(struct kgsl_rbmemptrs, rptr))
-
-#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
- (offsetof(struct kgsl_rbmemptrs, wptr_poll))
-
struct adreno_ringbuffer {
struct kgsl_device *device;
uint32_t flags;
struct kgsl_memdesc buffer_desc;
- struct kgsl_memdesc memptrs_desc;
- struct kgsl_rbmemptrs *memptrs;
-
/*ringbuffer size */
unsigned int sizedwords;
@@ -70,25 +54,6 @@
/* enable timestamp (...scratch0) memory shadowing */
#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
-/* mem rptr */
-#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
-
-/**
- * adreno_get_rptr - Get the current ringbuffer read pointer
- * @rb - the ringbuffer
- *
- * Get the current read pointer, which is written by the GPU.
- */
-static inline unsigned int
-adreno_get_rptr(struct adreno_ringbuffer *rb)
-{
- unsigned int result = rb->memptrs->rptr;
- rmb();
- return result;
-}
-
-#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
-
/*
* protected mode error checking below register address 0x800
* note: if CP_INTERRUPT packet is used then checking needs
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index fe6b34c..fe99a4b 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -60,6 +60,9 @@
struct sg_table *table;
};
+static void kgsl_put_process_private(struct kgsl_device *device,
+ struct kgsl_process_private *private);
+
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
static void
@@ -341,14 +344,19 @@
*/
static int
kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
- struct kgsl_process_private *process)
+ struct kgsl_device_private *dev_priv)
{
int ret;
+ struct kgsl_process_private *process = dev_priv->process_priv;
+
+ ret = kref_get_unless_zero(&process->refcount);
+ if (!ret)
+ return -EBADF;
while (1) {
if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
- goto err;
+ goto err_put_proc_priv;
}
spin_lock(&process->mem_lock);
@@ -359,9 +367,10 @@
if (ret == 0)
break;
else if (ret != -EAGAIN)
- goto err;
+ goto err_put_proc_priv;
}
entry->priv = process;
+ entry->dev_priv = dev_priv;
spin_lock(&process->mem_lock);
ret = kgsl_mem_entry_track_gpuaddr(process, entry);
@@ -369,14 +378,17 @@
idr_remove(&process->mem_idr, entry->id);
spin_unlock(&process->mem_lock);
if (ret)
- goto err;
+ goto err_put_proc_priv;
/* map the memory after unlocking if gpuaddr has been assigned */
if (entry->memdesc.gpuaddr) {
ret = kgsl_mmu_map(process->pagetable, &entry->memdesc);
if (ret)
kgsl_mem_entry_detach_process(entry);
}
-err:
+ return ret;
+
+err_put_proc_priv:
+ kgsl_put_process_private(dev_priv->device, process);
return ret;
}
@@ -399,6 +411,7 @@
entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
spin_unlock(&entry->priv->mem_lock);
+ kgsl_put_process_private(entry->dev_priv->device, entry->priv);
entry->priv = NULL;
}
@@ -605,7 +618,6 @@
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
int status = -EINVAL;
- struct kgsl_pwrscale_policy *policy_saved;
if (!device)
return -EINVAL;
@@ -613,8 +625,6 @@
KGSL_PWR_WARN(device, "suspend start\n");
mutex_lock(&device->mutex);
- policy_saved = device->pwrscale.policy;
- device->pwrscale.policy = NULL;
kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
/* Tell the device to drain the submission queue */
@@ -659,7 +669,7 @@
goto end;
}
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
- device->pwrscale.policy = policy_saved;
+ kgsl_pwrscale_sleep(device);
status = 0;
end:
@@ -767,11 +777,6 @@
*/
static void kgsl_destroy_process_private(struct kref *kref)
{
-
- struct kgsl_mem_entry *entry = NULL;
- int next = 0;
-
-
struct kgsl_process_private *private = container_of(kref,
struct kgsl_process_private, refcount);
@@ -795,20 +800,6 @@
if (private->debug_root)
debugfs_remove_recursive(private->debug_root);
- while (1) {
- spin_lock(&private->mem_lock);
- entry = idr_get_next(&private->mem_idr, &next);
- spin_unlock(&private->mem_lock);
- if (entry == NULL)
- break;
- kgsl_mem_entry_put(entry);
- /*
- * Always start back at the beginning, to
- * ensure all entries are removed,
- * like list_for_each_entry_safe.
- */
- next = 0;
- }
idr_destroy(&private->mem_idr);
kgsl_mmu_putpagetable(private->pagetable);
@@ -953,6 +944,7 @@
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
+ struct kgsl_mem_entry *entry;
int next = 0;
filep->private_data = NULL;
@@ -981,6 +973,25 @@
next = next + 1;
}
+ next = 0;
+ while (1) {
+ spin_lock(&private->mem_lock);
+ entry = idr_get_next(&private->mem_idr, &next);
+ spin_unlock(&private->mem_lock);
+ if (entry == NULL)
+ break;
+ /*
+ * If the free pending flag is not set it means that user space
+ * did not free its reference to this entry, in that case
+ * free a reference to this entry, other references are from
+ * within kgsl so they will be freed eventually by kgsl
+ */
+ if (entry->dev_priv == dev_priv && !entry->pending_free) {
+ entry->pending_free = 1;
+ kgsl_mem_entry_put(entry);
+ }
+ next = next + 1;
+ }
/*
* Clean up any to-be-freed entries that belong to this
* process and this device. This is done after the context
@@ -1017,7 +1028,7 @@
if (result)
goto err;
- result = device->ftbl->start(device);
+ result = device->ftbl->start(device, 0);
if (result)
goto err;
/*
@@ -2743,7 +2754,7 @@
/* echo back flags */
param->flags = entry->memdesc.flags;
- result = kgsl_mem_entry_attach_process(entry, private);
+ result = kgsl_mem_entry_attach_process(entry, dev_priv);
if (result)
goto error_attach;
@@ -3031,7 +3042,7 @@
if (result)
return result;
- result = kgsl_mem_entry_attach_process(entry, private);
+ result = kgsl_mem_entry_attach_process(entry, dev_priv);
if (result != 0)
goto err;
@@ -3064,7 +3075,7 @@
if (result != 0)
goto err;
- result = kgsl_mem_entry_attach_process(entry, private);
+ result = kgsl_mem_entry_attach_process(entry, dev_priv);
if (result != 0)
goto err;
@@ -4054,7 +4065,7 @@
del_timer_sync(&device->idle_timer);
/* Force on the clocks */
- kgsl_pwrctrl_wake(device);
+ kgsl_pwrctrl_wake(device, 0);
/* Disable the irq */
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index ee7a485..6da4a86 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -200,6 +200,7 @@
struct kgsl_process_private *priv;
/* Initialized to 0, set to 1 when entry is marked for freeing */
int pending_free;
+ struct kgsl_device_private *dev_priv;
};
#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index f87c64c..7fc6fae 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -62,6 +62,8 @@
#define KGSL_EVENT_TIMESTAMP_RETIRED 0
#define KGSL_EVENT_CANCELLED 1
+#define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
+
/*
* "list" of event types for ftrace symbolic magic
*/
@@ -91,7 +93,7 @@
bool (*isidle) (struct kgsl_device *device);
int (*suspend_context) (struct kgsl_device *device);
int (*init) (struct kgsl_device *device);
- int (*start) (struct kgsl_device *device);
+ int (*start) (struct kgsl_device *device, int priority);
int (*stop) (struct kgsl_device *device);
int (*getproperty) (struct kgsl_device *device,
enum kgsl_property_type type, void *value,
@@ -292,7 +294,6 @@
struct list_head events;
struct list_head events_pending_list;
unsigned int events_last_timestamp;
- s64 on_time;
/* Postmortem Control switches */
int pm_regs_enabled;
@@ -418,11 +419,6 @@
struct kgsl_process_private *process_priv;
};
-struct kgsl_power_stats {
- s64 total_time;
- s64 busy_time;
-};
-
struct kgsl_device *kgsl_get_device(int dev_idx);
int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 8ed29fb..3e15580 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
#include <linux/ktime.h>
#include <linux/delay.h>
@@ -123,13 +124,34 @@
return level;
}
+void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
+ bool on)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int cur = pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
+ int buslevel = 0;
+ if (!pwr->pcl)
+ return;
+ /*
+ * If the bus should remain on calculate our request and submit it,
+ * otherwise request bus level 0, off.
+ */
+ if (on) {
+ buslevel = min_t(int, pwr->pwrlevels[0].bus_freq,
+ cur + pwr->bus_mod);
+ buslevel = max_t(int, buslevel, 1);
+ }
+ msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+ trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, buslevel);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update);
+
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int new_level)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_pwrlevel *pwrlevel;
- int delta;
- int level;
+ int delta, level;
/* Adjust the power level to the current constraints */
new_level = _adjust_pwrlevel(pwr, new_level);
@@ -150,14 +172,12 @@
*/
pwr->active_pwrlevel = new_level;
+ pwr->bus_mod = 0;
pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
-
- if (pwr->pcl)
- msm_bus_scale_client_update_request(pwr->pcl,
- pwrlevel->bus_freq);
- else if (pwr->ebi1_clk)
+ kgsl_pwrctrl_buslevel_update(device, true);
+ if (pwr->ebi1_clk)
clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
}
@@ -226,11 +246,8 @@
* a policy only change the active clock if it is higher then the new
* thermal level
*/
-
- if (device->pwrscale.policy == NULL ||
- pwr->thermal_pwrlevel > pwr->active_pwrlevel)
+ if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
-
mutex_unlock(&device->mutex);
return count;
@@ -285,11 +302,8 @@
* If there is no policy then move to max by default. Otherwise only
* move max if the current level happens to be higher then the new max
*/
-
- if (device->pwrscale.policy == NULL ||
- (max_level > pwr->active_pwrlevel))
+ if (max_level > pwr->active_pwrlevel)
kgsl_pwrctrl_pwrlevel_change(device, max_level);
-
mutex_unlock(&device->mutex);
return count;
@@ -479,8 +493,7 @@
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+ return snprintf(buf, PAGE_SIZE, "%ld\n", kgsl_pwrctrl_active_freq(pwr));
}
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
@@ -742,6 +755,42 @@
return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
}
+static ssize_t kgsl_pwrctrl_bus_split_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ device->pwrctrl.bus_control);
+}
+
+static ssize_t kgsl_pwrctrl_bus_split_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int rc;
+
+ if (device == NULL)
+ return 0;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = kstrtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&device->mutex);
+ device->pwrctrl.bus_control = val ? true : false;
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
kgsl_pwrctrl_max_gpuclk_store);
@@ -781,6 +830,9 @@
DEVICE_ATTR(force_rail_on, 0644,
kgsl_pwrctrl_force_rail_on_show,
kgsl_pwrctrl_force_rail_on_store);
+DEVICE_ATTR(bus_split, 0644,
+ kgsl_pwrctrl_bus_split_show,
+ kgsl_pwrctrl_bus_split_store);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
@@ -798,6 +850,7 @@
&dev_attr_force_clk_on,
&dev_attr_force_bus_on,
&dev_attr_force_rail_on,
+ &dev_attr_bus_split,
NULL
};
@@ -924,9 +977,7 @@
clk_set_rate(pwr->ebi1_clk, 0);
clk_disable_unprepare(pwr->ebi1_clk);
}
- if (pwr->pcl)
- msm_bus_scale_client_update_request(pwr->pcl,
- 0);
+ kgsl_pwrctrl_buslevel_update(device, false);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
@@ -938,10 +989,7 @@
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
- if (pwr->pcl)
- msm_bus_scale_client_update_request(pwr->pcl,
- pwr->pwrlevels[pwr->active_pwrlevel].
- bus_freq);
+ kgsl_pwrctrl_buslevel_update(device, true);
}
}
}
@@ -1011,7 +1059,7 @@
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
- int i, result = 0;
+ int i, k, m, n = 0, result = 0;
struct clk *clk;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
@@ -1092,26 +1140,61 @@
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
- if (pdata->bus_scale_table != NULL) {
- pwr->pcl = msm_bus_scale_register_client(pdata->
- bus_scale_table);
- if (!pwr->pcl) {
- KGSL_PWR_ERR(device,
- "msm_bus_scale_register_client failed: "
- "id %d table %p", device->id,
- pdata->bus_scale_table);
- result = -EINVAL;
- goto done;
- }
- }
-
- /* Set the power level step multiplier with 1 as the default */
- pwr->step_mul = pdata->step_mul ? pdata->step_mul : 1;
/* Set the CPU latency to 501usec to allow low latency PC modes */
pwr->pm_qos_latency = 501;
pm_runtime_enable(device->parentdev);
+
+ if (pdata->bus_scale_table == NULL)
+ return result;
+
+ pwr->pcl = msm_bus_scale_register_client(pdata->
+ bus_scale_table);
+ if (!pwr->pcl) {
+ KGSL_PWR_ERR(device,
+ "msm_bus_scale_register_client failed: "
+ "id %d table %p", device->id,
+ pdata->bus_scale_table);
+ result = -EINVAL;
+ goto done;
+ }
+
+ /* Set if independent bus BW voting is supported */
+ pwr->bus_control = pdata->bus_control;
+ /*
+ * Pull the BW vote out of the bus table. They will be used to
+ * calculate the ratio between the votes.
+ */
+ for (i = 0; i < pdata->bus_scale_table->num_usecases; i++) {
+ struct msm_bus_paths *usecase =
+ &pdata->bus_scale_table->usecase[i];
+ struct msm_bus_vectors *vector = &usecase->vectors[0];
+ if (vector->dst == MSM_BUS_SLAVE_EBI_CH0 &&
+ vector->ib != 0) {
+ for (k = 0; k < n; k++)
+ if (vector->ib == pwr->bus_ib[k])
+ break;
+ /* if this is a new ib value, save it */
+ if (k == n) {
+ pwr->bus_ib[k] = vector->ib;
+ n++;
+ /* find which pwrlevels use this ib */
+ for (m = 0; m < pwr->num_pwrlevels - 1; m++) {
+ if (pdata->bus_scale_table->
+ usecase[pwr->pwrlevels[m].
+ bus_freq].vectors[0].ib
+ == vector->ib)
+ pwr->bus_index[m] = k;
+ }
+ printk("kgsl bus ib [%d] = %llu\n", k, vector->ib);
+ }
+ }
+ }
+
+ for (m = 0; m < pwr->num_pwrlevels - 1; m++)
+ printk("kgsl bus index is %d for pwrlevel %d\n", pwr->bus_index[m], m);
+
return result;
clk_err:
@@ -1179,7 +1262,7 @@
mutex_lock(&device->mutex);
- kgsl_pwrscale_idle(device);
+ kgsl_pwrscale_update(device);
if (device->state == KGSL_STATE_ACTIVE
|| device->state == KGSL_STATE_NAP) {
@@ -1275,12 +1358,25 @@
static int
_nap(struct kgsl_device *device)
{
+ struct kgsl_power_stats stats;
+
switch (device->state) {
case KGSL_STATE_ACTIVE:
if (!device->ftbl->isidle(device)) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
return -EBUSY;
}
+
+ /*
+ * Read HW busy counters before going to NAP state.
+ * The data might be used by power scale governors
+ * independently of the HW activity. For example
+ * the simple-on-demand governor will get the latest
+ * busy_time data even if the gpu isn't active.
+ */
+ device->ftbl->power_stats(device, &stats);
+ device->pwrscale.accum_stats.busy_time += stats.busy_time;
+
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
@@ -1300,7 +1396,7 @@
{
kgsl_pwrctrl_busy_time(device, false);
device->pwrctrl.clk_stats.start = ktime_set(0, 0);
- device->pwrctrl.time = 0;
+
kgsl_pwrscale_sleep(device);
}
@@ -1398,9 +1494,16 @@
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
-/******************************************************************/
-/* Caller must hold the device mutex. */
-int kgsl_pwrctrl_wake(struct kgsl_device *device)
+/**
+ * kgsl_pwrctrl_wake() - Power up the GPU from a slumber/sleep state
+ * @device - Pointer to the kgsl_device struct
+ * @priority - Boolean flag to indicate that the GPU start should be run in the
+ * higher priority thread
+ *
+ * Resume the GPU from a lower power state to ACTIVE. The caller to this
+ * function must hold the kgsl_device mutex.
+ */
+int kgsl_pwrctrl_wake(struct kgsl_device *device, int priority)
{
int status = 0;
unsigned int context_id;
@@ -1411,7 +1514,8 @@
kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
switch (device->state) {
case KGSL_STATE_SLUMBER:
- status = device->ftbl->start(device);
+ status = device->ftbl->start(device, priority);
+
if (status) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
KGSL_DRV_ERR(device, "start failed %d\n", status);
@@ -1465,7 +1569,7 @@
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
- kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
@@ -1544,7 +1648,7 @@
wait_for_completion(&device->hwaccess_gate);
mutex_lock(&device->mutex);
- ret = kgsl_pwrctrl_wake(device);
+ ret = kgsl_pwrctrl_wake(device, 1);
}
if (ret == 0)
atomic_inc(&device->active_cnt);
@@ -1591,8 +1695,6 @@
BUG_ON(!mutex_is_locked(&device->mutex));
BUG_ON(atomic_read(&device->active_cnt) == 0);
- kgsl_pwrscale_idle(device);
-
if (atomic_dec_and_test(&device->active_cnt)) {
if (device->state == KGSL_STATE_ACTIVE &&
device->requested_state == KGSL_STATE_NONE) {
@@ -1602,6 +1704,8 @@
mod_timer(&device->idle_timer,
jiffies + device->pwrctrl.interval_timeout);
+ } else {
+ kgsl_pwrscale_update(device);
}
trace_kgsl_active_count(device,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 9f18160..6ec809d 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -64,7 +64,9 @@
* @clk_stats - structure of clock statistics
* @pm_qos_req_dma - the power management quality of service structure
* @pm_qos_latency - allowed CPU latency in microseconds
- * @step_mul - multiplier for moving between power levels
+ * @bus_control - true if the bus calculation is independent
+ * @bus_index - default bus index into the bus_ib table
+ * @bus_ib - the set of unique ib requests needed for the bus calculation
*/
struct kgsl_pwrctrl {
@@ -88,12 +90,14 @@
uint32_t pcl;
unsigned int idle_needed;
const char *irq_name;
- s64 time;
+ bool irq_last;
struct kgsl_clk_stats clk_stats;
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_latency;
- unsigned int step_mul;
- unsigned int irq_last;
+ bool bus_control;
+ int bus_mod;
+ unsigned int bus_index[KGSL_MAX_PWRLEVELS];
+ uint64_t bus_ib[KGSL_MAX_PWRLEVELS];
};
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
@@ -103,9 +107,11 @@
void kgsl_idle_check(struct work_struct *work);
void kgsl_pre_hwaccess(struct kgsl_device *device);
int kgsl_pwrctrl_sleep(struct kgsl_device *device);
-int kgsl_pwrctrl_wake(struct kgsl_device *device);
+int kgsl_pwrctrl_wake(struct kgsl_device *device, int priority);
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int level);
+void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device,
+ bool on);
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
void kgsl_pwrctrl_enable(struct kgsl_device *device);
@@ -117,6 +123,18 @@
return (clk != NULL) ? clk_get_rate(clk) : 0;
}
+/*
+ * kgsl_pwrctrl_active_freq - get currently configured frequency
+ * @pwr: kgsl_pwrctrl structure for the device
+ *
+ * Returns the currently configured frequency for the device.
+ */
+static inline unsigned long
+kgsl_pwrctrl_active_freq(struct kgsl_pwrctrl *pwr)
+{
+ return pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
+}
+
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state);
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 47554c4..52732cf 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -14,364 +14,497 @@
#include <linux/export.h>
#include <linux/kernel.h>
-#include <asm/page.h>
-
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
+#include "kgsl_trace.h"
-struct kgsl_pwrscale_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kgsl_device *device, char *buf);
- ssize_t (*store)(struct kgsl_device *device, const char *buf,
- size_t count);
-};
+#define FAST_BUS 1
+#define SLOW_BUS -1
-#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
-#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
-#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
-#define to_pwrscale_attr(a) \
-container_of(a, struct kgsl_pwrscale_attribute, attr)
-#define to_policy_attr(a) \
-container_of(a, struct kgsl_pwrscale_policy_attribute, attr)
+static void do_devfreq_suspend(struct work_struct *work);
+static void do_devfreq_resume(struct work_struct *work);
+static void do_devfreq_notify(struct work_struct *work);
-#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
-struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
-__ATTR(_name, _mode, _show, _store)
-
-/* Master list of available policies */
-
-static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
-#ifdef CONFIG_MSM_SCM
- &kgsl_pwrscale_policy_tz,
-#endif
-#ifdef CONFIG_MSM_SLEEP_STATS_DEVICE
- &kgsl_pwrscale_policy_idlestats,
-#endif
- NULL
-};
-
-static ssize_t pwrscale_policy_store(struct kgsl_device *device,
- const char *buf, size_t count)
-{
- int i;
- struct kgsl_pwrscale_policy *policy = NULL;
-
- /* The special keyword none allows the user to detach all
- policies */
- if (!strncmp("none", buf, 4)) {
- kgsl_pwrscale_detach_policy(device);
- return count;
- }
-
- for (i = 0; kgsl_pwrscale_policies[i]; i++) {
- if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
- strnlen(kgsl_pwrscale_policies[i]->name,
- PAGE_SIZE))) {
- policy = kgsl_pwrscale_policies[i];
- break;
- }
- }
-
- if (policy)
- if (kgsl_pwrscale_attach_policy(device, policy))
- return -EIO;
-
- return count;
-}
-
-static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
-{
- int ret;
-
- if (device->pwrscale.policy) {
- ret = snprintf(buf, PAGE_SIZE, "%s",
- device->pwrscale.policy->name);
- if (device->pwrscale.enabled == 0)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- " (disabled)");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- } else
- ret = snprintf(buf, PAGE_SIZE, "none\n");
-
- return ret;
-}
-
-PWRSCALE_ATTR(policy, 0664, pwrscale_policy_show, pwrscale_policy_store);
-
-static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
- char *buf)
-{
- int i, ret = 0;
-
- for (i = 0; kgsl_pwrscale_policies[i]; i++) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
- kgsl_pwrscale_policies[i]->name);
- }
-
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
- return ret;
-}
-PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);
-
-static struct attribute *pwrscale_attrs[] = {
- &pwrscale_attr_policy.attr,
- &pwrscale_attr_avail_policies.attr,
- NULL
-};
-
-static ssize_t policy_sysfs_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
- struct kgsl_device *device = pwrscale_to_device(pwrscale);
- struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
- ssize_t ret;
-
- if (pattr->show)
- ret = pattr->show(device, pwrscale, buf);
- else
- ret = -EIO;
-
- return ret;
-}
-
-static ssize_t policy_sysfs_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t count)
-{
- struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
- struct kgsl_device *device = pwrscale_to_device(pwrscale);
- struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
- ssize_t ret;
-
- if (pattr->store)
- ret = pattr->store(device, pwrscale, buf, count);
- else
- ret = -EIO;
-
- return ret;
-}
-
-static void policy_sysfs_release(struct kobject *kobj)
-{
-}
-
-static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct kgsl_device *device = to_device(kobj);
- struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
- ssize_t ret;
-
- if (pattr->show)
- ret = pattr->show(device, buf);
- else
- ret = -EIO;
-
- return ret;
-}
-
-static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t count)
-{
- struct kgsl_device *device = to_device(kobj);
- struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
- ssize_t ret;
-
- if (pattr->store)
- ret = pattr->store(device, buf, count);
- else
- ret = -EIO;
-
- return ret;
-}
-
-static void pwrscale_sysfs_release(struct kobject *kobj)
-{
-}
-
-static const struct sysfs_ops policy_sysfs_ops = {
- .show = policy_sysfs_show,
- .store = policy_sysfs_store
-};
-
-static const struct sysfs_ops pwrscale_sysfs_ops = {
- .show = pwrscale_sysfs_show,
- .store = pwrscale_sysfs_store
-};
-
-static struct kobj_type ktype_pwrscale_policy = {
- .sysfs_ops = &policy_sysfs_ops,
- .default_attrs = NULL,
- .release = policy_sysfs_release
-};
-
-static struct kobj_type ktype_pwrscale = {
- .sysfs_ops = &pwrscale_sysfs_ops,
- .default_attrs = pwrscale_attrs,
- .release = pwrscale_sysfs_release
-};
-
-#define PWRSCALE_ACTIVE(_d) \
- ((_d)->pwrscale.policy && (_d)->pwrscale.enabled)
-
+/*
+ * kgsl_pwrscale_sleep - notify governor that device is going off
+ * @device: The device
+ *
+ * Called shortly after all pending work is completed.
+ */
void kgsl_pwrscale_sleep(struct kgsl_device *device)
{
- if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->sleep)
- device->pwrscale.policy->sleep(device, &device->pwrscale);
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ if (!device->pwrscale.enabled)
+ return;
+ device->pwrscale.time = device->pwrscale.on_time = 0;
+
+ /* to call devfreq_suspend_device() from a kernel thread */
+ queue_work(device->pwrscale.devfreq_wq,
+ &device->pwrscale.devfreq_suspend_ws);
}
EXPORT_SYMBOL(kgsl_pwrscale_sleep);
+/*
+ * kgsl_pwrscale_wake - notify governor that device is going on
+ * @device: The device
+ *
+ * Called when the device is returning to an active state.
+ */
void kgsl_pwrscale_wake(struct kgsl_device *device)
{
- if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->wake)
- device->pwrscale.policy->wake(device, &device->pwrscale);
+ struct kgsl_power_stats stats;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (!device->pwrscale.enabled)
+ return;
+ /* clear old stats before waking */
+ memset(&device->pwrscale.accum_stats, 0,
+ sizeof(device->pwrscale.accum_stats));
+
+ /* and any hw activity from waking up*/
+ device->ftbl->power_stats(device, &stats);
+
+ device->pwrscale.time = ktime_to_us(ktime_get());
+
+ device->pwrscale.next_governor_call = 0;
+
+ /* to call devfreq_resume_device() from a kernel thread */
+ queue_work(device->pwrscale.devfreq_wq,
+ &device->pwrscale.devfreq_resume_ws);
}
EXPORT_SYMBOL(kgsl_pwrscale_wake);
+/*
+ * kgsl_pwrscale_busy - update pwrscale state for new work
+ * @device: The device
+ *
+ * Called when new work is submitted to the device.
+ * This function must be called with the device mutex locked.
+ */
void kgsl_pwrscale_busy(struct kgsl_device *device)
{
- if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
- device->pwrscale.policy->busy(device,
- &device->pwrscale);
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ if (!device->pwrscale.enabled)
+ return;
+ if (device->pwrscale.on_time == 0)
+ device->pwrscale.on_time = ktime_to_us(ktime_get());
}
EXPORT_SYMBOL(kgsl_pwrscale_busy);
-void kgsl_pwrscale_idle(struct kgsl_device *device)
+/*
+ * kgsl_pwrscale_update - update device busy statistics
+ * @device: The device
+ *
+ * Read hardware busy counters when the device is likely to be
+ * on and accumulate the results between devfreq get_dev_status
+ * calls. This limits the need to turn on clocks to read these
+ * values for governors that run independently of hardware
+ * activity (for example, by time based polling).
+ */
+void kgsl_pwrscale_update(struct kgsl_device *device)
{
- if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
- if (device->state == KGSL_STATE_ACTIVE)
- device->pwrscale.policy->idle(device,
- &device->pwrscale);
-}
-EXPORT_SYMBOL(kgsl_pwrscale_idle);
+ struct kgsl_power_stats stats;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ if (!device->pwrscale.enabled)
+ return;
+
+ if (device->pwrscale.next_governor_call == 0)
+ device->pwrscale.next_governor_call = jiffies;
+
+ if (time_before(jiffies, device->pwrscale.next_governor_call))
+ return;
+
+ device->pwrscale.next_governor_call = jiffies
+ + msecs_to_jiffies(KGSL_GOVERNOR_CALL_INTERVAL);
+
+ if (device->state == KGSL_STATE_ACTIVE) {
+ device->ftbl->power_stats(device, &stats);
+ device->pwrscale.accum_stats.busy_time += stats.busy_time;
+ device->pwrscale.accum_stats.ram_time += stats.ram_time;
+ device->pwrscale.accum_stats.ram_wait += stats.ram_wait;
+ }
+
+ /* to call srcu_notifier_call_chain() from a kernel thread */
+ if (device->requested_state != KGSL_STATE_SLUMBER)
+ queue_work(device->pwrscale.devfreq_wq,
+ &device->pwrscale.devfreq_notify_ws);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_update);
+
+/*
+ * kgsl_pwrscale_disable - temporarily disable the governor
+ * @device: The device
+ *
+ * Temporarily disable the governor, to prevent interference
+ * with profiling tools that expect a fixed clock frequency.
+ * This function must be called with the device mutex locked.
+ */
void kgsl_pwrscale_disable(struct kgsl_device *device)
{
- device->pwrscale.enabled = 0;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (device->pwrscale.enabled) {
+ queue_work(device->pwrscale.devfreq_wq,
+ &device->pwrscale.devfreq_suspend_ws);
+ device->pwrscale.enabled = false;
+ kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+ }
}
EXPORT_SYMBOL(kgsl_pwrscale_disable);
+/*
+ * kgsl_pwrscale_enable - re-enable the governor
+ * @device: The device
+ *
+ * Reenable the governor after a kgsl_pwrscale_disable() call.
+ * This function must be called with the device mutex locked.
+ */
void kgsl_pwrscale_enable(struct kgsl_device *device)
{
- device->pwrscale.enabled = 1;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (!device->pwrscale.enabled) {
+ device->pwrscale.enabled = true;
+ queue_work(device->pwrscale.devfreq_wq,
+ &device->pwrscale.devfreq_resume_ws);
+ }
}
EXPORT_SYMBOL(kgsl_pwrscale_enable);
-int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale,
- struct attribute_group *attr_group)
+/*
+ * kgsl_devfreq_target - devfreq_dev_profile.target callback
+ * @dev: see devfreq.h
+ * @freq: see devfreq.h
+ * @flags: see devfreq.h
+ *
+ * This function expects the device mutex to be unlocked.
+ */
+int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
- int ret;
+ struct kgsl_device *device = dev_get_drvdata(dev);
+ struct kgsl_pwrctrl *pwr;
+ int level, i, b;
+ unsigned long cur_freq;
- ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
- "%s", pwrscale->policy->name);
+ if (device == NULL)
+ return -ENODEV;
+ if (freq == NULL)
+ return -EINVAL;
+ if (!device->pwrscale.enabled)
+ return 0;
- if (ret)
- return ret;
+ pwr = &device->pwrctrl;
- ret = sysfs_create_group(&pwrscale->kobj, attr_group);
+ mutex_lock(&device->mutex);
+ cur_freq = kgsl_pwrctrl_active_freq(pwr);
+ level = pwr->active_pwrlevel;
- if (ret) {
- kobject_del(&pwrscale->kobj);
- kobject_put(&pwrscale->kobj);
- }
-
- return ret;
-}
-
-void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale,
- struct attribute_group *attr_group)
-{
- sysfs_remove_group(&pwrscale->kobj, attr_group);
- kobject_del(&pwrscale->kobj);
- kobject_put(&pwrscale->kobj);
-}
-
-static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
-{
- if (device->pwrscale.policy != NULL) {
- device->pwrscale.policy->close(device, &device->pwrscale);
-
+ if (*freq != cur_freq) {
+ level = pwr->max_pwrlevel;
+ for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
+ if (*freq <= pwr->pwrlevels[i].gpu_freq) {
+ level = i;
+ break;
+ }
+ } else if (flags && pwr->bus_control) {
/*
- * Try to set max pwrlevel which will be limited to thermal by
- * kgsl_pwrctrl_pwrlevel_change if thermal is indeed lower
+ * Signal for faster or slower bus. If KGSL isn't already
+ * running at the desired speed for the given level, modify
+ * its vote.
*/
-
- kgsl_pwrctrl_pwrlevel_change(device,
- device->pwrctrl.max_pwrlevel);
- device->pwrctrl.default_pwrlevel =
- device->pwrctrl.max_pwrlevel;
+ b = pwr->bus_mod;
+ if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
+ (pwr->bus_mod != FAST_BUS))
+ pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
+ 0 : FAST_BUS;
+ else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
+ (pwr->bus_mod != SLOW_BUS))
+ pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
+ 0 : SLOW_BUS;
+ if (pwr->bus_mod != b)
+ kgsl_pwrctrl_buslevel_update(device, true);
}
- device->pwrscale.policy = NULL;
-}
-void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
-{
- mutex_lock(&device->mutex);
- _kgsl_pwrscale_detach_policy(device);
+ kgsl_pwrctrl_pwrlevel_change(device, level);
+ *freq = kgsl_pwrctrl_active_freq(pwr);
+
mutex_unlock(&device->mutex);
+ return 0;
}
-EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
+EXPORT_SYMBOL(kgsl_devfreq_target);
-int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
- struct kgsl_pwrscale_policy *policy)
+/*
+ * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
+ * @dev: see devfreq.h
+ * @stat: see devfreq.h
+ *
+ *
+ * This function expects the device mutex to be unlocked.
+ */
+int kgsl_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
{
- int ret = 0;
+ struct kgsl_device *device = dev_get_drvdata(dev);
+ struct kgsl_pwrscale *pwrscale;
+ s64 tmp;
+
+ if (device == NULL)
+ return -ENODEV;
+ if (stat == NULL)
+ return -EINVAL;
+
+ pwrscale = &device->pwrscale;
mutex_lock(&device->mutex);
-
- if (device->pwrscale.policy == policy)
- goto done;
-
- if (device->pwrctrl.num_pwrlevels < 3) {
- ret = -EINVAL;
- goto done;
+ /* make sure we don't turn on clocks just to read stats */
+ if (device->state == KGSL_STATE_ACTIVE) {
+ struct kgsl_power_stats extra;
+ device->ftbl->power_stats(device, &extra);
+ device->pwrscale.accum_stats.busy_time += extra.busy_time;
+ device->pwrscale.accum_stats.ram_time += extra.ram_time;
+ device->pwrscale.accum_stats.ram_wait += extra.ram_wait;
}
- if (device->pwrscale.policy != NULL)
- _kgsl_pwrscale_detach_policy(device);
+ tmp = ktime_to_us(ktime_get());
+ stat->total_time = tmp - pwrscale->time;
+ pwrscale->time = tmp;
- device->pwrscale.policy = policy;
+ stat->busy_time = pwrscale->accum_stats.busy_time;
- device->pwrctrl.default_pwrlevel =
- device->pwrctrl.init_pwrlevel;
- /* Pwrscale is enabled by default at attach time */
- kgsl_pwrscale_enable(device);
+ stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);
- if (policy) {
- ret = device->pwrscale.policy->init(device, &device->pwrscale);
- if (ret)
- device->pwrscale.policy = NULL;
+ if (stat->private_data) {
+ struct xstats *b = (struct xstats *)stat->private_data;
+ b->ram_time = device->pwrscale.accum_stats.ram_time;
+ b->ram_wait = device->pwrscale.accum_stats.ram_wait;
+ b->mod = device->pwrctrl.bus_mod;
}
-done:
+ trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);
+ memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));
+
mutex_unlock(&device->mutex);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
+EXPORT_SYMBOL(kgsl_devfreq_get_dev_status);
-int kgsl_pwrscale_init(struct kgsl_device *device)
+/*
+ * kgsl_devfreq_get_cur_freq - devfreq_dev_profile.get_cur_freq callback
+ * @dev: see devfreq.h
+ * @freq: see devfreq.h
+ *
+ *
+ * This function expects the device mutex to be unlocked.
+ */
+int kgsl_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
+ struct kgsl_device *device = dev_get_drvdata(dev);
+
+ if (device == NULL)
+ return -ENODEV;
+ if (freq == NULL)
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+ *freq = kgsl_pwrctrl_active_freq(&device->pwrctrl);
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_devfreq_get_cur_freq);
+
+/*
+ * kgsl_devfreq_add_notifier - add a fine grained notifier.
+ * @dev: The device
+ * @nb: Notifier block that will receive updates.
+ *
+ * Add a notifier to receive ADRENO_DEVFREQ_NOTIFY_* events
+ * from the device.
+ */
+int kgsl_devfreq_add_notifier(struct device *dev, struct notifier_block *nb)
+{
+ struct kgsl_device *device = dev_get_drvdata(dev);
+
+ if (device == NULL)
+ return -ENODEV;
+
+ if (nb == NULL)
+ return -EINVAL;
+
+ return srcu_notifier_chain_register(&device->pwrscale.nh, nb);
+}
+
+void kgsl_pwrscale_idle(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ queue_work(device->pwrscale.devfreq_wq,
+ &device->pwrscale.devfreq_notify_ws);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_idle);
+
+/*
+ * kgsl_devfreq_del_notifier - remove a fine grained notifier.
+ * @dev: The device
+ * @nb: The notifier block.
+ *
+ * Remove a notifier registered with kgsl_devfreq_add_notifier().
+ */
+int kgsl_devfreq_del_notifier(struct device *dev, struct notifier_block *nb)
+{
+ struct kgsl_device *device = dev_get_drvdata(dev);
+
+ if (device == NULL)
+ return -ENODEV;
+
+ if (nb == NULL)
+ return -EINVAL;
+
+ return srcu_notifier_chain_unregister(&device->pwrscale.nh, nb);
+}
+EXPORT_SYMBOL(kgsl_devfreq_del_notifier);
+
+/*
+ * kgsl_pwrscale_init - Initialize pwrscale.
+ * @dev: The device
+ * @governor: The initial governor to use.
+ *
+ * Initialize devfreq and any non-constant profile data.
+ */
+int kgsl_pwrscale_init(struct device *dev, const char *governor)
+{
+ struct kgsl_device *device;
+ struct kgsl_pwrscale *pwrscale;
+ struct kgsl_pwrctrl *pwr;
+ struct devfreq *devfreq;
+ struct devfreq_dev_profile *profile;
+ struct devfreq_msm_adreno_tz_data *data;
+ int i, out = 0;
int ret;
- ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
- &device->dev->kobj, "pwrscale");
+ device = dev_get_drvdata(dev);
+ if (device == NULL)
+ return -ENODEV;
- if (ret)
- return ret;
+ pwrscale = &device->pwrscale;
+ pwr = &device->pwrctrl;
+ profile = &pwrscale->profile;
- kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
- return ret;
+ srcu_init_notifier_head(&pwrscale->nh);
+
+ profile->initial_freq =
+ pwr->pwrlevels[pwr->default_pwrlevel].gpu_freq;
+ /* Let's start with 10 ms and tune in later */
+ profile->polling_ms = 10;
+
+ /* do not include the 'off' level or duplicate freq. levels */
+ for (i = 0; i < (pwr->num_pwrlevels - 1); i++)
+ pwrscale->freq_table[out++] = pwr->pwrlevels[i].gpu_freq;
+
+ profile->max_state = out;
+ /* link storage array to the devfreq profile pointer */
+ profile->freq_table = pwrscale->freq_table;
+
+ /* if there is only 1 freq, no point in running a governor */
+ if (profile->max_state == 1)
+ governor = "performance";
+
+ /* initialize any governor specific data here */
+ for (i = 0; i < profile->num_governor_data; i++) {
+ if (strncmp("msm-adreno-tz",
+ profile->governor_data[i].name,
+ DEVFREQ_NAME_LEN) == 0) {
+ data = (struct devfreq_msm_adreno_tz_data *)
+ profile->governor_data[i].data;
+ /*
+ * If there is a separate GX power rail, allow
+ * independent modification to its voltage through
+ * the bus bandwidth vote.
+ */
+ if (pwr->bus_control) {
+ out = 0;
+ while (pwr->bus_ib[out]) {
+ pwr->bus_ib[out] =
+ pwr->bus_ib[out] >> 20;
+ out++;
+ }
+ data->bus.num = out;
+ data->bus.ib = &pwr->bus_ib[0];
+ data->bus.index = &pwr->bus_index[0];
+ printk("kgsl: num bus is %d\n", out);
+ } else {
+ data->bus.num = 0;
+ }
+ }
+ }
+
+ devfreq = devfreq_add_device(dev, &pwrscale->profile, governor, NULL);
+ if (IS_ERR(devfreq))
+ return PTR_ERR(devfreq);
+
+ pwrscale->devfreq = devfreq;
+
+ ret = sysfs_create_link(&device->dev->kobj,
+ &devfreq->dev.kobj, "devfreq");
+
+ pwrscale->devfreq_wq = create_freezable_workqueue("kgsl_devfreq_wq");
+ INIT_WORK(&pwrscale->devfreq_suspend_ws, do_devfreq_suspend);
+ INIT_WORK(&pwrscale->devfreq_resume_ws, do_devfreq_resume);
+ INIT_WORK(&pwrscale->devfreq_notify_ws, do_devfreq_notify);
+
+ pwrscale->next_governor_call = 0;
+
+ return 0;
}
EXPORT_SYMBOL(kgsl_pwrscale_init);
+/*
+ * kgsl_pwrscale_close - clean up pwrscale
+ * @device: the device
+ *
+ * This function should be called with the device mutex locked.
+ */
void kgsl_pwrscale_close(struct kgsl_device *device)
{
- kobject_put(&device->pwrscale_kobj);
+ struct kgsl_pwrscale *pwrscale;
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ pwrscale = &device->pwrscale;
+ flush_workqueue(pwrscale->devfreq_wq);
+ destroy_workqueue(pwrscale->devfreq_wq);
+ devfreq_remove_device(device->pwrscale.devfreq);
+ device->pwrscale.devfreq = NULL;
+ srcu_cleanup_notifier_head(&device->pwrscale.nh);
}
EXPORT_SYMBOL(kgsl_pwrscale_close);
+
+static void do_devfreq_suspend(struct work_struct *work)
+{
+ struct kgsl_pwrscale *pwrscale = container_of(work,
+ struct kgsl_pwrscale, devfreq_suspend_ws);
+ struct devfreq *devfreq = pwrscale->devfreq;
+
+ devfreq_suspend_device(devfreq);
+}
+
+static void do_devfreq_resume(struct work_struct *work)
+{
+ struct kgsl_pwrscale *pwrscale = container_of(work,
+ struct kgsl_pwrscale, devfreq_resume_ws);
+ struct devfreq *devfreq = pwrscale->devfreq;
+
+ devfreq_resume_device(devfreq);
+}
+
+static void do_devfreq_notify(struct work_struct *work)
+{
+ struct kgsl_pwrscale *pwrscale = container_of(work,
+ struct kgsl_pwrscale, devfreq_notify_ws);
+ struct devfreq *devfreq = pwrscale->devfreq;
+ srcu_notifier_call_chain(&pwrscale->nh,
+ ADRENO_DEVFREQ_NOTIFY_RETIRE,
+ devfreq);
+}
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index f17b394..866964c 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,68 +14,58 @@
#ifndef __KGSL_PWRSCALE_H
#define __KGSL_PWRSCALE_H
-struct kgsl_pwrscale;
+#include <linux/devfreq.h>
+#include <linux/msm_adreno_devfreq.h>
-struct kgsl_pwrscale_policy {
- const char *name;
- int (*init)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
- void (*close)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
- void (*idle)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
- void (*busy)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
- void (*sleep)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
- void (*wake)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
+/* devfreq governor call window in msec */
+#define KGSL_GOVERNOR_CALL_INTERVAL 5
+
+struct kgsl_power_stats {
+ u64 busy_time;
+ u64 ram_time;
+ u64 ram_wait;
};
struct kgsl_pwrscale {
- struct kgsl_pwrscale_policy *policy;
- struct kobject kobj;
- void *priv;
- int enabled;
+ struct devfreq *devfreq;
+ struct devfreq_dev_profile profile;
+ unsigned int freq_table[KGSL_MAX_PWRLEVELS];
+ char last_governor[DEVFREQ_NAME_LEN];
+ struct kgsl_power_stats accum_stats;
+ bool enabled;
+ s64 time;
+ s64 on_time;
+ struct srcu_notifier_head nh;
+ struct workqueue_struct *devfreq_wq;
+ struct work_struct devfreq_suspend_ws;
+ struct work_struct devfreq_resume_ws;
+ struct work_struct devfreq_notify_ws;
+ unsigned long next_governor_call;
};
-struct kgsl_pwrscale_policy_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale, char *buf);
- ssize_t (*store)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale, const char *buf,
- size_t count);
-};
-
-#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store) \
- struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
-extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz;
-extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats;
-extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_msm;
-
-int kgsl_pwrscale_init(struct kgsl_device *device);
+int kgsl_pwrscale_init(struct device *dev, const char *governor);
void kgsl_pwrscale_close(struct kgsl_device *device);
-int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
- struct kgsl_pwrscale_policy *policy);
-void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
-
-void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_update(struct kgsl_device *device);
void kgsl_pwrscale_busy(struct kgsl_device *device);
+void kgsl_pwrscale_idle(struct kgsl_device *device);
void kgsl_pwrscale_sleep(struct kgsl_device *device);
void kgsl_pwrscale_wake(struct kgsl_device *device);
void kgsl_pwrscale_enable(struct kgsl_device *device);
void kgsl_pwrscale_disable(struct kgsl_device *device);
-int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale,
- struct attribute_group *attr_group);
+int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags);
+int kgsl_devfreq_get_dev_status(struct device *, struct devfreq_dev_status *);
+int kgsl_devfreq_get_cur_freq(struct device *dev, unsigned long *freq);
-void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale,
- struct attribute_group *attr_group);
+#define KGSL_PWRSCALE_INIT(_gov_list, _num_gov) { \
+ .enabled = true, \
+ .profile = { \
+ .target = kgsl_devfreq_target, \
+ .get_dev_status = kgsl_devfreq_get_dev_status, \
+ .get_cur_freq = kgsl_devfreq_get_cur_freq, \
+ .governor_data = (_gov_list), \
+ .num_governor_data = (_num_gov), \
+ } }
#endif
diff --git a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
deleted file mode 100644
index c3188a5..0000000
--- a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/idle_stats_device.h>
-#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-#include <linux/cpumask.h>
-#include <linux/tick.h>
-
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_device.h"
-
-#define MAX_CORES 4
-struct _cpu_info {
- spinlock_t lock;
- struct notifier_block cpu_nb;
- u64 start[MAX_CORES];
- u64 end[MAX_CORES];
- int curr_freq[MAX_CORES];
- int max_freq[MAX_CORES];
-};
-
-struct idlestats_priv {
- char name[32];
- struct msm_idle_stats_device idledev;
- struct kgsl_device *device;
- struct msm_idle_pulse pulse;
- struct _cpu_info cpu_info;
-};
-
-static int idlestats_cpufreq_notifier(
- struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct _cpu_info *cpu = container_of(nb,
- struct _cpu_info, cpu_nb);
- struct cpufreq_freqs *freq = data;
-
- if (val != CPUFREQ_POSTCHANGE)
- return 0;
-
- spin_lock(&cpu->lock);
- if (freq->cpu < num_possible_cpus())
- cpu->curr_freq[freq->cpu] = freq->new / 1000;
- spin_unlock(&cpu->lock);
-
- return 0;
-}
-
-static void idlestats_get_sample(struct msm_idle_stats_device *idledev,
- struct msm_idle_pulse *pulse)
-{
- struct kgsl_power_stats stats;
- struct idlestats_priv *priv = container_of(idledev,
- struct idlestats_priv, idledev);
- struct kgsl_device *device = priv->device;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-
- mutex_lock(&device->mutex);
- /* If the GPU is asleep, don't wake it up - assume that we
- are idle */
-
- if (device->state == KGSL_STATE_ACTIVE) {
- device->ftbl->power_stats(device, &stats);
- pulse->busy_start_time = pwr->time - stats.busy_time;
- pulse->busy_interval = stats.busy_time;
- } else {
- pulse->busy_start_time = pwr->time;
- pulse->busy_interval = 0;
- }
- pulse->wait_interval = 0;
- mutex_unlock(&device->mutex);
-}
-
-static void idlestats_busy(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct idlestats_priv *priv = pwrscale->priv;
- struct kgsl_power_stats stats;
- int i, busy, nr_cpu = 1;
-
- if (priv->pulse.busy_start_time != 0) {
- priv->pulse.wait_interval = 0;
- /* Calculate the total CPU busy time for this GPU pulse */
- for (i = 0; i < num_possible_cpus(); i++) {
- spin_lock(&priv->cpu_info.lock);
- if (cpu_online(i)) {
- priv->cpu_info.end[i] =
- (u64)ktime_to_us(ktime_get()) -
- get_cpu_idle_time_us(i, NULL);
- busy = priv->cpu_info.end[i] -
- priv->cpu_info.start[i];
- /* Normalize the busy time by frequency */
- busy = priv->cpu_info.curr_freq[i] *
- (busy / priv->cpu_info.max_freq[i]);
- priv->pulse.wait_interval += busy;
- nr_cpu++;
- }
- spin_unlock(&priv->cpu_info.lock);
- }
- priv->pulse.wait_interval /= nr_cpu;
-
- /* This is called from within a mutex protected function, so
- no additional locking required */
- device->ftbl->power_stats(device, &stats);
-
- /* If total_time is zero, then we don't have
- any interesting statistics to store */
- if (stats.total_time == 0) {
- priv->pulse.busy_start_time = 0;
- return;
- }
-
- priv->pulse.busy_interval = stats.busy_time;
- msm_idle_stats_idle_end(&priv->idledev, &priv->pulse);
- }
- priv->pulse.busy_start_time = ktime_to_us(ktime_get());
-}
-
-static void idlestats_idle(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- int i, nr_cpu;
- struct idlestats_priv *priv = pwrscale->priv;
-
- nr_cpu = num_possible_cpus();
- for (i = 0; i < nr_cpu; i++)
- if (cpu_online(i))
- priv->cpu_info.start[i] =
- (u64)ktime_to_us(ktime_get()) -
- get_cpu_idle_time_us(i, NULL);
-
- msm_idle_stats_idle_start(&priv->idledev);
-}
-
-static void idlestats_sleep(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct idlestats_priv *priv = pwrscale->priv;
- msm_idle_stats_update_event(&priv->idledev,
- MSM_IDLE_STATS_EVENT_IDLE_TIMER_EXPIRED);
-}
-
-static void idlestats_wake(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- /* Use highest perf level on wake-up from
- sleep for better performance */
- kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
-}
-
-static int idlestats_init(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct idlestats_priv *priv;
- struct cpufreq_policy cpu_policy;
- int ret, i;
-
- priv = pwrscale->priv = kzalloc(sizeof(struct idlestats_priv),
- GFP_KERNEL);
- if (pwrscale->priv == NULL)
- return -ENOMEM;
-
- snprintf(priv->name, sizeof(priv->name), "idle_stats_%s",
- device->name);
-
- priv->device = device;
-
- priv->idledev.name = (const char *) priv->name;
- priv->idledev.get_sample = idlestats_get_sample;
-
- spin_lock_init(&priv->cpu_info.lock);
- priv->cpu_info.cpu_nb.notifier_call =
- idlestats_cpufreq_notifier;
- ret = cpufreq_register_notifier(&priv->cpu_info.cpu_nb,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret)
- goto err;
- for (i = 0; i < num_possible_cpus(); i++) {
- cpufreq_frequency_table_cpuinfo(&cpu_policy,
- cpufreq_frequency_get_table(i));
- priv->cpu_info.max_freq[i] = cpu_policy.max / 1000;
- priv->cpu_info.curr_freq[i] = cpu_policy.max / 1000;
- }
- ret = msm_idle_stats_register_device(&priv->idledev);
-err:
- if (ret) {
- kfree(pwrscale->priv);
- pwrscale->priv = NULL;
- }
-
- return ret;
-}
-
-static void idlestats_close(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct idlestats_priv *priv = pwrscale->priv;
-
- if (pwrscale->priv == NULL)
- return;
-
- cpufreq_unregister_notifier(&priv->cpu_info.cpu_nb,
- CPUFREQ_TRANSITION_NOTIFIER);
- msm_idle_stats_deregister_device(&priv->idledev);
-
- kfree(pwrscale->priv);
- pwrscale->priv = NULL;
-}
-
-struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats = {
- .name = "idlestats",
- .init = idlestats_init,
- .idle = idlestats_idle,
- .busy = idlestats_busy,
- .sleep = idlestats_sleep,
- .wake = idlestats_wake,
- .close = idlestats_close
-};
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
deleted file mode 100644
index 7f8a6b1..0000000
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <mach/socinfo.h>
-#include <mach/scm.h>
-
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_device.h"
-
-#define TZ_GOVERNOR_PERFORMANCE 0
-#define TZ_GOVERNOR_ONDEMAND 1
-
-struct tz_priv {
- int governor;
- struct kgsl_power_stats bin;
- unsigned int idle_dcvs;
-};
-spinlock_t tz_lock;
-
-/* FLOOR is 5msec to capture up to 3 re-draws
- * per frame for 60fps content.
- */
-#define FLOOR 5000
-/* CEILING is 50msec, larger than any standard
- * frame length, but less than the idle timer.
- */
-#define CEILING 50000
-#define TZ_RESET_ID 0x3
-#define TZ_UPDATE_ID 0x4
-#define TZ_INIT_ID 0x6
-
-/* Trap into the TrustZone, and call funcs there. */
-static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2)
-{
- int ret;
- spin_lock(&tz_lock);
- /* sync memory before sending the commands to tz*/
- __iowmb();
- ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2);
- spin_unlock(&tz_lock);
- return ret;
-}
-
-static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2,
- u32 val3)
-{
- int ret;
- spin_lock(&tz_lock);
- /* sync memory before sending the commands to tz*/
- __iowmb();
- ret = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2,
- val3);
- spin_unlock(&tz_lock);
- return ret;
-}
-
-static ssize_t tz_governor_show(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale,
- char *buf)
-{
- struct tz_priv *priv = pwrscale->priv;
- int ret;
-
- if (priv->governor == TZ_GOVERNOR_ONDEMAND)
- ret = snprintf(buf, 10, "ondemand\n");
- else
- ret = snprintf(buf, 13, "performance\n");
-
- return ret;
-}
-
-static ssize_t tz_governor_store(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale,
- const char *buf, size_t count)
-{
- char str[20];
- struct tz_priv *priv = pwrscale->priv;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int ret;
-
- ret = sscanf(buf, "%20s", str);
- if (ret != 1)
- return -EINVAL;
-
- mutex_lock(&device->mutex);
-
- if (!strncmp(str, "ondemand", 8))
- priv->governor = TZ_GOVERNOR_ONDEMAND;
- else if (!strncmp(str, "performance", 11))
- priv->governor = TZ_GOVERNOR_PERFORMANCE;
-
- if (priv->governor == TZ_GOVERNOR_PERFORMANCE) {
- kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
- pwr->default_pwrlevel = pwr->max_pwrlevel;
- } else {
- pwr->default_pwrlevel = pwr->init_pwrlevel;
- }
-
- mutex_unlock(&device->mutex);
- return count;
-}
-
-PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
-
-static struct attribute *tz_attrs[] = {
- &policy_attr_governor.attr,
- NULL
-};
-
-static struct attribute_group tz_attr_group = {
- .attrs = tz_attrs,
-};
-
-static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
-{
- struct tz_priv *priv = pwrscale->priv;
- if (device->state != KGSL_STATE_NAP &&
- priv->governor == TZ_GOVERNOR_ONDEMAND)
- kgsl_pwrctrl_pwrlevel_change(device,
- device->pwrctrl.default_pwrlevel);
-}
-
-static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
-{
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct tz_priv *priv = pwrscale->priv;
- struct kgsl_power_stats stats;
- int val, idle;
-
- /* In "performance" mode the clock speed always stays
- the same */
- if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
- return;
-
- device->ftbl->power_stats(device, &stats);
- priv->bin.total_time += stats.total_time;
- priv->bin.busy_time += stats.busy_time;
- /* Do not waste CPU cycles running this algorithm if
- * the GPU just started, or if less than FLOOR time
- * has passed since the last run.
- */
- if ((stats.total_time == 0) ||
- (priv->bin.total_time < FLOOR))
- return;
-
- /* If there is an extended block of busy processing, set
- * frequency to turbo. Otherwise run the normal algorithm.
- */
- if (priv->bin.busy_time > CEILING) {
- val = 0;
- kgsl_pwrctrl_pwrlevel_change(device,
- KGSL_PWRLEVEL_TURBO);
- } else if (priv->idle_dcvs) {
- idle = priv->bin.total_time - priv->bin.busy_time;
- idle = (idle > 0) ? idle : 0;
- val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id);
- } else {
- if (pwr->step_mul > 1)
- val = __secure_tz_entry3(TZ_UPDATE_ID,
- (pwr->active_pwrlevel + 1)/2,
- priv->bin.total_time, priv->bin.busy_time);
- else
- val = __secure_tz_entry3(TZ_UPDATE_ID,
- pwr->active_pwrlevel,
- priv->bin.total_time, priv->bin.busy_time);
- }
-
- priv->bin.total_time = 0;
- priv->bin.busy_time = 0;
-
- /* If the decision is to move to a lower level, make sure the GPU
- * frequency drops.
- */
- if (val > 0)
- val *= pwr->step_mul;
- if (val)
- kgsl_pwrctrl_pwrlevel_change(device,
- pwr->active_pwrlevel + val);
-}
-
-static void tz_busy(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- device->on_time = ktime_to_us(ktime_get());
-}
-
-static void tz_sleep(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
-{
- struct tz_priv *priv = pwrscale->priv;
-
- __secure_tz_entry2(TZ_RESET_ID, 0, 0);
- priv->bin.total_time = 0;
- priv->bin.busy_time = 0;
-}
-
-#ifdef CONFIG_MSM_SCM
-static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
-{
- int i = 0, j = 1, ret = 0;
- struct tz_priv *priv;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int tz_pwrlevels[KGSL_MAX_PWRLEVELS + 1];
-
- priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
- if (pwrscale->priv == NULL)
- return -ENOMEM;
- priv->idle_dcvs = 0;
- priv->governor = TZ_GOVERNOR_ONDEMAND;
- spin_lock_init(&tz_lock);
- kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
- for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
- if (i == 0)
- tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq;
- else if (pwr->pwrlevels[i].gpu_freq !=
- pwr->pwrlevels[i - 1].gpu_freq) {
- j++;
- tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq;
- }
- }
- tz_pwrlevels[0] = j;
- ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
- sizeof(tz_pwrlevels), NULL, 0);
- if (ret) {
- KGSL_DRV_ERR(device, "Fall back to idle based GPU DCVS algo");
- priv->idle_dcvs = 1;
- }
- return 0;
-}
-#else
-static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_MSM_SCM */
-
-static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
-{
- kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
- kfree(pwrscale->priv);
- pwrscale->priv = NULL;
-}
-
-struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
- .name = "trustzone",
- .init = tz_init,
- .busy = tz_busy,
- .idle = tz_idle,
- .sleep = tz_sleep,
- .wake = tz_wake,
- .close = tz_close
-};
-EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 3986c61..505be69 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -141,6 +141,9 @@
static inline void *kgsl_sg_alloc(unsigned int sglen)
{
+ if (sglen >= ULONG_MAX / sizeof(struct scatterlist))
+ return NULL;
+
if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
else
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 5f39b8b..c737cc8 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -796,6 +796,37 @@
)
);
+
+TRACE_EVENT(kgsl_pwrstats,
+ TP_PROTO(struct kgsl_device *device, s64 time,
+ struct kgsl_power_stats *pstats),
+
+ TP_ARGS(device, time, pstats),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(s64, total_time)
+ __field(u64, busy_time)
+ __field(u64, ram_time)
+ __field(u64, ram_wait)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->total_time = time;
+ __entry->busy_time = pstats->busy_time;
+ __entry->ram_time = pstats->ram_time;
+ __entry->ram_wait = pstats->ram_wait;
+ ),
+
+ TP_printk(
+ "d_name=%s total=%lld busy=%lld ram_time=%lld ram_wait=%lld",
+ __get_str(device_name), __entry->total_time, __entry->busy_time,
+ __entry->ram_time, __entry->ram_wait
+ )
+);
+
+
#endif /* _KGSL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index ae7aee0..270a7a6 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -94,7 +94,7 @@
#define Z180_CMDWINDOW_ADDR_SHIFT 8
static int z180_init(struct kgsl_device *device);
-static int z180_start(struct kgsl_device *device);
+static int z180_start(struct kgsl_device *device, int priority);
static int z180_stop(struct kgsl_device *device);
static int z180_wait(struct kgsl_device *device,
struct kgsl_context *context,
@@ -559,8 +559,7 @@
if (status)
goto error_close_ringbuffer;
- kgsl_pwrscale_init(device);
- kgsl_pwrscale_attach_policy(device, Z180_DEFAULT_PWRSCALE_POLICY);
+ kgsl_pwrscale_init(&pdev->dev, CONFIG_MSM_Z180_DEFAULT_GOVERNOR);
return status;
@@ -595,7 +594,7 @@
return 0;
}
-static int z180_start(struct kgsl_device *device)
+static int z180_start(struct kgsl_device *device, int priority)
{
int status = 0;
@@ -955,18 +954,16 @@
static void z180_power_stats(struct kgsl_device *device,
struct kgsl_power_stats *stats)
{
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct kgsl_pwrscale *pwrscale = &device->pwrscale;
s64 tmp = ktime_to_us(ktime_get());
- if (pwr->time == 0) {
- pwr->time = tmp;
- stats->total_time = 0;
+	memset(stats, 0, sizeof(*stats));
+ if (pwrscale->on_time == 0) {
+ pwrscale->on_time = tmp;
stats->busy_time = 0;
} else {
- stats->total_time = tmp - pwr->time;
- pwr->time = tmp;
- stats->busy_time = tmp - device->on_time;
- device->on_time = tmp;
+ stats->busy_time = tmp - pwrscale->on_time;
+ pwrscale->on_time = tmp;
}
}
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index a36e92d..5b54445 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -26,8 +26,6 @@
#define Z180_DEVICE(device) \
KGSL_CONTAINER_OF(device, struct z180_device, dev)
-#define Z180_DEFAULT_PWRSCALE_POLICY NULL
-
/* Wait a maximum of 10 seconds when trying to idle the core */
#define Z180_IDLE_TIMEOUT (20 * 1000)
diff --git a/drivers/input/misc/cm36283.c b/drivers/input/misc/cm36283.c
index 17127a8..35c92ca 100644
--- a/drivers/input/misc/cm36283.c
+++ b/drivers/input/misc/cm36283.c
@@ -1565,7 +1565,7 @@
__func__, lpi->ls_cmd);
if (pdata->ls_cmd == 0) {
- lpi->ls_cmd = CM36283_ALS_IT_160ms | CM36283_ALS_GAIN_2;
+ lpi->ls_cmd = CM36283_ALS_IT_80ms | CM36283_ALS_GAIN_2;
}
lp_info = lpi;
@@ -1582,17 +1582,17 @@
mutex_init(&ps_get_adc_mutex);
- //SET LUX STEP FACTOR HERE
- // if adc raw value one step = 5/100 = 1/20 = 0.05 lux
- // the following will set the factor 0.05 = 1/20
- // and lpi->golden_adc = 1;
- // set als_kadc = (ALS_CALIBRATED <<16) | 20;
+ /*
+ * SET LUX STEP FACTOR HERE
+ * if adc raw value one step = 5/100 = 1/20 = 0.05 lux
+ * the following will set the factor 0.05 = 1/20
+ * and lpi->golden_adc = 1;
+ * set als_kadc = (ALS_CALIBRATED << 16) | 20;
+ */
- als_kadc = (ALS_CALIBRATED <<16) | 20;
- lpi->golden_adc = 1;
-
- //ls calibrate always set to 1
- lpi->ls_calibrate = 1;
+ als_kadc = (ALS_CALIBRATED << 16) | 10;
+ lpi->golden_adc = 100;
+ lpi->ls_calibrate = 0;
lightsensor_set_kvalue(lpi);
ret = lightsensor_update_table(lpi);
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index f879d78..f5d8441 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/sensors.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -79,6 +80,21 @@
* The following table lists the maximum appropriate poll interval for each
* available output data rate.
*/
+
+static struct sensors_classdev sensors_cdev = {
+ .name = "kxtj9-accel",
+ .vendor = "Kionix",
+ .version = 1,
+ .handle = 0,
+ .type = 1,
+ .max_range = "19.6",
+ .resolution = "0.01",
+ .sensor_power = "0.2",
+ .min_delay = 2000,
+ .fifo_reserved_event_count = 0,
+ .fifo_max_event_count = 0,
+};
+
static const struct {
unsigned int cutoff;
u8 mask;
@@ -415,19 +431,16 @@
}
}
- tj9->enable = true;
return 0;
fail:
kxtj9_device_power_off(tj9);
- tj9->enable = false;
return err;
}
static void kxtj9_disable(struct kxtj9_data *tj9)
{
kxtj9_device_power_off(tj9);
- tj9->enable = false;
}
@@ -496,18 +509,21 @@
if (error)
return error;
mutex_lock(&input_dev->mutex);
- disable_irq(client->irq);
- if (data == 0)
+ if (data == 0) {
+ disable_irq(client->irq);
kxtj9_disable(tj9);
- else if (data == 1)
- kxtj9_enable(tj9);
- else {
+ tj9->enable = false;
+ } else if (data == 1) {
+ if (!kxtj9_enable(tj9)) {
+ enable_irq(client->irq);
+ tj9->enable = true;
+ }
+ } else {
dev_err(&tj9->client->dev,
"Invalid value of input, input=%ld\n", data);
}
- enable_irq(client->irq);
mutex_unlock(&input_dev->mutex);
return count;
@@ -555,7 +571,8 @@
/* Lock the device to prevent races with open/close (and itself) */
mutex_lock(&input_dev->mutex);
- disable_irq(client->irq);
+ if (tj9->enable)
+ disable_irq(client->irq);
/*
* Set current interval to the greater of the minimum interval or
@@ -563,9 +580,10 @@
*/
tj9->last_poll_interval = max(interval, tj9->pdata.min_interval);
- kxtj9_update_odr(tj9, tj9->last_poll_interval);
-
- enable_irq(client->irq);
+ if (tj9->enable) {
+ kxtj9_update_odr(tj9, tj9->last_poll_interval);
+ enable_irq(client->irq);
+ }
mutex_unlock(&input_dev->mutex);
return count;
@@ -840,6 +858,12 @@
tj9->ctrl_reg1 = tj9->pdata.res_ctl | tj9->pdata.g_range;
tj9->last_poll_interval = tj9->pdata.init_interval;
+ err = sensors_classdev_register(&client->dev, &sensors_cdev);
+ if (err) {
+ dev_err(&client->dev, "class device create failed: %d\n", err);
+ goto err_power_off;
+ }
+
if (client->irq) {
/* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
@@ -857,6 +881,8 @@
goto err_destroy_input;
}
+ disable_irq(tj9->client->irq);
+
err = sysfs_create_group(&client->dev.kobj, &kxtj9_attribute_group);
if (err) {
dev_err(&client->dev, "sysfs create failed: %d\n", err);
@@ -923,7 +949,7 @@
mutex_lock(&input_dev->mutex);
- if (input_dev->users)
+ if (input_dev->users && tj9->enable)
kxtj9_disable(tj9);
mutex_unlock(&input_dev->mutex);
@@ -939,7 +965,7 @@
mutex_lock(&input_dev->mutex);
- if (input_dev->users)
+ if (input_dev->users && tj9->enable)
kxtj9_enable(tj9);
mutex_unlock(&input_dev->mutex);
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index a1cac54..937fb8c 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -899,7 +899,9 @@
} else {
int i;
+ spin_lock(&dmxdev->dvr_in_lock);
dmxdev->dvr_in_exit = 1;
+ spin_unlock(&dmxdev->dvr_in_lock);
wake_up_all(&dmxdev->dvr_cmd_buffer.queue);
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
index d6fa2b0..8f080ce 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
@@ -28,6 +28,19 @@
#include "msm_jpeg_common.h"
#include "msm_jpeg_hw.h"
+int msm_jpeg_platform_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ long clk_rate)
+{
+ struct msm_cam_clk_info jpeg_core_clk_info[] = {
+ {"core_clk", JPEG_CLK_RATE, 0}
+ };
+
+ jpeg_core_clk_info[0].clk_rate = clk_rate;
+
+ return msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_core_clk_info,
+ pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_core_clk_info), 1);
+}
+
void msm_jpeg_platform_p2v(struct msm_jpeg_device *pgmn_dev, struct file *file,
struct ion_handle **ionhandle, int domain_num)
{
@@ -135,8 +148,8 @@
{
.src = MSM_BUS_MASTER_JPEG,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = JPEG_CLK_RATE * 2.5,
- .ib = JPEG_CLK_RATE * 2.5,
+ .ab = JPEG_MAX_CLK_RATE * 2.5,
+ .ib = JPEG_MAX_CLK_RATE * 2.5,
},
};
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h
index a14b8ee..7be9e19 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h
@@ -20,7 +20,10 @@
#include <mach/iommu.h>
#include "msm_jpeg_sync.h"
#define JPEG_CLK_RATE 266670000
+#define JPEG_MAX_CLK_RATE 320000000
+int msm_jpeg_platform_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ long clk_rate);
void msm_jpeg_platform_p2v(struct msm_jpeg_device *pgmn_dev, struct file *file,
struct ion_handle **ionhandle, int domain_num);
uint32_t msm_jpeg_platform_v2p(struct msm_jpeg_device *pgmn_dev, int fd,
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c
index 80ff9e5..f7241dd 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c
@@ -804,6 +804,36 @@
return 0;
}
+int msm_jpeg_ioctl_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ unsigned long arg)
+{
+ long clk_rate;
+ int rc;
+
+ if ((pgmn_dev->state != MSM_JPEG_INIT) &&
+ (pgmn_dev->state != MSM_JPEG_RESET)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (get_user(clk_rate, (long __user *)arg)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ JPEG_DBG("%s:%d] Requested clk rate %ld\n", __func__, __LINE__,
+ clk_rate);
+ if (clk_rate < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ rc = msm_jpeg_platform_set_clk_rate(pgmn_dev, clk_rate);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
long __msm_jpeg_ioctl(struct msm_jpeg_device *pgmn_dev,
unsigned int cmd, unsigned long arg)
{
@@ -873,6 +903,9 @@
rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
break;
+ case MSM_JPEG_IOCTL_SET_CLK_RATE:
+ rc = msm_jpeg_ioctl_set_clk_rate(pgmn_dev, arg);
+ break;
default:
JPEG_PR_ERR(KERN_INFO "%s:%d] cmd = %d not supported\n",
__func__, __LINE__, _IOC_NR(cmd));
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 2124b13..63973b4 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1285,8 +1285,8 @@
in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
&new_frame->input_buffer_info,
- ((new_frame->identity >> 16) & 0xFFFF),
- (new_frame->identity & 0xFFFF), &in_fd);
+ ((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
+ (new_frame->input_buffer_info.identity & 0xFFFF), &in_fd);
if (!in_phyaddr) {
pr_err("error gettting input physical address\n");
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index 0083378..03145c8 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -1797,6 +1797,8 @@
(struct msm_sensor_ctrl_t *)data;
struct msm_camera_cci_client *cci_client = NULL;
uint32_t session_id;
+ unsigned long mount_pos;
+
s_ctrl->pdev = pdev;
s_ctrl->dev = &pdev->dev;
CDBG("%s called data %p\n", __func__, data);
@@ -1862,6 +1864,11 @@
s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
s_ctrl->msm_sd.sd.entity.name =
s_ctrl->msm_sd.sd.name;
+ mount_pos = s_ctrl->sensordata->sensor_init_params->position;
+ mount_pos = mount_pos << 8;
+ mount_pos = mount_pos |
+ (s_ctrl->sensordata->sensor_init_params->sensor_mount_angle / 90);
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos;
rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id);
CDBG("%s rc %d session_id %d\n", __func__, rc, session_id);
@@ -1880,6 +1887,8 @@
{
int rc = 0;
uint32_t session_id;
+ unsigned long mount_pos;
+
CDBG("%s %s_i2c_probe called\n", __func__, client->name);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
pr_err("%s %s i2c_check_functionality failed\n",
@@ -1976,6 +1985,12 @@
s_ctrl->msm_sd.sd.entity.name =
s_ctrl->msm_sd.sd.name;
+ mount_pos = s_ctrl->sensordata->sensor_init_params->position;
+ mount_pos = mount_pos << 8;
+ mount_pos = mount_pos |
+ (s_ctrl->sensordata->sensor_init_params->sensor_mount_angle / 90);
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos;
+
rc = camera_init_v4l2(&s_ctrl->sensor_i2c_client->client->dev,
&session_id);
CDBG("%s rc %d session_id %d\n", __func__, rc, session_id);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index ae94287..2fb3c35 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -553,6 +553,7 @@
pkt->alloc_len = output_frame->alloc_len;
pkt->filled_len = output_frame->filled_len;
pkt->offset = output_frame->offset;
+ pkt->rgData[0] = output_frame->extradata_size;
dprintk(VIDC_DBG, "### Q OUTPUT BUFFER ###: %d, %d, %d\n",
pkt->alloc_len, pkt->filled_len, pkt->offset);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 42460fa..e127dc1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -765,6 +765,7 @@
mutex_lock(&core->lock);
core->state = VIDC_CORE_INVALID;
mutex_unlock(&core->lock);
+ mutex_lock(&core->sync_lock);
list_for_each_entry(inst, &core->instances,
list) {
mutex_lock(&inst->lock);
@@ -786,6 +787,7 @@
msm_vidc_queue_v4l2_event(inst,
V4L2_EVENT_MSM_VIDC_SYS_ERROR);
}
+ mutex_unlock(&core->sync_lock);
} else {
dprintk(VIDC_ERR,
"Got SYS_ERR but unable to identify core");
@@ -813,6 +815,7 @@
mutex_lock(&core->lock);
core->state = VIDC_CORE_INVALID;
mutex_unlock(&core->lock);
+ mutex_lock(&core->sync_lock);
list_for_each_entry(inst, &core->instances, list) {
if (inst) {
msm_vidc_queue_v4l2_event(inst,
@@ -834,6 +837,7 @@
mutex_unlock(&inst->lock);
}
}
+ mutex_unlock(&core->sync_lock);
}
static void handle_session_close(enum command_response cmd, void *data)
@@ -2448,9 +2452,12 @@
extra_idx =
EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES) &&
- vb->v4l2_planes[extra_idx].m.userptr)
+ vb->v4l2_planes[extra_idx].m.userptr) {
frame_data.extradata_addr =
vb->v4l2_planes[extra_idx].m.userptr;
+ frame_data.extradata_size =
+ vb->v4l2_planes[extra_idx].length;
+ }
dprintk(VIDC_DBG,
"Sending ftb to hal: Alloc: %d :filled: %d",
frame_data.alloc_len, frame_data.filled_len);
@@ -3158,10 +3165,12 @@
int num_mbs_per_sec = 0;
if (inst->state == MSM_VIDC_OPEN_DONE) {
+ mutex_lock(&inst->core->sync_lock);
num_mbs_per_sec = msm_comm_get_load(inst->core,
MSM_VIDC_DECODER);
num_mbs_per_sec += msm_comm_get_load(inst->core,
MSM_VIDC_ENCODER);
+ mutex_unlock(&inst->core->sync_lock);
if (num_mbs_per_sec > inst->core->resources.max_load) {
dprintk(VIDC_ERR,
"H/w is overloaded. needed: %d max: %d\n",
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index cc07806..ee83eee 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -862,6 +862,7 @@
u32 mark_target;
u32 mark_data;
u32 clnt_data;
+ u32 extradata_size;
};
struct vidc_seq_hdr {
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index a30607c..0bc18fb 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -91,7 +91,6 @@
static struct class *driver_class;
static dev_t qseecom_device_no;
-static struct cdev qseecom_cdev;
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
@@ -162,6 +161,7 @@
uint32_t qsee_perf_client;
struct qseecom_clk qsee;
struct qseecom_clk ce_drv;
+ struct cdev cdev;
};
struct qseecom_client_handle {
@@ -251,6 +251,12 @@
break;
}
spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+ if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
+ pr_err("Service id: %u is not found\n", listener_id);
+ return NULL;
+ }
+
return entry;
}
@@ -494,6 +500,11 @@
}
spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
flags);
+
+ if (ptr_svc == NULL) {
+ pr_err("Listener Svc %d does not exist\n", lstnr);
+ return -EINVAL;
+ }
if (ptr_svc->svc.listener_id != lstnr) {
pr_warning("Service requested for does on exist\n");
return -ERESTARTSYS;
@@ -855,14 +866,42 @@
struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
{
int ret = 0;
+ void *req_buf = NULL;
+
if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
pr_err("Error with pointer: req_ptr = %p, send_svc_ptr = %p\n",
req_ptr, send_svc_ireq_ptr);
return -EINVAL;
}
+
+ if ((!req_ptr->cmd_req_buf) || (!req_ptr->resp_buf)) {
+ pr_err("Invalid req/resp buffer, exiting\n");
+ return -EINVAL;
+ }
+
+ if (((uint32_t)req_ptr->cmd_req_buf <
+ data_ptr->client.user_virt_sb_base)
+ || ((uint32_t)req_ptr->cmd_req_buf >=
+ (data_ptr->client.user_virt_sb_base +
+ data_ptr->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+
+
+ if (((uint32_t)req_ptr->resp_buf < data_ptr->client.user_virt_sb_base)
+ || ((uint32_t)req_ptr->resp_buf >=
+ (data_ptr->client.user_virt_sb_base +
+ data_ptr->client.sb_length))){
+		pr_err("response buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+
+ req_buf = data_ptr->client.sb_virt;
+
send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
send_svc_ireq_ptr->key_type =
- ((struct qseecom_rpmb_provision_key *)req_ptr->cmd_req_buf)->key_type;
+ ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
send_svc_ireq_ptr->rsp_ptr = (void *)(__qseecom_uvirt_to_kphys(data_ptr,
(uint32_t)req_ptr->resp_buf));
@@ -1063,8 +1102,6 @@
if (ret)
return ret;
- pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%p\n",
- req.resp_len, req.resp_buf);
return ret;
}
@@ -1254,8 +1291,7 @@
ret = __qseecom_update_cmd_buf(&req, true, data, false);
if (ret)
return ret;
- pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%p\n",
- req.resp_len, req.resp_buf);
+
return ret;
}
@@ -1273,6 +1309,11 @@
struct qseecom_registered_listener_list *this_lstnr;
this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (!this_lstnr) {
+ pr_err("Invalid listener ID\n");
+ return -ENODATA;
+ }
+
while (1) {
if (wait_event_freezable(this_lstnr->rcv_req_wq,
__qseecom_listener_has_rcvd_req(data,
@@ -3330,7 +3371,7 @@
if (IS_ERR(driver_class)) {
rc = -ENOMEM;
pr_err("class_create failed %d\n", rc);
- goto unregister_chrdev_region;
+ goto exit_unreg_chrdev_region;
}
class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
@@ -3338,16 +3379,16 @@
if (!class_dev) {
pr_err("class_device_create failed %d\n", rc);
rc = -ENOMEM;
- goto class_destroy;
+ goto exit_destroy_class;
}
- cdev_init(&qseecom_cdev, &qseecom_fops);
- qseecom_cdev.owner = THIS_MODULE;
+ cdev_init(&qseecom.cdev, &qseecom_fops);
+ qseecom.cdev.owner = THIS_MODULE;
- rc = cdev_add(&qseecom_cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+ rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
if (rc < 0) {
pr_err("cdev_add failed %d\n", rc);
- goto err;
+ goto exit_destroy_device;
}
INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
@@ -3363,7 +3404,7 @@
&qsee_not_legacy, sizeof(qsee_not_legacy));
if (rc) {
pr_err("Failed to retrieve QSEOS version information %d\n", rc);
- goto err;
+ goto exit_del_cdev;
}
if (qsee_not_legacy) {
uint32_t feature = 10;
@@ -3373,14 +3414,14 @@
&qseecom.qsee_version, sizeof(qseecom.qsee_version));
if (rc) {
pr_err("Failed to get QSEE version info %d\n", rc);
- goto err;
+ goto exit_del_cdev;
}
qseecom.qseos_version = QSEOS_VERSION_14;
} else {
pr_err("QSEE legacy version is not supported:");
pr_err("Support for TZ1.3 and earlier is deprecated\n");
rc = -EINVAL;
- goto err;
+ goto exit_del_cdev;
}
qseecom.commonlib_loaded = false;
qseecom.pdev = class_dev;
@@ -3389,7 +3430,7 @@
if (qseecom.ion_clnt == NULL) {
pr_err("Ion client cannot be created\n");
rc = -ENOMEM;
- goto err;
+ goto exit_del_cdev;
}
/* register client for bus scaling */
@@ -3401,7 +3442,7 @@
pr_err("Fail to get disk-encrypt pipe pair information.\n");
qseecom.ce_info.disk_encrypt_pipe = 0xff;
rc = -EINVAL;
- goto err;
+ goto exit_destroy_ion_client;
} else {
pr_warn("bam_pipe_pair=0x%x",
qseecom.ce_info.disk_encrypt_pipe);
@@ -3413,7 +3454,7 @@
pr_err("Fail to get qsee ce hw instance information.\n");
qseecom.ce_info.qsee_ce_hw_instance = 0xff;
rc = -EINVAL;
- goto err;
+ goto exit_destroy_ion_client;
} else {
pr_warn("qsee-ce-hw-instance=0x%x",
qseecom.ce_info.qsee_ce_hw_instance);
@@ -3425,7 +3466,7 @@
pr_err("Fail to get hlos ce hw instance information.\n");
qseecom.ce_info.hlos_ce_hw_instance = 0xff;
rc = -EINVAL;
- goto err;
+ goto exit_destroy_ion_client;
} else {
pr_warn("hlos-ce-hw-instance=0x%x",
qseecom.ce_info.hlos_ce_hw_instance);
@@ -3436,13 +3477,13 @@
ret = __qseecom_init_clk(CLK_QSEE);
if (ret)
- goto err;
+ goto exit_destroy_ion_client;
if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
ret = __qseecom_init_clk(CLK_CE_DRV);
if (ret) {
__qseecom_deinit_clk(CLK_QSEE);
- goto err;
+ goto exit_destroy_ion_client;
}
} else {
struct qseecom_clk *qclk;
@@ -3472,7 +3513,7 @@
} else {
pr_err("Fail to get secure app region info\n");
rc = -EINVAL;
- goto err;
+ goto exit_destroy_ion_client;
}
rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
&resp, sizeof(resp));
@@ -3480,7 +3521,7 @@
pr_err("send secapp reg fail %d resp.res %d\n",
rc, resp.result);
rc = -EINVAL;
- goto err;
+ goto exit_destroy_ion_client;
}
}
} else {
@@ -3494,11 +3535,16 @@
if (!qseecom.qsee_perf_client)
pr_err("Unable to register bus client\n");
return 0;
-err:
+
+exit_destroy_ion_client:
+ ion_client_destroy(qseecom.ion_clnt);
+exit_del_cdev:
+ cdev_del(&qseecom.cdev);
+exit_destroy_device:
device_destroy(driver_class, qseecom_device_no);
-class_destroy:
+exit_destroy_class:
class_destroy(driver_class);
-unregister_chrdev_region:
+exit_unreg_chrdev_region:
unregister_chrdev_region(qseecom_device_no, 1);
return rc;
}
@@ -3509,69 +3555,64 @@
unsigned long flags = 0;
int ret = 0;
- if (pdev->dev.platform_data != NULL)
- msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
-
spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
- kclient = list_entry((&qseecom.registered_kclient_list_head)->next,
- struct qseecom_registered_kclient_list, list);
- if (list_empty(&kclient->list)) {
- spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock,
- flags);
- return 0;
- }
+
list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
- list) {
- if (kclient)
- list_del(&kclient->list);
- break;
- }
- spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+ list) {
+ if (!kclient)
+ goto exit_irqrestore;
+ /* Break the loop if client handle is NULL */
+ if (!kclient->handle)
+ goto exit_free_kclient;
- while (kclient->handle != NULL) {
+ if (list_empty(&kclient->list))
+ goto exit_free_kc_handle;
+
+ list_del(&kclient->list);
ret = qseecom_unload_app(kclient->handle->dev);
- if (ret == 0) {
+ if (!ret) {
kzfree(kclient->handle->dev);
kzfree(kclient->handle);
kzfree(kclient);
}
- spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
- kclient = list_entry(
- (&qseecom.registered_kclient_list_head)->next,
- struct qseecom_registered_kclient_list, list);
- if (list_empty(&kclient->list)) {
- spin_unlock_irqrestore(
- &qseecom.registered_kclient_list_lock, flags);
- return 0;
- }
- list_for_each_entry(kclient,
- &qseecom.registered_kclient_list_head, list) {
- if (kclient)
- list_del(&kclient->list);
- break;
- }
- spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock,
- flags);
- if (!kclient) {
- ret = 0;
- break;
- }
}
- if (qseecom.qseos_version > QSEEE_VERSION_00)
+
+exit_free_kc_handle:
+ kzfree(kclient->handle);
+exit_free_kclient:
+ kzfree(kclient);
+exit_irqrestore:
+ spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+ if (qseecom.qseos_version > QSEEE_VERSION_00)
qseecom_unload_commonlib_image();
if (qseecom.qsee_perf_client)
msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
0);
+ if (pdev->dev.platform_data != NULL)
+ msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+
/* register client for bus scaling */
if (pdev->dev.of_node) {
__qseecom_deinit_clk(CLK_QSEE);
if (qseecom.qsee.instance != qseecom.ce_drv.instance)
__qseecom_deinit_clk(CLK_CE_DRV);
}
+
+ ion_client_destroy(qseecom.ion_clnt);
+
+ cdev_del(&qseecom.cdev);
+
+ device_destroy(driver_class, qseecom_device_no);
+
+ class_destroy(driver_class);
+
+ unregister_chrdev_region(qseecom_device_no, 1);
+
return ret;
-};
+}
static struct of_device_id qseecom_match[] = {
{
@@ -3597,10 +3638,7 @@
static void __devexit qseecom_exit(void)
{
- device_destroy(driver_class, qseecom_device_no);
- class_destroy(driver_class);
- unregister_chrdev_region(qseecom_device_no, 1);
- ion_client_destroy(qseecom.ion_clnt);
+ platform_driver_unregister(&qseecom_plat_driver);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 36bdf45..3d69473 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -1821,7 +1821,7 @@
}
if (filter->priority >= TSPP_NUM_PRIORITIES) {
- pr_err("tspp invalid source");
+ pr_err("tspp invalid filter priority");
return -ENOSR;
}
@@ -1950,6 +1950,10 @@
pr_err("tspp_remove: can't find device %i", dev);
return -ENODEV;
}
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
channel = &pdev->channels[channel_id];
src = channel->src;
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 10dea37..bd4ecf3 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -520,6 +520,7 @@
*/
regd = ath_world_regdomain(reg);
wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->country_ie_pref = NL80211_COUNTRY_IE_FOLLOW_POWER;
} else {
/*
* This gets applied in the case of the absence of CRDA,
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 9727787..aa4e016 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -1876,7 +1876,7 @@
if (batt_terminal_uv >= chip->max_voltage_uv - VDD_MAX_ERR) {
chip->soc_at_cv = soc;
chip->prev_chg_soc = soc;
- chip->ibat_at_cv_ua = ibat_ua;
+ chip->ibat_at_cv_ua = params->iavg_ua;
pr_debug("CC_TO_CV ibat_ua = %d CHG SOC %d\n",
ibat_ua, soc);
} else {
@@ -1927,7 +1927,7 @@
soc_ibat = bound_soc(linear_interpolate(chip->soc_at_cv,
chip->ibat_at_cv_ua,
100, -1 * chip->chg_term_ua,
- ibat_ua));
+ params->iavg_ua));
weight_ibat = bound_soc(linear_interpolate(1, chip->soc_at_cv,
100, 100, chip->prev_chg_soc));
weight_cc = 100 - weight_ibat;
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index a627ec2..411aebc 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -335,6 +335,7 @@
unsigned int cold_batt_p;
int warm_bat_decidegc;
int cool_bat_decidegc;
+ int fake_battery_soc;
unsigned int safe_current;
unsigned int revision;
unsigned int type;
@@ -1659,6 +1660,7 @@
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
case POWER_SUPPLY_PROP_COOL_TEMP:
case POWER_SUPPLY_PROP_WARM_TEMP:
+ case POWER_SUPPLY_PROP_CAPACITY:
return 1;
default:
break;
@@ -2013,6 +2015,9 @@
union power_supply_propval ret = {0,};
int battery_status, bms_status, soc, charger_in;
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+
if (chip->use_default_batt_values || !get_prop_batt_present(chip))
return DEFAULT_CAPACITY;
@@ -3354,6 +3359,10 @@
case POWER_SUPPLY_PROP_WARM_TEMP:
rc = qpnp_chg_configure_jeita(chip, psp, val->intval);
break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ power_supply_changed(&chip->batt_psy);
+ break;
case POWER_SUPPLY_PROP_CHARGING_ENABLED:
chip->charging_disabled = !(val->intval);
if (chip->charging_disabled) {
@@ -4172,6 +4181,7 @@
}
chip->prev_usb_max_ma = -EINVAL;
+ chip->fake_battery_soc = -EINVAL;
chip->dev = &(spmi->dev);
chip->spmi = spmi;
diff --git a/drivers/sensors/Kconfig b/drivers/sensors/Kconfig
new file mode 100644
index 0000000..2d81924
--- /dev/null
+++ b/drivers/sensors/Kconfig
@@ -0,0 +1,5 @@
+config SENSORS
+ bool "Sensors Class Support"
+ help
+ This option enables the sensor sysfs class in /sys/class/sensors.
+	  You'll need this to do anything useful with sensors. If unsure, say N.
diff --git a/drivers/sensors/Makefile b/drivers/sensors/Makefile
new file mode 100644
index 0000000..3a2a848
--- /dev/null
+++ b/drivers/sensors/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SENSORS) += sensors_class.o
diff --git a/drivers/sensors/sensors_class.c b/drivers/sensors/sensors_class.c
new file mode 100644
index 0000000..71d8089
--- /dev/null
+++ b/drivers/sensors/sensors_class.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/rwsem.h>
+#include <linux/sensors.h>
+
+static struct class *sensors_class;
+
+DECLARE_RWSEM(sensors_list_lock);
+LIST_HEAD(sensors_list);
+
+static ssize_t sensors_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", sensors_cdev->name);
+}
+
+static ssize_t sensors_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", sensors_cdev->vendor);
+}
+
+static ssize_t sensors_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", sensors_cdev->version);
+}
+
+static ssize_t sensors_handle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", sensors_cdev->handle);
+}
+
+static ssize_t sensors_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", sensors_cdev->type);
+}
+
+static ssize_t sensors_max_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", sensors_cdev->max_range);
+}
+
+static ssize_t sensors_resolution_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", sensors_cdev->resolution);
+}
+
+static ssize_t sensors_power_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", sensors_cdev->sensor_power);
+}
+
+static ssize_t sensors_min_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", sensors_cdev->min_delay);
+}
+
+static ssize_t sensors_fifo_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ sensors_cdev->fifo_reserved_event_count);
+}
+
+static ssize_t sensors_fifo_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensors_classdev *sensors_cdev = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ sensors_cdev->fifo_max_event_count);
+}
+
+static struct device_attribute sensors_class_attrs[] = {
+ __ATTR(name, 0644, sensors_name_show, NULL),
+ __ATTR(vendor, 0644, sensors_vendor_show, NULL),
+ __ATTR(version, 0644, sensors_version_show, NULL),
+ __ATTR(handle, 0644, sensors_handle_show, NULL),
+ __ATTR(type, 0644, sensors_type_show, NULL),
+ __ATTR(max_range, 0644, sensors_max_range_show, NULL),
+ __ATTR(resolution, 0644, sensors_resolution_show, NULL),
+ __ATTR(sensor_power, 0644, sensors_power_show, NULL),
+ __ATTR(min_delay, 0644, sensors_min_delay_show, NULL),
+ __ATTR(fifo_reserved_event_count, 0644, sensors_fifo_event_show, NULL),
+ __ATTR(fifo_max_event_count, 0644, sensors_fifo_max_show, NULL),
+ __ATTR_NULL,
+};
+
+/**
+ * sensors_classdev_register - register a new object of sensors_classdev class.
+ * @parent: parent device for the new sensors class device.
+ * @sensors_cdev: the sensors_classdev structure for this device.
+*/
+int sensors_classdev_register(struct device *parent,
+ struct sensors_classdev *sensors_cdev)
+{
+ sensors_cdev->dev = device_create(sensors_class, parent, 0,
+ sensors_cdev, "%s", sensors_cdev->name);
+ if (IS_ERR(sensors_cdev->dev))
+ return PTR_ERR(sensors_cdev->dev);
+
+ down_write(&sensors_list_lock);
+ list_add_tail(&sensors_cdev->node, &sensors_list);
+ up_write(&sensors_list_lock);
+
+ pr_debug("Registered sensors device: %s\n",
+ sensors_cdev->name);
+ return 0;
+}
+EXPORT_SYMBOL(sensors_classdev_register);
+
+/**
+ * sensors_classdev_unregister - unregister an object of the sensors class.
+ * @sensors_cdev: the sensor device to unregister
+ * Unregister an object previously registered via sensors_classdev_register.
+*/
+void sensors_classdev_unregister(struct sensors_classdev *sensors_cdev)
+{
+ device_unregister(sensors_cdev->dev);
+ down_write(&sensors_list_lock);
+ list_del(&sensors_cdev->node);
+ up_write(&sensors_list_lock);
+}
+EXPORT_SYMBOL(sensors_classdev_unregister);
+
+static int __init sensors_init(void)
+{
+ sensors_class = class_create(THIS_MODULE, "sensors");
+ if (IS_ERR(sensors_class))
+ return PTR_ERR(sensors_class);
+ sensors_class->dev_attrs = sensors_class_attrs;
+ return 0;
+}
+
+static void __exit sensors_exit(void)
+{
+ class_destroy(sensors_class);
+}
+
+subsys_initcall(sensors_init);
+module_exit(sensors_exit);
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 4251968..1ad0054 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -40,6 +40,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/swap.h>
+#include <linux/fs.h>
#ifdef CONFIG_HIGHMEM
#define _ZONE ZONE_HIGHMEM
@@ -246,8 +247,14 @@
}
other_free = global_page_state(NR_FREE_PAGES);
- other_file = global_page_state(NR_FILE_PAGES) -
- global_page_state(NR_SHMEM);
+
+ if (global_page_state(NR_SHMEM) + total_swapcache_pages <
+ global_page_state(NR_FILE_PAGES))
+ other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM) -
+ total_swapcache_pages;
+ else
+ other_file = 0;
tune_lmk_param(&other_free, &other_file, sc);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 91d94b5..83fa657 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -32,6 +32,7 @@
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <linux/ratelimit.h>
#include "zram_drv.h"
@@ -39,6 +40,12 @@
static int zram_major;
static struct zram *zram_devices;
+/*
+ * We don't need to see memory allocation errors more than once every 1
+ * second to know that a problem is occurring.
+ */
+#define ALLOC_ERROR_LOG_RATE_MS 1000
+
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
@@ -221,7 +228,8 @@
goto free_buffer;
}
- meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_NOWARN);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto free_table;
@@ -399,6 +407,7 @@
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
+ static unsigned long zram_rs_time;
page = bvec->bv_page;
src = meta->compress_buffer;
@@ -472,8 +481,10 @@
handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
- pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
- index, clen);
+ if (printk_timed_ratelimit(&zram_rs_time,
+ ALLOC_ERROR_LOG_RATE_MS))
+ pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+ index, clen);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 97a3acf..508a19f 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -32,7 +32,7 @@
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
-static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
+static const size_t max_zpage_size = PAGE_SIZE / 10 * 9;
/*
* NOTE: max_zpage_size must be less than or equal to:
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 1a67537..41a6803 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -430,7 +430,12 @@
return next;
}
-/* Encode <page, obj_idx> as a single handle value */
+/*
+ * Encode <page, obj_idx> as a single handle value.
+ * On hardware platforms with physical memory starting at 0x0 the pfn
+ * could be 0 so we ensure that the handle will never be 0 by adjusting the
+ * encoded obj_idx value before encoding.
+ */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
unsigned long handle;
@@ -441,17 +446,21 @@
}
handle = page_to_pfn(page) << OBJ_INDEX_BITS;
- handle |= (obj_idx & OBJ_INDEX_MASK);
+ handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
return (void *)handle;
}
-/* Decode <page, obj_idx> pair from the given object handle */
+/*
+ * Decode <page, obj_idx> pair from the given object handle. We adjust the
+ * decoded obj_idx back to its original value since it was adjusted in
+ * obj_location_to_handle().
+ */
static void obj_handle_to_location(unsigned long handle, struct page **page,
unsigned long *obj_idx)
{
*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
- *obj_idx = handle & OBJ_INDEX_MASK;
+ *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}
static unsigned long obj_idx_to_offset(struct page *page,
@@ -472,7 +481,7 @@
set_page_private(page, 0);
page->mapping = NULL;
page->freelist = NULL;
- page_mapcount_reset(page);
+ reset_page_mapcount(page);
}
static void free_zspage(struct page *first_page)
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index c6f6f03..e9bb553 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -37,6 +37,7 @@
#include <linux/regulator/consumer.h>
#define MAX_RAILS 5
+#define MAX_THRESHOLD 2
static struct msm_thermal_data msm_thermal_info;
static uint32_t limited_max_freq = UINT_MAX;
@@ -69,6 +70,7 @@
static bool psm_enabled;
static bool psm_nodes_called;
static bool psm_probed;
+static bool hotplug_enabled;
static int *tsens_id_map;
static DEFINE_MUTEX(vdd_rstr_mutex);
static DEFINE_MUTEX(psm_mutex);
@@ -77,8 +79,10 @@
uint32_t cpu;
bool offline;
bool user_offline;
+ bool thresh_cleared;
const char *sensor_type;
- struct sensor_threshold thresh[2];
+ uint32_t sensor_id;
+ struct sensor_threshold thresh[MAX_THRESHOLD];
};
struct rail {
@@ -624,6 +628,76 @@
return ret;
}
+static int set_and_activate_threshold(uint32_t sensor_id,
+ struct sensor_threshold *threshold)
+{
+ int ret = 0;
+
+ ret = sensor_set_trip(sensor_id, threshold);
+ if (ret != 0) {
+ pr_err("%s: Error in setting trip %d\n",
+ KBUILD_MODNAME, threshold->trip);
+ goto set_done;
+ }
+
+ ret = sensor_activate_trip(sensor_id, threshold, true);
+ if (ret != 0) {
+ pr_err("%s: Error in enabling trip %d\n",
+ KBUILD_MODNAME, threshold->trip);
+ goto set_done;
+ }
+
+set_done:
+ return ret;
+}
+
+static int set_threshold(uint32_t sensor_id,
+ struct sensor_threshold *threshold)
+{
+ struct tsens_device tsens_dev;
+ int i = 0, ret = 0;
+ long temp;
+
+ if ((!threshold) || check_sensor_id(sensor_id)) {
+ pr_err("%s: Invalid input\n", KBUILD_MODNAME);
+ ret = -EINVAL;
+ goto set_threshold_exit;
+ }
+
+ tsens_dev.sensor_num = sensor_id;
+ ret = tsens_get_temp(&tsens_dev, &temp);
+ if (ret) {
+ pr_err("%s: Unable to read TSENS sensor %d\n",
+ KBUILD_MODNAME, tsens_dev.sensor_num);
+ goto set_threshold_exit;
+ }
+ while (i < MAX_THRESHOLD) {
+ switch (threshold[i].trip) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ if (threshold[i].temp >= temp) {
+ ret = set_and_activate_threshold(sensor_id,
+ &threshold[i]);
+ if (ret)
+ goto set_threshold_exit;
+ }
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ if (threshold[i].temp <= temp) {
+ ret = set_and_activate_threshold(sensor_id,
+ &threshold[i]);
+ if (ret)
+ goto set_threshold_exit;
+ }
+ break;
+ default:
+ break;
+ }
+ i++;
+ }
+set_threshold_exit:
+ return ret;
+}
+
#ifdef CONFIG_SMP
static void __ref do_core_control(long temp)
{
@@ -677,7 +751,7 @@
/* Call with core_control_mutex locked */
static int __ref update_offline_cores(int val)
{
- int cpu = 0;
+ uint32_t cpu = 0;
int ret = 0;
if (!core_control_enabled)
@@ -701,8 +775,7 @@
static __ref int do_hotplug(void *data)
{
int ret = 0;
- int cpu = 0;
- uint32_t mask = 0;
+ uint32_t cpu = 0, mask = 0;
if (!core_control_enabled)
return -EINVAL;
@@ -714,6 +787,12 @@
mutex_lock(&core_control_mutex);
for_each_possible_cpu(cpu) {
+ if (hotplug_enabled &&
+ cpus[cpu].thresh_cleared) {
+ set_threshold(cpus[cpu].sensor_id,
+ cpus[cpu].thresh);
+ cpus[cpu].thresh_cleared = false;
+ }
if (cpus[cpu].offline || cpus[cpu].user_offline)
mask |= BIT(cpu);
}
@@ -984,10 +1063,12 @@
default:
break;
}
- if (hotplug_task)
+ if (hotplug_task) {
+ cpu_node->thresh_cleared = true;
complete(&hotplug_notify_complete);
- else
+ } else {
pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
+ }
return 0;
}
/* Adjust cpus offlined bit based on temperature reading. */
@@ -997,15 +1078,18 @@
long temp = 0;
int cpu = 0;
+ if (!hotplug_enabled)
+ return 0;
+
mutex_lock(&core_control_mutex);
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
continue;
- tsens_dev.sensor_num = sensor_get_id(\
- (char *)cpus[cpu].sensor_type);
+ tsens_dev.sensor_num = cpus[cpu].sensor_id;
if (tsens_get_temp(&tsens_dev, &temp)) {
pr_err("%s: Unable to read TSENS sensor %d\n",
KBUILD_MODNAME, tsens_dev.sensor_num);
+ mutex_unlock(&core_control_mutex);
return -EINVAL;
}
@@ -1034,26 +1118,29 @@
if (hotplug_task)
return;
+ if (!hotplug_enabled)
+ goto init_kthread;
+
for_each_possible_cpu(cpu) {
+ cpus[cpu].cpu = (uint32_t)cpu;
+ cpus[cpu].thresh_cleared = false;
+ cpus[cpu].sensor_id =
+ sensor_get_id((char *)cpus[cpu].sensor_type);
if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
continue;
- cpus[cpu].cpu = (uint32_t)cpu;
cpus[cpu].thresh[0].temp = msm_thermal_info.hotplug_temp_degC;
cpus[cpu].thresh[0].trip = THERMAL_TRIP_CONFIGURABLE_HI;
cpus[cpu].thresh[0].notify = hotplug_notify;
cpus[cpu].thresh[0].data = (void *)&cpus[cpu];
- sensor_set_trip(sensor_get_id((char *)cpus[cpu].sensor_type),
- &cpus[cpu].thresh[0]);
cpus[cpu].thresh[1].temp = msm_thermal_info.hotplug_temp_degC -
msm_thermal_info.hotplug_temp_hysteresis_degC;
cpus[cpu].thresh[1].trip = THERMAL_TRIP_CONFIGURABLE_LOW;
cpus[cpu].thresh[1].notify = hotplug_notify;
cpus[cpu].thresh[1].data = (void *)&cpus[cpu];
- sensor_set_trip(sensor_get_id((char *)cpus[cpu].sensor_type),
- &cpus[cpu].thresh[1]);
-
+ set_threshold(cpus[cpu].sensor_id, cpus[cpu].thresh);
}
+init_kthread:
init_completion(&hotplug_notify_complete);
hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
if (IS_ERR(hotplug_task)) {
@@ -1771,6 +1858,11 @@
int ret = 0;
int cpu = 0;
+ if (num_possible_cpus() > 1) {
+ core_control_enabled = 1;
+ hotplug_enabled = 1;
+ }
+
key = "qcom,core-limit-temp";
ret = of_property_read_u32(node, key, &data->core_limit_temp_degC);
if (ret)
@@ -1789,19 +1881,20 @@
key = "qcom,hotplug-temp";
ret = of_property_read_u32(node, key, &data->hotplug_temp_degC);
if (ret)
- goto read_node_fail;
+ goto hotplug_node_fail;
key = "qcom,hotplug-temp-hysteresis";
ret = of_property_read_u32(node, key,
&data->hotplug_temp_hysteresis_degC);
if (ret)
- goto read_node_fail;
+ goto hotplug_node_fail;
key = "qcom,cpu-sensors";
cpu_cnt = of_property_count_strings(node, key);
if (cpu_cnt != num_possible_cpus()) {
pr_err("%s: Wrong number of cpu\n", KBUILD_MODNAME);
- goto read_node_fail;
+ ret = -EINVAL;
+ goto hotplug_node_fail;
}
for_each_possible_cpu(cpu) {
@@ -1811,12 +1904,9 @@
ret = of_property_read_string_index(node, key, cpu,
&cpus[cpu].sensor_type);
if (ret)
- goto read_node_fail;
+ goto hotplug_node_fail;
}
- if (num_possible_cpus() > 1)
- core_control_enabled = 1;
-
read_node_fail:
if (ret) {
dev_info(&pdev->dev,
@@ -1826,6 +1916,16 @@
}
return ret;
+
+hotplug_node_fail:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key);
+ hotplug_enabled = 0;
+ }
+
+ return ret;
}
static int __devinit msm_thermal_dev_probe(struct platform_device *pdev)
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 8d9da6b..739696d 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -89,30 +89,21 @@
}
EXPORT_SYMBOL(sensor_get_id);
-static long get_min(struct sensor_info *sensor, long temp)
+static int __update_sensor_thresholds(struct sensor_info *sensor)
{
- long min = LONG_MIN;
- struct sensor_threshold *pos, *var;
-
- list_for_each_entry_safe(pos, var, &sensor->threshold_list, list) {
- if (pos->trip == THERMAL_TRIP_CONFIGURABLE_LOW)
- if (pos->temp < temp && pos->temp > min)
- min = pos->temp;
- }
-
- return min;
-}
-
-static void __update_sensor_thresholds(struct sensor_info *sensor)
-{
- long min = LONG_MIN;
- long max = LONG_MAX;
- long max_of_min = LONG_MIN;
- long min_of_max = LONG_MAX;
+ long max_of_low_thresh = LONG_MIN;
+ long min_of_high_thresh = LONG_MAX;
struct sensor_threshold *pos, *var;
enum thermal_trip_type type;
- int i;
- long curr_temp;
+ int i, ret = 0;
+
+ if (!sensor->tz->ops->set_trip_temp ||
+ !sensor->tz->ops->activate_trip_type ||
+ !sensor->tz->ops->get_trip_type ||
+ !sensor->tz->ops->get_trip_temp) {
+ ret = -ENODEV;
+ goto update_done;
+ }
for (i = 0; ((sensor->max_idx == -1) || (sensor->min_idx == -1)) &&
(sensor->tz->ops->get_trip_type) && (i < sensor->tz->trips);
@@ -128,60 +119,85 @@
THERMAL_TRIP_CONFIGURABLE_HI, &sensor->threshold_max);
}
- sensor->tz->ops->get_temp(sensor->tz, &curr_temp);
list_for_each_entry_safe(pos, var, &sensor->threshold_list, list) {
+ if (!pos->active)
+ continue;
if (pos->trip == THERMAL_TRIP_CONFIGURABLE_LOW) {
- if (pos->temp > max_of_min)
- max_of_min = pos->temp;
- if (pos->temp < curr_temp && pos->temp > min)
- min = pos->temp;
+ if (pos->temp > max_of_low_thresh)
+ max_of_low_thresh = pos->temp;
}
if (pos->trip == THERMAL_TRIP_CONFIGURABLE_HI) {
- if (pos->temp < min_of_max)
- min_of_max = pos->temp;
- if (pos->temp > curr_temp && pos->temp < max)
- max = pos->temp;
+ if (pos->temp < min_of_high_thresh)
+ min_of_high_thresh = pos->temp;
}
}
- pr_debug("sensor %d: min of max: %ld max of min: %ld\n",
- sensor->sensor_id, max_of_min, min_of_max);
+ pr_debug("sensor %d: Thresholds: max of low: %ld min of high: %ld\n",
+ sensor->sensor_id, max_of_low_thresh,
+ min_of_high_thresh);
- /* If we haven't found a max and min bounding the curr_temp,
- * use the min of max and max of min instead.
- */
- if (max == LONG_MAX)
- max = min_of_max;
- if (min == LONG_MIN) {
- min = get_min(sensor, max);
- if (min == LONG_MIN)
- min = max_of_min;
+ if ((min_of_high_thresh != sensor->threshold_max) &&
+ (min_of_high_thresh != LONG_MAX)) {
+ ret = sensor->tz->ops->set_trip_temp(sensor->tz,
+ sensor->max_idx, min_of_high_thresh);
+ if (ret) {
+ pr_err("sensor %d: Unable to set high threshold %d",
+ sensor->sensor_id, ret);
+ goto update_done;
+ }
+ sensor->threshold_max = min_of_high_thresh;
+ }
+ ret = sensor->tz->ops->activate_trip_type(sensor->tz,
+ sensor->max_idx,
+ (min_of_high_thresh == LONG_MAX) ?
+ THERMAL_TRIP_ACTIVATION_DISABLED :
+ THERMAL_TRIP_ACTIVATION_ENABLED);
+ if (ret) {
+ pr_err("sensor %d: Unable to activate high threshold %d",
+ sensor->sensor_id, ret);
+ goto update_done;
}
- if (sensor->tz->ops->set_trip_temp) {
- if (max != sensor->threshold_max) {
- sensor->tz->ops->set_trip_temp(sensor->tz,
- sensor->max_idx, max);
- sensor->threshold_max = max;
+ if ((max_of_low_thresh != sensor->threshold_min) &&
+ (max_of_low_thresh != LONG_MIN)) {
+ ret = sensor->tz->ops->set_trip_temp(sensor->tz,
+ sensor->min_idx, max_of_low_thresh);
+ if (ret) {
+ pr_err("sensor %d: Unable to set low threshold %d",
+ sensor->sensor_id, ret);
+ goto update_done;
}
- if (min != sensor->threshold_min) {
- sensor->tz->ops->set_trip_temp(sensor->tz,
- sensor->min_idx, min);
- sensor->threshold_min = min;
- }
+ sensor->threshold_min = max_of_low_thresh;
+ }
+ ret = sensor->tz->ops->activate_trip_type(sensor->tz,
+ sensor->min_idx,
+ (max_of_low_thresh == LONG_MIN) ?
+ THERMAL_TRIP_ACTIVATION_DISABLED :
+ THERMAL_TRIP_ACTIVATION_ENABLED);
+ if (ret) {
+ pr_err("sensor %d: Unable to activate low threshold %d",
+ sensor->sensor_id, ret);
+ goto update_done;
}
- pr_debug("sensor %d: curr_temp: %ld min: %ld max: %ld\n",
- sensor->sensor_id, curr_temp,
+ pr_debug("sensor %d: low: %ld high: %ld\n",
+ sensor->sensor_id,
sensor->threshold_min, sensor->threshold_max);
+
+update_done:
+ return ret;
}
static void sensor_update_work(struct work_struct *work)
{
struct sensor_info *sensor = container_of(work, struct sensor_info,
work);
+ int ret = 0;
mutex_lock(&sensor->lock);
- __update_sensor_thresholds(sensor);
+ ret = __update_sensor_thresholds(sensor);
+ if (ret)
+ pr_err("sensor %d: Error %d setting threshold\n",
+ sensor->sensor_id, ret);
mutex_unlock(&sensor->lock);
}
@@ -202,7 +218,7 @@
return 0;
list_for_each_entry_safe(pos, var, &tz->sensor.threshold_list, list) {
- if (pos->trip != trip)
+ if ((pos->trip != trip) || (!pos->active))
continue;
if (((trip == THERMAL_TRIP_CONFIGURABLE_LOW) &&
(pos->temp <= tz->sensor.threshold_min) &&
@@ -210,6 +226,7 @@
((trip == THERMAL_TRIP_CONFIGURABLE_HI) &&
(pos->temp >= tz->sensor.threshold_max) &&
(pos->temp <= temp))) {
+ pos->active = 0;
pos->notify(trip, temp, pos->data);
}
}
@@ -220,6 +237,29 @@
}
EXPORT_SYMBOL(thermal_sensor_trip);
+int sensor_activate_trip(uint32_t sensor_id,
+ struct sensor_threshold *threshold, bool enable)
+{
+ struct sensor_info *sensor = get_sensor(sensor_id);
+ int ret = 0;
+
+ if (!sensor || !threshold) {
+ pr_err("Sensor %d: uninitialized data\n",
+ sensor_id);
+ ret = -ENODEV;
+ goto activate_trip_exit;
+ }
+
+ mutex_lock(&sensor->lock);
+ threshold->active = (enable) ? 1 : 0;
+ ret = __update_sensor_thresholds(sensor);
+ mutex_unlock(&sensor->lock);
+
+activate_trip_exit:
+ return ret;
+}
+EXPORT_SYMBOL(sensor_activate_trip);
+
int sensor_set_trip(uint32_t sensor_id, struct sensor_threshold *threshold)
{
struct sensor_threshold *pos, *var;
@@ -241,8 +281,7 @@
INIT_LIST_HEAD(&threshold->list);
list_add(&threshold->list, &sensor->threshold_list);
}
-
- __update_sensor_thresholds(sensor);
+ threshold->active = 0; /* Do not allow active threshold right away */
mutex_unlock(&sensor->lock);
return 0;
@@ -254,6 +293,7 @@
{
struct sensor_threshold *pos, *var;
struct sensor_info *sensor = get_sensor(sensor_id);
+ int ret = 0;
if (!sensor)
return -ENODEV;
@@ -261,15 +301,16 @@
mutex_lock(&sensor->lock);
list_for_each_entry_safe(pos, var, &sensor->threshold_list, list) {
if (pos == threshold) {
+ pos->active = 0;
list_del(&pos->list);
break;
}
}
- __update_sensor_thresholds(sensor);
+ ret = __update_sensor_thresholds(sensor);
mutex_unlock(&sensor->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(sensor_cancel_trip);
@@ -283,36 +324,36 @@
return 0;
}
+static void get_trip_threshold(struct thermal_zone_device *tz, int trip,
+ struct sensor_threshold **threshold)
+{
+ enum thermal_trip_type type;
+
+ tz->ops->get_trip_type(tz, trip, &type);
+
+ if (type == THERMAL_TRIP_CONFIGURABLE_HI)
+ *threshold = &tz->tz_threshold[0];
+ else if (type == THERMAL_TRIP_CONFIGURABLE_LOW)
+ *threshold = &tz->tz_threshold[1];
+ else
+ *threshold = NULL;
+}
+
int sensor_set_trip_temp(struct thermal_zone_device *tz,
int trip, long temp)
{
int ret = 0;
- enum thermal_trip_type type;
+ struct sensor_threshold *threshold = NULL;
if (!tz->ops->get_trip_type)
return -EPERM;
- tz->ops->get_trip_type(tz, trip, &type);
- switch (type) {
- case THERMAL_TRIP_CONFIGURABLE_HI:
- tz->tz_threshold[0].temp = temp;
- tz->tz_threshold[0].trip = THERMAL_TRIP_CONFIGURABLE_HI;
- tz->tz_threshold[0].notify = tz_notify_trip;
- tz->tz_threshold[0].data = tz;
- ret = sensor_set_trip(tz->sensor.sensor_id,
- &tz->tz_threshold[0]);
- break;
- case THERMAL_TRIP_CONFIGURABLE_LOW:
- tz->tz_threshold[1].temp = temp;
- tz->tz_threshold[1].trip = THERMAL_TRIP_CONFIGURABLE_LOW;
- tz->tz_threshold[1].notify = tz_notify_trip;
- tz->tz_threshold[1].data = tz;
- ret = sensor_set_trip(tz->sensor.sensor_id,
- &tz->tz_threshold[1]);
- break;
- default:
+ get_trip_threshold(tz, trip, &threshold);
+ if (threshold) {
+ threshold->temp = temp;
+ ret = sensor_set_trip(tz->sensor.sensor_id, threshold);
+ } else {
ret = tz->ops->set_trip_temp(tz, trip, temp);
- break;
}
return ret;
@@ -333,10 +374,12 @@
INIT_LIST_HEAD(&sensor->threshold_list);
INIT_LIST_HEAD(&tz->tz_threshold[0].list);
INIT_LIST_HEAD(&tz->tz_threshold[1].list);
- tz->tz_threshold[0].notify = NULL;
- tz->tz_threshold[0].data = NULL;
- tz->tz_threshold[1].notify = NULL;
- tz->tz_threshold[1].data = NULL;
+ tz->tz_threshold[0].notify = tz_notify_trip;
+ tz->tz_threshold[0].data = tz;
+ tz->tz_threshold[0].trip = THERMAL_TRIP_CONFIGURABLE_HI;
+ tz->tz_threshold[1].notify = tz_notify_trip;
+ tz->tz_threshold[1].data = tz;
+ tz->tz_threshold[1].trip = THERMAL_TRIP_CONFIGURABLE_LOW;
list_add(&sensor->sensor_list, &sensor_info_list);
INIT_WORK(&sensor->work, sensor_update_work);
@@ -489,23 +532,40 @@
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, result;
+ int trip, result = 0;
+ bool activate;
+ struct sensor_threshold *threshold = NULL;
- if (!tz->ops->activate_trip_type)
- return -EPERM;
+ if (!tz->ops->get_trip_type ||
+ !tz->ops->activate_trip_type) {
+ result = -EPERM;
+ goto trip_activate_exit;
+ }
- if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
- return -EINVAL;
-
- if (!strncmp(buf, "enabled", sizeof("enabled")))
- result = tz->ops->activate_trip_type(tz, trip,
- THERMAL_TRIP_ACTIVATION_ENABLED);
- else if (!strncmp(buf, "disabled", sizeof("disabled")))
- result = tz->ops->activate_trip_type(tz, trip,
- THERMAL_TRIP_ACTIVATION_DISABLED);
- else
+ if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) {
result = -EINVAL;
+ goto trip_activate_exit;
+ }
+ if (!strcmp(buf, "enabled")) {
+ activate = true;
+ } else if (!strcmp(buf, "disabled")) {
+ activate = false;
+ } else {
+ result = -EINVAL;
+ goto trip_activate_exit;
+ }
+
+ get_trip_threshold(tz, trip, &threshold);
+ if (threshold)
+ result = sensor_activate_trip(tz->sensor.sensor_id,
+ threshold, activate);
+ else
+ result = tz->ops->activate_trip_type(tz, trip,
+ activate ? THERMAL_TRIP_ACTIVATION_ENABLED :
+ THERMAL_TRIP_ACTIVATION_DISABLED);
+
+trip_activate_exit:
if (result)
return result;
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 6f3ea9b..ad66113 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -111,4 +111,11 @@
To compile this driver as a module, choose M here: the module
will be called uio_pruss.
+config UIO_MSM_SHAREDMEM
+ bool "MSM shared memory driver"
+ default n
+ help
+	  Provides the clients with their respective allotted shared memory
+ addresses which are used as transport buffer.
+
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index d4dd9a5..c4d177a 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) += msm_sharedmem.o
diff --git a/drivers/uio/msm_sharedmem.c b/drivers/uio/msm_sharedmem.c
new file mode 100644
index 0000000..438f002
--- /dev/null
+++ b/drivers/uio/msm_sharedmem.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/uio_driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#define DRIVER_NAME "msm_sharedmem"
+
+static int msm_sharedmem_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct uio_info *info = NULL;
+ struct resource *clnt_res = NULL;
+
+ /* Get the addresses from platform-data */
+ if (!pdev->dev.of_node) {
+ pr_err("Node not found\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!clnt_res) {
+ pr_err("resource not found\n");
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->name = clnt_res->name;
+ info->version = "1.0";
+ info->mem[0].addr = clnt_res->start;
+ info->mem[0].size = resource_size(clnt_res);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+
+ /* Setup device */
+ ret = uio_register_device(&pdev->dev, info);
+ if (ret)
+ goto out;
+
+ dev_set_drvdata(&pdev->dev, info);
+ pr_debug("Device created for client '%s'\n", clnt_res->name);
+out:
+ return ret;
+}
+
+static int msm_sharedmem_remove(struct platform_device *pdev)
+{
+ struct uio_info *info = dev_get_drvdata(&pdev->dev);
+
+ uio_unregister_device(info);
+
+ return 0;
+}
+
+static struct of_device_id msm_sharedmem_of_match[] = {
+ {.compatible = "qcom,sharedmem-uio",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, msm_sharedmem_of_match);
+
+static struct platform_driver msm_sharedmem_driver = {
+ .probe = msm_sharedmem_probe,
+ .remove = msm_sharedmem_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_sharedmem_of_match,
+ },
+};
+
+module_platform_driver(msm_sharedmem_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 31e93a5..f26570d 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -203,11 +203,13 @@
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
+ if ((to->len <= tooff) || (from->len <= fromoff))
+ return -EINVAL;
+
size = to->len - tooff;
+
if (size > (int) (from->len - fromoff))
size = from->len - fromoff;
- if (size <= 0)
- return -EINVAL;
size *= sizeof(u16);
if (from->red && to->red)
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index d642093..9ac17c9 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -38,7 +38,7 @@
#include <linux/major.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
-
+#include <linux/iopoll.h>
#include <mach/board.h>
#include <mach/clk.h>
#include <mach/hardware.h>
@@ -55,6 +55,10 @@
#include "mdp3_ppp.h"
#include "mdss_debug.h"
+#define MISR_POLL_SLEEP 2000
+#define MISR_POLL_TIMEOUT 32000
+#define MDP3_REG_CAPTURED_DSI_PCLK_MASK 1
+
#define MDP_CORE_HW_VERSION 0x03040310
struct mdp3_hw_resource *mdp3_res;
@@ -184,11 +188,11 @@
u32 mdp_interrupt = 0;
spin_lock(&mdata->irq_lock);
- if (!mdata->irq_mask) {
+ if (!mdata->irq_mask)
pr_err("spurious interrupt\n");
- spin_unlock(&mdata->irq_lock);
- return IRQ_HANDLED;
- }
+
+ clk_enable(mdp3_res->clocks[MDP3_CLK_AHB]);
+ clk_enable(mdp3_res->clocks[MDP3_CLK_CORE]);
mdp_interrupt = MDP3_REG_READ(MDP3_REG_INTR_STATUS);
MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_interrupt);
@@ -202,6 +206,10 @@
mdp_interrupt = mdp_interrupt >> 1;
i++;
}
+
+ clk_disable(mdp3_res->clocks[MDP3_CLK_AHB]);
+ clk_disable(mdp3_res->clocks[MDP3_CLK_CORE]);
+
spin_unlock(&mdata->irq_lock);
return IRQ_HANDLED;
@@ -281,8 +289,6 @@
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
memset(mdp3_res->irq_ref_count, 0, sizeof(u32) * MDP3_MAX_INTR);
mdp3_res->irq_mask = 0;
- MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
- MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
disable_irq_nosync(mdp3_res->irq);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
}
@@ -415,10 +421,10 @@
count = mdp3_res->clock_ref_count[clk_idx];
if (count == 1 && enable) {
pr_debug("clk=%d en=%d\n", clk_idx, enable);
- ret = clk_prepare_enable(clk);
+ ret = clk_enable(clk);
} else if (count == 0) {
pr_debug("clk=%d disable\n", clk_idx);
- clk_disable_unprepare(clk);
+ clk_disable(clk);
ret = 0;
} else if (count < 0) {
pr_err("clk=%d count=%d\n", clk_idx, count);
@@ -554,7 +560,7 @@
clk_put(mdp3_res->clocks[MDP3_CLK_DSI]);
}
-int mdp3_clk_enable(int enable)
+int mdp3_clk_enable(int enable, int dsi_clk)
{
int rc;
@@ -564,7 +570,79 @@
rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
rc |= mdp3_clk_update(MDP3_CLK_CORE, enable);
rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
- rc |= mdp3_clk_update(MDP3_CLK_DSI, enable);
+ if (dsi_clk)
+ rc |= mdp3_clk_update(MDP3_CLK_DSI, enable);
+ mutex_unlock(&mdp3_res->res_mutex);
+ return rc;
+}
+
+int mdp3_clk_prepare(void)
+{
+ int rc = 0;
+
+ mutex_lock(&mdp3_res->res_mutex);
+ mdp3_res->clk_prepare_count++;
+ if (mdp3_res->clk_prepare_count == 1) {
+ rc = clk_prepare(mdp3_res->clocks[MDP3_CLK_AHB]);
+ if (rc < 0)
+ goto error0;
+ rc = clk_prepare(mdp3_res->clocks[MDP3_CLK_CORE]);
+ if (rc < 0)
+ goto error1;
+ rc = clk_prepare(mdp3_res->clocks[MDP3_CLK_VSYNC]);
+ if (rc < 0)
+ goto error2;
+ rc = clk_prepare(mdp3_res->clocks[MDP3_CLK_DSI]);
+ if (rc < 0)
+ goto error3;
+ }
+ mutex_unlock(&mdp3_res->res_mutex);
+ return rc;
+
+error3:
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_VSYNC]);
+error2:
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_CORE]);
+error1:
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_AHB]);
+error0:
+ mdp3_res->clk_prepare_count--;
+ mutex_unlock(&mdp3_res->res_mutex);
+ return rc;
+}
+
+void mdp3_clk_unprepare(void)
+{
+ mutex_lock(&mdp3_res->res_mutex);
+ mdp3_res->clk_prepare_count--;
+ if (mdp3_res->clk_prepare_count == 0) {
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_AHB]);
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_CORE]);
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_VSYNC]);
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_DSI]);
+ } else if (mdp3_res->clk_prepare_count < 0) {
+ pr_err("mdp3 clk unprepare mismatch\n");
+ }
+ mutex_unlock(&mdp3_res->res_mutex);
+}
+
+int mdp3_get_mdp_dsi_clk(void)
+{
+ int rc;
+
+ mutex_lock(&mdp3_res->res_mutex);
+ clk_prepare(mdp3_res->clocks[MDP3_CLK_DSI]);
+ rc = mdp3_clk_update(MDP3_CLK_DSI, 1);
+ mutex_unlock(&mdp3_res->res_mutex);
+ return rc;
+}
+
+int mdp3_put_mdp_dsi_clk(void)
+{
+ int rc;
+ mutex_lock(&mdp3_res->res_mutex);
+ rc = mdp3_clk_update(MDP3_CLK_DSI, 0);
+ clk_unprepare(mdp3_res->clocks[MDP3_CLK_DSI]);
mutex_unlock(&mdp3_res->res_mutex);
return rc;
}
@@ -1512,8 +1590,17 @@
static int mdp3_init(struct msm_fb_data_type *mfd)
{
int rc;
+
rc = mdp3_ctrl_init(mfd);
- rc |= mdp3_ppp_res_init(mfd);
+ if (rc) {
+ pr_err("mdp3 ctl init fail\n");
+ return rc;
+ }
+
+ rc = mdp3_ppp_res_init(mfd);
+ if (rc)
+ pr_err("mdp3 ppp res init fail\n");
+
return rc;
}
@@ -1740,9 +1827,16 @@
pr_debug("mdp3__continuous_splash_on\n");
- rc = mdp3_clk_enable(1);
+ rc = mdp3_clk_prepare();
+ if (rc) {
+ pr_err("fail to prepare clk\n");
+ return rc;
+ }
+
+ rc = mdp3_clk_enable(1, 1);
if (rc) {
pr_err("fail to enable clk\n");
+ mdp3_clk_unprepare();
return rc;
}
@@ -1779,8 +1873,10 @@
return 0;
splash_on_err:
- if (mdp3_clk_enable(0))
+ if (mdp3_clk_enable(0, 1))
pr_err("%s: Unable to disable mdp3 clocks\n", __func__);
+
+ mdp3_clk_unprepare();
return rc;
}
@@ -1813,10 +1909,13 @@
static void mdp3_debug_enable_clock(int on)
{
- if (on)
- mdp3_clk_enable(1);
- else
- mdp3_clk_enable(0);
+ if (on) {
+ mdp3_clk_prepare();
+ mdp3_clk_enable(1, 0);
+ } else {
+ mdp3_clk_enable(0, 0);
+ mdp3_clk_unprepare();
+ }
}
static int mdp3_debug_init(struct platform_device *pdev)
@@ -1906,6 +2005,105 @@
return rc;
}
+int mdp3_misr_get(struct mdp_misr *misr_resp)
+{
+ int result = 0, ret = -1;
+ int crc = 0;
+ pr_debug("%s CRC Capture on DSI\n", __func__);
+ switch (misr_resp->block_id) {
+ case DISPLAY_MISR_DSI0:
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
+ /* Sleep for one vsync after DSI video engine is disabled */
+ msleep(20);
+ /* Enable DSI_VIDEO_0 MISR Block */
+ MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20);
+ /* Reset MISR Block */
+ MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1);
+ /* Clear MISR capture done bit */
+ MDP3_REG_WRITE(MDP3_REG_CAPTURED_DSI_PCLK, 0);
+ /* Enable MDP DSI interface */
+ MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 1);
+ ret = readl_poll_timeout(mdp3_res->mdp_base +
+ MDP3_REG_CAPTURED_DSI_PCLK, result,
+ result & MDP3_REG_CAPTURED_DSI_PCLK_MASK,
+ MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+ MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0);
+ if (ret == 0) {
+ /* Disable DSI MISR interface */
+ MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x0);
+ crc = MDP3_REG_READ(MDP3_REG_MISR_CAPT_VAL_DSI_PCLK);
+ pr_debug("CRC Val %d\n", crc);
+ } else {
+ pr_err("CRC Read Timed Out\n");
+ }
+ break;
+
+ case DISPLAY_MISR_DSI_CMD:
+ /* Select DSI PCLK Domain */
+ MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 0x004);
+ /* Select Block id DSI_CMD */
+ MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10);
+ /* Reset MISR Block */
+ MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1);
+ /* Drive Data on Test Bus */
+ MDP3_REG_WRITE(MDP3_REG_EXPORT_MISR_DSI_PCLK, 0);
+ /* Kick off DMA_P */
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 0x11);
+ /* Wait for DMA_P Done */
+ ret = readl_poll_timeout(mdp3_res->mdp_base +
+ MDP3_REG_INTR_STATUS, result,
+ result & MDP3_INTR_DMA_P_DONE_BIT,
+ MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+ if (ret == 0) {
+ crc = MDP3_REG_READ(MDP3_REG_MISR_CURR_VAL_DSI_PCLK);
+ pr_debug("CRC Val %d\n", crc);
+ } else {
+ pr_err("CRC Read Timed Out\n");
+ }
+ break;
+
+ default:
+ pr_err("%s CRC Capture not supported\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ misr_resp->crc_value[0] = crc;
+ pr_debug("%s, CRC Capture on DSI Param Block = 0x%x, CRC 0x%x\n",
+ __func__, misr_resp->block_id, misr_resp->crc_value[0]);
+ return ret;
+}
+
+int mdp3_misr_set(struct mdp_misr *misr_req)
+{
+ int ret = 0;
+ pr_debug("%s Parameters Block = %d Cframe Count = %d CRC = %d\n",
+ __func__, misr_req->block_id, misr_req->frame_count,
+ misr_req->crc_value[0]);
+
+ switch (misr_req->block_id) {
+ case DISPLAY_MISR_DSI0:
+ pr_debug("In the case DISPLAY_MISR_DSI0\n");
+ MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1);
+ MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20);
+ MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1);
+ break;
+
+ case DISPLAY_MISR_DSI_CMD:
+ pr_debug("In the case DISPLAY_MISR_DSI_CMD\n");
+ MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1);
+ MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10);
+ MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1);
+ break;
+
+ default:
+ pr_err("%s CRC Capture not supported\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static int mdp3_probe(struct platform_device *pdev)
{
int rc;
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
index e66b5ac..bc94fd6 100644
--- a/drivers/video/msm/mdss/mdp3.h
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -152,6 +152,8 @@
struct mdss_panel_cfg pan_cfg;
u32 splash_mem_addr;
u32 splash_mem_size;
+
+ int clk_prepare_count;
};
struct mdp3_img_data {
@@ -175,7 +177,9 @@
void mdp3_irq_register(void);
void mdp3_irq_deregister(void);
int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, int client);
-int mdp3_clk_enable(int enable);
+int mdp3_clk_enable(int enable, int dsi_clk);
+int mdp3_clk_prepare(void);
+void mdp3_clk_unprepare(void);
int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
int mdp3_put_img(struct mdp3_img_data *data, int client);
int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
@@ -187,6 +191,11 @@
int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd);
void mdp3_release_splash_memory(void);
int mdp3_create_sysfs_link(struct device *dev);
+int mdp3_get_mdp_dsi_clk(void);
+int mdp3_put_mdp_dsi_clk(void);
+
+int mdp3_misr_set(struct mdp_misr *misr_req);
+int mdp3_misr_get(struct mdp_misr *misr_resp);
#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index b123ccb..f36f088 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -118,7 +118,9 @@
}
mutex_lock(&mdp3_session->lock);
+ mdp3_clk_enable(1, 0);
mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
+ mdp3_clk_enable(0, 0);
if (enable && mdp3_session->status == 1 && !mdp3_session->intf->active)
mod_timer(&mdp3_session->vsync_timer,
jiffies + msecs_to_jiffies(mdp3_session->vsync_period));
@@ -236,12 +238,24 @@
mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
MDP3_CLIENT_DMA_P);
- rc = mdp3_clk_enable(true);
- if (rc)
+ rc = mdp3_clk_prepare();
+ if (rc) {
+ pr_err("mdp3 clk prepare fail\n");
return rc;
+ }
+ rc = mdp3_clk_enable(1, 1);
+ if (rc) {
+ pr_err("mdp3 clk enable fail\n");
+ mdp3_clk_unprepare();
+ return rc;
+ }
} else {
- rc = mdp3_clk_enable(false);
+ rc = mdp3_clk_enable(0, 1);
+ if (rc)
+ pr_err("mdp3 clk disable fail\n");
+ else
+ mdp3_clk_unprepare();
}
return rc;
}
@@ -517,19 +531,21 @@
goto off_error;
}
+ mdp3_clk_enable(1, 0);
+
mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);
rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
if (rc)
pr_debug("fail to stop the MDP3 dma\n");
+ mdp3_clk_enable(0, 0);
+
if (panel->event_handler)
rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
if (rc)
pr_err("fail to turn off the panel\n");
-
-
mdp3_irq_deregister();
pr_debug("mdp3_ctrl_off stop clock\n");
@@ -650,7 +666,7 @@
if (rc)
pr_err("fail to turn off panel\n");
- rc = mdp3_ctrl_res_req_clk(mfd, 0);
+ rc = mdp3_put_mdp_dsi_clk();
if (rc) {
pr_err("fail to release mdp clocks\n");
goto reset_error;
@@ -680,7 +696,7 @@
goto reset_error;
}
- rc = mdp3_ctrl_res_req_clk(mfd, 1);
+ rc = mdp3_get_mdp_dsi_clk();
if (rc) {
pr_err("fail to turn on mdp clks\n");
goto reset_error;
@@ -845,9 +861,11 @@
data = mdp3_bufq_pop(&mdp3_session->bufq_in);
if (data) {
+ mdp3_clk_enable(1, 0);
mdp3_session->dma->update(mdp3_session->dma,
(void *)data->addr,
mdp3_session->intf);
+ mdp3_clk_enable(0, 0);
mdp3_bufq_push(&mdp3_session->bufq_out, data);
}
@@ -912,6 +930,7 @@
goto pan_error;
}
+ mdp3_clk_enable(1, 0);
if (mfd->fbi->screen_base) {
mdp3_session->dma->update(mdp3_session->dma,
(void *)mfd->iova + offset,
@@ -920,6 +939,7 @@
pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
}
+ mdp3_clk_enable(0, 0);
if (mdp3_session->first_commit) {
/*wait for one frame time to ensure frame is sent to panel*/
@@ -931,6 +951,22 @@
mutex_unlock(&mdp3_session->lock);
}
+static int mdp3_set_metadata(struct msm_fb_data_type *mfd,
+ struct msmfb_metadata *metadata_ptr)
+{
+ int ret = 0;
+ switch (metadata_ptr->op) {
+ case metadata_op_crc:
+ ret = mdp3_misr_set(&metadata_ptr->data.misr_request);
+ break;
+ default:
+ pr_warn("Unsupported request to MDP SET META IOCTL.\n");
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
struct msmfb_metadata *metadata)
{
@@ -946,8 +982,11 @@
metadata->data.caps.vig_pipes = 0;
metadata->data.caps.dma_pipes = 1;
break;
+ case metadata_op_crc:
+ ret = mdp3_misr_get(&metadata->data.misr_request);
+ break;
default:
- pr_warn("Unsupported request to MDP META IOCTL.\n");
+ pr_warn("Unsupported request to MDP GET META IOCTL.\n");
ret = -EINVAL;
break;
}
@@ -1034,10 +1073,11 @@
if (session->histo_status) {
pr_err("mdp3_histogram_start already started\n");
- ret = -EBUSY;
- goto histogram_start_err;
+ mutex_unlock(&session->histo_lock);
+ return -EBUSY;
}
+ mdp3_clk_enable(1, 0);
ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_RESET);
if (ret) {
pr_err("mdp3_histogram_start reset error\n");
@@ -1063,6 +1103,8 @@
session->histo_status = 1;
histogram_start_err:
+ if (ret)
+ mdp3_clk_enable(0, 0);
mutex_unlock(&session->histo_lock);
return ret;
}
@@ -1086,6 +1128,7 @@
}
ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_CANCEL);
+ mdp3_clk_enable(0, 0);
if (ret)
pr_err("mdp3_histogram_stop error\n");
@@ -1199,7 +1242,9 @@
ccs.post_lv = data->csc_data.csc_post_lv;
mutex_lock(&session->lock);
+ mdp3_clk_enable(1, 0);
ret = session->dma->config_ccs(session->dma, &config, &ccs);
+ mdp3_clk_enable(0, 0);
mutex_unlock(&session->lock);
return ret;
}
@@ -1341,8 +1386,10 @@
return -EPERM;
}
+ mdp3_clk_enable(1, 0);
rc = mdp3_session->dma->config_lut(mdp3_session->dma, &lut_config,
&lut);
+ mdp3_clk_enable(0, 0);
if (rc)
pr_err("mdp3_ctrl_lut_update failed\n");
@@ -1400,11 +1447,19 @@
break;
case MSMFB_METADATA_GET:
rc = copy_from_user(&metadata, argp, sizeof(metadata));
- if (rc)
- return rc;
- rc = mdp3_get_metadata(mfd, &metadata);
+ if (!rc)
+ rc = mdp3_get_metadata(mfd, &metadata);
if (!rc)
rc = copy_to_user(argp, &metadata, sizeof(metadata));
+ if (rc)
+ pr_err("mdp3_get_metadata failed (%d)\n", rc);
+ break;
+ case MSMFB_METADATA_SET:
+ rc = copy_from_user(&metadata, argp, sizeof(metadata));
+ if (!rc)
+ rc = mdp3_set_metadata(mfd, &metadata);
+ if (rc)
+ pr_err("mdp3_set_metadata failed (%d)\n", rc);
break;
case MSMFB_OVERLAY_GET:
rc = copy_from_user(req, argp, sizeof(*req));
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
index 89f3e27..3a2c94b 100644
--- a/drivers/video/msm/mdss/mdp3_dma.c
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -828,6 +828,9 @@
MDP3_DMA_CALLBACK_TYPE_DMA_DONE);
mdp3_irq_disable(MDP3_INTR_LCDC_UNDERFLOW);
+ MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
+ MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
+
init_completion(&dma->dma_comp);
dma->vsync_client.handler = NULL;
return ret;
diff --git a/drivers/video/msm/mdss/mdp3_hwio.h b/drivers/video/msm/mdss/mdp3_hwio.h
index 8846ec5..b457c10 100644
--- a/drivers/video/msm/mdss/mdp3_hwio.h
+++ b/drivers/video/msm/mdss/mdp3_hwio.h
@@ -55,6 +55,7 @@
#define MDP3_REG_HW_VERSION 0x0070
#define MDP3_REG_SW_RESET 0x0074
+#define MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS 0x007C
/*EBI*/
#define MDP3_REG_EBI2_LCD0 0x003c
@@ -117,6 +118,46 @@
#define MDP3_REG_DMA_S_IBUF_Y_STRIDE 0xA000C
#define MDP3_REG_DMA_S_OUT_XY 0xA0010
+/*MISR*/
+#define MDP3_REG_MODE_CLK 0x000D0000
+#define MDP3_REG_MISR_RESET_CLK 0x000D0004
+#define MDP3_REG_EXPORT_MISR_CLK 0x000D0008
+#define MDP3_REG_MISR_CURR_VAL_CLK 0x000D000C
+#define MDP3_REG_MODE_HCLK 0x000D0100
+#define MDP3_REG_MISR_RESET_HCLK 0x000D0104
+#define MDP3_REG_EXPORT_MISR_HCLK 0x000D0108
+#define MDP3_REG_MISR_CURR_VAL_HCLK 0x000D010C
+#define MDP3_REG_MODE_DCLK 0x000D0200
+#define MDP3_REG_MISR_RESET_DCLK 0x000D0204
+#define MDP3_REG_EXPORT_MISR_DCLK 0x000D0208
+#define MDP3_REG_MISR_CURR_VAL_DCLK 0x000D020C
+#define MDP3_REG_CAPTURED_DCLK 0x000D0210
+#define MDP3_REG_MISR_CAPT_VAL_DCLK 0x000D0214
+#define MDP3_REG_MODE_TVCLK 0x000D0300
+#define MDP3_REG_MISR_RESET_TVCLK 0x000D0304
+#define MDP3_REG_EXPORT_MISR_TVCLK 0x000D0308
+#define MDP3_REG_MISR_CURR_VAL_TVCLK 0x000D030C
+#define MDP3_REG_CAPTURED_TVCLK 0x000D0310
+#define MDP3_REG_MISR_CAPT_VAL_TVCLK 0x000D0314
+
+/* Select DSI operation type(CMD/VIDEO) */
+#define MDP3_REG_MODE_DSI_PCLK 0x000D0400
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_CMD 0x10
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO1 0x20
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO2 0x30
+/* RESET DSI MISR STATE */
+#define MDP3_REG_MISR_RESET_DSI_PCLK 0x000D0404
+
+/* For reading MISR State(1) and driving data on test bus(0) */
+#define MDP3_REG_EXPORT_MISR_DSI_PCLK 0x000D0408
+/* Read MISR signature */
+#define MDP3_REG_MISR_CURR_VAL_DSI_PCLK 0x000D040C
+
+/* MISR status Bit0 (1) Capture Done */
+#define MDP3_REG_CAPTURED_DSI_PCLK 0x000D0410
+#define MDP3_REG_MISR_CAPT_VAL_DSI_PCLK 0x000D0414
+#define MDP3_REG_MISR_TESTBUS_CAPT_VAL 0x000D0600
+
/*interface*/
#define MDP3_REG_LCDC_EN 0xE0000
#define MDP3_REG_LCDC_HSYNC_CTL 0xE0004
diff --git a/drivers/video/msm/mdss/mdp3_ppp.c b/drivers/video/msm/mdss/mdp3_ppp.c
index 83787c3..a64a6b4 100644
--- a/drivers/video/msm/mdss/mdp3_ppp.c
+++ b/drivers/video/msm/mdss/mdp3_ppp.c
@@ -372,14 +372,14 @@
ib = (ab * 3) / 2;
}
mdp3_clk_set_rate(MDP3_CLK_CORE, rate, MDP3_CLIENT_PPP);
- rc = mdp3_clk_enable(on_off);
+ rc = mdp3_clk_enable(on_off, 0);
if (rc < 0) {
pr_err("%s: mdp3_clk_enable failed\n", __func__);
return rc;
}
rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
if (rc < 0) {
- mdp3_clk_enable(!on_off);
+ mdp3_clk_enable(!on_off, 0);
pr_err("%s: scale_set_quota failed\n", __func__);
return rc;
}
@@ -522,8 +522,8 @@
if (blit_op->dst.color_fmt == MDP_RGBA_8888)
blit_op->dst.color_fmt = MDP_RGBX_8888;
blit_op->solid_fill_color = (req->const_color.g & 0xFF)|
- (req->const_color.b & 0xFF) << 8 |
- (req->const_color.r & 0xFF) << 16 |
+ (req->const_color.r & 0xFF) << 8 |
+ (req->const_color.b & 0xFF) << 16 |
(req->const_color.alpha & 0xFF) << 24;
} else {
blit_op->solid_fill = false;
diff --git a/drivers/video/msm/mdss/mdss_debug.c b/drivers/video/msm/mdss/mdss_debug.c
index f933c8e..0d0240f 100644
--- a/drivers/video/msm/mdss/mdss_debug.c
+++ b/drivers/video/msm/mdss/mdss_debug.c
@@ -23,12 +23,13 @@
#include "mdss.h"
#include "mdss_mdp.h"
+#include "mdss_mdp_hwio.h"
#include "mdss_debug.h"
#define DEFAULT_BASE_REG_CNT 0x100
#define GROUP_BYTES 4
#define ROW_BYTES 16
-
+#define MAX_VSYNC_COUNT 0xFFFFFFF
struct mdss_debug_data {
struct dentry *root;
struct list_head base_list;
@@ -409,28 +410,63 @@
return 0;
}
+int vsync_count;
static struct mdss_mdp_misr_map {
u32 ctrl_reg;
u32 value_reg;
u32 crc_op_mode;
u32 crc_index;
- u32 crc_value[MISR_CRC_BATCH_SIZE];
+ bool use_ping;
+ bool is_ping_full;
+ bool is_pong_full;
+ struct mutex crc_lock;
+ u32 crc_ping[MISR_CRC_BATCH_SIZE];
+ u32 crc_pong[MISR_CRC_BATCH_SIZE];
} mdss_mdp_misr_table[DISPLAY_MISR_MAX] = {
[DISPLAY_MISR_DSI0] = {
.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_DSI0,
.value_reg = MDSS_MDP_LP_MISR_SIGN_DSI0,
+ .crc_op_mode = 0,
+ .crc_index = 0,
+ .use_ping = true,
+ .is_ping_full = false,
+ .is_pong_full = false,
},
[DISPLAY_MISR_DSI1] = {
.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_DSI1,
.value_reg = MDSS_MDP_LP_MISR_SIGN_DSI1,
+ .crc_op_mode = 0,
+ .crc_index = 0,
+ .use_ping = true,
+ .is_ping_full = false,
+ .is_pong_full = false,
},
[DISPLAY_MISR_EDP] = {
.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_EDP,
.value_reg = MDSS_MDP_LP_MISR_SIGN_EDP,
+ .crc_op_mode = 0,
+ .crc_index = 0,
+ .use_ping = true,
+ .is_ping_full = false,
+ .is_pong_full = false,
},
[DISPLAY_MISR_HDMI] = {
.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_HDMI,
.value_reg = MDSS_MDP_LP_MISR_SIGN_HDMI,
+ .crc_op_mode = 0,
+ .crc_index = 0,
+ .use_ping = true,
+ .is_ping_full = false,
+ .is_pong_full = false,
+ },
+ [DISPLAY_MISR_MDP] = {
+ .ctrl_reg = MDSS_MDP_LP_MISR_CTRL_MDP,
+ .value_reg = MDSS_MDP_LP_MISR_SIGN_MDP,
+ .crc_op_mode = 0,
+ .crc_index = 0,
+ .use_ping = true,
+ .is_ping_full = false,
+ .is_pong_full = false,
},
};
@@ -438,7 +474,7 @@
{
struct mdss_mdp_misr_map *map;
- if (block_id > DISPLAY_MISR_LCDC) {
+ if (block_id > DISPLAY_MISR_MDP) {
pr_err("MISR Block id (%d) out of range\n", block_id);
return NULL;
}
@@ -452,23 +488,51 @@
return map;
}
-int mdss_misr_crc_set(struct mdss_data_type *mdata, struct mdp_misr *req)
+int mdss_misr_set(struct mdss_data_type *mdata,
+ struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_misr_map *map;
- u32 config = 0;
-
+ struct mdss_mdp_mixer *mixer;
+ u32 config = 0, val = 0;
+ u32 mixer_num = 0;
+ bool is_valid_wb_mixer = true;
map = mdss_misr_get_map(req->block_id);
if (!map) {
pr_err("Invalid MISR Block=%d\n", req->block_id);
return -EINVAL;
}
-
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ if (req->block_id == DISPLAY_MISR_MDP) {
+ mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+ mixer_num = mixer->num;
+ pr_debug("SET MDP MISR BLK to MDSS_MDP_LP_MISR_SEL_LMIX%d_GC\n",
+ req->block_id);
+ switch (mixer_num) {
+ case MDSS_MDP_INTF_LAYERMIXER0:
+ pr_debug("Use Layer Mixer 0 for WB CRC\n");
+ val = MDSS_MDP_LP_MISR_SEL_LMIX0_GC;
+ break;
+ case MDSS_MDP_INTF_LAYERMIXER1:
+ pr_debug("Use Layer Mixer 1 for WB CRC\n");
+ val = MDSS_MDP_LP_MISR_SEL_LMIX1_GC;
+ break;
+ case MDSS_MDP_INTF_LAYERMIXER2:
+ pr_debug("Use Layer Mixer 2 for WB CRC\n");
+ val = MDSS_MDP_LP_MISR_SEL_LMIX2_GC;
+ break;
+ default:
+ pr_err("Invalid Layer Mixer %d selected for WB CRC\n",
+ mixer_num);
+ is_valid_wb_mixer = false;
+ break;
+ }
+ if (is_valid_wb_mixer)
+ writel_relaxed(val,
+ mdata->mdp_base + MDSS_MDP_LP_MISR_SEL);
+ }
+ vsync_count = 0;
map->crc_op_mode = req->crc_op_mode;
- memset(map->crc_value, 0, sizeof(map->crc_value));
-
- pr_debug("MISR Config (BlockId %d) (Frame Count = %d)\n",
- req->block_id, req->frame_count);
-
config = (MDSS_MDP_LP_MISR_CTRL_FRAME_COUNT_MASK & req->frame_count) |
(MDSS_MDP_LP_MISR_CTRL_ENABLE);
@@ -476,24 +540,32 @@
mdata->mdp_base + map->ctrl_reg);
/* ensure clear is done */
wmb();
- if (MISR_OP_BM == map->crc_op_mode) {
- writel_relaxed(MISR_CRC_BATCH_CFG,
- mdata->mdp_base + map->ctrl_reg);
- } else {
- writel_relaxed(config,
- mdata->mdp_base + map->ctrl_reg);
- config = readl_relaxed(mdata->mdp_base + map->ctrl_reg);
- pr_debug("MISR_CTRL = 0x%x", config);
+ memset(map->crc_ping, 0, sizeof(map->crc_ping));
+ memset(map->crc_pong, 0, sizeof(map->crc_pong));
+ map->crc_index = 0;
+ map->use_ping = true;
+ map->is_ping_full = false;
+ map->is_pong_full = false;
+
+ if (MISR_OP_BM != map->crc_op_mode) {
+
+ writel_relaxed(config,
+ mdata->mdp_base + map->ctrl_reg);
+ pr_debug("MISR_CTRL = 0x%x",
+ readl_relaxed(mdata->mdp_base + map->ctrl_reg));
}
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return 0;
}
-int mdss_misr_crc_get(struct mdss_data_type *mdata, struct mdp_misr *resp)
+int mdss_misr_get(struct mdss_data_type *mdata,
+ struct mdp_misr *resp,
+ struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_misr_map *map;
u32 status;
- int ret = 0;
+ int ret = -1;
int i;
map = mdss_misr_get_map(resp->block_id);
@@ -501,35 +573,60 @@
pr_err("Invalid MISR Block=%d\n", resp->block_id);
return -EINVAL;
}
-
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
switch (map->crc_op_mode) {
case MISR_OP_SFM:
case MISR_OP_MFM:
ret = readl_poll_timeout(mdata->mdp_base + map->ctrl_reg,
status, status & MDSS_MDP_LP_MISR_CTRL_STATUS,
MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
-
- pr_debug("Status of Get MISR_CTRL = 0x%x", status);
if (ret == 0) {
- resp->crc_value[0] =
- readl_relaxed(mdata->mdp_base + map->value_reg);
+ resp->crc_value[0] = readl_relaxed(mdata->mdp_base +
+ map->value_reg);
pr_debug("CRC %d=0x%x\n", resp->block_id,
- resp->crc_value[0]);
+ resp->crc_value[0]);
+ writel_relaxed(0, mdata->mdp_base + map->ctrl_reg);
} else {
- pr_warn("MISR %d busy with status 0x%x\n",
- resp->block_id, status);
+ mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+ ret = readl_poll_timeout(mdata->mdp_base +
+ map->ctrl_reg, status,
+ status & MDSS_MDP_LP_MISR_CTRL_STATUS,
+ MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+ if (ret == 0) {
+ resp->crc_value[0] =
+ readl_relaxed(mdata->mdp_base +
+ map->value_reg);
+ }
+ writel_relaxed(0, mdata->mdp_base + map->ctrl_reg);
}
break;
case MISR_OP_BM:
- for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
- resp->crc_value[i] = map->crc_value[i];
- map->crc_index = 0;
+ if (map->is_ping_full) {
+ for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
+ resp->crc_value[i] = map->crc_ping[i];
+ memset(map->crc_ping, 0, sizeof(map->crc_ping));
+ map->is_ping_full = false;
+ ret = 0;
+ } else if (map->is_pong_full) {
+ for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
+ resp->crc_value[i] = map->crc_pong[i];
+ memset(map->crc_pong, 0, sizeof(map->crc_pong));
+ map->is_pong_full = false;
+ ret = 0;
+ } else {
+ pr_debug("mdss_mdp_misr_crc_get PING BUF %s\n",
+ map->is_ping_full ? "FULL" : "EMPTRY");
+ pr_debug("mdss_mdp_misr_crc_get PONG BUF %s\n",
+ map->is_pong_full ? "FULL" : "EMPTRY");
+ }
+ resp->crc_op_mode = map->crc_op_mode;
break;
default:
ret = -ENOSYS;
break;
}
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return ret;
}
@@ -537,22 +634,71 @@
void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id)
{
struct mdss_mdp_misr_map *map;
- u32 status, config;
+ u32 status = 0;
+ u32 crc = 0x0BAD0BAD;
+ bool crc_stored = false;
map = mdss_misr_get_map(block_id);
if (!map || (map->crc_op_mode != MISR_OP_BM))
return;
- config = MISR_CRC_BATCH_CFG;
-
status = readl_relaxed(mdata->mdp_base + map->ctrl_reg);
- if (status & MDSS_MDP_LP_MISR_CTRL_STATUS) {
- map->crc_value[map->crc_index] =
- readl_relaxed(mdata->mdp_base + map->value_reg);
- map->crc_index++;
- if (map->crc_index == MISR_CRC_BATCH_SIZE)
- map->crc_index = 0;
- config |= MDSS_MDP_LP_MISR_CTRL_STATUS_CLEAR;
+ if (MDSS_MDP_LP_MISR_CTRL_STATUS & status) {
+ crc = readl_relaxed(mdata->mdp_base + map->value_reg);
+ if (map->use_ping) {
+ if (map->is_ping_full) {
+ pr_err("PING Buffer FULL\n");
+ } else {
+ map->crc_ping[map->crc_index] = crc;
+ crc_stored = true;
+ }
+ } else {
+ if (map->is_pong_full) {
+ pr_err("PONG Buffer FULL\n");
+ } else {
+ map->crc_pong[map->crc_index] = crc;
+ crc_stored = true;
+ }
+ }
+
+ if (crc_stored) {
+ map->crc_index = (map->crc_index + 1);
+ if (map->crc_index == MISR_CRC_BATCH_SIZE) {
+ map->crc_index = 0;
+ if (true == map->use_ping) {
+ map->is_ping_full = true;
+ map->use_ping = false;
+ } else {
+ map->is_pong_full = true;
+ map->use_ping = true;
+ }
+ pr_debug("USE BUFF %s\n", map->use_ping ?
+ "PING" : "PONG");
+ pr_debug("mdss_misr_crc_collect PING BUF %s\n",
+ map->is_ping_full ? "FULL" : "EMPTRY");
+ pr_debug("mdss_misr_crc_collect PONG BUF %s\n",
+ map->is_pong_full ? "FULL" : "EMPTRY");
+ }
+ } else {
+ pr_err("CRC(%d) Not saved\n", crc);
+ }
+
+ writel_relaxed(MDSS_MDP_LP_MISR_CTRL_STATUS_CLEAR,
+ mdata->mdp_base + map->ctrl_reg);
+ writel_relaxed(MISR_CRC_BATCH_CFG,
+ mdata->mdp_base + map->ctrl_reg);
+ } else if (0 == status) {
+ writel_relaxed(MISR_CRC_BATCH_CFG,
+ mdata->mdp_base + map->ctrl_reg);
+ pr_debug("$$ Batch CRC Start $$\n");
}
- writel_relaxed(config, mdata->mdp_base + map->ctrl_reg);
+ pr_debug("$$ Vsync Count = %d, CRC=0x%x Indx = %d$$\n",
+ vsync_count, crc, map->crc_index);
+
+ if (MAX_VSYNC_COUNT == vsync_count) {
+ pr_err("RESET vsync_count(%d)\n", vsync_count);
+ vsync_count = 0;
+ } else {
+ vsync_count += 1;
+ }
}
diff --git a/drivers/video/msm/mdss/mdss_debug.h b/drivers/video/msm/mdss/mdss_debug.h
index 29eb16c..984caab 100644
--- a/drivers/video/msm/mdss/mdss_debug.h
+++ b/drivers/video/msm/mdss/mdss_debug.h
@@ -16,30 +16,35 @@
#include "mdss.h"
-#define MISR_POLL_SLEEP 2000
-#define MISR_POLL_TIMEOUT 32000
-#define MISR_CRC_BATCH_SIZE 32
-#define MISR_CRC_BATCH_CFG 0x101
+#define MISR_POLL_SLEEP 2000
+#define MISR_POLL_TIMEOUT 32000
+#define MISR_CRC_BATCH_CFG 0x101
#ifdef CONFIG_DEBUG_FS
int mdss_debugfs_init(struct mdss_data_type *mdata);
int mdss_debugfs_remove(struct mdss_data_type *mdata);
int mdss_debug_register_base(const char *name, void __iomem *base,
size_t max_offset);
-int mdss_misr_crc_set(struct mdss_data_type *mdata, struct mdp_misr *req);
-int mdss_misr_crc_get(struct mdss_data_type *mdata, struct mdp_misr *resp);
+int mdss_misr_set(struct mdss_data_type *mdata, struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl);
+int mdss_misr_get(struct mdss_data_type *mdata, struct mdp_misr *resp,
+ struct mdss_mdp_ctl *ctl);
void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id);
#else
static inline int mdss_debugfs_init(struct mdss_data_type *mdata) { return 0; }
static inline int mdss_debugfs_remove(struct mdss_data_type *mdata)
{ return 0; }
static inline int mdss_debug_register_base(const char *name, void __iomem *base,
- size_t max_offset) { return 0; }
-static inline int mdss_misr_crc_set(struct mdss_data_type *mdata,
- struct mdp_misr *reg) { return 0; }
-static inline int mdss_misr_crc_get(struct mdss_data_type *mdata,
- struct mdp_misr *resp) { return 0; }
+ size_t max_offset) { return 0; }
+static inline int mdss_misr_set(struct mdss_data_type *mdata,
+ struct mdp_misr *req,
+ struct mdss_mdp_ctl *ctl)
+{ return 0; }
+static inline int mdss_misr_get(struct mdss_data_type *mdata,
+ struct mdp_misr *resp,
+ struct mdss_mdp_ctl *ctl)
+{ return 0; }
static inline void mdss_misr_crc_collect(struct mdss_data_type *mdata,
- int block_id) { }
+ int block_id) { }
#endif
#endif /* MDSS_DEBUG_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index bb1f8ae..d33aefa 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -377,19 +377,24 @@
pinfo = &pdata->panel_info;
- ret = mdss_dsi_panel_power_on(pdata, 1);
+ ret = msm_dss_enable_vreg(ctrl_pdata->power_data.vreg_config,
+ ctrl_pdata->power_data.num_vreg, 1);
if (ret) {
- pr_err("%s: Panel power on failed\n", __func__);
+ pr_err("%s:Failed to enable vregs. rc=%d\n", __func__, ret);
return ret;
}
pdata->panel_info.panel_power_on = 1;
+ if (!pdata->panel_info.mipi.lp11_init)
+ mdss_dsi_panel_reset(pdata, 1);
+
ret = mdss_dsi_enable_bus_clocks(ctrl_pdata);
if (ret) {
pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
ret);
mdss_dsi_panel_power_on(pdata, 0);
+ pdata->panel_info.panel_power_on = 0;
return ret;
}
@@ -470,6 +475,16 @@
mdss_dsi_sw_reset(pdata);
mdss_dsi_host_init(mipi, pdata);
+ /*
+ * Issue hardware reset line after enabling the DSI clocks and
+ * data lanes for LP11 init
+ */
+ if (pdata->panel_info.mipi.lp11_init)
+ mdss_dsi_panel_reset(pdata, 1);
+
+ if (pdata->panel_info.mipi.init_delay)
+ usleep(pdata->panel_info.mipi.init_delay);
+
if (mipi->force_clk_lane_hs) {
u32 tmp;
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 7202c62..947923a 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -89,6 +89,17 @@
DSI_HS_MODE,
};
+enum dsi_lane_map_type {
+ DSI_LANE_MAP_0123,
+ DSI_LANE_MAP_3012,
+ DSI_LANE_MAP_2301,
+ DSI_LANE_MAP_1230,
+ DSI_LANE_MAP_0321,
+ DSI_LANE_MAP_1032,
+ DSI_LANE_MAP_2103,
+ DSI_LANE_MAP_3210,
+};
+
#define CTRL_STATE_UNKNOWN 0x00
#define CTRL_STATE_PANEL_INIT BIT(0)
#define CTRL_STATE_MDP_ACTIVE BIT(1)
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index 890066e..59e52b9 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -366,6 +366,49 @@
return 0;
}
+static void mdss_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap)
+{
+ const char *data;
+
+ *dlane_swap = DSI_LANE_MAP_0123;
+ data = of_get_property(np, "qcom,mdss-dsi-lane-map", NULL);
+ if (data) {
+ if (!strcmp(data, "lane_map_3012"))
+ *dlane_swap = DSI_LANE_MAP_3012;
+ else if (!strcmp(data, "lane_map_2301"))
+ *dlane_swap = DSI_LANE_MAP_2301;
+ else if (!strcmp(data, "lane_map_1230"))
+ *dlane_swap = DSI_LANE_MAP_1230;
+ else if (!strcmp(data, "lane_map_0321"))
+ *dlane_swap = DSI_LANE_MAP_0321;
+ else if (!strcmp(data, "lane_map_1032"))
+ *dlane_swap = DSI_LANE_MAP_1032;
+ else if (!strcmp(data, "lane_map_2103"))
+ *dlane_swap = DSI_LANE_MAP_2103;
+ else if (!strcmp(data, "lane_map_3210"))
+ *dlane_swap = DSI_LANE_MAP_3210;
+ }
+}
+
+static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
+ char *trigger_key)
+{
+ const char *data;
+
+ *trigger = DSI_CMD_TRIGGER_SW;
+ data = of_get_property(np, trigger_key, NULL);
+ if (data) {
+ if (!strcmp(data, "none"))
+ *trigger = DSI_CMD_TRIGGER_NONE;
+ else if (!strcmp(data, "trigger_te"))
+ *trigger = DSI_CMD_TRIGGER_TE;
+ else if (!strcmp(data, "trigger_sw_seof"))
+ *trigger = DSI_CMD_TRIGGER_SW_SEOF;
+ else if (!strcmp(data, "trigger_sw_te"))
+ *trigger = DSI_CMD_TRIGGER_SW_TE;
+ }
+}
+
static int mdss_dsi_parse_dcs_cmds(struct device_node *np,
struct dsi_panel_cmds *pcmds, char *cmd_key, char *link_key)
@@ -652,8 +695,10 @@
data = of_get_property(np, "qcom,mdss-dsi-panel-type", NULL);
if (data && !strncmp(data, "dsi_cmd_mode", 12))
pinfo->mipi.mode = DSI_CMD_MODE;
- rc = of_property_read_u32(np, "qcom,mdss-dsi-pixel-packing", &tmp);
- tmp = (!rc ? tmp : 0);
+ tmp = 0;
+ data = of_get_property(np, "qcom,mdss-dsi-pixel-packing", NULL);
+ if (data && !strcmp(data, "loose"))
+ tmp = 1;
rc = mdss_panel_dt_get_dst_fmt(pinfo->bpp,
pinfo->mipi.mode, tmp,
&(pinfo->mipi.dst_format));
@@ -761,10 +806,14 @@
"qcom,mdss-dsi-bllp-power-mode");
pinfo->mipi.eof_bllp_power_stop = of_property_read_bool(
np, "qcom,mdss-dsi-bllp-eof-power-mode");
- rc = of_property_read_u32(np,
- "qcom,mdss-dsi-traffic-mode", &tmp);
- pinfo->mipi.traffic_mode =
- (!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
+ pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+ data = of_get_property(np, "qcom,mdss-dsi-traffic-mode", NULL);
+ if (data) {
+ if (!strcmp(data, "non_burst_sync_event"))
+ pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT;
+ else if (!strcmp(data, "burst_mode"))
+ pinfo->mipi.traffic_mode = DSI_BURST_MODE;
+ }
rc = of_property_read_u32(np,
"qcom,mdss-dsi-te-dcs-command", &tmp);
pinfo->mipi.insert_dcs_cmd =
@@ -783,8 +832,20 @@
(!rc ? tmp : 1);
rc = of_property_read_u32(np, "qcom,mdss-dsi-virtual-channel-id", &tmp);
pinfo->mipi.vc = (!rc ? tmp : 0);
- rc = of_property_read_u32(np, "qcom,mdss-dsi-color-order", &tmp);
- pinfo->mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB);
+ pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	data = of_get_property(np, "qcom,mdss-dsi-color-order", NULL);
+ if (data) {
+ if (!strcmp(data, "rgb_swap_rbg"))
+ pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RBG;
+ else if (!strcmp(data, "rgb_swap_bgr"))
+ pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+ else if (!strcmp(data, "rgb_swap_brg"))
+ pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BRG;
+ else if (!strcmp(data, "rgb_swap_grb"))
+ pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GRB;
+ else if (!strcmp(data, "rgb_swap_gbr"))
+ pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GBR;
+ }
pinfo->mipi.data_lane0 = of_property_read_bool(np,
"qcom,mdss-dsi-lane-0-state");
pinfo->mipi.data_lane1 = of_property_read_bool(np,
@@ -794,9 +855,6 @@
pinfo->mipi.data_lane3 = of_property_read_bool(np,
"qcom,mdss-dsi-lane-3-state");
- rc = of_property_read_u32(np, "qcom,mdss-dsi-lane-map", &tmp);
- pinfo->mipi.dlane_swap = (!rc ? tmp : 0);
-
rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-pre", &tmp);
pinfo->mipi.t_clk_pre = (!rc ? tmp : 0x24);
rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-post", &tmp);
@@ -805,26 +863,7 @@
rc = of_property_read_u32(np, "qcom,mdss-dsi-stream", &tmp);
pinfo->mipi.stream = (!rc ? tmp : 0);
- rc = of_property_read_u32(np, "qcom,mdss-dsi-mdp-trigger", &tmp);
- pinfo->mipi.mdp_trigger =
- (!rc ? tmp : DSI_CMD_TRIGGER_SW);
- if (pinfo->mipi.mdp_trigger > 6) {
- pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger",
- __func__, __LINE__);
- pinfo->mipi.mdp_trigger =
- DSI_CMD_TRIGGER_SW;
- }
-
- rc = of_property_read_u32(np, "qcom,mdss-dsi-dma-trigger", &tmp);
- pinfo->mipi.dma_trigger =
- (!rc ? tmp : DSI_CMD_TRIGGER_SW);
- if (pinfo->mipi.dma_trigger > 6) {
- pr_err("%s:%d, Invalid dma trigger. Forcing to sw trigger",
- __func__, __LINE__);
- pinfo->mipi.dma_trigger =
- DSI_CMD_TRIGGER_SW;
- }
- data = of_get_property(np, "qcom,mdss-dsi-panel-mode-gpio-state", &tmp);
+ data = of_get_property(np, "qcom,mdss-dsi-panel-mode-gpio-state", NULL);
if (data) {
if (!strcmp(data, "high"))
pinfo->mode_gpio_state = MODE_GPIO_HIGH;
@@ -847,8 +886,21 @@
for (i = 0; i < len; i++)
pinfo->mipi.dsi_phy_db.timing[i] = data[i];
+ pinfo->mipi.lp11_init = of_property_read_bool(np,
+ "qcom,mdss-dsi-lp11-init");
+ rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
+ pinfo->mipi.init_delay = (!rc ? tmp : 0);
+
mdss_dsi_parse_fbc_params(np, pinfo);
+ mdss_dsi_parse_trigger(np, &(pinfo->mipi.mdp_trigger),
+ "qcom,mdss-dsi-mdp-trigger");
+
+ mdss_dsi_parse_trigger(np, &(pinfo->mipi.dma_trigger),
+ "qcom,mdss-dsi-dma-trigger");
+
+ mdss_dsi_parse_lane_swap(np, &(pinfo->mipi.dlane_swap));
+
mdss_dsi_parse_reset_seq(np, pinfo->rst_seq, &(pinfo->rst_seq_len),
"qcom,mdss-dsi-reset-sequence");
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 81473db..5408bc3 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -92,7 +92,7 @@
unsigned long val, void *data);
static int __mdss_fb_display_thread(void *data);
-static void mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
+static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
int event, void *arg);
void mdss_fb_no_update_notify_timer_cb(unsigned long data)
@@ -297,6 +297,7 @@
{
struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+ mfd->shutdown_pending = true;
lock_fb_info(mfd->fbi);
mdss_fb_release_all(mfd->fbi, true);
unlock_fb_info(mfd->fbi);
@@ -779,13 +780,18 @@
u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ int ret = 0;
if (!start) {
pr_warn("No framebuffer memory is allocated.\n");
return -ENOMEM;
}
- mdss_fb_pan_idle(mfd);
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret) {
+ pr_err("Shutdown pending. Aborting operation\n");
+ return ret;
+ }
/* Set VM flags. */
start &= PAGE_MASK;
@@ -1120,8 +1126,6 @@
mfd->index, fbi->var.xres, fbi->var.yres,
fbi->fix.smem_len);
- kthread_run(__mdss_fb_display_thread, mfd, "mdss_fb%d", mfd->index);
-
return 0;
}
@@ -1132,6 +1136,11 @@
int result;
int pid = current->tgid;
+ if (mfd->shutdown_pending) {
+ pr_err("Shutdown pending. Aborting operation\n");
+ return -EPERM;
+ }
+
list_for_each_entry(pinfo, &mfd->proc_list, list) {
if (pinfo->pid == pid)
break;
@@ -1151,23 +1160,47 @@
result = pm_runtime_get_sync(info->dev);
- if (result < 0)
+ if (result < 0) {
pr_err("pm_runtime: fail to wake up\n");
+ goto pm_error;
+ }
if (!mfd->ref_cnt) {
+ mfd->disp_thread = kthread_run(__mdss_fb_display_thread, mfd,
+ "mdss_fb%d", mfd->index);
+ if (IS_ERR(mfd->disp_thread)) {
+ pr_err("unable to start display thread %d\n",
+ mfd->index);
+ result = PTR_ERR(mfd->disp_thread);
+ goto thread_error;
+ }
+
result = mdss_fb_blank_sub(FB_BLANK_UNBLANK, info,
mfd->op_enable);
if (result) {
- pm_runtime_put(info->dev);
pr_err("can't turn on fb%d! rc=%d\n", mfd->index,
result);
- return result;
+ goto blank_error;
}
}
pinfo->ref_cnt++;
mfd->ref_cnt++;
+
return 0;
+
+blank_error:
+ kthread_stop(mfd->disp_thread);
+
+thread_error:
+ if (pinfo && !pinfo->ref_cnt) {
+ list_del(&pinfo->list);
+ kfree(pinfo);
+ }
+ pm_runtime_put(info->dev);
+
+pm_error:
+ return result;
}
static int mdss_fb_release_all(struct fb_info *info, bool release_all)
@@ -1176,9 +1209,12 @@
struct mdss_fb_proc_info *pinfo = NULL, *temp_pinfo = NULL;
int ret = 0;
int pid = current->tgid;
+ bool unknown_pid = true, release_needed = false;
+ struct task_struct *task = current->group_leader;
if (!mfd->ref_cnt) {
- pr_info("try to close unopened fb %d!\n", mfd->index);
+ pr_info("try to close unopened fb %d! from %s\n", mfd->index,
+ task->comm);
return -EINVAL;
}
@@ -1190,12 +1226,15 @@
if (!release_all && (pinfo->pid != pid))
continue;
- pr_debug("found process entry pid=%d ref=%d\n", pinfo->pid,
- pinfo->ref_cnt);
+ unknown_pid = false;
+
+ pr_debug("found process %s pid=%d mfd->ref=%d pinfo->ref=%d\n",
+ task->comm, mfd->ref_cnt, pinfo->pid, pinfo->ref_cnt);
do {
if (mfd->ref_cnt < pinfo->ref_cnt)
- pr_warn("WARN:mfd->ref_cnt < pinfo->ref_cnt\n");
+ pr_warn("WARN:mfd->ref=%d < pinfo->ref=%d\n",
+ mfd->ref_cnt, pinfo->ref_cnt);
else
mfd->ref_cnt--;
@@ -1203,24 +1242,52 @@
pm_runtime_put(info->dev);
} while (release_all && pinfo->ref_cnt);
+ if (release_all)
+ kthread_stop(mfd->disp_thread);
+
if (pinfo->ref_cnt == 0) {
- if (mfd->mdp.release_fnc) {
- ret = mfd->mdp.release_fnc(mfd);
- if (ret)
- pr_err("error releasing fb%d pid=%d\n",
- mfd->index, pinfo->pid);
- }
list_del(&pinfo->list);
kfree(pinfo);
+ release_needed = !release_all;
+ }
+
+ if (!release_all)
+ break;
+ }
+
+ if (release_needed) {
+ pr_debug("known process %s pid=%d mfd->ref=%d\n",
+ task->comm, pid, mfd->ref_cnt);
+
+ if (mfd->mdp.release_fnc) {
+ ret = mfd->mdp.release_fnc(mfd, false);
+ if (ret)
+ pr_err("error releasing fb%d pid=%d\n",
+ mfd->index, pid);
+ }
+ } else if (unknown_pid || release_all) {
+ pr_warn("unknown process %s pid=%d mfd->ref=%d\n",
+ task->comm, pid, mfd->ref_cnt);
+
+ if (mfd->ref_cnt)
+ mfd->ref_cnt--;
+
+ if (mfd->mdp.release_fnc) {
+ ret = mfd->mdp.release_fnc(mfd, true);
+ if (ret)
+ pr_err("error fb%d release process %s pid=%d\n",
+ mfd->index, task->comm, pid);
}
}
if (!mfd->ref_cnt) {
+ kthread_stop(mfd->disp_thread);
+
ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
mfd->op_enable);
if (ret) {
- pr_err("can't turn off fb%d! rc=%d\n",
- mfd->index, ret);
+ pr_err("can't turn off fb%d! rc=%d process %s pid=%d\n",
+ mfd->index, ret, task->comm, pid);
return ret;
}
}
@@ -1392,21 +1459,28 @@
* hardware configuration. After this function returns it is safe to perform
* software updates for next frame.
*/
-static void mdss_fb_pan_idle(struct msm_fb_data_type *mfd)
+static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd)
{
- int ret;
+ int ret = 0;
ret = wait_event_timeout(mfd->idle_wait_q,
- !atomic_read(&mfd->commits_pending),
+ (!atomic_read(&mfd->commits_pending) ||
+ mfd->shutdown_pending),
msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
if (!ret) {
pr_err("wait for idle timeout %d pending=%d\n",
ret, atomic_read(&mfd->commits_pending));
mdss_fb_signal_timeline(&mfd->mdp_sync_pt_data);
+ } else if (mfd->shutdown_pending) {
+ pr_debug("Shutdown signalled\n");
+ return -EPERM;
}
+
+ return 0;
}
+
static int mdss_fb_pan_display_ex(struct fb_info *info,
struct mdp_display_commit *disp_commit)
{
@@ -1424,7 +1498,11 @@
if (var->yoffset > (info->var.yres_virtual - info->var.yres))
return -EINVAL;
- mdss_fb_pan_idle(mfd);
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret) {
+ pr_err("Shutdown pending. Aborting operation\n");
+ return ret;
+ }
mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
if (info->fix.xpanstep)
@@ -1556,14 +1634,20 @@
while (1) {
wait_event(mfd->commit_wait_q,
- atomic_read(&mfd->commits_pending));
+ (atomic_read(&mfd->commits_pending) ||
+ kthread_should_stop()));
+
+ if (kthread_should_stop())
+ break;
ret = __mdss_fb_perform_commit(mfd);
-
atomic_dec(&mfd->commits_pending);
wake_up_all(&mfd->idle_wait_q);
}
+ atomic_set(&mfd->commits_pending, 0);
+ wake_up_all(&mfd->idle_wait_q);
+
return ret;
}
@@ -1686,8 +1770,14 @@
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_var_screeninfo *var = &info->var;
int old_imgType;
+ int ret = 0;
- mdss_fb_pan_idle(mfd);
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret) {
+ pr_err("Shutdown pending. Aborting operation\n");
+ return ret;
+ }
+
old_imgType = mfd->fb_imgType;
switch (var->bits_per_pixel) {
case 16:
@@ -1734,7 +1824,7 @@
mfd->panel_reconfig = false;
}
- return 0;
+ return ret;
}
int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state)
@@ -1975,8 +2065,15 @@
mfd = (struct msm_fb_data_type *)info->par;
mdss_fb_power_setting_idle(mfd);
if ((cmd != MSMFB_VSYNC_CTRL) && (cmd != MSMFB_OVERLAY_VSYNC_CTRL) &&
- (cmd != MSMFB_ASYNC_BLIT) && (cmd != MSMFB_BLIT))
- mdss_fb_pan_idle(mfd);
+ (cmd != MSMFB_ASYNC_BLIT) && (cmd != MSMFB_BLIT) &&
+ (cmd != MSMFB_NOTIFY_UPDATE)) {
+ ret = mdss_fb_pan_idle(mfd);
+ if (ret) {
+ pr_debug("Shutdown pending. Aborting operation %x\n",
+ cmd);
+ return ret;
+ }
+ }
switch (cmd) {
case MSMFB_CURSOR:
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 8213dbe..e245dd3 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -106,7 +106,7 @@
int (*on_fnc)(struct msm_fb_data_type *mfd);
int (*off_fnc)(struct msm_fb_data_type *mfd);
/* called to release resources associated to the process */
- int (*release_fnc)(struct msm_fb_data_type *mfd);
+ int (*release_fnc)(struct msm_fb_data_type *mfd, bool release_all);
int (*kickoff_fnc)(struct msm_fb_data_type *mfd,
struct mdp_display_commit *data);
int (*ioctl_handler)(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
@@ -195,9 +195,11 @@
struct msm_sync_pt_data mdp_sync_pt_data;
/* for non-blocking */
+ struct task_struct *disp_thread;
atomic_t commits_pending;
wait_queue_head_t commit_wait_q;
wait_queue_head_t idle_wait_q;
+ bool shutdown_pending;
struct msm_fb_backup_type msm_fb_backup;
struct completion power_set_comp;
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index ee4db4f..11dd134 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -1060,15 +1060,16 @@
#define SPRINT(fmt, ...) \
(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
- SPRINT("mdp_version=5 hw_rev=%d\n", mdata->mdp_rev);
+ SPRINT("mdp_version=5\n");
+ SPRINT("hw_rev=%d\n", mdata->mdp_rev);
SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
SPRINT("smp_size=%d\n", mdata->smp_mb_size);
- SPRINT("max downscale ratio=%d\n", MAX_DOWNSCALE_RATIO);
- SPRINT("max upscale ratio=%d\n", MAX_UPSCALE_RATIO);
- SPRINT("features:");
+ SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
+ SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);
+ SPRINT("features=");
if (mdata->has_bwc)
SPRINT(" bwc");
if (mdata->has_decimation)
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 161eead..41a5d56 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -361,6 +361,8 @@
u8 blend_op;
u8 overfetch_disable;
u32 transp;
+ u32 bg_color;
+ u8 has_buf;
struct msm_fb_data_type *mfd;
struct mdss_mdp_mixer *mixer;
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index aa7c4dd..19ed857 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1782,6 +1782,11 @@
{
int ret;
+ if (!ctl) {
+ pr_err("invalid ctl\n");
+ return -ENODEV;
+ }
+
ret = mutex_lock_interruptible(&ctl->lock);
if (ret)
return ret;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 17f8338..82b8b55 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -504,6 +504,7 @@
} else {
pipe->overfetch_disable = 0;
}
+ pipe->bg_color = req->bg_color;
req->id = pipe->ndx;
pipe->req_data = *req;
@@ -596,6 +597,7 @@
}
pipe->params_changed++;
+ pipe->has_buf = 0;
req->vert_deci = pipe->vert_deci;
@@ -1027,8 +1029,9 @@
} else if (pipe->front_buf.num_planes) {
buf = &pipe->front_buf;
} else {
- pr_warn("pipe queue w/o buffer\n");
- continue;
+ pr_debug("no buf detected pnum=%d use solid fill\n",
+ pipe->num);
+ buf = NULL;
}
ret = mdss_mdp_pipe_queue_data(pipe, buf);
@@ -1153,11 +1156,13 @@
/**
* mdss_mdp_overlay_release_all() - release any overlays associated with fb dev
* @mfd: Msm frame buffer structure associated with fb device
+ * @release_all: ignore pid and release all the pipes
*
* Release any resources allocated by calling process, this can be called
* on fb_release to release any overlays/rotator sessions left open.
*/
-static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
+static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
+ bool release_all)
{
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_rotator_session *rot, *tmp;
@@ -1171,7 +1176,7 @@
mutex_lock(&mdp5_data->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
- if (!mfd->ref_cnt || (pipe->pid == pid)) {
+ if (release_all || (pipe->pid == pid)) {
unset_ndx |= pipe->ndx;
cnt++;
}
@@ -1183,6 +1188,9 @@
cnt++;
}
+ pr_debug("release_all=%d mfd->ref_cnt=%d unset_ndx=0x%x cnt=%d\n",
+ release_all, mfd->ref_cnt, unset_ndx, cnt);
+
mutex_unlock(&mfd->lock);
if (unset_ndx) {
@@ -1251,6 +1259,7 @@
if (IS_ERR_VALUE(ret)) {
pr_err("src_data pmem error\n");
}
+ pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
return ret;
@@ -1493,6 +1502,7 @@
buf->p[0].addr += offset;
buf->p[0].len = fbi->fix.smem_len - offset;
buf->num_planes = 1;
+ pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
if (fbi->var.xres > MAX_MIXER_WIDTH || mfd->split_display) {
@@ -1507,6 +1517,7 @@
goto pan_display_error;
}
pipe->back_buf = *buf;
+ pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
}
mutex_unlock(&mdp5_data->ov_lock);
@@ -2097,7 +2108,10 @@
struct msmfb_metadata *metadata)
{
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret = 0;
+ if (!ctl)
+ return -EPERM;
switch (metadata->op) {
case metadata_op_vic:
if (mfd->panel_info)
@@ -2109,7 +2123,7 @@
case metadata_op_crc:
if (!mfd->panel_power_on)
return -EPERM;
- ret = mdss_misr_crc_set(mdata, &metadata->data.misr_request);
+ ret = mdss_misr_set(mdata, &metadata->data.misr_request, ctl);
break;
case metadata_op_wb_format:
ret = mdss_mdp_wb_set_format(mfd,
@@ -2145,7 +2159,10 @@
struct msmfb_metadata *metadata)
{
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret = 0;
+ if (!ctl)
+ return -EPERM;
switch (metadata->op) {
case metadata_op_frame_rate:
metadata->data.panel_frame_rate =
@@ -2157,7 +2174,7 @@
case metadata_op_crc:
if (!mfd->panel_power_on)
return -EPERM;
- ret = mdss_misr_crc_get(mdata, &metadata->data.misr_request);
+ ret = mdss_misr_get(mdata, &metadata->data.misr_request, ctl);
break;
case metadata_op_wb_format:
ret = mdss_mdp_wb_get_format(mfd, &metadata->data.mixer_cfg);
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 25cb9dd..78e7d29 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -924,6 +924,8 @@
secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, format);
+ mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR,
+ pipe->bg_color);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
return 0;
@@ -962,7 +964,8 @@
(pipe->mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK)
&& (ctl->mdata->mixer_switched)) ||
ctl->roi_changed;
- if (src_data == NULL) {
+ if (src_data == NULL || !pipe->has_buf) {
+ pipe->params_changed = 0;
mdss_mdp_pipe_solidfill_setup(pipe);
goto update_nobuf;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index b680823..c3e1916 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -200,14 +200,20 @@
mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI);
}
- if (isr & MDSS_MDP_INTR_WB_0_DONE)
+ if (isr & MDSS_MDP_INTR_WB_0_DONE) {
mdss_mdp_intr_done(MDP_INTR_WB_0);
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP);
+ }
- if (isr & MDSS_MDP_INTR_WB_1_DONE)
+ if (isr & MDSS_MDP_INTR_WB_1_DONE) {
mdss_mdp_intr_done(MDP_INTR_WB_1);
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP);
+ }
- if (isr & MDSS_MDP_INTR_WB_2_DONE)
+ if (isr & MDSS_MDP_INTR_WB_2_DONE) {
mdss_mdp_intr_done(MDP_INTR_WB_2);
+ mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP);
+ }
mdp_isr_done:
hist_isr = MDSS_MDP_REG_READ(MDSS_MDP_REG_HIST_INTR_STATUS);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index b859598..65275db 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -222,6 +222,9 @@
char vsync_enable;
char hw_vsync_mode;
+
+ char lp11_init;
+ u32 init_delay;
};
enum dynamic_fps_update {
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 4bbd07a..8da837b 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -90,8 +90,8 @@
* unusual file system layouts.
*/
if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
- block_cluster = EXT4_B2C(sbi, (start -
- ext4_block_bitmap(sb, gdp)));
+ block_cluster = EXT4_B2C(sbi,
+ ext4_block_bitmap(sb, gdp) - start);
if (block_cluster < num_clusters)
block_cluster = -1;
else if (block_cluster == num_clusters) {
@@ -102,7 +102,7 @@
if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
inode_cluster = EXT4_B2C(sbi,
- start - ext4_inode_bitmap(sb, gdp));
+ ext4_inode_bitmap(sb, gdp) - start);
if (inode_cluster < num_clusters)
inode_cluster = -1;
else if (inode_cluster == num_clusters) {
@@ -114,7 +114,7 @@
itbl_blk = ext4_inode_table(sb, gdp);
for (i = 0; i < sbi->s_itb_per_group; i++) {
if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
- c = EXT4_B2C(sbi, start - itbl_blk + i);
+ c = EXT4_B2C(sbi, itbl_blk + i - start);
if ((c < num_clusters) || (c == inode_cluster) ||
(c == block_cluster) || (c == itbl_cluster))
continue;
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 81e803e..20b8446 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -55,6 +55,11 @@
#define CLOCK_EVT_FEAT_C3STOP 0x000008
#define CLOCK_EVT_FEAT_DUMMY 0x000010
+/*
+ * Core shall set the interrupt affinity dynamically in broadcast mode
+ */
+#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+
/**
* struct clock_event_device - clock event device descriptor
* @event_handler: Assigned by the framework to be called by the low
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 5ab7183..6b76dfd7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -147,7 +147,9 @@
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
-
+extern int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus);
+extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
@@ -168,7 +170,10 @@
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
-
+static inline int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus)
+{return -ENODEV; }
+static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 9c74fbb..d00847a 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -52,6 +52,9 @@
*/
#define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1
+#define DEVFREQ_FLAG_FAST_HINT 0x2
+#define DEVFREQ_FLAG_SLOW_HINT 0x4
+
/**
* struct devfreq_governor_data - mapping to per device governor data
* @name: The name of the governor.
@@ -132,7 +135,8 @@
struct list_head node;
const char name[DEVFREQ_NAME_LEN];
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq,
+ u32 *flag);
int (*event_handler)(struct devfreq *devfreq,
unsigned int event, void *data);
};
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 4983316..f36298b 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -58,6 +58,14 @@
#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created
at mmap time, if this is set
caches must be managed manually */
+#define ION_FLAG_FREED_FROM_SHRINKER 4 /* Skip any possible
+ heap-specific caching
+ mechanism (e.g. page
+ pools). Guarantees that any
+ buffer storage that came
+ from the system allocator
+ will be returned to the
+ system allocator. */
#ifdef __KERNEL__
#include <linux/err.h>
diff --git a/include/linux/msm_adreno_devfreq.h b/include/linux/msm_adreno_devfreq.h
new file mode 100644
index 0000000..53d7085
--- /dev/null
+++ b/include/linux/msm_adreno_devfreq.h
@@ -0,0 +1,47 @@
+#ifndef MSM_ADRENO_DEVFREQ_H
+#define MSM_ADRENO_DEVFREQ_H
+
+#include <linux/notifier.h>
+
+#define ADRENO_DEVFREQ_NOTIFY_SUBMIT 1
+#define ADRENO_DEVFREQ_NOTIFY_RETIRE 2
+#define ADRENO_DEVFREQ_NOTIFY_IDLE 3
+
+struct device;
+
+int kgsl_devfreq_add_notifier(struct device *, struct notifier_block *);
+
+int kgsl_devfreq_del_notifier(struct device *, struct notifier_block *);
+
+/* same as KGSL_MAX_PWRLEVELS */
+#define MSM_ADRENO_MAX_PWRLEVELS 10
+
+struct xstats {
+ u64 ram_time;
+ u64 ram_wait;
+ int mod;
+};
+
+struct devfreq_msm_adreno_tz_data {
+ struct notifier_block nb;
+ struct {
+ s64 total_time;
+ s64 busy_time;
+ } bin;
+ struct {
+ u64 total_time;
+ u64 ram_time;
+ u64 gpu_time;
+ u32 num;
+ u32 max;
+ u32 up[MSM_ADRENO_MAX_PWRLEVELS];
+ u32 down[MSM_ADRENO_MAX_PWRLEVELS];
+ u32 p_up[MSM_ADRENO_MAX_PWRLEVELS];
+ u32 p_down[MSM_ADRENO_MAX_PWRLEVELS];
+ unsigned int *index;
+ uint64_t *ib;
+ } bus;
+ unsigned int device_id;
+};
+
+#endif
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index 16a1000..3976699 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -5,11 +5,14 @@
enum msm_ion_heap_types {
ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
- ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
- ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_DMA = ION_HEAP_TYPE_MSM_START,
ION_HEAP_TYPE_CP,
ION_HEAP_TYPE_SECURE_DMA,
ION_HEAP_TYPE_REMOVED,
+ /*
+ * if you add a heap type here you should also add it to
+ * heap_types_info[] in msm_ion.c
+ */
};
/**
@@ -31,17 +34,23 @@
ION_ADSP_HEAP_ID = 22,
ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
ION_SF_HEAP_ID = 24,
- ION_IOMMU_HEAP_ID = 25,
+ ION_SYSTEM_HEAP_ID = 25,
ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
ION_QSECOM_HEAP_ID = 27,
ION_AUDIO_HEAP_ID = 28,
ION_MM_FIRMWARE_HEAP_ID = 29,
- ION_SYSTEM_HEAP_ID = 30,
ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
};
+/*
+ * The IOMMU heap is deprecated! Here are some aliases for backwards
+ * compatibility:
+ */
+#define ION_IOMMU_HEAP_ID ION_SYSTEM_HEAP_ID
+#define ION_HEAP_TYPE_IOMMU ION_HEAP_TYPE_SYSTEM
+
enum ion_fixed_position {
NOT_FIXED,
FIXED_LOW,
@@ -90,7 +99,8 @@
#define ION_HEAP(bit) (1 << (bit))
#define ION_ADSP_HEAP_NAME "adsp"
-#define ION_VMALLOC_HEAP_NAME "vmalloc"
+#define ION_SYSTEM_HEAP_NAME "system"
+#define ION_VMALLOC_HEAP_NAME ION_SYSTEM_HEAP_NAME
#define ION_KMALLOC_HEAP_NAME "kmalloc"
#define ION_AUDIO_HEAP_NAME "audio"
#define ION_SF_HEAP_NAME "sf"
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 1be32d9..a691330 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -527,6 +527,50 @@
uint32_t roi_w[MAX_PLANES];
};
+/**
+ * struct mdp_overlay - overlay surface structure
+ * @src: Source image information (width, height, format).
+ * @src_rect: Source crop rectangle, portion of image that will be fetched.
+ * This should always be within boundaries of source image.
+ * @dst_rect: Destination rectangle, the position and size of image on screen.
+ * This should always be within panel boundaries.
+ * @z_order: Blending stage to occupy in display, if multiple layers are
+ * present, highest z_order usually means the top most visible
+ * layer. The range acceptable is from 0-3 to support blending
+ * up to 4 layers.
+ * @is_fg: This flag is used to disable blending of any layers with z_order
+ * less than this overlay. It means that any layers with z_order
+ * less than this layer will not be blended and will be replaced
+ * by the background border color.
+ * @alpha: Used to set plane opacity. The range can be from 0-255, where
+ * 0 means completely transparent and 255 means fully opaque.
+ * @transp_mask: Color used as color key for transparency. Any pixel in fetched
+ * image matching this color will be transparent when blending.
+ * The color should be in same format as the source image format.
+ * @flags: This is used to customize operation of overlay. See MDP flags
+ * for more information.
+ * @user_data: DEPRECATED* Used to store user application specific information.
+ * @bg_color: Solid color used to fill the overlay surface when no source
+ * buffer is provided.
+ * @horz_deci: Horizontal decimation value, this indicates the amount of pixels
+ * dropped for each pixel that is fetched from a line. The value
+ * given should be power of two of decimation amount.
+ * 0: no decimation
+ * 1: decimate by 2 (drop 1 pixel for each pixel fetched)
+ * 2: decimate by 4 (drop 3 pixels for each pixel fetched)
+ * 3: decimate by 8 (drop 7 pixels for each pixel fetched)
+ * 4: decimate by 16 (drop 15 pixels for each pixel fetched)
+ * @vert_deci: Vertical decimation value, this indicates the amount of lines
+ * dropped for each line that is fetched from overlay. The value
+ * given should be power of two of decimation amount.
+ * 0: no decimation
+ * 1: decimation by 2 (drop 1 line for each line fetched)
+ * 2: decimation by 4 (drop 3 lines for each line fetched)
+ * 3: decimation by 8 (drop 7 lines for each line fetched)
+ * 4: decimation by 16 (drop 15 lines for each line fetched)
+ * @overlay_pp_cfg: Overlay post processing configuration, for more information
+ * see struct mdp_overlay_pp_params.
+ */
struct mdp_overlay {
struct msmfb_img src;
struct mdp_rect src_rect;
@@ -538,7 +582,8 @@
uint32_t transp_mask;
uint32_t flags;
uint32_t id;
- uint32_t user_data[7];
+ uint32_t user_data[6];
+ uint32_t bg_color;
uint8_t horz_deci;
uint8_t vert_deci;
struct mdp_overlay_pp_params overlay_pp_cfg;
@@ -568,19 +613,21 @@
uint32_t *b;
};
+#define MISR_CRC_BATCH_SIZE 32
enum {
- DISPLAY_MISR_EDP,
+ DISPLAY_MISR_EDP = 0,
DISPLAY_MISR_DSI0,
DISPLAY_MISR_DSI1,
DISPLAY_MISR_HDMI,
DISPLAY_MISR_LCDC,
+ DISPLAY_MISR_MDP,
DISPLAY_MISR_ATV,
DISPLAY_MISR_DSI_CMD,
DISPLAY_MISR_MAX
};
enum {
- MISR_OP_NONE,
+ MISR_OP_NONE = 0,
MISR_OP_SFM,
MISR_OP_MFM,
MISR_OP_BM,
@@ -591,7 +638,7 @@
uint32_t block_id;
uint32_t frame_count;
uint32_t crc_op_mode;
- uint32_t crc_value[32];
+ uint32_t crc_value[MISR_CRC_BATCH_SIZE];
};
/*
diff --git a/include/linux/sensors.h b/include/linux/sensors.h
new file mode 100644
index 0000000..3520034
--- /dev/null
+++ b/include/linux/sensors.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_SENSORS_H_INCLUDED
+#define __LINUX_SENSORS_H_INCLUDED
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+
+#define SENSORS_ACCELERATION_HANDLE 0
+#define SENSORS_MAGNETIC_FIELD_HANDLE 1
+#define SENSORS_ORIENTATION_HANDLE 2
+#define SENSORS_LIGHT_HANDLE 3
+#define SENSORS_PROXIMITY_HANDLE 4
+#define SENSORS_GYROSCOPE_HANDLE 5
+#define SENSORS_PRESSURE_HANDLE 6
+
+#define SENSOR_TYPE_ACCELEROMETER 1
+#define SENSOR_TYPE_GEOMAGNETIC_FIELD 2
+#define SENSOR_TYPE_MAGNETIC_FIELD SENSOR_TYPE_GEOMAGNETIC_FIELD
+#define SENSOR_TYPE_ORIENTATION 3
+#define SENSOR_TYPE_GYROSCOPE 4
+#define SENSOR_TYPE_LIGHT 5
+#define SENSOR_TYPE_PRESSURE 6
+#define SENSOR_TYPE_TEMPERATURE 7
+#define SENSOR_TYPE_PROXIMITY 8
+#define SENSOR_TYPE_GRAVITY 9
+#define SENSOR_TYPE_LINEAR_ACCELERATION 10
+#define SENSOR_TYPE_ROTATION_VECTOR 11
+#define SENSOR_TYPE_RELATIVE_HUMIDITY 12
+#define SENSOR_TYPE_AMBIENT_TEMPERATURE 13
+#define SENSOR_TYPE_MAGNETIC_FIELD_UNCALIBRATED 14
+#define SENSOR_TYPE_GAME_ROTATION_VECTOR 15
+#define SENSOR_TYPE_GYROSCOPE_UNCALIBRATED 16
+#define SENSOR_TYPE_SIGNIFICANT_MOTION 17
+#define SENSOR_TYPE_STEP_DETECTOR 18
+#define SENSOR_TYPE_STEP_COUNTER 19
+#define SENSOR_TYPE_GEOMAGNETIC_ROTATION_VECTOR 20
+
+struct sensors_classdev {
+ struct device *dev;
+ struct list_head node;
+ const char *name;
+ const char *vendor;
+ int version;
+ int handle;
+ int type;
+ const char *max_range;
+ const char *resolution;
+ const char *sensor_power;
+ int min_delay;
+ int fifo_reserved_event_count;
+ int fifo_max_event_count;
+};
+
+extern int sensors_classdev_register(struct device *parent,
+ struct sensors_classdev *sensors_cdev);
+extern void sensors_classdev_unregister(struct sensors_classdev *sensors_cdev);
+
+#endif /* __LINUX_SENSORS_H_INCLUDED */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index ed6d41b..faf98fe 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -102,6 +102,7 @@
enum thermal_trip_type trip;
int (*notify)(enum thermal_trip_type type, int temp, void *data);
void *data;
+ uint8_t active;
struct list_head list;
};
@@ -189,6 +190,8 @@
int sensor_get_id(char *name);
int sensor_set_trip(uint32_t sensor_id, struct sensor_threshold *threshold);
int sensor_cancel_trip(uint32_t sensor_id, struct sensor_threshold *threshold);
+int sensor_activate_trip(uint32_t sensor_id, struct sensor_threshold *threshold,
+ bool enable);
int thermal_sensor_trip(struct thermal_zone_device *tz,
enum thermal_trip_type trip, long temp);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 494a314..dc15221 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -8,6 +8,7 @@
#include <linux/clockchips.h>
#include <linux/irqflags.h>
+#include <linux/hrtimer.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
diff --git a/include/linux/tspp.h b/include/linux/tspp.h
index c790c28..ddddbfb 100644
--- a/include/linux/tspp.h
+++ b/include/linux/tspp.h
@@ -34,7 +34,7 @@
int pid;
int mask;
enum tspp_mode mode;
- int priority; /* 0 - 15 */
+ unsigned int priority; /* 0 - 15 */
int decrypt;
enum tspp_source source;
};
diff --git a/include/media/msm_jpeg.h b/include/media/msm_jpeg.h
index 56829f1..99f0de0 100644
--- a/include/media/msm_jpeg.h
+++ b/include/media/msm_jpeg.h
@@ -55,6 +55,9 @@
#define MSM_JPEG_IOCTL_TEST_DUMP_REGION \
_IOW(MSM_JPEG_IOCTL_MAGIC, 15, unsigned long)
+#define MSM_JPEG_IOCTL_SET_CLK_RATE \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 16, unsigned long)
+
#define MSM_JPEG_MODE_REALTIME_ENCODE 0
#define MSM_JPEG_MODE_OFFLINE_ENCODE 1
#define MSM_JPEG_MODE_REALTIME_ROTATION 2
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 868be9f..ef7d118 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -93,6 +93,27 @@
unsigned int num_panscan_windows;
struct msm_vidc_panscan_window wnd[1];
};
+struct msm_vidc_s3d_frame_packing_payload {
+ unsigned int fpa_id;
+ unsigned int cancel_flag;
+ unsigned int fpa_type;
+ unsigned int quin_cunx_flag;
+ unsigned int content_interprtation_type;
+ unsigned int spatial_flipping_flag;
+ unsigned int frame0_flipped_flag;
+ unsigned int field_views_flag;
+ unsigned int current_frame_is_frame0_flag;
+ unsigned int frame0_self_contained_flag;
+ unsigned int frame1_self_contained_flag;
+ unsigned int frame0_graid_pos_x;
+ unsigned int frame0_graid_pos_y;
+ unsigned int frame1_graid_pos_x;
+ unsigned int frame1_graid_pos_y;
+ unsigned int fpa_reserved_byte;
+ unsigned int fpa_repetition_period;
+ unsigned int fpa_extension_flag;
+};
+
enum msm_vidc_extradata_type {
EXTRADATA_NONE = 0x00000000,
EXTRADATA_MB_QUANTIZATION = 0x00000001,
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index de42c38..ed4ffa2 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -90,6 +90,7 @@
uint32_t offset;
uint8_t native_buff;
uint8_t processed_divert;
+ uint32_t identity;
};
struct msm_cpp_stream_buff_info_t {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 4ecadd8..7932ba1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1801,22 +1801,28 @@
* enum wiphy_flags - wiphy capability flags
*
* @WIPHY_FLAG_CUSTOM_REGULATORY: tells us the driver for this device
- * has its own custom regulatory domain and cannot identify the
- * ISO / IEC 3166 alpha2 it belongs to. When this is enabled
- * we will disregard the first regulatory hint (when the
- * initiator is %REGDOM_SET_BY_CORE).
- * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
- * ignore regulatory domain settings until it gets its own regulatory
- * domain via its regulatory_hint() unless the regulatory hint is
- * from a country IE. After its gets its own regulatory domain it will
- * only allow further regulatory domain settings to further enhance
- * compliance. For example if channel 13 and 14 are disabled by this
- * regulatory domain no user regulatory domain can enable these channels
- * at a later time. This can be used for devices which do not have
- * calibration information guaranteed for frequencies or settings
- * outside of its regulatory domain. If used in combination with
- * WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
- * will be followed.
+ * has its own custom regulatory domain and cannot identify the
+ * ISO / IEC 3166 alpha2 it belongs to. When this is enabled
+ * we will disregard the first regulatory hint (when the
+ * initiator is %REGDOM_SET_BY_CORE). wiphys can set the custom
+ * regulatory domain using wiphy_apply_custom_regulatory()
+ * prior to wiphy registration.
+ * @WIPHY_FLAG_STRICT_REGULATORY: tells us that the wiphy for this device
+ * has regulatory domain that it wishes to be considered as the
+ * superset for regulatory rules. After this device gets its regulatory
+ * domain programmed further regulatory hints shall only be considered
+ * for this device to enhance regulatory compliance, forcing the
+ * device to only possibly use subsets of the original regulatory
+ * rules. For example if channel 13 and 14 are disabled by this
+ * device's regulatory domain no user specified regulatory hint which
+ * has these channels enabled would enable them for this wiphy,
+ * the device's original regulatory domain will be trusted as the
+ * base. You can program the superset of regulatory rules for this
+ * wiphy with regulatory_hint() for cards programmed with an
+ * ISO3166-alpha2 country code. wiphys that use regulatory_hint()
+ * will have their wiphy->regd programmed once the regulatory
+ * domain is set, and all other regulatory hints will be ignored
+ * until their own regulatory domain gets programmed.
* @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
* that passive scan flags and beaconing flags may not be lifted by
* cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -1884,6 +1890,34 @@
};
/**
+ * enum nl80211_country_ie_pref - country IE processing preferences
+ *
+ * enumerates the different preferences an 802.11 card can advertise
+ * for parsing the country IEs. As per the current implementation
+ * country IEs are only used to derive the alpha2, the information
+ * for power settings that comes with the country IE is ignored
+ * and we use the power settings from regdb.
+ *
+ * @NL80211_COUNTRY_IE_FOLLOW_CORE - This is the default behaviour.
+ * It allows the core to update channel flags according to the
+ * ISO3166-alpha2 in the country IE. The applied power is -
+ * MIN(power specified by custom domain, power obtained from regdb)
+ * @NL80211_COUNTRY_IE_FOLLOW_POWER - for devices that have a
+ * preference that even though they may have programmed their own
+ * custom power setting prior to wiphy registration, they want
+ * to ensure their channel power settings are updated for this
+ * connection with the power settings derived from alpha2 of the
+ * country IE.
+ * @NL80211_COUNTRY_IE_IGNORE_CORE - for devices that have a preference to
+ * ignore all country IE information processed by the core.
+ */
+enum nl80211_country_ie_pref {
+ NL80211_COUNTRY_IE_FOLLOW_CORE,
+ NL80211_COUNTRY_IE_FOLLOW_POWER,
+ NL80211_COUNTRY_IE_IGNORE_CORE,
+};
+
+/**
* struct ieee80211_iface_limit - limit on certain interface types
* @max: maximum number of interfaces of these types
* @types: interface types (bits)
@@ -2100,6 +2134,8 @@
*
* @max_acl_mac_addrs: Maximum number of MAC addresses that the device
* supports for ACL.
+ * @country_ie_pref: country IE processing preferences specified
+ * by enum nl80211_country_ie_pref
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -2164,6 +2200,8 @@
*/
u32 probe_resp_offload;
+ u8 country_ie_pref;
+
/* If multiple wiphys are registered and you're handed e.g.
* a regular netdev with assigned ieee80211_ptr, you won't
* know whether it points to a wiphy your driver has registered
@@ -2691,6 +2729,30 @@
extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
+ * regulatory_hint_user - hint to the wireless core a regulatory domain
+ * which the driver has received from an application
+ * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
+ * should be in, as specified by a trusted application. This must
+ * be a valid ISO/IEC 3166 alpha2 two-character country code.
+ *
+ *
+ * Wireless drivers can use this function to hint to the wireless core
+ * the current regulatory domain as specified by trusted applications,
+ * it is the driver's responsibility to establish which applications it
+ * trusts.
+ *
+ * The wiphy should be registered to cfg80211 prior to this call.
+ * For cfg80211 drivers this means you must first use wiphy_register(),
+ * for mac80211 drivers you must first use ieee80211_register_hw().
+ *
+ * Drivers should check the return value, it's possible you can get
+ * an -ENOMEM or an -EINVAL.
+ *
+ * Return: 0 on success. -ENOMEM, -EINVAL.
+ */
+extern int regulatory_hint_user(const char *alpha2);
+
+/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
* @wiphy: the wireless device we want to process the regulatory domain on
* @regd: the custom regulatory domain to use for this wiphy
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index b62a759..75fe3a2 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -6279,6 +6279,206 @@
#define VOICE_CMD_GET_PARAM 0x0001133E
#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+
+/** ID of the Bass Boost module.
+ This module supports the following parameter IDs:
+ - #AUDPROC_PARAM_ID_BASS_BOOST_ENABLE
+ - #AUDPROC_PARAM_ID_BASS_BOOST_MODE
+ - #AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH
+*/
+#define AUDPROC_MODULE_ID_BASS_BOOST 0x000108A1
+/** ID of the Bass Boost enable parameter used by
+ AUDPROC_MODULE_ID_BASS_BOOST.
+*/
+#define AUDPROC_PARAM_ID_BASS_BOOST_ENABLE 0x000108A2
+/** ID of the Bass Boost mode parameter used by
+ AUDPROC_MODULE_ID_BASS_BOOST.
+*/
+#define AUDPROC_PARAM_ID_BASS_BOOST_MODE 0x000108A3
+/** ID of the Bass Boost strength parameter used by
+ AUDPROC_MODULE_ID_BASS_BOOST.
+*/
+#define AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH 0x000108A4
+
+/** ID of the Virtualizer module. This module supports the
+ following parameter IDs:
+ - #AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE
+ - #AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH
+ - #AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE
+ - #AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST
+*/
+#define AUDPROC_MODULE_ID_VIRTUALIZER 0x000108A5
+/** ID of the Virtualizer enable parameter used by
+ AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE 0x000108A6
+/** ID of the Virtualizer strength parameter used by
+ AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH 0x000108A7
+/** ID of the Virtualizer out type parameter used by
+ AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE 0x000108A8
+/** ID of the Virtualizer gain adjust parameter used by
+ AUDPROC_MODULE_ID_VIRTUALIZER.
+*/
+#define AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST 0x000108A9
+
+/** ID of the Reverb module. This module supports the following
+ parameter IDs:
+ - #AUDPROC_PARAM_ID_REVERB_ENABLE
+ - #AUDPROC_PARAM_ID_REVERB_MODE
+ - #AUDPROC_PARAM_ID_REVERB_PRESET
+ - #AUDPROC_PARAM_ID_REVERB_WET_MIX
+ - #AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST
+ - #AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL
+ - #AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL
+ - #AUDPROC_PARAM_ID_REVERB_DECAY_TIME
+ - #AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO
+ - #AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL
+ - #AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY
+ - #AUDPROC_PARAM_ID_REVERB_LEVEL
+ - #AUDPROC_PARAM_ID_REVERB_DELAY
+ - #AUDPROC_PARAM_ID_REVERB_DIFFUSION
+ - #AUDPROC_PARAM_ID_REVERB_DENSITY
+*/
+#define AUDPROC_MODULE_ID_REVERB 0x000108AA
+/** ID of the Reverb enable parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_ENABLE 0x000108AB
+/** ID of the Reverb mode parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_MODE 0x000108AC
+/** ID of the Reverb preset parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_PRESET 0x000108AD
+/** ID of the Reverb wet mix parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_WET_MIX 0x000108AE
+/** ID of the Reverb gain adjust parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST 0x000108AF
+/** ID of the Reverb room level parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL 0x000108B0
+/** ID of the Reverb room hf level parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL 0x000108B1
+/** ID of the Reverb decay time parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DECAY_TIME 0x000108B2
+/** ID of the Reverb decay hf ratio parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO 0x000108B3
+/** ID of the Reverb reflections level parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL 0x000108B4
+/** ID of the Reverb reflections delay parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY 0x000108B5
+/** ID of the Reverb level parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_LEVEL 0x000108B6
+/** ID of the Reverb delay parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DELAY 0x000108B7
+/** ID of the Reverb diffusion parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DIFFUSION 0x000108B8
+/** ID of the Reverb density parameter used by
+ AUDPROC_MODULE_ID_REVERB.
+*/
+#define AUDPROC_PARAM_ID_REVERB_DENSITY 0x000108B9
+
+/** ID of the Popless Equalizer module. This module supports the
+ following parameter IDs:
+ - #AUDPROC_PARAM_ID_EQ_ENABLE
+ - #AUDPROC_PARAM_ID_EQ_CONFIG
+ - #AUDPROC_PARAM_ID_EQ_NUM_BANDS
+ - #AUDPROC_PARAM_ID_EQ_BAND_LEVELS
+ - #AUDPROC_PARAM_ID_EQ_BAND_LEVEL_RANGE
+ - #AUDPROC_PARAM_ID_EQ_BAND_FREQS
+ - #AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ_RANGE
+ - #AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ
+ - #AUDPROC_PARAM_ID_EQ_BAND_INDEX
+ - #AUDPROC_PARAM_ID_EQ_PRESET_ID
+ - #AUDPROC_PARAM_ID_EQ_NUM_PRESETS
+ - #AUDPROC_PARAM_ID_EQ_PRESET_NAME
+*/
+#define AUDPROC_MODULE_ID_POPLESS_EQUALIZER 0x000108BA
+/** ID of the Popless Equalizer enable parameter used by
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+*/
+#define AUDPROC_PARAM_ID_EQ_ENABLE 0x000108BB
+/** ID of the Popless Equalizer config parameter used by
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+*/
+#define AUDPROC_PARAM_ID_EQ_CONFIG 0x000108BC
+/** ID of the Popless Equalizer number of bands parameter used
+ by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+ used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_NUM_BANDS 0x000108BD
+/** ID of the Popless Equalizer band levels parameter used by
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+ used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_LEVELS 0x000108BE
+/** ID of the Popless Equalizer band level range parameter used
+ by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+ used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_LEVEL_RANGE 0x000108BF
+/** ID of the Popless Equalizer band frequencies parameter used
+ by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is
+ used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_FREQS 0x000108C0
+/** ID of the Popless Equalizer single band frequency range
+ parameter used by AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+ This param ID is used for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ_RANGE 0x000108C1
+/** ID of the Popless Equalizer single band frequency parameter
+ used by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID
+ is used for set param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ 0x000108C2
+/** ID of the Popless Equalizer band index parameter used by
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER.
+*/
+#define AUDPROC_PARAM_ID_EQ_BAND_INDEX 0x000108C3
+/** ID of the Popless Equalizer preset id parameter used by
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is used
+ for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_PRESET_ID 0x000108C4
+/** ID of the Popless Equalizer number of presets parameter used
+ by AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is used
+ for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_NUM_PRESETS 0x000108C5
+/** ID of the Popless Equalizer preset name parameter used by
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER. This param ID is used
+ for get param only.
+*/
+#define AUDPROC_PARAM_ID_EQ_PRESET_NAME 0x000108C6
+
/* Set Q6 topologies */
#define ASM_CMD_ADD_TOPOLOGIES 0x00010DBE
#define ADM_CMD_ADD_TOPOLOGIES 0x00010335
diff --git a/include/sound/audio_effects.h b/include/sound/audio_effects.h
new file mode 100644
index 0000000..3444477
--- /dev/null
+++ b/include/sound/audio_effects.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _AUDIO_EFFECTS_H
+#define _AUDIO_EFFECTS_H
+
+/** AUDIO EFFECTS **/
+
+
+/* CONFIG GET/SET */
+#define CONFIG_CACHE 0
+#define CONFIG_SET 1
+#define CONFIG_GET 2
+
+/* CONFIG HEADER */
+/*
+
+ MODULE_ID,
+ DEVICE,
+ NUM_COMMANDS,
+ COMMAND_ID_1,
+ CONFIG_CACHE/SET/GET,
+ OFFSET_1,
+ LENGTH_1,
+ VALUES_1,
+ ...,
+ ...,
+ COMMAND_ID_2,
+ CONFIG_CACHE/SET/GET,
+ OFFSET_2,
+ LENGTH_2,
+ VALUES_2,
+ ...,
+ ...,
+ COMMAND_ID_3,
+ ...
+*/
+
+
+/* CONFIG PARAM IDs */
+#define VIRTUALIZER_MODULE 0x00001000
+#define VIRTUALIZER_ENABLE 0x00001001
+#define VIRTUALIZER_STRENGTH 0x00001002
+#define VIRTUALIZER_OUT_TYPE 0x00001003
+#define VIRTUALIZER_GAIN_ADJUST 0x00001004
+#define VIRTUALIZER_ENABLE_PARAM_LEN 1
+#define VIRTUALIZER_STRENGTH_PARAM_LEN 1
+#define VIRTUALIZER_OUT_TYPE_PARAM_LEN 1
+#define VIRTUALIZER_GAIN_ADJUST_PARAM_LEN 1
+
+#define REVERB_MODULE 0x00002000
+#define REVERB_ENABLE 0x00002001
+#define REVERB_MODE 0x00002002
+#define REVERB_PRESET 0x00002003
+#define REVERB_WET_MIX 0x00002004
+#define REVERB_GAIN_ADJUST 0x00002005
+#define REVERB_ROOM_LEVEL 0x00002006
+#define REVERB_ROOM_HF_LEVEL 0x00002007
+#define REVERB_DECAY_TIME 0x00002008
+#define REVERB_DECAY_HF_RATIO 0x00002009
+#define REVERB_REFLECTIONS_LEVEL 0x0000200a
+#define REVERB_REFLECTIONS_DELAY 0x0000200b
+#define REVERB_LEVEL 0x0000200c
+#define REVERB_DELAY 0x0000200d
+#define REVERB_DIFFUSION 0x0000200e
+#define REVERB_DENSITY 0x0000200f
+#define REVERB_ENABLE_PARAM_LEN 1
+#define REVERB_MODE_PARAM_LEN 1
+#define REVERB_PRESET_PARAM_LEN 1
+#define REVERB_WET_MIX_PARAM_LEN 1
+#define REVERB_GAIN_ADJUST_PARAM_LEN 1
+#define REVERB_ROOM_LEVEL_PARAM_LEN 1
+#define REVERB_ROOM_HF_LEVEL_PARAM_LEN 1
+#define REVERB_DECAY_TIME_PARAM_LEN 1
+#define REVERB_DECAY_HF_RATIO_PARAM_LEN 1
+#define REVERB_REFLECTIONS_LEVEL_PARAM_LEN 1
+#define REVERB_REFLECTIONS_DELAY_PARAM_LEN 1
+#define REVERB_LEVEL_PARAM_LEN 1
+#define REVERB_DELAY_PARAM_LEN 1
+#define REVERB_DIFFUSION_PARAM_LEN 1
+#define REVERB_DENSITY_PARAM_LEN 1
+
+#define BASS_BOOST_MODULE 0x00003000
+#define BASS_BOOST_ENABLE 0x00003001
+#define BASS_BOOST_MODE 0x00003002
+#define BASS_BOOST_STRENGTH 0x00003003
+#define BASS_BOOST_ENABLE_PARAM_LEN 1
+#define BASS_BOOST_MODE_PARAM_LEN 1
+#define BASS_BOOST_STRENGTH_PARAM_LEN 1
+
+#define EQ_MODULE 0x00004000
+#define EQ_ENABLE 0x00004001
+#define EQ_CONFIG 0x00004002
+#define EQ_NUM_BANDS 0x00004003
+#define EQ_BAND_LEVELS 0x00004004
+#define EQ_BAND_LEVEL_RANGE 0x00004005
+#define EQ_BAND_FREQS 0x00004006
+#define EQ_SINGLE_BAND_FREQ_RANGE 0x00004007
+#define EQ_SINGLE_BAND_FREQ 0x00004008
+#define EQ_BAND_INDEX 0x00004009
+#define EQ_PRESET_ID 0x0000400a
+#define EQ_NUM_PRESETS 0x0000400b
+#define EQ_PRESET_NAME 0x0000400c
+#define EQ_ENABLE_PARAM_LEN 1
+#define EQ_CONFIG_PARAM_LEN 3
+#define EQ_CONFIG_PER_BAND_PARAM_LEN 5
+#define EQ_NUM_BANDS_PARAM_LEN 1
+#define EQ_BAND_LEVELS_PARAM_LEN 13
+#define EQ_BAND_LEVEL_RANGE_PARAM_LEN 2
+#define EQ_BAND_FREQS_PARAM_LEN 13
+#define EQ_SINGLE_BAND_FREQ_RANGE_PARAM_LEN 2
+#define EQ_SINGLE_BAND_FREQ_PARAM_LEN 1
+#define EQ_BAND_INDEX_PARAM_LEN 1
+#define EQ_PRESET_ID_PARAM_LEN 1
+#define EQ_NUM_PRESETS_PARAM_LEN 1
+#define EQ_PRESET_NAME_PARAM_LEN 32
+
+#define EQ_TYPE_NONE 0
+#define EQ_BASS_BOOST 1
+#define EQ_BASS_CUT 2
+#define EQ_TREBLE_BOOST 3
+#define EQ_TREBLE_CUT 4
+#define EQ_BAND_BOOST 5
+#define EQ_BAND_CUT 6
+
+
+
+#define COMMAND_PAYLOAD_LEN 3
+#define COMMAND_PAYLOAD_SZ (COMMAND_PAYLOAD_LEN * sizeof(uint32_t))
+#define MAX_INBAND_PARAM_SZ 4096
+#define Q27_UNITY (1 << 27)
+#define Q8_UNITY (1 << 8)
+#define CUSTOM_OPENSL_PRESET 18
+
+#define VIRTUALIZER_ENABLE_PARAM_SZ \
+ (VIRTUALIZER_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define VIRTUALIZER_STRENGTH_PARAM_SZ \
+ (VIRTUALIZER_STRENGTH_PARAM_LEN*sizeof(uint32_t))
+#define VIRTUALIZER_OUT_TYPE_PARAM_SZ \
+ (VIRTUALIZER_OUT_TYPE_PARAM_LEN*sizeof(uint32_t))
+#define VIRTUALIZER_GAIN_ADJUST_PARAM_SZ \
+ (VIRTUALIZER_GAIN_ADJUST_PARAM_LEN*sizeof(uint32_t))
+struct virtualizer_params {
+ uint32_t device;
+ uint32_t enable_flag;
+ uint32_t strength;
+ uint32_t out_type;
+ int32_t gain_adjust;
+};
+
+#define NUM_OSL_REVERB_PRESETS_SUPPORTED 6
+#define REVERB_ENABLE_PARAM_SZ \
+ (REVERB_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_MODE_PARAM_SZ \
+ (REVERB_MODE_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_PRESET_PARAM_SZ \
+ (REVERB_PRESET_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_WET_MIX_PARAM_SZ \
+ (REVERB_WET_MIX_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_GAIN_ADJUST_PARAM_SZ \
+ (REVERB_GAIN_ADJUST_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_ROOM_LEVEL_PARAM_SZ \
+ (REVERB_ROOM_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_ROOM_HF_LEVEL_PARAM_SZ \
+ (REVERB_ROOM_HF_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DECAY_TIME_PARAM_SZ \
+ (REVERB_DECAY_TIME_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DECAY_HF_RATIO_PARAM_SZ \
+ (REVERB_DECAY_HF_RATIO_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_REFLECTIONS_LEVEL_PARAM_SZ \
+ (REVERB_REFLECTIONS_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_REFLECTIONS_DELAY_PARAM_SZ \
+ (REVERB_REFLECTIONS_DELAY_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_LEVEL_PARAM_SZ \
+ (REVERB_LEVEL_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DELAY_PARAM_SZ \
+ (REVERB_DELAY_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DIFFUSION_PARAM_SZ \
+ (REVERB_DIFFUSION_PARAM_LEN*sizeof(uint32_t))
+#define REVERB_DENSITY_PARAM_SZ \
+ (REVERB_DENSITY_PARAM_LEN*sizeof(uint32_t))
+struct reverb_params {
+ uint32_t device;
+ uint32_t enable_flag;
+ uint32_t mode;
+ uint32_t preset;
+ uint32_t wet_mix;
+ int32_t gain_adjust;
+ int32_t room_level;
+ int32_t room_hf_level;
+ uint32_t decay_time;
+ uint32_t decay_hf_ratio;
+ int32_t reflections_level;
+ uint32_t reflections_delay;
+ int32_t level;
+ uint32_t delay;
+ uint32_t diffusion;
+ uint32_t density;
+};
+
+#define BASS_BOOST_ENABLE_PARAM_SZ \
+ (BASS_BOOST_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define BASS_BOOST_MODE_PARAM_SZ \
+ (BASS_BOOST_MODE_PARAM_LEN*sizeof(uint32_t))
+#define BASS_BOOST_STRENGTH_PARAM_SZ \
+ (BASS_BOOST_STRENGTH_PARAM_LEN*sizeof(uint32_t))
+struct bass_boost_params {
+ uint32_t device;
+ uint32_t enable_flag;
+ uint32_t mode;
+ uint32_t strength;
+};
+
+
+#define MAX_EQ_BANDS 12
+#define MAX_OSL_EQ_BANDS 5
+#define EQ_ENABLE_PARAM_SZ \
+ (EQ_ENABLE_PARAM_LEN*sizeof(uint32_t))
+#define EQ_CONFIG_PARAM_SZ \
+ (EQ_CONFIG_PARAM_LEN*sizeof(uint32_t))
+#define EQ_CONFIG_PER_BAND_PARAM_SZ \
+ (EQ_CONFIG_PER_BAND_PARAM_LEN*sizeof(uint32_t))
+#define EQ_CONFIG_PARAM_MAX_LEN (EQ_CONFIG_PARAM_LEN+\
+ MAX_EQ_BANDS*EQ_CONFIG_PER_BAND_PARAM_LEN)
+#define EQ_CONFIG_PARAM_MAX_SZ \
+ (EQ_CONFIG_PARAM_MAX_LEN*sizeof(uint32_t))
+#define EQ_NUM_BANDS_PARAM_SZ \
+ (EQ_NUM_BANDS_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_LEVELS_PARAM_SZ \
+ (EQ_BAND_LEVELS_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_LEVEL_RANGE_PARAM_SZ \
+ (EQ_BAND_LEVEL_RANGE_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_FREQS_PARAM_SZ \
+ (EQ_BAND_FREQS_PARAM_LEN*sizeof(uint32_t))
+#define EQ_SINGLE_BAND_FREQ_RANGE_PARAM_SZ \
+ (EQ_SINGLE_BAND_FREQ_RANGE_PARAM_LEN*sizeof(uint32_t))
+#define EQ_SINGLE_BAND_FREQ_PARAM_SZ \
+ (EQ_SINGLE_BAND_FREQ_PARAM_LEN*sizeof(uint32_t))
+#define EQ_BAND_INDEX_PARAM_SZ \
+ (EQ_BAND_INDEX_PARAM_LEN*sizeof(uint32_t))
+#define EQ_PRESET_ID_PARAM_SZ \
+ (EQ_PRESET_ID_PARAM_LEN*sizeof(uint32_t))
+#define EQ_NUM_PRESETS_PARAM_SZ \
+ (EQ_NUM_PRESETS_PARAM_LEN*sizeof(uint8_t))
+struct eq_config_t {
+ int32_t eq_pregain;
+ int32_t preset_id;
+ uint32_t num_bands;
+};
+struct eq_per_band_config_t {
+ int32_t band_idx;
+ uint32_t filter_type;
+ uint32_t freq_millihertz;
+ int32_t gain_millibels;
+ uint32_t quality_factor;
+};
+struct eq_per_band_freq_range_t {
+ uint32_t band_index;
+ uint32_t min_freq_millihertz;
+ uint32_t max_freq_millihertz;
+};
+
+struct eq_params {
+ uint32_t device;
+ uint32_t enable_flag;
+ struct eq_config_t config;
+ struct eq_per_band_config_t per_band_cfg[MAX_EQ_BANDS];
+ struct eq_per_band_freq_range_t per_band_freq_range[MAX_EQ_BANDS];
+ uint32_t band_index;
+ uint32_t freq_millihertz;
+};
+
+#endif /*_AUDIO_EFFECTS_H*/
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index eb3b41a..9a459b8 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -377,6 +377,9 @@
int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp);
+int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
+ uint32_t params_length);
+
/* Client can set the IO mode to either AIO/SIO mode */
int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 41f8607..c549831 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -242,6 +242,56 @@
__entry->status ? "online" : "offline", __entry->error)
);
+/*
+ * Tracepoint for load balancing:
+ */
+#if NR_CPUS > 32
+#error "Unsupported NR_CPUS for lb tracepoint."
+#endif
+TRACE_EVENT(sched_load_balance,
+
+ TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
+ unsigned long group_mask, int busiest_nr_running,
+ unsigned long imbalance, unsigned int env_flags, int ld_moved,
+ unsigned int balance_interval),
+
+ TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
+ imbalance, env_flags, ld_moved, balance_interval),
+
+ TP_STRUCT__entry(
+ __field( int, cpu)
+ __field( enum cpu_idle_type, idle)
+ __field( int, balance)
+ __field( unsigned long, group_mask)
+ __field( int, busiest_nr_running)
+ __field( unsigned long, imbalance)
+ __field( unsigned int, env_flags)
+ __field( int, ld_moved)
+ __field( unsigned int, balance_interval)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->idle = idle;
+ __entry->balance = balance;
+ __entry->group_mask = group_mask;
+ __entry->busiest_nr_running = busiest_nr_running;
+ __entry->imbalance = imbalance;
+ __entry->env_flags = env_flags;
+ __entry->ld_moved = ld_moved;
+ __entry->balance_interval = balance_interval;
+ ),
+
+ TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d",
+ __entry->cpu,
+ __entry->idle == CPU_IDLE ? "idle" :
+ (__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
+ __entry->balance,
+ __entry->group_mask, __entry->busiest_nr_running,
+ __entry->imbalance, __entry->env_flags, __entry->ld_moved,
+ __entry->balance_interval)
+);
+
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 627dab1..89a5395 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,7 +621,19 @@
static inline bool got_nohz_idle_kick(void)
{
int cpu = smp_processor_id();
- return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+ if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+ return false;
+
+ if (idle_cpu(cpu) && !need_resched())
+ return true;
+
+ /*
+ * We can't run Idle Load Balance on this CPU for this time so we
+ * cancel it and clear NOHZ_BALANCE_KICK
+ */
+ clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+ return false;
}
#else /* CONFIG_NO_HZ */
@@ -1519,7 +1531,7 @@
/*
* Check if someone kicked us for doing the nohz idle load balance.
*/
- if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+ if (unlikely(got_nohz_idle_kick())) {
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2e98983..08497b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4422,7 +4422,7 @@
int ld_moved, active_balance = 0;
struct sched_group *group;
unsigned long imbalance;
- struct rq *busiest;
+ struct rq *busiest = NULL;
unsigned long flags;
struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
@@ -4591,6 +4591,10 @@
ld_moved = 0;
out:
+ trace_sched_load_balance(this_cpu, idle, *balance,
+ group ? group->cpumask[0] : 0,
+ busiest ? busiest->nr_running : 0, imbalance,
+ env.flags, ld_moved, sd->balance_interval);
return ld_moved;
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f113755..d6d0d41 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -370,14 +370,34 @@
return to_cpumask(tick_broadcast_oneshot_mask);
}
-static int tick_broadcast_set_event(ktime_t expires, int force)
+/*
+ * Set broadcast interrupt affinity
+ */
+static void tick_broadcast_set_affinity(struct clock_event_device *bc,
+ const struct cpumask *cpumask)
{
- struct clock_event_device *bc = tick_broadcast_device.evtdev;
+ if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
+ return;
+
+ if (cpumask_equal(bc->cpumask, cpumask))
+ return;
+
+ bc->cpumask = cpumask;
+ irq_set_affinity(bc->irq, bc->cpumask);
+}
+
+static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+ ktime_t expires, int force)
+{
+ int ret;
if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
- return clockevents_program_event(bc, expires, force);
+ ret = clockevents_program_event(bc, expires, force);
+ if (!ret)
+ tick_broadcast_set_affinity(bc, cpumask_of(cpu));
+ return ret;
}
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -406,7 +426,7 @@
{
struct tick_device *td;
ktime_t now, next_event;
- int cpu;
+ int cpu, next_cpu = 0;
raw_spin_lock(&tick_broadcast_lock);
again:
@@ -417,10 +437,12 @@
/* Find all expired events */
for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
td = &per_cpu(tick_cpu_device, cpu);
- if (td->evtdev->next_event.tv64 <= now.tv64)
+ if (td->evtdev->next_event.tv64 <= now.tv64) {
cpumask_set_cpu(cpu, to_cpumask(tmpmask));
- else if (td->evtdev->next_event.tv64 < next_event.tv64)
+ } else if (td->evtdev->next_event.tv64 < next_event.tv64) {
next_event.tv64 = td->evtdev->next_event.tv64;
+ next_cpu = cpu;
+ }
}
/*
@@ -443,7 +465,7 @@
* Rearm the broadcast device. If event expired,
* repeat the above
*/
- if (tick_broadcast_set_event(next_event, 0))
+ if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
goto again;
}
raw_spin_unlock(&tick_broadcast_lock);
@@ -486,7 +508,7 @@
cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
if (dev->next_event.tv64 < bc->next_event.tv64)
- tick_broadcast_set_event(dev->next_event, 1);
+ tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
}
} else {
if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
@@ -555,7 +577,7 @@
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
tick_broadcast_init_next_event(to_cpumask(tmpmask),
tick_next_period);
- tick_broadcast_set_event(tick_next_period, 1);
+ tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
} else
bc->next_event.tv64 = KTIME_MAX;
} else {
diff --git a/mm/page_io.c b/mm/page_io.c
index e60e43f..1499e1c 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -20,8 +20,15 @@
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/ratelimit.h>
#include <asm/pgtable.h>
+/*
+ * We don't need to see swap errors more than once every 1 second to know
+ * that a problem is occurring.
+ */
+#define SWAP_ERROR_LOG_RATE_MS 1000
+
static struct bio *get_swap_bio(gfp_t gfp_flags,
struct page *page, bio_end_io_t end_io)
{
@@ -46,6 +53,7 @@
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct page *page = bio->bi_io_vec[0].bv_page;
+ static unsigned long swap_error_rs_time;
if (!uptodate) {
SetPageError(page);
@@ -58,7 +66,9 @@
* Also clear PG_reclaim to avoid rotate_reclaimable_page()
*/
set_page_dirty(page);
- printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
+ if (printk_timed_ratelimit(&swap_error_rs_time,
+ SWAP_ERROR_LOG_RATE_MS))
+ printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_sector);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9e95109..d0e40e5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -474,6 +474,8 @@
if (!PageWriteback(page)) {
/* synchronous write or broken a_ops? */
ClearPageReclaim(page);
+ if (PageError(page))
+ return PAGE_ACTIVATE;
}
trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
inc_zone_page_state(page, NR_VMSCAN_WRITE);
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index c5861b8..b1efe57 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -6,24 +6,34 @@
# Channel 14. Only JP enables this and for 802.11b only
(2474 - 2494 @ 20), (3, 20), PASSIVE-SCAN, NO-IBSS, NO-OFDM
# Channel 36 - 48
- (5170 - 5250 @ 40), (3, 20), PASSIVE-SCAN, NO-IBSS
+ (5170 - 5250 @ 80), (3, 20), PASSIVE-SCAN, NO-IBSS
 # NB: 5260 MHz - 5700 MHz requires DFS
# Channel 149 - 165
- (5735 - 5835 @ 40), (3, 20), PASSIVE-SCAN, NO-IBSS
+ (5735 - 5835 @ 80), (3, 20), PASSIVE-SCAN, NO-IBSS
+ # IEEE 802.11ad (60GHz), channels 1..3
+ (57240 - 63720 @ 2160), (N/A, 0)
country AD:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country AE:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country AL:
- (2402 - 2482 @ 20), (N/A, 20)
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
country AM:
(2402 - 2482 @ 40), (N/A, 20)
@@ -32,30 +42,38 @@
country AN:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
country AR:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
+
+country AS:
+ (2402 - 2472 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country AT: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country AU:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 23)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country AW:
(2402 - 2482 @ 40), (N/A, 20)
@@ -65,39 +83,40 @@
country AZ:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 18)
- (5250 - 5330 @ 40), (N/A, 18), DFS
+ (5170 - 5250 @ 80), (N/A, 18)
+ (5250 - 5330 @ 80), (N/A, 18), DFS
country BA: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country BB:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 23)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 23)
+ (5250 - 5330 @ 80), (3, 23), DFS
+ (5735 - 5835 @ 80), (3, 30)
country BD:
(2402 - 2482 @ 40), (N/A, 20)
+ (5725 - 5850 @ 80), (N/A, 30)
country BE: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country BG: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 23)
- (5250 - 5290 @ 40), (N/A, 23), DFS
- (5490 - 5710 @ 40), (N/A, 30), DFS
+ (5170 - 5250 @ 80), (N/A, 23)
+ (5250 - 5290 @ 80), (N/A, 23), DFS
+ (5490 - 5710 @ 80), (N/A, 30), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -112,57 +131,74 @@
(5170 - 5250 @ 40), (N/A, 18)
(5250 - 5330 @ 40), (N/A, 18), DFS
+country BM:
+ (2402 - 2472 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
+
country BN:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5735 - 5835 @ 40), (N/A, 30)
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5735 - 5835 @ 80), (N/A, 30)
country BO:
(2402 - 2482 @ 40), (N/A, 30)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5735 - 5835 @ 80), (N/A, 30)
country BR:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
+
+country BS:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country BY:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
country BZ:
(2402 - 2482 @ 40), (N/A, 30)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5735 - 5835 @ 80), (N/A, 30)
country CA:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (N/A, 27)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country CH: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country CL:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5735 - 5835 @ 40), (N/A, 20)
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5735 - 5835 @ 80), (N/A, 20)
country CN:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 23)
+ (5250 - 5350 @ 80), (6, 23), DFS
+ (5725 - 5850 @ 80), (6, 30)
+ (5735 - 5835 @ 80), (N/A, 30)
# 60 gHz band channels 1,4: 28dBm, channels 2,3: 44dBm
# ref: http://www.miit.gov.cn/n11293472/n11505629/n11506593/n11960250/n11960606/n11960700/n12330791.files/n12330790.pdf
(57240 - 59400 @ 2160), (N/A, 28)
@@ -170,28 +206,24 @@
(63720 - 65880 @ 2160), (N/A, 28)
country CO:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (N/A, 27)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country CR:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 20), (3, 17)
- (5250 - 5330 @ 20), (3, 23), DFS
+ (5250 - 5330 @ 20), (3, 24), DFS
+ (5490 - 5710 @ 20), (3, 24), DFS
(5735 - 5835 @ 20), (3, 30)
-country CS:
- (2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
-
country CY: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -201,9 +233,9 @@
# implemented.
country CZ: DFS-ETSI
(2400 - 2483.5 @ 40), (N/A, 100 mW)
- (5150 - 5250 @ 40), (N/A, 200 mW), NO-OUTDOOR
- (5250 - 5350 @ 40), (N/A, 100 mW), NO-OUTDOOR, DFS
- (5470 - 5725 @ 40), (N/A, 500 mW), DFS
+ (5150 - 5250 @ 80), (N/A, 200 mW), NO-OUTDOOR
+ (5250 - 5350 @ 80), (N/A, 100 mW), NO-OUTDOOR, DFS
+ (5470 - 5725 @ 80), (N/A, 500 mW), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -223,27 +255,27 @@
# entries 279004 and 280006
(2400 - 2483.5 @ 40), (N/A, 100 mW)
# entry 303005
- (5150 - 5250 @ 40), (N/A, 100 mW), NO-OUTDOOR
+ (5150 - 5250 @ 80), (N/A, 100 mW), NO-OUTDOOR
# entries 304002 and 305002
- (5250 - 5350 @ 40), (N/A, 100 mW), NO-OUTDOOR, DFS
+ (5250 - 5350 @ 80), (N/A, 100 mW), NO-OUTDOOR, DFS
# entries 308002, 309001 and 310003
- (5470 - 5725 @ 40), (N/A, 500 mW), DFS
+ (5470 - 5725 @ 80), (N/A, 500 mW), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country DK: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country DO:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (N/A, 27)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 23), DFS
+ (5735 - 5835 @ 80), (3, 30)
country DZ:
(2402 - 2482 @ 40), (N/A, 20)
@@ -251,14 +283,15 @@
country EC:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 20), (3, 17)
- (5250 - 5330 @ 20), (3, 23), DFS
+ (5250 - 5330 @ 20), (3, 24), DFS
+ (5490 - 5710 @ 20), (3, 24), DFS
(5735 - 5835 @ 20), (3, 30)
country EE: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -269,55 +302,67 @@
country ES: DFS-ETSI
(2400 - 2483.5 @ 40), (N/A, 100 mW)
- (5150 - 5250 @ 40), (N/A, 100 mW), NO-OUTDOOR
- (5250 - 5350 @ 40), (N/A, 100 mW), NO-OUTDOOR, DFS
- (5470 - 5725 @ 40), (N/A, 500 mW), DFS
+ (5150 - 5250 @ 80), (N/A, 100 mW), NO-OUTDOOR
+ (5250 - 5350 @ 80), (N/A, 100 mW), NO-OUTDOOR, DFS
+ (5470 - 5725 @ 80), (N/A, 500 mW), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country FI: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country FR: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
+country GF:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 20), DFS
+
country GE:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 18)
- (5250 - 5330 @ 40), (N/A, 18), DFS
+ (5170 - 5250 @ 80), (N/A, 18)
+ (5250 - 5330 @ 80), (N/A, 18), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country GB: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country GD:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
+
+country GP:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
country GR: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -328,156 +373,165 @@
(5490 - 5710 @ 20), (N/A, 27), DFS
country GT:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 23), DFS
+ (5735 - 5835 @ 80), (6, 30)
country GU:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 20), (3, 17)
- (5250 - 5330 @ 20), (3, 23), DFS
- (5735 - 5835 @ 20), (3, 30)
+ (2402 - 2472 @ 40), (3, 30)
+ (5170 - 5250 @ 20), (6, 17)
+ (5250 - 5330 @ 20), (6, 24), DFS
+ (5490 - 5710 @ 20), (6, 24), DFS
+ (5735 - 5835 @ 20), (6, 30)
country HN:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
country HK:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
country HR: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country HT:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
country HU: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country ID:
+ # ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
(2402 - 2482 @ 40), (N/A, 20)
+ (5735 - 5815 @ 20), (N/A, 23)
country IE: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country IL:
(2402 - 2482 @ 40), (N/A, 20)
- (5150 - 5250 @ 40), (N/A, 200 mW), NO-OUTDOOR
- (5250 - 5350 @ 40), (N/A, 200 mW), NO-OUTDOOR, DFS
+ (5150 - 5250 @ 80), (N/A, 200 mW), NO-OUTDOOR
+ (5250 - 5350 @ 80), (N/A, 200 mW), NO-OUTDOOR, DFS
country IN:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5735 - 5835 @ 40), (N/A, 20)
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5735 - 5835 @ 80), (N/A, 20)
country IS: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country IR:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5735 - 5835 @ 80), (N/A, 30)
country IT: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country JM:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
country JP:
(2402 - 2482 @ 40), (N/A, 20)
(2474 - 2494 @ 20), (N/A, 20), NO-OFDM
(4910 - 4990 @ 40), (N/A, 23)
(5030 - 5090 @ 40), (N/A, 23)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 23), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 160), (N/A, 23), DFS
country JO:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 18)
+ (5150 - 5250 @ 80), (N/A, 23)
+ (5725 - 5850 @ 80), (N/A, 23)
country KE:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (N/A, 23)
+ (5470 - 5570 @ 80), (N/A, 30), DFS
+ (5725 - 5775 @ 80), (N/A, 23)
country KH:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
country KP:
- (2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5330 @ 40), (3, 20)
- (5160 - 5250 @ 40), (3, 20), DFS
- (5490 - 5630 @ 40), (3, 30), DFS
- (5735 - 5815 @ 40), (3, 30)
+ (2402 - 2482 @ 20), (N/A, 20)
+ (5170 - 5330 @ 20), (6, 20)
+ (5160 - 5250 @ 20), (6, 20), DFS
+ (5490 - 5630 @ 20), (6, 30), DFS
+ (5735 - 5815 @ 20), (6, 30)
country KR:
(2402 - 2482 @ 20), (N/A, 20)
- (5170 - 5250 @ 20), (3, 20)
- (5250 - 5330 @ 20), (3, 20), DFS
- (5490 - 5630 @ 20), (3, 30), DFS
- (5735 - 5815 @ 20), (3, 30)
+ (5150 - 5250 @ 80), (6, 20)
+ (5250 - 5350 @ 80), (6, 20), DFS
+ (5470 - 5725 @ 80), (6, 30), DFS
+ (5725 - 5825 @ 80), (6, 30)
country KW:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
country KZ:
(2402 - 2482 @ 40), (N/A, 20)
country LB:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country LI: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
country LK:
(2402 - 2482 @ 40), (N/A, 20)
@@ -488,35 +542,38 @@
country LT: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country LU: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country LV: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country MC: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 18)
- (5250 - 5330 @ 40), (N/A, 18), DFS
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
country MA:
(2402 - 2482 @ 40), (N/A, 20)
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5735 - 5835 @ 80), (N/A, 20), DFS
country MO:
(2402 - 2482 @ 40), (N/A, 20)
@@ -524,6 +581,13 @@
(5250 - 5330 @ 40), (3, 23), DFS
(5735 - 5835 @ 40), (3, 30)
+country MP:
+ (2402 - 2472 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
+
country MK: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 40), (N/A, 20)
@@ -532,6 +596,13 @@
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
+country MN:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
+
country MT: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 40), (N/A, 20)
@@ -540,105 +611,167 @@
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
+country MQ: DFS-ETSI
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
+
+country MU:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
+
country MY:
(2402 - 2482 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 30), DFS
- (5735 - 5835 @ 40), (N/A, 30)
+ (5170 - 5250 @ 80), (N/A, 17)
+ (5250 - 5330 @ 80), (N/A, 23), DFS
+ (5735 - 5835 @ 80), (N/A, 30)
country MX:
(2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
+
+country MW:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
+
+country NG:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 30), DFS
+ (5725 - 5850 @ 80), (N/A, 30)
+
+country NI:
+ (2402 - 2472 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country NL: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20), NO-OUTDOOR
- (5250 - 5330 @ 40), (N/A, 20), NO-OUTDOOR, DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20), NO-OUTDOOR
+ (5250 - 5330 @ 80), (N/A, 20), NO-OUTDOOR, DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country NO: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country NP:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5725 - 5850 @ 80), (N/A, 20)
country NZ:
(2402 - 2482 @ 40), (N/A, 30)
- (5170 - 5250 @ 20), (3, 23)
- (5250 - 5330 @ 20), (3, 23), DFS
- (5735 - 5835 @ 20), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
country OM:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
country PA:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (N/A, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 23), DFS
+ (5735 - 5835 @ 80), (6, 30)
country PE:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5170 - 5250 @ 80), (6, 20)
+ (5250 - 5330 @ 80), (6, 20), DFS
+ (5490 - 5710 @ 80), (6, 27), DFS
+ (5735 - 5835 @ 80), (6, 30)
+
+country PF:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
country PG:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country PH:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
country PK:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5735 - 5835 @ 80), (N/A, 30)
country PL: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country PT: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country PR:
- (2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 23), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (2402 - 2472 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
+
+country PY:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country QA:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5735 - 5835 @ 80), (N/A, 30)
+
+country RE:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
country RO: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -647,52 +780,61 @@
# http://www.ratel.rs/upload/documents/Plan_namene/Plan_namene-sl_glasnik.pdf
country RS:
(2400 - 2483.5 @ 40), (N/A, 100 mW)
- (5150 - 5350 @ 40), (N/A, 200 mW), NO-OUTDOOR
- (5470 - 5725 @ 20), (3, 1000 mW), DFS
+ (5150 - 5250 @ 80), (N/A, 200 mW), NO-OUTDOOR
+ (5250 - 5350 @ 80), (N/A, 200 mW), DFS
+ (5470 - 5725 @ 80), (3, 1000 mW), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country RU:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 20), (N/A, 30)
+ (5150 - 5250 @ 40), (N/A, 20)
+ (5250 - 5350 @ 40), (N/A, 20), DFS
+ (5650 - 5725 @ 40), (N/A, 30), DFS
+ (5725 - 5825 @ 40), (N/A, 30)
country RW:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5835 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5835 @ 80), (6, 30)
country SA:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 20), (3, 23)
- (5250 - 5330 @ 20), (3, 23), DFS
- (5735 - 5835 @ 20), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country SE: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country SG:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5735 - 5835 @ 40), (N/A, 20)
+ (5170 - 5250 @ 80), (6, 17)
+ (5250 - 5330 @ 80), (6, 24), DFS
+ (5490 - 5710 @ 80), (6, 24), DFS
+ (5735 - 5835 @ 80), (6, 30)
country SI: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
country SK: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
- (5490 - 5710 @ 40), (N/A, 27), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -707,15 +849,16 @@
country TW:
(2402 - 2472 @ 40), (3, 27)
- (5270 - 5330 @ 40), (3, 17), DFS
- (5735 - 5815 @ 40), (3, 30)
+ (5270 - 5330 @ 80), (6, 17), DFS
+ (5490 - 5710 @ 80), (6, 30), DFS
+ (5735 - 5815 @ 80), (6, 30)
country TH:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country TT:
(2402 - 2482 @ 40), (N/A, 20)
@@ -731,8 +874,9 @@
country TR: DFS-ETSI
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 20), (N/A, 20)
- (5250 - 5330 @ 20), (N/A, 20), DFS
+ (5170 - 5250 @ 80), (N/A, 20)
+ (5250 - 5330 @ 80), (N/A, 20), DFS
+ (5490 - 5710 @ 80), (N/A, 27), DFS
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
@@ -745,22 +889,42 @@
# disputable definitions there.
country UA:
(2400 - 2483.5 @ 40), (N/A, 20), NO-OUTDOOR
- (5150 - 5350 @ 40), (N/A, 20), NO-OUTDOOR
+ (5150 - 5250 @ 40), (N/A, 20), NO-OUTDOOR
+ (5250 - 5350 @ 40), (N/A, 20), NO-OUTDOOR, DFS
+ (5470 - 5670 @ 40), (N/A, 20), DFS
+ (5725 - 5850 @ 40), (N/A, 20)
# 60 gHz band channels 1-4, ref: Etsi En 302 567
(57240 - 65880 @ 2160), (N/A, 40), NO-OUTDOOR
+country UG:
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (6, 20)
+ (5250 - 5350 @ 80), (6, 20), DFS
+ (5470 - 5725 @ 80), (6, 20), DFS
+ (5725 - 5825 @ 80), (6, 20)
+
country US: DFS-FCC
(2402 - 2472 @ 40), (3, 27)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5600 @ 40), (3, 20), DFS
- (5650 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5600 @ 80), (3, 24), DFS
+ (5650 - 5710 @ 40), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
# 60g band
# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
# channels 1,2,3, EIRP=40dBm(43dBm peak)
(57240 - 63720 @ 2160), (N/A, 40)
+# Public Safety FCCA, FCC4
+# 27dBm [4.9GHz 1/4 rate], 30dBm [1/2 rate], 33dBm [full rate], and 5GHz same as FCC1
+# db.txt cannot express the limitation on 5G so disable all 5G channels for FCC4
+country PS:
+ (2402 - 2472 @ 40), (N/A, 30)
+ #(4940 - 4990 @ 40), (6, 27)
+ #(5150 - 5250 @ 80), (6, 30)
+ #(5250 - 5350 @ 80), (6, 30), DFS
+ #(5725 - 5850 @ 80), (6, 33)
+
country UY:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 40), (3, 17)
@@ -777,23 +941,44 @@
country VE:
(2402 - 2482 @ 40), (N/A, 20)
- (5735 - 5815 @ 40), (N/A, 23)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 23), DFS
+ (5725 - 5850 @ 80), (6, 30)
+
country VN:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (N/A, 20)
- (5250 - 5330 @ 40), (N/A, 20), DFS
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
+
+country VI:
+ (2402 - 2472 @ 40), (N/A, 30)
+ (5150 - 5250 @ 80), (6, 17)
+ (5250 - 5350 @ 80), (6, 24), DFS
+ (5470 - 5725 @ 80), (6, 24), DFS
+ (5725 - 5850 @ 80), (6, 30)
country YE:
(2402 - 2482 @ 40), (N/A, 20)
+country YT: DFS-ETSI
+ (2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
+
country ZA:
(2402 - 2482 @ 40), (N/A, 20)
- (5170 - 5250 @ 40), (3, 17)
- (5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
- (5735 - 5835 @ 40), (3, 30)
+ (5170 - 5250 @ 80), (3, 17)
+ (5250 - 5330 @ 80), (3, 24), DFS
+ (5490 - 5710 @ 80), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
country ZW:
(2402 - 2482 @ 40), (N/A, 20)
+ (5150 - 5250 @ 80), (N/A, 20)
+ (5250 - 5350 @ 80), (N/A, 20), DFS
+ (5470 - 5725 @ 80), (N/A, 27), DFS
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b96094c..bddb720 100755
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -848,8 +848,18 @@
r == -ERANGE)
return;
- REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
- chan->flags = IEEE80211_CHAN_DISABLED;
+ if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ request_wiphy && request_wiphy == wiphy &&
+ request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
+ REG_DBG_PRINT("Disabling freq %d MHz for good\n",
+ chan->center_freq);
+ chan->orig_flags |= IEEE80211_CHAN_DISABLED;
+ chan->flags = chan->orig_flags;
+ } else {
+ REG_DBG_PRINT("Disabling freq %d MHz\n",
+ chan->center_freq);
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
return;
}
@@ -883,7 +893,19 @@
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
- chan->max_power = min(chan->max_power, chan->max_reg_power);
+ if (chan->orig_mpwr) {
+ /*
+ * Devices that use NL80211_COUNTRY_IE_FOLLOW_POWER will always
+ * follow the passed country IE power settings.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ wiphy->country_ie_pref & NL80211_COUNTRY_IE_FOLLOW_POWER)
+ chan->max_power = chan->max_reg_power;
+ else
+ chan->max_power = min(chan->orig_mpwr,
+ chan->max_reg_power);
+ } else
+ chan->max_power = chan->max_reg_power;
}
static void handle_band(struct wiphy *wiphy,
@@ -1218,7 +1240,8 @@
"wide channel\n",
chan->center_freq,
KHZ_TO_MHZ(desired_bw_khz));
- chan->flags = IEEE80211_CHAN_DISABLED;
+ chan->orig_flags |= IEEE80211_CHAN_DISABLED;
+ chan->flags = chan->orig_flags;
return;
}
@@ -1295,6 +1318,8 @@
case NL80211_REGDOM_SET_BY_CORE:
return 0;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ if (wiphy->country_ie_pref & NL80211_COUNTRY_IE_IGNORE_CORE)
+ return -EALREADY;
last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
@@ -1648,6 +1673,7 @@
return 0;
}
+EXPORT_SYMBOL(regulatory_hint_user);
/* Driver hints */
int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
@@ -2117,7 +2143,7 @@
* checking if the alpha2 changes if CRDA was already called
*/
if (!regdom_changes(rd->alpha2))
- return -EINVAL;
+ return -EALREADY;
}
/*
@@ -2242,6 +2268,9 @@
/* Note that this doesn't update the wiphys, this is done below */
r = __set_regdom(rd);
if (r) {
+ if (r == -EALREADY)
+ reg_set_request_processed();
+
kfree(rd);
mutex_unlock(®_mutex);
return r;
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index e2aaaf5..017880c 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -22,8 +22,6 @@
bool reg_is_valid_request(const char *alpha2);
bool reg_supported_dfs_region(u8 dfs_region);
-int regulatory_hint_user(const char *alpha2);
-
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
void reg_device_remove(struct wiphy *wiphy);
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 8daa015..f8e9d7a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -612,9 +612,10 @@
static inline int
snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
{
- struct snd_compr_tstamp tstamp = {0};
+ struct snd_compr_tstamp tstamp;
int ret;
+ memset(&tstamp, 0, sizeof(tstamp));
ret = snd_compr_update_tstamp(stream, &tstamp);
if (ret == 0)
ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 52af21c..55fec32 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -2582,6 +2582,8 @@
*/
{MSM8X10_WCD_A_RX_HPH_OCP_CTL, 0xE1, 0x61},
{MSM8X10_WCD_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+ {MSM8X10_WCD_A_RX_HPH_L_TEST, 0x01, 0x01},
+ {MSM8X10_WCD_A_RX_HPH_R_TEST, 0x01, 0x01},
/* Initialize gain registers to use register gain */
{MSM8X10_WCD_A_RX_HPH_L_GAIN, 0x20, 0x20},
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index ec99c5f..db7a34c 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -90,7 +90,7 @@
#define WCD9XXX_HPHL_STATUS_READY_WAIT_US 1000
#define WCD9XXX_MUX_SWITCH_READY_WAIT_MS 50
-#define WCD9XXX_MEAS_DELTA_MAX_MV 50
+#define WCD9XXX_MEAS_DELTA_MAX_MV 120
#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20
#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80
@@ -128,7 +128,7 @@
S_IRUGO | S_IWUSR | S_IWGRP);
MODULE_PARM_DESC(impedance_detect_en, "enable/disable impedance detect");
-static bool detect_use_vddio_switch = true;
+static bool detect_use_vddio_switch;
struct wcd9xxx_mbhc_detect {
u16 dce;
@@ -227,7 +227,7 @@
* setup internal micbias if codec uses internal micbias for
* headset detection
*/
- if (mbhc->mbhc_cfg->use_int_rbias && !mbhc->int_rbias_on) {
+ if (mbhc->mbhc_cfg->use_int_rbias) {
if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
mbhc->mbhc_cb->setup_int_rbias(codec, true);
else
@@ -841,7 +841,8 @@
mbhc->zl = mbhc->zr = 0;
wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
0, WCD9XXX_JACK_MASK);
- mbhc->hph_status = 0;
+ mbhc->hph_status &= ~(SND_JACK_HEADSET |
+ SND_JACK_LINEOUT);
}
/* Report insertion */
mbhc->hph_status |= jack_type;
@@ -972,7 +973,9 @@
if (noreldetection)
wcd9xxx_turn_onoff_rel_detection(codec, false);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x0);
+ if (mbhc->mbhc_cfg->do_recalibration)
+ snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2,
+ 0x0);
/* Turn on the override */
if (!override_bypass)
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x4, 0x4);
@@ -982,8 +985,9 @@
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
0x0);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2,
- 0x2);
+ if (mbhc->mbhc_cfg->do_recalibration)
+ snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+ 0x2, 0x2);
usleep_range(mbhc->mbhc_data.t_sta_dce,
mbhc->mbhc_data.t_sta_dce);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
@@ -995,8 +999,9 @@
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
0x0);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2,
- 0x2);
+ if (mbhc->mbhc_cfg->do_recalibration)
+ snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
+ 0x2, 0x2);
usleep_range(mbhc->mbhc_data.t_sta_dce,
mbhc->mbhc_data.t_sta_dce);
snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
@@ -1103,7 +1108,7 @@
* setup internal micbias if codec uses internal micbias for
* headset detection
*/
- if (mbhc->mbhc_cfg->use_int_rbias && !mbhc->int_rbias_on) {
+ if (mbhc->mbhc_cfg->use_int_rbias) {
if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
mbhc->mbhc_cb->setup_int_rbias(codec, true);
else
@@ -1140,48 +1145,59 @@
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x2, 0x2);
snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
+ if (!mbhc->mbhc_cfg->do_recalibration) {
+ if (!is_cs_enable)
+ wcd9xxx_calibrate_hs_polling(mbhc);
+ }
+
/* don't flip override */
bias_value = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode);
snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
- /* recalibrate dce_z and sta_z */
- reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
- change = snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
- btn_det->mbhc_nsc << 3);
- wcd9xxx_get_z(mbhc, &dce_z, &sta_z);
- if (change)
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg);
- if (dce_z && sta_z) {
- pr_debug("%s: sta_z 0x%x -> 0x%x, dce_z 0x%x -> 0x%x\n",
- __func__,
- mbhc->mbhc_data.sta_z, sta_z & 0xffff,
- mbhc->mbhc_data.dce_z, dce_z & 0xffff);
- mbhc->mbhc_data.dce_z = dce_z;
- mbhc->mbhc_data.sta_z = sta_z;
- wcd9xxx_mbhc_calc_thres(mbhc);
- wcd9xxx_calibrate_hs_polling(mbhc);
- } else {
- pr_warn("%s: failed get new dce_z/sta_z 0x%x/0x%x\n", __func__,
- dce_z, sta_z);
- }
-
- if (is_cs_enable) {
- /* recalibrate dce_nsc_cs_z */
- reg = snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x78, WCD9XXX_MBHC_NSC_CS << 3);
- wcd9xxx_get_z(mbhc, &dce_z, NULL);
- snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg);
- if (dce_z) {
- pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x\n", __func__,
- mbhc->mbhc_data.dce_nsc_cs_z, dce_z & 0xffff);
- mbhc->mbhc_data.dce_nsc_cs_z = dce_z;
+ if (mbhc->mbhc_cfg->do_recalibration) {
+ /* recalibrate dce_z and sta_z */
+ reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
+ change = snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+ 0x78, btn_det->mbhc_nsc << 3);
+ wcd9xxx_get_z(mbhc, &dce_z, &sta_z);
+ if (change)
+ snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg);
+ if (dce_z && sta_z) {
+ pr_debug("%s: sta_z 0x%x -> 0x%x, dce_z 0x%x -> 0x%x\n",
+ __func__,
+ mbhc->mbhc_data.sta_z, sta_z & 0xffff,
+ mbhc->mbhc_data.dce_z, dce_z & 0xffff);
+ mbhc->mbhc_data.dce_z = dce_z;
+ mbhc->mbhc_data.sta_z = sta_z;
+ wcd9xxx_mbhc_calc_thres(mbhc);
+ wcd9xxx_calibrate_hs_polling(mbhc);
} else {
- pr_debug("%s: failed get new dce_nsc_cs_z\n", __func__);
+ pr_warn("%s: failed get new dce_z/sta_z 0x%x/0x%x\n",
+ __func__, dce_z, sta_z);
+ }
+
+ if (is_cs_enable) {
+ /* recalibrate dce_nsc_cs_z */
+ reg = snd_soc_read(mbhc->codec,
+ WCD9XXX_A_CDC_MBHC_B1_CTL);
+ snd_soc_update_bits(mbhc->codec,
+ WCD9XXX_A_CDC_MBHC_B1_CTL,
+ 0x78, WCD9XXX_MBHC_NSC_CS << 3);
+ wcd9xxx_get_z(mbhc, &dce_z, NULL);
+ snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
+ reg);
+ if (dce_z) {
+ pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x\n",
+ __func__, mbhc->mbhc_data.dce_nsc_cs_z,
+ dce_z & 0xffff);
+ mbhc->mbhc_data.dce_nsc_cs_z = dce_z;
+ } else {
+ pr_debug("%s: failed get new dce_nsc_cs_z\n",
+ __func__);
+ }
}
}
-
return bias_value;
}
@@ -1751,6 +1767,7 @@
/* GND and MIC swap detection requires at least 2 rounds of DCE */
BUG_ON(NUM_DCE_PLUG_INS_DETECT < 2);
+ detect_use_vddio_switch = mbhc->mbhc_cfg->use_vddio_meas;
/*
* There are chances vddio switch is on and cfilt voltage is adjusted
@@ -3188,7 +3205,7 @@
* setup internal micbias if codec uses internal micbias for
* headset detection
*/
- if (mbhc->mbhc_cfg->use_int_rbias && !mbhc->int_rbias_on) {
+ if (mbhc->mbhc_cfg->use_int_rbias) {
if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
mbhc->mbhc_cb->setup_int_rbias(codec, true);
else
@@ -4019,7 +4036,6 @@
if (mbhc->mbhc_cfg->use_int_rbias) {
if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) {
mbhc->mbhc_cb->setup_int_rbias(codec, true);
- mbhc->int_rbias_on = true;
} else {
pr_info("%s: internal bias requested but codec did not provide callback\n",
__func__);
@@ -4178,7 +4194,6 @@
case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
- mbhc->int_rbias_on = true;
if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
wcd9xxx_event_to_micbias(event)) {
wcd9xxx_switch_micbias(mbhc, 0);
@@ -4206,7 +4221,6 @@
case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
- mbhc->int_rbias_on = false;
if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
wcd9xxx_event_to_micbias(event)) {
if (mbhc->event_state &
@@ -4502,7 +4516,6 @@
mbhc->mbhc_cb = mbhc_cb;
mbhc->intr_ids = mbhc_cdc_intr_ids;
mbhc->impedance_detect = impedance_det_en;
- mbhc->int_rbias_on = false;
if (mbhc->intr_ids == NULL) {
pr_err("%s: Interrupt mapping not provided\n", __func__);
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
index 7fe9538..29dd84a 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ b/sound/soc/codecs/wcd9xxx-mbhc.h
@@ -228,6 +228,8 @@
bool (*swap_gnd_mic) (struct snd_soc_codec *);
unsigned long cs_enable_flags;
bool use_int_rbias;
+ bool do_recalibration;
+ bool use_vddio_meas;
};
struct wcd9xxx_cfilt_mode {
@@ -336,7 +338,6 @@
u32 rco_clk_rate;
bool update_z;
- bool int_rbias_on;
/* Holds codec specific interrupt mapping */
const struct wcd9xxx_mbhc_intr *intr_ids;
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
index 5d74469..6a22ff2 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr.c
@@ -561,8 +561,15 @@
if (--resmgr->clk_rco_users == 0 &&
resmgr->clk_type == WCD9XXX_CLK_RCO) {
wcd9xxx_disable_clock_block(resmgr);
- snd_soc_update_bits(resmgr->codec,
- WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x00);
+ /* if RCO is enabled, switch from it */
+ if (snd_soc_read(resmgr->codec, WCD9XXX_A_RC_OSC_FREQ)
+ & 0x80) {
+ if (resmgr->codec_type !=
+ WCD9XXX_CDC_TYPE_HELICON)
+ snd_soc_write(resmgr->codec,
+ WCD9XXX_A_CLK_BUFF_EN2, 0x02);
+ wcd9xxx_resmgr_enable_config_mode(resmgr, 0);
+ }
resmgr->clk_type = WCD9XXX_CLK_OFF;
}
break;
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index d235a69..39cb470 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -281,6 +281,17 @@
.rate_min = 8000,
.rate_max = 192000,
},
+ .capture = {
+ .stream_name = "MultiMedia8 Capture",
+ .aif_name = "MM_UL8",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia8",
},
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index c25a460..ceea3d2 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -94,6 +94,8 @@
.cs_enable_flags = (1 << MBHC_CS_ENABLE_POLLING |
1 << MBHC_CS_ENABLE_INSERTION |
1 << MBHC_CS_ENABLE_REMOVAL),
+ .do_recalibration = true,
+ .use_vddio_meas = true,
};
struct msm_auxpcm_gpio {
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 99f196c..c120e0c 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -129,6 +129,8 @@
.cs_enable_flags = (1 << MBHC_CS_ENABLE_POLLING |
1 << MBHC_CS_ENABLE_INSERTION |
1 << MBHC_CS_ENABLE_REMOVAL),
+ .do_recalibration = true,
+ .use_vddio_meas = true,
};
struct msm_auxpcm_gpio {
@@ -2158,7 +2160,7 @@
.name = "MSM8974 Compr4",
.stream_name = "COMPR4",
.cpu_dai_name = "MultiMedia8",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-compr-dsp",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index d9af6f3..408ec03 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -102,6 +102,8 @@
.cs_enable_flags = (1 << MBHC_CS_ENABLE_POLLING |
1 << MBHC_CS_ENABLE_INSERTION |
1 << MBHC_CS_ENABLE_REMOVAL),
+ .do_recalibration = false,
+ .use_vddio_meas = false,
};
/*
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 15128c9..ea16f47 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,8 +1,9 @@
snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o \
- msm-compress-q6-v2.o msm-multi-ch-pcm-q6-v2.o \
- msm-pcm-lpa-v2.o msm-pcm-afe-v2.o msm-pcm-voip-v2.o \
+ msm-compress-q6-v2.o msm-compr-q6-v2.o \
+ msm-multi-ch-pcm-q6-v2.o msm-pcm-lpa-v2.o \
+ msm-pcm-afe-v2.o msm-pcm-voip-v2.o \
msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
- msm-lsm-client.o
+ msm-lsm-client.o msm-audio-effects-q6-v2.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
msm-dai-stub-v2.o
obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
new file mode 100644
index 0000000..5e4d9d3
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
@@ -0,0 +1,721 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6asm-v2.h>
+#include <sound/compress_params.h>
+#include "msm-audio-effects-q6-v2.h"
+
+int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
+ struct virtualizer_params *virtualizer,
+ long *values)
+{
+ int devices = *values++;
+ int num_commands = *values++;
+ char *params;
+ int *updt_params, i, prev_enable_flag;
+ uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int rc = 0;
+
+ pr_debug("%s\n", __func__);
+ if (!ac) {
+ pr_err("%s: cannot set audio effects\n", __func__);
+ return -EINVAL;
+ }
+ params = kzalloc(params_length, GFP_KERNEL);
+ if (!params) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ pr_debug("%s: device: %d\n", __func__, devices);
+ updt_params = (int *)params;
+ params_length = 0;
+ for (i = 0; i < num_commands; i++) {
+ uint32_t command_id = *values++;
+ uint32_t command_config_state = *values++;
+ uint32_t index_offset = *values++;
+ uint32_t length = *values++;
+ switch (command_id) {
+ case VIRTUALIZER_ENABLE:
+ pr_debug("%s: VIRTUALIZER_ENABLE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ prev_enable_flag = virtualizer->enable_flag;
+ virtualizer->enable_flag = *values++;
+ if (prev_enable_flag != virtualizer->enable_flag) {
+ *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE;
+ *updt_params++ = VIRTUALIZER_ENABLE_PARAM_SZ;
+ *updt_params++ = virtualizer->enable_flag;
+ params_length += COMMAND_PAYLOAD_SZ +
+ VIRTUALIZER_ENABLE_PARAM_SZ;
+ }
+ break;
+ case VIRTUALIZER_STRENGTH:
+ pr_debug("%s: VIRTUALIZER_STRENGTH\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ virtualizer->strength = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH;
+ *updt_params++ = VIRTUALIZER_STRENGTH_PARAM_SZ;
+ *updt_params++ = virtualizer->strength;
+ params_length += COMMAND_PAYLOAD_SZ +
+ VIRTUALIZER_STRENGTH_PARAM_SZ;
+ }
+ break;
+ case VIRTUALIZER_OUT_TYPE:
+ pr_debug("%s: VIRTUALIZER_OUT_TYPE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ virtualizer->out_type = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE;
+ *updt_params++ = VIRTUALIZER_OUT_TYPE_PARAM_SZ;
+ *updt_params++ = virtualizer->out_type;
+ params_length += COMMAND_PAYLOAD_SZ +
+ VIRTUALIZER_OUT_TYPE_PARAM_SZ;
+ }
+ break;
+ case VIRTUALIZER_GAIN_ADJUST:
+ pr_debug("%s: VIRTUALIZER_GAIN_ADJUST\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ virtualizer->gain_adjust = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST;
+ *updt_params++ =
+ VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
+ *updt_params++ = virtualizer->gain_adjust;
+ params_length += COMMAND_PAYLOAD_SZ +
+ VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid command to set config\n", __func__);
+ break;
+ }
+ }
+ if (params_length)
+ q6asm_send_audio_effects_params(ac, params,
+ params_length);
+invalid_config:
+ kfree(params);
+ return rc;
+}
+
+int msm_audio_effects_reverb_handler(struct audio_client *ac,
+ struct reverb_params *reverb,
+ long *values)
+{
+ int devices = *values++;
+ int num_commands = *values++;
+ char *params;
+ int *updt_params, i, prev_enable_flag;
+ uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int rc = 0;
+
+ pr_debug("%s\n", __func__);
+ if (!ac) {
+ pr_err("%s: cannot set audio effects\n", __func__);
+ return -EINVAL;
+ }
+ params = kzalloc(params_length, GFP_KERNEL);
+ if (!params) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ pr_debug("%s: device: %d\n", __func__, devices);
+ updt_params = (int *)params;
+ params_length = 0;
+ for (i = 0; i < num_commands; i++) {
+ uint32_t command_id = *values++;
+ uint32_t command_config_state = *values++;
+ uint32_t index_offset = *values++;
+ uint32_t length = *values++;
+ switch (command_id) {
+ case REVERB_ENABLE:
+ pr_debug("%s: REVERB_ENABLE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ prev_enable_flag = reverb->enable_flag;
+ reverb->enable_flag = *values++;
+ if (prev_enable_flag != reverb->enable_flag) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ = AUDPROC_PARAM_ID_REVERB_ENABLE;
+ *updt_params++ = REVERB_ENABLE_PARAM_SZ;
+ *updt_params++ = reverb->enable_flag;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_ENABLE_PARAM_SZ;
+ }
+ break;
+ case REVERB_MODE:
+ pr_debug("%s: REVERB_MODE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->mode = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ = AUDPROC_PARAM_ID_REVERB_MODE;
+ *updt_params++ = REVERB_MODE_PARAM_SZ;
+ *updt_params++ = reverb->mode;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_MODE_PARAM_SZ;
+ }
+ break;
+ case REVERB_PRESET:
+ pr_debug("%s: REVERB_PRESET\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->preset = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ = AUDPROC_PARAM_ID_REVERB_PRESET;
+ *updt_params++ = REVERB_PRESET_PARAM_SZ;
+ *updt_params++ = reverb->preset;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_PRESET_PARAM_SZ;
+ }
+ break;
+ case REVERB_WET_MIX:
+ pr_debug("%s: REVERB_WET_MIX\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->wet_mix = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_WET_MIX;
+ *updt_params++ = REVERB_WET_MIX_PARAM_SZ;
+ *updt_params++ = reverb->wet_mix;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_WET_MIX_PARAM_SZ;
+ }
+ break;
+ case REVERB_GAIN_ADJUST:
+ pr_debug("%s: REVERB_GAIN_ADJUST\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->gain_adjust = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST;
+ *updt_params++ = REVERB_GAIN_ADJUST_PARAM_SZ;
+ *updt_params++ = reverb->gain_adjust;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_GAIN_ADJUST_PARAM_SZ;
+ }
+ break;
+ case REVERB_ROOM_LEVEL:
+ pr_debug("%s: REVERB_ROOM_LEVEL\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->room_level = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL;
+ *updt_params++ = REVERB_ROOM_LEVEL_PARAM_SZ;
+ *updt_params++ = reverb->room_level;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_ROOM_LEVEL_PARAM_SZ;
+ }
+ break;
+ case REVERB_ROOM_HF_LEVEL:
+ pr_debug("%s: REVERB_ROOM_HF_LEVEL\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->room_hf_level = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL;
+ *updt_params++ = REVERB_ROOM_HF_LEVEL_PARAM_SZ;
+ *updt_params++ = reverb->room_hf_level;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_ROOM_HF_LEVEL_PARAM_SZ;
+ }
+ break;
+ case REVERB_DECAY_TIME:
+ pr_debug("%s: REVERB_DECAY_TIME\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->decay_time = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_DECAY_TIME;
+ *updt_params++ = REVERB_DECAY_TIME_PARAM_SZ;
+ *updt_params++ = reverb->decay_time;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_DECAY_TIME_PARAM_SZ;
+ }
+ break;
+ case REVERB_DECAY_HF_RATIO:
+ pr_debug("%s: REVERB_DECAY_HF_RATIO\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->decay_hf_ratio = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO;
+ *updt_params++ = REVERB_DECAY_HF_RATIO_PARAM_SZ;
+ *updt_params++ = reverb->decay_hf_ratio;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_DECAY_HF_RATIO_PARAM_SZ;
+ }
+ break;
+ case REVERB_REFLECTIONS_LEVEL:
+ pr_debug("%s: REVERB_REFLECTIONS_LEVEL\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->reflections_level = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL;
+ *updt_params++ =
+ REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
+ *updt_params++ = reverb->reflections_level;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
+ }
+ break;
+ case REVERB_REFLECTIONS_DELAY:
+ pr_debug("%s: REVERB_REFLECTIONS_DELAY\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->reflections_delay = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY;
+ *updt_params++ =
+ REVERB_REFLECTIONS_DELAY_PARAM_SZ;
+ *updt_params++ = reverb->reflections_delay;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_REFLECTIONS_DELAY_PARAM_SZ;
+ }
+ break;
+ case REVERB_LEVEL:
+ pr_debug("%s: REVERB_LEVEL\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->level = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ = AUDPROC_PARAM_ID_REVERB_LEVEL;
+ *updt_params++ = REVERB_LEVEL_PARAM_SZ;
+ *updt_params++ = reverb->level;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_LEVEL_PARAM_SZ;
+ }
+ break;
+ case REVERB_DELAY:
+ pr_debug("%s: REVERB_DELAY\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->delay = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ = AUDPROC_PARAM_ID_REVERB_DELAY;
+ *updt_params++ = REVERB_DELAY_PARAM_SZ;
+ *updt_params++ = reverb->delay;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_DELAY_PARAM_SZ;
+ }
+ break;
+ case REVERB_DIFFUSION:
+ pr_debug("%s: REVERB_DIFFUSION\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->diffusion = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_DIFFUSION;
+ *updt_params++ = REVERB_DIFFUSION_PARAM_SZ;
+ *updt_params++ = reverb->diffusion;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_DIFFUSION_PARAM_SZ;
+ }
+ break;
+ case REVERB_DENSITY:
+ pr_debug("%s: REVERB_DENSITY\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ reverb->density = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_REVERB;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_REVERB_DENSITY;
+ *updt_params++ = REVERB_DENSITY_PARAM_SZ;
+ *updt_params++ = reverb->density;
+ params_length += COMMAND_PAYLOAD_SZ +
+ REVERB_DENSITY_PARAM_SZ;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid command to set config\n", __func__);
+ break;
+ }
+ }
+ if (params_length)
+ q6asm_send_audio_effects_params(ac, params,
+ params_length);
+invalid_config:
+ kfree(params);
+ return rc;
+}
+
+int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
+ struct bass_boost_params *bass_boost,
+ long *values)
+{
+ int devices = *values++;
+ int num_commands = *values++;
+ char *params;
+ int *updt_params, i, prev_enable_flag;
+ uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int rc = 0;
+
+ pr_debug("%s\n", __func__);
+ if (!ac) {
+ pr_err("%s: cannot set audio effects\n", __func__);
+ return -EINVAL;
+ }
+ params = kzalloc(params_length, GFP_KERNEL);
+ if (!params) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ pr_debug("%s: device: %d\n", __func__, devices);
+ updt_params = (int *)params;
+ params_length = 0;
+ for (i = 0; i < num_commands; i++) {
+ uint32_t command_id = *values++;
+ uint32_t command_config_state = *values++;
+ uint32_t index_offset = *values++;
+ uint32_t length = *values++;
+ switch (command_id) {
+ case BASS_BOOST_ENABLE:
+ pr_debug("%s: BASS_BOOST_ENABLE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ prev_enable_flag = bass_boost->enable_flag;
+ bass_boost->enable_flag = *values++;
+ if (prev_enable_flag != bass_boost->enable_flag) {
+ *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_BASS_BOOST_ENABLE;
+ *updt_params++ = BASS_BOOST_ENABLE_PARAM_SZ;
+ *updt_params++ = bass_boost->enable_flag;
+ params_length += COMMAND_PAYLOAD_SZ +
+ BASS_BOOST_ENABLE_PARAM_SZ;
+ }
+ break;
+ case BASS_BOOST_MODE:
+ pr_debug("%s: BASS_BOOST_MODE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ bass_boost->mode = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_BASS_BOOST_MODE;
+ *updt_params++ = BASS_BOOST_MODE_PARAM_SZ;
+ *updt_params++ = bass_boost->mode;
+ params_length += COMMAND_PAYLOAD_SZ +
+ BASS_BOOST_MODE_PARAM_SZ;
+ }
+ break;
+ case BASS_BOOST_STRENGTH:
+ pr_debug("%s: BASS_BOOST_STRENGTH\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ bass_boost->strength = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH;
+ *updt_params++ = BASS_BOOST_STRENGTH_PARAM_SZ;
+ *updt_params++ = bass_boost->strength;
+ params_length += COMMAND_PAYLOAD_SZ +
+ BASS_BOOST_STRENGTH_PARAM_SZ;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid command to set config\n", __func__);
+ break;
+ }
+ }
+ if (params_length)
+ q6asm_send_audio_effects_params(ac, params,
+ params_length);
+invalid_config:
+ kfree(params);
+ return rc;
+}
+
+int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
+ struct eq_params *eq,
+ long *values)
+{
+ int devices = *values++;
+ int num_commands = *values++;
+ char *params;
+ int *updt_params, i, prev_enable_flag;
+ uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int rc = 0;
+
+ pr_debug("%s\n", __func__);
+ if (!ac) {
+ pr_err("%s: cannot set audio effects\n", __func__);
+ return -EINVAL;
+ }
+ params = kzalloc(params_length, GFP_KERNEL);
+ if (!params) {
+ pr_err("%s, params memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ pr_debug("%s: device: %d\n", __func__, devices);
+ updt_params = (int *)params;
+ params_length = 0;
+ for (i = 0; i < num_commands; i++) {
+ uint32_t command_id = *values++;
+ uint32_t command_config_state = *values++;
+ uint32_t index_offset = *values++;
+ uint32_t length = *values++;
+ int idx, j;
+ switch (command_id) {
+ case EQ_ENABLE:
+ pr_debug("%s: EQ_ENABLE\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ prev_enable_flag = eq->enable_flag;
+ eq->enable_flag = *values++;
+ pr_debug("%s: prev_enable_flag : %d, eq.enable_flag : %d",
+ __func__, prev_enable_flag, eq->enable_flag);
+ if (prev_enable_flag != eq->enable_flag) {
+ *updt_params++ =
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+ *updt_params++ = AUDPROC_PARAM_ID_EQ_ENABLE;
+ *updt_params++ = EQ_ENABLE_PARAM_SZ;
+ *updt_params++ = eq->enable_flag;
+ params_length += COMMAND_PAYLOAD_SZ +
+ EQ_ENABLE_PARAM_SZ;
+ }
+ break;
+ case EQ_CONFIG:
+ pr_debug("%s: EQ_CONFIG\n", __func__);
+ if (length < EQ_CONFIG_PARAM_LEN || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ for (idx = 0; idx < MAX_EQ_BANDS; idx++)
+ eq->per_band_cfg[idx].band_idx = -1;
+ eq->config.eq_pregain = *values++;
+ eq->config.preset_id = *values++;
+ eq->config.num_bands = *values++;
+ if (eq->config.num_bands > MAX_EQ_BANDS) {
+ pr_err("invalid num of bands\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ if (eq->config.num_bands &&
+ (((length - EQ_CONFIG_PARAM_LEN)/
+ EQ_CONFIG_PER_BAND_PARAM_LEN)
+ != eq->config.num_bands)) {
+ pr_err("invalid length to set config per band\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ for (j = 0; j < eq->config.num_bands; j++) {
+ idx = *values++;
+ eq->per_band_cfg[idx].band_idx = idx;
+ eq->per_band_cfg[idx].filter_type = *values++;
+ eq->per_band_cfg[idx].freq_millihertz =
+ *values++;
+ eq->per_band_cfg[idx].gain_millibels =
+ *values++;
+ eq->per_band_cfg[idx].quality_factor =
+ *values++;
+ }
+ if (command_config_state == CONFIG_SET) {
+ int config_param_length = EQ_CONFIG_PARAM_SZ +
+ (EQ_CONFIG_PER_BAND_PARAM_SZ*
+ eq->config.num_bands);
+ *updt_params++ =
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+ *updt_params++ = AUDPROC_PARAM_ID_EQ_CONFIG;
+ *updt_params++ = config_param_length;
+ *updt_params++ = eq->config.eq_pregain;
+ *updt_params++ = eq->config.preset_id;
+ *updt_params++ = eq->config.num_bands;
+ for (idx = 0; idx < MAX_EQ_BANDS; idx++) {
+ if (eq->per_band_cfg[idx].band_idx < 0)
+ continue;
+ *updt_params++ =
+ eq->per_band_cfg[idx].filter_type;
+ *updt_params++ =
+ eq->per_band_cfg[idx].freq_millihertz;
+ *updt_params++ =
+ eq->per_band_cfg[idx].gain_millibels;
+ *updt_params++ =
+ eq->per_band_cfg[idx].quality_factor;
+ *updt_params++ =
+ eq->per_band_cfg[idx].band_idx;
+ }
+ params_length += COMMAND_PAYLOAD_SZ +
+ config_param_length;
+ }
+ break;
+ case EQ_BAND_INDEX:
+ pr_debug("%s: EQ_BAND_INDEX\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ idx = *values++;
+ if (idx > MAX_EQ_BANDS) {
+ pr_err("invalid band index\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ eq->band_index = idx;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ =
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_EQ_BAND_INDEX;
+ *updt_params++ = EQ_BAND_INDEX_PARAM_SZ;
+ *updt_params++ = eq->band_index;
+ params_length += COMMAND_PAYLOAD_SZ +
+ EQ_BAND_INDEX_PARAM_SZ;
+ }
+ break;
+ case EQ_SINGLE_BAND_FREQ:
+ pr_debug("%s: EQ_SINGLE_BAND_FREQ\n", __func__);
+ if (length != 1 || index_offset != 0) {
+ pr_err("no valid params\n");
+ rc = -EINVAL;
+ goto invalid_config;
+ }
+ if (eq->band_index > MAX_EQ_BANDS) {
+ pr_err("invalid band index to set frequency\n");
+ break;
+ }
+ eq->freq_millihertz = *values++;
+ if (command_config_state == CONFIG_SET) {
+ *updt_params++ =
+ AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+ *updt_params++ =
+ AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ;
+ *updt_params++ = EQ_SINGLE_BAND_FREQ_PARAM_SZ;
+ *updt_params++ = eq->freq_millihertz;
+ params_length += COMMAND_PAYLOAD_SZ +
+ EQ_SINGLE_BAND_FREQ_PARAM_SZ;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid command to set config\n", __func__);
+ break;
+ }
+ }
+ if (params_length)
+ q6asm_send_audio_effects_params(ac, params,
+ params_length);
+invalid_config:
+ kfree(params);
+ return rc;
+}
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.h
new file mode 100644
index 0000000..3d2e6d4
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_AUDIO_EFFECTS_H
+#define _MSM_AUDIO_EFFECTS_H
+
+#include <sound/audio_effects.h>
+
+int msm_audio_effects_reverb_handler(struct audio_client *ac,
+ struct reverb_params *reverb,
+ long *values);
+
+int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
+ struct bass_boost_params *bass_boost,
+ long *values);
+int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
+ struct virtualizer_params *virtualizer,
+ long *values);
+
+int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
+ struct eq_params *eq,
+ long *values);
+#endif /*_MSM_AUDIO_EFFECTS_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index b626fa4..bb325d8 100755
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -52,6 +52,9 @@
#define COMPRESSED_LR_VOL_MAX_STEPS 0x20002000
#define MAX_AC3_PARAM_SIZE (18*2*sizeof(int))
+#define AMR_WB_BAND_MODE 8
+#define AMR_WB_DTX_MODE 0
+
const DECLARE_TLV_DB_LINEAR(compr_rx_vol_gain, 0,
COMPRESSED_LR_VOL_MAX_STEPS);
@@ -108,12 +111,30 @@
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};
+/* Add supported codecs for compress capture path */
+static uint32_t supported_compr_capture_codecs[] = {
+ SND_AUDIOCODEC_AMRWB
+};
+
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
.count = ARRAY_SIZE(supported_sample_rates),
.list = supported_sample_rates,
.mask = 0,
};
+static bool msm_compr_capture_codecs(uint32_t req_codec)
+{
+ int i;
+ pr_debug("%s req_codec:%d\n", __func__, req_codec);
+ if (req_codec == 0)
+ return false;
+ for (i = 0; i < ARRAY_SIZE(supported_compr_capture_codecs); i++) {
+ if (req_codec == supported_compr_capture_codecs[i])
+ return true;
+ }
+ return false;
+}
+
static void compr_event_handler(uint32_t opcode,
uint32_t token, uint32_t *payload, void *priv)
{
@@ -428,6 +449,11 @@
prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
prtd->pcm_irq_pos = 0;
+ if (!msm_compr_capture_codecs(codec->id)) {
+ /*request codec invalid or not supported,
+ use default compress format*/
+ codec->id = SND_AUDIOCODEC_AMRWB;
+ }
/* rate and channels are sent to audio driver */
prtd->samp_rate = runtime->rate;
prtd->channel_mode = runtime->channels;
@@ -441,8 +467,12 @@
pr_debug("SND_AUDIOCODEC_AMRWB\n");
ret = q6asm_enc_cfg_blk_amrwb(prtd->audio_client,
MAX_NUM_FRAMES_PER_BUFFER,
- codec->options.generic.reserved[0] /*bitrate 0-8*/,
- codec->options.generic.reserved[1] /*dtx mode 0/1*/);
+ /* use fixed band mode and dtx mode
+ * band mode - 23.85 kbps
+ */
+ AMR_WB_BAND_MODE,
+ /* dtx mode - disable */
+ AMR_WB_DTX_MODE);
if (ret < 0)
pr_err("%s: CMD Format block" \
"failed: %d\n", __func__, ret);
@@ -500,6 +530,13 @@
prtd->pcm_irq_pos = 0;
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (!msm_compr_capture_codecs(
+ compr->info.codec_param.codec.id)) {
+ /*request codec invalid or not supported,
+ use default compress format*/
+ compr->info.codec_param.codec.id =
+ SND_AUDIOCODEC_AMRWB;
+ }
switch (compr->info.codec_param.codec.id) {
case SND_AUDIOCODEC_AMRWB:
break;
@@ -834,6 +871,13 @@
pr_err("%s: Send SoftVolume Param failed ret=%d\n",
__func__, ret);
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (!msm_compr_capture_codecs(
+ compr->info.codec_param.codec.id)) {
+ /*request codec invalid or not supported,
+ use default compress format*/
+ compr->info.codec_param.codec.id =
+ SND_AUDIOCODEC_AMRWB;
+ }
switch (compr->info.codec_param.codec.id) {
case SND_AUDIOCODEC_AMRWB:
pr_debug("q6asm_open_read(FORMAT_AMRWB)\n");
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 79f0a97..7935100 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -43,6 +43,7 @@
#include "msm-pcm-routing-v2.h"
#include "audio_ocmem.h"
+#include "msm-audio-effects-q6-v2.h"
#define DSP_PP_BUFFERING_IN_MSEC 25
#define PARTIAL_DRAIN_ACK_EARLY_BY_MSEC 150
@@ -71,6 +72,7 @@
atomic_t audio_ocmem_req;
struct snd_compr_stream *cstream[MSM_FRONTEND_DAI_MAX];
uint32_t volume[MSM_FRONTEND_DAI_MAX][2]; /* For both L & R */
+ struct msm_compr_audio_effects *audio_effects[MSM_FRONTEND_DAI_MAX];
};
struct msm_compr_audio {
@@ -109,6 +111,7 @@
atomic_t xrun;
atomic_t close;
atomic_t wait_on_close;
+ atomic_t error;
wait_queue_head_t eos_wait;
wait_queue_head_t drain_wait;
@@ -118,6 +121,13 @@
spinlock_t lock;
};
+struct msm_compr_audio_effects {
+ struct bass_boost_params bass_boost;
+ struct virtualizer_params virtualizer;
+ struct reverb_params reverb;
+ struct eq_params equalizer;
+};
+
static int msm_compr_set_volume(struct snd_compr_stream *cstream,
uint32_t volume_l, uint32_t volume_r)
{
@@ -164,17 +174,11 @@
pr_debug("%s: bytes_received = %d copied_total = %d\n",
__func__, prtd->bytes_received, prtd->copied_total);
- /*
- * FIXME: Initial and trailing silence removal API call to DSP results
- * to a glitch during the stream transition for gapless playback.
- * Add this when the issue is fixed from DSP.
- */
-/*
if (prtd->first_buffer)
q6asm_send_meta_data(prtd->audio_client,
prtd->gapless_state.initial_samples_drop,
prtd->gapless_state.trailing_samples_drop);
-*/
+
buffer_length = prtd->codec_param.buffer.fragment_size;
bytes_available = prtd->bytes_received - prtd->copied_total;
if (bytes_available < prtd->codec_param.buffer.fragment_size)
@@ -354,6 +358,14 @@
case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
pr_debug("ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3\n");
break;
+ case RESET_EVENTS:
+ pr_err("Received reset events CB, move to error state");
+ spin_lock(&prtd->lock);
+ snd_compr_fragment_elapsed(cstream);
+ prtd->copied_total = prtd->bytes_received;
+ atomic_set(&prtd->error, 1);
+ spin_unlock(&prtd->lock);
+ break;
default:
pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
break;
@@ -372,9 +384,11 @@
COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
prtd->compr_cap.max_fragments =
COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- prtd->compr_cap.num_codecs = 2;
+ prtd->compr_cap.num_codecs = 4;
prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
+ prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
+ prtd->compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
}
static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
@@ -400,6 +414,10 @@
if (ret < 0)
pr_err("%s: CMD Format block failed\n", __func__);
break;
+ case FORMAT_AC3:
+ break;
+ case FORMAT_EAC3:
+ break;
default:
pr_debug("%s, unsupported format, skip", __func__);
break;
@@ -508,10 +526,18 @@
prtd->cstream = cstream;
pdata->cstream[rtd->dai_link->be_id] = cstream;
+ pdata->audio_effects[rtd->dai_link->be_id] =
+ kzalloc(sizeof(struct msm_compr_audio_effects), GFP_KERNEL);
+ if (!pdata->audio_effects[rtd->dai_link->be_id]) {
+ pr_err("%s: Could not allocate memory for effects\n", __func__);
+ kfree(prtd);
+ return -ENOMEM;
+ }
prtd->audio_client = q6asm_audio_client_alloc(
(app_cb)compr_event_handler, prtd);
if (!prtd->audio_client) {
- pr_err("%s: Could not allocate memory\n", __func__);
+ pr_err("%s: Could not allocate memory for client\n", __func__);
+ kfree(pdata->audio_effects[rtd->dai_link->be_id]);
kfree(prtd);
return -ENOMEM;
}
@@ -539,6 +565,7 @@
atomic_set(&prtd->xrun, 0);
atomic_set(&prtd->close, 0);
atomic_set(&prtd->wait_on_close, 0);
+ atomic_set(&prtd->error, 0);
init_waitqueue_head(&prtd->eos_wait);
init_waitqueue_head(&prtd->drain_wait);
@@ -623,6 +650,7 @@
q6asm_audio_client_free(ac);
+ kfree(pdata->audio_effects[soc_prtd->dai_link->be_id]);
kfree(prtd);
return 0;
@@ -685,6 +713,16 @@
break;
}
+ case SND_AUDIOCODEC_AC3: {
+ prtd->codec = FORMAT_AC3;
+ break;
+ }
+
+ case SND_AUDIOCODEC_EAC3: {
+ prtd->codec = FORMAT_EAC3;
+ break;
+ }
+
default:
pr_err("codec not supported, id =%d\n", params->codec.id);
return -EINVAL;
@@ -743,6 +781,14 @@
return -EINVAL;
}
+ spin_lock_irqsave(&prtd->lock, flags);
+ if (atomic_read(&prtd->error)) {
+ pr_err("%s Got RESET EVENTS notification, return immediately", __func__);
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&prtd->lock, flags);
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
pr_debug("%s: SNDRV_PCM_TRIGGER_START\n", __func__);
@@ -1056,6 +1102,15 @@
tstamp.byte_offset = prtd->byte_offset;
tstamp.copied_total = prtd->copied_total;
first_buffer = prtd->first_buffer;
+
+ if (atomic_read(&prtd->error)) {
+ pr_err("%s Got RESET EVENTS notification, return error", __func__);
+ tstamp.pcm_io_frames = 0;
+ memcpy(arg, &tstamp, sizeof(struct snd_compr_tstamp));
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ return -EINVAL;
+ }
+
spin_unlock_irqrestore(&prtd->lock, flags);
/*
@@ -1144,6 +1199,14 @@
return 0;
}
+ spin_lock_irqsave(&prtd->lock, flags);
+ if (atomic_read(&prtd->error)) {
+ pr_err("%s Got RESET EVENTS notification", __func__);
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&prtd->lock, flags);
+
dstn = prtd->buffer + prtd->app_pointer;
if (count < prtd->buffer_size - prtd->app_pointer) {
if (copy_from_user(dstn, buf, count))
@@ -1226,6 +1289,10 @@
(SND_AUDIOSTREAMFORMAT_MP4ADTS |
SND_AUDIOSTREAMFORMAT_RAW);
break;
+ case SND_AUDIOCODEC_AC3:
+ break;
+ case SND_AUDIOCODEC_EAC3:
+ break;
default:
pr_err("%s: Unsupported audio codec %d\n",
__func__, codec->codec);
@@ -1262,50 +1329,125 @@
}
static int msm_compr_volume_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned long fe_id = kcontrol->private_value;
struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
snd_soc_platform_get_drvdata(platform);
- struct snd_compr_stream *cstream = pdata->cstream[mc->reg];
- uint32_t *volume = pdata->volume[mc->reg];
+ struct snd_compr_stream *cstream = NULL;
+ uint32_t *volume = NULL;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bounds fe_id %lu\n",
+ __func__, fe_id);
+ return -EINVAL;
+ }
+
+ cstream = pdata->cstream[fe_id];
+ volume = pdata->volume[fe_id];
volume[0] = ucontrol->value.integer.value[0];
volume[1] = ucontrol->value.integer.value[1];
- pr_debug("%s: mc->reg %d left_vol %d right_vol %d\n",
- __func__, mc->reg, volume[0], volume[1]);
+ pr_debug("%s: fe_id %lu left_vol %d right_vol %d\n",
+ __func__, fe_id, volume[0], volume[1]);
if (cstream)
msm_compr_set_volume(cstream, volume[0], volume[1]);
return 0;
}
static int msm_compr_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned long fe_id = kcontrol->private_value;
+
struct msm_compr_pdata *pdata =
snd_soc_platform_get_drvdata(platform);
- uint32_t *volume = pdata->volume[mc->reg];
- pr_debug("%s: mc->reg %d\n", __func__, mc->reg);
+ uint32_t *volume = NULL;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bound fe_id %lu\n", __func__, fe_id);
+ return -EINVAL;
+ }
+
+ volume = pdata->volume[fe_id];
+ pr_debug("%s: fe_id %lu\n", __func__, fe_id);
ucontrol->value.integer.value[0] = volume[0];
ucontrol->value.integer.value[1] = volume[1];
return 0;
}
-/* System Pin has no volume control */
-static const struct snd_kcontrol_new msm_compr_volume_controls[] = {
- SOC_DOUBLE_EXT_TLV("Compress Playback Volume",
- MSM_FRONTEND_DAI_MULTIMEDIA4,
- 0, 8, COMPRESSED_LR_VOL_MAX_STEPS, 0,
- msm_compr_volume_get,
- msm_compr_volume_put,
- msm_compr_vol_gain),
-};
+static int msm_compr_audio_effects_config_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+ unsigned long fe_id = kcontrol->private_value;
+ struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+ snd_soc_platform_get_drvdata(platform);
+ struct msm_compr_audio_effects *audio_effects = NULL;
+ struct snd_compr_stream *cstream = NULL;
+ struct msm_compr_audio *prtd = NULL;
+ long *values = &(ucontrol->value.integer.value[0]);
+ int effects_module;
+
+ pr_debug("%s\n", __func__);
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received out of bounds fe_id %lu\n",
+ __func__, fe_id);
+ return -EINVAL;
+ }
+ cstream = pdata->cstream[fe_id];
+ audio_effects = pdata->audio_effects[fe_id];
+ if (!cstream || !audio_effects) {
+ pr_err("%s: stream or effects inactive\n", __func__);
+ return -EINVAL;
+ }
+ prtd = cstream->runtime->private_data;
+ if (!prtd) {
+ pr_err("%s: cannot set audio effects\n", __func__);
+ return -EINVAL;
+ }
+ effects_module = *values++;
+ switch (effects_module) {
+ case VIRTUALIZER_MODULE:
+ pr_debug("%s: VIRTUALIZER_MODULE\n", __func__);
+ msm_audio_effects_virtualizer_handler(prtd->audio_client,
+ &(audio_effects->virtualizer),
+ values);
+ break;
+ case REVERB_MODULE:
+ pr_debug("%s: REVERB_MODULE\n", __func__);
+ msm_audio_effects_reverb_handler(prtd->audio_client,
+ &(audio_effects->reverb),
+ values);
+ break;
+ case BASS_BOOST_MODULE:
+ pr_debug("%s: BASS_BOOST_MODULE\n", __func__);
+ msm_audio_effects_bass_boost_handler(prtd->audio_client,
+ &(audio_effects->bass_boost),
+ values);
+ break;
+ case EQ_MODULE:
+ pr_debug("%s: EQ_MODULE\n", __func__);
+ msm_audio_effects_popless_eq_handler(prtd->audio_client,
+ &(audio_effects->equalizer),
+ values);
+ break;
+ default:
+ pr_err("%s Invalid effects config module\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
 * Readback of the effects configuration is not supported; the control is
 * effectively write-only, so the "get" callback just reports success.
 */
static int msm_compr_audio_effects_config_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	return 0;
}
static int msm_compr_probe(struct snd_soc_platform *platform)
{
@@ -1325,12 +1467,141 @@
for (i = 0; i < MSM_FRONTEND_DAI_MAX; i++) {
pdata->volume[i][0] = COMPRESSED_LR_VOL_MAX_STEPS;
pdata->volume[i][1] = COMPRESSED_LR_VOL_MAX_STEPS;
+ pdata->audio_effects[i] = NULL;
pdata->cstream[i] = NULL;
}
return 0;
}
+static int msm_compr_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = COMPRESSED_LR_VOL_MAX_STEPS;
+ return 0;
+}
+
+static int msm_compr_audio_effects_config_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 128;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0xFFFFFFFF;
+ return 0;
+}
+
+static int msm_compr_add_volume_control(struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = "Compress Playback";
+ const char *deviceNo = "NN";
+ const char *suffix = "Volume";
+ char *mixer_str = NULL;
+ int ctl_len;
+ struct snd_kcontrol_new fe_volume_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_compr_volume_info,
+ .tlv.p = msm_compr_vol_gain,
+ .get = msm_compr_volume_get,
+ .put = msm_compr_volume_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ return 0;
+ }
+ pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+ __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+ rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+ strlen(suffix) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ pr_err("failed to allocate mixer ctrl str of len %d", ctl_len);
+ return 0;
+ }
+ snprintf(mixer_str, ctl_len, "%s %d %s", mixer_ctl_name,
+ rtd->pcm->device, suffix);
+ fe_volume_control[0].name = mixer_str;
+ fe_volume_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("Registering new mixer ctl %s", mixer_str);
+ snd_soc_add_platform_controls(rtd->platform, fe_volume_control,
+ ARRAY_SIZE(fe_volume_control));
+ kfree(mixer_str);
+ return 0;
+}
+
+static int msm_compr_add_audio_effects_control(struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = "Audio Effects Config";
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len;
+ struct snd_kcontrol_new fe_audio_effects_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_compr_audio_effects_config_info,
+ .get = msm_compr_audio_effects_config_get,
+ .put = msm_compr_audio_effects_config_put,
+ .private_value = 0,
+ }
+ };
+
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: added new compr FE with name %s, id %d, cpu dai %s, device no %d\n",
+ __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+ rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+
+ if (!mixer_str) {
+ pr_err("failed to allocate mixer ctrl str of len %d", ctl_len);
+ return 0;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+
+ fe_audio_effects_config_control[0].name = mixer_str;
+ fe_audio_effects_config_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("Registering new mixer ctl %s", mixer_str);
+ snd_soc_add_platform_controls(rtd->platform,
+ fe_audio_effects_config_control,
+ ARRAY_SIZE(fe_audio_effects_config_control));
+ kfree(mixer_str);
+ return 0;
+}
+
/*
 * pcm_new hook: register the per-device volume and audio-effects mixer
 * controls. Registration failures are logged but deliberately do not
 * fail PCM creation.
 */
static int msm_compr_new(struct snd_soc_pcm_runtime *rtd)
{
	if (msm_compr_add_volume_control(rtd))
		pr_err("%s: Could not add Compr Volume Control\n", __func__);
	if (msm_compr_add_audio_effects_control(rtd))
		pr_err("%s: Could not add Compr Audio Effects Control\n",
			__func__);
	return 0;
}
+
static struct snd_compr_ops msm_compr_ops = {
.open = msm_compr_open,
.free = msm_compr_free,
@@ -1347,8 +1618,7 @@
static struct snd_soc_platform_driver msm_soc_platform = {
.probe = msm_compr_probe,
.compr_ops = &msm_compr_ops,
- .controls = msm_compr_volume_controls,
- .num_controls = ARRAY_SIZE(msm_compr_volume_controls),
+ .pcm_new = msm_compr_new,
};
static __devinit int msm_compr_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 0612805..d80ca19 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -40,8 +40,8 @@
#define MIN_CAPTURE_PERIOD_SIZE (128 * 2 * 4)
#define MAX_CAPTURE_PERIOD_SIZE (128 * 2 * 2 * 6 * 4)
-#define MIN_CAPTURE_NUM_PERIODS (32 * 4)
-#define MAX_CAPTURE_NUM_PERIODS (384 * 4)
+#define MIN_CAPTURE_NUM_PERIODS (32)
+#define MAX_CAPTURE_NUM_PERIODS (384)
static struct snd_pcm_hardware msm_afe_hardware_playback = {
.info = (SNDRV_PCM_INFO_MMAP |
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 1b4fae9..121c2ea 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1933,6 +1933,31 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -2938,6 +2963,7 @@
SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
@@ -3114,6 +3140,8 @@
mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia5 Mixer", SND_SOC_NOPM, 0, 0,
mmul5_mixer_controls, ARRAY_SIZE(mmul5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -3300,11 +3328,15 @@
{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
{"MultiMedia4 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
+ {"MultiMedia8 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
{"MultiMedia4 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+ {"MultiMedia8 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
{"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
{"MultiMedia4 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia8 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia4 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -3398,18 +3430,22 @@
{"MultiMedia1 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia4 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia5 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia8 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia1 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia4 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MM_UL1", NULL, "MultiMedia1 Mixer"},
{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MM_UL2", NULL, "MultiMedia2 Mixer"},
{"MM_UL4", NULL, "MultiMedia4 Mixer"},
{"MM_UL5", NULL, "MultiMedia5 Mixer"},
+ {"MM_UL8", NULL, "MultiMedia8 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 09ecd75..acb8e70 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -2879,7 +2879,14 @@
uint16_t port_index;
if (this_afe.apr == NULL) {
- pr_err("AFE is already closed\n");
+ pr_err("%s: AFE is already closed\n", __func__);
+ if ((port_id == RT_PROXY_DAI_001_RX) ||
+ (port_id == RT_PROXY_DAI_002_TX))
+ pcm_afe_instance[port_id & 0x1] = 0;
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX))
+ proxy_afe_instance[port_id & 0x1] = 0;
+ afe_close_done[port_id & 0x1] = true;
ret = -EINVAL;
goto fail_cmd;
}
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 3ae5221..b45a218 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -62,10 +62,13 @@
uint32_t buf_addr_lsw;
uint32_t mmap_hdl;
};
-static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv);
+static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv);
static int32_t q6asm_callback(struct apr_client_data *data, void *priv);
static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg);
+static void q6asm_add_hdr_custom_topology(struct audio_client *ac,
+ struct apr_hdr *hdr,
+ uint32_t pkt_size, uint32_t cmd_flg);
static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg);
static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
@@ -364,7 +367,6 @@
struct list_head *ptr, *next;
int result;
int size = 4096;
-
get_asm_custom_topology(&cal_block);
if (cal_block.cal_size == 0) {
pr_debug("%s: no cal to send addr= 0x%x\n",
@@ -422,8 +424,9 @@
}
}
- q6asm_add_hdr(ac, &asm_top.hdr, APR_PKT_SIZE(APR_HDR_SIZE,
- sizeof(asm_top)), TRUE);
+ q6asm_add_hdr_custom_topology(ac, &asm_top.hdr,
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(asm_top)), TRUE);
asm_top.hdr.opcode = ASM_CMD_ADD_TOPOLOGIES;
asm_top.payload_addr_lsw = cal_block.cal_paddr;
@@ -445,7 +448,7 @@
result = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!result) {
- pr_err("%s: Set topologies failed payload = 0x%x\n",
+ pr_err("%s: Set topologies failed after timedout payload = 0x%x\n",
__func__, cal_block.cal_paddr);
goto done;
}
@@ -775,7 +778,7 @@
if ((atomic_read(&this_mmap.ref_cnt) == 0) ||
(this_mmap.apr == NULL)) {
this_mmap.apr = apr_register("ADSP", "ASM", \
- (apr_fn)q6asm_mmapcallback,\
+ (apr_fn)q6asm_srvc_callback,\
0x0FFFFFFFF, &this_mmap);
if (this_mmap.apr == NULL) {
pr_debug("%s Unable to register APR ASM common port\n",
@@ -869,7 +872,6 @@
pr_err("%s: session not active: %d\n", __func__, session_id);
goto err;
}
-
return session[session_id];
err:
return NULL;
@@ -1042,7 +1044,7 @@
return -EINVAL;
}
-static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv)
+static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
{
uint32_t sid = 0;
uint32_t dir = 0;
@@ -1089,6 +1091,7 @@
switch (payload[0]) {
case ASM_CMD_SHARED_MEM_MAP_REGIONS:
case ASM_CMD_SHARED_MEM_UNMAP_REGIONS:
+ case ASM_CMD_ADD_TOPOLOGIES:
if (payload[1] != 0) {
pr_err("%s: cmd = 0x%x returned error = 0x%x sid:%d\n",
__func__, payload[0], payload[1], sid);
@@ -1614,6 +1617,36 @@
return;
}
+static void q6asm_add_hdr_custom_topology(struct audio_client *ac,
+ struct apr_hdr *hdr,
+ uint32_t pkt_size, uint32_t cmd_flg)
+{
+ pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
+ cmd_flg, ac->session);
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL", __func__);
+ return;
+ }
+
+ mutex_lock(&ac->cmd_lock);
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(sizeof(struct apr_hdr)),\
+ APR_PKT_VER);
+ hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->dest_svc = APR_SVC_ASM;
+ hdr->dest_domain = APR_DOMAIN_ADSP;
+ hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
+ hdr->dest_port = 0;
+ if (cmd_flg) {
+ hdr->token = ((ac->session << 8) | 0x0001) ;
+ atomic_set(&ac->cmd_state, 1);
+ }
+ hdr->pkt_size = pkt_size;
+ mutex_unlock(&ac->cmd_lock);
+ return;
+}
+
static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
u32 pkt_size, u32 cmd_flg, u32 token)
{
@@ -1831,7 +1864,7 @@
struct asm_stream_cmd_open_readwrite_v2 open;
if ((ac == NULL) || (ac->apr == NULL)) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: session[%d]", __func__, ac->session);
@@ -1943,7 +1976,7 @@
struct asm_session_cmd_run_v2 run;
int rc;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s session[%d]", __func__, ac->session);
@@ -2871,7 +2904,7 @@
int cmd_size = 0;
if (!ac || ac->mmap_apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
@@ -2948,7 +2981,7 @@
int rc = 0;
if (!ac || this_mmap.apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
@@ -3018,7 +3051,7 @@
uint32_t bufsz_t;
if (!ac || ac->mmap_apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
@@ -3127,7 +3160,7 @@
int cmd_size = 0;
if (!ac || ac->mmap_apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: Session[%d]\n", __func__, ac->session);
@@ -3513,7 +3546,7 @@
struct audio_port_data *port;
int rc;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
if (ac->io_mode & SYNC_IO_MODE) {
@@ -3576,7 +3609,7 @@
struct audio_port_data *port;
int rc;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
if (ac->io_mode & SYNC_IO_MODE) {
@@ -3773,7 +3806,7 @@
int dsp_buf = 0;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: session[%d] len=%d", __func__, ac->session, len);
@@ -3841,7 +3874,7 @@
int dsp_buf = 0;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s: session[%d] len=%d", __func__, ac->session, len);
@@ -3900,7 +3933,7 @@
int rc;
if (!ac || ac->apr == NULL || tstamp == NULL) {
- pr_err("APR handle NULL or tstamp NULL\n");
+ pr_err("%s: APR handle NULL or tstamp NULL\n", __func__);
return -EINVAL;
}
q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
@@ -3930,6 +3963,58 @@
return -EINVAL;
}
+int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
+ uint32_t params_length)
+{
+ char *asm_params = NULL;
+ struct apr_hdr hdr;
+ struct asm_stream_cmd_set_pp_params_v2 payload_params;
+ int sz, rc;
+
+ pr_debug("%s\n", __func__);
+ if (!ac || ac->apr == NULL || params == NULL) {
+ pr_err("APR handle NULL or params NULL\n");
+ return -EINVAL;
+ }
+ sz = sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ params_length;
+ asm_params = kzalloc(sz, GFP_KERNEL);
+ if (!asm_params) {
+ pr_err("%s, adm params memory alloc failed", __func__);
+ return -ENOMEM;
+ }
+ q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ params_length), TRUE);
+ hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+ payload_params.data_payload_addr_lsw = 0;
+ payload_params.data_payload_addr_msw = 0;
+ payload_params.mem_map_handle = 0;
+ payload_params.data_payload_size = params_length;
+ memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params,
+ sizeof(struct asm_stream_cmd_set_pp_params_v2));
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2)),
+ params, params_length);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+ if (rc < 0) {
+ pr_err("%s: audio effects set-params send failed\n", __func__);
+ goto fail_send_param;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 1*HZ);
+ if (!rc) {
+ pr_err("%s: timeout, audio effects set-params\n", __func__);
+ goto fail_send_param;
+ }
+ rc = 0;
+fail_send_param:
+ kfree(asm_params);
+ return rc;
+}
+
static int __q6asm_cmd(struct audio_client *ac, int cmd, uint32_t stream_id)
{
struct apr_hdr hdr;
@@ -3938,7 +4023,7 @@
int cnt = 0;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
q6asm_stream_add_hdr(ac, &hdr, sizeof(hdr), TRUE, stream_id);
@@ -4151,7 +4236,7 @@
int rc;
if (!ac || ac->apr == NULL) {
- pr_err("APR handle NULL\n");
+ pr_err("%s: APR handle NULL\n", __func__);
return -EINVAL;
}
pr_debug("%s:session[%d]enable[%d]\n", __func__,
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 58b8399..b9af9b6 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -756,6 +756,14 @@
rtd->compr = compr;
compr->private_data = rtd;
+ if (platform->driver->pcm_new) {
+ ret = platform->driver->pcm_new(rtd);
+ if (ret < 0) {
+ pr_err("asoc: compress pcm constructor failed\n");
+ goto compr_err;
+ }
+ }
+
printk(KERN_INFO "compress asoc: %s <-> %s mapping ok\n", codec_dai->name,
cpu_dai->name);
return ret;