Merge "msm: lpm_resources: Add a default property for lpm-resources node"
diff --git a/Documentation/block/row-iosched.txt b/Documentation/block/row-iosched.txt
index 987bd88..fe8b88b 100644
--- a/Documentation/block/row-iosched.txt
+++ b/Documentation/block/row-iosched.txt
@@ -8,16 +8,28 @@
The ROW IO scheduler was developed with the mobile devices needs in
mind. In mobile devices we favor user experience upon everything else,
thus we want to give READ IO requests as much priority as possible.
-The main idea of the ROW scheduling policy is:
-If there are READ requests in pipe - dispatch them but don't starve
-the WRITE requests too much.
+The main idea of the ROW scheduling policy is just that:
+- If there are READ requests in the pipe, dispatch them, while taking
+care not to starve the WRITE requests too much.
Software description
====================
+The elevator defines a registration mechanism for different IO
+schedulers to implement. This makes implementing a new algorithm quite
+straightforward and requires almost no changes to the block/elevator
+framework. A new IO scheduler just has to implement a set of callback
+functions defined by the elevator.
+These callbacks cover all the required IO operations such as
+adding/removing requests to/from the scheduler, merging two requests,
+dispatching a request, etc.
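+A rough sketch of what such a registration looks like is shown below
+(field and function names follow the elevator API of this kernel
+generation and are illustrative only, not a verbatim excerpt):
+
+  static struct elevator_type iosched_row = {
+          .ops = {
+                  .elevator_add_req_fn    = row_add_request,
+                  .elevator_dispatch_fn   = row_dispatch_requests,
+                  .elevator_init_fn       = row_init_queue,
+                  .elevator_exit_fn       = row_exit_queue,
+          },
+          .elevator_name  = "row",
+          .elevator_owner = THIS_MODULE,
+  };
+
+  static int __init row_init(void)
+  {
+          return elv_register(&iosched_row);
+  }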
+
+Design
+======
+
The requests are kept in queues according to their priority. The
dispatching of requests is done in a Round Robin manner with a
different slice for each queue. The dispatch quantum for a specific
-queue is defined according to the queues priority. READ queues are
+queue is set according to the queue's priority. READ queues are
given bigger dispatch quantum than the WRITE queues, within a dispatch
cycle.
@@ -30,88 +42,93 @@
- Regular priority WRITE queue
- Low priority READ queue
+The marking of a request as high/low priority will be done by the
+application adding the request, not by the scheduler. See the TODO
+section below.
+If the request is not marked in any way (high/low) the scheduler
+assigns it to one of the regular priority queues:
+read/write/sync write.
+
If in a certain dispatch cycle one of the queues was empty and didn't
-use its quantum that queue will be marked as "un-served". If we're in a
-middle of a dispatch cycle dispatching from queue Y and a request
+use its quantum that queue will be marked as "un-served". If we're in
+the middle of a dispatch cycle dispatching from queue Y and a request
arrives for queue X that was un-served in the previous cycle, if X's
priority is higher than Y's, queue X will be preempted in the favor of
-queue Y. This won't mean that cycle is restarted. The "dispatched"
-counter of queue X will remain unchanged. Once queue Y uses up it's quantum
-(or there will be no more requests left on it) we'll switch back to queue X
-and allow it to finish it's quantum.
+queue Y.
-For READ requests queues we allow idling in within a dispatch quantum in
-order to give the application a chance to insert more requests. Idling
-means adding some extra time for serving a certain queue even if the
-queue is empty. The idling is enabled if we identify the application is
-inserting requests in a high frequency.
+For READ request queues the ROW IO scheduler allows idling within a
+dispatch quantum in order to give the application a chance to insert
+more requests. Idling means adding some extra time for serving a
+certain queue even if the queue is empty. The idling is enabled if
+the ROW IO scheduler identifies that the application is inserting
+requests at a high frequency.
+Not all queues can idle. The ROW scheduler exposes an enablement
+struct for idling.
+For idling on READ queues, the ROW IO scheduler uses a timer mechanism.
+When the timer expires, a delayed work is scheduled that will signal
+the device driver to fetch another request for dispatch.
-For idling on READ queues we use timer mechanism. When the timer expires,
-if there are requests in the scheduler we will signal the underlying driver
-(for example the MMC driver) to fetch another request for dispatch.
-
-The ROW algorithm takes the scheduling policy one step further, making
-it a bit more "user-needs oriented", by allowing the application to
-hint on the urgency of its requests. For example: even among the READ
-requests several requests may be more urgent for completion then others.
-The former will go to the High priority READ queue, that is given the
-bigger dispatch quantum than any other queue.
-
-ROW scheduler will support special services for block devices that
-supports High Priority Requests. That is, the scheduler may inform the
-device upon urgent requests using new callback make_urgent_request.
+The ROW scheduler will support additional services for block devices
+that support Urgent Requests. That is, the scheduler may inform the
+device driver upon urgent requests using a newly defined callback.
In addition it will support rescheduling of requests that were
-interrupted. For example, if the device issues a long write request and
-a sudden high priority read interrupt pops in, the scheduler will
-inform the device about the urgent request, so the device can stop the
-current write request and serve the high priority read request. In such
-a case the device may also send back to the scheduler the reminder of
-the interrupted write request, such that the scheduler may continue
-sending high priority requests without the need to interrupt the
-ongoing write again and again. The write remainder will be sent later on
-according to the scheduler policy.
-
-Design
-======
-Existing algorithms (cfq, deadline) sort the io requests according LBA.
-When deciding on the next request to dispatch they choose the closest
-request to the current disk head position (from handling last
-dispatched request). This is done in order to reduce the disk head
-movement to a minimum.
-We feel that this functionality isn't really needed in mobile devices.
-Usually applications that write/read large chunks of data insert the
-requests in already sorted LBA order. Thus dealing with sort trees adds
-unnecessary complexity.
-
-We're planing to try this enhancement in the future to check if the
-performance is influenced by it.
+interrupted. For example, if the device driver issues a long write
+request and a sudden urgent request is received by the scheduler,
+the scheduler will inform the device driver about the urgent request,
+so the device driver can stop the current write request and serve the
+urgent request. In such a case the device driver may also insert back
+to the scheduler the remainder of the interrupted write request, such
+that the scheduler may continue sending urgent requests without the
+need to interrupt the ongoing write again and again. The write
+remainder will be sent later on according to the scheduler policy.
SMP/multi-core
==============
-At the moment the code is acceded from 2 contexts:
+At the moment the code is accessed from 2 contexts:
- Application context (from block/elevator layer): adding the requests.
-- Underlying driver context (for example the mmc driver thread): dispatching
- the requests and notifying on completion.
+- device driver thread: dispatching the requests and notifying on
+ completion.
One lock is used to synchronize between the two. This lock is provided
-by the underlying driver along with the dispatch queue.
+by the block device driver along with the dispatch queue.
Config options
==============
1. hp_read_quantum: dispatch quantum for the high priority READ queue
-2. rp_read_quantum: dispatch quantum for the regular priority READ queue
-3. hp_swrite_quantum: dispatch quantum for the high priority Synchronous
- WRITE queue
+ (default is 100 requests)
+2. rp_read_quantum: dispatch quantum for the regular priority READ
+ queue (default is 100 requests)
+3. hp_swrite_quantum: dispatch quantum for the high priority
+ Synchronous WRITE queue (default is 2 requests)
4. rp_swrite_quantum: dispatch quantum for the regular priority
- Synchronous WRITE queue
+ Synchronous WRITE queue (default is 1 request)
5. rp_write_quantum: dispatch quantum for the regular priority WRITE
- queue
+ queue (default is 1 request)
6. lp_read_quantum: dispatch quantum for the low priority READ queue
+ (default is 1 request)
7. lp_swrite_quantum: dispatch quantum for the low priority Synchronous
- WRITE queue
+ WRITE queue (default is 1 request)
8. read_idle: how long to idle on read queue in Msec (in case idling
- is enabled on that queue).
+ is enabled on that queue). (default is 5 Msec)
9. read_idle_freq: frequency of inserting READ requests that will
trigger idling. This is the time in Msec between inserting two READ
- requests
+ requests. (default is 8 Msec)
+Note: Dispatch quantum is the number of requests that will be
+dispatched from a certain queue in a dispatch cycle.
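+All of the above tunables are exposed through the standard iosched
+sysfs interface. For example (the device name and value below are
+illustrative only), the high priority READ quantum can be changed at
+run time with:
+  echo 120 > /sys/block/<dev>/queue/iosched/hp_read_quantum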
+
+To do
+=====
+The ROW algorithm takes the scheduling policy one step further, making
+it a bit more "user-needs oriented", by allowing the application to
+hint on the urgency of its requests. For example: even among the READ
+requests, several requests may be more urgent for completion than
+others. The former will go to the High priority READ queue, which is
+given a bigger dispatch quantum than any other queue.
+
+We still need to design the way applications will "hint" at the
+urgency of their requests. This may be done via ioctl(). We need to
+look into concrete use-cases in order to determine the best solution
+for this. This will be implemented as a second phase.
+
+Design and implement additional services for block devices that
+support High Priority Requests.
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/dma/sps/sps.txt b/Documentation/devicetree/bindings/dma/sps/sps.txt
new file mode 100644
index 0000000..094acb1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sps/sps.txt
@@ -0,0 +1,27 @@
+SPS (Smart Peripheral Switch) may be used as a DMA engine to move data
+in either the Peripheral-to-Peripheral (a.k.a. BAM-to-BAM) mode or the
+Peripheral-to-Memory (a.k.a. BAM-System) mode. SPS includes the BAM
+(Bus Access Module) hardware block, the BAM DMA peripheral, and pipe memory.
+
+Required property:
+ - compatible: should be "qcom,msm_sps"
+
+Optional properties:
+ - reg: offset and size of the register set in the memory map
+ - interrupts: IRQ line
+ - qcom,device-type: specify the device configuration of BAM DMA and
+ pipe memory. Can be one of
+ 1 - With BAM DMA and without pipe memory
+ 2 - With BAM DMA and with pipe memory
+ 3 - Without BAM DMA and without pipe memory
+
+Example:
+
+ qcom,sps@f9980000 {
+ compatible = "qcom,msm_sps";
+ reg = <0xf9984000 0x15000>,
+ <0xf9999000 0xb000>,
+ <0xfe803000 0x4800>;
+ interrupts = <0 94 0>;
+ qcom,device-type = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
index 10732cf..da0708f 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -6,14 +6,14 @@
SPMI bus. This driver supports various LED modules such as
WLED (white LED), RGB LED and flash LED.
-Required Properties:
-- compatible : should be "qcom,leds-qpnp"
-
Each LED module is represented as a node of "leds-qpnp". This
node will further contain the type of LED supported and its
-properties.
+properties. At least one child node is required for each LED
+module. Each must have the required properties below, in addition
+to the properties for its LED type: WLED, Flash or RGB.
-Required properties:
+Required properties for each child node, WLED, Flash and RGB:
+- compatible : should be "qcom,leds-qpnp"
- qcom,id : must be one of values supported in enum qpnp_led
- label : type of led that will be used, ie "wled"
- qcom,max-current : maximum current that the LED can sustain
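+
+Example (illustrative only; the node name, unit address and property
+values below are placeholders, not taken from a particular board):
+
+	qcom,leds@d000 {
+		qcom,wled {
+			compatible = "qcom,leds-qpnp";
+			label = "wled";
+			qcom,id = <0>;
+			qcom,max-current = <25>;
+		};
+	};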
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index 802716c..5bef9b8 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -14,6 +14,7 @@
"metadata_base" are expected.
- interrupts: The modem watchdog interrupt
- vdd_mss-supply: Reference to the regulator that supplies the processor.
+- vdd_mx-supply: Reference to the regulator that supplies the memory rail.
- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
- qcom,pil-self-auth: <0> if the hardware does not require self-authenticating
images and self-authentication is not desired;
@@ -32,6 +33,7 @@
"restart_reg", metadata_base";
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
+ vdd_mx-supply = <&pm8841_s1>;
qcom,is-loadable;
qcom,firmware-name = "mba";
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 7dde34f..86d38ca 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -51,6 +51,8 @@
serial Product Serial Number (from CID Register)
erase_size Erase group size
preferred_erase_size Preferred erase size
+ raw_rpmb_size_mult RPMB partition size
+ rel_sectors Reliable write sector count
Note on Erase Size and Preferred Erase Size:
@@ -91,6 +93,11 @@
"preferred_erase_size" is in bytes.
+Note on raw_rpmb_size_mult:
+ "raw_rpmb_size_mult" is a mutliple of 128kB block.
+ RPMB size in byte is calculated by using the following equation:
+ RPMB partition size = 128kB x raw_rpmb_size_mult
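+ For example, a raw_rpmb_size_mult value of 16 corresponds to an RPMB
+ partition size of 16 x 128kB = 2048kB (2MB).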
+
SD/MMC/SDIO Clock Gating Attribute
==================================
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 81b5dc9..2f2603f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -265,7 +265,12 @@
KBUILD_IMAGE := zImage
endif
-all: $(KBUILD_IMAGE)
+# Build the DT binary blobs if we have OF configured
+ifeq ($(CONFIG_USE_OF),y)
+KBUILD_DTBS := dtbs
+endif
+
+all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm/boot
@@ -281,10 +286,10 @@
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-%.dtb:
+%.dtb: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-dtbs:
+dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
@@ -303,7 +308,7 @@
echo ' uImage - U-Boot wrapped zImage'
echo ' bootpImage - Combined zImage and initial RAM disk'
echo ' (supply initrd image via make variable INITRD=<path>)'
- echo ' dtbs - Build device tree blobs for enabled boards'
+ echo '* dtbs - Build device tree blobs for enabled boards'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' uinstall - Install U-Boot wrapped compressed kernel'
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 6538db5..7a9a80d 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -83,7 +83,7 @@
"ocv_for_r",
"cc_thr";
- qcom,bms-r-sense-mohm = <10>;
+ qcom,bms-r-sense-mohm = <2>;
qcom,bms-v-cutoff-uv = <3400000>;
qcom,bms-max-voltage-uv = <4200000>;
qcom,bms-r-conn-mohm = <18>;
@@ -95,7 +95,6 @@
qcom,bms-calculate-soc-ms = <20000>;
qcom,bms-chg-term-ua = <100000>;
qcom,bms-batt-type = <0>;
- qcom,bms-use-voltage-soc;
};
clkdiv@5b00 {
diff --git a/arch/arm/boot/dts/msm8910.dtsi b/arch/arm/boot/dts/msm8910.dtsi
index a5cac86..3786b02 100644
--- a/arch/arm/boot/dts/msm8910.dtsi
+++ b/arch/arm/boot/dts/msm8910.dtsi
@@ -12,6 +12,7 @@
/include/ "skeleton.dtsi"
/include/ "msm8910-ion.dtsi"
+/include/ "msm-gdsc.dtsi"
/ {
model = "Qualcomm MSM 8910";
@@ -127,6 +128,11 @@
qcom,current-limit = <800>;
};
+ qcom,sps {
+ compatible = "qcom,msm_sps";
+ qcom,device-type = <3>;
+ };
+
qcom,smem@fa00000 {
compatible = "qcom,smem";
reg = <0xfa00000 0x200000>,
@@ -205,4 +211,12 @@
};
};
+&gdsc_vfe {
+ status = "ok";
+};
+
+&gdsc_oxili_cx {
+ status = "ok";
+};
+
/include/ "msm8910-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index 00e9c7a..bd02d89 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -42,7 +42,6 @@
atmel,panel-coords = <0 0 760 1424>;
atmel,display-coords = <0 0 720 1280>;
atmel,i2c-pull-up;
- atmel,no-force-update;
atmel,cfg_1 {
atmel,family-id = <0x82>;
atmel,variant-id = <0x19>;
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index cac7d3c..8479dfa 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -42,7 +42,6 @@
atmel,panel-coords = <0 0 760 1424>;
atmel,display-coords = <0 0 720 1280>;
atmel,i2c-pull-up;
- atmel,no-force-update;
atmel,cfg_1 {
atmel,family-id = <0x82>;
atmel,variant-id = <0x19>;
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 7da00d3..f412d177 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -121,7 +121,6 @@
atmel,panel-coords = <0 0 1080 1920>;
atmel,display-coords = <0 0 1080 1920>;
atmel,i2c-pull-up;
- atmel,no-force-update;
atmel,cfg_1 {
atmel,family-id = <0xa2>;
atmel,variant-id = <0x00>;
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index cdb1710..9fb7d0e 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -42,7 +42,6 @@
atmel,panel-coords = <0 0 760 1424>;
atmel,display-coords = <0 0 720 1280>;
atmel,i2c-pull-up;
- atmel,no-force-update;
atmel,cfg_1 {
atmel,family-id = <0x82>;
atmel,variant-id = <0x19>;
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 3f7e9de..2cef567 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -121,9 +121,6 @@
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1300000>;
qcom,init-voltage = <1300000>;
- qcom,init-current = <100>;
- qcom,system-load = <100000>;
- regulator-always-on;
status = "okay";
};
};
@@ -136,14 +133,6 @@
qcom,init-voltage = <2150000>;
status = "okay";
};
- pm8941_s2_ao: regulator-s2-ao {
- regulator-name = "8941_s2_ao";
- qcom,set = <1>;
- regulator-min-microvolt = <2150000>;
- regulator-max-microvolt = <2150000>;
- status = "okay";
- compatible = "qcom,rpm-regulator-smd";
- };
};
rpm-regulator-smpa3 {
@@ -152,9 +141,6 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
- qcom,init-current = <100>;
- qcom,system-load = <100000>;
- regulator-always-on;
status = "okay";
};
};
@@ -162,13 +148,9 @@
rpm-regulator-ldoa1 {
status = "okay";
pm8941_l1: regulator-l1 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
qcom,init-voltage = <1225000>;
- qcom,init-current = <10>;
- qcom,system-load = <10000>;
- regulator-always-on;
status = "okay";
};
};
@@ -176,7 +158,6 @@
rpm-regulator-ldoa2 {
status = "okay";
pm8941_l2: regulator-l2 {
- parent-supply = <&pm8941_s3>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
qcom,init-voltage = <1200000>;
@@ -187,7 +168,6 @@
rpm-regulator-ldoa3 {
status = "okay";
pm8941_l3: regulator-l3 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
qcom,init-voltage = <1225000>;
@@ -198,7 +178,6 @@
rpm-regulator-ldoa4 {
status = "okay";
pm8941_l4: regulator-l4 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
qcom,init-voltage = <1225000>;
@@ -209,7 +188,6 @@
rpm-regulator-ldoa5 {
status = "okay";
pm8941_l5: regulator-l5 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -220,7 +198,6 @@
rpm-regulator-ldoa6 {
status = "okay";
pm8941_l6: regulator-l6 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -231,7 +208,6 @@
rpm-regulator-ldoa7 {
status = "okay";
pm8941_l7: regulator-l7 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -272,7 +248,6 @@
rpm-regulator-ldoa11 {
status = "okay";
pm8941_l11: regulator-l11 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1300000>;
qcom,init-voltage = <1300000>;
@@ -283,14 +258,12 @@
rpm-regulator-ldoa12 {
status = "okay";
pm8941_l12: regulator-l12 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
status = "okay";
};
pm8941_l12_ao: regulator-l12-ao {
regulator-name = "8941_l12_ao";
- parent-supply = <&pm8941_s2_ao>;
qcom,set = <1>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -312,7 +285,6 @@
rpm-regulator-ldoa14 {
status = "okay";
pm8941_l14: regulator-l14 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -323,7 +295,6 @@
rpm-regulator-ldoa15 {
status = "okay";
pm8941_l15: regulator-l15 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <2050000>;
regulator-max-microvolt = <2050000>;
qcom,init-voltage = <2050000>;
@@ -424,7 +395,6 @@
rpm-regulator-vsa1 {
status = "okay";
pm8941_lvs1: regulator-lvs1 {
- parent-supply = <&pm8941_s3>;
status = "okay";
};
};
@@ -432,7 +402,6 @@
rpm-regulator-vsa2 {
status = "okay";
pm8941_lvs2: regulator-lvs2 {
- parent-supply = <&pm8941_s3>;
status = "okay";
};
};
@@ -440,7 +409,6 @@
rpm-regulator-vsa3 {
status = "okay";
pm8941_lvs3: regulator-lvs3 {
- parent-supply = <&pm8941_s3>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 6d5f3cb..6a7e81e 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -151,24 +151,25 @@
qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
- qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 200000000>;
qcom,sup-voltages = <2950 2950>;
qcom,bus-width = <8>;
qcom,nonremovable;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
qcom,msm-bus,name = "sdcc1";
- qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,num-cases = <8>;
qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
- <78 512 6656 13312>, /* 13 MB/s*/
- <78 512 13312 26624>, /* 26 MB/s */
- <78 512 26624 53248>, /* 52 MB/s */
- <78 512 53248 106496>, /* 104 MB/s */
- <78 512 106496 212992>, /* 208 MB/s */
- <78 512 2147483647 4294967295>; /* Max. bandwidth */
- qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
+ <78 512 1600 3200>, /* 400 KB/s*/
+ <78 512 80000 160000>, /* 20 MB/s */
+ <78 512 100000 200000>, /* 25 MB/s */
+ <78 512 200000 400000>, /* 50 MB/s */
+ <78 512 400000 800000>, /* 100 MB/s */
+ <78 512 800000 1600000>, /* 200 MB/s */
+ <78 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
qcom,dat1-mpm-int = <42>;
};
@@ -197,7 +198,7 @@
qcom,pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
- qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 200000000>;
qcom,sup-voltages = <2950 2950>;
qcom,bus-width = <4>;
qcom,xpc;
@@ -205,17 +206,18 @@
qcom,current-limit = <800>;
qcom,msm-bus,name = "sdcc2";
- qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,num-cases = <8>;
qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
- <81 512 6656 13312>, /* 13 MB/s*/
- <81 512 13312 26624>, /* 26 MB/s */
- <81 512 26624 53248>, /* 52 MB/s */
- <81 512 53248 106496>, /* 104 MB/s */
- <81 512 106496 212992>, /* 208 MB/s */
- <81 512 2147483647 4294967295>; /* Max. bandwidth */
- qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
+ <81 512 1600 3200>, /* 400 KB/s*/
+ <81 512 80000 160000>, /* 20 MB/s */
+ <81 512 100000 200000>, /* 25 MB/s */
+ <81 512 200000 400000>, /* 50 MB/s */
+ <81 512 400000 800000>, /* 100 MB/s */
+ <81 512 800000 1600000>, /* 200 MB/s */
+ <81 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
qcom,dat1-mpm-int = <44>;
};
@@ -244,23 +246,24 @@
<&msmgpio 35 0>; /* DATA3 */
qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
- qcom,clk-rates = <400000 25000000 50000000 100000000>;
+ qcom,clk-rates = <400000 20000000 25000000 50000000 100000000>;
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
qcom,msm-bus,name = "sdcc3";
- qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,num-cases = <8>;
qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <79 512 0 0>, /* No vote */
- <79 512 6656 13312>, /* 13 MB/s*/
- <79 512 13312 26624>, /* 26 MB/s */
- <79 512 26624 53248>, /* 52 MB/s */
- <79 512 53248 106496>, /* 104 MB/s */
- <79 512 106496 212992>, /* 208 MB/s */
- <79 512 2147483647 4294967295>; /* Max. bandwidth */
- qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
+ <79 512 1600 3200>, /* 400 KB/s*/
+ <79 512 80000 160000>, /* 20 MB/s */
+ <79 512 100000 200000>, /* 25 MB/s */
+ <79 512 200000 400000>, /* 50 MB/s */
+ <79 512 400000 800000>, /* 100 MB/s */
+ <79 512 800000 1600000>, /* 200 MB/s */
+ <79 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
status = "disable";
};
@@ -289,23 +292,24 @@
<&msmgpio 92 0>; /* DATA3 */
qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
- qcom,clk-rates = <400000 25000000 50000000 100000000>;
+ qcom,clk-rates = <400000 20000000 25000000 50000000 100000000>;
qcom,sup-voltages = <1800 1800>;
qcom,bus-width = <4>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
qcom,msm-bus,name = "sdcc4";
- qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,num-cases = <8>;
qcom,msm-bus,active-only = <0>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <80 512 0 0>, /* No vote */
- <80 512 6656 13312>, /* 13 MB/s*/
- <80 512 13312 26624>, /* 26 MB/s */
- <80 512 26624 53248>, /* 52 MB/s */
- <80 512 53248 106496>, /* 104 MB/s */
- <80 512 106496 212992>, /* 208 MB/s */
- <80 512 2147483647 4294967295>; /* Max. bandwidth */
- qcom,bus-bw-vectors-bps = <0 13631488 27262976 54525952 109051904 218103808 4294967295>;
+ <80 512 1600 3200>, /* 400 KB/s*/
+ <80 512 80000 160000>, /* 20 MB/s */
+ <80 512 100000 200000>, /* 25 MB/s */
+ <80 512 200000 400000>, /* 50 MB/s */
+ <80 512 400000 800000>, /* 100 MB/s */
+ <80 512 800000 1600000>, /* 200 MB/s */
+ <80 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000 100000000 200000000 4294967295>;
status = "disable";
};
@@ -644,16 +648,11 @@
krait1_dig-supply = <&pm8841_s2_corner_ao>;
krait2_dig-supply = <&pm8841_s2_corner_ao>;
krait3_dig-supply = <&pm8841_s2_corner_ao>;
- krait0_hfpll_a-supply = <&pm8941_s2_ao>;
- krait1_hfpll_a-supply = <&pm8941_s2_ao>;
- krait2_hfpll_a-supply = <&pm8941_s2_ao>;
- krait3_hfpll_a-supply = <&pm8941_s2_ao>;
- l2_hfpll_a-supply = <&pm8941_s2_ao>;
- krait0_hfpll_b-supply = <&pm8941_l12_ao>;
- krait1_hfpll_b-supply = <&pm8941_l12_ao>;
- krait2_hfpll_b-supply = <&pm8941_l12_ao>;
- krait3_hfpll_b-supply = <&pm8941_l12_ao>;
- l2_hfpll_b-supply = <&pm8941_l12_ao>;
+ krait0_hfpll-supply = <&pm8941_l12_ao>;
+ krait1_hfpll-supply = <&pm8941_l12_ao>;
+ krait2_hfpll-supply = <&pm8941_l12_ao>;
+ krait3_hfpll-supply = <&pm8941_l12_ao>;
+ l2_hfpll-supply = <&pm8941_l12_ao>;
};
usb3: qcom,ssusb@f9200000 {
@@ -882,6 +881,7 @@
interrupts = <0 24 1>;
vdd_mss-supply = <&pm8841_s3>;
+ vdd_mx-supply = <&pm8841_s1>;
qcom,is-loadable;
qcom,firmware-name = "mba";
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 7462911..cbd93df 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -523,6 +523,57 @@
compatible = "qcom,pil-q6v5-mss";
interrupts = <0 24 1>;
};
+
+ qcom,smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>,
+ <0xfa006000 0x1000>,
+ <0xfc428000 0x4000>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ qcom,pil-string = "modem";
+ interrupts = <0 25 1>;
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <1>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x100>;
+ qcom,pil-string = "adsp";
+ interrupts = <0 156 1>;
+ };
+
+ qcom,smsm-adsp {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <1>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x200>;
+ interrupts = <0 157 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ qcom,irq-no-suspend;
+ };
+ };
};
/include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/fsm9xxx-perf_defconfig b/arch/arm/configs/fsm9xxx-perf_defconfig
index 1dc853b..8a7928b 100644
--- a/arch/arm/configs/fsm9xxx-perf_defconfig
+++ b/arch/arm/configs/fsm9xxx-perf_defconfig
@@ -37,6 +37,8 @@
# CONFIG_MSM_HW3D is not set
# CONFIG_QSD_AUDIO is not set
# CONFIG_SURF_FFA_GPIO_KEYPAD is not set
+CONFIG_MSM_SMCMOD=m
+CONFIG_MSM_SCM=y
CONFIG_MSM_WATCHDOG=y
CONFIG_MSM_RPC_PMIC=y
CONFIG_MSM_RPC_USB=y
@@ -142,6 +144,8 @@
# CONFIG_MFD_PM8XXX_MISC is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_PM8058_XO=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
# CONFIG_USB_SUPPORT is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/fsm9xxx_defconfig b/arch/arm/configs/fsm9xxx_defconfig
index 203d3b7..db2f25d 100644
--- a/arch/arm/configs/fsm9xxx_defconfig
+++ b/arch/arm/configs/fsm9xxx_defconfig
@@ -36,6 +36,8 @@
# CONFIG_MSM_HW3D is not set
# CONFIG_QSD_AUDIO is not set
# CONFIG_SURF_FFA_GPIO_KEYPAD is not set
+CONFIG_MSM_SMCMOD=m
+CONFIG_MSM_SCM=y
CONFIG_MSM_WATCHDOG=y
CONFIG_MSM_RPC_PMIC=y
CONFIG_MSM_RPC_USB=y
@@ -141,6 +143,8 @@
# CONFIG_MFD_PM8XXX_MISC is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_PM8058_XO=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
# CONFIG_USB_SUPPORT is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index d5e15f1..1fe1eaa 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -242,6 +242,7 @@
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
CONFIG_GENLOCK_MISCDEVICE=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_TSPP=m
@@ -466,6 +467,7 @@
CONFIG_MSM_SSBI=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_BAMDMA=y
+CONFIG_MSM_AVTIMER=y
CONFIG_MSM_IOMMU=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 386f311..01d1934 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -247,6 +247,7 @@
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
CONFIG_GENLOCK_MISCDEVICE=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_TSPP=m
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index bf44665..2c1b8f9 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -248,6 +248,7 @@
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
@@ -282,6 +283,7 @@
CONFIG_SENSORS_QPNP_ADC_CURRENT=y
CONFIG_THERMAL=y
CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_MONITOR=y
CONFIG_THERMAL_QPNP=y
CONFIG_WCD9320_CODEC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index f9dbc85..43e2c2b 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -250,6 +250,7 @@
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
@@ -284,6 +285,7 @@
CONFIG_SENSORS_QPNP_ADC_CURRENT=y
CONFIG_THERMAL=y
CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_MONITOR=y
CONFIG_THERMAL_QPNP=y
CONFIG_WCD9320_CODEC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/include/asm/smcmod.h b/arch/arm/include/asm/smcmod.h
new file mode 100644
index 0000000..06918c4
--- /dev/null
+++ b/arch/arm/include/asm/smcmod.h
@@ -0,0 +1,123 @@
+/* Qualcomm SMC Module API */
+
+#ifndef __SMCMOD_H_
+#define __SMCMOD_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCMOD_DEV "smcmod"
+
+#define SMCMOD_REG_REQ_MAX_ARGS 2
+
+/**
+ * struct smcmod_reg_req - for SMC register ioctl request
+ *
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @num_args - number of arguments.
+ * @args - argument(s) to be passed to the secure world.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_reg_req {
+ uint32_t service_id; /* in */
+ uint32_t command_id; /* in */
+ uint8_t num_args; /* in */
+ uint32_t args[SMCMOD_REG_REQ_MAX_ARGS]; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_buf_req - for SMC buffer ioctl request
+ *
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @ion_cmd_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @cmd_len - length of command data buffer in bytes.
+ * @ion_resp_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @resp_len - length of response data buffer in bytes.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_buf_req {
+ uint32_t service_id;/* in */
+ uint32_t command_id; /* in */
+ int32_t ion_cmd_fd; /* in */
+ uint32_t cmd_len; /* in */
+ int32_t ion_resp_fd; /* in */
+ uint32_t resp_len; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_cipher_req - for SMC cipher command ioctl
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @operation - specifies encryption or decryption.
+ * @mode - specifies cipher mode.
+ * @ion_key_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @key_size - key size in bytes.
+ * @ion_plain_text_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @plain_text_size - size of plain text in bytes.
+ * @ion_cipher_text_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @cipher_text_size - cipher text size in bytes.
+ * @ion_init_vector_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @init_vector_size - size of initialization vector in bytes.
+ * @key_is_null - indicates that the key is null.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_cipher_req {
+ uint32_t algorithm; /* in */
+ uint32_t operation; /* in */
+ uint32_t mode; /* in */
+ int32_t ion_key_fd; /* in */
+ uint32_t key_size; /* in */
+ int32_t ion_plain_text_fd; /* in (encrypt)/out (decrypt) */
+ uint32_t plain_text_size; /* in */
+ int32_t ion_cipher_text_fd; /* out (encrypt)/in (decrypt) */
+ uint32_t cipher_text_size; /* in */
+ int32_t ion_init_vector_fd; /* in */
+ uint32_t init_vector_size; /* in */
+ uint32_t key_is_null; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_msg_digest_req - for message digest command ioctl
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @ion_key_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @key_size - hash key size in bytes.
+ * @ion_input_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @input_size - input data size in bytes.
+ * @ion_output_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @output_size - size of output buffer in bytes.
+ * @fixed_block - indicates whether this is a fixed block digest.
+ * @key_is_null - indicates that the key is null.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_msg_digest_req {
+ uint32_t algorithm; /* in */
+ int32_t ion_key_fd; /* in */
+ uint32_t key_size; /* in */
+ int32_t ion_input_fd; /* in */
+ uint32_t input_size; /* in */
+ int32_t ion_output_fd; /* in/out */
+ uint32_t output_size; /* in */
+ uint32_t fixed_block; /* in */
+ uint32_t key_is_null; /* in */
+ uint32_t return_val; /* out */
+} __packed;
+
+#define SMCMOD_IOC_MAGIC 0x97
+
+/* Number chosen to avoid any conflicts */
+#define SMCMOD_IOCTL_SEND_REG_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 32, struct smcmod_reg_req)
+#define SMCMOD_IOCTL_SEND_BUF_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 33, struct smcmod_buf_req)
+#define SMCMOD_IOCTL_SEND_CIPHER_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 34, struct smcmod_cipher_req)
+#define SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 35, struct smcmod_msg_digest_req)
+#define SMCMOD_IOCTL_GET_VERSION _IOWR(SMCMOD_IOC_MAGIC, 36, uint32_t)
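+
+/*
+ * Illustrative user space sketch (not part of this API): issuing a
+ * register command through the ioctl interface. The "/dev/smcmod" path
+ * is an assumption derived from SMCMOD_DEV; service, command and arg0
+ * are placeholders and error handling is omitted.
+ *
+ *	int fd = open("/dev/smcmod", O_RDWR);
+ *	struct smcmod_reg_req req = {
+ *		.service_id = service,
+ *		.command_id = command,
+ *		.num_args = 1,
+ *		.args = { arg0 },
+ *	};
+ *
+ *	if (ioctl(fd, SMCMOD_IOCTL_SEND_REG_CMD, &req) == 0)
+ *		printf("secure return value: %u\n", req.return_val);
+ */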
+#endif /* __SMCMOD_H_ */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 3a52ddc..d2e2e44 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -444,7 +444,7 @@
if (plat && plat->free_pmu_irq)
armpmu->free_pmu_irq = plat->free_pmu_irq;
- else if (!armpmu->request_pmu_irq)
+ else if (!armpmu->free_pmu_irq)
armpmu->free_pmu_irq = armpmu_generic_free_irq;
irqs = min(pmu_device->num_resources, num_possible_cpus());
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 90aed03..d1bd9e6 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -142,6 +142,7 @@
select MSM_PM8X60 if PM
select MSM_RUN_QUEUE_STATS
select ARM_HAS_SG_CHAIN
+ select USE_DEV_CTRL_VOLUME
config ARCH_MSM8960
bool "MSM8960"
@@ -1947,6 +1948,16 @@
be used on systems which contain an RPM which communicates with the
application processor over SMD.
+config MSM_SMCMOD
+ tristate "Secure Monitor Call (SMC) Module"
+ default n
+ depends on (ARCH_FSM9XXX && ION && ION_MSM && MSM_SCM)
+ help
+ Enable support for the smcmod driver. This driver provides a mechanism
+ to execute the Secure Monitor Call (SMC) to switch from non-secure
+ to secure execution in the fsm9xxx targets. This module utilizes Ion
+ for buffer management.
+
config MSM_SUBSYSTEM_RESTART
bool "MSM Subsystem Restart"
help
@@ -2101,7 +2112,7 @@
config MSM_BUSPM_DEV
tristate "MSM Bus Performance Monitor Kernel Module"
- depends on (ARCH_MSM8X60 || ARCH_MSM8960)
+ depends on (ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM8974)
default m
help
This kernel module is used to mmap() hardware registers for the
@@ -2699,4 +2710,9 @@
Enables MSM-specific user accessible timers via a shared
memory page containing the cycle counter.
+config USE_DEV_CTRL_VOLUME
+ bool "Use Device Control Volume"
+ help
+ Use Device Control Volume as opposed to ALSA volume control.
+
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 548f40e..26aa32c 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -297,6 +297,7 @@
obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
obj-$(CONFIG_ARCH_MSM9625) += gdsc.o
obj-$(CONFIG_ARCH_MSM8226) += gdsc.o
+obj-$(CONFIG_ARCH_MSM8910) += gdsc.o
obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += clock-local2.o clock-pll.o clock-9625.o clock-rpm.o clock-voter.o acpuclock-9625.o
@@ -409,3 +410,5 @@
obj-$(CONFIG_MSM_FIQ) += msm7k_fiq_handler.o
obj-$(CONFIG_MEMORY_HOLE_CARVEOUT) += msm_mem_hole.o
+
+obj-$(CONFIG_MSM_SMCMOD) += smcmod.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index cf1f401..e74d61a 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -47,15 +47,25 @@
# MSM8974
zreladdr-$(CONFIG_ARCH_MSM8974) := 0x00008000
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-fluid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-liquid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-rumi.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-sim.dtb
# MSM9615
zreladdr-$(CONFIG_ARCH_MSM9615) := 0x40808000
# MSM9625
zreladdr-$(CONFIG_ARCH_MSM9625) := 0x00208000
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM9625) += msm9625-rumi.dtb
# MSM8226
zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00008000
+ dtb-$(CONFIG_ARCH_MSM8226) += msm8226-sim.dtb
# FSM9XXX
zreladdr-$(CONFIG_ARCH_FSM9XXX) := 0x10008000
@@ -67,3 +77,5 @@
# MSM8910
zreladdr-$(CONFIG_ARCH_MSM8910) := 0x00008000
+ dtb-$(CONFIG_ARCH_MSM8910) += msm8910-rumi.dtb
+ dtb-$(CONFIG_ARCH_MSM8910) += msm8910-sim.dtb
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index dd27123..00b6458 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -857,8 +857,7 @@
goto out;
/* Change the AXI bus frequency if we can. */
- if (reason != SETRATE_PC &&
- strt_s->axiclk_khz != tgt_s->axiclk_khz) {
+ if (strt_s->axiclk_khz != tgt_s->axiclk_khz) {
res = clk_set_rate(drv_state.ebi1_clk,
tgt_s->axiclk_khz * 1000);
if (res < 0)
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 0fbd6dc..b98fcdd 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -56,8 +56,7 @@
.vreg[VREG_CORE] = { "krait0", 1050000 },
.vreg[VREG_MEM] = { "krait0_mem", 1050000 },
.vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait0_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait0_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
},
[CPU1] = {
.hfpll_phys_base = 0xF909A000,
@@ -66,8 +65,7 @@
.vreg[VREG_CORE] = { "krait1", 1050000 },
.vreg[VREG_MEM] = { "krait1_mem", 1050000 },
.vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait1_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait1_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
},
[CPU2] = {
.hfpll_phys_base = 0xF90AA000,
@@ -76,8 +74,7 @@
.vreg[VREG_CORE] = { "krait2", 1050000 },
.vreg[VREG_MEM] = { "krait2_mem", 1050000 },
.vreg[VREG_DIG] = { "krait2_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait2_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait2_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
},
[CPU3] = {
.hfpll_phys_base = 0xF90BA000,
@@ -86,15 +83,13 @@
.vreg[VREG_CORE] = { "krait3", 1050000 },
.vreg[VREG_MEM] = { "krait3_mem", 1050000 },
.vreg[VREG_DIG] = { "krait3_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait3_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait3_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
},
[L2] = {
.hfpll_phys_base = 0xF9016000,
.l2cpmr_iaddr = 0x0500,
.sec_clk_sel = 2,
- .vreg[VREG_HFPLL_A] = { "l2_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "l2_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
},
};
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index fab974d..f70e41a 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -25,6 +25,7 @@
#include <linux/mfd/pm8xxx/misc.h>
#include <linux/msm_ssbi.h>
#include <linux/spi/spi.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/msm_ion.h>
@@ -101,6 +102,7 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
#define HOLE_SIZE 0x20000
+#define MSM_ION_MFC_META_SIZE 0x40000 /* 256 Kbytes */
#define MSM_CONTIG_MEM_SIZE 0x65000
#ifdef CONFIG_MSM_IOMMU
#define MSM_ION_MM_SIZE 0x3800000
@@ -114,7 +116,7 @@
#define MSM_ION_HEAP_NUM 8
#endif
#define MSM_ION_MM_FW_SIZE (0x200000 - HOLE_SIZE) /* (2MB - 128KB) */
-#define MSM_ION_MFC_SIZE SZ_8K
+#define MSM_ION_MFC_SIZE (SZ_8K + MSM_ION_MFC_META_SIZE)
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
#else
#define MSM_CONTIG_MEM_SIZE 0x110C000
@@ -295,6 +297,7 @@
.reusable = FMEM_ENABLED,
.mem_is_fmem = FMEM_ENABLED,
.fixed_position = FIXED_MIDDLE,
+ .is_cma = 1,
};
static struct ion_cp_heap_pdata cp_mfc_apq8064_ion_pdata = {
@@ -319,6 +322,17 @@
};
#endif
+static u64 msm_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_mm_heap_device = {
+ .name = "ion-mm-heap-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &msm_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
/**
* These heaps are listed in the order they will be allocated. Due to
* video hardware restrictions and content protection the FW heap has to
@@ -330,9 +344,7 @@
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data apq8064_ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = {
+struct ion_platform_heap apq8064_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -346,6 +358,7 @@
.size = MSM_ION_MM_SIZE,
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mm_apq8064_ion_pdata,
+ .priv = &ion_mm_heap_device.dev
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
@@ -395,7 +408,11 @@
.extra_data = (void *) &co_apq8064_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data apq8064_ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = apq8064_heaps,
};
static struct platform_device apq8064_ion_dev = {
@@ -451,26 +468,45 @@
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ unsigned int ret;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ unsigned long cma_alignment;
+ unsigned int low_use_cma = 0;
+ unsigned int middle_use_cma = 0;
+ unsigned int high_use_cma = 0;
+
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+
for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
- const struct ion_platform_heap *heap =
+ struct ion_platform_heap *heap =
&(apq8064_ion_pdata.heaps[i]);
+ int use_cma = 0;
+
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
switch ((int)heap->type) {
case ION_HEAP_TYPE_CP:
+ if (((struct ion_cp_heap_pdata *)
+ heap->extra_data)->is_cma) {
+ heap->size = ALIGN(heap->size,
+ cma_alignment);
+ use_cma = 1;
+ }
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
break;
+ case ION_HEAP_TYPE_DMA:
+ use_cma = 1;
+ /* Purposely fall through here */
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -484,28 +520,70 @@
else
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- if (fixed_position == FIXED_LOW)
+ if (fixed_position == FIXED_LOW) {
fixed_low_size += heap->size;
- else if (fixed_position == FIXED_MIDDLE)
+ low_use_cma = use_cma;
+ } else if (fixed_position == FIXED_MIDDLE) {
fixed_middle_size += heap->size;
- else if (fixed_position == FIXED_HIGH)
+ middle_use_cma = use_cma;
+ } else if (fixed_position == FIXED_HIGH) {
fixed_high_size += heap->size;
+ high_use_cma = use_cma;
+ } else if (use_cma) {
+ /*
+ * Heaps that use CMA but are not part of the
+ * fixed set. Create wherever.
+ */
+ dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ 0,
+ 0xb0000000);
+
+ }
}
}
if (!fixed_size)
return;
- /* Since the fixed area may be carved out of lowmem,
- * make sure the length is a multiple of 1M.
+ /*
+ * Given the setup for the fixed area, we can't round up all sizes.
+ * Some sizes must be set up exactly and aligned correctly. Incorrect
+ * alignments are considered a configuration issue
*/
- fixed_size = (fixed_size + HOLE_SIZE + SECTION_SIZE - 1)
- & SECTION_MASK;
- apq8064_reserve_fixed_area(fixed_size);
fixed_low_start = APQ8064_FIXED_AREA_START;
+ if (low_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE));
+ ret = memblock_remove(fixed_low_start,
+ fixed_low_size + HOLE_SIZE);
+ BUG_ON(ret);
+ }
+
fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
+ if (middle_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE));
+ ret = memblock_remove(fixed_middle_start, fixed_middle_size);
+ BUG_ON(ret);
+ }
+
fixed_high_start = fixed_middle_start + fixed_middle_size;
+ if (high_use_cma) {
+ fixed_high_size = ALIGN(fixed_high_size, cma_alignment);
+ BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment));
+ } else {
+ /* This is the end of the fixed area so it's okay to round up */
+ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE);
+ ret = memblock_remove(fixed_high_start, fixed_high_size);
+ BUG_ON(ret);
+ }
for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap = &(apq8064_ion_pdata.heaps[i]);
@@ -521,6 +599,7 @@
fixed_position = pdata->fixed_position;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ case ION_HEAP_TYPE_DMA:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
break;
@@ -534,6 +613,14 @@
break;
case FIXED_MIDDLE:
heap->base = fixed_middle_start;
+ if (middle_use_cma) {
+ ret = dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ fixed_middle_start,
+ 0xa0000000);
+ WARN_ON(ret);
+ }
pdata->secure_base = fixed_middle_start
- HOLE_SIZE;
pdata->secure_size = HOLE_SIZE + heap->size;
@@ -3159,6 +3246,11 @@
},
};
+static struct platform_device msm_dev_avtimer_device = {
+ .name = "dev_avtimer",
+ .dev = { .platform_data = &dev_avtimer_pdata },
+};
+
/* Sensors DSPS platform data */
#define DSPS_PIL_GENERIC_NAME "dsps"
static void __init apq8064_init_dsps(void)
@@ -3598,6 +3690,9 @@
platform_device_register(&mpq_keypad_device);
} else if (machine_is_mpq8064_hrd())
platform_device_register(&mpq_hrd_keys_pdev);
+ if (machine_is_mpq8064_cdp() || machine_is_mpq8064_hrd() ||
+ machine_is_mpq8064_dtv())
+ platform_device_register(&msm_dev_avtimer_device);
}
MACHINE_START(APQ8064_CDP, "QCT APQ8064 CDP")
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 3e90489..fde82f4 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -64,6 +64,8 @@
CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
CLK_DUMMY("iface_clk", HSUSB_IFACE_CLK, "f9a55000.usb", OFF),
CLK_DUMMY("core_clk", HSUSB_CORE_CLK, "f9a55000.usb", OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, "msm_sps", OFF),
+ CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, "msm_sps", OFF),
CLK_DUMMY("iface_clk", NULL, "msm_sdcc.1", OFF),
CLK_DUMMY("core_clk", NULL, "msm_sdcc.1", OFF),
CLK_DUMMY("bus_clk", NULL, "msm_sdcc.1", OFF),
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index be55031..e35b3c1 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -264,7 +264,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 274406400,
+ .ab = 600000000,
.ib = 2656000000UL,
},
{
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 512ae72..13b16f2 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -29,6 +29,7 @@
#ifdef CONFIG_ANDROID_PMEM
#include <linux/android_pmem.h>
#endif
+#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/platform_data/qcom_wcnss_device.h>
@@ -132,7 +133,7 @@
#endif
#define MSM_PMEM_ADSP_SIZE 0x7800000
-#define MSM_PMEM_AUDIO_SIZE 0x314000
+#define MSM_PMEM_AUDIO_SIZE 0x408000
#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
#define MSM_PMEM_SIZE 0x4000000 /* 64 Mbytes */
#else
@@ -341,6 +342,7 @@
.reusable = FMEM_ENABLED,
.mem_is_fmem = FMEM_ENABLED,
.fixed_position = FIXED_MIDDLE,
+ .is_cma = 1,
};
static struct ion_cp_heap_pdata cp_mfc_msm8930_ion_pdata = {
@@ -365,6 +367,18 @@
};
#endif
+
+static u64 msm_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_mm_heap_device = {
+ .name = "ion-mm-heap-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &msm_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
/**
* These heaps are listed in the order they will be allocated. Due to
* video hardware restrictions and content protection the FW heap has to
@@ -376,9 +390,7 @@
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data msm8930_ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = {
+struct ion_platform_heap msm8930_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -392,6 +404,7 @@
.size = MSM_ION_MM_SIZE,
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mm_msm8930_ion_pdata,
+ .priv = &ion_mm_heap_device.dev
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
@@ -441,7 +454,12 @@
.extra_data = (void *) &co_msm8930_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data msm8930_ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = msm8930_heaps,
+
};
static struct platform_device msm8930_ion_dev = {
@@ -497,26 +515,44 @@
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ int ret;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ unsigned long cma_alignment;
+ unsigned int low_use_cma = 0;
+ unsigned int middle_use_cma = 0;
+ unsigned int high_use_cma = 0;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+
for (i = 0; i < msm8930_ion_pdata.nr; ++i) {
- const struct ion_platform_heap *heap =
+ struct ion_platform_heap *heap =
&(msm8930_ion_pdata.heaps[i]);
+ int use_cma = 0;
+
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
switch ((int) heap->type) {
case ION_HEAP_TYPE_CP:
+ if (((struct ion_cp_heap_pdata *)
+ heap->extra_data)->is_cma) {
+ heap->size = ALIGN(heap->size,
+ cma_alignment);
+ use_cma = 1;
+ }
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
break;
+ case ION_HEAP_TYPE_DMA:
+ use_cma = 1;
+ /* Purposely fall through here */
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -530,29 +566,68 @@
else
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- if (fixed_position == FIXED_LOW)
+ if (fixed_position == FIXED_LOW) {
fixed_low_size += heap->size;
- else if (fixed_position == FIXED_MIDDLE)
+ low_use_cma = use_cma;
+ } else if (fixed_position == FIXED_MIDDLE) {
fixed_middle_size += heap->size;
- else if (fixed_position == FIXED_HIGH)
+ middle_use_cma = use_cma;
+ } else if (fixed_position == FIXED_HIGH) {
fixed_high_size += heap->size;
-
+ high_use_cma = use_cma;
+ } else if (use_cma) {
+ /*
+ * Heaps that use CMA but are not part of the
+ * fixed set. Create wherever.
+ */
+ dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ 0,
+ 0xb0000000);
+ }
}
}
if (!fixed_size)
return;
-
- /* Since the fixed area may be carved out of lowmem,
- * make sure the length is a multiple of 1M.
+ /*
+ * Given the setup for the fixed area, we can't round up all sizes.
+ * Some sizes must be set up exactly and aligned correctly. Incorrect
+ * alignments are considered a configuration issue
*/
- fixed_size = (fixed_size + MSM_MM_FW_SIZE + SECTION_SIZE - 1)
- & SECTION_MASK;
- msm8930_reserve_fixed_area(fixed_size);
fixed_low_start = MSM8930_FIXED_AREA_START;
+ if (low_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE));
+ ret = memblock_remove(fixed_low_start,
+ fixed_low_size + HOLE_SIZE);
+ BUG_ON(ret);
+ }
+
fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
+ if (middle_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE));
+ ret = memblock_remove(fixed_middle_start, fixed_middle_size);
+ BUG_ON(ret);
+ }
+
fixed_high_start = fixed_middle_start + fixed_middle_size;
+ if (high_use_cma) {
+ fixed_high_size = ALIGN(fixed_high_size, cma_alignment);
+ BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment));
+ } else {
+ /* This is the end of the fixed area so it's okay to round up */
+ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE);
+ ret = memblock_remove(fixed_high_start, fixed_high_size);
+ BUG_ON(ret);
+ }
for (i = 0; i < msm8930_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap = &(msm8930_ion_pdata.heaps[i]);
@@ -567,6 +642,7 @@
(struct ion_cp_heap_pdata *)heap->extra_data;
fixed_position = pdata->fixed_position;
break;
+ case ION_HEAP_TYPE_DMA:
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -581,6 +657,12 @@
break;
case FIXED_MIDDLE:
heap->base = fixed_middle_start;
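+ /* The fixed-middle CMA area is owned by the MM heap's platform device */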
+ if (middle_use_cma)
+ dma_declare_contiguous(
+ &ion_mm_heap_device.dev,
+ heap->size,
+ fixed_middle_start,
+ 0xa0000000);
pdata->secure_base = fixed_middle_start
- HOLE_SIZE;
pdata->secure_size = HOLE_SIZE + heap->size;
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index 7a2e9e1..3853e4c 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -15,6 +15,7 @@
#include <linux/gpio.h>
#include <mach/camera.h>
#include <mach/msm_bus_board.h>
+#include <mach/socinfo.h>
#include <mach/gpiomux.h>
#include "devices.h"
#include "board-8960.h"
@@ -182,6 +183,23 @@
},
};
+static struct msm_gpiomux_config msm8960_cam_2d_configs_sglte[] = {
+ {
+ .gpio = 20,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &cam_settings[3],
+ [GPIOMUX_SUSPENDED] = &cam_settings[8],
+ },
+ },
+ {
+ .gpio = 21,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &cam_settings[3],
+ [GPIOMUX_SUSPENDED] = &cam_settings[8],
+ },
+ },
+};
+
#define VFE_CAMIF_TIMER1_GPIO 2
#define VFE_CAMIF_TIMER2_GPIO 3
#define VFE_CAMIF_TIMER3_GPIO_INT 4
@@ -828,6 +846,16 @@
void __init msm8960_init_cam(void)
{
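+ /* SGLTE targets use alternate gpiomux settings for the 2D camera GPIOs (20/21) */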
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+ msm_8960_front_cam_gpio_conf.cam_gpiomux_conf_tbl =
+ msm8960_cam_2d_configs_sglte;
+ msm_8960_front_cam_gpio_conf.cam_gpiomux_conf_tbl_size =
+ ARRAY_SIZE(msm8960_cam_2d_configs_sglte);
+ msm_8960_back_cam_gpio_conf.cam_gpiomux_conf_tbl =
+ msm8960_cam_2d_configs_sglte;
+ msm_8960_back_cam_gpio_conf.cam_gpiomux_conf_tbl_size =
+ ARRAY_SIZE(msm8960_cam_2d_configs_sglte);
+ }
msm_gpiomux_install(msm8960_cam_common_configs,
ARRAY_SIZE(msm8960_cam_common_configs));
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 57f7b51..97639d6 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -30,6 +30,7 @@
#include <linux/android_pmem.h>
#endif
#include <linux/cyttsp-qc.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/platform_data/qcom_wcnss_device.h>
@@ -151,6 +152,7 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
#define HOLE_SIZE 0x20000
+#define MSM_ION_MFC_META_SIZE 0x40000 /* 256 Kbytes */
#define MSM_CONTIG_MEM_SIZE 0x65000
#ifdef CONFIG_MSM_IOMMU
#define MSM_ION_MM_SIZE 0x3800000 /* Need to be multiple of 64K */
@@ -164,7 +166,7 @@
#define MSM_ION_HEAP_NUM 8
#endif
#define MSM_ION_MM_FW_SIZE (0x200000 - HOLE_SIZE) /* 128kb */
-#define MSM_ION_MFC_SIZE SZ_8K
+#define MSM_ION_MFC_SIZE (SZ_8K + MSM_ION_MFC_META_SIZE)
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
#define MSM_LIQUID_ION_MM_SIZE (MSM_ION_MM_SIZE + 0x600000)
@@ -363,6 +365,7 @@
.fixed_position = FIXED_MIDDLE,
.iommu_map_all = 1,
.iommu_2x_map_domain = VIDEO_DOMAIN,
+ .is_cma = 1,
};
static struct ion_cp_heap_pdata cp_mfc_msm8960_ion_pdata = {
@@ -387,6 +390,17 @@
};
#endif
+static u64 msm_dmamask = DMA_BIT_MASK(32);
+
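+/* Device that the MM ION heap's CMA region is declared against */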
+static struct platform_device ion_mm_heap_device = {
+ .name = "ion-mm-heap-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &msm_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
/**
* These heaps are listed in the order they will be allocated. Due to
* video hardware restrictions and content protection the FW heap has to
@@ -398,9 +412,7 @@
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data msm8960_ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = {
+struct ion_platform_heap msm8960_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -414,6 +426,7 @@
.size = MSM_ION_MM_SIZE,
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mm_msm8960_ion_pdata,
+ .priv = &ion_mm_heap_device.dev,
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
@@ -463,7 +476,11 @@
.extra_data = (void *) &co_msm8960_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data msm8960_ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = msm8960_heaps,
};
static struct platform_device msm8960_ion_dev = {
@@ -546,21 +563,29 @@
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ int ret;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ unsigned long cma_alignment;
+ unsigned int low_use_cma = 0;
+ unsigned int middle_use_cma = 0;
+ unsigned int high_use_cma = 0;
adjust_mem_for_liquid();
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
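+ /* CMA-backed heaps below must be sized and placed at this granularity */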
+ cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+
for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap =
&(msm8960_ion_pdata.heaps[i]);
int align = SZ_4K;
int iommu_map_all = 0;
int adjacent_mem_id = INVALID_HEAP_ID;
+ int use_cma = 0;
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
@@ -574,7 +599,16 @@
iommu_map_all =
((struct ion_cp_heap_pdata *)
heap->extra_data)->iommu_map_all;
+ if (((struct ion_cp_heap_pdata *)
+ heap->extra_data)->is_cma) {
+ heap->size = ALIGN(heap->size,
+ cma_alignment);
+ use_cma = 1;
+ }
break;
+ case ION_HEAP_TYPE_DMA:
+ use_cma = 1;
+ /* Purposely fall through here */
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -598,28 +632,71 @@
else
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- if (fixed_position == FIXED_LOW)
+ if (fixed_position == FIXED_LOW) {
fixed_low_size += heap->size;
- else if (fixed_position == FIXED_MIDDLE)
+ low_use_cma = use_cma;
+ } else if (fixed_position == FIXED_MIDDLE) {
fixed_middle_size += heap->size;
- else if (fixed_position == FIXED_HIGH)
+ middle_use_cma = use_cma;
+ } else if (fixed_position == FIXED_HIGH) {
fixed_high_size += heap->size;
+ high_use_cma = use_cma;
+ } else if (use_cma) {
+ /*
+ * Heaps that use CMA but are not part of the
+ * fixed set. Create wherever.
+ */
+ dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ 0,
+ 0xb0000000);
+ }
}
}
if (!fixed_size)
return;
- /* Since the fixed area may be carved out of lowmem,
- * make sure the length is a multiple of 1M.
+ /*
+ * Given the setup for the fixed area, we can't round up all sizes.
+ * Some sizes must be set up exactly and aligned correctly. Incorrect
+ * alignments are considered a configuration issue.
*/
- fixed_size = (fixed_size + MSM_MM_FW_SIZE + SECTION_SIZE - 1)
- & SECTION_MASK;
- msm8960_reserve_fixed_area(fixed_size);
fixed_low_start = MSM8960_FIXED_AREA_START;
+ if (low_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE));
+ ret = memblock_remove(fixed_low_start,
+ fixed_low_size + HOLE_SIZE);
+ BUG_ON(ret);
+ }
+
fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
+ if (middle_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE));
+ ret = memblock_remove(fixed_middle_start, fixed_middle_size);
+ BUG_ON(ret);
+ }
+
fixed_high_start = fixed_middle_start + fixed_middle_size;
+ if (high_use_cma) {
+ fixed_high_size = ALIGN(fixed_high_size, cma_alignment);
+ BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment));
+ } else {
+ /* This is the end of the fixed area so it's okay to round up */
+ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE);
+ ret = memblock_remove(fixed_high_start, fixed_high_size);
+ BUG_ON(ret);
+ }
+
for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap = &(msm8960_ion_pdata.heaps[i]);
@@ -635,6 +712,7 @@
fixed_position = pdata->fixed_position;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ case ION_HEAP_TYPE_DMA:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
break;
@@ -648,6 +726,14 @@
break;
case FIXED_MIDDLE:
heap->base = fixed_middle_start;
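+ /* Declare the MM heap's CMA region once its fixed base is known; failure is only warned about */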
+ if (middle_use_cma) {
+ ret = dma_declare_contiguous(
+ &ion_mm_heap_device.dev,
+ heap->size,
+ fixed_middle_start,
+ 0xa0000000);
+ WARN_ON(ret);
+ }
pdata->secure_base = fixed_middle_start
- HOLE_SIZE;
pdata->secure_size = HOLE_SIZE + heap->size;
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index ad74182..0f6c000 100644
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -153,6 +153,53 @@
};
+static struct gpiomux_setting hsic_sus_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+ .dir = GPIOMUX_OUT_LOW,
+};
+
+static struct gpiomux_setting hsic_act_cfg = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_12MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting hsic_hub_act_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+ .dir = GPIOMUX_IN,
+};
+
+static struct msm_gpiomux_config msm_hsic_configs[] = {
+ {
+ .gpio = 144, /*HSIC_STROBE */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &hsic_act_cfg,
+ [GPIOMUX_SUSPENDED] = &hsic_sus_cfg,
+ },
+ },
+ {
+ .gpio = 145, /* HSIC_DATA */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &hsic_act_cfg,
+ [GPIOMUX_SUSPENDED] = &hsic_sus_cfg,
+ },
+ },
+};
+
+static struct msm_gpiomux_config msm_hsic_hub_configs[] = {
+ {
+ .gpio = 50, /* HSIC_HUB_INT_N */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &hsic_hub_act_cfg,
+ [GPIOMUX_SUSPENDED] = &hsic_sus_cfg,
+ },
+ },
+};
+
static struct gpiomux_setting mhl_suspend_config = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
@@ -172,7 +219,6 @@
.pull = GPIOMUX_PULL_UP,
};
-
static struct gpiomux_setting hdmi_suspend_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
@@ -638,6 +684,10 @@
msm_gpiomux_install(msm_taiko_config, ARRAY_SIZE(msm_taiko_config));
+ msm_gpiomux_install(msm_hsic_configs, ARRAY_SIZE(msm_hsic_configs));
+ msm_gpiomux_install(msm_hsic_hub_configs,
+ ARRAY_SIZE(msm_hsic_hub_configs));
+
msm_gpiomux_install(msm_hdmi_configs, ARRAY_SIZE(msm_hdmi_configs));
msm_gpiomux_install(msm_mhl_configs, ARRAY_SIZE(msm_mhl_configs));
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 1022616..1dcd54f 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -81,9 +81,7 @@
.align = PAGE_SIZE,
};
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = {
+static struct ion_platform_heap msm9615_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -102,7 +100,11 @@
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &co_ion_pdata,
},
- }
+};
+
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = msm9615_heaps,
};
static struct platform_device ion_dev = {
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 42f3f41..f6a354f 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -125,135 +125,6 @@
msm_reserve();
}
-static struct resource smd_resource[] = {
- {
- .name = "modem_smd_in",
- .start = 32 + 25, /* mss_sw_to_kpss_ipc_irq0 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "modem_smsm_in",
- .start = 32 + 26, /* mss_sw_to_kpss_ipc_irq1 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "adsp_smd_in",
- .start = 32 + 156, /* lpass_to_kpss_ipc_irq0 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "adsp_smsm_in",
- .start = 32 + 157, /* lpass_to_kpss_ipc_irq1 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "rpm_smd_in",
- .start = 32 + 168, /* rpm_to_kpss_ipc_irq4 */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct smd_subsystem_config smd_config_list[] = {
- {
- .irq_config_id = SMD_MODEM,
- .subsys_name = "modem",
- .edge = SMD_APPS_MODEM,
-
- .smd_int.irq_name = "modem_smd_in",
- .smd_int.flags = IRQF_TRIGGER_RISING,
- .smd_int.irq_id = -1,
- .smd_int.device_name = "smd_dev",
- .smd_int.dev_id = 0,
- .smd_int.out_bit_pos = 1 << 12,
- .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smd_int.out_offset = 0x8,
-
- .smsm_int.irq_name = "modem_smsm_in",
- .smsm_int.flags = IRQF_TRIGGER_RISING,
- .smsm_int.irq_id = -1,
- .smsm_int.device_name = "smsm_dev",
- .smsm_int.dev_id = 0,
- .smsm_int.out_bit_pos = 1 << 13,
- .smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smsm_int.out_offset = 0x8,
- },
- {
- .irq_config_id = SMD_Q6,
- .subsys_name = "adsp",
- .edge = SMD_APPS_QDSP,
-
- .smd_int.irq_name = "adsp_smd_in",
- .smd_int.flags = IRQF_TRIGGER_RISING,
- .smd_int.irq_id = -1,
- .smd_int.device_name = "smd_dev",
- .smd_int.dev_id = 0,
- .smd_int.out_bit_pos = 1 << 8,
- .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smd_int.out_offset = 0x8,
-
- .smsm_int.irq_name = "adsp_smsm_in",
- .smsm_int.flags = IRQF_TRIGGER_RISING,
- .smsm_int.irq_id = -1,
- .smsm_int.device_name = "smsm_dev",
- .smsm_int.dev_id = 0,
- .smsm_int.out_bit_pos = 1 << 9,
- .smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smsm_int.out_offset = 0x8,
- },
- {
- .irq_config_id = SMD_RPM,
- .subsys_name = NULL, /* do not use PIL to load RPM */
- .edge = SMD_APPS_RPM,
-
- .smd_int.irq_name = "rpm_smd_in",
- .smd_int.flags = IRQF_TRIGGER_RISING,
- .smd_int.irq_id = -1,
- .smd_int.device_name = "smd_dev",
- .smd_int.dev_id = 0,
- .smd_int.out_bit_pos = 1 << 0,
- .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smd_int.out_offset = 0x8,
-
- .smsm_int.irq_name = NULL, /* RPM does not support SMSM */
- .smsm_int.flags = 0,
- .smsm_int.irq_id = 0,
- .smsm_int.device_name = NULL,
- .smsm_int.dev_id = 0,
- .smsm_int.out_bit_pos = 0,
- .smsm_int.out_base = NULL,
- .smsm_int.out_offset = 0,
- },
-};
-
-static struct smd_smem_regions aux_smem_areas[] = {
- {
- .phys_addr = (void *)(0xfc428000),
- .size = 0x4000,
- },
-};
-
-static struct smd_subsystem_restart_config smd_ssr_cfg = {
- .disable_smsm_reset_handshake = 1,
-};
-
-static struct smd_platform smd_platform_data = {
- .num_ss_configs = ARRAY_SIZE(smd_config_list),
- .smd_ss_configs = smd_config_list,
- .smd_ssr_config = &smd_ssr_cfg,
- .num_smem_areas = ARRAY_SIZE(aux_smem_areas),
- .smd_smem_areas = aux_smem_areas,
-};
-
-struct platform_device msm_device_smd_9625 = {
- .name = "msm_smd",
- .id = -1,
- .resource = smd_resource,
- .num_resources = ARRAY_SIZE(smd_resource),
- .dev = {
- .platform_data = &smd_platform_data,
- }
-};
-
#define BIMC_BASE 0xfc380000
#define BIMC_SIZE 0x0006A000
#define SYS_NOC_BASE 0xfc460000
@@ -345,11 +216,6 @@
ARRAY_SIZE(msm_bus_9625_devices));
}
-void __init msm9625_add_devices(void)
-{
- platform_device_register(&msm_device_smd_9625);
-}
-
/*
* Used to satisfy dependencies for devices that need to be
* run early or in a particular order. Most likely your device doesn't fall
@@ -376,7 +242,6 @@
msm9625_init_gpiomux();
of_platform_populate(NULL, of_default_bus_match_table,
msm9625_auxdata_lookup, NULL);
- msm9625_add_devices();
msm9625_add_drivers();
}
diff --git a/arch/arm/mach-msm/board-fsm9xxx.c b/arch/arm/mach-msm/board-fsm9xxx.c
index 1d6eb01..274b338 100644
--- a/arch/arm/mach-msm/board-fsm9xxx.c
+++ b/arch/arm/mach-msm/board-fsm9xxx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
#include <linux/msm_adc.h>
#include <linux/m_adcproc.h>
#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm_ion.h>
#define PMIC_GPIO_INT 144
#define PMIC_VREG_WLAN_LEVEL 2900
@@ -723,32 +724,32 @@
static struct resource qcrypto_resources[] = {
[0] = {
- .start = QCE_0_BASE,
- .end = QCE_0_BASE + QCE_SIZE - 1,
+ .start = QCE_1_BASE,
+ .end = QCE_1_BASE + QCE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "crypto_channels",
- .start = DMOV_CE1_IN_CHAN,
- .end = DMOV_CE1_OUT_CHAN,
+ .start = DMOV_CE2_IN_CHAN,
+ .end = DMOV_CE2_OUT_CHAN,
.flags = IORESOURCE_DMA,
},
[2] = {
.name = "crypto_crci_in",
- .start = DMOV_CE1_IN_CRCI,
- .end = DMOV_CE1_IN_CRCI,
+ .start = DMOV_CE2_IN_CRCI,
+ .end = DMOV_CE2_IN_CRCI,
.flags = IORESOURCE_DMA,
},
[3] = {
.name = "crypto_crci_out",
- .start = DMOV_CE1_OUT_CRCI,
- .end = DMOV_CE1_OUT_CRCI,
+ .start = DMOV_CE2_OUT_CRCI,
+ .end = DMOV_CE2_OUT_CRCI,
.flags = IORESOURCE_DMA,
},
[4] = {
.name = "crypto_crci_hash",
- .start = DMOV_CE1_HASH_CRCI,
- .end = DMOV_CE1_HASH_CRCI,
+ .start = DMOV_CE2_HASH_CRCI,
+ .end = DMOV_CE2_HASH_CRCI,
.flags = IORESOURCE_DMA,
},
};
@@ -774,57 +775,6 @@
static struct resource qcedev_resources[] = {
[0] = {
- .start = QCE_0_BASE,
- .end = QCE_0_BASE + QCE_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .name = "crypto_channels",
- .start = DMOV_CE1_IN_CHAN,
- .end = DMOV_CE1_OUT_CHAN,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .name = "crypto_crci_in",
- .start = DMOV_CE1_IN_CRCI,
- .end = DMOV_CE1_IN_CRCI,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .name = "crypto_crci_out",
- .start = DMOV_CE1_OUT_CRCI,
- .end = DMOV_CE1_OUT_CRCI,
- .flags = IORESOURCE_DMA,
- },
- [4] = {
- .name = "crypto_crci_hash",
- .start = DMOV_CE1_HASH_CRCI,
- .end = DMOV_CE1_HASH_CRCI,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct msm_ce_hw_support qcedev_ce_hw_suppport = {
- .ce_shared = QCE_NO_CE_SHARED,
- .shared_ce_resource = QCE_NO_SHARE_CE_RESOURCE,
- .hw_key_support = QCE_NO_HW_KEY_SUPPORT,
- .sha_hmac = QCE_NO_SHA_HMAC_SUPPORT,
- .bus_scale_table = NULL,
-};
-
-static struct platform_device qcedev_device = {
- .name = "qce",
- .id = 0,
- .num_resources = ARRAY_SIZE(qcedev_resources),
- .resource = qcedev_resources,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &qcedev_ce_hw_suppport,
- },
-};
-
-static struct resource ota_qcrypto_resources[] = {
- [0] = {
.start = QCE_1_BASE,
.end = QCE_1_BASE + QCE_SIZE - 1,
.flags = IORESOURCE_MEM,
@@ -855,6 +805,57 @@
},
};
+static struct msm_ce_hw_support qcedev_ce_hw_suppport = {
+ .ce_shared = QCE_NO_CE_SHARED,
+ .shared_ce_resource = QCE_NO_SHARE_CE_RESOURCE,
+ .hw_key_support = QCE_NO_HW_KEY_SUPPORT,
+ .sha_hmac = QCE_NO_SHA_HMAC_SUPPORT,
+ .bus_scale_table = NULL,
+};
+
+static struct platform_device qcedev_device = {
+ .name = "qce",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(qcedev_resources),
+ .resource = qcedev_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &qcedev_ce_hw_suppport,
+ },
+};
+
+static struct resource ota_qcrypto_resources[] = {
+ [0] = {
+ .start = QCE_2_BASE,
+ .end = QCE_2_BASE + QCE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "crypto_channels",
+ .start = DMOV_CE3_IN_CHAN,
+ .end = DMOV_CE3_OUT_CHAN,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .name = "crypto_crci_in",
+ .start = DMOV_CE3_IN_CRCI,
+ .end = DMOV_CE3_IN_CRCI,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .name = "crypto_crci_out",
+ .start = DMOV_CE3_OUT_CRCI,
+ .end = DMOV_CE3_OUT_CRCI,
+ .flags = IORESOURCE_DMA,
+ },
+ [4] = {
+ .name = "crypto_crci_hash",
+ .start = DMOV_CE3_HASH_DONE_CRCI,
+ .end = DMOV_CE3_HASH_DONE_CRCI,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
struct platform_device ota_qcrypto_device = {
.name = "qcota",
.id = 0,
@@ -870,6 +871,27 @@
.id = -1,
};
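+/* Single kmalloc-backed (system contiguous) ION heap for FSM9xxx */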
+struct ion_platform_heap msm_ion_heaps[] = {
+ {
+ .id = ION_SYSTEM_HEAP_ID,
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "kmalloc",
+ },
+};
+
+static struct ion_platform_data msm_ion_pdata = {
+ .nr = 1,
+ .heaps = msm_ion_heaps,
+};
+
+static struct platform_device msm_ion_device = {
+ .name = "ion-msm",
+ .id = 1,
+ .dev = {
+ .platform_data = &msm_ion_pdata,
+ },
+};
+
/*
* Devices
*/
@@ -905,6 +927,7 @@
&ota_qcrypto_device,
&fsm_xo_device,
&fsm9xxx_device_watchdog,
+ &msm_ion_device,
};
static void __init fsm9xxx_init_irq(void)
diff --git a/arch/arm/mach-msm/board-msm7627a-storage.c b/arch/arm/mach-msm/board-msm7627a-storage.c
index 07ff389..5351d41 100644
--- a/arch/arm/mach-msm/board-msm7627a-storage.c
+++ b/arch/arm/mach-msm/board-msm7627a-storage.c
@@ -369,6 +369,14 @@
if (!(machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7())) {
if (mmc_regulator_init(3, "emmc", 3000000))
return;
+ /*
+ * On 7x25A FFA, data CRC errors are seen, which are
+ * probably due to the proximity of the SIM card and eMMC.
+ * Hence, reducing the clock to 24.7 MHz from 49 MHz.
+ */
+ if (machine_is_msm7625a_ffa())
+ sdc3_plat_data.msmsdcc_fmax =
+ sdc3_plat_data.msmsdcc_fmid;
msm_add_sdcc(3, &sdc3_plat_data);
}
#endif
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index 4e14ff3..9fd5218 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -767,10 +767,7 @@
* These heaps are listed in the order they will be allocated.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .has_outer_cache = 1,
- .heaps = {
+struct ion_platform_heap msm7627a_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -802,7 +799,12 @@
.extra_data = (void *)&co_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
+ .heaps = msm7627a_heaps,
};
static struct platform_device ion_dev = {
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index ee13e04..9822aa9 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -7161,9 +7161,7 @@
* These heaps are listed in the order they will be allocated.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = {
+struct ion_platform_heap msm7x30_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -7195,7 +7193,11 @@
.extra_data = (void *)&co_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = msm7x30_heaps,
};
static struct platform_device ion_dev = {
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index d831ad2..08e6a0d 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5347,9 +5347,7 @@
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = {
+struct ion_platform_heap msm8x60_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -5424,7 +5422,11 @@
.extra_data = (void *)&co_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = msm8x60_heaps,
};
static struct platform_device ion_dev = {
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index a25290f..47a3120 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -802,10 +802,7 @@
* These heaps are listed in the order they will be allocated.
* Don't swap the order unless you know what you are doing!
*/
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .has_outer_cache = 1,
- .heaps = {
+struct ion_platform_heap qrd7627a_heaps[] = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -837,7 +834,12 @@
.extra_data = (void *)&co_ion_pdata,
},
#endif
- }
+};
+
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
+ .heaps = qrd7627a_heaps,
};
static struct platform_device ion_dev = {
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 216de11..95f9327 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -800,11 +800,6 @@
static DEFINE_CLK_VOTER(ocmemgx_msmbus_a_clk, &ocmemgx_a_clk.c, LONG_MAX);
static DEFINE_CLK_VOTER(ocmemgx_core_clk, &ocmemgx_clk.c, LONG_MAX);
-static DEFINE_CLK_VOTER(pnoc_sdcc1_clk, &pnoc_clk.c, 0);
-static DEFINE_CLK_VOTER(pnoc_sdcc2_clk, &pnoc_clk.c, 0);
-static DEFINE_CLK_VOTER(pnoc_sdcc3_clk, &pnoc_clk.c, 0);
-static DEFINE_CLK_VOTER(pnoc_sdcc4_clk, &pnoc_clk.c, 0);
-
static DEFINE_CLK_VOTER(pnoc_sps_clk, &pnoc_clk.c, 0);
static struct clk_freq_tbl ftbl_gcc_usb30_master_clk[] = {
@@ -2969,7 +2964,7 @@
};
static struct clk_freq_tbl ftbl_mdss_edppixel_clk[] = {
- F_MDSS(148500000, edppll_350, 2, 0, 0),
+ F_MDSS(138500000, edppll_350, 2, 0, 0),
F_MDSS(350000000, edppll_350, 11, 0, 0),
F_END
};
@@ -3074,9 +3069,11 @@
*/
F_HDMI( 0, hdmipll, 1, 0, 0),
F_HDMI( 25200000, hdmipll, 1, 0, 0),
+ F_HDMI( 27000000, hdmipll, 1, 0, 0),
F_HDMI( 27030000, hdmipll, 1, 0, 0),
F_HDMI( 74250000, hdmipll, 1, 0, 0),
F_HDMI(148500000, hdmipll, 1, 0, 0),
+ F_HDMI(268500000, hdmipll, 1, 0, 0),
F_HDMI(297000000, hdmipll, 1, 0, 0),
F_END
};
@@ -4985,16 +4982,12 @@
static struct clk_lookup msm_clocks_8974_rumi[] = {
CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "msm_sdcc.1"),
CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "msm_sdcc.1"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc1_clk.c, "msm_sdcc.1"),
CLK_LOOKUP("iface_clk", gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"),
CLK_LOOKUP("core_clk", gcc_sdcc2_apps_clk.c, "msm_sdcc.2"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc2_clk.c, "msm_sdcc.2"),
CLK_LOOKUP("iface_clk", gcc_sdcc3_ahb_clk.c, "msm_sdcc.3"),
CLK_LOOKUP("core_clk", gcc_sdcc3_apps_clk.c, "msm_sdcc.3"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc3_clk.c, "msm_sdcc.3"),
CLK_LOOKUP("iface_clk", gcc_sdcc4_ahb_clk.c, "msm_sdcc.4"),
CLK_LOOKUP("core_clk", gcc_sdcc4_apps_clk.c, "msm_sdcc.4"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc4_clk.c, "msm_sdcc.4"),
CLK_DUMMY("xo", XO_CLK, NULL, OFF),
CLK_DUMMY("xo", XO_CLK, "pil_pronto", OFF),
CLK_DUMMY("core_clk", BLSP2_UART_CLK, "f991f000.serial", OFF),
@@ -5110,16 +5103,12 @@
CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "msm_sdcc.1"),
CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "msm_sdcc.1"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc1_clk.c, "msm_sdcc.1"),
CLK_LOOKUP("iface_clk", gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"),
CLK_LOOKUP("core_clk", gcc_sdcc2_apps_clk.c, "msm_sdcc.2"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc2_clk.c, "msm_sdcc.2"),
CLK_LOOKUP("iface_clk", gcc_sdcc3_ahb_clk.c, "msm_sdcc.3"),
CLK_LOOKUP("core_clk", gcc_sdcc3_apps_clk.c, "msm_sdcc.3"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc3_clk.c, "msm_sdcc.3"),
CLK_LOOKUP("iface_clk", gcc_sdcc4_ahb_clk.c, "msm_sdcc.4"),
CLK_LOOKUP("core_clk", gcc_sdcc4_apps_clk.c, "msm_sdcc.4"),
- CLK_LOOKUP("bus_clk", pnoc_sdcc4_clk.c, "msm_sdcc.4"),
CLK_LOOKUP("iface_clk", gcc_tsif_ahb_clk.c, ""),
CLK_LOOKUP("ref_clk", gcc_tsif_ref_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index d3a4bba..2e85006 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -1318,6 +1318,17 @@
},
};
+static struct branch_clk gcc_ipa_sleep_clk = {
+ .cbcr_reg = IPA_SLEEP_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_ipa_sleep_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ipa_sleep_clk.c),
+ },
+};
+
static struct branch_clk gcc_pdm2_clk = {
.cbcr_reg = PDM2_CBCR,
.has_sibling = 0,
@@ -2072,6 +2083,7 @@
CLK_LOOKUP("core_clk", gcc_ipa_clk.c, "fd4c0000.qcom,ipa"),
CLK_LOOKUP("bus_clk", gcc_sys_noc_ipa_axi_clk.c, "fd4c0000.qcom,ipa"),
CLK_LOOKUP("iface_clk", gcc_ipa_cnoc_clk.c, "fd4c0000.qcom,ipa"),
+ CLK_LOOKUP("inactivity_clk", gcc_ipa_sleep_clk.c, "fd4c0000.qcom,ipa"),
CLK_LOOKUP("core_clk", gcc_pdm2_clk.c, ""),
CLK_LOOKUP("iface_clk", gcc_pdm_ahb_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index e7a596d..c30e566 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -37,30 +37,65 @@
#define HDMI_PHY_PLL_SIZE 0x000000D4
/* hdmi phy registers */
-#define HDMI_PHY_PD_CTRL0 (0x0010)
-#define HDMI_PHY_GLB_CFG (0x0018)
-#define HDMI_PHY_STATUS (0x005C)
+#define HDMI_PHY_ANA_CFG0 (0x0000)
+#define HDMI_PHY_ANA_CFG1 (0x0004)
+#define HDMI_PHY_ANA_CFG2 (0x0008)
+#define HDMI_PHY_ANA_CFG3 (0x000C)
+#define HDMI_PHY_PD_CTRL0 (0x0010)
+#define HDMI_PHY_PD_CTRL1 (0x0014)
+#define HDMI_PHY_GLB_CFG (0x0018)
+#define HDMI_PHY_DCC_CFG0 (0x001C)
+#define HDMI_PHY_DCC_CFG1 (0x0020)
+#define HDMI_PHY_TXCAL_CFG0 (0x0024)
+#define HDMI_PHY_TXCAL_CFG1 (0x0028)
+#define HDMI_PHY_TXCAL_CFG2 (0x002C)
+#define HDMI_PHY_TXCAL_CFG3 (0x0030)
+#define HDMI_PHY_BIST_CFG0 (0x0034)
+#define HDMI_PHY_BIST_CFG1 (0x0038)
+#define HDMI_PHY_BIST_PATN0 (0x003C)
+#define HDMI_PHY_BIST_PATN1 (0x0040)
+#define HDMI_PHY_BIST_PATN2 (0x0044)
+#define HDMI_PHY_BIST_PATN3 (0x0048)
+#define HDMI_PHY_STATUS (0x005C)
/* hdmi phy unified pll registers */
-#define HDMI_UNI_PLL_REFCLK_CF (0x0000)
-#define HDMI_UNI_PLL_POSTDIV1_CFG (0x0004)
-#define HDMI_UNI_PLL_VCOLPF_CFG (0x000C)
-#define HDMI_UNI_PLL_GLB_CFG (0x0020)
-#define HDMI_UNI_PLL_POSTDIV2_CFG (0x0024)
-#define HDMI_UNI_PLL_POSTDIV3_CFG (0x0028)
-#define HDMI_UNI_PLL_SDM_CFG0 (0x0038)
-#define HDMI_UNI_PLL_SDM_CFG1 (0x003C)
-#define HDMI_UNI_PLL_SDM_CFG2 (0x0040)
-#define HDMI_UNI_PLL_SDM_CFG3 (0x0044)
-#define HDMI_UNI_PLL_SDM_CFG4 (0x0048)
-#define HDMI_UNI_PLL_LKDET_CFG0 (0x005C)
-#define HDMI_UNI_PLL_LKDET_CFG1 (0x0060)
-#define HDMI_UNI_PLL_LKDET_CFG2 (0x0064)
-#define HDMI_UNI_PLL_CAL_CFG8 (0x008C)
-#define HDMI_UNI_PLL_CAL_CFG9 (0x0090)
-#define HDMI_UNI_PLL_CAL_CFG10 (0x0094)
-#define HDMI_UNI_PLL_CAL_CFG11 (0x0098)
-#define HDMI_UNI_PLL_STATUS (0x00C0)
+#define HDMI_UNI_PLL_REFCLK_CFG (0x0000)
+#define HDMI_UNI_PLL_POSTDIV1_CFG (0x0004)
+#define HDMI_UNI_PLL_CHFPUMP_CFG (0x0008)
+#define HDMI_UNI_PLL_VCOLPF_CFG (0x000C)
+#define HDMI_UNI_PLL_VREG_CFG (0x0010)
+#define HDMI_UNI_PLL_PWRGEN_CFG (0x0014)
+#define HDMI_UNI_PLL_GLB_CFG (0x0020)
+#define HDMI_UNI_PLL_POSTDIV2_CFG (0x0024)
+#define HDMI_UNI_PLL_POSTDIV3_CFG (0x0028)
+#define HDMI_UNI_PLL_LPFR_CFG (0x002C)
+#define HDMI_UNI_PLL_LPFC1_CFG (0x0030)
+#define HDMI_UNI_PLL_LPFC2_CFG (0x0034)
+#define HDMI_UNI_PLL_SDM_CFG0 (0x0038)
+#define HDMI_UNI_PLL_SDM_CFG1 (0x003C)
+#define HDMI_UNI_PLL_SDM_CFG2 (0x0040)
+#define HDMI_UNI_PLL_SDM_CFG3 (0x0044)
+#define HDMI_UNI_PLL_SDM_CFG4 (0x0048)
+#define HDMI_UNI_PLL_SSC_CFG0 (0x004C)
+#define HDMI_UNI_PLL_SSC_CFG1 (0x0050)
+#define HDMI_UNI_PLL_SSC_CFG2 (0x0054)
+#define HDMI_UNI_PLL_SSC_CFG3 (0x0058)
+#define HDMI_UNI_PLL_LKDET_CFG0 (0x005C)
+#define HDMI_UNI_PLL_LKDET_CFG1 (0x0060)
+#define HDMI_UNI_PLL_LKDET_CFG2 (0x0064)
+#define HDMI_UNI_PLL_CAL_CFG0 (0x006C)
+#define HDMI_UNI_PLL_CAL_CFG1 (0x0070)
+#define HDMI_UNI_PLL_CAL_CFG2 (0x0074)
+#define HDMI_UNI_PLL_CAL_CFG3 (0x0078)
+#define HDMI_UNI_PLL_CAL_CFG4 (0x007C)
+#define HDMI_UNI_PLL_CAL_CFG5 (0x0080)
+#define HDMI_UNI_PLL_CAL_CFG6 (0x0084)
+#define HDMI_UNI_PLL_CAL_CFG7 (0x0088)
+#define HDMI_UNI_PLL_CAL_CFG8 (0x008C)
+#define HDMI_UNI_PLL_CAL_CFG9 (0x0090)
+#define HDMI_UNI_PLL_CAL_CFG10 (0x0094)
+#define HDMI_UNI_PLL_CAL_CFG11 (0x0098)
+#define HDMI_UNI_PLL_STATUS (0x00C0)
#define VCO_CLK 424000000
static unsigned char *mdss_dsi_base;
@@ -391,32 +426,106 @@
case 25200000:
/* 640x480p60 */
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CF);
- REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
- REG_W(0x36, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
- REG_W(0x4C, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x52, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
- REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
+ REG_W(0xB0, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
REG_W(0x1A, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG1);
REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG2);
- REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
+ REG_W(0x03, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
- REG_W(0xFC, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0xF4, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
+ REG_W(0x02, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
+ break;
+
+ case 27000000:
+ /* 576p50/576i50 case */
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x54, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
+ REG_W(0x18, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
+ REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
+ REG_W(0x1A, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG1);
+ REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG2);
+ REG_W(0x03, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
+ REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
+ REG_W(0x2A, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
+ REG_W(0x03, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
break;
case 27030000:
/* 480p60/480i60 case */
- REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CF);
- REG_W(0x18, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
- REG_W(0x36, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
- REG_W(0x14, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
- REG_W(0x63, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x54, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
+ REG_W(0x66, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
REG_W(0x1D, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
@@ -425,10 +534,29 @@
REG_W(0x03, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
REG_W(0x2A, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
REG_W(0x03, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
break;
case 74250000:
@@ -436,51 +564,148 @@
* 720p60/720p50/1080i60/1080i50
* 1080p24/1080p30/1080p25 case
*/
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CF);
- REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
- REG_W(0x36, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
REG_W(0x52, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
- REG_W(0xFD, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
- REG_W(0x55, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
+ REG_W(0x56, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
REG_W(0x1A, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG1);
REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG2);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
- REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
- REG_W(0x73, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
- REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
- break;
-
- case 148500000:
- REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CF);
- REG_W(0x18, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
- REG_W(0x36, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
- REG_W(0x52, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
- REG_W(0xFD, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
- REG_W(0x55, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
- REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
- REG_W(0x1A, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG1);
- REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG2);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
- REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
REG_W(0xE6, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
REG_W(0x02, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
+ break;
+
+ case 148500000:
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x52, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
+ REG_W(0x56, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
+ REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
+ REG_W(0x1A, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG1);
+ REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG2);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
+ REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
+ REG_W(0xE6, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
+ REG_W(0x02, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
+ break;
+
+ case 268500000:
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x36, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x61, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
+ REG_W(0xF6, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
+ REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
+ REG_W(0x1A, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG1);
+ REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG2);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
+ REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
+ REG_W(0x3E, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
+ REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x11, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
break;
case 297000000:
- REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CF);
- REG_W(0x18, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
- REG_W(0x36, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
+ REG_W(0x81, hdmi_phy_base + HDMI_PHY_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_REFCLK_CFG);
+ REG_W(0x19, hdmi_phy_pll_base + HDMI_UNI_PLL_VCOLPF_CFG);
+ REG_W(0x0E, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFR_CFG);
+ REG_W(0x20, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC1_CFG);
+ REG_W(0x0D, hdmi_phy_pll_base + HDMI_UNI_PLL_LPFC2_CFG);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG0);
REG_W(0x65, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG1);
- REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
+ REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG2);
REG_W(0xAC, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG3);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_SDM_CFG4);
REG_W(0x10, hdmi_phy_pll_base + HDMI_UNI_PLL_LKDET_CFG0);
@@ -489,14 +714,31 @@
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV1_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV2_CFG);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_POSTDIV3_CFG);
+ REG_W(0x01, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG2);
REG_W(0x60, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG8);
REG_W(0x00, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG9);
REG_W(0xCD, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG10);
REG_W(0x05, hdmi_phy_pll_base + HDMI_UNI_PLL_CAL_CFG11);
+ REG_W(0x1F, hdmi_phy_base + HDMI_PHY_PD_CTRL0);
+ udelay(50);
+
+ REG_W(0x0F, hdmi_phy_pll_base + HDMI_UNI_PLL_GLB_CFG);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_PD_CTRL1);
+ REG_W(0x10, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0xDB, hdmi_phy_base + HDMI_PHY_ANA_CFG0);
+ REG_W(0x43, hdmi_phy_base + HDMI_PHY_ANA_CFG1);
+ REG_W(0x06, hdmi_phy_base + HDMI_PHY_ANA_CFG2);
+ REG_W(0x03, hdmi_phy_base + HDMI_PHY_ANA_CFG3);
+ REG_W(0x04, hdmi_phy_pll_base + HDMI_UNI_PLL_VREG_CFG);
+ REG_W(0xD0, hdmi_phy_base + HDMI_PHY_DCC_CFG0);
+ REG_W(0x1A, hdmi_phy_base + HDMI_PHY_DCC_CFG1);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG0);
+ REG_W(0x00, hdmi_phy_base + HDMI_PHY_TXCAL_CFG1);
+ REG_W(0x02, hdmi_phy_base + HDMI_PHY_TXCAL_CFG2);
+ REG_W(0x05, hdmi_phy_base + HDMI_PHY_TXCAL_CFG3);
+ udelay(200);
break;
- case 27000000:
- /* 576p50/576i50 case */
default:
pr_err("%s: not supported rate=%ld\n", __func__, rate);
}
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 8d10d6a..0bfaa71 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -19,6 +19,7 @@
#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <linux/coresight.h>
+#include <linux/avtimer.h>
#include <mach/irqs-8064.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
@@ -103,6 +104,9 @@
#define MSM8064_PC_CNTR_PHYS (APQ8064_IMEM_PHYS + 0x664)
#define MSM8064_PC_CNTR_SIZE 0x40
#define MSM8064_RPM_MASTER_STATS_BASE 0x10BB00
+/* avtimer */
+#define AVTIMER_MSW_PHYSICAL_ADDRESS 0x2800900C
+#define AVTIMER_LSW_PHYSICAL_ADDRESS 0x28009008
static struct resource msm8064_resources_pccntr[] = {
{
@@ -3292,3 +3296,8 @@
.platform_data = &apq8064_cache_dump_pdata,
},
};
+
+struct dev_avtimer_data dev_avtimer_pdata = {
+ .avtimer_msw_phy_addr = AVTIMER_MSW_PHYSICAL_ADDRESS,
+ .avtimer_lsw_phy_addr = AVTIMER_LSW_PHYSICAL_ADDRESS,
+};
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index c59461a..2421646 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -2485,6 +2485,17 @@
.bus_port0 = MSM_BUS_MASTER_GRAPHICS_3D,
};
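+/* 8960AB GFX3D footswitch: adds a second 3D bus master port and a 27 MHz core reset rate */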
+static struct fs_driver_data gfx3d_fs_data_8960ab = {
+ .clks = (struct fs_clk_data[]){
+ { .name = "core_clk", .reset_rate = 27000000 },
+ { .name = "iface_clk" },
+ { .name = "bus_clk" },
+ { 0 }
+ },
+ .bus_port0 = MSM_BUS_MASTER_GRAPHICS_3D,
+ .bus_port1 = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
+};
+
static struct fs_driver_data ijpeg_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
@@ -2583,7 +2594,7 @@
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
- FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
+ FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data_8960ab),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data_8960ab),
};
unsigned msm8960ab_num_footswitch __initdata = ARRAY_SIZE(msm8960ab_footswitch);
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index c0d73c2..b676518 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -471,4 +471,5 @@
extern struct platform_device apq_cpudai_mi2s;
extern struct platform_device apq_cpudai_i2s_rx;
extern struct platform_device apq_cpudai_i2s_tx;
+extern struct dev_avtimer_data dev_avtimer_pdata;
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index 12f5aa9..22fc1ac 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -373,9 +373,8 @@
spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
- spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
-
queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
+ spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
}
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
diff --git a/arch/arm/mach-msm/event_timer.c b/arch/arm/mach-msm/event_timer.c
index e06dad4..9f46f68 100644
--- a/arch/arm/mach-msm/event_timer.c
+++ b/arch/arm/mach-msm/event_timer.c
@@ -193,21 +193,7 @@
unsigned long flags;
struct event_timer_info *event =
(struct event_timer_info *)data;
-
- local_irq_save(flags);
- create_hrtimer(event->node.expires);
- local_irq_restore(flags);
-}
-
-/**
- * setup_timer() : Helper function to setup timer on primary
- * core during hrtimer callback.
- * @event: event handle causing the wakeup.
- */
-static void setup_event_hrtimer(struct event_timer_info *event)
-{
struct timerqueue_node *next;
- unsigned long flags;
spin_lock_irqsave(&event_timer_lock, flags);
if (is_event_active(event))
@@ -223,9 +209,18 @@
if (msm_event_debug_mask && MSM_EVENT_TIMER_DEBUG)
pr_info("%s: Setting timer for %lu", __func__,
(unsigned long)ktime_to_ns(event->node.expires));
+ create_hrtimer(event->node.expires);
+ }
+}
- smp_call_function_single(0, create_timer_smp, event, 1);
- }
+/**
+ * setup_timer() : Helper function to setup timer on primary
+ * core during hrtimer callback.
+ * @event: event handle causing the wakeup.
+ */
+static void setup_event_hrtimer(struct event_timer_info *event)
+{
+ smp_call_function_single(0, create_timer_smp, event, 1);
}
/**
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index dae6d3b..c37b518 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -457,7 +457,7 @@
int ipa_teardown_sys_pipe(u32 clnt_hdl);
-#else
+#else /* CONFIG_IPA */
/*
* Connect / Disconnect
diff --git a/arch/arm/mach-msm/include/mach/irqs-8064.h b/arch/arm/mach-msm/include/mach/irqs-8064.h
index 973034b..f4129fe 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8064.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8064.h
@@ -15,15 +15,6 @@
/* MSM ACPU Interrupt Numbers */
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
#define INT_VGIC (GIC_PPI_START + 0)
#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 1)
#define INT_GP_TIMER_EXP (GIC_PPI_START + 2)
diff --git a/arch/arm/mach-msm/include/mach/irqs-8092.h b/arch/arm/mach-msm/include/mach/irqs-8092.h
index dfe21c2..955e669 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8092.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8092.h
@@ -15,24 +15,12 @@
/* MSM ACPU Interrupt Numbers */
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
-#define AVS_SVICINT (GIC_PPI_START + 6)
-#define AVS_SVICINTSWDONE (GIC_PPI_START + 7)
#define INT_ARMQC_PERFMON (GIC_PPI_START + 10)
/* PPI 15 is unused */
#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
#define SC_SICL2PERFMONIRPTREQ APCC_QGICL2PERFMONIRPTREQ
#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
-#define SPS_BAM_DMA_IRQ (GIC_SPI_START + 105)
#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-8226.h b/arch/arm/mach-msm/include/mach/irqs-8226.h
deleted file mode 100644
index abc62d2..0000000
--- a/arch/arm/mach-msm/include/mach/irqs-8226.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_8226_H
-#define __ASM_ARCH_MSM_IRQS_8226_H
-
-/* MSM ACPU Interrupt Numbers */
-
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
-#define INT_ARMQC_PERFMON (GIC_PPI_START + 10)
-/* PPI 15 is unused */
-
-#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
-#define SC_SICL2PERFMONIRPTREQ APCC_QGICL2PERFMONIRPTREQ
-#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-8625.h b/arch/arm/mach-msm/include/mach/irqs-8625.h
index f591a9e..a83dd2e 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8625.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8625.h
@@ -13,9 +13,6 @@
#ifndef __ASM_ARCH_MSM_IRQS_8625_H
#define __ASM_ARCH_MSM_IRQS_8625_H
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
#ifdef CONFIG_MSM_FIQ
#define FIQ_START 0
#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-8910.h b/arch/arm/mach-msm/include/mach/irqs-8910.h
deleted file mode 100644
index 635c044..0000000
--- a/arch/arm/mach-msm/include/mach/irqs-8910.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_IRQS_8910_H
-#define __ASM_ARCH_MSM_IRQS_8910_H
-
-/* MSM ACPU Interrupt Numbers */
-
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
-#define INT_ARMQC_PERFMON (GIC_PPI_START + 10)
-
-#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
-#define SC_SICL2PERFMONIRPTREQ APCC_QGICL2PERFMONIRPTREQ
-#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-8930.h b/arch/arm/mach-msm/include/mach/irqs-8930.h
index bfc32f6..fbde7cb 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8930.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8930.h
@@ -15,13 +15,6 @@
/* MSM ACPU Interrupt Numbers */
-/* 0-15: STI/SGI (software triggered/generated interrupts)
- 16-31: PPI (private peripheral interrupts)
- 32+: SPI (shared peripheral interrupts) */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
#define INT_VGIC (GIC_PPI_START + 0)
#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 1)
#define INT_GP_TIMER_EXP (GIC_PPI_START + 2)
diff --git a/arch/arm/mach-msm/include/mach/irqs-8960.h b/arch/arm/mach-msm/include/mach/irqs-8960.h
index 012dd74..64be113 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8960.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8960.h
@@ -15,13 +15,6 @@
/* MSM ACPU Interrupt Numbers */
-/* 0-15: STI/SGI (software triggered/generated interrupts)
- 16-31: PPI (private peripheral interrupts)
- 32+: SPI (shared peripheral interrupts) */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
#define INT_VGIC (GIC_PPI_START + 0)
#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 1)
#define INT_GP_TIMER_EXP (GIC_PPI_START + 2)
diff --git a/arch/arm/mach-msm/include/mach/irqs-8974.h b/arch/arm/mach-msm/include/mach/irqs-8974.h
index 150b2ee..f18b3df 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8974.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8974.h
@@ -15,22 +15,12 @@
/* MSM ACPU Interrupt Numbers */
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
#define INT_ARMQC_PERFMON (GIC_PPI_START + 7)
/* PPI 15 is unused */
#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
#define SC_SICL2PERFMONIRPTREQ APCC_QGICL2PERFMONIRPTREQ
#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
-#define SPS_BAM_DMA_IRQ (GIC_SPI_START + 105)
#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-8x60.h b/arch/arm/mach-msm/include/mach/irqs-8x60.h
index c9729f4..d08f645 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8x60.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8x60.h
@@ -16,13 +16,6 @@
/* MSM ACPU Interrupt Numbers */
-/* 0-15: STI/SGI (software triggered/generated interrupts)
- 16-31: PPI (private peripheral interrupts)
- 32+: SPI (shared peripheral interrupts) */
-
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
-
#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 0)
#define INT_GP_TIMER_EXP (GIC_PPI_START + 1)
#define INT_GP_TIMER2_EXP (GIC_PPI_START + 2)
diff --git a/arch/arm/mach-msm/include/mach/irqs-9615.h b/arch/arm/mach-msm/include/mach/irqs-9615.h
index 39058a6..b9c66c3 100644
--- a/arch/arm/mach-msm/include/mach/irqs-9615.h
+++ b/arch/arm/mach-msm/include/mach/irqs-9615.h
@@ -15,15 +15,7 @@
/* MSM ACPU Interrupt Numbers */
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
#define FIQ_START 16
-#define GIC_PPI_START 16
-#define GIC_SPI_START 32
#define INT_DEBUG_TIMER_EXP (GIC_PPI_START + 1)
#define INT_GP_TIMER_EXP (GIC_PPI_START + 2)
diff --git a/arch/arm/mach-msm/include/mach/irqs-9625.h b/arch/arm/mach-msm/include/mach/irqs-9625.h
index b1f65d1..15c7010 100644
--- a/arch/arm/mach-msm/include/mach/irqs-9625.h
+++ b/arch/arm/mach-msm/include/mach/irqs-9625.h
@@ -15,18 +15,8 @@
/* MSM ACPU Interrupt Numbers */
-/*
- * 0-15: STI/SGI (software triggered/generated interrupts)
- * 16-31: PPI (private peripheral interrupts)
- * 32+: SPI (shared peripheral interrupts)
- */
-
-#define GIC_PPI_START 16
-
#define INT_ARMQC_PERFMON (GIC_PPI_START + 7)
-#define GIC_SPI_START 32
-
#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
#define SC_SICL2PERFMONIRPTREQ APCC_QGICL2PERFMONIRPTREQ
#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index 7837c79..f5822fc 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -17,6 +17,14 @@
#ifndef __ASM_ARCH_MSM_IRQS_H
#define __ASM_ARCH_MSM_IRQS_H
+/*
+ * 0-15: STI/SGI (software triggered/generated interrupts)
+ * 16-31: PPI (private peripheral interrupts)
+ * 32+: SPI (shared peripheral interrupts)
+ */
+#define GIC_PPI_START 16
+#define GIC_SPI_START 32
+
#define MSM_IRQ_BIT(irq) (1 << ((irq) & 31))
#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MPQ8092)
@@ -37,14 +45,8 @@
#define NR_MSM_GPIOS NR_GPIO_IRQS
#elif defined(CONFIG_ARCH_MSM8910) || defined(CONFIG_ARCH_MSM8226)
-#ifdef CONFIG_ARCH_MSM8910
-#include "irqs-8910.h"
-#endif
-#ifdef CONFIG_ARCH_MSM8226
-#include "irqs-8226.h"
-#endif
-
+#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 208)
#define NR_MSM_IRQS 256
#define NR_GPIO_IRQS 117
#define NR_QPNP_IRQS 32768
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index 0c3c4da..3421765 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -661,8 +661,7 @@
}
msm_lpm_get_rpm_notif = true;
- if (msm_lpm_use_mpm(limits))
- msm_mpm_enter_sleep(sclk_count, from_idle);
+ msm_mpm_enter_sleep(sclk_count, from_idle);
return ret;
}
@@ -670,9 +669,8 @@
void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
bool from_idle, bool notify_rpm, bool collapsed)
{
- /* MPM exit sleep
if (msm_lpm_use_mpm(limits))
- msm_mpm_exit_sleep(from_idle);*/
+ msm_mpm_exit_sleep(from_idle);
msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
}
diff --git a/arch/arm/mach-msm/msm-buspm-dev.c b/arch/arm/mach-msm/msm-buspm-dev.c
index a818eed..ec0f1bd 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.c
+++ b/arch/arm/mach-msm/msm-buspm-dev.c
@@ -22,10 +22,17 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/memory_alloc.h>
+#include <mach/rpm-smd.h>
#include "msm-buspm-dev.h"
#define MSM_BUSPM_DRV_NAME "msm-buspm-dev"
+enum msm_buspm_spdm_res {
+ SPDM_RES_ID = 0,
+ SPDM_RES_TYPE = 0x63707362,
+ SPDM_KEY = 0x00006e65,
+ SPDM_SIZE = 4,
+};
/*
* Allocate kernel buffer.
* Currently limited to one buffer per file descriptor. If alloc() is
@@ -113,6 +120,61 @@
return 0;
}
+static int msm_bus_rpm_req(u32 rsc_type, u32 key, u32 hwid,
+ int ctx, u32 val)
+{
+ struct msm_rpm_request *rpm_req;
+ int ret, msg_id;
+
+ rpm_req = msm_rpm_create_request(ctx, rsc_type, SPDM_RES_ID, 1);
+ if (rpm_req == NULL) {
+ pr_err("RPM: Couldn't create RPM Request\n");
+ return -ENXIO;
+ }
+
+ ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&val,
+ (int)(sizeof(uint32_t)));
+ if (ret) {
+ pr_err("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ goto err;
+ }
+
+ pr_debug("Added Key: %d, Val: %u, size: %d\n", key,
+ (uint32_t)val, sizeof(uint32_t));
+ msg_id = msm_rpm_send_request(rpm_req);
+ if (!msg_id) {
+ pr_err("RPM: No message ID for req\n");
+ ret = -ENXIO;
+ goto err;
+ }
+
+ ret = msm_rpm_wait_for_ack(msg_id);
+ if (ret) {
+ pr_err("RPM: Ack failed\n");
+ goto err;
+ }
+
+err:
+ msm_rpm_free_request(rpm_req);
+ return ret;
+}
+
+static int msm_buspm_ioc_cmds(uint32_t arg)
+{
+ switch (arg) {
+ case MSM_BUSPM_SPDM_CLK_DIS:
+ case MSM_BUSPM_SPDM_CLK_EN:
+ return msm_bus_rpm_req(SPDM_RES_TYPE, SPDM_KEY, 0,
+ MSM_RPM_CTX_ACTIVE_SET, arg);
+ default:
+ pr_warn("Unsupported ioctl command: %d\n", arg);
+ return -EINVAL;
+ }
+}
+
static long
msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -124,6 +186,11 @@
unsigned int buflen = msm_buspm_dev_get_buflen(filp);
unsigned char *dbgbuf = buf;
+ if (_IOC_TYPE(cmd) != MSM_BUSPM_IOC_MAGIC) {
+ pr_err("Wrong IOC_MAGIC.Exiting\n");
+ return -ENOTTY;
+ }
+
switch (cmd) {
case MSM_BUSPM_IOC_FREE:
pr_debug("cmd = 0x%x (FREE)\n", cmd);
@@ -193,6 +260,11 @@
}
break;
+ case MSM_BUSPM_IOC_CMD:
+ pr_debug("IOCTL command: cmd: %d arg: %lu\n", cmd, arg);
+ retval = msm_buspm_ioc_cmds(arg);
+ break;
+
default:
pr_debug("Unknown command 0x%x\n", cmd);
retval = -EINVAL;
diff --git a/arch/arm/mach-msm/msm-buspm-dev.h b/arch/arm/mach-msm/msm-buspm-dev.h
index 5839087..854626d 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.h
+++ b/arch/arm/mach-msm/msm-buspm-dev.h
@@ -31,6 +31,11 @@
int size;
};
+enum msm_buspm_ioc_cmds {
+ MSM_BUSPM_SPDM_CLK_DIS = 0,
+ MSM_BUSPM_SPDM_CLK_EN,
+};
+
#define MSM_BUSPM_IOC_MAGIC 'p'
#define MSM_BUSPM_IOC_FREE \
@@ -47,4 +52,7 @@
#define MSM_BUSPM_IOC_RD_PHYS_ADDR \
_IOR(MSM_BUSPM_IOC_MAGIC, 4, unsigned long)
+
+#define MSM_BUSPM_IOC_CMD \
+ _IOR(MSM_BUSPM_IOC_MAGIC, 5, uint32_t)
#endif
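For reference, a minimal user-space sketch (not part of this patch) of driving the new
MSM_BUSPM_IOC_CMD ioctl. The device node path is an assumption based on the misc device
name, and the header is assumed to be reachable from user space; note that the driver
takes the SPDM command value directly in the ioctl argument rather than through a pointer.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "msm-buspm-dev.h"	/* assumed visible to user space */

int main(void)
{
	int fd = open("/dev/msm-buspm-dev", O_RDWR);	/* node name assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The argument is the raw command value, not a pointer. */
	if (ioctl(fd, MSM_BUSPM_IOC_CMD, MSM_BUSPM_SPDM_CLK_EN) < 0)
		perror("MSM_BUSPM_IOC_CMD");
	close(fd);
	return 0;
}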
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
index 70bb406..d079e77 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
@@ -447,10 +447,8 @@
static int mport_mdp1[] = {MSM_BUS_MASTER_PORT_MDP_PORT1,};
static int mport_rotator[] = {MSM_BUS_MASTER_PORT_ROTATOR,};
static int mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
-static int pro_mport_graphics_3d[] = {
- MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,
- MSM_BUS_MASTER_PORT_GRAPHICS_3D,
-};
+static int pro_mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,};
+static int pro_mport_graphics_3d_p1[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
static int mport_jpeg_dec[] = {MSM_BUS_MASTER_PORT_JPEG_DEC,};
static int mport_graphics_2d_core0[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,};
static int mport_vfe[] = {MSM_BUS_MASTER_PORT_VFE,};
@@ -627,6 +625,13 @@
.num_tiers = ARRAY_SIZE(tier2),
},
{
+ .id = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
+ .masterp = pro_mport_graphics_3d_p1,
+ .num_mports = ARRAY_SIZE(pro_mport_graphics_3d_p1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
.id = MSM_BUS_MASTER_JPEG_DEC,
.masterp = mport_jpeg_dec,
.num_mports = ARRAY_SIZE(mport_jpeg_dec),
diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c
index 94b546a..910804b 100644
--- a/arch/arm/mach-msm/msm_mpdecision.c
+++ b/arch/arm/mach-msm/msm_mpdecision.c
@@ -360,6 +360,7 @@
int cpu;
while (1) {
+ msm_dcvs_update_algo_params();
wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
if (kthread_should_stop())
break;
@@ -392,7 +393,6 @@
}
msm_mpd.hpupdate = HPUPDATE_WAITING;
msm_dcvs_apply_gpu_floor(0);
- msm_dcvs_update_algo_params();
}
return 0;
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index e3a3563..cb8d756 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -33,6 +33,7 @@
#include <asm/setup.h>
#include "peripheral-loader.h"
+#include "ramdump.h"
#define pil_err(desc, fmt, ...) \
dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
@@ -112,6 +113,42 @@
struct ion_handle *region;
};
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+ struct pil_priv *priv = desc->priv;
+ struct pil_seg *seg;
+ int count = 0, ret;
+ struct ramdump_segment *ramdump_segs, *s;
+
+ list_for_each_entry(seg, &priv->segs, list)
+ count++;
+
+ ramdump_segs = kmalloc_array(count, sizeof(*ramdump_segs), GFP_KERNEL);
+ if (!ramdump_segs)
+ return -ENOMEM;
+
+ s = ramdump_segs;
+ list_for_each_entry(seg, &priv->segs, list) {
+ s->address = seg->paddr;
+ s->size = seg->sz;
+ s++;
+ }
+
+ ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+ kfree(ramdump_segs);
+
+ return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
static struct ion_client *ion;
/**
diff --git a/arch/arm/mach-msm/peripheral-loader.h b/arch/arm/mach-msm/peripheral-loader.h
index 1c2faf7..8442289 100644
--- a/arch/arm/mach-msm/peripheral-loader.h
+++ b/arch/arm/mach-msm/peripheral-loader.h
@@ -65,6 +65,7 @@
extern void pil_shutdown(struct pil_desc *desc);
extern void pil_desc_release(struct pil_desc *desc);
extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
#else
static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
static inline int pil_boot(struct pil_desc *desc) { return 0; }
@@ -74,6 +75,10 @@
{
return 0;
}
+static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+ return 0;
+}
#endif
#endif
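The hunks that follow convert the individual PIL drivers to the new helper: instead of
hard-coded ramdump_segment tables, each subsystem ramdump callback now hands its PIL
descriptor to pil_do_ramdump(), which walks the segments recorded when the image was
loaded. A condensed sketch of the resulting pattern, with placeholder driver names:

/* Illustrative only; struct and field names are hypothetical. */
static int foo_ramdump(int enable, const struct subsys_desc *subsys)
{
	struct foo_data *drv = container_of(subsys, struct foo_data,
					    subsys_desc);

	if (!enable)
		return 0;

	/* Segments come from the PIL descriptor, not a static table. */
	return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}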
diff --git a/arch/arm/mach-msm/pil-dsps.c b/arch/arm/mach-msm/pil-dsps.c
index 519e1c9..d315d82 100644
--- a/arch/arm/mach-msm/pil-dsps.c
+++ b/arch/arm/mach-msm/pil-dsps.c
@@ -48,7 +48,6 @@
void __iomem *ppss_base;
void *ramdump_dev;
- struct ramdump_segment fw_ramdump_segments[4];
void *smem_ramdump_dev;
struct ramdump_segment smem_ramdump_segments[1];
@@ -212,16 +211,13 @@
if (!enable)
return 0;
- ret = do_ramdump(drv->ramdump_dev,
- drv->fw_ramdump_segments,
- ARRAY_SIZE(drv->fw_ramdump_segments));
+ ret = pil_do_ramdump(&drv->desc, drv->ramdump_dev);
if (ret < 0) {
pr_err("%s: Unable to dump DSPS memory (rc = %d).\n",
__func__, ret);
return ret;
}
- ret = do_ramdump(drv->smem_ramdump_dev,
- drv->smem_ramdump_segments,
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, drv->smem_ramdump_segments,
ARRAY_SIZE(drv->smem_ramdump_segments));
if (ret < 0) {
pr_err("%s: Unable to dump smem memory (rc = %d).\n",
@@ -293,14 +289,6 @@
if (ret)
return ret;
- drv->fw_ramdump_segments[0].address = 0x12000000;
- drv->fw_ramdump_segments[0].size = 0x28000;
- drv->fw_ramdump_segments[1].address = 0x12040000;
- drv->fw_ramdump_segments[1].size = 0x4000;
- drv->fw_ramdump_segments[2].address = 0x12800000;
- drv->fw_ramdump_segments[2].size = 0x4000;
- drv->fw_ramdump_segments[3].address = 0x8fe00000;
- drv->fw_ramdump_segments[3].size = 0x100000;
drv->ramdump_dev = create_ramdump_device("dsps", &pdev->dev);
if (!drv->ramdump_dev) {
ret = -ENOMEM;
diff --git a/arch/arm/mach-msm/pil-gss.c b/arch/arm/mach-msm/pil-gss.c
index a6d13d0..f4d4449 100644
--- a/arch/arm/mach-msm/pil-gss.c
+++ b/arch/arm/mach-msm/pil-gss.c
@@ -404,11 +404,6 @@
smsm_reset_modem(SMSM_RESET);
}
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment gss_segments[] = {
- {0x89000000, 0x00D00000}
-};
-
static struct ramdump_segment smem_segments[] = {
{0x80000000, 0x00200000},
};
@@ -418,20 +413,20 @@
int ret;
struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc);
- if (enable) {
- ret = do_ramdump(drv->ramdump_dev, gss_segments,
- ARRAY_SIZE(gss_segments));
- if (ret < 0) {
- pr_err("Unable to dump gss memory\n");
- return ret;
- }
+ if (!enable)
+ return 0;
- ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
- ARRAY_SIZE(smem_segments));
- if (ret < 0) {
- pr_err("Unable to dump smem memory (rc = %d).\n", ret);
- return ret;
- }
+ ret = pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
+ if (ret < 0) {
+ pr_err("Unable to dump gss memory\n");
+ return ret;
+ }
+
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
+ ARRAY_SIZE(smem_segments));
+ if (ret < 0) {
+ pr_err("Unable to dump smem memory (rc = %d).\n", ret);
+ return ret;
}
return 0;
diff --git a/arch/arm/mach-msm/pil-modem.c b/arch/arm/mach-msm/pil-modem.c
index d3c832b..3546705 100644
--- a/arch/arm/mach-msm/pil-modem.c
+++ b/arch/arm/mach-msm/pil-modem.c
@@ -393,21 +393,15 @@
return ret;
}
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment modem_segments[] = {
- { 0x42F00000, 0x46000000 - 0x42F00000 },
-};
-
static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
struct modem_data *drv;
drv = container_of(subsys, struct modem_data, subsys_desc);
- if (enable)
- return do_ramdump(drv->ramdump_dev, modem_segments,
- ARRAY_SIZE(modem_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}
static int __devinit pil_modem_driver_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 162a7f7..b457599 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -385,19 +385,14 @@
smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
}
-static struct ramdump_segment pronto_segments[] = {
- { 0x0D200000, 0x0D980000 - 0x0D200000 }
-};
-
static int wcnss_ramdump(int enable, const struct subsys_desc *subsys)
{
struct pronto_data *drv = subsys_to_drv(subsys);
- if (enable)
- return do_ramdump(drv->ramdump_dev, pronto_segments,
- ARRAY_SIZE(pronto_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->desc, drv->ramdump_dev);
}
static int __devinit pil_pronto_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/pil-q6v3.c b/arch/arm/mach-msm/pil-q6v3.c
index d7e712c..1f53f17 100644
--- a/arch/arm/mach-msm/pil-q6v3.c
+++ b/arch/arm/mach-msm/pil-q6v3.c
@@ -279,22 +279,15 @@
return ret;
}
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment q6_segments[] = {
- { 0x46700000, 0x47f00000 - 0x46700000 },
- { 0x28400000, 0x12800 }
-};
-
static int lpass_q6_ramdump(int enable, const struct subsys_desc *subsys)
{
struct q6v3_data *drv;
drv = container_of(subsys, struct q6v3_data, subsys_desc);
- if (enable)
- return do_ramdump(drv->ramdump_dev, q6_segments,
- ARRAY_SIZE(q6_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}
static void lpass_q6_crash_shutdown(const struct subsys_desc *subsys)
diff --git a/arch/arm/mach-msm/pil-q6v4-lpass.c b/arch/arm/mach-msm/pil-q6v4-lpass.c
index 1e6c1f6..1387433 100644
--- a/arch/arm/mach-msm/pil-q6v4-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v4-lpass.c
@@ -231,18 +231,14 @@
return ret;
}
-static struct ramdump_segment segments[] = {
- {0x8da00000, 0x8f200000 - 0x8da00000},
- {0x28400000, 0x20000}
-};
-
static int lpass_ramdump(int enable, const struct subsys_desc *subsys)
{
struct lpass_q6v4 *drv = subsys_to_lpass(subsys);
if (!enable)
return 0;
- return do_ramdump(drv->ramdump_dev, segments, ARRAY_SIZE(segments));
+
+ return pil_do_ramdump(&drv->q6.desc, drv->ramdump_dev);
}
static void lpass_crash_shutdown(const struct subsys_desc *subsys)
diff --git a/arch/arm/mach-msm/pil-q6v4-mss.c b/arch/arm/mach-msm/pil-q6v4-mss.c
index ee01f04..f2b090f 100644
--- a/arch/arm/mach-msm/pil-q6v4-mss.c
+++ b/arch/arm/mach-msm/pil-q6v4-mss.c
@@ -243,14 +243,6 @@
smsm_reset_modem(SMSM_RESET);
}
-static struct ramdump_segment sw_segments[] = {
- {0x89000000, 0x8D400000 - 0x89000000},
-};
-
-static struct ramdump_segment fw_segments[] = {
- {0x8D400000, 0x8DA00000 - 0x8D400000},
-};
-
static struct ramdump_segment smem_segments[] = {
{0x80000000, 0x00200000},
};
@@ -263,17 +255,15 @@
if (!enable)
return 0;
- ret = do_ramdump(drv->sw_ramdump_dev, sw_segments,
- ARRAY_SIZE(sw_segments));
+ ret = pil_do_ramdump(&drv->q6_sw.desc, drv->sw_ramdump_dev);
if (ret < 0)
return ret;
- ret = do_ramdump(drv->fw_ramdump_dev, fw_segments,
- ARRAY_SIZE(fw_segments));
+ ret = pil_do_ramdump(&drv->q6_fw.desc, drv->fw_ramdump_dev);
if (ret < 0)
return ret;
- ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
ARRAY_SIZE(smem_segments));
if (ret < 0)
return ret;
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index 662377d..94632da 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -307,15 +307,14 @@
return ret;
}
-static struct ramdump_segment segments = { 0xdc00000, 0x1800000 };
-
static int adsp_ramdump(int enable, const struct subsys_desc *subsys)
{
struct lpass_data *drv = subsys_to_lpass(subsys);
if (!enable)
return 0;
- return do_ramdump(drv->ramdump_dev, &segments, 1);
+
+ return pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
}
static void adsp_crash_shutdown(const struct subsys_desc *subsys)
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 07cbe19..ed85c95 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -56,6 +56,9 @@
#define RMB_PMI_CODE_START 0x14
#define RMB_PMI_CODE_LENGTH 0x18
+#define VDD_MSS_UV 1050000
+#define MAX_VDD_MX_UV 1050000
+
#define PROXY_TIMEOUT_MS 10000
#define POLL_INTERVAL_US 50
@@ -99,7 +102,7 @@
ret = regulator_enable(drv->vreg);
if (ret)
- dev_err(dev, "Failed to enable regulator.\n");
+ dev_err(dev, "Failed to enable modem regulator.\n");
return ret;
}
@@ -264,9 +267,44 @@
return ret;
}
+static int pil_q6v5_mss_make_proxy_votes(struct pil_desc *pil)
+{
+ int ret;
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+ ret = regulator_set_voltage(drv->vreg_mx, VDD_MSS_UV, MAX_VDD_MX_UV);
+ if (ret) {
+ dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
+ return ret;
+ }
+
+ ret = regulator_enable(drv->vreg_mx);
+ if (ret) {
+ dev_err(pil->dev, "Failed to enable vreg_mx\n");
+ regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
+ return ret;
+ }
+
+ ret = pil_q6v5_make_proxy_votes(pil);
+ if (ret) {
+ regulator_disable(drv->vreg_mx);
+ regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
+ }
+
+ return ret;
+}
+
+static void pil_q6v5_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ pil_q6v5_remove_proxy_votes(pil);
+ regulator_disable(drv->vreg_mx);
+ regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
+}
+
static struct pil_reset_ops pil_mss_ops = {
- .proxy_vote = pil_q6v5_make_proxy_votes,
- .proxy_unvote = pil_q6v5_remove_proxy_votes,
+ .proxy_vote = pil_q6v5_mss_make_proxy_votes,
+ .proxy_unvote = pil_q6v5_mss_remove_proxy_votes,
.auth_and_reset = pil_mss_reset,
.shutdown = pil_mss_shutdown,
};
@@ -458,10 +496,6 @@
smsm_reset_modem(SMSM_RESET);
}
-static struct ramdump_segment modem_segments[] = {
- {0x08400000, 0x0D100000 - 0x08400000},
-};
-
static struct ramdump_segment smem_segments[] = {
{0x0FA00000, 0x0FC00000 - 0x0FA00000},
};
@@ -478,14 +512,13 @@
if (ret)
return ret;
- ret = do_ramdump(drv->ramdump_dev, modem_segments,
- ARRAY_SIZE(modem_segments));
+ ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
if (ret < 0) {
pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
goto out;
}
- ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
ARRAY_SIZE(smem_segments));
if (ret < 0) {
pr_err("Unable to dump smem memory (rc = %d).\n", ret);
@@ -674,7 +707,11 @@
if (IS_ERR(q6->vreg))
return PTR_ERR(q6->vreg);
- ret = regulator_set_voltage(q6->vreg, 1050000, 1050000);
+ q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+ if (IS_ERR(q6->vreg_mx))
+ return PTR_ERR(q6->vreg_mx);
+
+ ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV, VDD_MSS_UV);
if (ret)
dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");
diff --git a/arch/arm/mach-msm/pil-q6v5.h b/arch/arm/mach-msm/pil-q6v5.h
index ecdaf9b..d9ad6ae 100644
--- a/arch/arm/mach-msm/pil-q6v5.h
+++ b/arch/arm/mach-msm/pil-q6v5.h
@@ -31,6 +31,7 @@
void __iomem *axi_halt_base;
void __iomem *restart_reg;
struct regulator *vreg;
+ struct regulator *vreg_mx;
bool is_booted;
struct pil_desc desc;
};
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 74fae98..96b9882 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -412,26 +412,16 @@
return ret;
}
-/*
- * 7MB RAM segments for Riva SS;
- * Riva 1.1 0x8f000000 - 0x8f700000
- * Riva 1.0 0x8f200000 - 0x8f700000
- */
-static struct ramdump_segment riva_segments[] = {
- {0x8f000000, 0x8f700000 - 0x8f000000}
-};
-
static int riva_ramdump(int enable, const struct subsys_desc *desc)
{
struct riva_data *drv;
drv = container_of(desc, struct riva_data, subsys_desc);
- if (enable)
- return do_ramdump(drv->ramdump_dev, riva_segments,
- ARRAY_SIZE(riva_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}
/* Riva crash handler */
diff --git a/arch/arm/mach-msm/pil-venus.c b/arch/arm/mach-msm/pil-venus.c
index 47799cc..103fd9f 100644
--- a/arch/arm/mach-msm/pil-venus.c
+++ b/arch/arm/mach-msm/pil-venus.c
@@ -28,6 +28,8 @@
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/subsystem_restart.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
#include "peripheral-loader.h"
#include "scm-pas.h"
@@ -77,6 +79,7 @@
u32 fw_sz;
u32 fw_min_paddr;
u32 fw_max_paddr;
+ u32 bus_perf_client;
};
#define subsys_to_drv(d) container_of(d, struct venus_data, subsys_desc)
@@ -147,6 +150,41 @@
clk_disable_unprepare(drv->clks[i]);
}
+static struct msm_bus_vectors pil_venus_unvote_bw_vector[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors pil_venus_vote_bw_vector[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 16 * 19 * 1000000UL, /* At least 19.2MHz on bus. */
+ },
+};
+
+static struct msm_bus_paths pil_venus_bw_tbl[] = {
+ {
+ .num_paths = ARRAY_SIZE(pil_venus_unvote_bw_vector),
+ .vectors = pil_venus_unvote_bw_vector,
+ },
+ {
+ .num_paths = ARRAY_SIZE(pil_venus_vote_bw_vector),
+ .vectors = pil_venus_vote_bw_vector,
+ },
+};
+
+static struct msm_bus_scale_pdata pil_venus_client_pdata = {
+ .usecase = pil_venus_bw_tbl,
+ .num_usecases = ARRAY_SIZE(pil_venus_bw_tbl),
+ .name = "pil-venus",
+};
+
static int pil_venus_make_proxy_vote(struct pil_desc *pil)
{
struct venus_data *drv = dev_get_drvdata(pil->dev);
@@ -161,13 +199,28 @@
rc = regulator_enable(drv->gdsc);
if (rc) {
dev_err(pil->dev, "GDSC enable failed\n");
- return rc;
+ goto err_regulator;
}
rc = venus_clock_prepare_enable(pil->dev);
- if (rc)
- regulator_disable(drv->gdsc);
+ if (rc) {
+ dev_err(pil->dev, "clock prepare and enable failed\n");
+ goto err_clock;
+ }
+ rc = msm_bus_scale_client_update_request(drv->bus_perf_client, 1);
+ if (rc) {
+ dev_err(pil->dev, "bandwith request failed\n");
+ goto err_bw;
+ }
+
+ return 0;
+
+err_bw:
+ venus_clock_disable_unprepare(pil->dev);
+err_clock:
+ regulator_disable(drv->gdsc);
+err_regulator:
return rc;
}
@@ -175,6 +228,8 @@
{
struct venus_data *drv = dev_get_drvdata(pil->dev);
+ msm_bus_scale_client_update_request(drv->bus_perf_client, 0);
+
venus_clock_disable_unprepare(pil->dev);
/* Disable GDSC */
@@ -438,6 +493,13 @@
if (rc)
return rc;
+ drv->bus_perf_client =
+ msm_bus_scale_register_client(&pil_venus_client_pdata);
+ if (!drv->bus_perf_client) {
+ dev_err(&pdev->dev, "Failed to register bus client\n");
+ return -EINVAL;
+ }
+
drv->iommu_fw_ctx = msm_iommu_get_ctx("venus_fw");
if (!drv->iommu_fw_ctx) {
dev_err(&pdev->dev, "No iommu fw context found\n");
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index a4a6b906..ad5f1b5 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -25,6 +25,9 @@
#include <asm/ioctls.h>
#include <linux/debugfs.h>
#include "audio_utils_aio.h"
+#ifdef CONFIG_USE_DEV_CTRL_VOLUME
+#include <mach/qdsp6v2/audio_dev_ctl.h>
+#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/
#ifdef CONFIG_DEBUG_FS
ssize_t audio_aio_debug_open(struct inode *inode, struct file *file)
@@ -424,6 +427,67 @@
}
}
+#ifdef CONFIG_USE_DEV_CTRL_VOLUME
+
+static void audio_aio_listner(u32 evt_id, union auddev_evt_data *evt_payload,
+ void *private_data)
+{
+ struct q6audio_aio *audio = (struct q6audio_aio *) private_data;
+ int rc = 0;
+
+ switch (evt_id) {
+ case AUDDEV_EVT_STREAM_VOL_CHG:
+ audio->volume = evt_payload->session_vol;
+ pr_debug("%s[%p]: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, enabled = %d\n",
+ __func__, audio, audio->volume, audio->enabled);
+ if (audio->enabled == 1) {
+ if (audio->ac) {
+ rc = q6asm_set_volume(audio->ac, audio->volume);
+ if (rc < 0) {
+ pr_err("%s[%p]: Send Volume command failed rc=%d\n",
+ __func__, audio, rc);
+ }
+ }
+ }
+ break;
+ default:
+ pr_err("%s[%p]:ERROR:wrong event\n", __func__, audio);
+ break;
+ }
+}
+
+int register_volume_listener(struct q6audio_aio *audio)
+{
+ int rc = 0;
+ audio->device_events = AUDDEV_EVT_STREAM_VOL_CHG;
+ audio->drv_status &= ~ADRV_STATUS_PAUSE;
+
+ rc = auddev_register_evt_listner(audio->device_events,
+ AUDDEV_CLNT_DEC,
+ audio->ac->session,
+ audio_aio_listner,
+ (void *)audio);
+ if (rc < 0) {
+ pr_err("%s[%p]: Event listener failed\n", __func__, audio);
+ rc = -EACCES;
+ }
+ return rc;
+}
+void unregister_volume_listener(struct q6audio_aio *audio)
+{
+ auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->ac->session);
+}
+#else /*CONFIG_USE_DEV_CTRL_VOLUME*/
+int register_volume_listener(struct q6audio_aio *audio)
+{
+ return 0;/* do nothing */
+}
+void unregister_volume_listener(struct q6audio_aio *audio)
+{
+ return;/* do nothing */
+}
+#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/
+
int audio_aio_release(struct inode *inode, struct file *file)
{
struct q6audio_aio *audio = file->private_data;
@@ -448,6 +512,8 @@
mutex_destroy(&audio->read_lock);
mutex_destroy(&audio->write_lock);
mutex_destroy(&audio->get_event_lock);
+ unregister_volume_listener(audio);
+
#ifdef CONFIG_DEBUG_FS
if (audio->dentry)
debugfs_remove(audio->dentry);
@@ -1049,7 +1115,13 @@
goto fail;
}
pr_debug("Ion client create in audio_aio_open %p", audio->client);
+
+ rc = register_volume_listener(audio);
+ if (rc < 0)
+ goto fail;
+
return 0;
+
fail:
q6asm_audio_client_free(audio->ac);
kfree(audio->codec_cfg);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
index dedf991..d518254 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
@@ -1,6 +1,6 @@
/* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -176,7 +176,8 @@
struct ion_client *client;
struct audio_aio_drv_operations drv_ops;
union msm_audio_event_payload eos_write_payload;
-
+ uint32_t device_events;
+ uint16_t volume;
uint32_t drv_status;
int event_abort;
int eos_rsp;
diff --git a/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c b/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c
index 84f136a..5faee21 100644
--- a/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c
+++ b/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c
@@ -72,31 +72,36 @@
if (pcm->start) {
if (pcm->dsp_idx == pcm->buffer_count)
pcm->dsp_idx = 0;
- rc = wait_event_timeout(pcm->wait,
- (pcm->dma_buf[pcm->dsp_idx].used == 0) ||
- atomic_read(&pcm->in_stopped), 1 * HZ);
- if (!rc) {
- pr_err("%s: wait_event_timeout failed\n", __func__);
- goto fail;
+ if (pcm->dma_buf[pcm->dsp_idx].used == 0) {
+ if (atomic_read(&pcm->in_stopped)) {
+ pr_err("%s: Driver closed - return\n",
+ __func__);
+ return HRTIMER_NORESTART;
+ }
+ rc = afe_rt_proxy_port_read(
+ pcm->dma_buf[pcm->dsp_idx].addr,
+ pcm->buffer_size);
+ if (rc < 0) {
+ pr_err("%s afe_rt_proxy_port_read fail\n",
+ __func__);
+ goto fail;
+ }
+ pcm->dma_buf[pcm->dsp_idx].used = 1;
+ pcm->dsp_idx++;
+ pr_debug("sending frame rec to DSP: poll_time: %d\n",
+ pcm->poll_time);
+ } else {
+ pr_err("Qcom: Used flag not reset retry after %d msec\n",
+ (pcm->poll_time/10));
+ goto fail_timer;
}
- if (atomic_read(&pcm->in_stopped)) {
- pr_err("%s: Driver closed - return\n", __func__);
- return HRTIMER_NORESTART;
- }
- rc = afe_rt_proxy_port_read(
- pcm->dma_buf[pcm->dsp_idx].addr,
- pcm->buffer_size);
- if (rc < 0) {
- pr_err("%s afe_rt_proxy_port_read fail\n", __func__);
- goto fail;
- }
- pcm->dma_buf[pcm->dsp_idx].used = 1;
- pcm->dsp_idx++;
- pr_debug("%s: sending frame rec to DSP: poll_time: %d\n",
- __func__, pcm->poll_time);
fail:
hrtimer_forward_now(hrt, ns_to_ktime(pcm->poll_time
* 1000));
+ return HRTIMER_RESTART;
+fail_timer:
+ hrtimer_forward_now(hrt, ns_to_ktime((pcm->poll_time/10)
+ * 1000));
return HRTIMER_RESTART;
} else {
diff --git a/arch/arm/mach-msm/ramdump.c b/arch/arm/mach-msm/ramdump.c
index e33ec48..689c4bb 100644
--- a/arch/arm/mach-msm/ramdump.c
+++ b/arch/arm/mach-msm/ramdump.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
+#include <linux/elf.h>
#include <asm-generic/poll.h>
@@ -46,6 +47,8 @@
wait_queue_head_t dump_wait_q;
int nsegments;
struct ramdump_segment *segments;
+ size_t elfcore_size;
+ char *elfcore_buf;
};
static int ramdump_open(struct inode *inode, struct file *filep)
@@ -107,13 +110,29 @@
size_t copy_size = 0;
int ret = 0;
- if (rd_dev->data_ready == 0) {
- pr_err("Ramdump(%s): Read when there's no dump available!",
- rd_dev->name);
- return -EPIPE;
+ if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+ if (ret)
+ return ret;
+
+ if (*pos < rd_dev->elfcore_size) {
+ copy_size = min(rd_dev->elfcore_size, count);
+
+ if (copy_to_user(buf, rd_dev->elfcore_buf, copy_size)) {
+ ret = -EFAULT;
+ goto ramdump_done;
+ }
+ *pos += copy_size;
+ count -= copy_size;
+ buf += copy_size;
+ if (count == 0)
+ return copy_size;
}
- addr = offset_translate(*pos, rd_dev, &data_left);
+ addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
+ &data_left);
/* EOF check */
if (data_left == 0) {
@@ -234,11 +253,14 @@
kfree(rd_dev);
}
-int do_ramdump(void *handle, struct ramdump_segment *segments,
- int nsegments)
+static int _do_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments, bool use_elf)
{
int ret, i;
struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+ Elf32_Phdr *phdr;
+ Elf32_Ehdr *ehdr;
+ unsigned long offset;
if (!rd_dev->consumer_present) {
pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
@@ -251,6 +273,38 @@
rd_dev->segments = segments;
rd_dev->nsegments = nsegments;
+ if (use_elf) {
+ rd_dev->elfcore_size = sizeof(*ehdr) +
+ sizeof(*phdr) * nsegments;
+ ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+ rd_dev->elfcore_buf = (char *)ehdr;
+ if (!rd_dev->elfcore_buf)
+ return -ENOMEM;
+
+ memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+ ehdr->e_ident[EI_CLASS] = ELFCLASS32;
+ ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+ ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+ ehdr->e_type = ET_CORE;
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_phoff = sizeof(*ehdr);
+ ehdr->e_ehsize = sizeof(*ehdr);
+ ehdr->e_phentsize = sizeof(*phdr);
+ ehdr->e_phnum = nsegments;
+
+ offset = rd_dev->elfcore_size;
+ phdr = (Elf32_Phdr *)(ehdr + 1);
+ for (i = 0; i < nsegments; i++, phdr++) {
+ phdr->p_type = PT_LOAD;
+ phdr->p_offset = offset;
+ phdr->p_vaddr = phdr->p_paddr = segments[i].address;
+ phdr->p_filesz = phdr->p_memsz = segments[i].size;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ offset += phdr->p_filesz;
+ }
+ }
+
rd_dev->data_ready = 1;
rd_dev->ramdump_status = -1;
@@ -271,5 +325,20 @@
ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
rd_dev->data_ready = 0;
+ rd_dev->elfcore_size = 0;
+ kfree(rd_dev->elfcore_buf);
+ rd_dev->elfcore_buf = NULL;
return ret;
+
+}
+
+int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+ return _do_ramdump(handle, segments, nsegments, false);
+}
+
+int
+do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+ return _do_ramdump(handle, segments, nsegments, true);
}
diff --git a/arch/arm/mach-msm/ramdump.h b/arch/arm/mach-msm/ramdump.h
index 3e5bfaf..5fb41ec 100644
--- a/arch/arm/mach-msm/ramdump.h
+++ b/arch/arm/mach-msm/ramdump.h
@@ -24,5 +24,7 @@
void destroy_ramdump_device(void *dev);
int do_ramdump(void *handle, struct ramdump_segment *segments,
int nsegments);
+int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments);
#endif
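Since do_elf_ramdump() now prepends an ELF32 core header before the raw segment data, a
ramdump consumer can sanity-check the stream with standard ELF definitions. A hedged
user-space sketch follows; the device node name is an assumption (it depends on what was
passed to create_ramdump_device()), and the read blocks until a dump is pending unless
O_NONBLOCK is set.

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	Elf32_Ehdr ehdr;
	int fd = open("/dev/ramdump_smem", O_RDONLY);	/* node name assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, &ehdr, sizeof(ehdr)) == (ssize_t)sizeof(ehdr) &&
	    memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0)
		printf("ELF core header, %u program headers\n", ehdr.e_phnum);
	close(fd);
	return 0;
}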
diff --git a/arch/arm/mach-msm/smcmod.c b/arch/arm/mach-msm/smcmod.c
new file mode 100644
index 0000000..705bab5
--- /dev/null
+++ b/arch/arm/mach-msm/smcmod.c
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define KMSG_COMPONENT "SMCMOD"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/msm_ion.h>
+#include <asm/smcmod.h>
+#include <mach/scm.h>
+
+static DEFINE_MUTEX(ioctl_lock);
+
+#define SMCMOD_SVC_DEFAULT (0)
+#define SMCMOD_SVC_CRYPTO (1)
+#define SMCMOD_CRYPTO_CMD_CIPHER (1)
+#define SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED (2)
+#define SMCMOD_CRYPTO_CMD_MSG_DIGEST (3)
+
+/**
+ * struct smcmod_cipher_scm_req - structure for sending the cipher cmd to
+ * scm_call.
+ *
+ * @algorithm - specifies cipher algorithm
+ * @operation - specifies encryption or decryption.
+ * @mode - specifies cipher mode.
+ * @key_phys_addr - physical address for key buffer.
+ * @key_size - key size in bytes.
+ * @plain_text_phys_addr - physical address for plain text buffer.
+ * @plain_text_size - size of plain text in bytes.
+ * @cipher_text_phys_addr - physical address for cipher text buffer.
+ * @cipher_text_size - cipher text size in bytes.
+ * @init_vector_phys_addr - physical address for init vector buffer.
+ * @init_vector_size - size of initialization vector in bytes.
+ */
+struct smcmod_cipher_scm_req {
+ uint32_t algorithm;
+ uint32_t operation;
+ uint32_t mode;
+ uint32_t key_phys_addr;
+ uint32_t key_size;
+ uint32_t plain_text_phys_addr;
+ uint32_t plain_text_size;
+ uint32_t cipher_text_phys_addr;
+ uint32_t cipher_text_size;
+ uint32_t init_vector_phys_addr;
+ uint32_t init_vector_size;
+};
+
+/**
+ * struct smcmod_msg_digest_scm_req - structure for sending message digest
+ * to scm_call.
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @key_phys_addr - physical address of key buffer.
+ * @key_size - hash key size in bytes.
+ * @input_phys_addr - physical address of input buffer.
+ * @input_size - input data size in bytes.
+ * @output_phys_addr - physical address of output buffer.
+ * @output_size - size of output buffer in bytes.
+ * @verify - indicates whether to verify the hash value.
+ */
+struct smcmod_msg_digest_scm_req {
+ uint32_t algorithm;
+ uint32_t key_phys_addr;
+ uint32_t key_size;
+ uint32_t input_phys_addr;
+ uint32_t input_size;
+ uint32_t output_phys_addr;
+ uint32_t output_size;
+ uint8_t verify;
+} __packed;
+
+static void smcmod_inv_range(unsigned long start, unsigned long end)
+{
+ uint32_t cacheline_size;
+ uint32_t ctr;
+
+ /* get cache line size */
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
+ /* invalidate the range */
+ start = round_down(start, cacheline_size);
+ end = round_up(end, cacheline_size);
+ while (start < end) {
+ asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+ : "memory");
+ start += cacheline_size;
+ }
+ mb();
+ isb();
+}
+
+static int smcmod_ion_fd_to_phys(int32_t fd, struct ion_client *ion_clientp,
+ struct ion_handle **ion_handlep, uint32_t *phys_addrp, size_t *sizep)
+{
+ int ret = 0;
+
+ /* sanity check args */
+ if ((fd < 0) || IS_ERR_OR_NULL(ion_clientp) ||
+ IS_ERR_OR_NULL(ion_handlep) || IS_ERR_OR_NULL(phys_addrp) ||
+ IS_ERR_OR_NULL(sizep))
+ return -EINVAL;
+
+ /* import the buffer fd */
+ *ion_handlep = ion_import_dma_buf(ion_clientp, fd);
+
+ /* sanity check the handle */
+ if (IS_ERR_OR_NULL(*ion_handlep))
+ return -EINVAL;
+
+ /* get the physical address */
+ ret = ion_phys(ion_clientp, *ion_handlep, (ion_phys_addr_t *)phys_addrp,
+ sizep);
+
+ return ret;
+}
+
+static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
+{
+ int ret = 0;
+ struct ion_client *ion_clientp = NULL;
+ struct ion_handle *ion_cmd_handlep = NULL;
+ struct ion_handle *ion_resp_handlep = NULL;
+ void *cmd_vaddrp = NULL;
+ void *resp_vaddrp = NULL;
+ unsigned long cmd_buf_size = 0;
+ unsigned long resp_buf_size = 0;
+
+ /* sanity check the argument */
+ if (IS_ERR_OR_NULL(reqp))
+ return -EINVAL;
+
+ /* sanity check the fds */
+ if (reqp->ion_cmd_fd < 0)
+ return -EINVAL;
+
+ /* create an ion client */
+ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
+
+ /* check for errors */
+ if (IS_ERR_OR_NULL(ion_clientp))
+ return -EINVAL;
+
+ /* import the command buffer fd */
+ ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);
+
+ /* sanity check the handle */
+ if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* retrieve the size of the buffer */
+ if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
+ &cmd_buf_size) < 0) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* ensure that the command buffer size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->cmd_len > cmd_buf_size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* map the area to get a virtual address */
+ cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);
+
+ /* sanity check the address */
+ if (IS_ERR_OR_NULL(cmd_vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* check if there is a response buffer */
+ if (reqp->ion_resp_fd >= 0) {
+ /* import the handle */
+ ion_resp_handlep =
+ ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);
+
+ /* sanity check the handle */
+ if (IS_ERR_OR_NULL(ion_resp_handlep)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* retrieve the size of the buffer */
+ if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
+ &resp_buf_size) < 0) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* ensure that the response length is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->resp_len > resp_buf_size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* map the area to get a virtual address */
+ resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);
+
+ /* sanity check the address */
+ if (IS_ERR_OR_NULL(resp_vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+ }
+
+ /* call scm function to switch to secure world */
+ reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
+ cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);
+
+buf_cleanup:
+ /* if the client and handle(s) are valid, free them */
+ if (!IS_ERR_OR_NULL(ion_clientp)) {
+ if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
+ if (!IS_ERR_OR_NULL(cmd_vaddrp))
+ ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
+ ion_free(ion_clientp, ion_cmd_handlep);
+ }
+
+ if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
+ if (!IS_ERR_OR_NULL(resp_vaddrp))
+ ion_unmap_kernel(ion_clientp, ion_resp_handlep);
+ ion_free(ion_clientp, ion_resp_handlep);
+ }
+
+ ion_client_destroy(ion_clientp);
+ }
+
+ return ret;
+}
+
+static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
+{
+ int ret = 0;
+ struct smcmod_cipher_scm_req scm_req;
+ struct ion_client *ion_clientp = NULL;
+ struct ion_handle *ion_key_handlep = NULL;
+ struct ion_handle *ion_plain_handlep = NULL;
+ struct ion_handle *ion_cipher_handlep = NULL;
+ struct ion_handle *ion_iv_handlep = NULL;
+ size_t size = 0;
+
+ if (IS_ERR_OR_NULL(reqp))
+ return -EINVAL;
+
+ /* sanity check the fds */
+ if ((reqp->ion_plain_text_fd < 0) ||
+ (reqp->ion_cipher_text_fd < 0) ||
+ (reqp->ion_init_vector_fd < 0))
+ return -EINVAL;
+
+ /* create an ion client */
+ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
+
+ /* check for errors */
+ if (IS_ERR_OR_NULL(ion_clientp))
+ return -EINVAL;
+
+ /* fill in the scm request structure */
+ scm_req.algorithm = reqp->algorithm;
+ scm_req.operation = reqp->operation;
+ scm_req.mode = reqp->mode;
+ scm_req.key_phys_addr = 0;
+ scm_req.key_size = reqp->key_size;
+ scm_req.plain_text_size = reqp->plain_text_size;
+ scm_req.cipher_text_size = reqp->cipher_text_size;
+ scm_req.init_vector_size = reqp->init_vector_size;
+
+ if (!reqp->key_is_null) {
+ /* import the key buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
+ &ion_key_handlep, &scm_req.key_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the key size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->key_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+ }
+
+ /* import the plain text buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
+ &ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);
+
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the plain text size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->plain_text_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* import the cipher text buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
+ &ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the cipher text size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->cipher_text_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* import the init vector buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
+ &ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the init vector size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->init_vector_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* call scm function to switch to secure world */
+ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
+ SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
+ sizeof(scm_req), NULL, 0);
+
+ /* for decrypt, plain text is the output, otherwise it's cipher text */
+ if (reqp->operation) {
+ void *vaddrp = NULL;
+
+ /* map the plain text region to get the virtual address */
+ vaddrp = ion_map_kernel(ion_clientp, ion_plain_handlep);
+ if (IS_ERR_OR_NULL(vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* invalidate the range */
+ smcmod_inv_range((unsigned long)vaddrp,
+ (unsigned long)(vaddrp + scm_req.plain_text_size));
+
+ /* unmap the mapped area */
+ ion_unmap_kernel(ion_clientp, ion_plain_handlep);
+ } else {
+ void *vaddrp = NULL;
+
+ /* map the cipher text region to get the virtual address */
+ vaddrp = ion_map_kernel(ion_clientp, ion_cipher_handlep);
+ if (IS_ERR_OR_NULL(vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* invalidate the range */
+ smcmod_inv_range((unsigned long)vaddrp,
+ (unsigned long)(vaddrp + scm_req.cipher_text_size));
+
+ /* unmap the mapped area */
+ ion_unmap_kernel(ion_clientp, ion_cipher_handlep);
+ }
+
+buf_cleanup:
+ /* if the client and handles are valid, free them */
+ if (!IS_ERR_OR_NULL(ion_clientp)) {
+ if (!IS_ERR_OR_NULL(ion_key_handlep))
+ ion_free(ion_clientp, ion_key_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_plain_handlep))
+ ion_free(ion_clientp, ion_plain_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_cipher_handlep))
+ ion_free(ion_clientp, ion_cipher_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_iv_handlep))
+ ion_free(ion_clientp, ion_iv_handlep);
+
+ ion_client_destroy(ion_clientp);
+ }
+
+ return ret;
+}
+static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
+{
+ int ret = 0;
+ struct smcmod_msg_digest_scm_req scm_req;
+ struct ion_client *ion_clientp = NULL;
+ struct ion_handle *ion_key_handlep = NULL;
+ struct ion_handle *ion_input_handlep = NULL;
+ struct ion_handle *ion_output_handlep = NULL;
+ size_t size = 0;
+ void *vaddrp = NULL;
+
+ if (IS_ERR_OR_NULL(reqp))
+ return -EINVAL;
+
+ /* sanity check the fds */
+ if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
+ return -EINVAL;
+
+ /* create an ion client */
+ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
+
+ /* check for errors */
+ if (IS_ERR_OR_NULL(ion_clientp))
+ return -EINVAL;
+
+ /* fill in the scm request structure */
+ scm_req.algorithm = reqp->algorithm;
+ scm_req.key_phys_addr = 0;
+ scm_req.key_size = reqp->key_size;
+ scm_req.input_size = reqp->input_size;
+ scm_req.output_size = reqp->output_size;
+ scm_req.verify = 0;
+
+ if (!reqp->key_is_null) {
+ /* import the key buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
+ &ion_key_handlep, &scm_req.key_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the key size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->key_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+ }
+
+ /* import the input buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
+ &ion_input_handlep, &scm_req.input_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the input size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->input_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* import the output buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
+ &ion_output_handlep, &scm_req.output_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the output size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->output_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* call scm function to switch to secure world */
+ if (reqp->fixed_block)
+ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
+ SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
+ &scm_req,
+ sizeof(scm_req),
+ NULL, 0);
+ else
+ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
+ SMCMOD_CRYPTO_CMD_MSG_DIGEST,
+ &scm_req,
+ sizeof(scm_req),
+ NULL, 0);
+
+ /* map the output region to get the virtual address */
+ vaddrp = ion_map_kernel(ion_clientp, ion_output_handlep);
+ if (IS_ERR_OR_NULL(vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* invalidate the range */
+ smcmod_inv_range((unsigned long)vaddrp,
+ (unsigned long)(vaddrp + scm_req.output_size));
+
+ /* unmap the mapped area */
+ ion_unmap_kernel(ion_clientp, ion_output_handlep);
+
+buf_cleanup:
+ /* if the client and handles are valid, free them */
+ if (!IS_ERR_OR_NULL(ion_clientp)) {
+ if (!IS_ERR_OR_NULL(ion_key_handlep))
+ ion_free(ion_clientp, ion_key_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_input_handlep))
+ ion_free(ion_clientp, ion_input_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_output_handlep))
+ ion_free(ion_clientp, ion_output_handlep);
+
+ ion_client_destroy(ion_clientp);
+ }
+
+ return ret;
+}
+
+static long smcmod_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ /* sanity check */
+ if (!argp)
+ return -EINVAL;
+
+ /*
+ * The SMC instruction should only be initiated by one process
+ * at a time, hence the critical section here. Note that this
+ * does not prevent user space from modifying the
+ * allocated buffer contents. Extra steps are needed to
+ * prevent that from happening.
+ */
+ mutex_lock(&ioctl_lock);
+
+ switch (cmd) {
+ case SMCMOD_IOCTL_SEND_REG_CMD:
+ {
+ struct smcmod_reg_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* call the correct scm function to switch to secure
+ * world
+ */
+ if (req.num_args == 1) {
+ req.return_val =
+ scm_call_atomic1(req.service_id,
+ req.command_id, req.args[0]);
+ } else if (req.num_args == 2) {
+ req.return_val =
+ scm_call_atomic2(req.service_id,
+ req.command_id, req.args[0],
+ req.args[1]);
+ } else {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ /* This is an example of how to pass buffers to/from the secure
+ * side using the ion driver.
+ */
+ case SMCMOD_IOCTL_SEND_BUF_CMD:
+ {
+ struct smcmod_buf_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* send the command */
+ ret = smcmod_send_buf_cmd(&req);
+ if (ret < 0)
+ goto cleanup;
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ case SMCMOD_IOCTL_SEND_CIPHER_CMD:
+ {
+ struct smcmod_cipher_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ ret = smcmod_send_cipher_cmd(&req);
+ if (ret < 0)
+ goto cleanup;
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
+ {
+ struct smcmod_msg_digest_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ ret = smcmod_send_msg_digest_cmd(&req);
+ if (ret < 0)
+ goto cleanup;
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ case SMCMOD_IOCTL_GET_VERSION:
+ {
+ uint32_t req;
+
+ /* call scm function to switch to secure world */
+ req = scm_get_version();
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+cleanup:
+ mutex_unlock(&ioctl_lock);
+ return ret;
+}
+
+static int smcmod_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int smcmod_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations smcmod_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = smcmod_ioctl,
+ .open = smcmod_open,
+ .release = smcmod_release,
+};
+
+static struct miscdevice smcmod_misc_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = SMCMOD_DEV,
+ .fops = &smcmod_fops
+};
+
+static int __init smcmod_init(void)
+{
+ return misc_register(&smcmod_misc_dev);
+}
+
+static void __exit smcmod_exit(void)
+{
+ misc_deregister(&smcmod_misc_dev);
+}
+
+MODULE_DESCRIPTION("Qualcomm SMC Module");
+MODULE_LICENSE("GPL v2");
+
+module_init(smcmod_init);
+module_exit(smcmod_exit);
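A minimal user-space sketch (not part of this patch) exercising the simplest of the new
smcmod ioctls, SMCMOD_IOCTL_GET_VERSION. The device node path is a guess derived from
SMCMOD_DEV, and asm/smcmod.h is assumed to be available to user space.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/smcmod.h>		/* assumed exported to user space */

int main(void)
{
	uint32_t version = 0;
	int fd = open("/dev/smcmod", O_RDWR);	/* path assumed from SMCMOD_DEV */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SMCMOD_IOCTL_GET_VERSION, &version) < 0)
		perror("SMCMOD_IOCTL_GET_VERSION");
	else
		printf("secure world version: 0x%x\n", version);
	close(fd);
	return 0;
}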
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b03cfd..8f3c107 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -92,6 +92,8 @@
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
unsigned int rate_mult;
+ unsigned int prev_load;
+ unsigned int max_load;
int cpu;
unsigned int sample_type:1;
/*
@@ -125,17 +127,27 @@
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
+ unsigned int up_threshold_multi_core;
unsigned int down_differential;
+ unsigned int down_differential_multi_core;
+ unsigned int optimal_freq;
+ unsigned int up_threshold_any_cpu_load;
+ unsigned int sync_freq;
unsigned int ignore_nice;
unsigned int sampling_down_factor;
int powersave_bias;
unsigned int io_is_busy;
} dbs_tuners_ins = {
+ .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+ .down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
+ .up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
.ignore_nice = 0,
.powersave_bias = 0,
+ .sync_freq = 0,
+ .optimal_freq = 0,
};
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
@@ -293,9 +305,13 @@
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
+show_one(up_threshold_multi_core, up_threshold_multi_core);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
+show_one(optimal_freq, optimal_freq);
+show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
+show_one(sync_freq, sync_freq);
static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf)
@@ -371,6 +387,19 @@
return count;
}
+static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ dbs_tuners_ins.sync_freq = input;
+ return count;
+}
+
static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -384,6 +413,19 @@
return count;
}
+static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ dbs_tuners_ins.optimal_freq = input;
+ return count;
+}
+
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -399,6 +441,36 @@
return count;
}
+static ssize_t store_up_threshold_multi_core(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
+ return -EINVAL;
+ }
+ dbs_tuners_ins.up_threshold_multi_core = input;
+ return count;
+}
+
+static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
+ return -EINVAL;
+ }
+ dbs_tuners_ins.up_threshold_any_cpu_load = input;
+ return count;
+}
+
static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -581,6 +653,10 @@
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
+define_one_global_rw(up_threshold_multi_core);
+define_one_global_rw(optimal_freq);
+define_one_global_rw(up_threshold_any_cpu_load);
+define_one_global_rw(sync_freq);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -591,6 +667,10 @@
&ignore_nice_load.attr,
&powersave_bias.attr,
&io_is_busy.attr,
+ &up_threshold_multi_core.attr,
+ &optimal_freq.attr,
+ &up_threshold_any_cpu_load.attr,
+ &sync_freq.attr,
NULL
};
@@ -619,7 +699,7 @@
unsigned int max_load_freq;
/* Current load across this CPU */
unsigned int cur_load = 0;
-
+ unsigned int max_load_other_cpu = 0;
struct cpufreq_policy *policy;
unsigned int j;
@@ -696,7 +776,8 @@
continue;
cur_load = 100 * (wall_time - idle_time) / wall_time;
-
+ j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load);
+ j_dbs_info->prev_load = cur_load;
freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
@@ -705,11 +786,37 @@
if (load_freq > max_load_freq)
max_load_freq = load_freq;
}
+
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+
+ if (j == policy->cpu)
+ continue;
+
+ if (max_load_other_cpu < j_dbs_info->max_load)
+ max_load_other_cpu = j_dbs_info->max_load;
+ /*
+			 * The other CPU could be running at a higher frequency
+			 * but may not have completed its sampling_down_factor.
+			 * In that case, consider the other CPU loaded so that
+			 * a frequency imbalance does not occur.
+ */
+
+ if ((j_dbs_info->cur_policy != NULL)
+ && (j_dbs_info->cur_policy->cur ==
+ j_dbs_info->cur_policy->max)) {
+
+ if (policy->cur >= dbs_tuners_ins.optimal_freq)
+ max_load_other_cpu =
+ dbs_tuners_ins.up_threshold_any_cpu_load;
+ }
+ }
+
/* calculate the scaled load across CPU */
load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
cpufreq_notify_utilization(policy, load_at_max_freq);
-
/* Check for frequency increase */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
/* If switching to max speed, apply sampling_down_factor */
@@ -720,6 +827,25 @@
return;
}
+ if (num_online_cpus() > 1) {
+
+ if (max_load_other_cpu >
+ dbs_tuners_ins.up_threshold_any_cpu_load) {
+ if (policy->cur < dbs_tuners_ins.sync_freq)
+ dbs_freq_increase(policy,
+ dbs_tuners_ins.sync_freq);
+ return;
+ }
+
+ if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
+ policy->cur) {
+ if (policy->cur < dbs_tuners_ins.optimal_freq)
+ dbs_freq_increase(policy,
+ dbs_tuners_ins.optimal_freq);
+ return;
+ }
+ }
+
/* Check for frequency decrease */
/* if we cannot reduce the frequency anymore, break out early */
if (policy->cur == policy->min)
@@ -744,6 +870,20 @@
if (freq_next < policy->min)
freq_next = policy->min;
+ if (num_online_cpus() > 1) {
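+			/*
+			 * Multi-core case: bias freq_next towards sync_freq /
+			 * optimal_freq while other CPUs (or this one) still
+			 * carry significant load, so the cores do not settle
+			 * at widely different frequencies.
+			 */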
+ if (max_load_other_cpu >
+ (dbs_tuners_ins.up_threshold_multi_core -
+ dbs_tuners_ins.down_differential) &&
+ freq_next < dbs_tuners_ins.sync_freq)
+ freq_next = dbs_tuners_ins.sync_freq;
+
+ if (max_load_freq >
+ (dbs_tuners_ins.up_threshold_multi_core -
+ dbs_tuners_ins.down_differential_multi_core) *
+ policy->cur)
+ freq_next = dbs_tuners_ins.optimal_freq;
+
+ }
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
@@ -997,6 +1137,12 @@
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
dbs_tuners_ins.io_is_busy = should_io_be_busy();
+
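+			/*
+			 * If optimal_freq/sync_freq were not configured via
+			 * sysfs, default them to the policy minimum.
+			 */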
+ if (dbs_tuners_ins.optimal_freq == 0)
+ dbs_tuners_ins.optimal_freq = policy->min;
+
+ if (dbs_tuners_ins.sync_freq == 0)
+ dbs_tuners_ins.sync_freq = policy->min;
}
if (!cpu)
rc = input_register_handler(&dbs_input_handler);
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index c9e8a94..51349f6 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
+obj-$(CONFIG_CMA) += ion_cma_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
new file mode 100644
index 0000000..bef6b6f
--- /dev/null
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -0,0 +1,342 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <mach/iommu_domains.h>
+
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_buffer_info {
+ void *cpu_addr;
+ dma_addr_t handle;
+ struct sg_table *table;
+ bool is_cached;
+};
+
+static int cma_heap_has_outer_cache;
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct device *dev = heap->priv;
+ struct ion_cma_buffer_info *info;
+
+ dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+ info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "Can't allocate buffer info\n");
+ return ION_CMA_ALLOCATE_FAILED;
+ }
+
+ if (!ION_IS_CACHED(flags))
+ info->cpu_addr = dma_alloc_writecombine(dev, len,
+ &(info->handle), 0);
+ else
+ info->cpu_addr = dma_alloc_nonconsistent(dev, len,
+ &(info->handle), 0);
+
+ if (!info->cpu_addr) {
+ dev_err(dev, "Fail to allocate buffer\n");
+ goto err;
+ }
+
+ info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(dev, "Fail to allocate sg table\n");
+ goto err;
+ }
+
+ info->is_cached = ION_IS_CACHED(flags);
+
+ ion_cma_get_sgtable(dev,
+ info->table, info->cpu_addr, info->handle, len);
+
+ /* keep this for memory release */
+ buffer->priv_virt = info;
+ dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ return 0;
+
+err:
+ kfree(info);
+ return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+ struct device *dev = buffer->heap->priv;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Release buffer %p\n", buffer);
+ /* release memory */
+ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ /* release sg table */
+ kfree(info->table);
+ kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct device *dev = heap->priv;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
+ info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct device *dev = buffer->heap->priv;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ if (info->is_cached)
+ return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
+ info->handle, buffer->size);
+ else
+ return dma_mmap_writecombine(dev, vma, info->cpu_addr,
+ info->handle, buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+int ion_cma_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ int ret = 0;
+ struct iommu_domain *domain;
+ unsigned long extra;
+ unsigned long extra_iova_addr;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+ struct sg_table *table = info->table;
+ int prot = IOMMU_WRITE | IOMMU_READ;
+
+ data->mapped_size = iova_length;
+
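+	/* No IOMMU: use the buffer's physical (DMA) address directly. */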
+ if (!msm_use_iommu()) {
+ data->iova_addr = info->handle;
+ return 0;
+ }
+
+ extra = iova_length - buffer->size;
+
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
+
+ if (ret)
+ goto out;
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -EINVAL;
+ goto out1;
+ }
+
+ ret = iommu_map_range(domain, data->iova_addr, table->sgl,
+ buffer->size, prot);
+
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p\n",
+ __func__, data->iova_addr, domain);
+ goto out1;
+ }
+
+ extra_iova_addr = data->iova_addr + buffer->size;
+ if (extra) {
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+ prot);
+ if (ret)
+ goto out2;
+ }
+ return ret;
+
+out2:
+ iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+out:
+ return ret;
+}
+
+
+void ion_cma_unmap_iommu(struct ion_iommu_map *data)
+{
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ if (!msm_use_iommu())
+ return;
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
+int ion_cma_cache_ops(struct ion_heap *heap,
+ struct ion_buffer *buffer, void *vaddr,
+ unsigned int offset, unsigned int length,
+ unsigned int cmd)
+{
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cma_heap_has_outer_cache) {
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ outer_cache_op(info->handle, info->handle + length);
+ }
+
+ return 0;
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+ .allocate = ion_cma_allocate,
+ .free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
+ .map_user = ion_cma_mmap,
+ .map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
+ .map_iommu = ion_cma_map_iommu,
+ .unmap_iommu = ion_cma_unmap_iommu,
+ .cache_op = ion_cma_cache_ops,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->ops = &ion_cma_ops;
+	/* Set the device as the heap's private data; later it will be
+	 * used to make the link with the reserved CMA memory. */
+ heap->priv = data->priv;
+ heap->type = ION_HEAP_TYPE_DMA;
+ cma_heap_has_outer_cache = data->has_outer_cache;
+ return heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index aa3469c..96a3cdc 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_cp_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -15,7 +15,7 @@
*
*/
#include <linux/spinlock.h>
-
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
+#include <linux/dma-mapping.h>
#include <asm/mach/map.h>
@@ -99,6 +100,10 @@
int iommu_2x_map_domain;
unsigned int has_outer_cache;
atomic_t protect_cnt;
+ void *cpu_addr;
+ size_t heap_size;
+ dma_addr_t handle;
+ int cma;
};
enum {
@@ -126,6 +131,8 @@
void *data;
};
+#define DMA_ALLOC_TRIES 5
+
static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
unsigned int permission_type, int version,
void *data);
@@ -134,6 +141,71 @@
unsigned int permission_type, int version,
void *data);
+static int allocate_heap_memory(struct ion_heap *heap)
+{
+ struct device *dev = heap->priv;
+ struct ion_cp_heap *cp_heap =
+ container_of(heap, struct ion_cp_heap, heap);
+ int ret;
+ int tries = 0;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+
+ if (cp_heap->cpu_addr)
+ return 0;
+
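+	/*
+	 * The allocation may fail transiently; retry up to DMA_ALLOC_TRIES
+	 * times, sleeping briefly between attempts.
+	 */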
+ while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
+ cp_heap->cpu_addr = dma_alloc_attrs(dev,
+ cp_heap->heap_size,
+ &(cp_heap->handle),
+ 0,
+ &attrs);
+ if (!cp_heap->cpu_addr)
+ msleep(20);
+ }
+
+ if (!cp_heap->cpu_addr)
+ goto out;
+
+ cp_heap->base = cp_heap->handle;
+
+ cp_heap->pool = gen_pool_create(12, -1);
+ if (!cp_heap->pool)
+ goto out_free;
+
+ ret = gen_pool_add(cp_heap->pool, cp_heap->base,
+ cp_heap->heap_size, -1);
+ if (ret < 0)
+ goto out_pool;
+
+ return 0;
+
+out_pool:
+ gen_pool_destroy(cp_heap->pool);
+out_free:
+ dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
+ cp_heap->handle);
+out:
+ return ION_CP_ALLOCATE_FAIL;
+}
+
+static void free_heap_memory(struct ion_heap *heap)
+{
+ struct device *dev = heap->priv;
+ struct ion_cp_heap *cp_heap =
+ container_of(heap, struct ion_cp_heap, heap);
+
+ /* release memory */
+ dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
+ cp_heap->handle);
+ gen_pool_destroy(cp_heap->pool);
+ cp_heap->pool = NULL;
+ cp_heap->cpu_addr = 0;
+}
+
+
+
/**
* Get the total number of kernel mappings.
* Must be called with heap->lock locked.
@@ -155,6 +227,12 @@
if (ret_value)
return 1;
}
+
+ if (cp_heap->cma) {
+ ret_value = allocate_heap_memory(heap);
+ if (ret_value)
+ return 1;
+ }
return 0;
}
@@ -167,6 +245,9 @@
if (fmem_set_state(FMEM_T_STATE) != 0)
pr_err("%s: unable to transition heap to T-state\n",
__func__);
+
+ if (cp_heap->cma)
+ free_heap_memory(heap);
}
/* Must be protected by ion_cp_buffer lock */
@@ -688,7 +769,24 @@
if (cp_heap->reusable) {
ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
cp_heap->reserved_vrange, buffer->flags);
+ } else if (cp_heap->cma) {
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(
+ sizeof(struct page *) * npages);
+ int i;
+ pgprot_t pgprot;
+ if (ION_IS_CACHED(buffer->flags))
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = phys_to_page(buf->buffer +
+ i * PAGE_SIZE);
+ }
+ ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
+ vfree(pages);
} else {
if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buf->buffer,
@@ -721,6 +819,8 @@
if (cp_heap->reusable)
unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
+ else if (cp_heap->cma)
+ vunmap(buffer->vaddr);
else
__arm_iounmap(buffer->vaddr);
@@ -1148,14 +1248,6 @@
mutex_init(&cp_heap->lock);
- cp_heap->pool = gen_pool_create(12, -1);
- if (!cp_heap->pool)
- goto free_heap;
-
- cp_heap->base = heap_data->base;
- ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
- if (ret < 0)
- goto destroy_pool;
cp_heap->allocated_bytes = 0;
cp_heap->umap_count = 0;
@@ -1165,9 +1257,11 @@
cp_heap->heap.ops = &cp_heap_ops;
cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
cp_heap->heap_protected = HEAP_NOT_PROTECTED;
- cp_heap->secure_base = cp_heap->base;
+ cp_heap->secure_base = heap_data->base;
cp_heap->secure_size = heap_data->size;
cp_heap->has_outer_cache = heap_data->has_outer_cache;
+ cp_heap->heap_size = heap_data->size;
+
atomic_set(&cp_heap->protect_cnt, 0);
if (heap_data->extra_data) {
struct ion_cp_heap_pdata *extra_data =
@@ -1191,9 +1285,26 @@
extra_data->iommu_map_all;
cp_heap->iommu_2x_map_domain =
extra_data->iommu_2x_map_domain;
+ cp_heap->cma = extra_data->is_cma;
}
+ if (cp_heap->cma) {
+ cp_heap->pool = NULL;
+ cp_heap->cpu_addr = 0;
+ cp_heap->heap.priv = heap_data->priv;
+ } else {
+ cp_heap->pool = gen_pool_create(12, -1);
+ if (!cp_heap->pool)
+ goto free_heap;
+
+ cp_heap->base = heap_data->base;
+ ret = gen_pool_add(cp_heap->pool, cp_heap->base,
+ heap_data->size, -1);
+ if (ret < 0)
+ goto destroy_pool;
+
+ }
return &cp_heap->heap;
destroy_pool:
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 4c83d75..0670468 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -40,6 +40,11 @@
case ION_HEAP_TYPE_CP:
heap = ion_cp_heap_create(heap_data);
break;
+#ifdef CONFIG_CMA
+ case ION_HEAP_TYPE_DMA:
+ heap = ion_cma_heap_create(heap_data);
+ break;
+#endif
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
@@ -55,6 +60,7 @@
heap->name = heap_data->name;
heap->id = heap_data->id;
+ heap->priv = heap_data->priv;
return heap;
}
@@ -79,6 +85,11 @@
case ION_HEAP_TYPE_CP:
ion_cp_heap_destroy(heap);
break;
+#ifdef CONFIG_CMA
+ case ION_HEAP_TYPE_DMA:
+ ion_cma_heap_destroy(heap);
+ break;
+#endif
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 345c07d..0b691f3 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
+#include <linux/dma-mapping.h>
#include "ion_priv.h"
#include <asm/mach/map.h>
@@ -80,8 +81,13 @@
goto err3;
sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
+ sg_dma_address(sg) = sg_phys(sg);
}
+ if (!ION_IS_CACHED(flags))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
buffer->priv_virt = data;
return 0;
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 991a310..7713875 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -162,6 +162,7 @@
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
+ * @priv: private heap data
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
@@ -175,6 +176,7 @@
struct ion_heap_ops *ops;
int id;
const char *name;
+ void *priv;
};
/**
@@ -257,6 +259,10 @@
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
+#ifdef CONFIG_CMA
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+#endif
struct ion_heap *msm_get_contiguous_heap(void);
/**
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ad2ef83..8b63216 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -518,14 +518,26 @@
return ret;
}
+void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
.unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_system_heap_map_kernel,
- .unmap_kernel = ion_system_heap_unmap_kernel,
+ .map_kernel = ion_system_contig_heap_map_kernel,
+ .unmap_kernel = ion_system_contig_heap_unmap_kernel,
.map_user = ion_system_contig_heap_map_user,
.cache_op = ion_system_contig_heap_cache_ops,
.print_debug = ion_system_contig_print_debug,
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 8699178..697587b 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -424,6 +424,7 @@
unsigned int i;
for (i = 0; i < pdata->nr; ++i)
kfree(pdata->heaps[i].extra_data);
+ kfree(pdata->heaps);
kfree(pdata);
}
@@ -553,6 +554,7 @@
const struct device_node *dt_node)
{
struct ion_platform_data *pdata = 0;
+ struct ion_platform_heap *heaps = NULL;
struct device_node *node;
uint32_t val = 0;
int ret = 0;
@@ -565,11 +567,17 @@
if (!num_heaps)
return ERR_PTR(-EINVAL);
- pdata = kzalloc(sizeof(struct ion_platform_data) +
- num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
+ pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
+ heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL);
+ if (!heaps) {
+ kfree(pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->heaps = heaps;
pdata->nr = num_heaps;
for_each_child_of_node(dt_node, node) {
@@ -769,19 +777,6 @@
if (pdata_needs_to_be_freed)
free_pdata(pdata);
- /* Check if each heap has been removed from the memblock */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
- if (!heap_data->base)
- continue;
- err = memblock_overlaps_memory(heap_data->base,
- heap_data->size);
- if (err) {
- panic("ION heap %s not removed from memblock\n",
- heap_data->name);
- }
- }
-
check_for_heap_overlap(pdata->heaps, num_heaps);
platform_set_drvdata(pdev, idev);
return 0;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 060e89a..cced7ef 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -29,6 +29,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
+#include "kgsl_trace.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -2067,6 +2068,8 @@
if (!in_interrupt())
kgsl_pre_hwaccess(device);
+ trace_kgsl_regwrite(device, offsetwords, value);
+
kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
@@ -2157,7 +2160,6 @@
int status;
unsigned int ref_ts, enableflag;
unsigned int context_id;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
mutex_lock(&device->mutex);
context_id = _get_context_id(context);
@@ -2204,13 +2206,9 @@
cmds[0] = cp_type3_packet(CP_NOP, 1);
cmds[1] = 0;
- if (adreno_dev->drawctxt_active)
+ if (context)
adreno_ringbuffer_issuecmds_intr(device,
context, &cmds[0], 2);
- else
- /* We would never call this function if there
- * was no active contexts running */
- BUG();
}
}
unlock:
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 61378fe..cf16995 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -168,6 +168,9 @@
extern const unsigned int a3xx_registers[];
extern const unsigned int a3xx_registers_count;
+extern const unsigned int a3xx_hlsq_registers[];
+extern const unsigned int a3xx_hlsq_registers_count;
+
extern const unsigned int a330_registers[];
extern const unsigned int a330_registers_count;
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index feced43..2466a5c 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -40,8 +40,8 @@
0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
- 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
- 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5,
+ 0x0e41, 0x0e45, 0x0e64, 0x0e65,
0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
@@ -49,7 +49,7 @@
0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
- 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
@@ -58,7 +58,7 @@
0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
- 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+ 0x25f0, 0x25f0,
0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
@@ -69,6 +69,18 @@
const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2;
+/* The following HLSQ register ranges were removed from the main a3xx
+ * register list because reading them during recovery may cause the device
+ * to hang:
+ */
+const unsigned int a3xx_hlsq_registers[] = {
+ 0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a,
+ 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+};
+
+const unsigned int a3xx_hlsq_registers_count =
+ ARRAY_SIZE(a3xx_hlsq_registers) / 2;
+
/* The set of additional registers to be dumped for A330 */
const unsigned int a330_registers[] = {
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index 1243dd0..de95951 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -341,6 +341,41 @@
return snapshot;
}
+static void _snapshot_a3xx_regs(struct kgsl_snapshot_registers *regs,
+ struct kgsl_snapshot_registers_list *list)
+{
+ regs[list->count].regs = (unsigned int *) a3xx_registers;
+ regs[list->count].count = a3xx_registers_count;
+ list->count++;
+}
+
+static void _snapshot_hlsq_regs(struct kgsl_snapshot_registers *regs,
+ struct kgsl_snapshot_registers_list *list,
+ struct adreno_device *adreno_dev)
+{
+ /* HLSQ specific registers */
+ /*
+ * Don't dump any a3xx HLSQ registers just yet. Reading the HLSQ
+ * registers can cause the device to hang if the HLSQ block is
+ * busy. Add specific checks for each a3xx core as the requirements
+ * are discovered. Disable by default for now.
+ */
+ if (!adreno_is_a3xx(adreno_dev)) {
+ regs[list->count].regs = (unsigned int *) a3xx_hlsq_registers;
+ regs[list->count].count = a3xx_hlsq_registers_count;
+ list->count++;
+ }
+}
+
+static void _snapshot_a330_regs(struct kgsl_snapshot_registers *regs,
+ struct kgsl_snapshot_registers_list *list)
+{
+ /* For A330, append the additional list of new registers to grab */
+ regs[list->count].regs = (unsigned int *) a330_registers;
+ regs[list->count].count = a330_registers_count;
+ list->count++;
+}
+
/* A3XX GPU snapshot function - this is where all of the A3XX specific
* bits and pieces are grabbed into the snapshot memory
*/
@@ -350,21 +385,20 @@
{
struct kgsl_device *device = &adreno_dev->dev;
struct kgsl_snapshot_registers_list list;
- struct kgsl_snapshot_registers regs[2];
+ struct kgsl_snapshot_registers regs[5];
int size;
- regs[0].regs = (unsigned int *) a3xx_registers;
- regs[0].count = a3xx_registers_count;
-
list.registers = regs;
- list.count = 1;
+ list.count = 0;
- /* For A330, append the additional list of new registers to grab */
- if (adreno_is_a330(adreno_dev)) {
- regs[1].regs = (unsigned int *) a330_registers;
- regs[1].count = a330_registers_count;
- list.count++;
- }
+ /* Disable Clock gating temporarily for the debug bus to work */
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
+
+ /* Store relevant registers in list to snapshot */
+ _snapshot_a3xx_regs(regs, &list);
+ _snapshot_hlsq_regs(regs, &list, adreno_dev);
+ if (adreno_is_a330(adreno_dev))
+ _snapshot_a330_regs(regs, &list);
/* Master set of (non debug) registers */
snapshot = kgsl_snapshot_add_section(device,
@@ -385,9 +419,6 @@
remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
64, 44);
- /* Disable Clock gating temporarily for the debug bus to work */
- adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
-
/* VPC memory */
snapshot = kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 27343c5..8af361a 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -513,7 +513,7 @@
if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
context_id = context->id;
- if ((context->flags & CTXT_FLAGS_USER_GENERATED_TS) &&
+ if ((context && context->flags & CTXT_FLAGS_USER_GENERATED_TS) &&
(!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))) {
if (timestamp_cmp(rb->timestamp[context_id],
timestamp) >= 0) {
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 0b445d6..deafa7a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -28,6 +28,7 @@
#include <linux/msm_ion.h>
#include <linux/io.h>
#include <mach/socinfo.h>
+#include <linux/mman.h>
#include "kgsl.h"
#include "kgsl_debugfs.h"
@@ -1078,6 +1079,58 @@
}
/**
+ * kgsl_sharedmem_region_empty - Check if an address region is empty
+ *
+ * @private: private data for the process to check.
+ * @gpuaddr: start address of the region
+ * @size: length of the region.
+ *
+ * Checks that there are no existing allocations within an address
+ * region. Note that unlike other kgsl_sharedmem* search functions,
+ * this one manages locking on its own.
+ */
+int
+kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
+ unsigned int gpuaddr, size_t size)
+{
+ int result = 1;
+ unsigned int gpuaddr_end = gpuaddr + size;
+
+ struct rb_node *node = private->mem_rb.rb_node;
+
+ if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
+ return 0;
+
+ /* don't overflow */
+ if (gpuaddr_end < gpuaddr)
+ return 0;
+
+ spin_lock(&private->mem_lock);
+ node = private->mem_rb.rb_node;
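+	/*
+	 * Walk the rbtree of this process' allocations, descending towards
+	 * the requested range until an overlapping entry is found or the
+	 * search is exhausted.
+	 */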
+ while (node != NULL) {
+ struct kgsl_mem_entry *entry;
+ unsigned int memdesc_start, memdesc_end;
+
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+ memdesc_start = entry->memdesc.gpuaddr;
+ memdesc_end = memdesc_start
+ + kgsl_memdesc_mmapsize(&entry->memdesc);
+
+ if (gpuaddr_end <= memdesc_start)
+ node = node->rb_left;
+ else if (memdesc_end <= gpuaddr)
+ node = node->rb_right;
+ else {
+ result = 0;
+ break;
+ }
+ }
+ spin_unlock(&private->mem_lock);
+ return result;
+}
+
+/**
* kgsl_sharedmem_find_id - find a memory entry by id
* @process: the owning process
* @id: id to find
@@ -1648,6 +1701,8 @@
entry->memdesc.size = size;
entry->memdesc.physaddr = phys + offset;
entry->memdesc.hostptr = (void *) (virt + offset);
+	/* USE_CPU_MAP is not implemented for PMEM. */
+ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size);
if (ret)
@@ -1764,6 +1819,8 @@
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = size;
entry->memdesc.useraddr = useraddr + (offset & PAGE_MASK);
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+ entry->memdesc.gpuaddr = entry->memdesc.useraddr;
return memdesc_sg_virt(&entry->memdesc, entry->memdesc.useraddr,
size);
@@ -1816,6 +1873,8 @@
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = ALIGN(size, PAGE_SIZE);
entry->memdesc.useraddr = useraddr;
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+ entry->memdesc.gpuaddr = entry->memdesc.useraddr;
ret = memdesc_sg_virt(&entry->memdesc, useraddr, size);
if (ret)
@@ -1861,6 +1920,8 @@
entry->priv_data = handle;
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = 0;
+	/* USE_CPU_MAP is not implemented for ION. */
+ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
sg_table = ion_sg_table(kgsl_ion_client, handle);
@@ -1884,6 +1945,13 @@
return -ENOMEM;
}
+static inline int
+can_use_cpu_map(void)
+{
+ return (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU
+ && kgsl_mmu_is_perprocess());
+}
+
static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@@ -1911,9 +1979,12 @@
*/
param->flags &= KGSL_MEMFLAGS_GPUREADONLY
| KGSL_MEMTYPE_MASK
- | KGSL_MEMALIGN_MASK;
+ | KGSL_MEMALIGN_MASK
+ | KGSL_MEMFLAGS_USE_CPU_MAP;
entry->memdesc.flags = param->flags;
+ if (!can_use_cpu_map())
+ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
switch (memtype) {
case KGSL_USER_MEM_TYPE_PMEM:
@@ -2091,7 +2162,7 @@
return _kgsl_gpumem_sync_cache(entry, param->op);
}
-/* Legacy cache function, does a flush (clean + inv) */
+/* Legacy cache function, does a flush (clean + invalidate) */
static long
kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
@@ -2133,7 +2204,8 @@
flags &= KGSL_MEMFLAGS_GPUREADONLY
| KGSL_CACHEMODE_MASK
| KGSL_MEMTYPE_MASK
- | KGSL_MEMALIGN_MASK;
+ | KGSL_MEMALIGN_MASK
+ | KGSL_MEMFLAGS_USE_CPU_MAP;
entry = kgsl_mem_entry_create();
if (entry == NULL)
@@ -2164,6 +2236,7 @@
struct kgsl_mem_entry *entry = NULL;
int result;
+ param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
if (result)
return result;
@@ -2199,14 +2272,19 @@
struct kgsl_mem_entry *entry = NULL;
int result;
+ if (!can_use_cpu_map())
+ param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+
result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
if (result != 0)
goto err;
- result = kgsl_mmu_map(private->pagetable, &entry->memdesc,
- kgsl_memdesc_protflags(&entry->memdesc));
- if (result)
- goto err;
+ if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ result = kgsl_mmu_map(private->pagetable, &entry->memdesc,
+ kgsl_memdesc_protflags(&entry->memdesc));
+ if (result)
+ goto err;
+ }
result = kgsl_mem_entry_attach_process(entry, private);
if (result != 0)
@@ -2218,7 +2296,7 @@
param->id = entry->id;
param->flags = entry->memdesc.flags;
param->size = entry->memdesc.size;
- param->mmapsize = entry->memdesc.size;
+ param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
param->gpuaddr = entry->memdesc.gpuaddr;
return result;
err:
@@ -2261,7 +2339,7 @@
param->id = entry->id;
param->flags = entry->memdesc.flags;
param->size = entry->memdesc.size;
- param->mmapsize = entry->memdesc.size;
+ param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
param->useraddr = entry->memdesc.useraddr;
return result;
}
@@ -2640,6 +2718,142 @@
.close = kgsl_gpumem_vm_close,
};
+static int
+get_mmap_entry(struct kgsl_process_private *private,
+ struct kgsl_mem_entry **out_entry, unsigned long pgoff,
+ unsigned long len)
+{
+ int ret = -EINVAL;
+ struct kgsl_mem_entry *entry;
+
+ entry = kgsl_sharedmem_find_id(private, pgoff);
+ if (entry == NULL) {
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
+ spin_unlock(&private->mem_lock);
+ }
+
+ if (!entry)
+ return -EINVAL;
+
+ kgsl_mem_entry_get(entry);
+
+ if (!entry->memdesc.ops ||
+ !entry->memdesc.ops->vmflags ||
+ !entry->memdesc.ops->vmfault) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ if (entry->memdesc.useraddr != 0) {
+ ret = -EBUSY;
+ goto err_put;
+ }
+
+ if (len != kgsl_memdesc_mmapsize(&entry->memdesc)) {
+ ret = -ERANGE;
+ goto err_put;
+ }
+
+ *out_entry = entry;
+ return 0;
+err_put:
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
+static unsigned long
+kgsl_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long ret = 0;
+ unsigned long vma_offset = pgoff << PAGE_SHIFT;
+ struct kgsl_device_private *dev_priv = file->private_data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_mem_entry *entry = NULL;
+ unsigned int align;
+ unsigned int retry = 0;
+
+ if (vma_offset == device->memstore.gpuaddr)
+ return get_unmapped_area(NULL, addr, len, pgoff, flags);
+
+ ret = get_mmap_entry(private, &entry, pgoff, len);
+ if (ret)
+ return ret;
+
+ if (!kgsl_memdesc_use_cpu_map(&entry->memdesc) || (flags & MAP_FIXED)) {
+ /*
+ * If we're not going to use the same mapping on the gpu,
+ * any address is fine.
+ * For MAP_FIXED, hopefully the caller knows what they're doing,
+ * but we may fail in mmap() if there is already something
+ * at the virtual address chosen.
+ */
+ ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
+ goto put;
+ }
+ if (entry->memdesc.gpuaddr != 0) {
+ KGSL_MEM_INFO(device,
+ "pgoff %lx already mapped to gpuaddr %x\n",
+ pgoff, entry->memdesc.gpuaddr);
+ ret = -EBUSY;
+ goto put;
+ }
+
+ align = kgsl_memdesc_get_align(&entry->memdesc);
+ if (align >= ilog2(SZ_1M))
+ align = ilog2(SZ_1M);
+ else if (align >= ilog2(SZ_64K))
+ align = ilog2(SZ_64K);
+ else if (align <= PAGE_SHIFT)
+ align = 0;
+
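+	/*
+	 * Over-allocate the search window so the returned address can be
+	 * rounded up to the requested alignment below.
+	 */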
+ if (align)
+ len += 1 << align;
+ do {
+ ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(ret))
+ break;
+ if (align)
+ ret = ALIGN(ret, (1 << align));
+
+		/* make sure there isn't a GPU-only mapping at this address */
+ if (kgsl_sharedmem_region_empty(private, ret, len))
+ break;
+
+ trace_kgsl_mem_unmapped_area_collision(entry, addr, len, ret);
+
+ /*
+ * If we collided, bump the hint address so that
+		 * get_unmapped_area knows to look somewhere else.
+ */
+ addr = (addr == 0) ? ret + len : addr + len;
+
+ /*
+ * The addr hint can be set by userspace to be near
+ * the end of the address space. Make sure we search
+ * the whole address space at least once by wrapping
+ * back around once.
+ */
+ if (!retry && (addr + len >= TASK_SIZE)) {
+ addr = 0;
+ retry = 1;
+ } else {
+ ret = -EBUSY;
+ }
+ } while (addr + len < TASK_SIZE);
+
+ if (IS_ERR_VALUE(ret))
+ KGSL_MEM_INFO(device,
+ "pid %d pgoff %lx len %ld failed error %ld\n",
+ private->pid, pgoff, len, ret);
+put:
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned int ret, cache;
@@ -2654,35 +2868,21 @@
if (vma_offset == device->memstore.gpuaddr)
return kgsl_mmap_memstore(device, vma);
- /* Find a chunk of GPU memory */
- entry = kgsl_sharedmem_find_id(private, vma->vm_pgoff);
+ ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start);
+ if (ret)
+ return ret;
- if (entry == NULL) {
- spin_lock(&private->mem_lock);
- entry = kgsl_sharedmem_find(private, vma_offset);
- spin_unlock(&private->mem_lock);
- }
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ entry->memdesc.gpuaddr = vma->vm_start;
- if (entry)
- kgsl_mem_entry_get(entry);
- else
- return -EINVAL;
-
- if (!entry->memdesc.ops ||
- !entry->memdesc.ops->vmflags ||
- !entry->memdesc.ops->vmfault) {
- ret = -EINVAL;
- goto err_put;
- }
-
- if (entry->memdesc.useraddr != 0) {
- ret = -EBUSY;
- goto err_put;
- }
-
- if (entry->memdesc.size != (vma->vm_end - vma->vm_start)) {
- ret = -ERANGE;
- goto err_put;
+ ret = kgsl_mmu_map(private->pagetable, &entry->memdesc,
+ kgsl_memdesc_protflags(&entry->memdesc));
+ if (ret) {
+ kgsl_mem_entry_put(entry);
+ return ret;
+ }
+ kgsl_mem_entry_track_gpuaddr(private, entry);
}
vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
@@ -2714,10 +2914,9 @@
entry->memdesc.useraddr = vma->vm_start;
+ trace_kgsl_mem_mmap(entry);
+
return 0;
-err_put:
- kgsl_mem_entry_put(entry);
- return ret;
}
static irqreturn_t kgsl_irq_handler(int irq, void *data)
@@ -2733,6 +2932,7 @@
.release = kgsl_release,
.open = kgsl_open,
.mmap = kgsl_mmap,
+ .get_unmapped_area = kgsl_get_unmapped_area,
.unlocked_ioctl = kgsl_ioctl,
};
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index cd1f763..72e7776 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -242,6 +242,10 @@
static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
unsigned int gpuaddr, unsigned int size)
{
+ /* don't overflow */
+ if ((gpuaddr + size) < gpuaddr)
+ return 0;
+
if (gpuaddr >= memdesc->gpuaddr &&
((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) {
return 1;
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index d4834e5..3bc107f 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -232,36 +232,59 @@
return table[kgsl_memdesc_get_cachemode(m)];
}
+static void print_mem_entry(struct seq_file *s, struct kgsl_mem_entry *entry)
+{
+ char flags[6];
+ char usage[16];
+ struct kgsl_memdesc *m = &entry->memdesc;
+
+ flags[0] = kgsl_memdesc_is_global(m) ? 'g' : '-';
+ flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
+ flags[2] = get_alignflag(m);
+ flags[3] = get_cacheflag(m);
+ flags[4] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
+ flags[5] = '\0';
+
+ kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
+
+ seq_printf(s, "%08x %8d %5d %5s %10s %16s %5d\n",
+ m->gpuaddr, m->size, entry->id, flags,
+ memtype_str(entry->memtype), usage, m->sglen);
+}
+
static int process_mem_print(struct seq_file *s, void *unused)
{
struct kgsl_mem_entry *entry;
struct rb_node *node;
struct kgsl_process_private *private = s->private;
- char flags[5];
- char usage[16];
+ int next = 0;
- spin_lock(&private->mem_lock);
seq_printf(s, "%8s %8s %5s %5s %10s %16s %5s\n",
"gpuaddr", "size", "id", "flags", "type", "usage", "sglen");
+
+ /* print all entries with a GPU address */
+ spin_lock(&private->mem_lock);
+
for (node = rb_first(&private->mem_rb); node; node = rb_next(node)) {
- struct kgsl_memdesc *m;
-
entry = rb_entry(node, struct kgsl_mem_entry, node);
- m = &entry->memdesc;
-
- flags[0] = kgsl_memdesc_is_global(m) ? 'g' : '-';
- flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
- flags[2] = get_alignflag(m);
- flags[3] = get_cacheflag(m);
- flags[4] = '\0';
-
- kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
-
- seq_printf(s, "%08x %8d %5d %5s %10s %16s %5d\n",
- m->gpuaddr, m->size, entry->id, flags,
- memtype_str(entry->memtype), usage, m->sglen);
+ print_mem_entry(s, entry);
}
+
spin_unlock(&private->mem_lock);
+
+ /* now print all the unbound entries */
+ while (1) {
+ rcu_read_lock();
+ entry = idr_get_next(&private->mem_idr, &next);
+ rcu_read_unlock();
+
+ if (entry == NULL)
+ break;
+ if (entry->memdesc.gpuaddr == 0)
+ print_mem_entry(s, entry);
+ next++;
+ }
+
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 1bccd4d..31491d5 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -897,8 +897,6 @@
kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
mh->mpu_base + mh->mpu_range);
- } else {
- kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
}
mmu->hwpagetable = mmu->defaultpagetable;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index bf39587..533f02f 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -27,9 +27,6 @@
#include "kgsl_sharedmem.h"
#include "adreno.h"
-#define KGSL_MMU_ALIGN_SHIFT 13
-#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
-
static enum kgsl_mmutype kgsl_mmu_type;
static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
@@ -447,10 +444,10 @@
if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
((KGSL_MMU_GLOBAL_PT == name) ||
(KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
- pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
+ pagetable->kgsl_pool = gen_pool_create(ilog2(SZ_8K), -1);
if (pagetable->kgsl_pool == NULL) {
KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
- KGSL_MMU_ALIGN_SHIFT);
+ ilog2(SZ_8K));
goto err_alloc;
}
if (gen_pool_add(pagetable->kgsl_pool,
@@ -461,10 +458,10 @@
}
}
- pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
+ pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
if (pagetable->pool == NULL) {
KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
- KGSL_MMU_ALIGN_SHIFT);
+ PAGE_SHIFT);
goto err_kgsl_pool;
}
@@ -585,7 +582,7 @@
unsigned int protflags)
{
int ret;
- struct gen_pool *pool;
+ struct gen_pool *pool = NULL;
int size;
int page_align = ilog2(PAGE_SIZE);
@@ -635,6 +632,10 @@
pagetable->name);
return -EINVAL;
}
+ } else if (kgsl_memdesc_use_cpu_map(memdesc)) {
+ if (memdesc->gpuaddr == 0)
+ return -EINVAL;
+ pool = NULL;
}
}
if (pool) {
@@ -715,9 +716,11 @@
pool = pagetable->pool;
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()
- && kgsl_memdesc_is_global(memdesc)) {
- pool = pagetable->kgsl_pool;
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ if (kgsl_memdesc_is_global(memdesc))
+ pool = pagetable->kgsl_pool;
+ else if (kgsl_memdesc_use_cpu_map(memdesc))
+ pool = NULL;
}
if (pool)
gen_pool_free(pool, memdesc->gpuaddr, size);
@@ -854,8 +857,13 @@
{
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return 1;
- return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
- (gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
+ if (gpuaddr >= kgsl_mmu_get_base_addr() &&
+ gpuaddr < kgsl_mmu_get_base_addr() + kgsl_mmu_get_ptsize())
+ return 1;
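+	/*
+	 * With per-process IOMMU pagetables, GPU mappings can mirror the
+	 * CPU address space, so any userspace address is acceptable here.
+	 */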
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU
+ && kgsl_mmu_is_perprocess())
+ return (gpuaddr > 0 && gpuaddr < TASK_SIZE);
+ return 0;
}
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index b8eff60..c8d637e 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -19,11 +19,10 @@
* These defines control the address range for allocations that
* are mapped into all pagetables.
*/
-#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xC0000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_4M
-#define KGSL_MMU_ALIGN_SHIFT 13
-#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
+#define KGSL_MMU_ALIGN_MASK (~((1 << PAGE_SHIFT) - 1))
/* Identifier for the global page table */
/* Per process page tables will probably pass in the thread group
@@ -350,32 +349,52 @@
/*
* kgsl_mmu_base_addr() - Get gpu virtual address base.
*
- * Returns the start address of the gpu
- * virtual address space.
+ * Returns the start address of the allocatable gpu
+ * virtual address space. Other mappings that mirror
+ * the CPU address space are possible outside this range.
*/
static inline unsigned int kgsl_mmu_get_base_addr(void)
{
- return KGSL_PAGETABLE_BASE;
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()
+ || !kgsl_mmu_is_perprocess())
+ return KGSL_PAGETABLE_BASE;
+ /*
+ * This is the start of the kernel address
+ * space, so allocations from this range will
+	 * never conflict with userspace addresses
+ */
+ return PAGE_OFFSET;
}
/*
* kgsl_mmu_get_ptsize() - Get gpu pagetable size
*
- * Returns the usable size of the gpu address space.
+ * Returns the usable size of the gpu allocatable
+ * address space.
*/
static inline unsigned int kgsl_mmu_get_ptsize(void)
{
/*
- * For IOMMU, we could do up to 4G virtual range if we wanted to, but
- * it makes more sense to return a smaller range and leave the rest of
- * the virtual range for future improvements
+ * For IOMMU per-process pagetables, the allocatable range
+ * and the kernel global range must both be outside
+	 * the userspace address range. There is a 1MB gap
+	 * between these address ranges to make overrun
+	 * detection easier.
+	 * For the shared pagetable case use 2GB, because
+	 * mirroring the CPU address space is not possible
+	 * and we're better off with the extra room.
*/
enum kgsl_mmutype mmu_type = kgsl_mmu_get_mmutype();
if (KGSL_MMU_TYPE_GPU == mmu_type)
return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
- else if (KGSL_MMU_TYPE_IOMMU == mmu_type)
- return SZ_2G;
+ else if (KGSL_MMU_TYPE_IOMMU == mmu_type) {
+ if (kgsl_mmu_is_perprocess())
+ return KGSL_IOMMU_GLOBAL_MEM_BASE
+ - kgsl_mmu_get_base_addr() - SZ_1M;
+ else
+ return SZ_2G;
+ }
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index df44166..d33df60 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -93,11 +93,8 @@
static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
{
- unsigned int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
- pwr->max_pwrlevel);
-
- unsigned int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
- pwr->min_pwrlevel);
+ int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
+ int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
if (level < max_pwrlevel)
return max_pwrlevel;
@@ -113,6 +110,7 @@
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_pwrlevel *pwrlevel;
int delta;
+ int level;
/* Adjust the power level to the current constraints */
new_level = _adjust_pwrlevel(pwr, new_level);
@@ -124,6 +122,16 @@
update_clk_statistics(device, true);
+ level = pwr->active_pwrlevel;
+
+ /*
+ * Set the active powerlevel first in case the clocks are off - if we
+ * don't do this then the pwrlevel change won't take effect when the
+ * clocks come back
+ */
+
+ pwr->active_pwrlevel = new_level;
+
if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
(device->state == KGSL_STATE_NAP)) {
@@ -141,11 +149,11 @@
* avoid glitches.
*/
- while (pwr->active_pwrlevel != new_level) {
- pwr->active_pwrlevel += delta;
+ while (level != new_level) {
+ level += delta;
clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+ pwr->pwrlevels[level].gpu_freq);
}
}
@@ -677,6 +685,9 @@
clkstats->on_time_old = on_time;
clkstats->elapsed_old = clkstats->elapsed;
clkstats->elapsed = 0;
+
+ trace_kgsl_gpubusy(device, clkstats->on_time_old,
+ clkstats->elapsed_old);
}
/* Track the amount of time the gpu is on vs the total system time. *
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 11ca0b0..08353ee 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -555,7 +555,8 @@
page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
? SZ_64K : PAGE_SIZE;
/* update align flags for what we actually use */
- kgsl_memdesc_set_align(memdesc, ilog2(page_size));
+ if (page_size != PAGE_SIZE)
+ kgsl_memdesc_set_align(memdesc, ilog2(page_size));
/*
* There needs to be enough room in the sg structure to be able to
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index f07f049..d937699 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -181,6 +181,18 @@
}
/*
+ * kgsl_memdesc_has_guard_page - is the last page a guard page?
+ * @memdesc - the memdesc
+ *
+ * Returns nonzero if there is a guard page, 0 otherwise
+ */
+static inline int
+kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
+}
+
+/*
* kgsl_memdesc_protflags - get mmu protection flags
* @memdesc - the memdesc
* Returns a mask of GSL_PT_PAGE* values based on the
@@ -195,6 +207,35 @@
return protflags;
}
+/*
+ * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
+ * @memdesc - the memdesc
+ */
+static inline int
+kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
+}
+
+/*
+ * kgsl_memdesc_mmapsize - get the size of the mmap region
+ * @memdesc - the memdesc
+ *
+ * The entire memdesc must be mapped. Additionally, if the
+ * CPU mapping is going to be mirrored, there must be room
+ * for the guard page to be mapped so that the address spaces
+ * match up.
+ */
+static inline unsigned int
+kgsl_memdesc_mmapsize(const struct kgsl_memdesc *memdesc)
+{
+ unsigned int size = memdesc->size;
+ if (kgsl_memdesc_use_cpu_map(memdesc) &&
+ kgsl_memdesc_has_guard_page(memdesc))
+ size += SZ_4K;
+ return size;
+}
+
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 7cc55f6..bbef139 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -274,6 +274,32 @@
)
);
+TRACE_EVENT(kgsl_gpubusy,
+ TP_PROTO(struct kgsl_device *device, unsigned int busy,
+ unsigned int elapsed),
+
+ TP_ARGS(device, busy, elapsed),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, busy)
+ __field(unsigned int, elapsed)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->busy = busy;
+ __entry->elapsed = elapsed;
+ ),
+
+ TP_printk(
+ "d_name=%s busy=%d elapsed=%d",
+ __get_str(device_name),
+ __entry->busy,
+ __entry->elapsed
+ )
+);
+
DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
TP_PROTO(struct kgsl_device *device, unsigned int state),
@@ -338,6 +364,68 @@
)
);
+TRACE_EVENT(kgsl_mem_mmap,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+ TP_ARGS(mem_entry),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, useraddr)
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __array(char, usage, 16)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->useraddr = mem_entry->memdesc.useraddr;
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ __entry->flags = mem_entry->memdesc.flags;
+ ),
+
+ TP_printk(
+ "useraddr=%lx gpuaddr=0x%08x size=%d usage=%s id=%d"
+ " flags=0x%08x",
+ __entry->useraddr, __entry->gpuaddr, __entry->size,
+ __entry->usage, __entry->id, __entry->flags
+ )
+);
+
+TRACE_EVENT(kgsl_mem_unmapped_area_collision,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry,
+ unsigned long hint,
+ unsigned long len,
+ unsigned long addr),
+
+ TP_ARGS(mem_entry, hint, len, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned long, hint)
+ __field(unsigned long, len)
+ __field(unsigned long, addr)
+ ),
+
+ TP_fast_assign(
+ __entry->id = mem_entry->id;
+ __entry->hint = hint;
+ __entry->len = len;
+ __entry->addr = addr;
+ ),
+
+ TP_printk(
+ "id=%d hint=0x%lx len=%ld addr=0x%lx",
+ __entry->id, __entry->hint, __entry->len, __entry->addr
+ )
+);
+
TRACE_EVENT(kgsl_mem_map,
TP_PROTO(struct kgsl_mem_entry *mem_entry, int fd),
@@ -546,6 +634,31 @@
)
);
+TRACE_EVENT(kgsl_regwrite,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int offset,
+ unsigned int value),
+
+ TP_ARGS(device, offset, value),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, offset)
+ __field(unsigned int, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ "d_name=%s reg=%x value=%x",
+ __get_str(device_name), __entry->offset, __entry->value
+ )
+);
+
#endif /* _KGSL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index d658217..e7d514e 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -1260,10 +1260,10 @@
static int __devinit qpnp_leds_probe(struct spmi_device *spmi)
{
- struct qpnp_led_data *led;
+ struct qpnp_led_data *led, *led_array;
struct resource *led_resource;
struct device_node *node, *temp;
- int rc, i, num_leds = 0;
+ int rc, i, num_leds = 0, parsed_leds = 0;
const char *led_label;
node = spmi->dev.of_node;
@@ -1274,34 +1274,34 @@
while ((temp = of_get_next_child(node, temp)))
num_leds++;
- led = devm_kzalloc(&spmi->dev,
+ if (!num_leds)
+ return -ECHILD;
+
+ led_array = devm_kzalloc(&spmi->dev,
(sizeof(struct qpnp_led_data) * num_leds), GFP_KERNEL);
- if (!led) {
+ if (!led_array) {
dev_err(&spmi->dev, "Unable to allocate memory\n");
return -ENOMEM;
}
- led->num_leds = num_leds;
- num_leds = 0;
-
for_each_child_of_node(node, temp) {
+ led = &led_array[parsed_leds];
+ led->num_leds = num_leds;
led->spmi_dev = spmi;
led_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
if (!led_resource) {
dev_err(&spmi->dev, "Unable to get LED base address\n");
- return -ENXIO;
+ rc = -ENXIO;
+ goto fail_id_check;
}
led->base = led_resource->start;
- dev_set_drvdata(&spmi->dev, led);
-
-
rc = of_property_read_string(temp, "label", &led_label);
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Failure reading label, rc = %d\n", rc);
- return rc;
+ goto fail_id_check;
}
rc = of_property_read_string(temp, "linux,name",
@@ -1309,7 +1309,7 @@
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Failure reading led name, rc = %d\n", rc);
- return rc;
+ goto fail_id_check;
}
rc = of_property_read_u32(temp, "qcom,max-current",
@@ -1317,14 +1317,14 @@
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Failure reading max_current, rc = %d\n", rc);
- return rc;
+ goto fail_id_check;
}
rc = of_property_read_u32(temp, "qcom,id", &led->id);
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Failure reading led id, rc = %d\n", rc);
- return rc;
+ goto fail_id_check;
}
rc = qpnp_get_common_configs(led, temp);
@@ -1332,7 +1332,7 @@
dev_err(&led->spmi_dev->dev,
"Failure reading common led configuration," \
" rc = %d\n", rc);
- return rc;
+ goto fail_id_check;
}
led->cdev.brightness_set = qpnp_led_set;
@@ -1343,7 +1343,7 @@
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Unable to read wled config data\n");
- return rc;
+ goto fail_id_check;
}
} else if (strncmp(led_label, "flash", sizeof("flash"))
== 0) {
@@ -1351,18 +1351,19 @@
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Unable to read flash config data\n");
- return rc;
+ goto fail_id_check;
}
} else if (strncmp(led_label, "rgb", sizeof("rgb")) == 0) {
rc = qpnp_get_config_rgb(led, temp);
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"Unable to read rgb config data\n");
- return rc;
+ goto fail_id_check;
}
} else {
dev_err(&led->spmi_dev->dev, "No LED matching label\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto fail_id_check;
}
spin_lock_init(&led->lock);
@@ -1388,24 +1389,25 @@
led->cdev.brightness = LED_OFF;
qpnp_led_set(&led->cdev, led->cdev.brightness);
- led++;
- num_leds++;
+
+ parsed_leds++;
}
+ dev_set_drvdata(&spmi->dev, led_array);
return 0;
fail_id_check:
- for (i = 0; i < num_leds; i++)
- led_classdev_unregister(&led[i].cdev);
+ for (i = 0; i < parsed_leds; i++)
+ led_classdev_unregister(&led_array[i].cdev);
return rc;
}
static int __devexit qpnp_leds_remove(struct spmi_device *spmi)
{
- struct qpnp_led_data *led = dev_get_drvdata(&spmi->dev);
- int i;
+ struct qpnp_led_data *led_array = dev_get_drvdata(&spmi->dev);
+ int i, parsed_leds = led_array->num_leds;
- for (i = 0; i < led->num_leds; i++)
- led_classdev_unregister(&led[i].cdev);
+ for (i = 0; i < parsed_leds; i++)
+ led_classdev_unregister(&led_array[i].cdev);
return 0;
}
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index e0a341a..711b3007 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -430,7 +430,15 @@
core);
goto exit;
}
-
+ if (!inst->in_reconfig) {
+ rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to move inst: %p to relase res done\n",
+ inst);
+ goto exit;
+ }
+ }
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index f436cf3..41518d7 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -1552,7 +1552,13 @@
int rc = 0;
int i;
struct vidc_buffer_addr_info buffer_info;
-
+ rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to move inst: %p to release res done state\n",
+ inst);
+ goto exit;
+ }
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
@@ -1581,6 +1587,7 @@
dprintk(VIDC_ERR, "Buffer type not recognized: %d\n", b->type);
break;
}
+exit:
return rc;
}
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
index 64897c7..6ecea30 100644
--- a/drivers/media/video/msm_vidc/msm_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -559,6 +559,8 @@
if (inst->state != MSM_VIDC_CORE_INVALID &&
core->state != VIDC_CORE_INVALID)
rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
+ else
+ rc = msm_comm_force_cleanup(inst);
if (rc)
dprintk(VIDC_ERR,
"Failed to move video instance to uninit state\n");
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 46a88c2..d797ba7 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -1125,6 +1125,11 @@
dprintk(VIDC_ERR, "Invalid params, core:%p\n", core);
return -EINVAL;
}
+ if (core->state == VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR,
+ "Core is in bad state. Cannot unset ocmem\n");
+ return -EIO;
+ }
rhdr.resource_id = VIDC_RESOURCE_OCMEM;
rhdr.resource_handle = (u32) &core->resources.ocmem;
init_completion(
@@ -1345,6 +1350,11 @@
return rc;
}
+int msm_comm_force_cleanup(struct msm_vidc_inst *inst)
+{
+ return msm_vidc_deinit_core(inst);
+}
+
static enum hal_domain get_hal_domain(int session_type)
{
enum hal_domain domain;
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 916a3ca..d225a51 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -41,6 +41,7 @@
enum instance_state state);
int msm_comm_unset_ocmem(struct msm_vidc_core *core);
int msm_comm_free_ocmem(struct msm_vidc_core *core);
+int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
enum hal_extradata_id msm_comm_get_hal_extradata_index(
enum v4l2_mpeg_vidc_extradata index);
#define IS_PRIV_CTRL(idx) (\
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index 3bedb92..333bad9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -149,7 +149,6 @@
static void hal_process_sys_error(struct hal_device *device)
{
struct msm_vidc_cb_cmd_done cmd_done;
- disable_irq_nosync(device->hal_data->irq);
memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
cmd_done.device_id = device->device_id;
device->callback(SYS_ERROR, &cmd_done);
@@ -177,11 +176,12 @@
switch (pkt->event_id) {
case HFI_EVENT_SYS_ERROR:
- dprintk(VIDC_INFO, "HFI_EVENT_SYS_ERROR");
+ dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d\n",
+ pkt->event_data1);
hal_process_sys_error(device);
break;
case HFI_EVENT_SESSION_ERROR:
- dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR");
+ dprintk(VIDC_ERR, "HFI_EVENT_SESSION_ERROR");
hal_process_session_error(device, pkt);
break;
case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
@@ -192,7 +192,7 @@
dprintk(VIDC_INFO, "HFI_EVENT_SESSION_PROPERTY_CHANGED");
break;
default:
- dprintk(VIDC_INFO, "hal_process_event_notify:unkown_event_id");
+ dprintk(VIDC_WARN, "hal_process_event_notify:unkown_event_id");
break;
}
}
diff --git a/drivers/media/video/msm_wfd/enc-mfc-subdev.c b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
index 21fc719..aadf5ed 100644
--- a/drivers/media/video/msm_wfd/enc-mfc-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
@@ -903,25 +903,13 @@
struct vcd_property_req_i_frame vcd_property_req_i_frame;
struct vcd_property_hdr vcd_property_hdr;
- int rc = 0;
- switch (type) {
- case V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED:
- /*So...nothing to do?*/
- break;
- case V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME:
- vcd_property_hdr.prop_id = VCD_I_REQ_IFRAME;
- vcd_property_hdr.sz = sizeof(struct vcd_property_req_i_frame);
- vcd_property_req_i_frame.req_i_frame = 1;
+ vcd_property_hdr.prop_id = VCD_I_REQ_IFRAME;
+ vcd_property_hdr.sz = sizeof(struct vcd_property_req_i_frame);
+ vcd_property_req_i_frame.req_i_frame = 1;
- rc = vcd_set_property(client_ctx->vcd_handle,
- &vcd_property_hdr, &vcd_property_req_i_frame);
- break;
- case V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED:
- default:
- rc = -ENOTSUPP;
- }
+ return vcd_set_property(client_ctx->vcd_handle,
+ &vcd_property_hdr, &vcd_property_req_i_frame);
- return rc;
}
static long venc_set_bitrate(struct video_client_ctx *client_ctx,
@@ -1348,10 +1336,10 @@
int level = 0;
switch (value) {
- case V4L2_CID_MPEG_QCOM_PERF_LEVEL_PERFORMANCE:
+ case V4L2_CID_MPEG_VIDC_PERF_LEVEL_PERFORMANCE:
level = VCD_PERF_LEVEL2;
break;
- case V4L2_CID_MPEG_QCOM_PERF_LEVEL_TURBO:
+ case V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO:
level = VCD_PERF_LEVEL_TURBO;
break;
default:
@@ -2304,7 +2292,7 @@
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
rc = venc_set_codec_profile(client_ctx, ctrl->id, ctrl->value);
break;
- case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME:
rc = venc_request_frame(client_ctx, ctrl->value);
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
@@ -2335,7 +2323,7 @@
rc = venc_set_multislicing_mode(client_ctx, ctrl->id,
ctrl->value);
break;
- case V4L2_CID_MPEG_QCOM_SET_PERF_LEVEL:
+ case V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL:
rc = venc_set_max_perf_level(client_ctx, ctrl->value);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER:
diff --git a/drivers/media/video/msm_wfd/enc-venus-subdev.c b/drivers/media/video/msm_wfd/enc-venus-subdev.c
index 150c667..480fe35 100644
--- a/drivers/media/video/msm_wfd/enc-venus-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-venus-subdev.c
@@ -136,7 +136,7 @@
msm_vidc_dqevent(inst->vidc_context, &event);
if (event.type == V4L2_EVENT_MSM_VIDC_CLOSE_DONE) {
- WFD_MSG_ERR("enc callback thread shutting " \
+ WFD_MSG_DBG("enc callback thread shutting " \
"down normally\n");
bail_out = true;
} else {
@@ -1141,6 +1141,15 @@
return 0;
}
+static long venc_set_framerate_mode(struct v4l2_subdev *sd,
+ void *arg)
+{
+ /* TODO: Unsupported for now, but return false success
+ * to preserve binary compatibility for userspace apps
+ * across targets */
+ return 0;
+}
+
long venc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
long rc = 0;
@@ -1210,6 +1219,9 @@
case ENC_MUNMAP:
rc = venc_munmap(sd, arg);
break;
+ case SET_FRAMERATE_MODE:
+ rc = venc_set_framerate_mode(sd, arg);
+ break;
default:
WFD_MSG_ERR("Unknown ioctl %d to enc-subdev\n", cmd);
rc = -ENOTSUPP;
diff --git a/drivers/media/video/msm_wfd/wfd-ioctl.c b/drivers/media/video/msm_wfd/wfd-ioctl.c
index 99dc0d0..74194ff 100644
--- a/drivers/media/video/msm_wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm_wfd/wfd-ioctl.c
@@ -536,7 +536,7 @@
struct mem_region mregion;
if (minfo == NULL) {
- WFD_MSG_ERR("not freeing buffers since allocation failed");
+ WFD_MSG_DBG("not freeing buffers since allocation failed");
return;
}
@@ -1098,7 +1098,6 @@
rc = -EINVAL;
goto set_parm_fail;
}
- venc_mode = VENC_MODE_CFR;
frame_interval =
a->parm.capture.timeperframe.numerator * NSEC_PER_SEC /
a->parm.capture.timeperframe.denominator;
@@ -1128,6 +1127,7 @@
max_frame_interval = (int64_t)frameskip.maxframeinterval;
vsg_mode = VSG_MODE_VFR;
+ venc_mode = VENC_MODE_VFR;
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
ioctl, VSG_SET_MAX_FRAME_INTERVAL,
@@ -1135,24 +1135,25 @@
if (rc)
goto set_parm_fail;
-
- rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
- ioctl, VSG_SET_MODE, &vsg_mode);
-
- if (rc)
- goto set_parm_fail;
} else {
vsg_mode = VSG_MODE_CFR;
- rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
- ioctl, VSG_SET_MODE, &vsg_mode);
+ venc_mode = VENC_MODE_CFR;
+ }
- if (rc)
- goto set_parm_fail;
+ rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
+ ioctl, VSG_SET_MODE, &vsg_mode);
+ if (rc) {
+ WFD_MSG_ERR("Setting FR mode for VSG failed\n");
+ goto set_parm_fail;
}
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
ioctl, SET_FRAMERATE_MODE,
&venc_mode);
+ if (rc) {
+ WFD_MSG_ERR("Setting FR mode for VENC failed\n");
+ goto set_parm_fail;
+ }
set_parm_fail:
return rc;
diff --git a/drivers/media/video/msm_wfd/wfd-util.c b/drivers/media/video/msm_wfd/wfd-util.c
index 5c00e5c..28a6084 100644
--- a/drivers/media/video/msm_wfd/wfd-util.c
+++ b/drivers/media/video/msm_wfd/wfd-util.c
@@ -198,7 +198,7 @@
int wfd_stats_deinit(struct wfd_stats *stats)
{
- WFD_MSG_ERR("Latencies: avg enc. latency %d",
+ WFD_MSG_DBG("Latencies: avg enc. latency %d",
stats->enc_avg_latency);
/* Delete all debugfs files in one shot :) */
if (stats->d_parent)
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 3715417..c415952 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -235,6 +235,8 @@
uint32_t user_virt_sb_base;
size_t sb_length;
struct ion_handle *ihandle; /* Retrieve phy addr */
+ bool perf_enabled;
+ bool fast_load_enabled;
};
struct qseecom_listener_handle {
@@ -266,8 +268,8 @@
};
/* Function proto types */
-static int qsee_vote_for_clock(int32_t);
-static void qsee_disable_clock_vote(int32_t);
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
struct qseecom_register_listener_req *svc)
@@ -701,7 +703,7 @@
return -EFAULT;
}
/* Vote for the SFPB clock */
- ret = qsee_vote_for_clock(CLK_SFPB);
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
if (ret)
pr_warning("Unable to vote for SFPB clock");
req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
@@ -734,7 +736,7 @@
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -ENOMEM;
}
@@ -762,7 +764,7 @@
pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EFAULT;
}
@@ -773,7 +775,7 @@
ret);
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return ret;
}
}
@@ -783,7 +785,7 @@
resp.result);
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EFAULT;
}
@@ -792,7 +794,7 @@
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_err("kmalloc failed\n");
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -ENOMEM;
}
entry->app_id = app_id;
@@ -815,10 +817,10 @@
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
pr_err("copy_to_user failed\n");
kzfree(entry);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EFAULT;
}
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return 0;
}
@@ -1411,7 +1413,7 @@
/* Populate the remaining parameters */
load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
memcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
- ret = qsee_vote_for_clock(CLK_SFPB);
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
if (ret) {
kzfree(img_data);
pr_warning("Unable to vote for SFPB clock");
@@ -1425,7 +1427,7 @@
kzfree(img_data);
if (ret) {
pr_err("scm_call to load failed : ret %d\n", ret);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EIO;
}
@@ -1448,7 +1450,7 @@
ret = -EINVAL;
break;
}
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return ret;
}
@@ -1713,6 +1715,10 @@
pr_err("Unable to find the handle, exiting\n");
else
ret = qseecom_unload_app(data);
+ if (data->client.fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->client.perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
if (ret == 0) {
kzfree(data);
kzfree(*handle);
@@ -1770,9 +1776,9 @@
return -EINVAL;
}
if (high)
- return qsee_vote_for_clock(CLK_DFAB);
+ return qsee_vote_for_clock(handle->dev, CLK_DFAB);
else {
- qsee_disable_clock_vote(CLK_DFAB);
+ qsee_disable_clock_vote(handle->dev, CLK_DFAB);
return 0;
}
}
@@ -1802,7 +1808,8 @@
return 0;
}
-static int qsee_vote_for_clock(int32_t clk_type)
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+ int32_t clk_type)
{
int ret = 0;
@@ -1826,10 +1833,13 @@
if (ret)
pr_err("DFAB Bandwidth req failed (%d)\n",
ret);
- else
+ else {
qsee_bw_count++;
+ data->client.perf_enabled = true;
+ }
} else {
qsee_bw_count++;
+ data->client.perf_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1850,10 +1860,13 @@
if (ret)
pr_err("SFPB Bandwidth req failed (%d)\n",
ret);
- else
+ else {
qsee_sfpb_bw_count++;
+ data->client.fast_load_enabled = true;
+ }
} else {
qsee_sfpb_bw_count++;
+ data->client.fast_load_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1864,7 +1877,8 @@
return ret;
}
-static void qsee_disable_clock_vote(int32_t clk_type)
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+ int32_t clk_type)
{
int32_t ret = 0;
@@ -1880,7 +1894,7 @@
return;
}
- if ((qsee_bw_count > 0) && (qsee_bw_count-- == 1)) {
+ if (qsee_bw_count == 1) {
if (qsee_sfpb_bw_count > 0)
ret = msm_bus_scale_client_update_request(
qsee_perf_client, 2);
@@ -1894,6 +1908,13 @@
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
+ else {
+ qsee_bw_count--;
+ data->client.perf_enabled = false;
+ }
+ } else {
+ qsee_bw_count--;
+ data->client.perf_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1904,7 +1925,7 @@
mutex_unlock(&qsee_bw_mutex);
return;
}
- if ((qsee_sfpb_bw_count > 0) && (qsee_sfpb_bw_count-- == 1)) {
+ if (qsee_sfpb_bw_count == 1) {
if (qsee_bw_count > 0)
ret = msm_bus_scale_client_update_request(
qsee_perf_client, 1);
@@ -1918,6 +1939,13 @@
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
+ else {
+ qsee_sfpb_bw_count--;
+ data->client.fast_load_enabled = false;
+ }
+ } else {
+ qsee_sfpb_bw_count--;
+ data->client.fast_load_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -2246,7 +2274,7 @@
}
case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
atomic_inc(&data->ioctl_count);
- ret = qsee_vote_for_clock(CLK_DFAB);
+ ret = qsee_vote_for_clock(data, CLK_DFAB);
if (ret)
pr_err("Failed to vote for DFAB clock%d\n", ret);
atomic_dec(&data->ioctl_count);
@@ -2254,7 +2282,7 @@
}
case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
atomic_inc(&data->ioctl_count);
- qsee_disable_clock_vote(CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_DFAB);
atomic_dec(&data->ioctl_count);
break;
}
@@ -2356,6 +2384,11 @@
return ret;
}
}
+ if (data->client.fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->client.perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
+
if (qseecom.qseos_version == QSEOS_VERSION_13) {
mutex_lock(&pil_access_lock);
if (pil_ref_cnt == 1)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ae68060..0b5449e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -136,6 +136,10 @@
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md);
+static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
mqrq->packed_cmd = MMC_PACKED_NONE;
@@ -463,6 +467,38 @@
return ERR_PTR(err);
}
+static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
+ u32 retries_max)
+{
+ int err;
+ u32 retry_count = 0;
+
+ if (!status || !retries_max)
+ return -EINVAL;
+
+ do {
+ err = get_card_status(card, status, 5);
+ if (err)
+ break;
+
+ if (!R1_STATUS(*status) &&
+ (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
+ break; /* RPMB programming operation complete */
+
+ /*
+ * Reschedule to give the MMC device a chance to continue
+ * processing the previous command without being polled too
+ * frequently.
+ */
+ usleep_range(1000, 5000);
+ } while (++retry_count < retries_max);
+
+ if (retry_count == retries_max)
+ err = -EPERM;
+
+ return err;
+}
+
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
{
@@ -474,6 +510,8 @@
struct mmc_request mrq = {NULL};
struct scatterlist sg;
int err;
+ int is_rpmb = false;
+ u32 status = 0;
/*
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -493,6 +531,9 @@
goto cmd_done;
}
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ is_rpmb = true;
+
card = md->queue.card;
if (IS_ERR(card)) {
err = PTR_ERR(card);
@@ -543,12 +584,23 @@
mmc_claim_host(card->host);
+ err = mmc_blk_part_switch(card, md);
+ if (err)
+ goto cmd_rel_host;
+
if (idata->ic.is_acmd) {
err = mmc_app_cmd(card->host, card);
if (err)
goto cmd_rel_host;
}
+ if (is_rpmb) {
+ err = mmc_set_blockcount(card, data.blocks,
+ idata->ic.write_flag & (1 << 31));
+ if (err)
+ goto cmd_rel_host;
+ }
+
mmc_wait_for_req(card->host, &mrq);
if (cmd.error) {
@@ -584,6 +636,18 @@
}
}
+ if (is_rpmb) {
+ /*
+ * Ensure RPMB command has completed by polling CMD13
+ * "Send Status".
+ */
+ err = ioctl_rpmb_card_status_poll(card, &status, 5);
+ if (err)
+ dev_err(mmc_dev(card->host),
+ "%s: Card Status=0x%08X, error %d\n",
+ __func__, status, err);
+ }
+
cmd_rel_host:
mmc_release_host(card->host);
@@ -1022,9 +1086,6 @@
goto out;
}
- if (mmc_can_sanitize(card))
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
if (err && !mmc_blk_reset(md, card->host, type))
goto retry;
@@ -2119,6 +2180,8 @@
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
md->disk->flags = GENHD_FL_EXT_DEVT;
+ if (area_type & MMC_BLK_DATA_AREA_RPMB)
+ md->disk->flags |= GENHD_FL_NO_PART_SCAN;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f91ba89..89f834a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2214,6 +2214,20 @@
}
EXPORT_SYMBOL(mmc_set_blocklen);
+int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write)
+{
+ struct mmc_command cmd = {0};
+
+ cmd.opcode = MMC_SET_BLOCK_COUNT;
+ cmd.arg = blockcount & 0x0000FFFF;
+ if (is_rel_write)
+ cmd.arg |= 1 << 31;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blockcount);
+
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d3dc133..c1a6b28 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -501,6 +501,17 @@
card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
+
+ /*
+ * RPMB regions are defined in multiples of 128K.
+ */
+ card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
+ if (ext_csd[EXT_CSD_RPMB_MULT]) {
+ mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
+ EXT_CSD_PART_CONFIG_ACC_RPMB,
+ "rpmb", 0, false,
+ MMC_BLK_DATA_AREA_RPMB);
+ }
}
card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
@@ -632,6 +643,8 @@
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
+MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
@@ -647,6 +660,8 @@
&dev_attr_serial.attr,
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
+ &dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_rel_sectors.attr,
NULL,
};
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 872a9b5..9394986 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -85,5 +85,11 @@
for the IPA core.
Kernel and user-space processes can call the IPA driver
to configure IPA core.
+config MSM_AVTIMER
+ tristate "Avtimer Driver"
+ depends on ARCH_MSM8960
+ help
+ This driver gets the Q6 out of the power-collapsed state and
+ exposes an ioctl to read the avtimer tick.
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 0a755d3..919c07f 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -8,3 +8,4 @@
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
obj-$(CONFIG_QPNP_CLKDIV) += qpnp-clkdiv.o
+obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
diff --git a/drivers/platform/msm/avtimer.c b/drivers/platform/msm/avtimer.c
new file mode 100644
index 0000000..f513ceb
--- /dev/null
+++ b/drivers/platform/msm/avtimer.c
@@ -0,0 +1,369 @@
+
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/avtimer.h>
+#include <mach/qdsp6v2/apr.h>
+
+#define DEVICE_NAME "avtimer"
+
+
+#define ADSP_CMD_SET_POWER_COLLAPSE_STATE 0x0001115C
+
+static int major; /* Major number assigned to our device driver */
+struct avtimer_t {
+ struct cdev myc;
+ struct class *avtimer_class;
+ struct mutex avtimer_lock;
+ int avtimer_open_cnt;
+ struct dev_avtimer_data *avtimer_pdata;
+};
+static struct avtimer_t avtimer;
+
+static struct apr_svc *core_handle;
+
+struct adsp_power_collapse {
+ struct apr_hdr hdr;
+ uint32_t power_collapse;
+};
+
+static int32_t avcs_core_callback(struct apr_client_data *data, void *priv)
+{
+ uint32_t *payload;
+
+ pr_debug("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+ data->payload_size, data->opcode);
+
+ switch (data->opcode) {
+
+ case APR_BASIC_RSP_RESULT:{
+
+ if (data->payload_size == 0) {
+ pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
+ __func__);
+ return 0;
+ }
+
+ payload = data->payload;
+
+ switch (payload[0]) {
+
+ case ADSP_CMD_SET_POWER_COLLAPSE_STATE:
+ pr_debug("CMD_SET_POWER_COLLAPSE_STATE status[0x%x]\n",
+ payload[1]);
+ break;
+ default:
+ pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+ payload[0], payload[1]);
+ break;
+ }
+ break;
+ }
+ case RESET_EVENTS:{
+ pr_debug("Reset event received in Core service");
+ apr_reset(core_handle);
+ core_handle = NULL;
+ break;
+ }
+
+ default:
+ pr_err("Message id from adsp core svc: %d\n", data->opcode);
+ break;
+ }
+
+ return 0;
+}
+
+int avcs_core_open(void)
+{
+ if (core_handle == NULL)
+ core_handle = apr_register("ADSP", "CORE",
+ avcs_core_callback, 0xFFFFFFFF, NULL);
+
+ pr_debug("Open_q %p\n", core_handle);
+ if (core_handle == NULL) {
+ pr_err("%s: Unable to register CORE\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int avcs_core_disable_power_collapse(int disable)
+{
+ struct adsp_power_collapse pc;
+ int rc = 0;
+
+ if (core_handle) {
+ pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(uint32_t));
+ pc.hdr.src_port = 0;
+ pc.hdr.dest_port = 0;
+ pc.hdr.token = 0;
+ pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE;
+ /*
+ * When power_collapse is set to 1 -- If the aDSP is in the power
+ * collapsed state when this command is received, it is awakened
+ * from this state. The aDSP does not power collapse again until
+ * the client revokes this command.
+ * When power_collapse is set to 0 -- This indicates to the aDSP
+ * that the remote client does not need it to be out of power
+ * collapse any longer. This may not always put the aDSP into
+ * power collapse; the aDSP must honor an internal client's
+ * power requirements as well.
+ */
+ pc.power_collapse = disable;
+ rc = apr_send_pkt(core_handle, (uint32_t *)&pc);
+ if (rc < 0) {
+ pr_debug("disable power collapse = %d failed\n",
+ disable);
+ return rc;
+ }
+ pr_debug("disable power collapse = %d\n", disable);
+ }
+ return 0;
+}
+
+static int avtimer_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ struct avtimer_t *pavtimer = &avtimer;
+
+ pr_debug("avtimer_open\n");
+ mutex_lock(&pavtimer->avtimer_lock);
+
+ if (pavtimer->avtimer_open_cnt != 0) {
+ pavtimer->avtimer_open_cnt++;
+ pr_debug("%s: opened avtimer open count=%d\n",
+ __func__, pavtimer->avtimer_open_cnt);
+ mutex_unlock(&pavtimer->avtimer_lock);
+ return 0;
+ }
+ try_module_get(THIS_MODULE);
+
+ rc = avcs_core_open();
+ if (core_handle)
+ rc = avcs_core_disable_power_collapse(1);
+
+ pavtimer->avtimer_open_cnt++;
+ pr_debug("%s: opened avtimer open count=%d\n",
+ __func__, pavtimer->avtimer_open_cnt);
+ mutex_unlock(&pavtimer->avtimer_lock);
+ pr_debug("avtimer_open leave rc=%d\n", rc);
+
+ return rc;
+}
+
+static int avtimer_release(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ struct avtimer_t *pavtimer = &avtimer;
+
+ mutex_lock(&pavtimer->avtimer_lock);
+ pavtimer->avtimer_open_cnt--;
+
+ if (core_handle && pavtimer->avtimer_open_cnt == 0)
+ rc = avcs_core_disable_power_collapse(0);
+
+ pr_debug("device_release(%p,%p) open count=%d\n",
+ inode, file, pavtimer->avtimer_open_cnt);
+
+ module_put(THIS_MODULE);
+
+ mutex_unlock(&pavtimer->avtimer_lock);
+
+ return rc;
+}
+
+/*
+ * ioctl call provides GET_AVTIMER
+ */
+static long avtimer_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ struct avtimer_t *pavtimer = &avtimer;
+ pr_debug("avtimer_ioctl: ioctlnum=%d,param=%lx\n",
+ ioctl_num, ioctl_param);
+
+ switch (ioctl_num) {
+ case IOCTL_GET_AVTIMER_TICK:
+ {
+ void __iomem *p_avtimer_msw = NULL, *p_avtimer_lsw = NULL;
+ uint32_t avtimer_msw_1st = 0, avtimer_lsw = 0;
+ uint32_t avtimer_msw_2nd = 0;
+ uint64_t avtimer_tick;
+
+ if (pavtimer->avtimer_pdata) {
+ p_avtimer_lsw = ioremap(
+ pavtimer->avtimer_pdata->avtimer_lsw_phy_addr, 4);
+ p_avtimer_msw = ioremap(
+ pavtimer->avtimer_pdata->avtimer_msw_phy_addr, 4);
+ }
+ if (!p_avtimer_lsw || !p_avtimer_msw) {
+ pr_err("ioremap failed\n");
+ return -EIO;
+ }
+ do {
+ avtimer_msw_1st = ioread32(p_avtimer_msw);
+ avtimer_lsw = ioread32(p_avtimer_lsw);
+ avtimer_msw_2nd = ioread32(p_avtimer_msw);
+ } while (avtimer_msw_1st != avtimer_msw_2nd);
+
+ avtimer_tick =
+ ((uint64_t) avtimer_msw_1st << 32) | avtimer_lsw;
+
+ pr_debug("AV Timer tick: msw: %d, lsw: %d\n", avtimer_msw_1st,
+ avtimer_lsw);
+ if (copy_to_user((void *) ioctl_param, &avtimer_tick,
+ sizeof(avtimer_tick))) {
+ pr_err("copy_to_user failed\n");
+ iounmap(p_avtimer_lsw);
+ iounmap(p_avtimer_msw);
+ return -EFAULT;
+ }
+ iounmap(p_avtimer_lsw);
+ iounmap(p_avtimer_msw);
+ }
+ break;
+
+ default:
+ pr_err("invalid cmd\n");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct file_operations avtimer_fops = {
+ .unlocked_ioctl = avtimer_ioctl,
+ .open = avtimer_open,
+ .release = avtimer_release
+};
+
+static int dev_avtimer_probe(struct platform_device *pdev)
+{
+ int result;
+ dev_t dev = MKDEV(major, 0);
+ struct device *device_handle;
+ struct avtimer_t *pavtimer = &avtimer;
+
+ /* get the device number */
+ if (major)
+ result = register_chrdev_region(dev, 1, DEVICE_NAME);
+ else {
+ result = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME);
+ major = MAJOR(dev);
+ }
+
+ if (result < 0) {
+ pr_err("Registering avtimer device failed\n");
+ return result;
+ }
+
+ pavtimer->avtimer_class = class_create(THIS_MODULE, "avtimer");
+ if (IS_ERR(pavtimer->avtimer_class)) {
+ result = PTR_ERR(pavtimer->avtimer_class);
+ pr_err("Error creating avtimer class: %d\n", result);
+ goto unregister_chrdev_region;
+ }
+ pavtimer->avtimer_pdata = pdev->dev.platform_data;
+
+ cdev_init(&pavtimer->myc, &avtimer_fops);
+ result = cdev_add(&pavtimer->myc, dev, 1);
+
+ if (result < 0) {
+ pr_err("Registering file operations failed\n");
+ goto class_destroy;
+ }
+
+ device_handle = device_create(pavtimer->avtimer_class,
+ NULL, pavtimer->myc.dev, NULL, "avtimer");
+ if (IS_ERR(device_handle)) {
+ result = PTR_ERR(device_handle);
+ pr_err("device_create failed: %d\n", result);
+ goto class_destroy;
+ }
+
+ mutex_init(&pavtimer->avtimer_lock);
+ core_handle = NULL;
+ pavtimer->avtimer_open_cnt = 0;
+
+ pr_debug("Device create done for avtimer major=%d\n", major);
+
+ return 0;
+
+class_destroy:
+ class_destroy(pavtimer->avtimer_class);
+unregister_chrdev_region:
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+ return result;
+
+}
+
+static int __devexit dev_avtimer_remove(struct platform_device *pdev)
+{
+ struct avtimer_t *pavtimer = &avtimer;
+
+ pr_debug("dev_avtimer_remove\n");
+
+ device_destroy(pavtimer->avtimer_class, pavtimer->myc.dev);
+ cdev_del(&pavtimer->myc);
+ class_destroy(pavtimer->avtimer_class);
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+
+ return 0;
+}
+
+static struct platform_driver dev_avtimer_driver = {
+ .probe = dev_avtimer_probe,
+ .remove = __exit_p(dev_avtimer_remove),
+ .driver = {.name = "dev_avtimer"}
+};
+
+static int __init avtimer_init(void)
+{
+ s32 rc;
+ rc = platform_driver_register(&dev_avtimer_driver);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("platform_driver_register failed.\n");
+ goto error_platform_driver;
+ }
+ pr_debug("dev_avtimer_init : done\n");
+
+ return 0;
+error_platform_driver:
+
+ pr_err("encounterd error\n");
+ return -ENODEV;
+}
+
+static void __exit avtimer_exit(void)
+{
+ pr_debug("avtimer_exit\n");
+ platform_driver_unregister(&dev_avtimer_driver);
+}
+
+module_init(avtimer_init);
+module_exit(avtimer_exit);
+
+MODULE_DESCRIPTION("avtimer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 8f68ef5..7973cfe 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -637,7 +637,7 @@
switch (tx_pkt->cnt) {
case 1:
- ipa_write_done(&tx_pkt->work);
+ ipa_wq_write_done(&tx_pkt->work);
break;
case 0xFFFF:
/* reached end of set */
@@ -651,7 +651,7 @@
list_first_entry(&sys->head_desc_list,
struct ipa_tx_pkt_wrapper, link);
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
- ipa_write_done(&tx_pkt->work);
+ ipa_wq_write_done(&tx_pkt->work);
break;
default:
/* keep looping till reach the end of the set */
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index cf51ab6..a6221b8 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -24,7 +24,7 @@
static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
-static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+static int polling_inactivity[IPA_DIR_MAX] = { 4, 4 };
struct ipa_pkt_info {
void *buffer;
@@ -167,6 +167,34 @@
return -ENOMEM;
}
+static int ipa_reclaim_tx(struct ipa_bridge_pipe_context *sys_tx, bool all)
+{
+ struct sps_iovec iov;
+ struct ipa_pkt_info *tx_pkt;
+ int cnt = 0;
+ int ret;
+
+ do {
+ iov.addr = 0;
+ ret = sps_get_iovec(sys_tx->pipe, &iov);
+ if (ret || iov.addr == 0) {
+ break;
+ } else {
+ tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_move_tail(&tx_pkt->list_node,
+ &sys_tx->free_desc_list);
+ sys_tx->len--;
+ sys_tx->free_len++;
+ tx_pkt->len = ~0;
+ cnt++;
+ }
+ } while (all);
+
+ return cnt;
+}
+
static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
{
struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
@@ -180,22 +208,9 @@
while (1) {
++inactive_cycles;
- iov.addr = 0;
- ret = sps_get_iovec(sys_tx->pipe, &iov);
- if (ret || iov.addr == 0) {
- /* no-op */
- } else {
- inactive_cycles = 0;
- tx_pkt = list_first_entry(&sys_tx->head_desc_list,
- struct ipa_pkt_info,
- list_node);
- list_move_tail(&tx_pkt->list_node,
- &sys_tx->free_desc_list);
- sys_tx->len--;
- sys_tx->free_len++;
- tx_pkt->len = ~0;
- }
+ if (ipa_reclaim_tx(sys_tx, false))
+ inactive_cycles = 0;
iov.addr = 0;
ret = sps_get_iovec(sys_rx->pipe, &iov);
@@ -216,7 +231,7 @@
tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
GFP_KERNEL);
if (!tmp_pkt) {
- pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+ pr_debug_ratelimited("%s: unable to alloc tx_pkt_info\n",
__func__);
usleep_range(polling_min_sleep[dir],
polling_max_sleep[dir]);
@@ -226,7 +241,7 @@
tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
GFP_KERNEL | GFP_DMA);
if (!tmp_pkt->buffer) {
- pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+ pr_debug_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
__func__);
kfree(tmp_pkt);
usleep_range(polling_min_sleep[dir],
@@ -240,7 +255,7 @@
DMA_BIDIRECTIONAL);
if (tmp_pkt->dma_address == 0 ||
tmp_pkt->dma_address == ~0) {
- pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+ pr_debug_ratelimited("%s: dma_map_single failure %p for %p\n",
__func__,
(void *)tmp_pkt->dma_address,
tmp_pkt->buffer);
@@ -271,7 +286,7 @@
SPS_IOVEC_FLAG_EOT);
if (ret) {
list_del(&tx_pkt->list_node);
- pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+ pr_debug_ratelimited("%s: sps_transfer_one failed %d\n",
__func__, ret);
usleep_range(polling_min_sleep[dir],
polling_max_sleep[dir]);
@@ -289,9 +304,10 @@
SPS_IOVEC_FLAG_INT |
SPS_IOVEC_FLAG_EOT);
if (ret) {
- pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+ pr_debug_ratelimited("%s: fail to add to TX dir=%d\n",
__func__, dir);
list_del(&rx_pkt->list_node);
+ ipa_reclaim_tx(sys_tx, true);
usleep_range(polling_min_sleep[dir],
polling_max_sleep[dir]);
goto retry_add_tx;
@@ -306,7 +322,7 @@
}
}
-static void ipa_rx_notify(struct sps_event_notify *notify)
+static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
switch (notify->event_id) {
case SPS_EVENT_EOT:
@@ -457,7 +473,7 @@
sys->register_event.options = SPS_O_EOT;
sys->register_event.mode = SPS_TRIGGER_CALLBACK;
sys->register_event.xfer_done = NULL;
- sys->register_event.callback = ipa_rx_notify;
+ sys->register_event.callback = ipa_sps_irq_rx_notify;
sys->register_event.user = NULL;
ret = sps_register_event(sys->pipe, &sys->register_event);
if (ret < 0) {
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index 823b17d..dc9da7d 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -157,7 +157,7 @@
ep->valid = 1;
ep->client = in->client;
- ep->notify = in->notify;
+ ep->client_notify = in->notify;
ep->priv = in->priv;
if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index c677a6e..4de19d2 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -18,12 +18,21 @@
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
+#define IPA_LAST_DESC_COOKIE 0xFFFF
/**
- * ipa_write_done - this function will be (enevtually) called when a Tx
+ * ipa_write_done() - this function will be (eventually) called when a Tx
* operation is complete
- * @work: work_struct used by the work queue
+ * @work: work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ * the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ * pipe context (not needed anymore)
+ * - return the tx buffer back to one_kb_no_straddle_pool
*/
-void ipa_write_done(struct work_struct *work)
+void ipa_wq_write_done(struct work_struct *work)
{
struct ipa_tx_pkt_wrapper *tx_pkt;
struct ipa_tx_pkt_wrapper *next_pkt;
@@ -40,7 +49,7 @@
if (unlikely(cnt == 0))
WARN_ON(1);
- if (cnt > 1 && cnt != 0xFFFF)
+ if (cnt > 1 && cnt != IPA_LAST_DESC_COOKIE)
mult = tx_pkt->mult;
for (i = 0; i < cnt; i++) {
@@ -77,6 +86,14 @@
* @sys: system pipe context
* @desc: descriptor to send
*
+ * - Allocate tx_packet wrapper
+ * - Allocate a bounce buffer due to HW constraints
+ * (This buffer will be used for the DMA command)
+ * - Copy the data (desc->pyld) to the bounce buffer
+ * - transfer data to the IPA
+ * - after the transfer is done, the SPS will
+ * notify the sending user via ipa_sps_irq_comp_tx()
+ *
* Return codes: 0: success, -EFAULT: failure
*/
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
@@ -117,7 +134,7 @@
}
INIT_LIST_HEAD(&tx_pkt->link);
- INIT_WORK(&tx_pkt->work, ipa_write_done);
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
tx_pkt->type = desc->type;
tx_pkt->cnt = 1; /* only 1 desc in this "set" */
@@ -175,7 +192,20 @@
* ipa_send() - Send multiple descriptors in one HW transaction
* @sys: system pipe context
* @num_desc: number of packets
- * @desc: packets to send
+ * @desc: packets to send (may be immediate command or data)
+ *
+ * This function is used for system-to-bam connection.
+ * - SPS driver expects a struct sps_transfer which will contain all the data
+ * for a transaction
+ * - The sps_transfer struct will be pointing to bounce buffers for
+ * its DMA command (immediate command and data)
+ * - ipa_tx_pkt_wrapper will be used for each ipa
+ * descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ * contain information which will be later used by the user callbacks
+ * - each transfer will be made by calling sps_transfer()
+ * - Each packet (command or data) that will be sent will also be saved in
+ * ipa_sys_context for later check that all data was sent
*
* Return codes: 0: success, -EFAULT: failure
*/
@@ -187,12 +217,20 @@
struct sps_iovec *iovec;
unsigned long irq_flags;
dma_addr_t dma_addr;
- int i;
+ int i = 0;
int j;
int result;
- int fail_dma_wrap;
+ int fail_dma_wrap = 0;
uint size = num_desc * sizeof(struct sps_iovec);
+ transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, 0);
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ if (!transfer.iovec) {
+ IPAERR("fail to alloc DMA mem for sps xfr buff\n");
+ goto failure;
+ }
+
for (i = 0; i < num_desc; i++) {
fail_dma_wrap = 0;
tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
@@ -207,14 +245,6 @@
*/
if (i == 0) {
transfer.user = tx_pkt;
- transfer.iovec =
- dma_alloc_coherent(NULL, size, &dma_addr, 0);
- transfer.iovec_phys = dma_addr;
- transfer.iovec_count = num_desc;
- if (!transfer.iovec) {
- IPAERR("fail alloc DMA mem for sps xfr buff\n");
- goto failure;
- }
tx_pkt->mult.phys_base = dma_addr;
tx_pkt->mult.base = transfer.iovec;
@@ -226,7 +256,7 @@
iovec->flags = 0;
INIT_LIST_HEAD(&tx_pkt->link);
- INIT_WORK(&tx_pkt->work, ipa_write_done);
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
tx_pkt->type = desc[i].type;
tx_pkt->mem.base = desc[i].pyld;
@@ -263,6 +293,10 @@
tx_pkt->user1 = desc[i].user1;
tx_pkt->user2 = desc[i].user2;
+ /*
+ * Point the iovec to the bounce buffer and
+ * add this packet to system pipe context.
+ */
iovec->addr = tx_pkt->mem.phys_base;
spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
@@ -284,7 +318,7 @@
iovec->flags |= (SPS_IOVEC_FLAG_EOT |
SPS_IOVEC_FLAG_INT);
/* "mark" the last desc */
- tx_pkt->cnt = 0xFFFF;
+ tx_pkt->cnt = IPA_LAST_DESC_COOKIE;
}
}
@@ -320,7 +354,7 @@
}
/**
- * ipa_cmd_ack - callback function which will be called by SPS driver after an
+ * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver after an
* immediate command is complete.
* @user1: pointer to the descriptor of the transfer
* @user2:
@@ -328,7 +362,7 @@
* Complete the immediate commands completion object, this will release the
* thread which waits on this completion object (ipa_send_cmd())
*/
-static void ipa_cmd_ack(void *user1, void *user2)
+static void ipa_sps_irq_cmd_ack(void *user1, void *user2)
{
struct ipa_desc *desc = (struct ipa_desc *)user1;
@@ -340,11 +374,13 @@
/**
* ipa_send_cmd - send immediate commands
- * @num_desc: number of descriptors within the descr struct
+ * @num_desc: number of descriptors within the desc struct
* @descr: descriptor structure
*
* Function will block till command gets ACK from IPA HW, caller needs
* to free any resources it allocated after function returns
+ * The callback in ipa_desc should not be set by the caller
+ * for this function.
*/
int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
{
@@ -353,11 +389,10 @@
if (num_desc == 1) {
init_completion(&descr->xfer_done);
- /* client should not set these */
if (descr->callback || descr->user1)
WARN_ON(1);
- descr->callback = ipa_cmd_ack;
+ descr->callback = ipa_sps_irq_cmd_ack;
descr->user1 = descr;
if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr)) {
IPAERR("fail to send immediate command\n");
@@ -368,11 +403,10 @@
desc = &descr[num_desc - 1];
init_completion(&desc->xfer_done);
- /* client should not set these */
if (desc->callback || desc->user1)
WARN_ON(1);
- desc->callback = ipa_cmd_ack;
+ desc->callback = ipa_sps_irq_cmd_ack;
desc->user1 = desc;
if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
IPAERR("fail to send multiple immediate command set\n");
@@ -385,11 +419,15 @@
}
/**
- * ipa_tx_notify() - Callback function which will be called by the SPS driver
- * after a Tx operation is complete. Called in an interrupt context.
+ * ipa_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
* @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * This event will be later handled by ipa_write_done.
*/
-static void ipa_tx_notify(struct sps_event_notify *notify)
+static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
{
struct ipa_tx_pkt_wrapper *tx_pkt;
@@ -473,19 +511,19 @@
mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
- rx_skb->len, ntohs(mux_hdr->interface_id),
- mux_hdr->src_pipe_index,
- mux_hdr->flags, ntohl(mux_hdr->metadata));
+ rx_skb->len, ntohs(mux_hdr->interface_id),
+ mux_hdr->src_pipe_index,
+ mux_hdr->flags, ntohl(mux_hdr->metadata));
IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
- !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
- !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
- IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
- mux_hdr->src_pipe_index,
- ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
- ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify) {
+ IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+ mux_hdr->src_pipe_index,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify);
dev_kfree_skb_any(rx_skb);
ipa_replenish_rx_cache();
continue;
@@ -505,7 +543,8 @@
IPADBG("pulling %d bytes from skb\n", pull_len);
skb_pull(rx_skb, pull_len);
- ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+ ep->client_notify(ep->priv, IPA_RECEIVE,
+ (unsigned long)(rx_skb));
ipa_replenish_rx_cache();
} while (1);
}
@@ -587,7 +626,7 @@
* This comes to prevent the CPU from handling too many interrupts when the
* throughput is high.
*/
-static void ipa_rx_notify(struct sps_event_notify *notify)
+static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
struct ipa_rx_pkt_wrapper *rx_pkt;
@@ -609,9 +648,17 @@
/**
* ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
* IPA EP configuration
- * @sys_in: [in] input needed to setup BAM pipe and config EP
+ * @sys_in: [in] input needed to setup BAM pipe and configure EP
* @clnt_hdl: [out] client handle
*
+ * - configure the end-point registers with the supplied
+ * parameters from the user.
+ * - call SPS APIs to create a system-to-bam connection with IPA.
+ * - allocate descriptor FIFO
+ * - register callback function (ipa_sps_irq_rx_notify or
+ * ipa_sps_irq_tx_notify - depends on client type) in case the driver is
+ * not configured for polling mode
+ *
* Returns: 0 on success, negative on failure
*/
int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
@@ -729,34 +776,21 @@
}
if (!ipa_ctx->polling_mode) {
- if (IPA_CLIENT_IS_CONS(sys_in->client)) {
- ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
- ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
- ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
- ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
- ipa_ctx->sys[sys_idx].event.user =
- &ipa_ctx->sys[sys_idx];
- result =
- sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
- &ipa_ctx->sys[sys_idx].event);
- if (result < 0) {
- IPAERR("rx register event error %d\n", result);
- goto fail_register_event;
- }
- } else {
- ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
- ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
- ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
- ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
- ipa_ctx->sys[sys_idx].event.user =
- &ipa_ctx->sys[sys_idx];
- result =
- sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
- &ipa_ctx->sys[sys_idx].event);
- if (result < 0) {
- IPAERR("tx register event error %d\n", result);
- goto fail_register_event;
- }
+
+ ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+ ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+ ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+ ipa_ctx->sys[sys_idx].event.user =
+ &ipa_ctx->sys[sys_idx];
+ ipa_ctx->sys[sys_idx].event.callback =
+ IPA_CLIENT_IS_CONS(sys_in->client) ?
+ ipa_sps_irq_rx_notify :
+ ipa_sps_irq_tx_notify;
+ result = sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->sys[sys_idx].event);
+ if (result < 0) {
+ IPAERR("register event error %d\n", result);
+ goto fail_register_event;
}
}
@@ -801,21 +835,25 @@
EXPORT_SYMBOL(ipa_teardown_sys_pipe);
/**
- * ipa_tx_comp() - Callback function which will call the user supplied callback
- * function to release the skb, or release it on its own if no callback function
- * was supplied.
+ * ipa_tx_comp_usr_notify_release() - Callback function which will call the
+ * user supplied callback function to release the skb, or release it on
+ * its own if no callback function was supplied.
* @user1
* @user2
+ *
+ * This notification callback (client_notify) is for
+ * the destination client.
+ * This function is supplied in ipa_connect.
*/
-static void ipa_tx_comp(void *user1, void *user2)
+static void ipa_tx_comp_usr_notify_release(void *user1, void *user2)
{
struct sk_buff *skb = (struct sk_buff *)user1;
u32 ep_idx = (u32)user2;
IPADBG("skb=%p ep=%d\n", skb, ep_idx);
- if (ipa_ctx->ep[ep_idx].notify)
- ipa_ctx->ep[ep_idx].notify(ipa_ctx->ep[ep_idx].priv,
+ if (ipa_ctx->ep[ep_idx].client_notify)
+ ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
IPA_WRITE_DONE, (unsigned long)skb);
else
dev_kfree_skb_any(skb);
@@ -832,10 +870,20 @@
* dst is a "valid" CONS type, then SW data-path is used. If dst is the
* WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
* is an error. For errors, client needs to free the skb as needed. For success,
- * IPA driver will later invoke client calback if one was supplied. That
+ * IPA driver will later invoke client callback if one was supplied. That
* callback should free the skb. If no callback supplied, IPA driver will free
* the skb internally
*
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call the user supplied
+ * callback (supplied in ipa_connect())
+ *
* Returns: 0 on success, negative on failure
*/
int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
@@ -864,6 +912,7 @@
IPAERR("failed to alloc immediate command object\n");
goto fail_mem_alloc;
}
+ memset(cmd, 0x00, sizeof(*cmd));
cmd->destination_pipe_index = ipa_ep_idx;
if (meta && meta->mbim_stream_id_valid)
@@ -875,7 +924,7 @@
desc[1].pyld = skb->data;
desc[1].len = skb->len;
desc[1].type = IPA_DATA_DESC_SKB;
- desc[1].callback = ipa_tx_comp;
+ desc[1].callback = ipa_tx_comp_usr_notify_release;
desc[1].user1 = skb;
desc[1].user2 = (void *)ipa_ep_idx;
@@ -887,7 +936,7 @@
desc[0].pyld = skb->data;
desc[0].len = skb->len;
desc[0].type = IPA_DATA_DESC_SKB;
- desc[0].callback = ipa_tx_comp;
+ desc[0].callback = ipa_tx_comp_usr_notify_release;
desc[0].user1 = skb;
desc[0].user2 = (void *)ipa_ep_idx;
@@ -919,7 +968,7 @@
* ipa_handle_rx_core() is run in polling mode. After all packets has been
* received, the driver switches back to interrupt mode.
*/
-void ipa_handle_rx(struct work_struct *work)
+void ipa_wq_handle_rx(struct work_struct *work)
{
ipa_handle_rx_core();
ipa_rx_switch_to_intr_mode();
@@ -962,7 +1011,7 @@
}
INIT_LIST_HEAD(&rx_pkt->link);
- INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+ INIT_WORK(&rx_pkt->work, ipa_wq_handle_rx);
rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
if (rx_pkt->skb == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 63ef5fb..3be2369 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -296,8 +296,9 @@
* @dst_pipe_index: destination pipe index
* @rt_tbl_idx: routing table index
* @connect: SPS connect
- * @priv: user provided information
- * @notify: user provided CB for EP events notification
+ * @priv: user provided information which will be forwarded once the user
+ * is notified of new data being available
+ * @client_notify: user provided CB for EP events notification
* @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
* @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
* @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
@@ -314,7 +315,7 @@
u32 rt_tbl_idx;
struct sps_connect connect;
void *priv;
- void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
unsigned long data);
bool desc_fifo_in_pipe_mem;
bool data_fifo_in_pipe_mem;
@@ -357,7 +358,8 @@
/**
* struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
- * @type: info for the skb or immediate command param
+ * @type: specifies whether this packet is a data packet (skb) or
+ * an immediate command
* @mem: memory buffer used by this Tx packet
* @work: work struct for current Tx packet
* @link: linked to the wrappers on that pipe
@@ -371,6 +373,8 @@
* >1 and <0xFFFF for first of a "multiple" tranfer,
* 0xFFFF for last desc, 0 for rest of "multiple' transfer
* @bounce: va of bounce buffer
+ *
+ * This struct can wrap both data packet and immediate command packet.
*/
struct ipa_tx_pkt_wrapper {
enum ipa_desc_type type;
@@ -693,8 +697,8 @@
void ipa_replenish_rx_cache(void);
void ipa_cleanup_rx(void);
int ipa_cfg_filter(u32 disable);
-void ipa_write_done(struct work_struct *work);
-void ipa_handle_rx(struct work_struct *work);
+void ipa_wq_write_done(struct work_struct *work);
+void ipa_wq_handle_rx(struct work_struct *work);
void ipa_handle_rx_core(void);
int ipa_pipe_mem_init(u32 start_ofst, u32 size);
int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 2eddb9d..05b47cc 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -130,7 +130,7 @@
int default_rbatt_mohm;
int amux_2_trim_delta;
uint16_t prev_last_good_ocv_raw;
- unsigned int rconn_mohm;
+ int rconn_mohm;
struct mutex last_ocv_uv_mutex;
int last_ocv_uv;
int pon_ocv_uv;
@@ -1396,14 +1396,16 @@
int fcc_uah, int cc_uah, int uuc_uah)
{
int chg_soc;
+ int vbat_batt_terminal_uv = vbat_uv
+ + (ibat_ua * chip->rconn_mohm) / 1000;
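+ /* note: uA * mOhm / 1000 gives uV, so the rconn voltage drop is
+ * added to vbat_uv in matching units */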
if (chip->soc_at_cv == -EINVAL) {
/* In constant current charging return the calc soc */
- if (vbat_uv <= chip->max_voltage_uv)
+ if (vbat_batt_terminal_uv <= chip->max_voltage_uv)
pr_debug("CC CHG SOC %d\n", soc);
/* Note the CC to CV point */
- if (vbat_uv >= chip->max_voltage_uv) {
+ if (vbat_batt_terminal_uv >= chip->max_voltage_uv) {
chip->soc_at_cv = soc;
chip->prev_chg_soc = soc;
chip->ibat_at_cv_ua = ibat_ua;
@@ -1419,17 +1421,18 @@
*/
/*
- * if voltage lessened (possibly because of a system load)
- * keep reporting the prev chg soc
+ * if the battery terminal voltage dropped to more than 10mV below max
+ * (possibly because of a sudden increase in system load), keep
+ * reporting the prev chg soc
*/
- if (vbat_uv <= chip->max_voltage_uv) {
- pr_debug("vbat %d < max = %d CC CHG SOC %d\n",
- vbat_uv, chip->max_voltage_uv, chip->prev_chg_soc);
+ if (vbat_batt_terminal_uv <= chip->max_voltage_uv - 10000) {
+ pr_debug("vbat_terminals %d < max = %d CC CHG SOC %d\n",
+ vbat_batt_terminal_uv,
+ chip->max_voltage_uv, chip->prev_chg_soc);
return chip->prev_chg_soc;
}
chg_soc = linear_interpolate(chip->soc_at_cv, chip->ibat_at_cv_ua,
- 100, -100000,
+ 100, -1 * chip->chg_term_ua,
ibat_ua);
/* always report a higher soc */
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index 20b3fc6..7beb24c 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -87,6 +87,7 @@
#define EOC_CHECK_PERIOD_MS 10000
/* check for USB unplug every 200 msecs */
#define UNPLUG_CHECK_WAIT_PERIOD_MS 200
+#define UNPLUG_CHECK_RAMP_MS 25
#define USB_TRIM_ENTRIES 16
enum chg_fsm_state {
@@ -279,6 +280,7 @@
struct delayed_work eoc_work;
struct delayed_work unplug_check_work;
struct delayed_work vin_collapse_check_work;
+ struct delayed_work btc_override_work;
struct wake_lock eoc_wake_lock;
enum pm8921_chg_cold_thr cold_thr;
enum pm8921_chg_hot_thr hot_thr;
@@ -290,6 +292,11 @@
int recent_reported_soc;
int battery_less_hardware;
int ibatmax_max_adj_ma;
+ int btc_override;
+ int btc_override_cold_decidegc;
+ int btc_override_hot_decidegc;
+ int btc_delay_ms;
+ bool btc_panic_if_cant_stop_chg;
};
/* user space parameter to limit usb current */
@@ -1311,20 +1318,6 @@
return 0;
}
- if (charging_disabled)
- return 0;
-
- /* check external charger first before the dc path */
- if (is_ext_charging(the_chip)) {
- val->intval = 1;
- return 0;
- }
-
- if (pm_is_chg_charge_dis(the_chip)) {
- val->intval = 0;
- return 0;
- }
-
if (the_chip->dc_present) {
val->intval = 1;
return 0;
@@ -2220,8 +2213,7 @@
}
if (usb_present) {
schedule_delayed_work(&chip->unplug_check_work,
- round_jiffies_relative(msecs_to_jiffies
- (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ msecs_to_jiffies(UNPLUG_CHECK_RAMP_MS));
pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
} else {
/* USB unplugged reset target current */
@@ -2300,8 +2292,7 @@
}
schedule_delayed_work(&chip->unplug_check_work,
- round_jiffies_relative(msecs_to_jiffies
- (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ msecs_to_jiffies(UNPLUG_CHECK_RAMP_MS));
power_supply_set_online(chip->ext_psy, dc_present);
power_supply_set_charge_type(chip->ext_psy,
@@ -2315,6 +2306,10 @@
*/
schedule_delayed_work(&chip->eoc_work, delay);
wake_lock(&chip->eoc_wake_lock);
+ if (chip->btc_override)
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
/* Update battery charging LEDs and user space battery info */
power_supply_changed(&chip->batt_psy);
}
@@ -2566,8 +2561,8 @@
USB_WALL_THRESHOLD_MA, usb_target_ma);
if (!delayed_work_pending(&chip->unplug_check_work))
schedule_delayed_work(&chip->unplug_check_work,
- round_jiffies_relative(msecs_to_jiffies
- (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ msecs_to_jiffies
+ (UNPLUG_CHECK_WAIT_PERIOD_MS));
} else {
handle_usb_insertion_removal(chip);
}
@@ -2766,6 +2761,7 @@
u8 reg_loop, active_path;
int rc, ibat, active_chg_plugged_in, usb_ma;
int chg_gone = 0;
+ bool ramp = false;
reg_loop = 0;
@@ -2863,15 +2859,20 @@
__pm8921_charger_vbus_draw(usb_ma);
pr_debug("usb_now=%d, usb_target = %d\n",
usb_ma, usb_target_ma);
+ ramp = true;
} else {
usb_target_ma = usb_ma;
}
}
check_again_later:
+ pr_debug("ramp: %d\n", ramp);
/* schedule to check again later */
- schedule_delayed_work(&chip->unplug_check_work,
- round_jiffies_relative(msecs_to_jiffies
- (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ if (ramp)
+ schedule_delayed_work(&chip->unplug_check_work,
+ msecs_to_jiffies(UNPLUG_CHECK_RAMP_MS));
+ else
+ schedule_delayed_work(&chip->unplug_check_work,
+ msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
}
static irqreturn_t loop_change_irq_handler(int irq, void *data)
@@ -2921,6 +2922,13 @@
round_jiffies_relative(msecs_to_jiffies
(EOC_CHECK_PERIOD_MS)));
}
+ if (high_transition
+ && chip->btc_override
+ && !delayed_work_pending(&chip->btc_override_work)) {
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
+ }
power_supply_changed(&chip->batt_psy);
bms_notify_check(chip);
return IRQ_HANDLED;
@@ -3085,8 +3093,7 @@
} else {
if (dc_present)
schedule_delayed_work(&chip->unplug_check_work,
- round_jiffies_relative(msecs_to_jiffies
- (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
power_supply_changed(&chip->dc_psy);
}
@@ -3414,6 +3421,155 @@
return CHG_FINISHED;
}
+#define COMP_OVERRIDE_HOT_BANK 6
+#define COMP_OVERRIDE_COLD_BANK 7
+#define COMP_OVERRIDE_BIT BIT(1)
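+/*
+ * Assumed encoding of the COMPARATOR_OVERRIDE write, inferred from the code
+ * below rather than a datasheet: bit7 enables the override write,
+ * bits[6:2] select the comparator bank, COMP_OVERRIDE_BIT flags the
+ * override as set, and bit0 is the forced comparator state.
+ */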
+static int pm_chg_override_cold(struct pm8921_chg_chip *chip, int flag)
+{
+ u8 val;
+ int rc = 0;
+
+ val = 0x80 | COMP_OVERRIDE_COLD_BANK << 2 | COMP_OVERRIDE_BIT;
+
+ if (flag)
+ val |= 0x01;
+
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0)
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+
+ pr_debug("btc cold = %d val = 0x%x\n", flag, val);
+ return rc;
+}
+
+static int pm_chg_override_hot(struct pm8921_chg_chip *chip, int flag)
+{
+ u8 val;
+ int rc = 0;
+
+ val = 0x80 | COMP_OVERRIDE_HOT_BANK << 2 | COMP_OVERRIDE_BIT;
+
+ if (flag)
+ val |= 0x01;
+
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0)
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+
+ pr_debug("btc hot = %d val = 0x%x\n", flag, val);
+ return rc;
+}
+
+static void __devinit pm8921_chg_btc_override_init(struct pm8921_chg_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+ u8 val;
+
+ val = COMP_OVERRIDE_HOT_BANK << 2;
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0) {
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+ goto cold_init;
+ }
+ rc = pm8xxx_readb(chip->dev->parent, COMPARATOR_OVERRIDE, &reg);
+ if (rc < 0) {
+ pr_err("Could not read bank %d of override rc = %d\n",
+ COMP_OVERRIDE_HOT_BANK, rc);
+ goto cold_init;
+ }
+ if ((reg & COMP_OVERRIDE_BIT) != COMP_OVERRIDE_BIT) {
+ /* for now override it as not hot */
+ rc = pm_chg_override_hot(chip, 0);
+ if (rc < 0)
+ pr_err("Could not override hot rc = %d\n", rc);
+ }
+
+cold_init:
+ val = COMP_OVERRIDE_COLD_BANK << 2;
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0) {
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+ return;
+ }
+ rc = pm8xxx_readb(chip->dev->parent, COMPARATOR_OVERRIDE, &reg);
+ if (rc < 0) {
+ pr_err("Could not read bank %d of override rc = %d\n",
+ COMP_OVERRIDE_COLD_BANK, rc);
+ return;
+ }
+ if ((reg & COMP_OVERRIDE_BIT) != COMP_OVERRIDE_BIT) {
+ /* for now override it as not cold */
+ rc = pm_chg_override_cold(chip, 0);
+ if (rc < 0)
+ pr_err("Could not override cold rc = %d\n", rc);
+ }
+}
+
+static void btc_override_worker(struct work_struct *work)
+{
+ int decidegc;
+ int temp;
+ int rc = 0;
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct pm8921_chg_chip *chip = container_of(dwork,
+ struct pm8921_chg_chip, btc_override_work);
+
+ if (!chip->btc_override) {
+ pr_err("called when not enabled\n");
+ return;
+ }
+
+ decidegc = get_prop_batt_temp(chip);
+
+ pr_debug("temp=%d\n", decidegc);
+
+ temp = pm_chg_get_rt_status(chip, BATTTEMP_HOT_IRQ);
+ if (temp) {
+ if (decidegc < chip->btc_override_hot_decidegc)
+ /* stop forcing batt hot */
+ rc = pm_chg_override_hot(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to hot comp\n");
+ } else {
+ if (decidegc >= chip->btc_override_hot_decidegc)
+ /* start forcing batt hot */
+ rc = pm_chg_override_hot(chip, 1);
+ if (rc && chip->btc_panic_if_cant_stop_chg)
+ panic("Couldnt override comps to stop chg\n");
+ }
+
+ temp = pm_chg_get_rt_status(chip, BATTTEMP_COLD_IRQ);
+ if (temp) {
+ if (decidegc > chip->btc_override_cold_decidegc)
+ /* stop forcing batt cold */
+ rc = pm_chg_override_cold(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to cold comp\n");
+ } else {
+ if (decidegc <= chip->btc_override_cold_decidegc)
+ /* start forcing batt cold */
+ rc = pm_chg_override_cold(chip, 1);
+ if (rc && chip->btc_panic_if_cant_stop_chg)
+ panic("Couldnt override comps to stop chg\n");
+ }
+
+ if ((is_dc_chg_plugged_in(the_chip) || is_usb_chg_plugged_in(the_chip))
+ && get_prop_batt_status(chip) != POWER_SUPPLY_STATUS_FULL) {
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
+ return;
+ }
+
+ rc = pm_chg_override_hot(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to hot comp\n");
+ rc = pm_chg_override_cold(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to cold comp\n");
+}
+
/**
* eoc_worker - internal function to check if battery EOC
* has happened
@@ -3634,8 +3790,7 @@
notify_usb_of_the_plugin_event(chip->usb_present);
if (chip->usb_present || chip->dc_present) {
schedule_delayed_work(&chip->unplug_check_work,
- round_jiffies_relative(msecs_to_jiffies
- (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
}
@@ -4505,6 +4660,16 @@
chip->led_src_config = pdata->led_src_config;
chip->has_dc_supply = pdata->has_dc_supply;
chip->battery_less_hardware = pdata->battery_less_hardware;
+ chip->btc_override = pdata->btc_override;
+ if (chip->btc_override) {
+ chip->btc_delay_ms = pdata->btc_delay_ms;
+ chip->btc_override_cold_decidegc
+ = pdata->btc_override_cold_degc * 10;
+ chip->btc_override_hot_decidegc
+ = pdata->btc_override_hot_degc * 10;
+ chip->btc_panic_if_cant_stop_chg
+ = pdata->btc_panic_if_cant_stop_chg;
+ }
if (chip->battery_less_hardware)
charging_disabled = 1;
@@ -4518,6 +4683,9 @@
goto free_chip;
}
+ if (chip->btc_override)
+ pm8921_chg_btc_override_init(chip);
+
chip->usb_psy.name = "usb",
chip->usb_psy.type = POWER_SUPPLY_TYPE_USB,
chip->usb_psy.supplied_to = pm_power_supplied_to,
@@ -4572,6 +4740,7 @@
INIT_WORK(&chip->battery_id_valid_work, battery_id_valid);
INIT_DELAYED_WORK(&chip->update_heartbeat_work, update_heartbeat);
+ INIT_DELAYED_WORK(&chip->btc_override_work, btc_override_worker);
rc = request_irqs(chip, pdev);
if (rc) {
@@ -4609,6 +4778,7 @@
free_irq:
free_irqs(chip);
unregister_batt:
+ wake_lock_destroy(&chip->eoc_wake_lock);
power_supply_unregister(&chip->batt_psy);
unregister_dc:
power_supply_unregister(&chip->dc_psy);
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 6623d81..0a072b1 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -33,7 +33,7 @@
#define BMS1_MODE_CTL 0X40
/* Coulomb counter clear registers */
#define BMS1_CC_DATA_CTL 0x42
-#define BMS1_CC_CLEAR_CTRL 0x43
+#define BMS1_CC_CLEAR_CTL 0x43
/* OCV limit registers */
#define BMS1_OCV_USE_LOW_LIMIT_THR0 0x48
#define BMS1_OCV_USE_LOW_LIMIT_THR1 0x49
@@ -484,12 +484,49 @@
pr_debug("last_good_ocv_uv = %d\n", raw->last_good_ocv_uv);
}
+#define CLEAR_CC BIT(7)
+#define CLEAR_SW_CC BIT(6)
+/**
+ * reset both cc and sw-cc.
+ * note: this should only ever be called from one thread
+ * or there may be a race condition where CC is never enabled
+ * again
+ */
+static void reset_cc(struct qpnp_bms_chip *chip)
+{
+ int rc;
+
+ pr_debug("resetting cc manually\n");
+ rc = qpnp_masked_write(chip, BMS1_CC_CLEAR_CTL,
+ CLEAR_CC | CLEAR_SW_CC,
+ CLEAR_CC | CLEAR_SW_CC);
+ if (rc)
+ pr_err("cc reset failed: %d\n", rc);
+
+ /* wait for 100us for cc to reset */
+ udelay(100);
+
+ rc = qpnp_masked_write(chip, BMS1_CC_CLEAR_CTL,
+ CLEAR_CC | CLEAR_SW_CC, 0);
+ if (rc)
+ pr_err("cc reenable failed: %d\n", rc);
+}
+
static int read_soc_params_raw(struct qpnp_bms_chip *chip,
struct raw_soc_params *raw)
{
int rc;
mutex_lock(&chip->bms_output_lock);
+
+ if (chip->prev_last_good_ocv_raw == 0) {
+ /* software workaround for BMS 1.0
+ * The coulomb counter does not reset upon PON, so reset it
+ * manually upon probe. */
+ if (chip->revision1 == 0 && chip->revision2 == 0)
+ reset_cc(chip);
+ }
+
lock_output_data(chip);
rc = qpnp_read_wrapper(chip, (u8 *)&raw->last_good_ocv_raw,
@@ -1254,6 +1291,34 @@
return soc;
}
+static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
+{
+ int rc, vbat_uv;
+ struct qpnp_vadc_result result;
+
+ rc = qpnp_vadc_read(VBAT_SNS, &result);
+ if (rc) {
+ pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
+ VBAT_SNS, rc);
+ return rc;
+ }
+
+ vbat_uv = (int)result.physical;
+ if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
+ pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
+ vbat_uv, chip->v_cutoff_uv);
+ return 1;
+ } else if (soc > 0 && vbat_uv < chip->v_cutoff_uv) {
+ pr_debug("forcing soc to 0, vbat (%d) < cutoff (%d)\n",
+ vbat_uv, chip->v_cutoff_uv);
+ return 0;
+ } else {
+ pr_debug("not clamping, using soc = %d, vbat = %d and cutoff = %d\n",
+ soc, vbat_uv, chip->v_cutoff_uv);
+ return soc;
+ }
+}
+
static int calculate_state_of_charge(struct qpnp_bms_chip *chip,
struct raw_soc_params *raw,
int batt_temp)
@@ -1348,6 +1413,11 @@
pr_debug("SOC before adjustment = %d\n", soc);
new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+ /* clamp soc due to BMS HW inaccuracies in pm8941v2.0 */
+ if (chip->revision1 == 0 && chip->revision2 == 0)
+ new_calculated_soc = clamp_soc_based_on_voltage(chip,
+ new_calculated_soc);
+
if (new_calculated_soc != chip->calculated_soc
&& chip->bms_psy.name != NULL) {
power_supply_changed(&chip->bms_psy);
@@ -1947,6 +2017,7 @@
pr_err("Error reading version register %d\n", rc);
goto error_read;
}
+ pr_debug("BMS version: %hhu.%hhu\n", chip->revision2, chip->revision1);
rc = qpnp_vadc_is_ready();
if (rc) {
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index b5559db..a0d84df 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -71,6 +71,7 @@
#define CHGR_CHG_WDOG_EN 0x65
#define CHGR_USB_IUSB_MAX 0x44
#define CHGR_USB_USB_SUSP 0x47
+#define CHGR_USB_USB_OTG_CTL 0x48
#define CHGR_USB_ENUM_T_STOP 0x4E
#define CHGR_CHG_TEMP_THRESH 0x66
#define CHGR_BAT_IF_PRES_STATUS 0x08
@@ -271,6 +272,27 @@
return 0;
}
+#define USB_OTG_EN_BIT BIT(0)
+static int
+qpnp_chg_is_otg_en_set(struct qpnp_chg_chip *chip)
+{
+ u8 usb_otg_en;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &usb_otg_en,
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+ 1);
+
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL, rc);
+ return rc;
+ }
+ pr_debug("usb otg en 0x%x\n", usb_otg_en);
+
+ return (usb_otg_en & USB_OTG_EN_BIT) ? 1 : 0;
+}
+
#define USB_VALID_BIT BIT(7)
static int
qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
@@ -380,10 +402,16 @@
qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
- int usb_present;
+ int usb_present, host_mode;
usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
- pr_debug("usbin-valid triggered: %d\n", usb_present);
+ host_mode = qpnp_chg_is_otg_en_set(chip);
+ pr_debug("usbin-valid triggered: %d host_mode: %d\n",
+ usb_present, host_mode);
+
+ /* In host mode notifications come from the USB supply */
+ if (host_mode)
+ return IRQ_HANDLED;
if (chip->usb_present ^ usb_present) {
chip->usb_present = usb_present;
@@ -436,6 +464,80 @@
return 0;
}
+static int
+qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
+{
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
+ CHGR_CHG_EN,
+ enable ? CHGR_CHG_EN : 0, 1);
+}
+
+static int
+qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
+{
+ /* This bit forces the charger to run off of the battery rather
+ * than a connected charger */
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
+ CHGR_ON_BAT_FORCE_BIT,
+ disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
+}
+
+static
+int switch_usb_to_charge_mode(struct qpnp_chg_chip *chip)
+{
+ int rc;
+
+ pr_debug("switch to charge mode\n");
+ if (!qpnp_chg_is_otg_en_set(chip))
+ return 0;
+
+ /* enable usb ovp fet */
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+ USB_OTG_EN_BIT,
+ 0, 1);
+ if (rc) {
+ pr_err("Failed to turn on usb ovp rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
+ if (rc) {
+ pr_err("Failed re-enable charging rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static
+int switch_usb_to_host_mode(struct qpnp_chg_chip *chip)
+{
+ int rc;
+
+ pr_debug("switch to host mode\n");
+ if (qpnp_chg_is_otg_en_set(chip))
+ return 0;
+
+ rc = qpnp_chg_force_run_on_batt(chip, 1);
+ if (rc) {
+ pr_err("Failed to disable charging rc = %d\n", rc);
+ return rc;
+ }
+
+ /* force usb ovp fet off */
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+ USB_OTG_EN_BIT,
+ USB_OTG_EN_BIT, 1);
+ if (rc) {
+ pr_err("Failed to turn off usb ovp rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static enum power_supply_property pm_power_props_mains[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -682,6 +784,21 @@
chip->bms_psy = power_supply_get_by_name("bms");
chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_SCOPE, &ret);
+ if (ret.intval) {
+ if ((ret.intval == POWER_SUPPLY_SCOPE_SYSTEM)
+ && !qpnp_chg_is_otg_en_set(chip)) {
+ switch_usb_to_host_mode(chip);
+ return;
+ }
+ if ((ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
+ && qpnp_chg_is_otg_en_set(chip)) {
+ switch_usb_to_charge_mode(chip);
+ return;
+ }
+ }
+
+ chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_ONLINE, &ret);
if (ret.intval && qpnp_chg_is_usb_chg_plugged_in(chip)) {
@@ -702,24 +819,6 @@
}
static int
-qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
-{
- /* This bit forces the charger to run off of the battery rather
- * than a connected charger */
- return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
- CHGR_ON_BAT_FORCE_BIT,
- disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
-}
-
-static int
-qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
-{
- return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
- CHGR_CHG_EN,
- enable ? CHGR_CHG_EN : 0, 1);
-}
-
-static int
qpnp_batt_power_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
diff --git a/drivers/regulator/onsemi-ncp6335d.c b/drivers/regulator/onsemi-ncp6335d.c
index a0c90f0..4574862 100644
--- a/drivers/regulator/onsemi-ncp6335d.c
+++ b/drivers/regulator/onsemi-ncp6335d.c
@@ -369,8 +369,16 @@
.remove = __devexit_p(ncp6335d_regulator_remove),
.id_table = ncp6335d_id,
};
+static int __init ncp6335d_regulator_init(void)
+{
+ return i2c_add_driver(&ncp6335d_regulator_driver);
+}
+subsys_initcall(ncp6335d_regulator_init);
-module_i2c_driver(ncp6335d_regulator_driver);
-
+static void __exit ncp6335d_regulator_exit(void)
+{
+ i2c_del_driver(&ncp6335d_regulator_driver);
+}
+module_exit(ncp6335d_regulator_exit);
MODULE_DESCRIPTION("OnSemi-NCP6335D regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 02e1952..78e8a6f 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -172,13 +172,16 @@
u8 *tid, struct completion *done)
{
struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ mutex_lock(&ctrl->m_ctrl);
if (ctrl->last_tid <= 255) {
ctrl->txnt = krealloc(ctrl->txnt,
(ctrl->last_tid + 1) *
sizeof(struct slim_msg_txn *),
GFP_KERNEL);
- if (!ctrl->txnt)
+ if (!ctrl->txnt) {
+ mutex_unlock(&ctrl->m_ctrl);
return -ENOMEM;
+ }
dev->msg_cnt = ctrl->last_tid;
ctrl->last_tid++;
} else {
@@ -190,6 +193,7 @@
}
if (i >= 256) {
dev_err(&ctrl->dev, "out of TID");
+ mutex_unlock(&ctrl->m_ctrl);
return -ENOMEM;
}
}
@@ -197,6 +201,7 @@
txn->tid = dev->msg_cnt;
txn->comp = done;
*tid = dev->msg_cnt;
+ mutex_unlock(&ctrl->m_ctrl);
return 0;
}
static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
@@ -369,6 +374,9 @@
pr_err("connect/disc :0x%x, tid:%d timed out", txn->mc,
txn->tid);
ret = -ETIMEDOUT;
+ mutex_lock(&ctrl->m_ctrl);
+ ctrl->txnt[txn->tid] = NULL;
+ mutex_unlock(&ctrl->m_ctrl);
} else {
ret = txn->ec;
}
@@ -394,6 +402,9 @@
pr_err("master req:0x%x, tid:%d timed out", txn->mc,
txn->tid);
ret = -ETIMEDOUT;
+ mutex_lock(&ctrl->m_ctrl);
+ ctrl->txnt[txn->tid] = NULL;
+ mutex_unlock(&ctrl->m_ctrl);
} else {
ret = txn->ec;
}
@@ -526,10 +537,8 @@
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.la = SLIM_LA_MGR;
txn.ec = 0;
- mutex_lock(&ctrl->m_ctrl);
ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
if (ret) {
- mutex_unlock(&ctrl->m_ctrl);
return ret;
}
memcpy(&wbuf[1], ea, elen);
@@ -543,7 +552,6 @@
ret = -ENXIO;
else if (!ret)
*laddr = txn.la;
- mutex_unlock(&ctrl->m_ctrl);
return ret;
}
@@ -606,20 +614,33 @@
}
if (mc == SLIM_USR_MC_ADDR_REPLY &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
- struct slim_msg_txn *txn = dev->ctrl.txnt[buf[3]];
+ struct slim_msg_txn *txn;
u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
- if (!txn)
+ mutex_lock(&dev->ctrl.m_ctrl);
+ txn = dev->ctrl.txnt[buf[3]];
+ if (!txn) {
+ pr_err("LADDR response after timeout, tid:0x%x",
+ buf[3]);
+ mutex_unlock(&dev->ctrl.m_ctrl);
return;
+ }
if (memcmp(&buf[4], failed_ea, 6))
txn->la = buf[10];
dev->ctrl.txnt[buf[3]] = NULL;
+ mutex_unlock(&dev->ctrl.m_ctrl);
complete(txn->comp);
}
if (mc == SLIM_USR_MC_GENERIC_ACK &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
- struct slim_msg_txn *txn = dev->ctrl.txnt[buf[3]];
- if (!txn)
+ struct slim_msg_txn *txn;
+ mutex_lock(&dev->ctrl.m_ctrl);
+ txn = dev->ctrl.txnt[buf[3]];
+ if (!txn) {
+ pr_err("ACK received after timeout, tid:0x%x",
+ buf[3]);
+ mutex_unlock(&dev->ctrl.m_ctrl);
return;
+ }
dev_dbg(dev->dev, "got response:tid:%d, response:0x%x",
(int)buf[3], buf[4]);
if (!(buf[4] & MSM_SAT_SUCCSS)) {
@@ -628,6 +649,7 @@
txn->ec = -EIO;
}
dev->ctrl.txnt[buf[3]] = NULL;
+ mutex_unlock(&dev->ctrl.m_ctrl);
complete(txn->comp);
}
}
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index d5d6e0c..c320e46 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1216,7 +1216,7 @@
if (flow != SLIM_SRC)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
if (slc->state == SLIM_CH_FREE) {
ret = -ENOTCONN;
@@ -1238,7 +1238,7 @@
slc->srch = srch;
connect_src_err:
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_src);
@@ -1265,7 +1265,7 @@
if (!sinkh || !nsink)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
/*
* Once channel is removed, its ports can be considered disconnected
@@ -1303,7 +1303,7 @@
slc->nsink += nsink;
connect_sink_err:
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_sink);
@@ -1320,11 +1320,11 @@
struct slim_controller *ctrl = sb->ctrl;
int i;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
for (i = 0; i < nph; i++)
disconnect_port_ch(ctrl, ph[i]);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return 0;
}
EXPORT_SYMBOL_GPL(slim_disconnect_ports);
@@ -1660,13 +1660,13 @@
if (!ctrl)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
for (i = 0; i < ctrl->nchans; i++) {
if (ctrl->chans[i].state == SLIM_CH_FREE)
break;
}
if (i >= ctrl->nchans) {
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return -EXFULL;
}
*chanh = i;
@@ -1674,7 +1674,7 @@
ctrl->chans[i].state = SLIM_CH_ALLOCATED;
ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return 0;
}
EXPORT_SYMBOL_GPL(slim_alloc_ch);
@@ -1698,7 +1698,7 @@
int ret = 0;
if (!ctrl || !chanh)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
/* start with modulo number */
i = ch % ctrl->nchans;
@@ -1729,7 +1729,7 @@
i = (i + 1) % ctrl->nchans;
}
query_out:
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
ch, i, ctrl->chans[i].ref, ret);
return ret;
@@ -1751,26 +1751,26 @@
if (!ctrl)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
if (slc->state == SLIM_CH_FREE) {
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return -ENOTCONN;
}
if (slc->ref > 1) {
slc->ref--;
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
slc->chan, chanh, slc->ref);
return 0;
}
if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return -EISCONN;
}
slc->ref--;
slc->state = SLIM_CH_FREE;
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
slc->chan, chanh, slc->ref);
return 0;
@@ -1812,7 +1812,7 @@
if (!ctrl || !chanh || !prop || !nchan)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
for (i = 0; i < nchan; i++) {
u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
struct slim_ich *slc = &ctrl->chans[chan];
@@ -1856,7 +1856,7 @@
}
err_define_ch:
dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_define_ch);
@@ -2607,7 +2607,6 @@
struct slim_pending_ch *pch;
mutex_lock(&ctrl->sched.m_reconf);
- mutex_lock(&ctrl->m_ctrl);
/*
* If there are no pending changes from this client, avoid sending
* the reconfiguration sequence
@@ -2631,7 +2630,6 @@
}
}
if (list_empty(&sb->mark_removal)) {
- mutex_unlock(&ctrl->m_ctrl);
mutex_unlock(&ctrl->sched.m_reconf);
pr_info("SLIM_CL: skip reconfig sequence");
return 0;
@@ -2820,7 +2818,6 @@
ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
sb->cur_msgsl = sb->pending_msgsl;
slim_chan_changes(sb, false);
- mutex_unlock(&ctrl->m_ctrl);
mutex_unlock(&ctrl->sched.m_reconf);
return 0;
}
@@ -2828,7 +2825,6 @@
revert_reconfig:
/* Revert channel changes */
slim_chan_changes(sb, true);
- mutex_unlock(&ctrl->m_ctrl);
mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
@@ -2876,7 +2872,6 @@
return -EINVAL;
mutex_lock(&sb->sldev_reconf);
- mutex_lock(&ctrl->m_ctrl);
do {
struct slim_pending_ch *pch;
u8 add_mark_removal = true;
@@ -2935,7 +2930,6 @@
if (nchan < SLIM_GRP_TO_NCHAN(chanh))
chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
} while (nchan < SLIM_GRP_TO_NCHAN(chanh));
- mutex_unlock(&ctrl->m_ctrl);
if (!ret && commit == true)
ret = slim_reconfigure_now(sb);
mutex_unlock(&sb->sldev_reconf);
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index c67b75b..7b8788d 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -62,65 +62,6 @@
printk(x); \
} while (0)
-static int nr_free_zone_mtype_pages(struct zone *zone, int mtype)
-{
- int order;
- int sum = 0;
-
- for (order = 0; order < MAX_ORDER; ++order) {
- unsigned long freecount = 0;
- struct free_area *area;
- struct list_head *curr;
-
- area = &(zone->free_area[order]);
-
- list_for_each(curr, &area->free_list[mtype])
- freecount++;
-
- sum += freecount << order;
- }
- return sum;
-}
-
-static int nr_free_zone_pages(struct zone *zone, gfp_t gfp_mask)
-{
- int sum = 0;
- int mtype = allocflags_to_migratetype(gfp_mask);
- int i = 0;
- int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
-
- sum = nr_free_zone_mtype_pages(zone, mtype);
-
- /*
- * Also count the fallback pages
- */
- for (i = 0;; i++) {
- int fallbacktype = mtype_fallbacks[i];
- sum += nr_free_zone_mtype_pages(zone, fallbacktype);
-
- if (fallbacktype == MIGRATE_RESERVE)
- break;
- }
-
- return sum;
-}
-
-static int nr_free_pages(gfp_t gfp_mask)
-{
- struct zoneref *z;
- struct zone *zone;
- int sum = 0;
-
- struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
-
- for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
- sum += nr_free_zone_pages(zone, gfp_mask);
- }
-
- return sum;
-}
-
-
static int test_task_flag(struct task_struct *p, int flag)
{
struct task_struct *t = p;
@@ -152,15 +93,6 @@
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
- if (sc->nr_to_scan > 0 && other_free > other_file) {
- /*
- * If the number of free pages is going to affect the decision
- * of which process is selected then ensure only free pages
- * which can satisfy the request are considered.
- */
- other_free = nr_free_pages(sc->gfp_mask);
- }
-
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index cc9ffaa..3de990c 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -18,6 +18,8 @@
* This file is based on msm_serial.c, originally
* Written by Robert Love <rlove@google.com> */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#if defined(CONFIG_SERIAL_MSM_HSL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
@@ -62,6 +64,7 @@
unsigned int old_snap_state;
unsigned int ver_id;
int tx_timeout;
+ struct mutex clk_mutex;
};
#define UARTDM_VERSION_11_13 0
@@ -188,11 +191,12 @@
unsigned long flags;
int ret = 0;
- ret = clk_set_rate(msm_hsl_port->clk, 7372800);
- if (!ret)
+ ret = clk_set_rate(msm_hsl_port->clk, port->uartclk);
+ if (!ret) {
clk_en(port, 1);
- else {
- pr_err("%s(): Error: Setting the clock rate\n", __func__);
+ } else {
+ pr_err("Error: setting uartclk rate as %u\n",
+ port->uartclk);
return -EINVAL;
}
@@ -221,11 +225,12 @@
unsigned long flags;
int ret = 0;
- ret = clk_set_rate(msm_hsl_port->clk, 7372800);
- if (!ret)
+ ret = clk_set_rate(msm_hsl_port->clk, port->uartclk);
+ if (!ret) {
clk_en(port, 1);
- else {
- pr_err("%s(): Error setting clk rate\n", __func__);
+ } else {
+ pr_err("Error setting uartclk rate as %u\n",
+ port->uartclk);
return -EINVAL;
}
@@ -258,8 +263,7 @@
&loopback_enable_fops);
if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
- pr_err("%s(): Cannot create loopback.%d debug entry",
- __func__, id);
+ pr_err("Cannot create loopback.%d debug entry", id);
}
static void msm_hsl_stop_tx(struct uart_port *port)
{
@@ -561,7 +565,13 @@
msm_hsl_write(port, STOP_BREAK, regmap[vid][UARTDM_CR]);
}
-static void msm_hsl_set_baud_rate(struct uart_port *port, unsigned int baud)
+/**
+ * msm_hsl_set_baud_rate: set requested baud rate
+ * @port: uart port
+ * @baud: baud rate to set (in bps)
+ */
+static void msm_hsl_set_baud_rate(struct uart_port *port,
+ unsigned int baud)
{
unsigned int baud_code, rxstale, watermark;
unsigned int data;
@@ -625,18 +635,54 @@
baud_code = UARTDM_CSR_115200;
rxstale = 31;
break;
- default: /* 115200 baud rate */
+ case 4000000:
+ case 3686400:
+ case 3200000:
+ case 3500000:
+ case 3000000:
+ case 2500000:
+ case 1500000:
+ case 1152000:
+ case 1000000:
+ case 921600:
+ baud_code = 0xff;
+ rxstale = 31;
+ break;
+ default: /* 115200 baud rate */
baud_code = UARTDM_CSR_28800;
rxstale = 31;
break;
}
- /* Set timeout to be ~600x the character transmit time */
- msm_hsl_port->tx_timeout = (1000000000 / baud) * 6;
-
vid = msm_hsl_port->ver_id;
msm_hsl_write(port, baud_code, regmap[vid][UARTDM_CSR]);
+ /*
+ * The uart baud rate depends on both the CSR and the MND values.
+ * The CSR is updated here before clk_set_rate() updates the MND
+ * values, hence a dsb is required in between.
+ */
+ mb();
+
+ /*
+ * For requested baud rates higher than 460800, calculate the
+ * required uart clock frequency and set it.
+ */
+ if (baud > 460800) {
+
+ port->uartclk = baud * 16;
+ if (clk_set_rate(msm_hsl_port->clk, port->uartclk)) {
+ pr_err("%s(): Error setting uartclk rate %u\n",
+ __func__, port->uartclk);
+ WARN_ON(1);
+ return;
+ }
+ }
+
+ /* Set timeout to be ~600x the character transmit time */
+ msm_hsl_port->tx_timeout = (1000000000 / baud) * 6;
+
/* RX stale watermark */
watermark = UARTDM_IPR_STALE_LSB_BMSK & rxstale;
watermark |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
@@ -710,15 +756,15 @@
ret = gpio_request(pdata->uart_tx_gpio,
"UART_TX_GPIO");
if (unlikely(ret)) {
- pr_err("%s: gpio request failed for:%d\n",
- __func__, pdata->uart_tx_gpio);
+ pr_err("gpio request failed for:%d\n",
+ pdata->uart_tx_gpio);
return ret;
}
ret = gpio_request(pdata->uart_rx_gpio, "UART_RX_GPIO");
if (unlikely(ret)) {
- pr_err("%s: gpio request failed for:%d\n",
- __func__, pdata->uart_rx_gpio);
+ pr_err("gpio request failed for:%d\n",
+ pdata->uart_rx_gpio);
gpio_free(pdata->uart_tx_gpio);
return ret;
}
@@ -756,7 +802,7 @@
ret = request_irq(port->irq, msm_hsl_irq, IRQF_TRIGGER_HIGH,
msm_hsl_port->name, port);
if (unlikely(ret)) {
- printk(KERN_ERR "%s: failed to request_irq\n", __func__);
+ pr_err("failed to request_irq\n");
return ret;
}
return 0;
@@ -792,17 +838,28 @@
struct ktermios *termios,
struct ktermios *old)
{
- unsigned long flags;
unsigned int baud, mr;
unsigned int vid;
+ struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
if (!termios->c_cflag)
return;
- spin_lock_irqsave(&port->lock, flags);
+ mutex_lock(&msm_hsl_port->clk_mutex);
- /* calculate and set baud rate */
- baud = uart_get_baud_rate(port, termios, old, 300, 460800);
+ /*
+ * Calculate and set baud rate
+ * 300 is the minimum and 4 Mbps is the maximum baud rate
+ * supported by the driver.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 200, 4000000);
+
+ /*
+ * 3.2 Mbps is not available as a standard baud rate in the TTY/serial
+ * core, so a requested 200 baud is mapped to 3.2 Mbps.
+ */
+ if (baud == 200)
+ baud = 3200000;
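+ /*
+ * For example (usage assumption): a userspace request for 200 baud,
+ * e.g. "stty -F /dev/ttyHSL0 200", therefore selects 3.2 Mbps.
+ */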
msm_hsl_set_baud_rate(port, baud);
@@ -865,7 +922,7 @@
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ mutex_unlock(&msm_hsl_port->clk_mutex);
}
static const char *msm_hsl_type(struct uart_port *port)
@@ -913,14 +970,14 @@
if (!uart_resource)
uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!uart_resource)) {
- pr_err("%s: can't get uartdm resource\n", __func__);
+ pr_err("can't get uartdm resource\n");
return -ENXIO;
}
size = uart_resource->end - uart_resource->start + 1;
if (unlikely(!request_mem_region(port->mapbase, size,
"msm_serial_hsl"))) {
- pr_err("%s: can't get mem region for uartdm\n", __func__);
+ pr_err("can't get mem region for uartdm\n");
return -EBUSY;
}
@@ -938,7 +995,7 @@
gsbi_resource = platform_get_resource(pdev,
IORESOURCE_MEM, 1);
if (unlikely(!gsbi_resource)) {
- pr_err("%s: can't get gsbi resource\n", __func__);
+ pr_err("can't get gsbi resource\n");
return -ENXIO;
}
@@ -991,22 +1048,20 @@
switch (state) {
case 0:
- ret = clk_set_rate(msm_hsl_port->clk, 7372800);
+ ret = clk_set_rate(msm_hsl_port->clk, port->uartclk);
if (ret)
- pr_err("%s(): Error setting UART clock rate\n",
- __func__);
+ pr_err("Error setting UART clock rate to %u\n",
+ port->uartclk);
clk_en(port, 1);
break;
case 3:
clk_en(port, 0);
ret = clk_set_rate(msm_hsl_port->clk, 0);
if (ret)
- pr_err("%s(): Error setting UART clock rate to zero.\n",
- __func__);
+ pr_err("Error setting UART clock rate to zero.\n");
break;
default:
- pr_err("%s(): msm_serial_hsl: Unknown PM state %d\n",
- __func__, state);
+ pr_err("Unknown PM state %d\n", state);
}
}
@@ -1093,15 +1148,15 @@
msm_hsl_console_state[6] = rxfs;
msm_hsl_console_state[7] = con_state;
- pr_info("%s(): Timeout: %d uS\n", __func__, msm_hsl_port->tx_timeout);
- pr_info("%s(): SR: %08x\n", __func__, sr);
- pr_info("%s(): ISR: %08x\n", __func__, isr);
- pr_info("%s(): MR1: %08x\n", __func__, mr1);
- pr_info("%s(): MR2: %08x\n", __func__, mr2);
- pr_info("%s(): NCF: %08x\n", __func__, ncf);
- pr_info("%s(): TXFS: %08x\n", __func__, txfs);
- pr_info("%s(): RXFS: %08x\n", __func__, rxfs);
- pr_info("%s(): Console state: %d\n", __func__, con_state);
+ pr_info("Timeout: %d uS\n", msm_hsl_port->tx_timeout);
+ pr_info("SR: %08x\n", sr);
+ pr_info("ISR: %08x\n", isr);
+ pr_info("MR1: %08x\n", mr1);
+ pr_info("MR2: %08x\n", mr2);
+ pr_info("NCF: %08x\n", ncf);
+ pr_info("TXFS: %08x\n", txfs);
+ pr_info("RXFS: %08x\n", rxfs);
+ pr_info("Console state: %d\n", con_state);
}
/*
@@ -1228,8 +1283,7 @@
msm_hsl_write(port, 1, regmap[vid][UARTDM_NCF_TX]);
msm_hsl_read(port, regmap[vid][UARTDM_NCF_TX]);
- printk(KERN_INFO "msm_serial_hsl: console setup on port #%d\n",
- port->line);
+ pr_info("console setup on port #%d\n", port->line);
return ret;
}
@@ -1302,9 +1356,9 @@
switch (enable) {
case 0:
- pr_debug("%s(): Calling stop_console\n", __func__);
+ pr_debug("Calling stop_console\n");
console_stop(port->cons);
- pr_debug("%s(): Calling unregister_console\n", __func__);
+ pr_debug("Calling unregister_console\n");
unregister_console(port->cons);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1316,7 +1370,7 @@
msm_hsl_power(port, 3, 1);
break;
case 1:
- pr_debug("%s(): Calling register_console\n", __func__);
+ pr_debug("Calling register_console\n");
/*
* Disable UART Core clk
* 0 - to enable the UART clock
@@ -1379,11 +1433,11 @@
if (unlikely(line < 0 || line >= UART_NR))
return -ENXIO;
- printk(KERN_INFO "msm_serial_hsl: detected port #%d (ttyHSL%d)\n",
- pdev->id, line);
+ pr_info("detected port #%d (ttyHSL%d)\n", pdev->id, line);
port = get_port_from_line(line);
port->dev = &pdev->dev;
+ port->uartclk = 7372800;
msm_hsl_port = UART_TO_MSM(port);
match = of_match_device(msm_hsl_match_table, &pdev->dev);
@@ -1406,11 +1460,11 @@
msm_hsl_port->is_uartdm = 0;
if (unlikely(IS_ERR(msm_hsl_port->clk))) {
- printk(KERN_ERR "%s: Error getting clk\n", __func__);
+ pr_err("Error getting clk\n");
return PTR_ERR(msm_hsl_port->clk);
}
if (unlikely(IS_ERR(msm_hsl_port->pclk))) {
- printk(KERN_ERR "%s: Error getting pclk\n", __func__);
+ pr_err("Error getting pclk\n");
return PTR_ERR(msm_hsl_port->pclk);
}
@@ -1420,14 +1474,14 @@
if (!uart_resource)
uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!uart_resource)) {
- printk(KERN_ERR "getting uartdm_resource failed\n");
+ pr_err("getting uartdm_resource failed\n");
return -ENXIO;
}
port->mapbase = uart_resource->start;
port->irq = platform_get_irq(pdev, 0);
if (unlikely((int)port->irq < 0)) {
- printk(KERN_ERR "%s: getting irq failed\n", __func__);
+ pr_err("getting irq failed\n");
return -ENXIO;
}
@@ -1437,9 +1491,10 @@
#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
ret = device_create_file(&pdev->dev, &dev_attr_console);
if (unlikely(ret))
- pr_err("%s():Can't create console attribute\n", __func__);
+ pr_err("Can't create console attribute\n");
#endif
msm_hsl_debugfs_init(msm_hsl_port, get_line(pdev));
+ mutex_init(&msm_hsl_port->clk_mutex);
/* Temporarily increase the refcount on the GSBI clock to avoid a race
* condition with the earlyprintk handover mechanism.
@@ -1466,6 +1521,7 @@
device_set_wakeup_capable(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
+ mutex_destroy(&msm_hsl_port->clk_mutex);
uart_remove_one_port(&msm_hsl_uart_driver, port);
clk_put(msm_hsl_port->pclk);
@@ -1568,13 +1624,13 @@
debug_base = debugfs_create_dir("msm_serial_hsl", NULL);
if (IS_ERR_OR_NULL(debug_base))
- pr_err("%s():Cannot create debugfs dir\n", __func__);
+ pr_err("Cannot create debugfs dir\n");
ret = platform_driver_register(&msm_hsl_platform_driver);
if (unlikely(ret))
uart_unregister_driver(&msm_hsl_uart_driver);
- printk(KERN_INFO "msm_serial_hsl: driver initialized\n");
+ pr_info("driver initialized\n");
return ret;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 7430e5a..d416904 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -114,6 +114,12 @@
#define ALT_INTERRUPT_EN_REG (QSCRATCH_REG_OFFSET + 0x20)
#define HS_PHY_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x24)
#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
+#define SS_CR_PROTOCOL_DATA_IN_REG (QSCRATCH_REG_OFFSET + 0x3C)
+#define SS_CR_PROTOCOL_DATA_OUT_REG (QSCRATCH_REG_OFFSET + 0x40)
+#define SS_CR_PROTOCOL_CAP_ADDR_REG (QSCRATCH_REG_OFFSET + 0x44)
+#define SS_CR_PROTOCOL_CAP_DATA_REG (QSCRATCH_REG_OFFSET + 0x48)
+#define SS_CR_PROTOCOL_READ_REG (QSCRATCH_REG_OFFSET + 0x4C)
+#define SS_CR_PROTOCOL_WRITE_REG (QSCRATCH_REG_OFFSET + 0x50)
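+/*
+ * The SS_CR_PROTOCOL_* registers form the SSPHY control-register access
+ * port: the address/data written to DATA_IN are latched by strobing
+ * CAP_ADDR or CAP_DATA, and the READ/WRITE strobes start the actual PHY
+ * register transaction (see dwc3_msm_ssusb_read_phycreg/_write_phycreg
+ * below).
+ */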
struct dwc3_msm_req_complete {
struct list_head list_item;
@@ -280,6 +286,54 @@
}
/**
+ *
+ * Write SSPHY register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @addr - SSPHY address to write.
+ * @val - value to write.
+ *
+ */
+static void dwc3_msm_ssusb_write_phycreg(void *base, u32 addr, u32 val)
+{
+ iowrite32(addr, base + SS_CR_PROTOCOL_DATA_IN_REG);
+ iowrite32(0x1, base + SS_CR_PROTOCOL_CAP_ADDR_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_CAP_ADDR_REG))
+ cpu_relax();
+
+ iowrite32(val, base + SS_CR_PROTOCOL_DATA_IN_REG);
+ iowrite32(0x1, base + SS_CR_PROTOCOL_CAP_DATA_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_CAP_DATA_REG))
+ cpu_relax();
+
+ iowrite32(0x1, base + SS_CR_PROTOCOL_WRITE_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_WRITE_REG))
+ cpu_relax();
+}
+
+/**
+ *
+ * Read SSPHY register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @addr - SSPHY address to read.
+ *
+ */
+static u32 dwc3_msm_ssusb_read_phycreg(void *base, u32 addr)
+{
+ iowrite32(addr, base + SS_CR_PROTOCOL_DATA_IN_REG);
+ iowrite32(0x1, base + SS_CR_PROTOCOL_CAP_ADDR_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_CAP_ADDR_REG))
+ cpu_relax();
+
+ iowrite32(0x1, base + SS_CR_PROTOCOL_READ_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_READ_REG))
+ cpu_relax();
+
+ return ioread32(base + SS_CR_PROTOCOL_DATA_OUT_REG);
+}
+
+/**
* Return DBM EP number according to usb endpoint number.
*
*/
@@ -1608,6 +1662,7 @@
int ret = 0;
int len = 0;
u32 tmp[3];
+ u32 data = 0;
msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
if (!msm) {
@@ -1851,6 +1906,20 @@
/* Disable (bypass) VBUS and ID filters */
dwc3_msm_write_reg(msm->base, QSCRATCH_GENERAL_CFG, 0x78);
+ /*
+ * WORKAROUND: There is an SSPHY suspend bug due to which USB enumerates
+ * in HS mode instead of SS mode. Work around it by asserting
+ * LANE0.TX_ALT_BLOCK.EN_ALT_BUS so that TX uses the alt bus mode
+ */
+ data = dwc3_msm_ssusb_read_phycreg(msm->base, 0x102D);
+ data |= (1 << 7);
+ dwc3_msm_ssusb_write_phycreg(msm->base, 0x102D, data);
+
+ data = dwc3_msm_ssusb_read_phycreg(msm->base, 0x1010);
+ data &= ~0xFF0;
+ data |= 0x40;
+ dwc3_msm_ssusb_write_phycreg(msm->base, 0x1010, data);
+
pm_runtime_set_active(msm->dev);
pm_runtime_enable(msm->dev);
diff --git a/drivers/usb/gadget/f_qc_rndis.c b/drivers/usb/gadget/f_qc_rndis.c
index f86bf12..82ef2a8 100644
--- a/drivers/usb/gadget/f_qc_rndis.c
+++ b/drivers/usb/gadget/f_qc_rndis.c
@@ -85,6 +85,7 @@
u8 ethaddr[ETH_ALEN];
u32 vendorID;
u8 max_pkt_per_xfer;
+ u32 max_pkt_size;
const char *manufacturer;
int config;
atomic_t ioctl_excl;
@@ -125,6 +126,7 @@
#define RNDIS_QC_IOCTL_MAGIC 'i'
#define RNDIS_QC_GET_MAX_PKT_PER_XFER _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+#define RNDIS_QC_GET_MAX_PKT_SIZE _IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)
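+/*
+ * Usage sketch (the device node name is an assumption, not shown here):
+ * userspace issues ioctl(fd, RNDIS_QC_GET_MAX_PKT_SIZE, &size) on the
+ * rndis control device to read the MaxTransferSize negotiated in
+ * REMOTE_NDIS_INITIALIZE_MSG (see rndis_qc_command_complete below).
+ */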
/* interface descriptor: */
@@ -552,14 +554,22 @@
static void rndis_qc_command_complete(struct usb_ep *ep,
struct usb_request *req)
{
- struct f_rndis_qc *rndis = req->context;
+ struct f_rndis_qc *rndis = req->context;
int status;
+ rndis_init_msg_type *buf;
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
if (status < 0)
pr_err("RNDIS command error %d, %d/%d\n",
status, req->actual, req->length);
+
+ buf = (rndis_init_msg_type *)req->buf;
+
+ if (buf->MessageType == REMOTE_NDIS_INITIALIZE_MSG) {
+ rndis->max_pkt_size = buf->MaxTransferSize;
+ pr_debug("MaxTransferSize: %d\n", buf->MaxTransferSize);
+ }
}
static int
@@ -1110,6 +1120,17 @@
pr_info("Sent max packets per xfer %d",
rndis->max_pkt_per_xfer);
break;
+ case RNDIS_QC_GET_MAX_PKT_SIZE:
+ ret = copy_to_user((void __user *)arg,
+ &rndis->max_pkt_size,
+ sizeof(rndis->max_pkt_size));
+ if (ret) {
+ pr_err("copying to user space failed");
+ ret = -EFAULT;
+ }
+ pr_debug("Sent max packet size %d",
+ rndis->max_pkt_size);
+ break;
default:
pr_err("Unsupported IOCTL");
ret = -EINVAL;
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index a827d6a..372122c 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -886,7 +886,7 @@
outp32(MDP_INTR_CLEAR, mgmt->intr);
mdp_intr_mask &= ~mgmt->intr;
outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_disable_irq(mgmt->irq_term);
+ mdp_disable_irq_nosync(mgmt->irq_term);
spin_unlock_irqrestore(&mdp_spin_lock, flag);
if (mdp_rev >= MDP_REV_42)
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 88a7c45..a0d707e 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -7,6 +7,7 @@
mdss-mdp-objs += mdss_mdp_wb.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
+obj-$(CONFIG_DEBUG_FS) += mdss_debug.o
mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o
mdss-dsi-objs += mdss_dsi_panel.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index d041125..9e13418 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -61,6 +61,7 @@
struct delayed_work clk_ctrl_worker;
struct platform_device *pdev;
char __iomem *mdp_base;
+ size_t mdp_reg_size;
char __iomem *vbif_base;
u32 irq;
@@ -93,6 +94,7 @@
struct mdss_iommu_map_type *iommu_map;
struct early_suspend early_suspend;
+ void *debug_data;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/msm/mdss/mdss_debug.c b/drivers/video/msm/mdss/mdss_debug.c
new file mode 100644
index 0000000..abef27d
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_debug.c
@@ -0,0 +1,348 @@
+/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#define DEFAULT_BASE_REG_CNT 128
+#define GROUP_BYTES 4
+#define ROW_BYTES 16
+
+struct mdss_debug_data {
+ struct dentry *root;
+ struct list_head base_list;
+};
+
+struct mdss_debug_base {
+ struct mdss_debug_data *mdd;
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ struct list_head head;
+};
+
+static int mdss_debug_base_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int mdss_debug_base_release(struct inode *inode, struct file *file)
+{
+ struct mdss_debug_base *dbg = file->private_data;
+ if (dbg && dbg->buf) {
+ kfree(dbg->buf);
+ dbg->buf_len = 0;
+ dbg->buf = NULL;
+ }
+ return 0;
+}
+
+static ssize_t mdss_debug_base_offset_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct mdss_debug_base *dbg = file->private_data;
+ u32 off, cnt;
+ char buf[24];
+
+ if (!dbg)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ sscanf(buf, "%5x %d", &off, &cnt);
+
+ if (off > dbg->max_offset)
+ return -EINVAL;
+
+ if (cnt <= 0)
+ cnt = DEFAULT_BASE_REG_CNT;
+
+ if (cnt > (dbg->max_offset - off))
+ cnt = dbg->max_offset - off;
+
+ dbg->off = off;
+ dbg->cnt = cnt;
+
+ pr_debug("offset=%x cnt=%x\n", off, cnt);
+
+ return count;
+}
+
+static ssize_t mdss_debug_base_offset_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct mdss_debug_base *dbg = file->private_data;
+ int len = 0;
+ char buf[24];
+
+ if (!dbg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "0x%08x %d\n", dbg->off, dbg->cnt);
+ if (len < 0)
+ return 0;
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static ssize_t mdss_debug_base_reg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct mdss_debug_base *dbg = file->private_data;
+ size_t off;
+ u32 data, cnt;
+ char buf[24];
+
+ if (!dbg)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ cnt = sscanf(buf, "%x %x", &off, &data);
+
+ if (cnt < 2)
+ return -EFAULT;
+
+ if (off >= dbg->max_offset)
+ return -EFAULT;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ writel_relaxed(data, dbg->base + off);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+ pr_debug("addr=%x data=%x\n", off, data);
+
+ return count;
+}
+
+static ssize_t mdss_debug_base_reg_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct mdss_debug_base *dbg = file->private_data;
+ size_t len;
+
+ if (!dbg) {
+ pr_err("invalid handle\n");
+ return -ENODEV;
+ }
+
+ if (!dbg->buf) {
+ char dump_buf[64];
+ char *ptr;
+ int cnt, tot;
+
+ dbg->buf_len = sizeof(dump_buf) *
+ DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+ dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+
+ if (!dbg->buf) {
+ pr_err("not enough memory to hold reg dump\n");
+ return -ENOMEM;
+ }
+
+ ptr = dbg->base + dbg->off;
+ tot = 0;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+ hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
+ ROW_BYTES, GROUP_BYTES, dump_buf,
+ sizeof(dump_buf), false);
+ len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+ "0x%08x: %s\n",
+ ((int)ptr) - ((int)dbg->base),
+ dump_buf);
+
+ ptr += ROW_BYTES;
+ tot += len;
+ if (tot >= dbg->buf_len)
+ break;
+ }
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+ dbg->buf_len = tot;
+ }
+
+ if (*ppos >= dbg->buf_len)
+ return 0; /* done reading */
+
+ len = min(count, dbg->buf_len - (size_t) *ppos);
+ if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+ pr_err("failed to copy to user\n");
+ return -EFAULT;
+ }
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations mdss_off_fops = {
+ .open = mdss_debug_base_open,
+ .release = mdss_debug_base_release,
+ .read = mdss_debug_base_offset_read,
+ .write = mdss_debug_base_offset_write,
+};
+
+static const struct file_operations mdss_reg_fops = {
+ .open = mdss_debug_base_open,
+ .release = mdss_debug_base_release,
+ .read = mdss_debug_base_reg_read,
+ .write = mdss_debug_base_reg_write,
+};
+
+int mdss_debug_register_base(const char *name, void __iomem *base,
+ size_t max_offset)
+{
+ struct mdss_data_type *mdata = mdss_res;
+ struct mdss_debug_data *mdd;
+ struct mdss_debug_base *dbg;
+ struct dentry *ent_off, *ent_reg;
+ char dn[80] = "";
+ int prefix_len = 0;
+
+ if (!mdata || !mdata->debug_data)
+ return -ENODEV;
+
+ mdd = mdata->debug_data;
+
+ dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return -ENOMEM;
+
+ dbg->base = base;
+ dbg->max_offset = max_offset;
+ dbg->off = 0;
+ dbg->cnt = DEFAULT_BASE_REG_CNT;
+
+ if (name)
+ prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+
+ strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+ ent_off = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_off_fops);
+ if (IS_ERR_OR_NULL(ent_off)) {
+ pr_err("debugfs_create_file: offset fail\n");
+ goto off_fail;
+ }
+
+ strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+ ent_reg = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_reg_fops);
+ if (IS_ERR_OR_NULL(ent_reg)) {
+ pr_err("debugfs_create_file: reg fail\n");
+ goto reg_fail;
+ }
+
+ list_add(&dbg->head, &mdd->base_list);
+
+ return 0;
+reg_fail:
+ debugfs_remove(ent_off);
+off_fail:
+ kfree(dbg);
+ return -ENODEV;
+}
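+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and a
+ * base was registered with name "mdp"): write "<offset> <count>" to
+ * mdp_off, then read mdp_reg for a hex dump of that register range, e.g.
+ *   echo "0x100 32" > /sys/kernel/debug/mdp/mdp_off
+ *   cat /sys/kernel/debug/mdp/mdp_reg
+ */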
+
+static int mdss_debugfs_cleanup(struct mdss_debug_data *mdd)
+{
+ struct mdss_debug_base *base, *tmp;
+
+ if (!mdd)
+ return 0;
+
+ list_for_each_entry_safe(base, tmp, &mdd->base_list, head) {
+ list_del(&base->head);
+ kfree(base);
+ }
+
+ if (mdd->root)
+ debugfs_remove_recursive(mdd->root);
+
+ kfree(mdd);
+
+ return 0;
+}
+
+int mdss_debugfs_init(struct mdss_data_type *mdata)
+{
+ struct mdss_debug_data *mdd;
+
+ if (mdata->debug_data) {
+ pr_warn("mdss debugfs already initialized\n");
+ return -EBUSY;
+ }
+
+ mdd = kzalloc(sizeof(*mdd), GFP_KERNEL);
+ if (!mdd) {
+ pr_err("no memory to create mdss debug data\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&mdd->base_list);
+
+ mdd->root = debugfs_create_dir("mdp", NULL);
+ if (IS_ERR_OR_NULL(mdd->root)) {
+ pr_err("debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(mdd->root));
+ mdd->root = NULL;
+ mdss_debugfs_cleanup(mdd);
+ return -ENODEV;
+ }
+
+ mdata->debug_data = mdd;
+
+ return 0;
+}
+
+int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+ struct mdss_debug_data *mdd = mdata->debug_data;
+
+ mdss_debugfs_cleanup(mdd);
+
+ return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_debug.h b/drivers/video/msm/mdss/mdss_debug.h
new file mode 100644
index 0000000..167fa8a
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_debug.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DEBUG_H
+#define MDSS_DEBUG_H
+
+#include "mdss.h"
+
+#ifdef CONFIG_DEBUG_FS
+int mdss_debugfs_init(struct mdss_data_type *mdata);
+int mdss_debugfs_remove(struct mdss_data_type *mdata);
+int mdss_debug_register_base(const char *name, void __iomem *base,
+ size_t max_offset);
+#else
+static inline int mdss_debugfs_init(struct mdss_data_type *mdata)
+{
+ return 0;
+}
+static inline int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+ return 0;
+}
+static inline int mdss_debug_register_base(const char *name, void __iomem *base,
+ size_t max_offset)
+{
+ return 0;
+}
+#endif
+#endif /* MDSS_DEBUG_H */
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
index 1cf3101..227619f 100644
--- a/drivers/video/msm/mdss/mdss_edp.c
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -303,10 +303,10 @@
}
mdss_edp_prepare_clocks(edp_drv);
- mdss_edp_clk_enable(edp_drv);
mdss_edp_phy_sw_reset(edp_drv->edp_base);
mdss_edp_hw_powerup(edp_drv->edp_base, 1);
mdss_edp_pll_configure(edp_drv->edp_base, edp_drv->edid.timing[0].pclk);
+ mdss_edp_clk_enable(edp_drv);
for (i = 0; i < edp_drv->dpcd.max_lane_count; ++i)
mdss_edp_enable_lane_bist(edp_drv->edp_base, i, 1);
@@ -346,8 +346,8 @@
for (i = 0; i < edp_drv->dpcd.max_lane_count; ++i)
mdss_edp_enable_lane_bist(edp_drv->edp_base, i, 0);
- mdss_edp_hw_powerup(edp_drv->edp_base, 0);
mdss_edp_clk_disable(edp_drv);
+ mdss_edp_hw_powerup(edp_drv->edp_base, 0);
mdss_edp_unprepare_clocks(edp_drv);
return ret;
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 4ec4046..581ec17 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -131,8 +131,8 @@
/* This maps android backlight level 0 to 255 into
driver backlight level 0 to bl_max with rounding */
- bl_lvl = (2 * value * mfd->panel_info.bl_max + MAX_BACKLIGHT_BRIGHTNESS)
- /(2 * MAX_BACKLIGHT_BRIGHTNESS);
+ bl_lvl = (2 * value * mfd->panel_info->bl_max +
+ MAX_BACKLIGHT_BRIGHTNESS) / (2 * MAX_BACKLIGHT_BRIGHTNESS);
if (!bl_lvl && value)
bl_lvl = 1;
@@ -155,7 +155,7 @@
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
- switch (mfd->panel_info.type) {
+ switch (mfd->panel.type) {
case NO_PANEL:
ret = snprintf(buf, PAGE_SIZE, "no panel\n");
break;
@@ -239,13 +239,13 @@
mfd = (struct msm_fb_data_type *)fbi->par;
mfd->key = MFD_KEY;
mfd->fbi = fbi;
- mfd->panel_info = pdata->panel_info;
+ mfd->panel_info = &pdata->panel_info;
mfd->panel.type = pdata->panel_info.type;
mfd->panel.id = mfd->index;
mfd->fb_page = MDSS_FB_NUM;
mfd->index = fbi_list_index;
mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
- mfd->panel_info.frame_count = 0;
+
mfd->bl_level = 0;
mfd->bl_scale = 1024;
mfd->bl_min_lvl = 30;
@@ -263,14 +263,6 @@
if (rc)
return rc;
- /*
- * todo: Currently mfd keeps a full copy of panel data rather than
- * pointer to it.
- * Following line shares the fbi with panel drivers for their
- * sysfs or any external communications with the panel driver.
- */
- pdata->panel_info.fbi = fbi;
-
rc = pm_runtime_set_active(mfd->fbi->dev);
if (rc < 0)
pr_err("pm_runtime: fail to set active.\n");
@@ -325,7 +317,8 @@
return 0;
}
-static inline int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, int e)
+static inline int mdss_fb_send_panel_event(
+ struct msm_fb_data_type *mfd, int e, void *arg)
{
struct mdss_panel_data *pdata;
@@ -338,7 +331,7 @@
pr_debug("sending event=%d for fb%d\n", e, mfd->index);
if (pdata->event_handler)
- return pdata->event_handler(pdata, e, NULL);
+ return pdata->event_handler(pdata, e, arg);
return 0;
}
@@ -352,7 +345,7 @@
pr_debug("mdss_fb suspend index=%d\n", mfd->index);
- ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND, NULL);
if (ret) {
pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
return ret;
@@ -383,7 +376,7 @@
pr_debug("mdss_fb resume index=%d\n", mfd->index);
- ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME, NULL);
if (ret) {
pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
return ret;
@@ -439,52 +432,11 @@
return result;
}
-#if defined(CONFIG_PM) && defined(CONFIG_SUSPEND)
-static int mdss_fb_ext_suspend(struct device *dev)
-{
- struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
- int ret = 0;
-
- if ((!mfd) || (mfd->key != MFD_KEY))
- return 0;
-
- if (mfd->panel_info.type == HDMI_PANEL ||
- mfd->panel_info.type == DTV_PANEL)
- ret = mdss_fb_suspend_sub(mfd);
-
- return ret;
-}
-
-static int mdss_fb_ext_resume(struct device *dev)
-{
- struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
- int ret = 0;
-
- if ((!mfd) || (mfd->key != MFD_KEY))
- return 0;
-
- if (mfd->panel_info.type == HDMI_PANEL ||
- mfd->panel_info.type == DTV_PANEL)
- ret = mdss_fb_resume_sub(mfd);
-
- return ret;
-}
-#else
-#define mdss_fb_ext_suspend NULL
-#define mdss_fb_ext_resume NULL
-#endif
-
-static const struct dev_pm_ops mdss_fb_dev_pm_ops = {
- .suspend = mdss_fb_ext_suspend,
- .resume = mdss_fb_ext_resume,
-};
-
static struct platform_driver mdss_fb_driver = {
.probe = mdss_fb_probe,
.remove = mdss_fb_remove,
.driver = {
.name = "mdss_fb",
- .pm = &mdss_fb_dev_pm_ops,
},
};
@@ -716,9 +668,9 @@
void *virt = NULL;
unsigned long phys = 0;
size_t size;
+ u32 yres = mfd->fbi->var.yres_virtual;
- size = PAGE_ALIGN(mfd->fbi->fix.line_length * mfd->panel_info.yres);
- size *= mfd->fb_page;
+ size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
if (mfd->index == 0) {
int dom;
@@ -751,7 +703,7 @@
{
int ret = -ENODEV;
int bpp;
- struct mdss_panel_info *panel_info = &mfd->panel_info;
+ struct mdss_panel_info *panel_info = mfd->panel_info;
struct fb_info *fbi = mfd->fbi;
struct fb_fix_screeninfo *fix;
struct fb_var_screeninfo *var;
@@ -903,10 +855,6 @@
var->hsync_len = panel_info->lcdc.h_pulse_width;
var->pixclock = panel_info->clk_rate / 1000;
- mfd->var_xres = var->xres;
- mfd->var_yres = var->yres;
- mfd->var_pixclock = var->pixclock;
-
if (panel_info->type == MIPI_VIDEO_PANEL) {
var->reserved[4] = panel_info->mipi.frame_rate;
} else {
@@ -1050,11 +998,25 @@
mfd->panel.type);
mdss_fb_update_backlight(mfd);
-
- ++mfd->panel_info.frame_count;
return 0;
}
+static void mdss_fb_var_to_panelinfo(struct fb_var_screeninfo *var,
+ struct mdss_panel_info *pinfo)
+{
+ pinfo->xres = var->xres;
+ pinfo->yres = var->yres;
+ pinfo->lcdc.v_front_porch = var->upper_margin;
+ pinfo->lcdc.v_back_porch = var->lower_margin;
+ pinfo->lcdc.v_pulse_width = var->vsync_len;
+ pinfo->lcdc.h_front_porch = var->left_margin;
+ pinfo->lcdc.h_back_porch = var->right_margin;
+ pinfo->lcdc.h_pulse_width = var->hsync_len;
+ pinfo->clk_rate = var->pixclock;
+ /* todo: find how to pass CEA vic through framebuffer APIs */
+ pinfo->vic = var->reserved[3];
+}
+
static int mdss_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -1147,16 +1109,25 @@
if ((var->xres == 0) || (var->yres == 0))
return -EINVAL;
- if ((var->xres > mfd->panel_info.xres) ||
- (var->yres > mfd->panel_info.yres))
- return -EINVAL;
-
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
return -EINVAL;
+ if (mfd->panel_info) {
+ struct mdss_panel_info panel_info;
+ int rc;
+
+ memcpy(&panel_info, mfd->panel_info, sizeof(panel_info));
+ mdss_fb_var_to_panelinfo(var, &panel_info);
+ rc = mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
+ &panel_info);
+ if (IS_ERR_VALUE(rc))
+ return rc;
+ mfd->panel_reconfig = rc;
+ }
+
return 0;
}
@@ -1165,7 +1136,6 @@
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_var_screeninfo *var = &info->var;
int old_imgType;
- int blank = 0;
old_imgType = mfd->fb_imgType;
switch (var->bits_per_pixel) {
@@ -1197,22 +1167,14 @@
return -EINVAL;
}
- if ((mfd->var_pixclock != var->pixclock) ||
- (mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) ||
- (mfd->var_pixclock != var->pixclock) ||
- (mfd->var_xres != var->xres) ||
- (mfd->var_yres != var->yres)))) {
- mfd->var_xres = var->xres;
- mfd->var_yres = var->yres;
- mfd->var_pixclock = var->pixclock;
- blank = 1;
- }
mfd->fbi->fix.line_length = mdss_fb_line_length(mfd->index, var->xres,
var->bits_per_pixel / 8);
- if (blank) {
+ if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
+ mdss_fb_var_to_panelinfo(var, mfd->panel_info);
mdss_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
+ mfd->panel_reconfig = false;
}
return 0;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 5e57de6..11bb859 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -55,13 +55,15 @@
u32 fb_page;
struct panel_id panel;
- struct mdss_panel_info panel_info;
+ struct mdss_panel_info *panel_info;
u32 dest;
struct fb_info *fbi;
int op_enable;
u32 fb_imgType;
+ int panel_reconfig;
+
u32 dst_format;
int vsync_pending;
ktime_t vsync_time;
@@ -100,10 +102,6 @@
struct platform_device *pdev;
- u32 var_xres;
- u32 var_yres;
- u32 var_pixclock;
-
u32 mdp_fb_page_protection;
struct mdss_mdp_ctl *ctl;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 539cd49..37bbbdf 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -163,6 +163,66 @@
}
} /* hdmi_tx_io_name */
+static int hdmi_tx_get_vic_from_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl,
+ struct mdss_panel_info *pinfo)
+{
+ int new_vic = -1;
+ u32 h_total, v_total;
+ struct hdmi_disp_mode_timing_type timing;
+
+ if (!hdmi_ctrl || !pinfo) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pinfo->vic) {
+ if (hdmi_get_supported_mode(pinfo->vic - 1)) {
+ new_vic = pinfo->vic - 1;
+ DEV_DBG("%s: %s is supported\n", __func__,
+ hdmi_get_video_fmt_2string(new_vic));
+ } else {
+ DEV_ERR("%s: invalid or not supported vic\n",
+ __func__);
+ return -EPERM;
+ }
+ } else {
+ timing.active_h = pinfo->xres;
+ timing.back_porch_h = pinfo->lcdc.h_back_porch;
+ timing.front_porch_h = pinfo->lcdc.h_front_porch;
+ timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+ h_total = timing.active_h + timing.back_porch_h +
+ timing.front_porch_h + timing.pulse_width_h;
+ DEV_DBG("%s: ah=%d bph=%d fph=%d pwh=%d ht=%d\n", __func__,
+ timing.active_h, timing.back_porch_h,
+ timing.front_porch_h, timing.pulse_width_h, h_total);
+
+ timing.active_v = pinfo->yres;
+ timing.back_porch_v = pinfo->lcdc.v_back_porch;
+ timing.front_porch_v = pinfo->lcdc.v_front_porch;
+ timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+ v_total = timing.active_v + timing.back_porch_v +
+ timing.front_porch_v + timing.pulse_width_v;
+ DEV_DBG("%s: av=%d bpv=%d fpv=%d pwv=%d vt=%d\n", __func__,
+ timing.active_v, timing.back_porch_v,
+ timing.front_porch_v, timing.pulse_width_v, v_total);
+
+ timing.pixel_freq = pinfo->clk_rate / 1000;
+ if (h_total && v_total) {
+ timing.refresh_rate = ((timing.pixel_freq * 1000) /
+ (h_total * v_total)) * 1000;
+ } else {
+			DEV_ERR("%s: cannot calculate refresh rate\n", __func__);
+ return -EPERM;
+ }
+ DEV_DBG("%s: pixel_freq=%d refresh_rate=%d\n", __func__,
+ timing.pixel_freq, timing.refresh_rate);
+
+ new_vic = hdmi_get_video_id_code(&timing);
+ }
+
+ return new_vic;
+} /* hdmi_tx_get_vic_from_panel_info */
+
static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data(
struct mdss_panel_data *mpd)
{
@@ -170,14 +230,8 @@
if (mpd) {
hdmi_ctrl = container_of(mpd, struct hdmi_tx_ctrl, panel_data);
- if (hdmi_ctrl) {
- hdmi_ctrl->pixel_clk =
- mpd->panel_info.fbi->var.pixclock;
- hdmi_ctrl->xres = mpd->panel_info.fbi->var.xres;
- hdmi_ctrl->yres = mpd->panel_info.fbi->var.yres;
- } else {
+ if (!hdmi_ctrl)
DEV_ERR("%s: hdmi_ctrl = NULL\n", __func__);
- }
} else {
DEV_ERR("%s: mdss_panel_data = NULL\n", __func__);
}
@@ -546,90 +600,40 @@
return 0;
} /* hdmi_tx_check_capability */
-static int hdmi_tx_set_video_fmt(struct hdmi_tx_ctrl *hdmi_ctrl)
+static int hdmi_tx_set_video_fmt(struct hdmi_tx_ctrl *hdmi_ctrl,
+ struct mdss_panel_info *pinfo)
{
- int rc = 0;
+ int new_vic = -1;
const struct hdmi_disp_mode_timing_type *timing = NULL;
- struct hdmi_tx_platform_data *pdata = NULL;
- u32 format = DEFAULT_VIDEO_RESOLUTION;
- if (!hdmi_ctrl) {
+ if (!hdmi_ctrl || !pinfo) {
DEV_ERR("%s: invalid input\n", __func__);
- rc = -EINVAL;
- goto end;
+ return -EINVAL;
}
- pdata = &hdmi_ctrl->pdata;
-
- DEV_DBG("%s: Resolution wanted=%dx%d\n", __func__, hdmi_ctrl->xres,
- hdmi_ctrl->yres);
- switch (hdmi_ctrl->xres) {
- default:
- case 640:
- format = HDMI_VFRMT_640x480p60_4_3;
- break;
- case 720:
- format = (hdmi_ctrl->yres == 480)
- ? HDMI_VFRMT_720x480p60_16_9
- : HDMI_VFRMT_720x576p50_16_9;
- break;
- case 1280:
- if (hdmi_ctrl->frame_rate == 50000)
- format = HDMI_VFRMT_1280x720p50_16_9;
- else
- format = HDMI_VFRMT_1280x720p60_16_9;
- break;
- case 1440:
- /* interlaced has half of y res. */
- format = (hdmi_ctrl->yres == 240)
- ? HDMI_VFRMT_1440x480i60_16_9
- : HDMI_VFRMT_1440x576i50_16_9;
- break;
- case 1920:
- if (hdmi_ctrl->yres == 540) {/* interlaced */
- format = HDMI_VFRMT_1920x1080i60_16_9;
- } else if (hdmi_ctrl->yres == 1080) {
- if (hdmi_ctrl->frame_rate == 50000)
- format = HDMI_VFRMT_1920x1080p50_16_9;
- else if (hdmi_ctrl->frame_rate == 24000)
- format = HDMI_VFRMT_1920x1080p24_16_9;
- else if (hdmi_ctrl->frame_rate == 25000)
- format = HDMI_VFRMT_1920x1080p25_16_9;
- else if (hdmi_ctrl->frame_rate == 30000)
- format = HDMI_VFRMT_1920x1080p30_16_9;
- else
- format = HDMI_VFRMT_1920x1080p60_16_9;
- }
- break;
+ new_vic = hdmi_tx_get_vic_from_panel_info(hdmi_ctrl, pinfo);
+ if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+ DEV_ERR("%s: invalid or not supported vic\n", __func__);
+ return -EPERM;
}
- if (hdmi_ctrl->video_resolution != format)
- DEV_DBG("%s: switching %s => %s", __func__,
- hdmi_get_video_fmt_2string(
- hdmi_ctrl->video_resolution),
- hdmi_get_video_fmt_2string(format));
- else
- DEV_DBG("resolution %s", hdmi_get_video_fmt_2string(
- hdmi_ctrl->video_resolution));
+ DEV_DBG("%s: switching from %s => %s", __func__,
+ hdmi_get_video_fmt_2string(hdmi_ctrl->video_resolution),
+ hdmi_get_video_fmt_2string(new_vic));
- timing = hdmi_get_supported_mode(format);
- if (!timing) {
- DEV_ERR("%s: invalid video fmt=%d\n", __func__,
- hdmi_ctrl->video_resolution);
- rc = -EPERM;
- goto end;
- }
+ hdmi_ctrl->video_resolution = (u32)new_vic;
+
+ timing = hdmi_get_supported_mode(hdmi_ctrl->video_resolution);
/* todo: find a better way */
hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM].clk_config[0].rate =
timing->pixel_freq * 1000;
- hdmi_ctrl->video_resolution = format;
hdmi_edid_set_video_resolution(
- hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID], format);
+ hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID],
+ hdmi_ctrl->video_resolution);
-end:
- return rc;
+ return 0;
} /* hdmi_tx_set_video_fmt */
static void hdmi_tx_video_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
@@ -1710,8 +1714,6 @@
if (hdmi_ctrl->hpd_off_pending) {
hdmi_tx_hpd_off(hdmi_ctrl);
hdmi_ctrl->hpd_off_pending = false;
- } else {
- hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
}
DEV_INFO("%s: HDMI Core: OFF\n", __func__);
@@ -1764,15 +1766,15 @@
/* If a power down is already underway, wait for it to finish */
flush_work_sync(&hdmi_ctrl->power_off_work);
- DEV_INFO("power: ON (%dx%d %ld)\n", hdmi_ctrl->xres, hdmi_ctrl->yres,
- hdmi_ctrl->pixel_clk);
-
- rc = hdmi_tx_set_video_fmt(hdmi_ctrl);
+ rc = hdmi_tx_set_video_fmt(hdmi_ctrl, &panel_data->panel_info);
if (rc) {
DEV_ERR("%s: cannot set video_fmt.rc=%d\n", __func__, rc);
return rc;
}
+ DEV_INFO("power: ON (%s)\n", hdmi_get_video_fmt_2string(
+ hdmi_ctrl->video_resolution));
+
rc = hdmi_tx_core_on(hdmi_ctrl);
if (rc) {
DEV_ERR("%s: hdmi_msm_core_on failed\n", __func__);
@@ -2044,7 +2046,7 @@
static int hdmi_tx_panel_event_handler(struct mdss_panel_data *panel_data,
int event, void *arg)
{
- int rc = 0;
+ int rc = 0, new_vic = -1;
struct hdmi_tx_ctrl *hdmi_ctrl =
hdmi_tx_get_drvdata_from_panel_data(panel_data);
@@ -2057,6 +2059,25 @@
event, hdmi_ctrl->panel_suspend, hdmi_ctrl->hpd_feature_on);
switch (event) {
+ case MDSS_EVENT_CHECK_PARAMS:
+ new_vic = hdmi_tx_get_vic_from_panel_info(hdmi_ctrl,
+ (struct mdss_panel_info *)arg);
+ if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+ DEV_ERR("%s: invalid or not supported vic\n", __func__);
+ return -EPERM;
+ }
+
+ /*
+ * return value of 1 lets mdss know that panel
+ * needs a reconfig due to new resolution and
+ * it will issue close and open subsequently.
+ */
+ if (new_vic != hdmi_ctrl->video_resolution)
+ rc = 1;
+ else
+ DEV_DBG("%s: no res change.\n", __func__);
+ break;
+
case MDSS_EVENT_RESUME:
if (hdmi_ctrl->hpd_feature_on) {
INIT_COMPLETION(hdmi_ctrl->hpd_done);
@@ -2104,8 +2125,6 @@
if (!hdmi_ctrl->panel_power_on) {
if (hdmi_ctrl->hpd_feature_on)
hdmi_tx_hpd_off(hdmi_ctrl);
- else
- DEV_ERR("%s: invalid state\n", __func__);
hdmi_ctrl->panel_suspend = false;
} else {
@@ -2131,6 +2150,12 @@
if (hdmi_ctrl->panel_suspend)
flush_work_sync(&hdmi_ctrl->power_off_work);
break;
+
+ case MDSS_EVENT_CLOSE:
+ if (hdmi_ctrl->hpd_feature_on)
+ hdmi_tx_hpd_polarity_setup(hdmi_ctrl,
+ HPD_CONNECT_POLARITY);
+ break;
}
return rc;
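
The MDSS_EVENT_CHECK_PARAMS contract used above (return 1 to request a close/open reconfiguration, 0 when the current configuration can stay) applies to any panel driver. A hedged, minimal sketch of such a handler; the function name is hypothetical and the resolution comparison is only illustrative:

	static int example_panel_event_handler(struct mdss_panel_data *pdata,
			int event, void *arg)
	{
		struct mdss_panel_info *new = arg;

		switch (event) {
		case MDSS_EVENT_CHECK_PARAMS:
			/* non-zero asks mdss_fb to close and reopen the panel */
			if (new->xres != pdata->panel_info.xres ||
			    new->yres != pdata->panel_info.yres)
				return 1;
			return 0;
		default:
			return 0;
		}
	}
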
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index 2d431b7..5f8094f 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -50,6 +50,7 @@
struct workqueue_struct *workq;
uint32_t video_resolution;
+
u32 panel_power_on;
u32 panel_suspend;
@@ -62,11 +63,6 @@
struct work_struct power_off_work;
- unsigned long pixel_clk;
- u32 xres;
- u32 yres;
- u32 frame_rate;
-
u32 present_hdcp;
u8 spd_vendor_name[8];
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.c b/drivers/video/msm/mdss/mdss_hdmi_util.c
index e7ea8c9..a3d76be 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_util.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.c
@@ -98,6 +98,57 @@
return ret;
} /* hdmi_get_supported_mode */
+int hdmi_get_video_id_code(struct hdmi_disp_mode_timing_type *timing_in)
+{
+ int i, vic = -1;
+
+ if (!timing_in) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ goto exit;
+ }
+
+ /* active_low_h, active_low_v and interlaced are not checked against */
+ for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+ struct hdmi_disp_mode_timing_type *supported_timing =
+ &hdmi_supported_video_mode_lut[i];
+
+ if (!supported_timing->supported)
+ continue;
+ if (timing_in->active_h != supported_timing->active_h)
+ continue;
+ if (timing_in->front_porch_h != supported_timing->front_porch_h)
+ continue;
+ if (timing_in->pulse_width_h != supported_timing->pulse_width_h)
+ continue;
+ if (timing_in->back_porch_h != supported_timing->back_porch_h)
+ continue;
+ if (timing_in->active_v != supported_timing->active_v)
+ continue;
+ if (timing_in->front_porch_v != supported_timing->front_porch_v)
+ continue;
+ if (timing_in->pulse_width_v != supported_timing->pulse_width_v)
+ continue;
+ if (timing_in->back_porch_v != supported_timing->back_porch_v)
+ continue;
+ if (timing_in->pixel_freq != supported_timing->pixel_freq)
+ continue;
+ if (timing_in->refresh_rate != supported_timing->refresh_rate)
+ continue;
+
+ vic = (int)supported_timing->video_format;
+ break;
+ }
+
+ if (vic < 0)
+		DEV_ERR("%s: requested timing is not supported\n", __func__);
+
+exit:
+ DEV_DBG("%s: vic = %d timing = %s\n", __func__, vic,
+ hdmi_get_video_fmt_2string((u32)vic));
+
+ return vic;
+} /* hdmi_get_video_id_code */
+
void hdmi_set_supported_mode(u32 mode)
{
switch (mode) {
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.h b/drivers/video/msm/mdss/mdss_hdmi_util.h
index 852a93c..c970ebe 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_util.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.h
@@ -397,6 +397,7 @@
int retry;
};
+int hdmi_get_video_id_code(struct hdmi_disp_mode_timing_type *timing_in);
const struct hdmi_disp_mode_timing_type *hdmi_get_supported_mode(u32 mode);
void hdmi_set_supported_mode(u32 mode);
const char *hdmi_get_video_fmt_2string(u32 format);
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index bcb3aee..b59d193 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -49,6 +49,7 @@
#include "mdss.h"
#include "mdss_fb.h"
#include "mdss_mdp.h"
+#include "mdss_debug.h"
struct mdss_data_type *mdss_res;
@@ -772,6 +773,19 @@
return 0;
}
+static int mdss_mdp_debug_init(struct mdss_data_type *mdata)
+{
+ int rc;
+
+ rc = mdss_debugfs_init(mdata);
+ if (rc)
+ return rc;
+
+ mdss_debug_register_base(NULL, mdata->mdp_base, mdata->mdp_reg_size);
+
+ return 0;
+}
+
static int mdss_hw_init(struct mdss_data_type *mdata)
{
char *base = mdata->vbif_base;
@@ -875,8 +889,9 @@
goto probe_done;
}
+ mdata->mdp_reg_size = resource_size(res);
mdata->mdp_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ mdata->mdp_reg_size);
if (unlikely(!mdata->mdp_base)) {
pr_err("unable to map MDP base\n");
rc = -ENOMEM;
@@ -933,6 +948,11 @@
pr_err("unable to register early suspend\n");
goto probe_done;
}
+
+ rc = mdss_mdp_debug_init(mdata);
+ if (rc)
+ pr_err("unable to initialize mdp debugging\n");
+
probe_done:
if (IS_ERR_VALUE(rc)) {
mdss_res = NULL;
@@ -1099,6 +1119,7 @@
mdss_mdp_pp_term(&pdev->dev);
mdss_mdp_bus_scale_unregister(mdata);
mdss_mdp_remove_early_suspend(mdata);
+ mdss_debugfs_remove(mdata);
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 2e92591..d273201 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -137,6 +137,8 @@
struct mdss_mdp_mixer *mixer_right;
struct mutex lock;
+ struct mdss_panel_data *panel_data;
+
int (*start_fnc) (struct mdss_mdp_ctl *ctl);
int (*stop_fnc) (struct mdss_mdp_ctl *ctl);
int (*prepare_fnc) (struct mdss_mdp_ctl *ctl, void *arg);
@@ -241,7 +243,9 @@
unsigned long smp[MAX_PLANES];
- struct mdss_mdp_data buffers[2];
+ struct mdss_mdp_data back_buf;
+ struct mdss_mdp_data front_buf;
+
struct list_head used_list;
struct list_head cleanup_list;
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 00f5874..31cc527 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -107,7 +107,7 @@
int is_writeback = false;
if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
struct mdss_panel_info *pinfo;
- pinfo = &mixer->ctl->mfd->panel_info;
+ pinfo = &mixer->ctl->panel_data->panel_info;
v_total = (pinfo->yres + pinfo->lcdc.v_back_porch +
pinfo->lcdc.v_front_porch +
pinfo->lcdc.v_pulse_width);
@@ -378,14 +378,21 @@
static int mdss_mdp_ctl_init(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_ctl *ctl;
+ struct mdss_panel_data *pdata;
u32 width, height;
int ret = 0;
if (!mfd)
return -ENODEV;
- width = mfd->fbi->var.xres;
- height = mfd->fbi->var.yres;
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+ if (!pdata) {
+ pr_err("no panel connected for fb%d\n", mfd->index);
+ return -ENODEV;
+ }
+
+ width = pdata->panel_info.xres;
+ height = pdata->panel_info.yres;
if (width > (2 * MAX_MIXER_WIDTH)) {
pr_err("unsupported resolution\n");
@@ -400,6 +407,7 @@
}
ctl->mfd = mfd;
mfd->ctl = ctl;
+ ctl->panel_data = pdata;
} else {
ctl = mfd->ctl;
}
@@ -441,7 +449,7 @@
mdss_mdp_mixer_free(ctl->mixer_right);
}
- switch (mfd->panel_info.type) {
+ switch (pdata->panel_info.type) {
case EDP_PANEL:
ctl->intf_num = MDSS_MDP_INTF0;
ctl->intf_type = MDSS_INTF_EDP;
@@ -449,7 +457,7 @@
ctl->start_fnc = mdss_mdp_video_start;
break;
case MIPI_VIDEO_PANEL:
- if (mfd->panel_info.pdest == DISPLAY_1)
+ if (pdata->panel_info.pdest == DISPLAY_1)
ctl->intf_num = MDSS_MDP_INTF1;
else
ctl->intf_num = MDSS_MDP_INTF2;
@@ -469,7 +477,7 @@
ctl->start_fnc = mdss_mdp_writeback_start;
break;
default:
- pr_err("unsupported panel type (%d)\n", mfd->panel_info.type);
+ pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
ret = -EINVAL;
goto ctl_init_fail;
}
@@ -477,14 +485,14 @@
ctl->opmode |= (ctl->intf_num << 4);
if (ctl->intf_num == MDSS_MDP_NO_INTF) {
- ctl->dst_format = mfd->panel_info.out_format;
+ ctl->dst_format = pdata->panel_info.out_format;
} else {
struct mdp_dither_cfg_data dither = {
.block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0,
.flags = MDP_PP_OPS_DISABLE,
};
- switch (mfd->panel_info.bpp) {
+ switch (pdata->panel_info.bpp) {
case 18:
ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
@@ -539,14 +547,10 @@
int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
{
struct mdss_panel_data *pdata;
- if (!ctl || !ctl->mfd)
+ if (!ctl || !ctl->panel_data)
return -ENODEV;
- pdata = dev_get_platdata(&ctl->mfd->pdev->dev);
- if (!pdata) {
- pr_err("no panel connected\n");
- return -ENODEV;
- }
+ pdata = ctl->panel_data;
pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
@@ -596,7 +600,7 @@
ret = ctl->start_fnc(ctl);
else
pr_warn("no start function for ctl=%d type=%d\n", ctl->num,
- mfd->panel_info.type);
+ ctl->panel_data->panel_info.type);
if (ret) {
pr_err("unable to start intf\n");
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 052d78c..4757c63 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -337,17 +337,13 @@
int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
{
- struct msm_fb_data_type *mfd;
struct mdss_panel_info *pinfo;
struct mdss_mdp_video_ctx *ctx;
struct mdss_mdp_mixer *mixer;
struct intf_timing_params itp = {0};
- struct fb_info *fbi;
int i;
- mfd = ctl->mfd;
- fbi = mfd->fbi;
- pinfo = &mfd->panel_info;
+ pinfo = &ctl->panel_data->panel_info;
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 9c62ea2..5b6d009 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -430,35 +430,24 @@
{
struct mdss_mdp_pipe *pipe, *tmp;
LIST_HEAD(destroy_pipes);
- int i;
- mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &mfd->pipes_cleanup, cleanup_list) {
list_move(&pipe->cleanup_list, &destroy_pipes);
- for (i = 0; i < ARRAY_SIZE(pipe->buffers); i++)
- mdss_mdp_overlay_free_buf(&pipe->buffers[i]);
+ mdss_mdp_overlay_free_buf(&pipe->back_buf);
+ mdss_mdp_overlay_free_buf(&pipe->front_buf);
}
- if (!list_empty(&mfd->pipes_used)) {
- struct mdss_mdp_data *data;
- int buf_ndx;
-
- list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- buf_ndx = (pipe->play_cnt - 1) & 1; /* prev buffer */
- data = &pipe->buffers[buf_ndx];
-
- if (data->num_planes) {
- pr_debug("free buffer ndx=%d pnum=%d\n",
- buf_ndx, pipe->num);
- mdss_mdp_overlay_free_buf(data);
- }
+ list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
+ if (pipe->back_buf.num_planes) {
+ /* make back buffer active */
+ mdss_mdp_overlay_free_buf(&pipe->front_buf);
+ swap(pipe->back_buf, pipe->front_buf);
}
}
mutex_unlock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
mdss_mdp_pipe_destroy(pipe);
- mutex_unlock(&mfd->ov_lock);
return 0;
}
@@ -468,12 +457,16 @@
struct msm_fb_data_type *mfd = ctl->mfd;
int ret;
+ mutex_lock(&mfd->ov_lock);
+
if (mfd->kickoff_fnc)
ret = mfd->kickoff_fnc(ctl);
else
ret = mdss_mdp_display_commit(ctl, NULL);
- if (IS_ERR_VALUE(ret))
+ if (IS_ERR_VALUE(ret)) {
+ mutex_unlock(&mfd->ov_lock);
return ret;
+ }
complete(&mfd->update.comp);
mutex_lock(&mfd->no_update.lock);
@@ -486,6 +479,8 @@
ret = mdss_mdp_overlay_cleanup(mfd);
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
@@ -630,7 +625,7 @@
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_data *src_data;
- int ret, buf_ndx;
+ int ret;
u32 flags;
pipe = mdss_mdp_pipe_get_locked(req->id);
@@ -643,9 +638,12 @@
flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
- buf_ndx = (pipe->play_cnt + 1) & 1; /* next buffer */
- src_data = &pipe->buffers[buf_ndx];
- mdss_mdp_overlay_free_buf(src_data);
+ src_data = &pipe->back_buf;
+ if (src_data->num_planes) {
+ pr_warn("dropped buffer pnum=%d play=%d addr=0x%x\n",
+ pipe->num, pipe->play_cnt, src_data->p[0].addr);
+ mdss_mdp_overlay_free_buf(src_data);
+ }
ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1, flags);
if (IS_ERR_VALUE(ret)) {
@@ -682,7 +680,7 @@
} else {
ret = mdss_mdp_overlay_queue(mfd, req);
- if ((ret == 0) && (mfd->panel_info.type == WRITEBACK_PANEL)) {
+ if ((ret == 0) && (mfd->panel.type == WRITEBACK_PANEL)) {
mutex_unlock(&mfd->ov_lock);
ret = mdss_mdp_overlay_kickoff(mfd->ctl);
return ret;
@@ -1167,7 +1165,7 @@
ret = mdss_mdp_overlay_kickoff(mfd->ctl);
break;
default:
- if (mfd->panel_info.type == WRITEBACK_PANEL)
+ if (mfd->panel.type == WRITEBACK_PANEL)
ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
break;
}
@@ -1215,7 +1213,7 @@
mfd->dma_fnc = mdss_mdp_overlay_pan_display;
mfd->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
- if (mfd->panel_info.type == WRITEBACK_PANEL)
+ if (mfd->panel.type == WRITEBACK_PANEL)
mfd->kickoff_fnc = mdss_mdp_wb_kickoff;
INIT_LIST_HEAD(&mfd->pipes_used);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 28d7051..d807493 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -64,6 +64,7 @@
MDSS_EVENT_CLOSE,
MDSS_EVENT_SUSPEND,
MDSS_EVENT_RESUME,
+ MDSS_EVENT_CHECK_PARAMS,
};
/* panel info type */
@@ -179,6 +180,7 @@
u32 frame_count;
u32 is_3d_panel;
u32 out_format;
+ u32 vic; /* video identification code */
struct lcd_panel_info lcd;
struct lcdc_panel_info lcdc;
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index c3dc06b..47dc2c8 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -25,11 +25,49 @@
#include "mdss_panel.h"
+/**
+ * mdss_wb_check_params - check new panel info params
+ * @pdata: current panel information
+ * @new: updates to panel info
+ *
+ * Checks if there are any changes that require panel reconfiguration
+ * in order to be reflected on the writeback buffer.
+ *
+ * Return a negative errno on invalid input, zero if no panel reconfiguration
+ * is needed, and non-zero if reconfiguration is needed.
+ */
+static int mdss_wb_check_params(struct mdss_panel_data *pdata,
+ struct mdss_panel_info *new)
+{
+ struct mdss_panel_info *old;
+
+ if (!pdata || !new) {
+ pr_err("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ old = &pdata->panel_info;
+
+ if ((old->xres != new->xres) || (old->yres != new->yres))
+ return 1;
+
+ return 0;
+}
+
static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
int event, void *arg)
{
- pr_debug("%s: event=%d\n", __func__, event);
- return 0;
+ int rc = 0;
+
+ switch (event) {
+ case MDSS_EVENT_CHECK_PARAMS:
+ rc = mdss_wb_check_params(pdata, (struct mdss_panel_info *)arg);
+ break;
+ default:
+ pr_debug("%s: panel event (%d) not handled\n", __func__, event);
+ break;
+ }
+ return rc;
}
static int mdss_wb_parse_dt(struct platform_device *pdev,
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index 2711c1a..d4d7288 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -264,7 +264,8 @@
typedef void (*fxn)(u32 data);
#define CMD_REQ_RX 0x0001
-#define CMD_REQ_COMMIT 0x0002
+#define CMD_REQ_COMMIT 0x0002
+#define CMD_CLK_CTRL 0x0004
#define CMD_REQ_NO_MAX_PKT_SIZE 0x0008
struct dcs_cmd_req {
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index bea6b4e..ee4a578 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -1249,7 +1249,6 @@
	/* transmit read command to client */
mipi_dsi_cmd_dma_tx(tp);
- mipi_dsi_disable_irq(DSI_CMD_TERM);
/*
* once cmd_dma_done interrupt received,
* return data from client is ready and stored
@@ -1359,7 +1358,6 @@
	/* transmit read command to client */
mipi_dsi_cmd_dma_tx(tp);
- mipi_dsi_disable_irq(DSI_CMD_TERM);
/*
* once cmd_dma_done interrupt received,
* return data from client is ready and stored
@@ -1580,7 +1578,6 @@
void mipi_dsi_cmdlist_commit(int from_mdp)
{
struct dcs_cmd_req *req;
- int video;
u32 dsi_ctrl;
mutex_lock(&cmd_mutex);
@@ -1592,12 +1589,6 @@
if (req == NULL)
goto need_lock;
- video = MIPI_INP(MIPI_DSI_BASE + 0x0000);
- video &= 0x02; /* VIDEO_MODE */
-
- if (!video)
- mipi_dsi_clk_cfg(1);
-
pr_debug("%s: from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
dsi_ctrl = MIPI_INP(MIPI_DSI_BASE + 0x0000);
@@ -1619,9 +1610,6 @@
else
mipi_dsi_cmdlist_tx(req);
- if (!video)
- mipi_dsi_clk_cfg(0);
-
need_lock:
if (from_mdp) /* from pipe_commit */
@@ -1655,9 +1643,15 @@
pr_debug("%s: tot=%d put=%d get=%d\n", __func__,
cmdlist.tot, cmdlist.put, cmdlist.get);
+ if (req->flags & CMD_CLK_CTRL)
+ mipi_dsi_clk_cfg(1);
+
if (req->flags & CMD_REQ_COMMIT)
mipi_dsi_cmdlist_commit(0);
+ if (req->flags & CMD_CLK_CTRL)
+ mipi_dsi_clk_cfg(0);
+
return ret;
}
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index ecac82d..68bc65e 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -476,7 +476,7 @@
cmdreq.cmds = &backlight_cmd;
cmdreq.cmds_cnt = 1;
- cmdreq.flags = CMD_REQ_COMMIT;
+ cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL;
cmdreq.rlen = 0;
cmdreq.cb = NULL;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 7a1d521..8836b33 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -275,6 +275,7 @@
struct ddl_enc_buffers hw_bufs;
struct ddl_yuv_buffer_size input_buf_size;
struct vidc_1080p_enc_frame_info enc_frame_info;
+ u32 plusptype_enable;
u32 meta_data_enable_flag;
u32 suffix;
u32 meta_data_offset;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
index 332497f..2b65d7e 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
@@ -1075,6 +1075,21 @@
}
break;
}
+ case VCD_I_H263_PLUSPTYPE:
+ {
+ struct vcd_property_plusptype *plusptype =
+ (struct vcd_property_plusptype *)property_value;
+
+ if ((sizeof(struct vcd_property_plusptype) ==
+ property_hdr->sz) && encoder->codec.codec ==
+ VCD_CODEC_H263) {
+ encoder->plusptype_enable = plusptype->plusptype_enable;
+ DDL_MSG_LOW("\nencoder->plusptype_enable = %u",
+ encoder->plusptype_enable);
+ vcd_status = VCD_S_SUCCESS;
+ }
+ break;
+ }
default:
DDL_MSG_ERROR("INVALID ID %d\n", (int)property_hdr->prop_id);
vcd_status = VCD_ERR_ILLEGAL_OP;
@@ -2169,5 +2184,6 @@
encoder->frame_rate.fps_denominator = 1;
ddl_set_default_enc_property(ddl);
encoder->sps_pps.sps_pps_for_idr_enable_flag = false;
+ encoder->plusptype_enable = 0;
}
}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index 76972ca..1bf242d 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -599,7 +599,7 @@
encoder->frame_rate.fps_denominator;
if ((encoder->codec.codec == VCD_CODEC_H263) &&
(DDL_FRAMERATE_SCALE(DDL_INITIAL_FRAME_RATE)
- != scaled_frame_rate))
+ != scaled_frame_rate) && encoder->plusptype_enable)
h263_cpfc_enable = true;
vidc_sm_set_extended_encoder_control(&ddl->shared_mem
[ddl->command_channel], hdr_ext_control,
diff --git a/drivers/video/msm/vidc/common/enc/venc.c b/drivers/video/msm/vidc/common/enc/venc.c
index c7237e4..763fbda 100644
--- a/drivers/video/msm/vidc/common/enc/venc.c
+++ b/drivers/video/msm/vidc/common/enc/venc.c
@@ -1624,6 +1624,29 @@
}
break;
}
+ case VEN_IOCTL_SET_H263_PLUSPTYPE:
+ {
+ struct vcd_property_hdr vcd_property_hdr;
+ struct venc_plusptype plusptype;
+ u32 enable;
+ u32 vcd_status = VCD_ERR_FAIL;
+ if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+ return -EFAULT;
+ if (copy_from_user(&plusptype, venc_msg.in,
+ sizeof(plusptype)))
+ return -EFAULT;
+ vcd_property_hdr.prop_id = VCD_I_H263_PLUSPTYPE;
+ vcd_property_hdr.sz = sizeof(u32);
+ enable = plusptype.plusptype_enable;
+ DBG("VEN_IOCTL_SET PLUSPTYPE = %d\n", enable);
+ vcd_status = vcd_set_property(client_ctx->vcd_handle,
+ &vcd_property_hdr, &enable);
+ if (vcd_status) {
+			pr_err("Setting plusptype failed\n");
+ return -EIO;
+ }
+ break;
+ }
case VEN_IOCTL_SET_AC_PREDICTION:
case VEN_IOCTL_GET_AC_PREDICTION:
case VEN_IOCTL_SET_RVLC:
diff --git a/drivers/video/msm/vidc/common/vcd/vcd.h b/drivers/video/msm/vidc/common/vcd/vcd.h
index 8f44a56..a22adeb 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd.h
@@ -398,4 +398,8 @@
u32 vcd_update_decoder_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl);
u32 vcd_set_perf_turbo_level(struct vcd_clnt_ctxt *cctxt);
+
+struct vcd_transc *vcd_get_first_in_use_trans_for_clnt(
+ struct vcd_clnt_ctxt *cctxt);
+
#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index b84ae44..a4c44f3 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -1684,7 +1684,7 @@
void vcd_send_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt,
struct vcd_frame_data *input_frame, u32 valid_opbuf)
{
- VCD_MSG_LOW("vcd_send_frame_done_in_eos:");
+ VCD_MSG_HIGH("vcd_send_frame_done_in_eos:");
if (!input_frame->virtual && !valid_opbuf) {
VCD_MSG_MED("Sending NULL output with EOS");
@@ -1804,12 +1804,41 @@
}
}
+struct vcd_transc *vcd_get_first_in_use_trans_for_clnt(
+ struct vcd_clnt_ctxt *cctxt)
+{
+ u32 i;
+ struct vcd_dev_ctxt *dev_ctxt;
+ VCD_MSG_HIGH("%s: ", __func__);
+ dev_ctxt = cctxt->dev_ctxt;
+ if (!dev_ctxt->trans_tbl) {
+ VCD_MSG_ERROR("%s: Null trans_tbl", __func__);
+ return NULL;
+ }
+ i = 0;
+ while (i < dev_ctxt->trans_tbl_size) {
+ if ((cctxt == dev_ctxt->trans_tbl[i].cctxt) &&
+ (dev_ctxt->trans_tbl[i].in_use)) {
+ VCD_MSG_MED("%s: found transc = 0x%p",
+ __func__, &dev_ctxt->trans_tbl[i]);
+ break;
+ }
+ i++;
+ }
+ if (i == dev_ctxt->trans_tbl_size) {
+		VCD_MSG_ERROR("%s: in_use transaction not found",
+ __func__);
+ return NULL;
+ } else
+ return &dev_ctxt->trans_tbl[i];
+}
+
u32 vcd_handle_recvd_eos(
struct vcd_clnt_ctxt *cctxt,
struct vcd_frame_data *input_frame, u32 *pb_eos_handled)
{
u32 rc;
-
+ struct vcd_transc *transc;
VCD_MSG_LOW("vcd_handle_recvd_eos:");
*pb_eos_handled = false;
@@ -1827,13 +1856,21 @@
*pb_eos_handled = true;
else if (cctxt->decoding && !input_frame->virtual)
cctxt->sched_clnt_hdl->tkns++;
- else if (!cctxt->decoding) {
- vcd_send_frame_done_in_eos(cctxt, input_frame, false);
- if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
- vcd_do_client_state_transition(cctxt,
- VCD_CLIENT_STATE_EOS,
- CLIENT_STATE_EVENT_NUMBER
- (encode_frame));
+ else if (!cctxt->decoding && !cctxt->status.frame_delayed) {
+ if (!cctxt->status.frame_submitted) {
+ vcd_send_frame_done_in_eos(cctxt, input_frame, false);
+ if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF)
+ vcd_do_client_state_transition(cctxt,
+ VCD_CLIENT_STATE_EOS,
+ CLIENT_STATE_EVENT_NUMBER
+ (encode_frame));
+ } else {
+ transc = vcd_get_first_in_use_trans_for_clnt(cctxt);
+ if (transc) {
+ transc->flags |= VCD_FRAME_FLAG_EOS;
+ VCD_MSG_HIGH("%s: Add EOS flag to transc",
+ __func__);
+ }
}
*pb_eos_handled = true;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index ad5938c..35ac651 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1399,12 +1399,49 @@
return 0;
}
+static void __evict_bh_lru(void *arg)
+{
+ struct bh_lru *b = &get_cpu_var(bh_lrus);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh) {
+ brelse(b->bhs[i]);
+ b->bhs[i] = NULL;
+ goto out;
+ }
+ }
+out:
+ put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+ struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh)
+ return 1;
+ }
+
+	return 0;
+}
+
void invalidate_bh_lrus(void)
{
on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
+void evict_bh_lrus(struct buffer_head *bh)
+{
+ on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(evict_bh_lrus);
+
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset)
{
@@ -3054,8 +3091,15 @@
do {
if (buffer_write_io_error(bh) && page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
- if (buffer_busy(bh))
- goto failed;
+ if (buffer_busy(bh)) {
+ /*
+ * Check if the busy failure was due to an
+ * outstanding LRU reference
+ */
+ evict_bh_lrus(bh);
+ if (buffer_busy(bh))
+ goto failed;
+ }
bh = bh->b_this_page;
} while (bh != head);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 29546b7..46ae59f 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -447,3 +447,4 @@
header-y += coresight-stm.h
header-y += ci-bridge-spi.h
header-y += msm_audio_amrwbplus.h
+header-y += avtimer.h
diff --git a/include/linux/avtimer.h b/include/linux/avtimer.h
new file mode 100644
index 0000000..e68da6a
--- /dev/null
+++ b/include/linux/avtimer.h
@@ -0,0 +1,21 @@
+#ifndef AVTIMER_H
+#define AVTIMER_H
+
+#include <linux/ioctl.h>
+
+#define MAJOR_NUM 100
+
+#define IOCTL_GET_AVTIMER_TICK _IOR(MAJOR_NUM, 0, char *)
+/*
+ * This IOCTL is used to read the avtimer tick value.
+ * The avtimer tick is a 64-bit value, hence the expected
+ * argument is of type uint64_t.
+ */
+struct dev_avtimer_data {
+ uint32_t avtimer_msw_phy_addr;
+ uint32_t avtimer_lsw_phy_addr;
+};
+int avcs_core_open(void);
+int avcs_core_disable_power_collapse(int disable);/* true or false */
+
+#endif
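
A hedged userspace sketch of reading the tick through this interface; the "/dev/avtimer" node name is an assumption, only the ioctl number and the uint64_t argument type come from this header.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/avtimer.h>

	int main(void)
	{
		uint64_t tick = 0;
		int fd = open("/dev/avtimer", O_RDONLY);	/* assumed node name */

		if (fd < 0)
			return 1;
		if (ioctl(fd, IOCTL_GET_AVTIMER_TICK, &tick) < 0) {
			close(fd);
			return 1;
		}
		printf("avtimer tick: %llu\n", (unsigned long long)tick);
		close(fd);
		return 0;
	}
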
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 211327f..3a29f20 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -33,12 +33,14 @@
* @ION_HEAP_TYPE_CP: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous. Used for content protection.
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
* @ION_HEAP_END: helper for iterating over heaps
*/
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_NUM_HEAPS,
@@ -47,6 +49,7 @@
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
/**
* heap flags - the lower 16 bits are used by core ion, the upper 16
@@ -84,6 +87,7 @@
* @memory_type:Memory type used for the heap
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
* @extra_data: Extra data specific to each heap type
+ * @priv: heap private data
*/
struct ion_platform_heap {
enum ion_heap_type type;
@@ -94,6 +98,7 @@
enum ion_memory_types memory_type;
unsigned int has_outer_cache;
void *extra_data;
+ void *priv;
};
/**
@@ -115,7 +120,7 @@
int (*request_region)(void *);
int (*release_region)(void *);
void *(*setup_region)(void);
- struct ion_platform_heap heaps[];
+ struct ion_platform_heap *heaps;
};
#ifdef CONFIG_ION
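
A hedged board-file sketch of how the new pointer-based heaps field and the ION_HEAP_TYPE_DMA heap type might be wired up; the heap ids and names below are hypothetical placeholders, not values from this patch.

	static struct ion_platform_heap example_heaps[] = {
		{
			.type = ION_HEAP_TYPE_SYSTEM,
			.id = 0,		/* hypothetical id */
			.name = "system",
		},
		{
			.type = ION_HEAP_TYPE_DMA,	/* backed by the DMA API */
			.id = 1,		/* hypothetical id */
			.name = "dma",
			.priv = NULL,		/* heap private data, e.g. a struct device * */
		},
	};

	static struct ion_platform_data example_ion_pdata = {
		.nr = ARRAY_SIZE(example_heaps),
		.heaps = example_heaps,	/* now a pointer, no longer a flexible array member */
	};
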
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index ae2c3d8..44f8538 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -123,6 +123,21 @@
* resistance of the pads, connectors, battery terminals
* and rsense.
* @led_src_config: Power source for anode of charger indicator LED.
+ * @btc_override:	disable the comparators for configurations where
+ *			suitable voltages do not appear on the vbatt therm line
+ *			for the charger to detect that the battery is cold or hot.
+ * @btc_override_cold_degc:	Temperature in degrees Celsius when the battery is
+ * deemed cold and charging never happens. Used
+ * only if btc_override = 1
+ * @btc_override_hot_degc:	Temperature in degrees Celsius when the battery is
+ * deemed hot and charging never happens. Used
+ * only if btc_override = 1
+ * @btc_delay_ms: Delay in milliseconds to monitor the battery temperature
+ * while charging when btc_override = 1
+ * @btc_panic_if_cant_stop_chg: flag to instruct the driver to panic if the
+ * driver couldn't stop charging when battery
+ * temperature is out of bounds. Used only if
+ * btc_override = 1
*/
struct pm8921_charger_platform_data {
struct pm8xxx_charger_core_data charger_cdata;
@@ -163,6 +178,11 @@
int rconn_mohm;
enum pm8921_chg_led_src_config led_src_config;
int battery_less_hardware;
+ int btc_override;
+ int btc_override_cold_degc;
+ int btc_override_hot_degc;
+ int btc_delay_ms;
+ int btc_panic_if_cant_stop_chg;
};
enum pm8921_charger_source {
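
A hedged sketch of how a board file might populate the new BTC override fields; the thresholds and delay are illustrative values labeled as such, not recommendations taken from this patch.

	static struct pm8921_charger_platform_data example_chg_pdata = {
		/* ... existing charger configuration ... */
		.btc_override			= 1,	/* use software BTC instead of the comparators */
		.btc_override_cold_degc		= 0,	/* illustrative threshold, degrees Celsius */
		.btc_override_hot_degc		= 45,	/* illustrative threshold, degrees Celsius */
		.btc_delay_ms			= 2000,	/* illustrative polling period while charging */
		.btc_panic_if_cant_stop_chg	= 0,
	};
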
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 2046198..ec1d619 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -88,6 +88,7 @@
bool boot_ro_lockable;
u8 raw_exception_status; /* 53 */
u8 raw_partition_support; /* 160 */
+ u8 raw_rpmb_size_mult; /* 168 */
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
@@ -228,6 +229,7 @@
#define MMC_BLK_DATA_AREA_MAIN (1<<0)
#define MMC_BLK_DATA_AREA_BOOT (1<<1)
#define MMC_BLK_DATA_AREA_GP (1<<2)
+#define MMC_BLK_DATA_AREA_RPMB (1<<3)
};
#define BKOPS_NUM_OF_SEVERITY_LEVELS 3
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 7247696..24b9790 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -178,6 +178,8 @@
extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write);
extern int mmc_hw_reset(struct mmc_host *host);
extern int mmc_hw_reset_check(struct mmc_host *host);
extern int mmc_can_reset(struct mmc_card *card);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 237a92e..46479a7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -290,6 +290,7 @@
#define EXT_CSD_BKOPS_START 164 /* W */
#define EXT_CSD_SANITIZE_START 165 /* W */
#define EXT_CSD_WR_REL_PARAM 166 /* RO */
+#define EXT_CSD_RPMB_MULT 168 /* RO */
#define EXT_CSD_BOOT_WP 173 /* R/W */
#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
#define EXT_CSD_PART_CONFIG 179 /* R/W */
@@ -346,6 +347,7 @@
#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7)
#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1)
#define EXT_CSD_PART_CONFIG_ACC_BOOT1 (0x2)
+#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
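
A hedged sketch of how block-layer code might address the RPMB partition using the new defines and mmc_set_blockcount(); the helper below is illustrative only and glosses over the nonce/MAC framing that real RPMB frames require.

	static int example_select_rpmb(struct mmc_card *card, unsigned int blocks)
	{
		u8 part_config;
		int err;

		part_config = (card->ext_csd.part_config
				& ~EXT_CSD_PART_CONFIG_ACC_MASK)
				| EXT_CSD_PART_CONFIG_ACC_RPMB;

		/* route subsequent data commands to the RPMB partition */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 part_config, card->ext_csd.part_time);
		if (err)
			return err;

		/* RPMB data transfers are reliable writes, hence is_rel_write = true */
		return mmc_set_blockcount(card, blocks, true);
	}
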
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 08f74e6..f8a3a10 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -61,14 +61,6 @@
MIGRATE_TYPES
};
-/*
- * Returns a list which contains the migrate types on to which
- * an allocation falls back when the free list for the migrate
- * type mtype is depleted.
- * The end of the list is delimited by the type MIGRATE_RESERVE.
- */
-extern int *get_migratetype_fallbacks(int mtype);
-
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define cma_wmark_pages(zone) zone->min_cma_pages
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index ec043dd..21648ad 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -146,6 +146,7 @@
size_t secure_size; /* Size used for securing heap when heap is shared*/
int reusable;
int mem_is_fmem;
+ int is_cma;
enum ion_fixed_position fixed_position;
int iommu_map_all;
int iommu_2x_map_domain;
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index ee136cac..4e62b4f 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -27,6 +27,7 @@
/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
+#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000
/* Memory caching hints */
#define KGSL_CACHEMODE_MASK 0x0C000000
@@ -554,6 +555,8 @@
* KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
* KGSL_MEMTYPE*: usage hint for debugging aid
* KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
+ * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
+ * address will be 0. Calling mmap() will set the GPU address.
*/
struct kgsl_gpumem_alloc_id {
unsigned int id;
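
A hedged userspace sketch of the CPU-map flow described above: request an allocation with KGSL_MEMFLAGS_USE_CPU_MAP, observe that the GPU address comes back as 0, and let a subsequent mmap() assign it. The IOCTL_KGSL_GPUMEM_ALLOC_ID name and the size/mmapsize/gpuaddr members are assumptions about the KGSL UAPI, not guaranteed by this hunk.

	struct kgsl_gpumem_alloc_id req = {
		.size = 4096,
		.flags = KGSL_MEMFLAGS_USE_CPU_MAP,
	};

	if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &req) == 0) {
		/* with USE_CPU_MAP, req.gpuaddr is expected to be 0 here;
		 * a later mmap() on the kgsl fd assigns the GPU address
		 */
	}
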
diff --git a/include/linux/msm_vidc_enc.h b/include/linux/msm_vidc_enc.h
index 519c537..ea7db81 100644
--- a/include/linux/msm_vidc_enc.h
+++ b/include/linux/msm_vidc_enc.h
@@ -458,6 +458,9 @@
#define VEN_IOCTL_SET_SLICE_DELIVERY_MODE \
_IO(VEN_IOCTLBASE_ENC, 50)
+#define VEN_IOCTL_SET_H263_PLUSPTYPE \
+ _IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+
struct venc_switch{
unsigned char status;
};
@@ -519,6 +522,11 @@
unsigned long maxqp;
unsigned long minqp;
};
+
+struct venc_plusptype {
+ unsigned long plusptype_enable;
+};
+
struct venc_intraperiod{
unsigned long num_pframes;
unsigned long num_bframes;
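
A hedged userspace sketch of enabling H.263 PLUSPTYPE through the new ioctl; the encoder device node is an assumption, and only the .in member of struct venc_ioctl_msg is relied on here, mirroring the driver code above.

	struct venc_plusptype plusptype = { .plusptype_enable = 1 };
	struct venc_ioctl_msg msg = { .in = &plusptype, .out = NULL };

	if (ioctl(fd, VEN_IOCTL_SET_H263_PLUSPTYPE, &msg) < 0)
		perror("VEN_IOCTL_SET_H263_PLUSPTYPE");
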
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index e5e0bb4..3858022 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1840,6 +1840,12 @@
V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED = 1
};
+#define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
+enum v4l2_mpeg_vidc_perf_level {
+ V4L2_CID_MPEG_VIDC_PERF_LEVEL_PERFORMANCE = 0,
+ V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO = 1,
+};
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
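
A hedged sketch of selecting the turbo performance level from userspace with the new control; this uses the standard VIDIOC_S_CTRL path and assumes fd is an open msm_vidc video device.

	struct v4l2_control ctrl = {
		.id = V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL,
		.value = V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO,
	};

	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");
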
diff --git a/include/media/msm/vcd_property.h b/include/media/msm/vcd_property.h
index 545dcd2..2ce1a88 100644
--- a/include/media/msm/vcd_property.h
+++ b/include/media/msm/vcd_property.h
@@ -57,6 +57,7 @@
#define VCD_I_SET_TURBO_CLK (VCD_START_BASE + 0x29)
#define VCD_I_ENABLE_DELIMITER_FLAG (VCD_START_BASE + 0x2A)
#define VCD_I_ENABLE_VUI_TIMING_INFO (VCD_START_BASE + 0x2B)
+#define VCD_I_H263_PLUSPTYPE (VCD_START_BASE + 0x2C)
#define VCD_START_REQ (VCD_START_BASE + 0x1000)
@@ -294,6 +295,10 @@
u32 min_qp;
};
+struct vcd_property_plusptype {
+ u32 plusptype_enable;
+};
+
struct vcd_property_session_qp {
u32 i_frame_qp;
u32 p_frame_qp;
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index a932011..971c9b3 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1032,7 +1032,13 @@
#define CAMERA_EFFECT_EMBOSS 9
#define CAMERA_EFFECT_SKETCH 10
#define CAMERA_EFFECT_NEON 11
-#define CAMERA_EFFECT_MAX 12
+#define CAMERA_EFFECT_FADED 12
+#define CAMERA_EFFECT_VINTAGECOOL 13
+#define CAMERA_EFFECT_VINTAGEWARM 14
+#define CAMERA_EFFECT_ACCENT_BLUE 15
+#define CAMERA_EFFECT_ACCENT_GREEN 16
+#define CAMERA_EFFECT_ACCENT_ORANGE 17
+#define CAMERA_EFFECT_MAX 18
/* QRD */
#define CAMERA_EFFECT_BW 10
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 3dd0ccd..9201a0a 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -1893,6 +1893,11 @@
struct afe_param_id_internal_bt_fm_cfg int_bt_fm;
} __packed;
+struct afe_audioif_config_command_no_payload {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+} __packed;
+
struct afe_audioif_config_command {
struct apr_hdr hdr;
struct afe_port_cmd_set_param_v2 param;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 45a8d86..a2bad88 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -401,9 +401,16 @@
static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
+ struct hrtimer *timer = &rq->hrtick_timer;
+ ktime_t soft, hard;
+ unsigned long delta;
+
+ soft = hrtimer_get_softexpires(timer);
+ hard = hrtimer_get_expires(timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
raw_spin_lock(&rq->lock);
- hrtimer_restart(&rq->hrtick_timer);
+ __hrtimer_start_range_ns(timer, soft, delta, HRTIMER_MODE_ABS, 0);
rq->hrtick_csd_pending = 0;
raw_spin_unlock(&rq->lock);
}
@@ -421,7 +428,8 @@
hrtimer_set_expires(timer, time);
if (rq == this_rq()) {
- hrtimer_restart(timer);
+ __hrtimer_start_range_ns(timer, ns_to_ktime(delay), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
} else if (!rq->hrtick_csd_pending) {
__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c3142e8..92dd060 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -913,11 +913,6 @@
[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
};
-int *get_migratetype_fallbacks(int mtype)
-{
- return fallbacks[mtype];
-}
-
/*
* Move the free pages in a range to the free lists of the requested type.
* Note that start_page and end_pages are not aligned on a pageblock
@@ -1400,15 +1395,18 @@
unsigned int order;
unsigned long watermark;
struct zone *zone;
+ int mt;
BUG_ON(!PageBuddy(page));
zone = page_zone(page);
order = page_order(page);
+ mt = get_pageblock_migratetype(page);
/* Obey watermarks as if the page was being allocated */
watermark = low_wmark_pages(zone) + (1 << order);
- if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+ if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE &&
+ !zone_watermark_ok(zone, 0, watermark, 0, 0))
return 0;
/* Remove page from free list */
@@ -1424,7 +1422,7 @@
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
- int mt = get_pageblock_migratetype(page);
+ mt = get_pageblock_migratetype(page);
if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
set_pageblock_migratetype(page,
MIGRATE_MOVABLE);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index e5f43ec..ea116e9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1840,15 +1840,6 @@
struct hci_cp_auth_requested cp;
hci_remove_link_key(hdev, &conn->dst);
cp.handle = cpu_to_le16(conn->handle);
- /*Initiates dedicated bonding as pin or key is missing
- on remote device*/
- /*In case if remote device is ssp supported,
- reduce the security level to MEDIUM if it is HIGH*/
- if (conn->ssp_mode && conn->auth_initiator &&
- conn->io_capability != 0x03) {
- conn->pending_sec_level = BT_SECURITY_HIGH;
- conn->auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- }
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fa2469e..44e2feb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -93,6 +93,13 @@
static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process);
static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l);
static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to);
+static void l2cap_queue_acl_data(struct work_struct *worker);
+static struct att_channel_parameters{
+ struct sk_buff *skb;
+ struct l2cap_conn *conn;
+ __le16 cid;
+ int dir;
+} att_chn_params;
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
@@ -1209,6 +1216,8 @@
kfree(conn);
}
+ att_chn_params.conn = NULL;
+ BT_DBG("att_chn_params.conn set to NULL");
}
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
@@ -3600,8 +3609,7 @@
if (mtu < L2CAP_DEFAULT_MIN_MTU) {
result = L2CAP_CONF_UNACCEPT;
pi->omtu = L2CAP_DEFAULT_MIN_MTU;
- }
- else {
+ } else {
pi->omtu = mtu;
pi->conf_state |= L2CAP_CONF_MTU_DONE;
}
@@ -7275,6 +7283,7 @@
struct sk_buff *skb_rsp;
struct l2cap_hdr *lh;
int dir;
+ struct work_struct *open_worker;
u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
L2CAP_ATT_NOT_SUPPORTED};
@@ -7310,8 +7319,18 @@
BT_DBG("sk %p, len %d", sk, skb->len);
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
- goto drop;
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) {
+ att_chn_params.cid = cid;
+ att_chn_params.conn = conn;
+ att_chn_params.dir = dir;
+ att_chn_params.skb = skb;
+		open_worker = kzalloc(sizeof(*open_worker), GFP_ATOMIC);
+		if (!open_worker) {
+			BT_ERR("Out of memory");
+			goto drop;
+		}
+		INIT_WORK(open_worker, l2cap_queue_acl_data);
+ schedule_work(open_worker);
+ goto done;
+ }
if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
@@ -7787,6 +7806,80 @@
return 0;
}
+static void l2cap_queue_acl_data(struct work_struct *worker)
+{
+ struct sock *sk = NULL;
+ int attempts = 0;
+ struct sk_buff *skb_rsp;
+ struct l2cap_hdr *lh;
+ u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
+ L2CAP_ATT_NOT_SUPPORTED};
+
+ for (attempts = 0; attempts < 40; attempts++) {
+ msleep(50);
+ if (!att_chn_params.conn) {
+ BT_DBG("att_chn_params.conn is NULL");
+ return;
+ }
+ sk = l2cap_find_sock_by_fixed_cid_and_dir
+ (att_chn_params.cid,
+ att_chn_params.conn->src,
+ att_chn_params.conn->dst,
+ att_chn_params.dir);
+ bh_lock_sock(sk);
+ if (sk->sk_state == BT_CONNECTED) {
+ sock_queue_rcv_skb(sk, att_chn_params.skb);
+ if (sk)
+ bh_unlock_sock(sk);
+ return;
+ }
+ bh_unlock_sock(sk);
+ }
+ bh_lock_sock(sk);
+
+ if (att_chn_params.skb->data[0] != L2CAP_ATT_INDICATE)
+ goto not_indicate;
+
+ /* If this is an incoming Indication, we are required to confirm */
+ skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
+ if (!skb_rsp)
+ goto free_skb;
+
+ lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(u8));
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+ err_rsp[0] = L2CAP_ATT_CONFIRM;
+ memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
+ hci_send_acl(att_chn_params.conn->hcon, NULL, skb_rsp, 0);
+ goto free_skb;
+
+not_indicate:
+ if (att_chn_params.skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
+ att_chn_params.skb->data[0] == L2CAP_ATT_CONFIRM)
+ goto free_skb;
+
+ /* If this is an incoming PDU that requires a response, respond with
+ * a generic error so the remote device doesn't hang */
+
+ skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
+ if (!skb_rsp)
+ goto free_skb;
+
+ lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(err_rsp));
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+ err_rsp[1] = att_chn_params.skb->data[0];
+ memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
+ hci_send_acl(att_chn_params.conn->hcon, NULL, skb_rsp, 0);
+
+free_skb:
+ kfree_skb(att_chn_params.skb);
+
+ if (sk)
+ bh_unlock_sock(sk);
+
+}
+
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, l2cap_debugfs_show, inode->i_private);
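
The new l2cap_queue_acl_data() worker defers incoming ATT data until the fixed-channel socket reaches BT_CONNECTED, polling from process context instead of dropping the frame. The snippet below is a self-contained sketch of that defer-to-a-worker pattern with hypothetical names; it additionally embeds the work item in its own context struct so the allocation can be freed from the worker once delivery has been attempted.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical context carrying everything the deferred handler needs. */
struct deferred_att_rx {
	struct work_struct work;
	struct sk_buff *skb;
};

static void deferred_att_rx_fn(struct work_struct *work)
{
	struct deferred_att_rx *d =
		container_of(work, struct deferred_att_rx, work);

	/* ... retry delivery until the target socket is connected ... */
	kfree_skb(d->skb);
	kfree(d);
}

static int defer_att_rx(struct sk_buff *skb)
{
	struct deferred_att_rx *d = kzalloc(sizeof(*d), GFP_ATOMIC);

	if (!d)
		return -ENOMEM;

	d->skb = skb;
	INIT_WORK(&d->work, deferred_att_rx_fn);
	schedule_work(&d->work);
	return 0;
}
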
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 493801a..8568dae7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -390,12 +390,17 @@
goto failed;
}
+ hci_dev_unlock_bh(hdev);
+
if (cp->val)
queue_work(hdev->workqueue, &hdev->power_on);
else
queue_work(hdev->workqueue, &hdev->power_off);
err = 0;
+ hci_dev_put(hdev);
+
+ return err;
failed:
hci_dev_unlock_bh(hdev);
@@ -3089,6 +3094,12 @@
goto no_auto_confirm;
}
+ /* Show the bonding dialog if both sides require bonding */
+ if ((conn->auth_type > 0x01) && (conn->remote_auth > 0x01)) {
+ if (!loc_mitm && !rem_mitm)
+ value = 0;
+ goto no_auto_confirm;
+ }
if ((!loc_mitm || rem_cap == 0x03) && (!rem_mitm || loc_cap == 0x03))
ev.auto_confirm = 1;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 276ff71..56b2cfa 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -756,8 +756,7 @@
hcon->smp_conn = conn;
hcon->pending_sec_level = sec_level;
-
- if ((hcon->link_mode & HCI_LM_MASTER) && !hcon->sec_req) {
+ if (hcon->link_mode & HCI_LM_MASTER) {
struct link_key *key;
key = hci_find_link_key_type(hcon->hdev, conn->dst,
diff --git a/scripts/build-all.py b/scripts/build-all.py
index 296d9ad..c59ffae 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -35,13 +35,14 @@
import subprocess
import os
import os.path
+import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
-make_command = ["vmlinux", "modules"]
+make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
@@ -83,12 +84,15 @@
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
- for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
- names[os.path.basename(n)[:-10]] = n
- for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
- names[os.path.basename(n)[:-10]] = n
- for n in glob.glob('arch/arm/configs/apq*_defconfig'):
- names[os.path.basename(n)[:-10]] = n
+ arch_pats = (
+ r'[fm]sm[0-9]*_defconfig',
+ r'apq*_defconfig',
+ r'qsd*_defconfig',
+ r'omap2*_defconfig',
+ )
+ for p in arch_pats:
+ for n in glob.glob('arch/arm/configs/' + p):
+ names[os.path.basename(n)[:-10]] = n
return names
class Builder:
@@ -142,23 +146,41 @@
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
+ staging_dir = 'install_staging'
+ modi_dir = '%s' % staging_dir
+ hdri_dir = '%s/usr' % staging_dir
+ shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
+
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
- build = Builder(log_name)
-
- result = build.run(['make', 'O=%s' % dest_dir] + make_command)
-
- if result != 0:
- if all_options.keep_going:
- failed_targets.append(target)
- fail_or_error = error
+ # Build targets can be dependent upon the completion of previous
+ # build targets, so build them one at a time.
+ cmd_line = ['make',
+ 'INSTALL_HDR_PATH=%s' % hdri_dir,
+ 'INSTALL_MOD_PATH=%s' % modi_dir,
+ 'O=%s' % dest_dir]
+ build_targets = []
+ for c in make_command:
+ if re.match(r'^-{1,2}\w', c):
+ cmd_line.append(c)
else:
- fail_or_error = fail
- fail_or_error("Failed to build %s, see %s" % (target, build.logname))
+ build_targets.append(c)
+ for t in build_targets:
+ build = Builder(log_name)
+
+ result = build.run(cmd_line + [t])
+ if result != 0:
+ if all_options.keep_going:
+ failed_targets.append(target)
+ fail_or_error = error
+ else:
+ fail_or_error = fail
+ fail_or_error("Failed to build %s, see %s" %
+ (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index fd9d825..3fc3f32 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -1212,6 +1212,42 @@
return 0;
}
+static void sitar_enable_classg(struct snd_soc_codec *codec,
+ bool enable)
+{
+
+ if (enable) {
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL, 0x10, 0x00);
+ snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x07, 0x00);
+ snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x08, 0x00);
+ snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x10, 0x00);
+
+ } else {
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL, 0x10, 0x10);
+ snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x07, 0x03);
+ snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x08, 0x08);
+ snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x10, 0x10);
+ }
+}
+
+static bool sitar_is_hph_pa_on(struct snd_soc_codec *codec)
+{
+ u8 hph_reg_val = 0;
+ hph_reg_val = snd_soc_read(codec, SITAR_A_RX_HPH_CNP_EN);
+
+ return (hph_reg_val & 0x30) ? true : false;
+}
+
+static bool sitar_is_line_pa_on(struct snd_soc_codec *codec)
+{
+ u8 line_reg_val = 0;
+ line_reg_val = snd_soc_read(codec, SITAR_A_RX_LINE_CNP_EN);
+
+ return (line_reg_val & 0x03) ? true : false;
+}
+
static int sitar_codec_enable_lineout(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1235,6 +1271,15 @@
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (sitar_is_hph_pa_on(codec)) {
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX1_B6_CTL,
+ 0x20, 0x00);
+ sitar_enable_classg(codec, false);
+ } else {
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX1_B6_CTL,
+ 0x20, 0x20);
+ sitar_enable_classg(codec, true);
+ }
snd_soc_update_bits(codec, lineout_gain_reg, 0x10, 0x10);
break;
case SND_SOC_DAPM_POST_PMU:
@@ -1243,6 +1288,11 @@
usleep_range(32000, 32000);
break;
case SND_SOC_DAPM_POST_PMD:
+ if (sitar_is_hph_pa_on(codec))
+ sitar_enable_classg(codec, true);
+ else
+ sitar_enable_classg(codec, false);
+
snd_soc_update_bits(codec, lineout_gain_reg, 0x10, 0x00);
break;
}
@@ -1585,14 +1635,6 @@
return rc;
}
-static bool sitar_is_hph_pa_on(struct snd_soc_codec *codec)
-{
- u8 hph_reg_val = 0;
- hph_reg_val = snd_soc_read(codec, SITAR_A_RX_HPH_CNP_EN);
-
- return (hph_reg_val & 0x30) ? true : false;
-}
-
static bool sitar_is_hph_dac_on(struct snd_soc_codec *codec, int left)
{
u8 hph_reg_val = 0;
@@ -1947,6 +1989,11 @@
SITAR_RELEASE_LOCK(sitar->codec_resource_lock);
}
+ if (sitar_is_line_pa_on(codec))
+ sitar_enable_classg(codec, false);
+ else
+ sitar_enable_classg(codec, true);
+
break;
case SND_SOC_DAPM_POST_PMD:
@@ -1979,6 +2026,11 @@
w->name);
usleep_range(10000, 10000);
+ if (sitar_is_line_pa_on(codec))
+ sitar_enable_classg(codec, true);
+ else
+ sitar_enable_classg(codec, false);
+
break;
}
return 0;
@@ -2033,24 +2085,21 @@
pr_debug("%s %d\n", __func__, event);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, SITAR_A_CDC_CLK_OTHR_RESET_CTL, 0x10,
- 0x00);
snd_soc_update_bits(codec, SITAR_A_CDC_CLK_OTHR_CTL, 0x01,
0x01);
snd_soc_update_bits(codec, SITAR_A_CDC_CLSG_CTL, 0x08, 0x08);
usleep_range(200, 200);
- snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x10, 0x00);
break;
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(codec, SITAR_A_CDC_CLK_OTHR_RESET_CTL, 0x10,
- 0x10);
- usleep_range(20, 20);
- snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x08, 0x08);
- snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x10, 0x10);
snd_soc_update_bits(codec, SITAR_A_CDC_CLSG_CTL, 0x08, 0x00);
+ /*
+ * This delay is for the class G controller to settle down
+ * after turn OFF. The delay is as per the hardware spec for
+ * the codec
+ */
+ usleep_range(20, 20);
snd_soc_update_bits(codec, SITAR_A_CDC_CLK_OTHR_CTL, 0x01,
0x00);
- snd_soc_update_bits(codec, SITAR_A_CP_STATIC, 0x08, 0x00);
break;
}
return 0;
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index e11b985..e672cdb 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -295,6 +295,11 @@
(1 << AIF1_CAP) | (1 << AIF2_CAP), /* AIF2_CAP */
};
+static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
+ 0, /* AIF1_PB */
+ 0, /* AIF1_CAP */
+};
+
struct tabla_priv {
struct snd_soc_codec *codec;
struct tabla_reg_address reg_addr;
@@ -1746,6 +1751,7 @@
u32 dai_id = widget->shift;
u32 port_id = mixer->shift;
u32 enable = ucontrol->value.integer.value[0];
+ u32 vtable = vport_check_table[dai_id];
pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
widget->name, ucontrol->id.name, widget->value, widget->shift,
@@ -1767,8 +1773,13 @@
/* only add to the list if value not set
*/
if (enable && !(widget->value & 1 << port_id)) {
+ if (tabla_p->intf_type ==
+ WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ vtable = vport_check_table[dai_id];
+ if (tabla_p->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
+ vtable = vport_i2s_check_table[dai_id];
if (wcd9xxx_tx_vport_validation(
- vport_check_table[dai_id],
+ vtable,
port_id,
tabla_p->dai)) {
pr_info("%s: TX%u is used by other virtual port\n",
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 5ffb60a..6aa5bbb 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -181,6 +181,11 @@
(1 << AIF1_CAP) | (1 << AIF2_CAP), /* AIF2_CAP */
};
+static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
+ 0, /* AIF1_PB */
+ 0, /* AIF1_CAP */
+};
+
struct taiko_priv {
struct snd_soc_codec *codec;
u32 adc_count;
@@ -1523,6 +1528,7 @@
u32 dai_id = widget->shift;
u32 port_id = mixer->shift;
u32 enable = ucontrol->value.integer.value[0];
+ u32 vtable = vport_check_table[dai_id];
pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
@@ -1539,7 +1545,6 @@
return -EINVAL;
}
}
- if (taiko_p->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
switch (dai_id) {
case AIF1_CAP:
case AIF2_CAP:
@@ -1547,8 +1552,16 @@
/* only add to the list if value not set
*/
if (enable && !(widget->value & 1 << port_id)) {
+
+ if (taiko_p->intf_type ==
+ WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ vtable = vport_check_table[dai_id];
+ if (taiko_p->intf_type ==
+ WCD9XXX_INTERFACE_TYPE_I2C)
+ vtable = vport_i2s_check_table[dai_id];
+
if (wcd9xxx_tx_vport_validation(
- vport_check_table[dai_id],
+ vtable,
port_id,
taiko_p->dai)) {
pr_debug("%s: TX%u is used by other\n"
@@ -1583,7 +1596,6 @@
mutex_unlock(&codec->mutex);
return -EINVAL;
}
- }
pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
widget->name, widget->sname, widget->value, widget->shift);
@@ -4625,6 +4637,8 @@
*/
{TAIKO_A_RX_HPH_OCP_CTL, 0xE1, 0x61},
{TAIKO_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+ {TAIKO_A_RX_HPH_L_TEST, 0x01, 0x01},
+ {TAIKO_A_RX_HPH_R_TEST, 0x01, 0x01},
/* Initialize gain registers to use register gain */
{TAIKO_A_RX_HPH_L_GAIN, 0x20, 0x20},
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 37a4234..f8185bb 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -1076,6 +1076,22 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ {
+ .name = "VoLTE",
+ .stream_name = "VoLTE",
+ .cpu_dai_name = "VoLTE",
+ .platform_name = "msm-pcm-voice",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ /* this dai-link has playback support */
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_VOLTE,
+ },
/* Backend BT/FM DAI Links */
{
.name = LPASS_BE_INT_BT_SCO_RX,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index f1e0f3a..a1e461d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -33,7 +33,9 @@
static struct snd_pcm_hardware msm_pcm_hardware = {
- .info = SNDRV_PCM_INFO_INTERLEAVED,
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.rate_min = 8000,
@@ -205,6 +207,55 @@
return 0;
}
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+ uint16_t session_id = 0;
+
+ pr_debug("%s: cmd = %d\n", __func__, cmd);
+ if (is_volte(prtd))
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ else
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_STOP:
+ pr_debug("Start & Stop Voice call not handled in Trigger.\n");
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ pr_debug("%s: resume call session_id = %d\n", __func__,
+ session_id);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_prepare(substream);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_prepare(substream);
+ if (prtd->playback_start && prtd->capture_start)
+ voc_resume_voice_call(session_id);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ pr_debug("%s: pause call session_id=%d\n",
+ __func__, session_id);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (prtd->playback_start)
+ prtd->playback_start = 0;
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (prtd->capture_start)
+ prtd->capture_start = 0;
+ }
+ voc_standby_voice_call(session_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static int msm_voice_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -437,6 +488,7 @@
.hw_params = msm_pcm_hw_params,
.close = msm_pcm_close,
.prepare = msm_pcm_prepare,
+ .trigger = msm_pcm_trigger,
};
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 7267a82..985f76b 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -37,17 +37,16 @@
atomic_t copp_id[AFE_MAX_PORTS];
atomic_t copp_cnt[AFE_MAX_PORTS];
atomic_t copp_stat[AFE_MAX_PORTS];
- u32 mem_map_handle[AFE_MAX_PORTS];
wait_queue_head_t wait[AFE_MAX_PORTS];
-};
-static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
-static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
+ struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
+ struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
/* 0 - (MAX_AUDPROC_TYPES -1): audproc handles */
/* (MAX_AUDPROC_TYPES -1) - (2 * MAX_AUDPROC_TYPES -1): audvol handles */
-atomic_t mem_map_handles[(2 * MAX_AUDPROC_TYPES)];
-atomic_t mem_map_index;
+ atomic_t mem_map_cal_handles[(2 * MAX_AUDPROC_TYPES)];
+ atomic_t mem_map_cal_index;
+};
static struct adm_ctl this_adm;
@@ -92,14 +91,14 @@
pr_debug("Resetting calibration blocks");
for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
/* Device calibration */
- mem_addr_audproc[i].cal_size = 0;
- mem_addr_audproc[i].cal_kvaddr = 0;
- mem_addr_audproc[i].cal_paddr = 0;
+ this_adm.mem_addr_audproc[i].cal_size = 0;
+ this_adm.mem_addr_audproc[i].cal_kvaddr = 0;
+ this_adm.mem_addr_audproc[i].cal_paddr = 0;
/* Volume calibration */
- mem_addr_audvol[i].cal_size = 0;
- mem_addr_audvol[i].cal_kvaddr = 0;
- mem_addr_audvol[i].cal_paddr = 0;
+ this_adm.mem_addr_audvol[i].cal_size = 0;
+ this_adm.mem_addr_audvol[i].cal_kvaddr = 0;
+ this_adm.mem_addr_audvol[i].cal_paddr = 0;
}
return 0;
}
@@ -199,8 +198,9 @@
case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
__func__);
- atomic_set(&mem_map_handles[
- atomic_read(&mem_map_index)], *payload);
+ atomic_set(&this_adm.mem_map_cal_handles[
+ atomic_read(&this_adm.mem_map_cal_index)],
+ *payload);
atomic_set(&this_adm.copp_stat[0], 1);
wake_up(&this_adm.wait[index]);
break;
@@ -247,8 +247,8 @@
adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
adm_params.payload_addr_lsw = aud_cal->cal_paddr;
adm_params.payload_addr_msw = 0;
- adm_params.mem_map_handle = atomic_read(&mem_map_handles[
- atomic_read(&mem_map_index)]);
+ adm_params.mem_map_handle = atomic_read(&this_adm.mem_map_cal_handles[
+ atomic_read(&this_adm.mem_map_cal_index)]);
adm_params.payload_size = aud_cal->cal_size;
atomic_set(&this_adm.copp_stat[index], 0);
@@ -293,15 +293,16 @@
get_audproc_cal(acdb_path, &aud_cal);
/* map & cache buffers used */
- atomic_set(&mem_map_index, acdb_path);
- if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
- (aud_cal.cal_size > 0)) ||
- (aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) {
+ atomic_set(&this_adm.mem_map_cal_index, acdb_path);
+ if (((this_adm.mem_addr_audproc[acdb_path].cal_paddr !=
+ aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
+ (aud_cal.cal_size >
+ this_adm.mem_addr_audproc[acdb_path].cal_size)) {
- if (mem_addr_audproc[acdb_path].cal_paddr != 0)
+ if (this_adm.mem_addr_audproc[acdb_path].cal_paddr != 0)
adm_memory_unmap_regions(port_id,
- &mem_addr_audproc[acdb_path].cal_paddr,
- &size, 1);
+ &this_adm.mem_addr_audproc[acdb_path].
+ cal_paddr, &size, 1);
result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
0, &size, 1);
@@ -310,9 +311,9 @@
acdb_path, aud_cal.cal_paddr,
aud_cal.cal_size);
} else {
- mem_addr_audproc[acdb_path].cal_paddr =
+ this_adm.mem_addr_audproc[acdb_path].cal_paddr =
aud_cal.cal_paddr;
- mem_addr_audproc[acdb_path].cal_size = size;
+ this_adm.mem_addr_audproc[acdb_path].cal_size = size;
}
}
@@ -327,14 +328,16 @@
get_audvol_cal(acdb_path, &aud_cal);
/* map & cache buffers used */
- atomic_set(&mem_map_index, (acdb_path + MAX_AUDPROC_TYPES));
- if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
- (aud_cal.cal_size > 0)) ||
- (aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) {
+ atomic_set(&this_adm.mem_map_cal_index,
+ (acdb_path + MAX_AUDPROC_TYPES));
+ if (((this_adm.mem_addr_audvol[acdb_path].cal_paddr !=
+ aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
+ (aud_cal.cal_size >
+ this_adm.mem_addr_audvol[acdb_path].cal_size)) {
- if (mem_addr_audvol[acdb_path].cal_paddr != 0)
+ if (this_adm.mem_addr_audvol[acdb_path].cal_paddr != 0)
adm_memory_unmap_regions(port_id,
- &mem_addr_audvol[acdb_path].cal_paddr,
+ &this_adm.mem_addr_audvol[acdb_path].cal_paddr,
&size, 1);
result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
@@ -344,9 +347,9 @@
acdb_path, aud_cal.cal_paddr,
aud_cal.cal_size);
} else {
- mem_addr_audvol[acdb_path].cal_paddr =
+ this_adm.mem_addr_audvol[acdb_path].cal_paddr =
aud_cal.cal_paddr;
- mem_addr_audvol[acdb_path].cal_size = size;
+ this_adm.mem_addr_audvol[acdb_path].cal_size = size;
}
}
@@ -817,8 +820,8 @@
unmap_regions.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
unmap_regions.hdr.token = port_id;
unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
- unmap_regions.mem_map_handle = atomic_read(&mem_map_handles[
- atomic_read(&mem_map_index)]);
+ unmap_regions.mem_map_handle = atomic_read(&this_adm.
+ mem_map_cal_handles[atomic_read(&this_adm.mem_map_cal_index)]);
atomic_set(&this_adm.copp_stat[0], 0);
ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
if (ret < 0) {
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index de9841a..fb6b56e 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -39,12 +39,14 @@
void *tx_private_data;
void *rx_private_data;
uint32_t mmap_handle;
+
+ struct acdb_cal_block afe_cal_addr[MAX_AUDPROC_TYPES];
+ atomic_t mem_map_cal_handles[MAX_AUDPROC_TYPES];
+ atomic_t mem_map_cal_index;
};
static struct afe_ctl this_afe;
-static struct acdb_cal_block afe_cal_addr[MAX_AUDPROC_TYPES];
-
#define TIMEOUT_MS 1000
#define Q6AFE_MAX_VOLUME 0x3FFF
@@ -113,7 +115,13 @@
AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS) {
pr_debug("%s: mmap_handle: 0x%x\n",
__func__, payload[0]);
- this_afe.mmap_handle = (uint32_t)payload[0];
+ if (atomic_read(&this_afe.mem_map_cal_index) != -1)
+ atomic_set(&this_afe.mem_map_cal_handles[
+ atomic_read(
+ &this_afe.mem_map_cal_index)],
+ (uint32_t)payload[0]);
+ else
+ this_afe.mmap_handle = (uint32_t)payload[0];
atomic_set(&this_afe.state, 0);
wake_up(&this_afe.wait[data->token]);
} else if (data->opcode == AFE_EVENT_RT_PROXY_PORT_STATUS) {
@@ -248,9 +256,77 @@
}
return ret;
}
+
static void afe_send_cal_block(int32_t path, u16 port_id)
{
- /* To come back */
+ int result = 0;
+ int index = 0;
+ int size = 4096;
+ struct acdb_cal_block cal_block;
+ struct afe_audioif_config_command_no_payload afe_cal;
+ pr_debug("%s: path %d\n", __func__, path);
+
+ get_afe_cal(path, &cal_block);
+ if (cal_block.cal_size <= 0) {
+ pr_debug("%s: No AFE cal to send!\n", __func__);
+ goto done;
+ }
+
+ if ((this_afe.afe_cal_addr[path].cal_paddr != cal_block.cal_paddr) ||
+ (cal_block.cal_size > this_afe.afe_cal_addr[path].cal_size)) {
+ atomic_set(&this_afe.mem_map_cal_index, path);
+ if (this_afe.afe_cal_addr[path].cal_paddr != 0)
+ afe_cmd_memory_unmap(
+ this_afe.afe_cal_addr[path].cal_paddr);
+
+ afe_cmd_memory_map(cal_block.cal_paddr, size);
+ atomic_set(&this_afe.mem_map_cal_index, -1);
+ this_afe.afe_cal_addr[path].cal_paddr = cal_block.cal_paddr;
+ this_afe.afe_cal_addr[path].cal_size = size;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ if (index < 0) {
+ pr_debug("%s: AFE port index invalid!\n", __func__);
+ goto done;
+ }
+
+ afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ afe_cal.hdr.pkt_size = sizeof(afe_cal);
+ afe_cal.hdr.src_port = 0;
+ afe_cal.hdr.dest_port = 0;
+ afe_cal.hdr.token = index;
+ afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ afe_cal.param.port_id = port_id;
+ afe_cal.param.payload_size = cal_block.cal_size;
+ afe_cal.param.payload_address_lsw = cal_block.cal_paddr;
+ afe_cal.param.payload_address_msw = 0;
+ afe_cal.param.mem_map_handle =
+ atomic_read(&this_afe.mem_map_cal_handles[path]);
+
+ pr_debug("%s: AFE cal sent for device port = %d, path = %d, cal size = %d, cal addr = 0x%x\n",
+ __func__, port_id, path,
+ cal_block.cal_size, cal_block.cal_paddr);
+
+ atomic_set(&this_afe.state, 1);
+ result = apr_send_pkt(this_afe.apr, (uint32_t *) &afe_cal);
+ if (result < 0) {
+ pr_err("%s: AFE cal for port %d failed\n",
+ __func__, port_id);
+ }
+
+ result = wait_event_timeout(this_afe.wait[index],
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!result) {
+ pr_err("%s: wait_event timeout SET AFE CAL\n", __func__);
+ goto done;
+ }
+
+ pr_debug("%s: AFE cal sent for path %d device!\n", __func__, path);
+done:
+ return;
}
void afe_send_cal(u16 port_id)
@@ -1933,6 +2009,7 @@
int i = 0;
atomic_set(&this_afe.state, 0);
atomic_set(&this_afe.status, 0);
+ atomic_set(&this_afe.mem_map_cal_index, -1);
this_afe.apr = NULL;
this_afe.mmap_handle = 0;
for (i = 0; i < AFE_MAX_PORTS; i++)
@@ -1948,9 +2025,9 @@
config_debug_fs_exit();
for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
- if (afe_cal_addr[i].cal_paddr != 0)
+ if (this_afe.afe_cal_addr[i].cal_paddr != 0)
afe_cmd_memory_unmap_nowait(
- afe_cal_addr[i].cal_paddr);
+ this_afe.afe_cal_addr[i].cal_paddr);
}
}
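
afe_send_cal_block() above caches the calibration mapping per path and only remaps when the cached region no longer covers the block returned by get_afe_cal(). A small hypothetical predicate restating that rule, using the acdb_cal_block fields seen in the hunks:

/* Hypothetical predicate equivalent to the remap condition above: remap if
 * the physical address changed, or the new block is larger than whatever
 * was mapped for this path last time. */
static bool afe_cal_needs_remap(const struct acdb_cal_block *cached,
				const struct acdb_cal_block *cur)
{
	return cached->cal_paddr != cur->cal_paddr ||
	       cur->cal_size > cached->cal_size;
}
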
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index b799e59..263f47f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -505,9 +505,9 @@
cvs_session_cmd.hdr.opcode =
VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
if (is_volte_session(v->session_id)) {
- strlcpy(mvm_session_cmd.mvm_session.name,
+ strlcpy(cvs_session_cmd.cvs_session.name,
"default volte voice",
- sizeof(mvm_session_cmd.mvm_session.name));
+ sizeof(cvs_session_cmd.cvs_session.name));
} else {
strlcpy(cvs_session_cmd.cvs_session.name,
"default modem voice",
@@ -3455,7 +3455,9 @@
v->dev_tx.mute = mute;
- if (v->voc_state == VOC_RUN)
+ if ((v->voc_state == VOC_RUN) ||
+ (v->voc_state == VOC_CHANGE) ||
+ (v->voc_state == VOC_STANDBY))
ret = voice_send_mute_cmd(v);
mutex_unlock(&v->lock);
@@ -3661,7 +3663,9 @@
v->dev_rx.volume = vol_idx;
- if (v->voc_state == VOC_RUN)
+ if ((v->voc_state == VOC_RUN) ||
+ (v->voc_state == VOC_CHANGE) ||
+ (v->voc_state == VOC_STANDBY))
ret = voice_send_vol_index_cmd(v);
mutex_unlock(&v->lock);
@@ -3753,7 +3757,9 @@
mutex_lock(&v->lock);
- if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR) {
+ if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR ||
+ v->voc_state == VOC_STANDBY) {
+
pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
ret = voice_destroy_vocproc(v);
@@ -3767,6 +3773,69 @@
return ret;
}
+int voc_standby_voice_call(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ struct apr_hdr mvm_standby_voice_cmd;
+ void *apr_mvm;
+ u16 mvm_handle;
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s: voc state=%d", __func__, v->voc_state);
+ if (v->voc_state == VOC_RUN) {
+ apr_mvm = common.apr_q6_mvm;
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+ mvm_standby_voice_cmd.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mvm_standby_voice_cmd.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_standby_voice_cmd) - APR_HDR_SIZE);
+ pr_debug("send mvm_standby_voice_cmd pkt size = %d\n",
+ mvm_standby_voice_cmd.pkt_size);
+ mvm_standby_voice_cmd.src_port = v->session_id;
+ mvm_standby_voice_cmd.dest_port = mvm_handle;
+ mvm_standby_voice_cmd.token = 0;
+ mvm_standby_voice_cmd.opcode = VSS_IMVM_CMD_STANDBY_VOICE;
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm,
+ (uint32_t *)&mvm_standby_voice_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IMVM_CMD_STANDBY_VOICE\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ v->voc_state = VOC_STANDBY;
+ }
+fail:
+ return ret;
+}
+
+int voc_resume_voice_call(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ ret = voice_send_start_voice_cmd(v);
+ if (ret < 0) {
+ pr_err("Fail in sending START_VOICE\n");
+ goto fail;
+ }
+ v->voc_state = VOC_RUN;
+ return 0;
+fail:
+ return -EINVAL;
+}
+
int voc_start_voice_call(uint16_t session_id)
{
struct voice_data *v = voice_get_session(session_id);
@@ -3845,6 +3914,10 @@
}
v->voc_state = VOC_RUN;
+ } else if (v->voc_state == VOC_STANDBY) {
+ pr_err("Error: start voice in Standby\n");
+ ret = -EINVAL;
+ goto fail;
}
fail:
mutex_unlock(&v->lock);
@@ -3961,6 +4034,7 @@
case VSS_IMVM_CMD_SET_CAL_NETWORK:
case VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE:
case VSS_IMEMORY_CMD_UNMAP:
+ case VSS_IMVM_CMD_STANDBY_VOICE:
pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
v->mvm_state = CMD_STATUS_SUCCESS;
wake_up(&v->mvm_wait);
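
The standby support added above introduces one new session state. The transitions below are inferred from these hunks and from the msm_pcm_trigger() handler earlier in the series, not from vendor documentation, and are summarised as a comment block.

/*
 * Inferred state flow around the new VOC_STANDBY state:
 *
 *   VOC_RUN      --voc_standby_voice_call()-->  VOC_STANDBY
 *   VOC_STANDBY  --voc_resume_voice_call()-->   VOC_RUN
 *
 * The end-call path now also accepts VOC_STANDBY, so a parked session can
 * be torn down directly, while voc_start_voice_call() rejects a start in
 * that state. Mute and volume commands are still delivered while parked.
 */
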
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index aef463f..6fb4b04 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -68,6 +68,7 @@
VOC_CHANGE,
VOC_RELEASE,
VOC_ERROR,
+ VOC_STANDBY,
};
struct mem_buffer {
@@ -171,6 +172,9 @@
#define VSS_IMVM_CMD_START_VOICE 0x00011190
/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+#define VSS_IMVM_CMD_STANDBY_VOICE 0x00011191
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
#define VSS_IMVM_CMD_STOP_VOICE 0x00011192
/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
@@ -1227,6 +1231,8 @@
int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode);
int voc_start_voice_call(uint16_t session_id);
int voc_end_voice_call(uint16_t session_id);
+int voc_standby_voice_call(uint16_t session_id);
+int voc_resume_voice_call(uint16_t session_id);
int voc_set_rxtx_port(uint16_t session_id,
uint32_t dev_port_id,
uint32_t dev_type);